HTSeq-2.1.2-cp313-cp313-macosx_10_15_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- HTSeq/StepVector.py +629 -0
- HTSeq/StretchVector.py +491 -0
- HTSeq/_HTSeq.cpython-313-darwin.so +0 -0
- HTSeq/_HTSeq_internal.py +85 -0
- HTSeq/_StepVector.cpython-313-darwin.so +0 -0
- HTSeq/__init__.py +1249 -0
- HTSeq/features.py +489 -0
- HTSeq/scripts/__init__.py +0 -0
- HTSeq/scripts/count.py +528 -0
- HTSeq/scripts/count_features/__init__.py +0 -0
- HTSeq/scripts/count_features/count_features_per_file.py +465 -0
- HTSeq/scripts/count_features/reads_io_processor.py +187 -0
- HTSeq/scripts/count_features/reads_stats.py +92 -0
- HTSeq/scripts/count_with_barcodes.py +746 -0
- HTSeq/scripts/qa.py +336 -0
- HTSeq/scripts/utils.py +372 -0
- HTSeq/utils.py +92 -0
- htseq-2.1.2.dist-info/METADATA +813 -0
- htseq-2.1.2.dist-info/RECORD +23 -0
- htseq-2.1.2.dist-info/WHEEL +5 -0
- htseq-2.1.2.dist-info/entry_points.txt +4 -0
- htseq-2.1.2.dist-info/licenses/LICENSE +674 -0
- htseq-2.1.2.dist-info/top_level.txt +1 -0
HTSeq/__init__.py
ADDED
@@ -0,0 +1,1249 @@
"""HTSeq is a package to process high-throughput sequencing data.

See htseq.readthedocs.io/en/master/index.html for documentation.
"""

import csv  # needed by read_chrom_lens below
import importlib.metadata
import itertools
import warnings
import os
import shlex
import sys
import re

import HTSeq
from HTSeq._HTSeq import *
from HTSeq.utils import FileOrSequence
from HTSeq.features import *
from HTSeq.StretchVector import StretchVector


#########################
# Version
#########################
__version__ = importlib.metadata.version("HTSeq")


#########################
# GenomicArray
#########################


def read_chrom_lens(filename, delimiter="\t"):
    # Map chromosome names to integer lengths from a delimited two-column file.
    return dict(
        (chrom, int(length))
        for chrom, length in csv.reader(open(filename), delimiter=delimiter)
    )
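
# Usage sketch for read_chrom_lens (the file name is hypothetical; the file
# is expected to have two delimited columns: chromosome name and length):
#
#   chromlens = read_chrom_lens("genome.chrom.sizes")
#   # e.g. {"chr1": 248956422, "chr2": 242193529, ...}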


#########################
# Sequence readers
#########################

_re_fasta_header_line = re.compile(r">\s*(\S+)\s*(.*)")


class FastaReader(FileOrSequence):
    """A FastaReader is associated with a FASTA file or an open connection
    to a file-like object with content in FASTA format.
    It can generate an iterator over the sequences.
    """

    def __init__(self, file_, raw_iterator=False):
        FileOrSequence.__init__(self, file_)
        self.raw_iterator = raw_iterator

    def __iter__(self):
        seq = None
        name = None
        descr = None
        for line in FileOrSequence.__iter__(self):
            if line.startswith(">"):
                if seq:
                    if self.raw_iterator:
                        s = (seq, name, descr)
                    else:
                        s = Sequence(seq.encode(), name)
                        s.descr = descr
                    yield s
                mo = _re_fasta_header_line.match(line)
                name = mo.group(1)
                descr = mo.group(2)
                seq = ""
            else:
                assert seq is not None, "FASTA file does not start with '>'."
                seq += line[:-1]
        if seq is not None:
            if self.raw_iterator:
                s = (seq, name, descr)
            else:
                s = Sequence(seq.encode(), name)
                s.descr = descr
            yield s

    def get_sequence_lengths(self):
        seqname = None
        length = 0
        seqlengths = {}
        for line in FileOrSequence.__iter__(self):
            if line.startswith(">"):
                if seqname is not None:
                    seqlengths[seqname] = length
                mo = _re_fasta_header_line.match(line)
                seqname = mo.group(1)
                length = 0
            else:
                assert seqname is not None, "FASTA file does not start with '>'."
                length += len(line.rstrip())
        if seqname is not None:
            seqlengths[seqname] = length
        return seqlengths

    @staticmethod
    def _import_pysam():
        global pysam
        try:
            import pysam
        except ImportError:
            sys.stderr.write(
                "Please install the 'pysam' package to be able to use the Fasta indexing functionality."
            )
            raise

    def build_index(self, force=False):
        self._import_pysam()
        if not isinstance(self.fos, str):
            raise TypeError(
                "This function only works with FastaReader objects "
                + "connected to a fasta file via file name"
            )
        index_filename = self.fos + ".fai"
        if os.access(index_filename, os.R_OK):
            # the file name is stored as self.fos by FileOrSequence
            if (not force) and os.stat(self.fos).st_mtime <= os.stat(
                index_filename
            ).st_mtime:
                # index is up to date
                return
        pysam.faidx(self.fos)
        if not os.access(index_filename, os.R_OK):
            raise SystemError("Building of Fasta index failed due to unknown error.")

    def __getitem__(self, iv):
        if not isinstance(iv, GenomicInterval):
            raise TypeError("GenomicInterval expected as key.")
        if not isinstance(self.fos, str):
            raise TypeError(
                "This function only works with FastaReader objects "
                + "connected to a fasta file via file name"
            )
        self._import_pysam()
        fasta = pysam.faidx(self.fos, "%s:%d-%d" % (iv.chrom, iv.start, iv.end - 1))
        ans = list(FastaReader(fasta))
        assert len(ans) == 1
        ans[0].name = str(iv)
        if iv.strand != "-":
            return ans[0]
        else:
            return ans[0].get_reverse_complement()
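
# Usage sketch for FastaReader (file name and interval are hypothetical;
# random access needs pysam and a .fai index, as implemented above):
#
#   for seq in FastaReader("genome.fa"):
#       print(seq.name, len(seq.seq))
#
#   reader = FastaReader("genome.fa")
#   reader.build_index()
#   subseq = reader[GenomicInterval("chr1", 1000, 1050, "+")]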


class FastqReader(FileOrSequence):
    """A FastqReader object is associated with a FASTQ file. When an iterator
    is requested from the object, the FASTQ file is read.

    qual_scale is one of "phred", "solexa", "solexa-old".
    """

    def __init__(self, file_, qual_scale="phred", raw_iterator=False):
        FileOrSequence.__init__(self, file_)
        self.qual_scale = qual_scale
        if qual_scale not in ("phred", "solexa", "solexa-old"):
            raise ValueError("Illegal quality scale.")
        self.raw_iterator = raw_iterator

    def __iter__(self):
        fin = FileOrSequence.__iter__(self)
        il = 0
        id1 = None
        id2 = None
        seq = None
        qual = None
        for line in fin:
            # Cycle through the four lines of each FASTQ record.
            if il == 0:
                id1 = line
                il += 1
                continue
            elif il == 1:
                seq = line
                il += 1
                continue
            elif il == 2:
                id2 = line
                il += 1
                continue

            qual = line
            il = 0

            if qual == "":
                if id1 != "":
                    warnings.warn(
                        "Number of lines in FASTQ file is not "
                        "a multiple of 4. Discarding the last, "
                        "incomplete record"
                    )
                break

            if not qual.endswith("\n"):
                qual += "\n"
            if not id1.startswith("@"):
                raise ValueError(
                    "Primary ID line in FASTQ file does "
                    "not start with '@'. Either this is not FASTQ data or the "
                    "parser got out of sync."
                )
            if not id2.startswith("+"):
                raise ValueError(
                    "Secondary ID line in FASTQ file does "
                    "not start with '+'. Maybe got out of sync."
                )
            if len(id2) > 2 and id1[1:] != id2[1:]:
                raise ValueError("Primary and secondary ID line in FASTQ disagree.")

            if self.raw_iterator:
                s = (seq[:-1], id1[1:-1], qual[:-1], self.qual_scale)
            else:
                s = SequenceWithQualities(
                    seq[:-1].encode(), id1[1:-1], qual[:-1].encode(), self.qual_scale
                )
            yield s
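
# Usage sketch for FastqReader (file name hypothetical; each record is read
# from the standard four-line FASTQ layout parsed above):
#
#   for read in FastqReader("reads.fastq", qual_scale="phred"):
#       print(read.name, read.seq[:10])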


class BowtieReader(FileOrSequence):
    """A BowtieReader object is associated with a Bowtie output file that
    contains short read alignments. It can generate an iterator of Alignment
    objects."""

    def __iter__(self):
        for line in FileOrSequence.__iter__(self):
            try:
                algnt = BowtieAlignment(line)
            except ValueError:
                if line.startswith("Reported "):
                    continue
                warnings.warn(
                    "BowtieReader: Ignoring the following line, which could "
                    "not be parsed:\n%s\n" % line,
                    RuntimeWarning,
                )
                # skip the unparseable line announced in the warning above
                continue
            yield algnt


def bundle_multiple_alignments(sequence_of_alignments):
    """Some alignment programs, e.g., Bowtie, can output multiple alignments,
    i.e., the same read is reported consecutively with different alignments.
    This function takes an iterator over alignments and bundles consecutive
    alignments regarding the same read to a list of Alignment objects and
    returns an iterator over these.
    """
    alignment_iter = iter(sequence_of_alignments)
    algnt = next(alignment_iter)
    ma = [algnt]
    for algnt in alignment_iter:
        if algnt.read.name != ma[0].read.name:
            yield ma
            ma = [algnt]
        else:
            ma.append(algnt)
    yield ma
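
# Usage sketch for bundle_multiple_alignments (file name hypothetical; the
# input must report all alignments of a read on consecutive lines):
#
#   for bundle in bundle_multiple_alignments(BowtieReader("hits.bwtout")):
#       print(bundle[0].read.name, "reported", len(bundle), "time(s)")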


class SolexaExportAlignment(Alignment):
    """Iterating over SolexaExportReader objects will yield SolexaExportRecord
    objects. These have five fields:
       read - a SequenceWithQualities object
       aligned - a boolean, indicating whether the object was aligned
       iv - a GenomicInterval giving the alignment (or None, if not aligned)
       passed_filter - a boolean, indicating whether the object passed the filter
       nomatch_code - a code indicating why no match was found (or None, if the
          read was aligned)

    As long as 'aligned' is True, a SolexaExportRecord can be treated as an
    Alignment object.
    """

    def __init__(self):
        # Data is filled in by SolexaExportRecord
        pass

    def __repr__(self):
        if self.aligned:
            return "< %s object: Read '%s', aligned to %s >" % (
                self.__class__.__name__,
                self.read.name,
                self.iv,
            )
        else:
            return "< %s object: Non-aligned read '%s' >" % (
                self.__class__.__name__,
                self.read.name,
            )


class SolexaExportReader(FileOrSequence):
    """Parser for *_export.txt files from the SolexaPipeline software.

    Iterating over a SolexaExportReader yields SolexaExportRecord objects.
    """

    def __init__(self, filename_or_sequence, solexa_old=False):
        FileOrSequence.__init__(self, filename_or_sequence)
        if solexa_old:
            self.qualscale = "solexa-old"
        else:
            self.qualscale = "solexa"

    @classmethod
    def parse_line_bare(dummy, line):
        if line[-1] == "\n":
            line = line[:-1]
        res = {}
        (
            res["machine"],
            res["run_number"],
            res["lane"],
            res["tile"],
            res["x_coord"],
            res["y_coord"],
            res["index_string"],
            res["read_nbr"],
            res["read_seq"],
            res["qual_str"],
            res["chrom"],
            res["contig"],
            res["pos"],
            res["strand"],
            res["match_descr"],
            res["single_read_algnt_score"],
            res["paired_read_algnt_score"],
            res["partner_chrom"],
            res["partner_contig"],
            res["partner_offset"],
            res["partner_strand"],
            res["passed_filtering"],
        ) = line.split("\t")
        return res

    def __iter__(self):
        for line in FileOrSequence.__iter__(self):
            record = SolexaExportAlignment()
            fields = SolexaExportReader.parse_line_bare(line)
            if fields["read_nbr"] != "1":
                warnings.warn(
                    "Paired-end read encountered. PE is so far supported only "
                    "for SAM files, not yet for SolexaExport. All PE-related "
                    "fields are ignored."
                )
            record.read = SequenceWithQualities(
                fields["read_seq"],
                "%s:%s:%s:%s:%s#0"
                % (
                    fields["machine"],
                    fields["lane"],
                    fields["tile"],
                    fields["x_coord"],
                    fields["y_coord"],
                ),
                fields["qual_str"],
                self.qualscale,
            )
            if fields["passed_filtering"] == "Y":
                record.passed_filter = True
            elif fields["passed_filtering"] == "N":
                record.passed_filter = False
            else:
                raise ValueError(
                    "Illegal 'passed filter' value in Solexa export data: '%s'."
                    % fields["passed_filtering"]
                )
            record.index_string = fields["index_string"]
            if fields["pos"] == "":
                record.iv = None
                record.nomatch_code = fields["chrom"]
            else:
                if fields["strand"] == "F":
                    strand = "+"
                elif fields["strand"] == "R":
                    strand = "-"
                else:
                    raise ValueError("Illegal strand value in Solexa export data.")
                start = int(fields["pos"])
                chrom = fields["chrom"]
                if fields["chrom"] == "":
                    chrom = fields["contig"]
                record.iv = GenomicInterval(
                    chrom, start, start + len(fields["read_seq"]), strand
                )
            yield record


class GenomicArrayOfSets(GenomicArray):
    """A GenomicArrayOfSets is a specialization of GenomicArray that allows to store
    sets of objects. On construction, the step vectors are initialized with empty sets.
    By using the 'add_value' method, objects can be added to intervals. If an object
    is already present in the set(s) at this interval, the new object is added to
    the existing set(s), and the steps are split as necessary.
    """

    def __init__(self, chroms, stranded=True, storage="step", memmap_dir=""):
        GenomicArray.__init__(self, chroms, stranded, "O", storage, memmap_dir)

    def add_chrom(self, chrom, length=sys.maxsize, start_index=0):
        GenomicArray.add_chrom(self, chrom, length, start_index)
        for cv in list(self.chrom_vectors[chrom].values()):
            cv[:] = set()
            cv.is_vector_of_sets = True
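
# Usage sketch for GenomicArrayOfSets, mirroring the docstring above
# (coordinates are made up). Overlapping assignments split the step vector:
#
#   gas = GenomicArrayOfSets("auto", stranded=False)
#   gas[GenomicInterval("chr1", 100, 300)] += "gene_A"
#   gas[GenomicInterval("chr1", 200, 400)] += "gene_B"
#   for iv, genes in gas[GenomicInterval("chr1", 0, 500)].steps():
#       print(iv, sorted(genes))
#   # steps carry set(), {"gene_A"}, {"gene_A", "gene_B"}, {"gene_B"}, set()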


###########################
# paired-end handling
###########################


def pair_SAM_alignments(alignments, bundle=False, primary_only=False):
    """Iterate over SAM alignments, name-sorted paired-end

    Args:
        alignments (iterator of SAM/BAM alignments): the alignments to wrap
        bundle (bool): if True, bundle all alignments from one read pair into a
            single yield. If False (default), each pair of alignments is
            yielded separately.
        primary_only (bool): for each read, consider only the primary line
            (SAM flag 0x900 = 0). The SAM specification requires one and only
            one of those for each read.

    Yields:
        2-tuples with each pair of alignments or, if bundle==True, each bundled
        list of alignments.
    """

    mate_missing_count = [0]

    def process_list(almnt_list):
        """Transform a list of alignments with the same read name into pairs

        Args:
            almnt_list (list): alignments to process

        Yields:
            each pair of alignments.

        This function is needed because each line of a BAM file is not a read
        but an alignment. For uniquely mapped and unmapped reads, those two are
        the same. For multimapped reads, however, there can be more than one
        alignment for each read. Also, it is normal for a mapper to uniquely
        map one read and multimap its mate.

        This function goes down the list of alignments for a given read name
        and tries to find the first mate. So if read 1 is uniquely mapped but
        read 2 is mapped 4 times, only (read 1, read 2 - first occurrence) will
        yield; the other 3 alignments of read 2 are ignored.
        """

        while len(almnt_list) > 0:
            a1 = almnt_list.pop(0)
            # Find its mate
            for a2 in almnt_list:
                if a1.pe_which == a2.pe_which:
                    continue
                if a1.aligned != a2.mate_aligned or a1.mate_aligned != a2.aligned:
                    continue
                if not (a1.aligned and a2.aligned):
                    break
                if (
                    a1.iv.chrom == a2.mate_start.chrom
                    and a1.iv.start == a2.mate_start.pos
                    and a2.iv.chrom == a1.mate_start.chrom
                    and a2.iv.start == a1.mate_start.pos
                ):
                    break
            else:
                if a1.mate_aligned:
                    mate_missing_count[0] += 1
                    if mate_missing_count[0] == 1:
                        warnings.warn(
                            "Read "
                            + a1.read.name
                            + " claims to have an aligned mate "
                            + "which could not be found in an adjacent line."
                        )
                a2 = None
            if a2 is not None:
                almnt_list.remove(a2)
            if a1.pe_which == "first":
                yield (a1, a2)
            else:
                assert a1.pe_which == "second"
                yield (a2, a1)

    almnt_list = []
    current_name = None
    for almnt in alignments:
        if not almnt.paired_end:
            raise ValueError(
                "'pair_alignments' needs a sequence of paired-end alignments"
            )
        if almnt.pe_which == "unknown":
            raise ValueError("Paired-end read found with 'unknown' 'pe_which' status.")

        # FIXME: almnt.not_primary_alignment currently means secondary
        if primary_only and (almnt.not_primary_alignment or almnt.supplementary):
            continue

        if almnt.read.name == current_name:
            almnt_list.append(almnt)
        else:
            if bundle:
                yield list(process_list(almnt_list))
            else:
                for p in process_list(almnt_list):
                    yield p
            current_name = almnt.read.name
            almnt_list = [almnt]
    if bundle:
        yield list(process_list(almnt_list))
    else:
        for p in process_list(almnt_list):
            yield p
    if mate_missing_count[0] > 1:
        warnings.warn("%d reads with missing mate encountered." % mate_missing_count[0])


def pair_SAM_alignments_with_buffer(
    alignments, max_buffer_size=30000000, primary_only=False
):
    """Iterate over SAM alignments with buffer, position-sorted paired-end

    Args:
        alignments (iterator of SAM/BAM alignments): the alignments to wrap
        max_buffer_size (int): maximal number of alignments to keep in memory.
        primary_only (bool): for each read, consider only the primary line
            (SAM flag 0x900 = 0). The SAM specification requires one and only
            one of those for each read.

    Yields:
        2-tuples with each pair of alignments.
    """

    almnt_buffer = {}
    ambiguous_pairing_counter = 0
    for almnt in alignments:
        if not almnt.paired_end:
            raise ValueError(
                "Sequence of paired-end alignments expected, but got single-end alignment."
            )
        if almnt.pe_which == "unknown":
            raise ValueError(
                "Cannot process paired-end alignment found with 'unknown' 'pe_which' status."
            )
        # FIXME: almnt.not_primary_alignment currently means secondary
        if primary_only and (almnt.not_primary_alignment or almnt.supplementary):
            continue

        matekey = (
            almnt.read.name,
            "second" if almnt.pe_which == "first" else "first",
            almnt.mate_start.chrom if almnt.mate_aligned else None,
            almnt.mate_start.pos if almnt.mate_aligned else None,
            almnt.iv.chrom if almnt.aligned else None,
            almnt.iv.start if almnt.aligned else None,
            -almnt.inferred_insert_size
            if almnt.aligned and almnt.mate_aligned
            else None,
        )

        if matekey in almnt_buffer:
            if len(almnt_buffer[matekey]) == 1:
                mate = almnt_buffer[matekey][0]
                del almnt_buffer[matekey]
            else:
                mate = almnt_buffer[matekey].pop(0)
                if ambiguous_pairing_counter == 0:
                    ambiguous_pairing_first_occurrence = matekey
                ambiguous_pairing_counter += 1
            if almnt.pe_which == "first":
                yield (almnt, mate)
            else:
                yield (mate, almnt)
        else:
            almntkey = (
                almnt.read.name,
                almnt.pe_which,
                almnt.iv.chrom if almnt.aligned else None,
                almnt.iv.start if almnt.aligned else None,
                almnt.mate_start.chrom if almnt.mate_aligned else None,
                almnt.mate_start.pos if almnt.mate_aligned else None,
                almnt.inferred_insert_size
                if almnt.aligned and almnt.mate_aligned
                else None,
            )
            if almntkey not in almnt_buffer:
                almnt_buffer[almntkey] = [almnt]
            else:
                almnt_buffer[almntkey].append(almnt)
            if len(almnt_buffer) > max_buffer_size:
                raise ValueError(
                    "Maximum alignment buffer size exceeded while pairing SAM alignments."
                )

    if len(almnt_buffer) > 0:
        warnings.warn(
            "Mate records missing for %d records; first such record: %s."
            % (len(almnt_buffer), str(list(almnt_buffer.values())[0][0]))
        )
        for almnt_list in list(almnt_buffer.values()):
            for almnt in almnt_list:
                if almnt.pe_which == "first":
                    yield (almnt, None)
                else:
                    yield (None, almnt)

    if ambiguous_pairing_counter > 0:
        warnings.warn(
            "Mate pairing was ambiguous for %d records; mate key for first such record: %s."
            % (ambiguous_pairing_counter, str(ambiguous_pairing_first_occurrence))
        )
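
# Usage sketch: the buffered variant exposes the same pair-wise interface for
# position-sorted input (file name hypothetical):
#
#   pairs = pair_SAM_alignments_with_buffer(BAM_Reader("pos_sorted.bam"))
#   for first, second in pairs:
#       pass  # mates may be None if the partner record was never seen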


###########################
# variant calls
###########################


_re_vcf_meta_comment = re.compile(r"^##([a-zA-Z]+)=(.*)$")

_re_vcf_meta_descr = re.compile(
    'ID=[^,]+,?|Number=[^,]+,?|Type=[^,]+,?|Description="[^"]+",?'
)

_re_vcf_meta_types = re.compile("[INFO|FILTER|FORMAT]")

_vcf_typemap = {"Integer": int, "Float": float, "String": str, "Flag": bool}


class VariantCall:
    """Class representing a variant call, close to VCF format"""

    def __init__(
        self,
        chrom=None,
        pos=None,
        identifier=None,
        ref=None,
        alt=None,
        qual=None,
        filtr=None,
        info=None,
    ):
        """Class representing a variant call.

        Arguments:
            chrom (str): Chromosome
            pos (int): Position on the chromosome
            identifier (str): ID of the variant
            ref (str): Reference allele
            alt (str): Alternate allele
            qual (str): Quality of the variant
            filtr (str): Filter flag indicating if the variant passed QC.
            info (str): Additional info on the variant
        """
        self.chrom = chrom
        self.pos = pos
        self.id = identifier
        self.ref = ref
        self.alt = alt
        self.qual = qual
        self.filter = filtr
        self.info = info
        self._original_line = None

    @classmethod
    def fromdict(cls, dictionary):
        """Create a VariantCall instance from a dict of properties"""
        ret = cls()
        ret.chrom = dictionary["chrom"]
        ret.pos = dictionary["pos"]
        ret.id = dictionary["id"]
        ret.ref = dictionary["ref"]
        ret.alt = dictionary["alt"]
        ret.qual = dictionary["qual"]
        ret.filter = dictionary["filter"]
        ret.info = dictionary["info"]
        ret._original_line = None
        return ret

    @classmethod
    def fromline(cls, line, nsamples=0, sampleids=[]):
        """Create a VariantCall instance from a VCF line"""
        ret = cls()
        if nsamples == 0:
            ret.format = None
            (
                ret.chrom,
                ret.pos,
                ret.id,
                ret.ref,
                ret.alt,
                ret.qual,
                ret.filter,
                ret.info,
            ) = line.rstrip("\n").split("\t", 7)
        else:
            lsplit = line.rstrip("\n").split("\t")
            (
                ret.chrom,
                ret.pos,
                ret.id,
                ret.ref,
                ret.alt,
                ret.qual,
                ret.filter,
                ret.info,
            ) = lsplit[:8]
            ret.format = lsplit[8].split(":")
            ret.samples = {}
            spos = 9
            for sid in sampleids:
                ret.samples[sid] = dict(
                    (name, value)
                    for (name, value) in zip(ret.format, lsplit[spos].split(":"))
                )
                spos += 1
        ret.pos = GenomicPosition(ret.chrom, int(ret.pos))
        ret.alt = ret.alt.split(",")
        ret._original_line = line
        return ret

    def infoline(self):
        if self.info.__class__ == dict:
            return ";".join(
                map((lambda key: str(key) + "=" + str(self.info[key])), self.info)
            )
        else:
            return self.info

    def get_original_line(self):
        if self._original_line is None:
            warnings.warn(
                "Original line is empty, probably this object was created from scratch and not from a line in a .vcf file!"
            )
        return self._original_line

    def sampleline(self):
        if self.format is None:
            sys.stderr.write("No samples in this variant call!\n")
            return ""
        keys = self.format
        ret = [":".join(keys)]
        for sid in self.samples:
            tmp = []
            for k in keys:
                if k in self.samples[sid]:
                    tmp.append(self.samples[sid][k])
            ret.append(":".join(tmp))
        return "\t".join(ret)

    def to_line(self):
        """Convert into a VCF line"""
        if self.format is None:
            return (
                "\t".join(
                    map(
                        str,
                        [
                            self.pos.chrom,
                            self.pos.pos,
                            self.id,
                            self.ref,
                            ",".join(self.alt),
                            self.qual,
                            self.filter,
                            self.infoline(),
                        ],
                    )
                )
                + "\n"
            )
        else:
            return (
                "\t".join(
                    map(
                        str,
                        [
                            self.pos.chrom,
                            self.pos.pos,
                            self.id,
                            self.ref,
                            ",".join(self.alt),
                            self.qual,
                            self.filter,
                            self.infoline(),
                            self.sampleline(),
                        ],
                    )
                )
                + "\n"
            )

    def __descr__(self):
        return "<VariantCall at %s, ref '%s', alt %s >" % (
            str(self.pos).rstrip("/."),
            self.ref,
            str(self.alt).strip("[]"),
        )

    def __str__(self):
        return "%s:'%s'->%s" % (
            str(self.pos).rstrip("/."),
            self.ref,
            str(self.alt).strip("[]"),
        )

    def unpack_info(self, infodict):
        tmp = {}
        for token in self.info.strip(";").split(";"):
            if re.compile("=").search(token):
                token = token.split("=")
                if token[0] in infodict:
                    tmp[token[0]] = list(map(infodict[token[0]], token[1].split(",")))
                else:
                    tmp[token[0]] = token[1].split(",")
                if len(tmp[token[0]]) == 1:
                    tmp[token[0]] = tmp[token[0]][0]
            else:  # Flag attribute found
                tmp[token] = True
        diff = set(infodict.keys()).difference(set(tmp.keys()))
        for key in diff:
            if infodict[key] == bool:
                tmp[key] = False
        self.info = tmp
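
# Usage sketch for VariantCall (the VCF line is a made-up minimal record
# without sample columns):
#
#   line = "chr1\t101\t.\tA\tG\t29\tPASS\tDP=14;AF=0.5\n"
#   vc = VariantCall.fromline(line)
#   vc.unpack_info({"DP": int, "AF": float})
#   print(vc, vc.info["DP"])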


class VCF_Reader(FileOrSequence):
    """Reader for VCF files.

    This class parses text VCF files from scratch, independently of pysam.
    """

    def __init__(self, filename_or_sequence):
        FileOrSequence.__init__(self, filename_or_sequence)
        self.metadata = {}
        self.info = {}
        self.filters = {}
        self.formats = {}
        self.nsamples = 0
        self.sampleids = []

    def make_info_dict(self):
        self.infodict = {}
        for key in self.info.keys():
            self.infodict[key] = _vcf_typemap[self.info[key]["Type"]]

    def parse_meta(self, header_filename=None):
        if header_filename is None:
            the_iter = FileOrSequence.__iter__(self)
        else:
            the_iter = open(header_filename, "r")

        for line in the_iter:
            if line.startswith("#"):
                if line.startswith("##"):
                    mo = _re_vcf_meta_comment.match(line)
                    if mo:
                        value = mo.group(2)
                        if mo.group(1) == "INFO":
                            value = dict(
                                e.rstrip(",").split("=", 1)
                                for e in _re_vcf_meta_descr.findall(value)
                            )
                            key = value["ID"]
                            del value["ID"]
                            self.info[key] = value
                        elif mo.group(1) == "FILTER":
                            value = dict(
                                e.rstrip(",").split("=", 1)
                                for e in _re_vcf_meta_descr.findall(value)
                            )
                            key = value["ID"]
                            del value["ID"]
                            self.filters[key] = value
                        elif mo.group(1) == "FORMAT":
                            value = dict(
                                e.rstrip(",").split("=", 1)
                                for e in _re_vcf_meta_descr.findall(value)
                            )
                            key = value["ID"]
                            del value["ID"]
                            self.formats[key] = value
                        else:
                            self.metadata[mo.group(1)] = mo.group(2)
                else:
                    self.sampleids = line.rstrip("\t\n").split("\t")[9:]
                    self.nsamples = len(self.sampleids)
                continue
            else:
                break

    def meta_info(self, header_filename=None):
        ret = []
        if header_filename is None:
            the_iter = FileOrSequence.__iter__(self)
        else:
            the_iter = open(header_filename, "r")

        for line in the_iter:
            if line.startswith("#"):
                ret.append(line)
            else:
                break
        return ret

    def __iter__(self):
        for line in FileOrSequence.__iter__(self):
            if line == "\n" or line.startswith("#"):
                continue
            vc = VariantCall.fromline(line, self.nsamples, self.sampleids)
            yield vc
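
# Usage sketch for VCF_Reader (file name hypothetical):
#
#   vcfr = VCF_Reader("calls.vcf")
#   vcfr.parse_meta()      # collect ##INFO/##FILTER/##FORMAT metadata
#   vcfr.make_info_dict()  # build type converters from that metadata
#   for vc in vcfr:
#       vc.unpack_info(vcfr.infodict)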


class WiggleReader(FileOrSequence):
    def __init__(self, filename_or_sequence, verbose=True):
        FileOrSequence.__init__(self, filename_or_sequence)
        self.attributes = {}
        self.stepType = "none"
        self.verbose = verbose

    def __iter__(self):
        span = 1
        pos = None
        step = None
        chrom = None
        for line in FileOrSequence.__iter__(self):
            if line.startswith("track"):
                fields = shlex.split(line)[1:]
                self.attributes = dict(
                    [(p[0], p[1].strip('"')) for p in [x.split("=") for x in fields]]
                )
            elif line.startswith("fixedStep"):  # do fixed step stuff
                self.stepType = "fixed"
                fields = shlex.split(line)[1:]
                declarations = dict(
                    [(p[0], p[1].strip('"')) for p in [x.split("=") for x in fields]]
                )
                pos = int(declarations["start"])
                step = int(declarations["step"])
                chrom = declarations["chrom"]
                if "span" in declarations:
                    span = int(declarations["span"])
                else:
                    span = 1
            elif line.startswith("variableStep"):  # do variable step stuff
                self.stepType = "variable"
                fields = shlex.split(line)[1:]
                declarations = dict(
                    [(p[0], p[1].strip('"')) for p in [x.split("=") for x in fields]]
                )
                chrom = declarations["chrom"]
                if "span" in declarations:
                    span = int(declarations["span"])
                else:
                    span = 1
            elif line.startswith("browser") or line.startswith("#"):
                # Comment or ignored
                if self.verbose:
                    print("Ignored line:", line)
                continue
            else:
                if self.stepType == "fixed":
                    yield (
                        GenomicInterval(chrom, pos, pos + span, "."),
                        float(line.strip()),
                    )
                    pos += step
                elif self.stepType == "variable":
                    tmp = line.strip().split(" ")
                    pos = int(tmp[0])
                    yield (GenomicInterval(chrom, pos, pos + span, "."), float(tmp[1]))
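
# Usage sketch for WiggleReader, which yields (GenomicInterval, value) pairs
# for both fixedStep and variableStep tracks (file name hypothetical):
#
#   for iv, value in WiggleReader("coverage.wig", verbose=False):
#       print(iv, value)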


class BAM_Reader:
    """Parser for SAM/BAM/CRAM files.

    This is a thin wrapper on top of pysam.AlignmentFile. It detects
    automatically whether the input file is text (SAM) or binary (BAM/CRAM) via
    the HTSlib library.
    """

    def __init__(self, filename, check_sq=True):
        """Parser for SAM/BAM/CRAM files, a thin layer over pysam.AlignmentFile.

        Arguments:
            filename (str, Path): The path to the input file to read
            check_sq (bool): check if SQ entries are present in header
        """

        global pysam
        self.filename = filename
        self.sf = None
        self.record_no = -1
        self.check_sq = check_sq
        try:
            import pysam
        except ImportError:
            sys.stderr.write("Please install pysam to use the BAM_Reader class")
            raise
        self._open_file()

    def _open_file(self):
        self.sf = pysam.AlignmentFile(
            self.filename,
            check_sq=self.check_sq,
        )

    def close(self):
        """Close the BAM file for clean up"""
        self.sf.close()

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.close()

    def __iter__(self):
        self.record_no = 0
        for pa in self.sf:
            yield SAM_Alignment.from_pysam_AlignedSegment(pa, self.sf)
            self.record_no += 1

    def fetch(self, reference=None, start=None, end=None, region=None):
        self.record_no = 0
        try:
            for pa in self.sf.fetch(reference, start, end, region):
                yield SAM_Alignment.from_pysam_AlignedRead(pa, self.sf)
                self.record_no += 1
        except ValueError as e:
            if str(e) == "fetch called on bamfile without index":
                print("Error: ", str(e))
                print(
                    "Your bam index file is missing or wrongly named, convention is that file 'x.bam' has index file 'x.bam.bai'!"
                )
            else:
                raise

    def get_line_number_string(self):
        if self.record_no == -1:
            return "unopened file %s" % (self.filename)
        else:
            return "record #%d in file %s" % (self.record_no, self.filename)

    def __getitem__(self, iv):
        if not isinstance(iv, GenomicInterval):
            raise TypeError(
                "Use a HTSeq.GenomicInterval to access regions within .bam-file!"
            )
        if self.sf is None:
            self._open_file()

        if (hasattr(self.sf, "_hasIndex") and (not self.sf._hasIndex())) or (
            not self.sf.has_index()
        ):
            raise ValueError("The .bam-file has no index, random-access is disabled!")
        for pa in self.sf.fetch(iv.chrom, iv.start + 1, iv.end):
            yield SAM_Alignment.from_pysam_AlignedRead(pa, self.sf)

    def get_header_dict(self):
        return self.sf.header

    def get_template(self):
        return self.sf
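
# Usage sketch for BAM_Reader (file name hypothetical; random access via
# __getitem__ requires a BAM index):
#
#   with BAM_Reader("sample.bam") as bamr:
#       for almnt in bamr:
#           if almnt.aligned:
#               print(almnt.read.name, almnt.iv)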


# NOTE: this will be deprecated
SAM_Reader = BAM_Reader


class BAM_Writer:
    """Writer for SAM/BAM/CRAM files, a thin layer over pysam.AlignmentFile"""

    def __init__(
        self,
        filename,
        template=None,
        referencenames=None,
        referencelengths=None,
        text=None,
        header=None,
    ):
        try:
            import pysam
        except ImportError:
            sys.stderr.write("Please install pysam to use the BAM_Writer class")
            raise

        self.filename = filename
        self.template = template
        self.referencenames = referencenames
        self.referencelengths = referencelengths
        self.text = text
        self.header = header
        self.sf = pysam.AlignmentFile(
            self.filename,
            mode="wb",
            template=self.template,
            referencenames=self.referencenames,
            referencelengths=self.referencelengths,
            text=self.text,
            header=self.header,
        )

    @classmethod
    def from_BAM_Reader(cls, fn, br):
        return BAM_Writer(filename=fn, header=br.get_header_dict())

    def write(self, alnmt):
        self.sf.write(alnmt.to_pysam_AlignedSegment(self.sf))

    def close(self):
        self.sf.close()

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.close()
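
# Usage sketch for BAM_Writer: copy alignments into a new BAM file, reusing
# the header of an existing reader (file names hypothetical):
#
#   bamr = BAM_Reader("in.bam")
#   with BAM_Writer.from_BAM_Reader("out.bam", bamr) as bamw:
#       for almnt in bamr:
#           bamw.write(almnt)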


class BED_Reader(FileOrSequence):
    """Reader for BED files.

    This class simply parses the BED as a text file and converts the various
    columns into HTSeq objects. For each row it extracts:

    - a GenomicInterval with chromosome equal to the first column, start and
      end to the second and third columns, and strandedness equal to the sixth
      column;

    - a GenomicFeature with name equal to the fourth column (or "unnamed"),
      type set to "BED line", score equal to the fifth column and interval set
      as the GenomicInterval above.

    - If the "thick" start and end are provided in the BED file as seventh and
      eighth columns, they are stored as a GenomicInterval in the "thick"
      attribute of the GenomicFeature above (i.e. feature.thick).

    - If the itemRgb color line is provided in the BED file as ninth column, it
      is stored as "itemRgb" attribute in the GenomicFeature above (i.e.
      feature.itemRgb).

    - The blockCount, blockStart, blockSizes columns (tenth to twelfth) are
      currently ignored, this might change in the future.

    Rows starting with "track" are skipped.
    """

    def __init__(self, filename_or_sequence):
        FileOrSequence.__init__(self, filename_or_sequence)

    def __iter__(self):
        for line in FileOrSequence.__iter__(self):
            if line.startswith("track"):
                continue
            fields = line.split()
            if len(fields) < 3:
                raise ValueError("BED file line contains less than 3 fields")
            if len(fields) > 12:
                raise ValueError("BED file line contains more than 12 fields")
            iv = GenomicInterval(
                fields[0],
                int(fields[1]),
                int(fields[2]),
                fields[5] if len(fields) > 5 else ".",
            )
            f = GenomicFeature(
                fields[3] if len(fields) > 3 else "unnamed", "BED line", iv
            )
            f.score = float(fields[4]) if len(fields) > 4 else None
            f.thick = (
                GenomicInterval(iv.chrom, int(fields[6]), int(fields[7]), iv.strand)
                if len(fields) > 7
                else None
            )
            f.itemRgb = (
                [int(a) for a in fields[8].split(",")] if len(fields) > 8 else None
            )
            yield f
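
# Usage sketch for BED_Reader (file name hypothetical):
#
#   for feature in BED_Reader("regions.bed"):
#       print(feature.name, feature.iv, feature.score)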


class BigWig_Reader:
    """A simple reader for BigWig files (using pyBigWig)"""

    def __init__(self, filename):
        """Parser for BigWig files, a thin layer over pyBigWig.

        Arguments:
            filename (str, Path): The path to the input file to read
        """
        global pyBigWig

        try:
            import pyBigWig
        except ImportError:
            sys.stderr.write("Please install pyBigWig to use the BigWig_Reader class")
            raise

        self.filename = filename
        self.sf = pyBigWig.open(filename)

    def close(self):
        self.sf.close()

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.close()

    def chroms(self):
        """Return the list of chromosomes and their lengths, as a dictionary.

        Example:

          bw.chroms() -> {'chr1': 4568999, 'chr2': 87422, ...}
        """
        return self.sf.chroms()

    def intervals(self, chrom, strand=".", raw=False):
        """Lazy iterator over genomic intervals

        Args:
            chrom (str): The chromosome/scaffold to find intervals for.
            strand ('.', '+', or '-'): Strandedness of the yielded
                GenomicInterval. If raw=True, this argument is ignored.
            raw (bool): If True, return the raw triplet from pyBigWig. If False,
                return the result wrapped in a GenomicInterval with the
                appropriate strandedness.
        """
        for chrom, start, end in self.sf.intervals(chrom):
            if raw:
                yield (chrom, start, end)
            else:
                yield GenomicInterval(chrom, start, end, strand=strand)
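
# Usage sketch for BigWig_Reader (file name hypothetical; requires pyBigWig):
#
#   with BigWig_Reader("signal.bw") as bw:
#       print(bw.chroms())
#       for iv in bw.intervals("chr1"):
#           pass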


# TODO: make a BigWig_Writer class with buffered write operations, i.e. move it
# from the .pyx file. One would probably want to lazy out the header by element
# as well.