HTSeq 2.0.7__cp312-cp312-macosx_10_9_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
HTSeq/__init__.py ADDED
@@ -0,0 +1,1131 @@
1
+ """HTSeq is a package to process high-throughput sequencing data.
2
+
3
+ See htseq.readthedocs.io/en/master/index.html for documentation.
4
+ """
5
+
6
+ import itertools
7
+ import warnings
8
+ import os
9
+ import shlex
10
+ import sys
+ import re   # used below for FASTA/VCF header parsing
+ import csv  # used below by read_chrom_lens
11
+
12
+ import HTSeq
13
+ from HTSeq._HTSeq import *
14
+ from HTSeq.utils import FileOrSequence
15
+ from HTSeq.features import *
16
+ from HTSeq.StretchVector import StretchVector
17
+ from HTSeq._version import __version__
18
+
19
+
20
+ #########################
21
+ # GenomicArray
22
+ #########################
23
+
24
+ def read_chrom_lens(filename, delimiter="\t"):
25
+     with open(filename) as f:
26
+         return {chrom: int(length)
27
+                 for chrom, length in csv.reader(f, delimiter=delimiter)}
28
+
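A minimal usage sketch, assuming "chrom_sizes.tsv" is a two-column, tab-separated file of chromosome names and lengths (the path is a placeholder):

    chrom_lens = read_chrom_lens("chrom_sizes.tsv")
    # e.g. {'chr1': 248956422, 'chr2': 242193529, ...}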
29
+
30
+ #########################
31
+ # Sequence readers
32
+ #########################
33
+
34
+ _re_fasta_header_line = re.compile(r'>\s*(\S+)\s*(.*)')
35
+
36
+
37
+ class FastaReader(FileOrSequence):
38
+     """A FastaReader is associated with a FASTA file or an open connection
39
+ to a file-like object with content in FASTA format.
40
+ It can generate an iterator over the sequences.
41
+ """
42
+
43
+ def __init__(self, file_, raw_iterator=False):
44
+ FileOrSequence.__init__(self, file_)
45
+ self.raw_iterator = raw_iterator
46
+
47
+ def __iter__(self):
48
+ seq = None
49
+ name = None
50
+ descr = None
51
+ for line in FileOrSequence.__iter__(self):
52
+ if line.startswith(">"):
53
+ if seq:
54
+ if self.raw_iterator:
55
+ s = (seq, name, descr)
56
+ else:
57
+ s = Sequence(seq.encode(), name)
58
+ s.descr = descr
59
+ yield s
60
+ mo = _re_fasta_header_line.match(line)
61
+ name = mo.group(1)
62
+ descr = mo.group(2)
63
+ seq = ""
64
+ else:
65
+ assert seq is not None, "FASTA file does not start with '>'."
66
+ seq += line[:-1]
67
+ if seq is not None:
68
+ if self.raw_iterator:
69
+ s = (seq, name, descr)
70
+ else:
71
+ s = Sequence(seq.encode(), name)
72
+ s.descr = descr
73
+ yield s
74
+
75
+ def get_sequence_lengths(self):
76
+ seqname = None
77
+ length = 0
78
+ seqlengths = {}
79
+ for line in FileOrSequence.__iter__(self):
80
+ if line.startswith(">"):
81
+ if seqname is not None:
82
+ seqlengths[seqname] = length
83
+ mo = _re_fasta_header_line.match(line)
84
+ seqname = mo.group(1)
85
+ length = 0
86
+ else:
87
+ assert seqname is not None, "FASTA file does not start with '>'."
88
+ length += len(line.rstrip())
89
+ if seqname is not None:
90
+ seqlengths[seqname] = length
91
+ return seqlengths
92
+
93
+ @staticmethod
94
+ def _import_pysam():
95
+ global pysam
96
+ try:
97
+ import pysam
98
+ except ImportError:
99
+ sys.stderr.write(
100
+                 "Please install the 'pysam' package to be able to use the FASTA indexing functionality.\n")
101
+ raise
102
+
103
+ def build_index(self, force=False):
104
+ self._import_pysam()
105
+ if not isinstance(self.fos, str):
106
+ raise TypeError(
107
+ "This function only works with FastaReader objects " +
108
+ "connected to a fasta file via file name")
109
+ index_filename = self.fos + ".fai"
110
+ if os.access(index_filename, os.R_OK):
111
+             if (not force) and os.stat(self.fos).st_mtime <= \
112
+ os.stat(index_filename).st_mtime:
113
+ # index is up to date
114
+ return
115
+ pysam.faidx(self.fos)
116
+ if not os.access(index_filename, os.R_OK):
117
+ raise SystemError(
118
+ "Building of Fasta index failed due to unknown error.")
119
+
120
+ def __getitem__(self, iv):
121
+ if not isinstance(iv, GenomicInterval):
122
+ raise TypeError("GenomicInterval expected as key.")
123
+ if not isinstance(self.fos, str):
124
+ raise TypeError(
125
+ "This function only works with FastaReader objects " +
126
+ "connected to a fasta file via file name")
127
+ self._import_pysam()
128
+ fasta = pysam.faidx(
129
+ self.fos,
130
+             "%s:%d-%d" % (iv.chrom, iv.start + 1, iv.end))  # faidx regions are 1-based, inclusive
131
+ ans = list(FastaReader(fasta))
132
+ assert len(ans) == 1
133
+ ans[0].name = str(iv)
134
+ if iv.strand != "-":
135
+ return ans[0]
136
+ else:
137
+ return ans[0].get_reverse_complement()
138
+
139
+
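A minimal iteration sketch for FastaReader, assuming a FASTA file "genome.fa" exists (placeholder path); each yielded item is a Sequence with 'name' and 'descr' attributes:

    for s in FastaReader("genome.fa"):
        print(s.name, len(s.seq))

    lengths = FastaReader("genome.fa").get_sequence_lengths()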
140
+ class FastqReader(FileOrSequence):
141
+     """A FastqReader object is associated with a FASTQ file. When an iterator
142
+ is requested from the object, the FASTQ file is read.
143
+
144
+ qual_scale is one of "phred", "solexa", "solexa-old".
145
+ """
146
+
147
+ def __init__(self, file_, qual_scale="phred", raw_iterator=False):
148
+ FileOrSequence.__init__(self, file_)
149
+ self.qual_scale = qual_scale
150
+ if qual_scale not in ("phred", "solexa", "solexa-old"):
151
+ raise ValueError("Illegal quality scale.")
152
+ self.raw_iterator = raw_iterator
153
+
154
+ def __iter__(self):
155
+ fin = FileOrSequence.__iter__(self)
156
+ il = 0
157
+ id1 = None
158
+ id2 = None
159
+ seq = None
160
+ qual = None
161
+ for line in fin:
162
+ if il == 0:
163
+ id1 = line
164
+ il += 1
165
+ continue
166
+ elif il == 1:
167
+ seq = line
168
+ il += 1
169
+ continue
170
+ elif il == 2:
171
+ id2 = line
172
+ il += 1
173
+ continue
174
+
175
+ qual = line
176
+ il = 0
177
+
178
+ if qual == "":
179
+ if id1 != "":
180
+ warnings.warn(
181
+ "Number of lines in FASTQ file is not "
182
+ "a multiple of 4. Discarding the last, "
183
+ "incomplete record")
184
+ break
185
+
186
+ if not qual.endswith("\n"):
187
+ qual += "\n"
188
+ if not id1.startswith("@"):
189
+ raise ValueError(
190
+ "Primary ID line in FASTQ file does "
191
+ "not start with '@'. Either this is not FASTQ data or the "
192
+ "parser got out of sync.")
193
+ if not id2.startswith("+"):
194
+ raise ValueError(
195
+                     "Secondary ID line in FASTQ file does "
196
+                     "not start with '+'. Maybe the parser got out of sync.")
197
+ if len(id2) > 2 and id1[1:] != id2[1:]:
198
+ raise ValueError(
199
+                     "Primary and secondary ID lines in FASTQ "
200
+                     "file disagree.")
201
+
202
+ if self.raw_iterator:
203
+ s = (seq[:-1], id1[1:-1], qual[:-1], self.qual_scale)
204
+ else:
205
+ s = SequenceWithQualities(
206
+ seq[:-1].encode(), id1[1:-1],
207
+ qual[:-1].encode(),
208
+ self.qual_scale)
209
+ yield s
210
+
211
+
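A minimal iteration sketch for FastqReader (the path "reads.fastq" is a placeholder); each record is a SequenceWithQualities whose 'qual' attribute holds the decoded integer scores:

    for read in FastqReader("reads.fastq", qual_scale="phred"):
        print(read.name, read.seq, read.qual)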
212
+ class BowtieReader(FileOrSequence):
213
+ """A BowtieFile object is associated with a Bowtie output file that
214
+ contains short read alignments. It can generate an iterator of Alignment
215
+ objects."""
216
+
217
+ def __iter__(self):
218
+ for line in FileOrSequence.__iter__(self):
219
+ try:
220
+ algnt = BowtieAlignment(line)
221
+ except ValueError:
222
+ if line.startswith("Reported "):
223
+ continue
224
+ warnings.warn(
225
+ "BowtieReader: Ignoring the following line, which could "
226
+ "not be parsed:\n%s\n" % line,
227
+                     RuntimeWarning)
+                 continue  # do not fall through to yield a stale alignment
228
+ yield algnt
229
+
230
+
231
+ def bundle_multiple_alignments(sequence_of_alignments):
232
+ """Some alignment programs, e.g., Bowtie, can output multiple alignments,
233
+ i.e., the same read is reported consecutively with different alignments.
234
+ This function takes an iterator over alignments and bundles consecutive
235
+     alignments pertaining to the same read into a list of Alignment objects and
236
+ returns an iterator over these.
237
+ """
238
+ alignment_iter = iter(sequence_of_alignments)
239
+     try:
+         algnt = next(alignment_iter)
+     except StopIteration:
+         return  # empty input: yield nothing (PEP 479 would otherwise raise RuntimeError)
240
+ ma = [algnt]
241
+ for algnt in alignment_iter:
242
+ if algnt.read.name != ma[0].read.name:
243
+ yield ma
244
+ ma = [algnt]
245
+ else:
246
+ ma.append(algnt)
247
+ yield ma
248
+
249
+
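A sketch of bundling multiple alignments of the same read from Bowtie output (the path "hits.bwtout" is hypothetical):

    for bundle in bundle_multiple_alignments(BowtieReader("hits.bwtout")):
        print(bundle[0].read.name, len(bundle))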
250
+ class SolexaExportAlignment(Alignment):
251
+     """Iterating over SolexaExportReader objects will yield SolexaExportAlignment
252
+     objects. These have the following fields:
253
+ read - a SequenceWithQualities object
254
+ aligned - a boolean, indicating whether the object was aligned
255
+ iv - a GenomicInterval giving the alignment (or None, if not aligned)
256
+ passed_filter - a boolean, indicating whether the object passed the filter
257
+ nomatch_code - a code indicating why no match was found (or None, if the
258
+ read was aligned)
259
+
260
+     As long as 'aligned' is True, a SolexaExportAlignment can be treated as an
261
+ Alignment object.
262
+ """
263
+
264
+ def __init__(self):
265
+ # Data is filled in by SolexaExportRecord
266
+ pass
267
+
268
+ def __repr__(self):
269
+ if self.aligned:
270
+ return "< %s object: Read '%s', aligned to %s >" % (
271
+ self.__class__.__name__, self.read.name, self.iv)
272
+ else:
273
+ return "< %s object: Non-aligned read '%s' >" % (
274
+ self.__class__.__name__, self.read.name)
275
+
276
+
277
+ class SolexaExportReader(FileOrSequence):
278
+ """Parser for *_export.txt files from the SolexaPipeline software.
279
+
280
+ Iterating over a SolexaExportReader yields SolexaExportRecord objects.
281
+ """
282
+
283
+ def __init__(self, filename_or_sequence, solexa_old=False):
284
+ FileOrSequence.__init__(self, filename_or_sequence)
285
+ if solexa_old:
286
+ self.qualscale = "solexa-old"
287
+ else:
288
+ self.qualscale = "solexa"
289
+
290
+ @classmethod
291
+     def parse_line_bare(cls, line):
292
+ if line[-1] == "\n":
293
+ line = line[:-1]
294
+ res = {}
295
+ (res['machine'],
296
+ res['run_number'],
297
+ res['lane'],
298
+ res['tile'],
299
+ res['x_coord'],
300
+ res['y_coord'],
301
+ res['index_string'],
302
+ res['read_nbr'],
303
+ res['read_seq'],
304
+ res['qual_str'],
305
+ res['chrom'],
306
+ res['contig'],
307
+ res['pos'],
308
+ res['strand'],
309
+ res['match_descr'],
310
+ res['single_read_algnt_score'],
311
+ res['paired_read_algnt_score'],
312
+ res['partner_chrom'],
313
+ res['partner_contig'],
314
+ res['partner_offset'],
315
+ res['partner_strand'],
316
+ res['passed_filtering']) = line.split("\t")
317
+ return res
318
+
319
+ def __iter__(self):
320
+ for line in FileOrSequence.__iter__(self):
321
+ record = SolexaExportAlignment()
322
+ fields = SolexaExportReader.parse_line_bare(line)
323
+ if fields['read_nbr'] != "1":
324
+ warnings.warn(
325
+ "Paired-end read encountered. PE is so far supported only "
326
+ "for SAM files, not yet for SolexaExport. All PE-related "
327
+ "fields are ignored.")
328
+ record.read = SequenceWithQualities(
329
+                 fields['read_seq'].encode(),  # SequenceWithQualities expects bytes, as in FastqReader above
330
+ "%s:%s:%s:%s:%s#0" % (fields['machine'],
331
+ fields['lane'],
332
+ fields['tile'],
333
+ fields['x_coord'],
334
+ fields['y_coord']),
335
+                 fields['qual_str'].encode(), self.qualscale)
336
+ if fields['passed_filtering'] == 'Y':
337
+ record.passed_filter = True
338
+ elif fields['passed_filtering'] == 'N':
339
+ record.passed_filter = False
340
+ else:
341
+ raise ValueError(
342
+ "Illegal 'passed filter' value in Solexa export data: '%s'." % fields['passed_filtering'])
343
+ record.index_string = fields['index_string']
344
+ if fields['pos'] == '':
345
+ record.iv = None
346
+ record.nomatch_code = fields['chrom']
347
+ else:
348
+ if fields['strand'] == 'F':
349
+ strand = '+'
350
+ elif fields['strand'] == 'R':
351
+ strand = '-'
352
+ else:
353
+ raise ValueError(
354
+ "Illegal strand value in Solexa export data.")
355
+ start = int(fields['pos'])
356
+ chrom = fields['chrom']
357
+ if fields['chrom'] == "":
358
+ chrom = fields['contig']
359
+ record.iv = GenomicInterval(
360
+ chrom, start,
361
+ start + len(fields['read_seq']), strand)
362
+ yield record
363
+
364
+
365
+ class GenomicArrayOfSets(GenomicArray):
366
+     """A GenomicArrayOfSets is a specialization of GenomicArray that stores
367
+     sets of objects. On construction, the step vectors are initialized with
368
+     empty sets. Objects can be added to intervals with the 'add_value' method.
369
+     If objects are already present in the set(s) overlapping an interval, the
370
+     new object is added to the existing set(s), and steps are split as necessary.
371
+ """
372
+
373
+ def __init__(self, chroms, stranded=True, storage='step', memmap_dir=""):
374
+ GenomicArray.__init__(self, chroms, stranded, 'O', storage, memmap_dir)
375
+
376
+ def add_chrom(self, chrom, length=sys.maxsize, start_index=0):
377
+ GenomicArray.add_chrom(self, chrom, length, start_index)
378
+ for cv in list(self.chrom_vectors[chrom].values()):
379
+ cv[:] = set()
380
+ cv.is_vector_of_sets = True
381
+
382
+
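A short sketch of typical GenomicArrayOfSets use, following the pattern from the HTSeq documentation; overlapping features split the steps, and each step holds the set of features covering it:

    gas = GenomicArrayOfSets("auto", stranded=False)
    gas[GenomicInterval("chr1", 100, 200, ".")] += "gene_A"
    gas[GenomicInterval("chr1", 150, 250, ".")] += "gene_B"
    for iv, step_set in gas[GenomicInterval("chr1", 120, 230, ".")].steps():
        print(iv, sorted(step_set))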
383
+ ###########################
384
+ # paired-end handling
385
+ ###########################
386
+
387
+ def pair_SAM_alignments(
388
+ alignments,
389
+ bundle=False,
390
+ primary_only=False):
391
+     '''Iterate over name-sorted, paired-end SAM alignments
392
+
393
+ Args:
394
+ alignments (iterator of SAM/BAM alignments): the alignments to wrap
395
+ bundle (bool): if True, bundle all alignments from one read pair into a
396
+ single yield. If False (default), each pair of alignments is
397
+ yielded separately.
398
+ primary_only (bool): for each read, consider only the primary line
399
+ (SAM flag 0x900 = 0). The SAM specification requires one and only
400
+ one of those for each read.
401
+
402
+ Yields:
403
+ 2-tuples with each pair of alignments or, if bundle==True, each bundled
404
+ list of alignments.
405
+ '''
406
+
407
+ mate_missing_count = [0]
408
+
409
+ def process_list(almnt_list):
410
+         '''Transform a list of alignments with the same read name into pairs
411
+
412
+ Args:
413
+ almnt_list (list): alignments to process
414
+
415
+ Yields:
416
+ each pair of alignments.
417
+
418
+ This function is needed because each line of a BAM file is not a read
419
+ but an alignment. For uniquely mapped and unmapped reads, those two are
420
+ the same. For multimapped reads, however, there can be more than one
421
+ alignment for each read. Also, it is normal for a mapper to uniquely
422
+ map one read and multimap its mate.
423
+
424
+ This function goes down the list of alignments for a given read name
425
+ and tries to find the first mate. So if read 1 is uniquely mapped but
426
+         read 2 is mapped 4 times, only (read 1, first occurrence of read 2)
427
+         is yielded; the other 3 alignments of read 2 are ignored.
428
+ '''
429
+
430
+ while len(almnt_list) > 0:
431
+ a1 = almnt_list.pop(0)
432
+ # Find its mate
433
+ for a2 in almnt_list:
434
+ if a1.pe_which == a2.pe_which:
435
+ continue
436
+ if a1.aligned != a2.mate_aligned or a1.mate_aligned != a2.aligned:
437
+ continue
438
+ if not (a1.aligned and a2.aligned):
439
+ break
440
+ if a1.iv.chrom == a2.mate_start.chrom and a1.iv.start == a2.mate_start.pos and \
441
+ a2.iv.chrom == a1.mate_start.chrom and a2.iv.start == a1.mate_start.pos:
442
+ break
443
+ else:
444
+ if a1.mate_aligned:
445
+ mate_missing_count[0] += 1
446
+ if mate_missing_count[0] == 1:
447
+ warnings.warn(
448
+ "Read " + a1.read.name + " claims to have an aligned mate " +
449
+ "which could not be found in an adjacent line.")
450
+ a2 = None
451
+ if a2 is not None:
452
+ almnt_list.remove(a2)
453
+ if a1.pe_which == "first":
454
+ yield (a1, a2)
455
+ else:
456
+ assert a1.pe_which == "second"
457
+ yield (a2, a1)
458
+
459
+ almnt_list = []
460
+ current_name = None
461
+ for almnt in alignments:
462
+ if not almnt.paired_end:
463
+ raise ValueError(
464
+                 "'pair_SAM_alignments' needs a sequence of paired-end alignments")
465
+ if almnt.pe_which == "unknown":
466
+ raise ValueError(
467
+ "Paired-end read found with 'unknown' 'pe_which' status.")
468
+
469
+ # FIXME: almnt.not_primary_alignment currently means secondary
470
+ if primary_only and (almnt.not_primary_alignment or almnt.supplementary):
471
+ continue
472
+
473
+ if almnt.read.name == current_name:
474
+ almnt_list.append(almnt)
475
+ else:
476
+             if bundle:
477
+                 if almnt_list:  # skip the empty bundle before the first read
+                     yield list(process_list(almnt_list))
478
+ else:
479
+ for p in process_list(almnt_list):
480
+ yield p
481
+ current_name = almnt.read.name
482
+ almnt_list = [almnt]
483
+     if bundle:
484
+         if almnt_list:  # guard against empty input
+             yield list(process_list(almnt_list))
485
+ else:
486
+ for p in process_list(almnt_list):
487
+ yield p
488
+ if mate_missing_count[0] > 1:
489
+ warnings.warn("%d reads with missing mate encountered." %
490
+ mate_missing_count[0])
491
+
492
+
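A pairing sketch for a name-sorted BAM file (placeholder path); note that either member of a yielded pair can be None when a mate is missing:

    for first, second in pair_SAM_alignments(BAM_Reader("namesorted.bam")):
        if first is None or second is None:
            continue  # orphan alignment
        print(first.read.name)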
493
+ def pair_SAM_alignments_with_buffer(
494
+ alignments,
495
+ max_buffer_size=30000000,
496
+ primary_only=False):
497
+     '''Iterate over position-sorted, paired-end SAM alignments with a buffer
498
+
499
+ Args:
500
+ alignments (iterator of SAM/BAM alignments): the alignments to wrap
501
+         max_buffer_size (int): maximal number of alignments to keep in memory.
502
+ primary_only (bool): for each read, consider only the primary line
503
+ (SAM flag 0x900 = 0). The SAM specification requires one and only
504
+ one of those for each read.
505
+
506
+ Yields:
507
+ 2-tuples with each pair of alignments.
508
+ '''
509
+
510
+ almnt_buffer = {}
511
+ ambiguous_pairing_counter = 0
512
+ for almnt in alignments:
513
+ if not almnt.paired_end:
514
+ raise ValueError(
515
+ "Sequence of paired-end alignments expected, but got single-end alignment.")
516
+ if almnt.pe_which == "unknown":
517
+ raise ValueError(
518
+ "Cannot process paired-end alignment found with 'unknown' 'pe_which' status.")
519
+ # FIXME: almnt.not_primary_alignment currently means secondary
520
+ if primary_only and (almnt.not_primary_alignment or almnt.supplementary):
521
+ continue
522
+
523
+ matekey = (
524
+ almnt.read.name,
525
+ "second" if almnt.pe_which == "first" else "first",
526
+ almnt.mate_start.chrom if almnt.mate_aligned else None,
527
+ almnt.mate_start.pos if almnt.mate_aligned else None,
528
+ almnt.iv.chrom if almnt.aligned else None,
529
+ almnt.iv.start if almnt.aligned else None,
530
+ -almnt.inferred_insert_size if almnt.aligned and almnt.mate_aligned else None)
531
+
532
+ if matekey in almnt_buffer:
533
+ if len(almnt_buffer[matekey]) == 1:
534
+ mate = almnt_buffer[matekey][0]
535
+ del almnt_buffer[matekey]
536
+ else:
537
+ mate = almnt_buffer[matekey].pop(0)
538
+ if ambiguous_pairing_counter == 0:
539
+                     ambiguous_pairing_first_occurrence = matekey
540
+ ambiguous_pairing_counter += 1
541
+ if almnt.pe_which == "first":
542
+ yield (almnt, mate)
543
+ else:
544
+ yield (mate, almnt)
545
+ else:
546
+ almntkey = (
547
+ almnt.read.name, almnt.pe_which,
548
+ almnt.iv.chrom if almnt.aligned else None,
549
+ almnt.iv.start if almnt.aligned else None,
550
+ almnt.mate_start.chrom if almnt.mate_aligned else None,
551
+ almnt.mate_start.pos if almnt.mate_aligned else None,
552
+ almnt.inferred_insert_size if almnt.aligned and almnt.mate_aligned else None)
553
+ if almntkey not in almnt_buffer:
554
+ almnt_buffer[almntkey] = [almnt]
555
+ else:
556
+ almnt_buffer[almntkey].append(almnt)
557
+ if len(almnt_buffer) > max_buffer_size:
558
+ raise ValueError(
559
+ "Maximum alignment buffer size exceeded while pairing SAM alignments.")
560
+
561
+ if len(almnt_buffer) > 0:
562
+ warnings.warn(
563
+ "Mate records missing for %d records; first such record: %s." %
564
+ (len(almnt_buffer), str(list(almnt_buffer.values())[0][0])))
565
+ for almnt_list in list(almnt_buffer.values()):
566
+ for almnt in almnt_list:
567
+ if almnt.pe_which == "first":
568
+ yield (almnt, None)
569
+ else:
570
+ yield (None, almnt)
571
+
572
+ if ambiguous_pairing_counter > 0:
573
+ warnings.warn(
574
+ "Mate pairing was ambiguous for %d records; mate key for first such record: %s." %
575
+             (ambiguous_pairing_counter, str(ambiguous_pairing_first_occurrence)))
576
+
577
+
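The buffered variant is used the same way on position-sorted input, trading memory for not requiring name sorting (placeholder path):

    reader = BAM_Reader("possorted.bam")
    for first, second in pair_SAM_alignments_with_buffer(reader):
        pass  # process the pair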
578
+ ###########################
579
+ # variant calls
580
+ ###########################
581
+
582
+
583
+ _re_vcf_meta_comment = re.compile(r"^##([a-zA-Z]+)=(.*)$")
584
+
585
+ _re_vcf_meta_descr = re.compile(
586
+ 'ID=[^,]+,?|Number=[^,]+,?|Type=[^,]+,?|Description="[^"]+",?')
587
+
588
+ _re_vcf_meta_types = re.compile("INFO|FILTER|FORMAT")  # alternation, not a character class
589
+
590
+ _vcf_typemap = {
591
+ "Integer": int,
592
+ "Float": float,
593
+ "String": str,
594
+ "Flag": bool
595
+ }
596
+
597
+
598
+ class VariantCall:
599
+ '''Class representing a variant call, close to VCF format'''
600
+
601
+ def __init__(
602
+ self,
603
+ chrom=None,
604
+ pos=None,
605
+ identifier=None,
606
+ ref=None,
607
+ alt=None,
608
+ qual=None,
609
+ filtr=None,
610
+ info=None):
611
+ '''Class representing a variant call.
612
+
613
+ Arguments:
614
+ chrom (str): Chromosome
615
+ pos (int): Position on the chromosome
616
+ identifier (str): ID of the variant
617
+ ref (str): Reference allele
618
+ alt (str): Alternate allele
619
+ qual (str): Quality of the variant
620
+ filtr (str): Filter flag indicating if the variant passed QC.
621
+ info (str): Additional info on the variant
622
+ '''
623
+ self.chrom = chrom
624
+ self.pos = pos
625
+ self.id = identifier
626
+ self.ref = ref
627
+ self.alt = alt
628
+ self.qual = qual
629
+ self.filter = filtr
630
+ self.info = info
631
+ self._original_line = None
632
+
633
+ @classmethod
634
+ def fromdict(cls, dictionary):
635
+ '''Create a VariantCall instance from a dict of properties'''
636
+ ret = cls()
637
+ ret.chrom = dictionary["chrom"]
638
+ ret.pos = dictionary["pos"]
639
+ ret.id = dictionary["id"]
640
+ ret.ref = dictionary["ref"]
641
+ ret.alt = dictionary["alt"]
642
+ ret.qual = dictionary["qual"]
643
+ ret.filter = dictionary["filter"]
644
+ ret.info = dictionary["info"]
645
+         ret._original_line = None
+         return ret
646
+
647
+ @classmethod
648
+ def fromline(cls, line, nsamples=0, sampleids=[]):
649
+ '''Create a VariantCall instance from a VCF line'''
650
+ ret = cls()
651
+ if nsamples == 0:
652
+ ret.format = None
653
+ ret.chrom, ret.pos, ret.id, ret.ref, ret.alt, ret.qual, ret.filter, ret.info = line.rstrip("\n").split("\t", 7)
654
+ else:
655
+ lsplit = line.rstrip("\n").split("\t")
656
+ ret.chrom, ret.pos, ret.id, ret.ref, ret.alt, ret.qual, ret.filter, ret.info = lsplit[:8]
657
+ ret.format = lsplit[8].split(":")
658
+ ret.samples = {}
659
+ spos = 9
660
+ for sid in sampleids:
661
+ ret.samples[sid] = dict((name, value) for (
662
+ name, value) in zip(ret.format, lsplit[spos].split(":")))
663
+ spos += 1
664
+ ret.pos = GenomicPosition(ret.chrom, int(ret.pos))
665
+ ret.alt = ret.alt.split(",")
666
+ ret._original_line = line
667
+ return ret
668
+
669
+ def infoline(self):
670
+         if isinstance(self.info, dict):
671
+ return ";".join(map((lambda key: str(key) + "=" + str(self.info[key])), self.info))
672
+ else:
673
+ return self.info
674
+
675
+     def get_original_line(self):
676
+         if self._original_line is None:
677
+             warnings.warn(
+                 "Original line is empty; probably this object was created "
+                 "from scratch and not from a line in a .vcf file!")
678
+         return self._original_line
679
+
680
+ def sampleline(self):
681
+         if self.format is None:
682
+ sys.stderr.write("No samples in this variant call!\n")
683
+ return ""
684
+ keys = self.format
685
+ ret = [":".join(keys)]
686
+ for sid in self.samples:
687
+ tmp = []
688
+ for k in keys:
689
+ if k in self.samples[sid]:
690
+ tmp.append(self.samples[sid][k])
691
+ ret.append(":".join(tmp))
692
+ return "\t".join(ret)
693
+
694
+ def to_line(self):
695
+ '''Convert into a VCF line'''
696
+         if self.format is None:
697
+ return "\t".join(map(str, [self.pos.chrom, self.pos.pos, self.id, self.ref, ",".join(self.alt), self.qual, self.filter, self.infoline()])) + "\n"
698
+ else:
699
+ return "\t".join(map(str, [self.pos.chrom, self.pos.pos, self.id, self.ref, ",".join(self.alt), self.qual, self.filter, self.infoline(), self.sampleline()])) + "\n"
700
+
701
+     def __repr__(self):
702
+ return "<VariantCall at %s, ref '%s', alt %s >" % (str(self.pos).rstrip("/."), self.ref, str(self.alt).strip("[]"))
703
+
704
+ def __str__(self):
705
+ return "%s:'%s'->%s" % (str(self.pos).rstrip("/."), self.ref, str(self.alt).strip("[]"))
706
+
707
+ def unpack_info(self, infodict):
708
+ tmp = {}
709
+ for token in self.info.strip(";").split(";"):
710
+             if "=" in token:
711
+ token = token.split("=")
712
+ if token[0] in infodict:
713
+ tmp[token[0]] = list(
714
+ map(infodict[token[0]], token[1].split(",")))
715
+ else:
716
+ tmp[token[0]] = token[1].split(",")
717
+ if len(tmp[token[0]]) == 1:
718
+ tmp[token[0]] = tmp[token[0]][0]
719
+ else: # Flag attribute found
720
+ tmp[token] = True
721
+ diff = set(infodict.keys()).difference(set(tmp.keys()))
722
+ for key in diff:
723
+ if infodict[key] == bool:
724
+ tmp[key] = False
725
+ self.info = tmp
726
+
727
+
728
+ class VCF_Reader(FileOrSequence):
729
+ '''Reader for VCF files.
730
+
731
+ This class parses text VCF files from scratch, independently of pysam.
732
+ '''
733
+
734
+ def __init__(self, filename_or_sequence):
735
+ FileOrSequence.__init__(self, filename_or_sequence)
736
+ self.metadata = {}
737
+ self.info = {}
738
+ self.filters = {}
739
+ self.formats = {}
740
+ self.nsamples = 0
741
+ self.sampleids = []
742
+
743
+ def make_info_dict(self):
744
+ self.infodict = {}
745
+ for key in self.info.keys():
746
+ self.infodict[key] = _vcf_typemap[self.info[key]["Type"]]
747
+
748
+ def parse_meta(self, header_filename=None):
749
+ if header_filename is None:
750
+ the_iter = FileOrSequence.__iter__(self)
751
+ else:
752
+ the_iter = open(header_filename, "r")
753
+
754
+ for line in the_iter:
755
+ if line.startswith('#'):
756
+ if line.startswith("##"):
757
+ mo = _re_vcf_meta_comment.match(line)
758
+ if mo:
759
+ value = mo.group(2)
760
+ if mo.group(1) == "INFO":
761
+ value = dict(e.rstrip(",").split("=", 1)
762
+ for e in _re_vcf_meta_descr.findall(value))
763
+ key = value["ID"]
764
+ del value["ID"]
765
+ self.info[key] = value
766
+ elif mo.group(1) == "FILTER":
767
+ value = dict(e.rstrip(",").split("=", 1)
768
+ for e in _re_vcf_meta_descr.findall(value))
769
+ key = value["ID"]
770
+ del value["ID"]
771
+ self.filters[key] = value
772
+ elif mo.group(1) == "FORMAT":
773
+ value = dict(e.rstrip(",").split("=", 1)
774
+ for e in _re_vcf_meta_descr.findall(value))
775
+ key = value["ID"]
776
+ del value["ID"]
777
+ self.formats[key] = value
778
+ else:
779
+ self.metadata[mo.group(1)] = mo.group(2)
780
+ else:
781
+ self.sampleids = line.rstrip("\t\n").split("\t")[9:]
782
+ self.nsamples = len(self.sampleids)
783
+ continue
784
+ else:
785
+ break
786
+
787
+ def meta_info(self, header_filename=None):
788
+ ret = []
789
+ if header_filename is None:
790
+ the_iter = FileOrSequence.__iter__(self)
791
+ else:
792
+ the_iter = open(header_filename, "r")
793
+
794
+ for line in the_iter:
795
+ if line.startswith('#'):
796
+ ret.append(line)
797
+ else:
798
+ break
799
+ return ret
800
+
801
+ def __iter__(self):
802
+ for line in FileOrSequence.__iter__(self):
803
+ if line == "\n" or line.startswith('#'):
804
+ continue
805
+ vc = VariantCall.fromline(line, self.nsamples, self.sampleids)
806
+ yield vc
807
+
808
+
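A reading sketch for VCF_Reader (placeholder path "calls.vcf"): parse_meta() reads the header so that sample columns are recognized, and make_info_dict() enables typed INFO parsing via unpack_info():

    vcf = VCF_Reader("calls.vcf")
    vcf.parse_meta()
    vcf.make_info_dict()
    for vc in vcf:
        vc.unpack_info(vcf.infodict)
        print(vc)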
809
+ class WiggleReader(FileOrSequence):
810
+
811
+ def __init__(self, filename_or_sequence, verbose=True):
812
+ FileOrSequence.__init__(self, filename_or_sequence)
813
+ self.attributes = {}
814
+ self.stepType = 'none'
815
+ self.verbose = verbose
816
+
817
+ def __iter__(self):
818
+ span = 1
819
+ pos = None
820
+ step = None
821
+ chrom = None
822
+ for line in FileOrSequence.__iter__(self):
823
+ if line.startswith('track'):
824
+ fields = shlex.split(line)[1:]
825
+ self.attributes = dict([(p[0], p[1].strip('"'))
826
+ for p in [x.split("=") for x in fields]])
827
+ elif line.startswith('fixedStep'): # do fixed step stuff
828
+ self.stepType = 'fixed'
829
+ fields = shlex.split(line)[1:]
830
+ declarations = dict([(p[0], p[1].strip('"'))
831
+ for p in [x.split("=") for x in fields]])
832
+ pos = int(declarations['start'])
833
+ step = int(declarations['step'])
834
+ chrom = declarations['chrom']
835
+ if 'span' in declarations:
836
+ span = int(declarations['span'])
837
+ else:
838
+ span = 1
839
+ elif line.startswith('variableStep'): # do variable step stuff
840
+ self.stepType = 'variable'
841
+ fields = shlex.split(line)[1:]
842
+ declarations = dict([(p[0], p[1].strip('"'))
843
+ for p in [x.split("=") for x in fields]])
844
+ chrom = declarations['chrom']
845
+ if 'span' in declarations:
846
+ span = int(declarations['span'])
847
+ else:
848
+ span = 1
849
+ elif line.startswith('browser') or line.startswith('#'): # Comment or ignored
850
+ if self.verbose:
851
+ print("Ignored line:", line)
852
+ continue
853
+ else:
854
+ if self.stepType == 'fixed':
855
+ yield (GenomicInterval(chrom, pos, pos + span, '.'), float(line.strip()))
856
+ pos += step
857
+ elif self.stepType == 'variable':
858
+ tmp = line.strip().split(" ")
859
+ pos = int(tmp[0])
860
+ yield (GenomicInterval(chrom, pos, pos + span, '.'), float(tmp[1]))
861
+
862
+
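An iteration sketch for WiggleReader (placeholder path); each item is a (GenomicInterval, value) pair:

    for iv, value in WiggleReader("track.wig", verbose=False):
        print(iv, value)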
863
+ class BAM_Reader:
864
+ '''Parser for SAM/BAM/CRAM files.
865
+
866
+ This is a thin wrapper on top of pysam.AlignmentFile. It detects
867
+ automatically whether the input file is text (SAM) or binary (BAM/CRAM) via
868
+ the HTSlib library.
869
+ '''
870
+
871
+ def __init__(
872
+ self,
873
+ filename,
874
+ check_sq=True):
875
+ '''Parser for SAM/BAM/CRAM files, a thin layer over pysam.AlignmentFile.
876
+
877
+ Arguments:
878
+ filename (str, Path): The path to the input file to read
879
+ check_sq (bool): check if SQ entries are present in header
880
+ '''
881
+
882
+ global pysam
883
+ self.filename = filename
884
+ self.sf = None
885
+ self.record_no = -1
886
+ self.check_sq = check_sq
887
+ try:
888
+ import pysam
889
+ except ImportError:
890
+ sys.stderr.write(
891
+                 "Please install pysam to use the BAM_Reader class.\n")
892
+ raise
893
+ self._open_file()
894
+
895
+ def _open_file(self):
896
+ self.sf = pysam.AlignmentFile(
897
+ self.filename,
898
+ check_sq=self.check_sq,
899
+ )
900
+
901
+ def close(self):
902
+ """Close the BAM file for clean up"""
903
+ self.sf.close()
904
+
905
+ def __enter__(self):
906
+ return self
907
+
908
+ def __exit__(self, type, value, traceback):
909
+ self.close()
910
+
911
+ def __iter__(self):
912
+ self.record_no = 0
913
+ for pa in self.sf:
914
+ yield SAM_Alignment.from_pysam_AlignedSegment(pa, self.sf)
915
+ self.record_no += 1
916
+
917
+ def fetch(self, reference=None, start=None, end=None, region=None):
918
+ self.record_no = 0
919
+ try:
920
+ for pa in self.sf.fetch(reference, start, end, region):
921
+ yield SAM_Alignment.from_pysam_AlignedRead(pa, self.sf)
922
+ self.record_no += 1
923
+         except ValueError as e:
924
+             # exception objects have no '.message' attribute in Python 3
+             if "fetch called on bamfile without index" in str(e):
925
+                 print("Error:", e)
926
+                 print(
927
+                     "Your BAM index file is missing or wrongly named; the convention is that file 'x.bam' has index file 'x.bam.bai'!")
928
+ else:
929
+ raise
932
+
933
+ def get_line_number_string(self):
934
+ if self.record_no == -1:
935
+ return "unopened file %s" % (self.filename)
936
+ else:
937
+ return "record #%d in file %s" % (self.record_no, self.filename)
938
+
939
+ def __getitem__(self, iv):
940
+ if not isinstance(iv, GenomicInterval):
941
+ raise TypeError(
942
+ "Use a HTSeq.GenomicInterval to access regions within .bam-file!")
943
+ if self.sf is None:
944
+ self._open_file()
945
+
946
+ if (hasattr(self.sf, '_hasIndex') and (not self.sf._hasIndex())) or (not self.sf.has_index()):
947
+ raise ValueError(
948
+ "The .bam-file has no index, random-access is disabled!")
949
+ for pa in self.sf.fetch(iv.chrom, iv.start + 1, iv.end):
950
+ yield SAM_Alignment.from_pysam_AlignedRead(pa, self.sf)
951
+
952
+ def get_header_dict(self):
953
+ return self.sf.header
954
+
955
+ def get_template(self):
956
+ return self.sf
957
+
958
+
959
+ # NOTE: this will be deprecated
960
+ SAM_Reader = BAM_Reader
961
+
962
+
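A reading sketch for BAM_Reader (placeholder path); the class is iterable and usable as a context manager:

    with BAM_Reader("sample.bam") as bf:
        for almnt in bf:
            if almnt.aligned:
                print(almnt.read.name, almnt.iv)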
963
+ class BAM_Writer:
964
+ '''Writer for SAM/BAM/CRAM files, a thin layer over pysam.AlignmentFile'''
965
+ def __init__(
966
+ self,
967
+ filename,
968
+ template=None,
969
+ referencenames=None,
970
+ referencelengths=None,
971
+ text=None,
972
+ header=None):
973
+ try:
974
+ import pysam
975
+ except ImportError:
976
+ sys.stderr.write(
977
+                 "Please install pysam to use the BAM_Writer class.\n")
978
+ raise
979
+
980
+ self.filename = filename
981
+ self.template = template
982
+ self.referencenames = referencenames
983
+ self.referencelengths = referencelengths
984
+ self.text = text
985
+ self.header = header
986
+ self.sf = pysam.AlignmentFile(
987
+ self.filename,
988
+ mode="wb",
989
+ template=self.template,
990
+ referencenames=self.referencenames,
991
+ referencelengths=self.referencelengths,
992
+ text=self.text,
993
+ header=self.header)
994
+
995
+ @classmethod
996
+ def from_BAM_Reader(cls, fn, br):
997
+ return BAM_Writer(filename=fn, header=br.get_header_dict())
998
+
999
+ def write(self, alnmt):
1000
+ self.sf.write(alnmt.to_pysam_AlignedSegment(self.sf))
1001
+
1002
+ def close(self):
1003
+ self.sf.close()
1004
+
1005
+ def __enter__(self):
1006
+ return self
1007
+
1008
+ def __exit__(self, type, value, traceback):
1009
+ self.close()
1010
+
1011
+
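A sketch of writing a filtered copy of a BAM file, taking the header from the reader via from_BAM_Reader (placeholder paths):

    br = BAM_Reader("sample.bam")
    with BAM_Writer.from_BAM_Reader("filtered.bam", br) as bw:
        for almnt in br:
            if almnt.aligned:
                bw.write(almnt)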
1012
+ class BED_Reader(FileOrSequence):
1013
+ '''Reader for BED files.
1014
+
1015
+ This class simply parses the BED as a text file and converts the various
1016
+ columns into HTSeq objects. For each row it extracts:
1017
+
1018
+ - a GenomicInterval with chromosome equal to the first column, start and
1019
+ end to the second and third columns, and strandedness equal to the sixth
1020
+ column;
1021
+
1022
+ - a GenomicFeature with name equal to the fourth column (or "unnamed"),
1023
+ type set to "BED line", score equal to the fifth column and interval set
1024
+ as the GenomicInterval above.
1025
+
1026
+ - If the "thick" start and end are provided in the BED file as seventh and
1027
+       eighth columns, they are stored as a GenomicInterval in the "thick"
1028
+ attribute of the GenomicFeature above (i.e. feature.thick).
1029
+
1030
+     - If the itemRgb color value is provided in the BED file as the ninth
1031
+       column, it is stored as the "itemRgb" attribute of the GenomicFeature
1032
+       above (i.e. feature.itemRgb).
1033
+
1034
+     - The blockCount, blockSizes, and blockStarts columns (tenth to twelfth)
1035
+       are currently ignored; this might change in the future.
1036
+
1037
+ Rows starting with "track" are skipped.
1038
+ '''
1039
+
1040
+ def __init__(self, filename_or_sequence):
1041
+ FileOrSequence.__init__(self, filename_or_sequence)
1042
+
1043
+ def __iter__(self):
1044
+ for line in FileOrSequence.__iter__(self):
1045
+ if line.startswith("track"):
1046
+ continue
1047
+ fields = line.split()
1048
+ if len(fields) < 3:
1049
+                 raise ValueError("BED file line contains fewer than 3 fields")
1050
+ if len(fields) > 12:
1051
+ raise ValueError("BED file line contains more than 12 fields")
1052
+ iv = GenomicInterval(
1053
+ fields[0],
1054
+ int(fields[1]),
1055
+ int(fields[2]),
1056
+ fields[5] if len(fields) > 5 else ".")
1057
+ f = GenomicFeature(
1058
+ fields[3] if len(fields) > 3 else "unnamed",
1059
+ "BED line",
1060
+ iv)
1061
+ f.score = float(fields[4]) if len(fields) > 4 else None
1062
+ f.thick = GenomicInterval(
1063
+ iv.chrom,
1064
+ int(fields[6]),
1065
+ int(fields[7]),
1066
+ iv.strand) if len(fields) > 7 else None
1067
+ f.itemRgb = [int(a) for a in fields[8].split(",")
1068
+ ] if len(fields) > 8 else None
1069
+             yield f
1070
+
1071
+
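An iteration sketch for BED_Reader (placeholder path); each yielded GenomicFeature carries its interval and optional score:

    for feature in BED_Reader("regions.bed"):
        print(feature.name, feature.iv, feature.score)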
1072
+ class BigWig_Reader:
1073
+ '''A simple reader for BigWig files (using pyBigWig)'''
1074
+
1075
+ def __init__(self, filename):
1076
+ '''Parser for BigWig files, a thin layer over pyBigWig.
1077
+
1078
+ Arguments:
1079
+ filename (str, Path): The path to the input file to read
1080
+ '''
1081
+ global pyBigWig
1082
+
1083
+ try:
1084
+ import pyBigWig
1085
+ except ImportError:
1086
+ sys.stderr.write(
1087
+                 "Please install pyBigWig to use the BigWig_Reader class.\n")
1088
+ raise
1089
+
1090
+ self.filename = filename
1091
+ self.sf = pyBigWig.open(filename)
1092
+
1093
+ def close(self):
1094
+ self.sf.close()
1095
+
1096
+ def __enter__(self):
1097
+ return self
1098
+
1099
+ def __exit__(self, type, value, traceback):
1100
+ self.close()
1101
+
1102
+ def chroms(self):
1103
+ '''Return the list of chromosomes and their lengths, as a dictionary.
1104
+
1105
+ Example:
1106
+
1107
+ bw.chroms() -> {'chr1': 4568999, 'chr2': 87422, ...}
1108
+ '''
1109
+ return self.sf.chroms()
1110
+
1111
+ def intervals(self, chrom, strand='.', raw=False):
1112
+ '''Lazy iterator over genomic intervals
1113
+
1114
+ Args:
1115
+ chrom (str): The chromosome/scaffold to find intervals for.
1116
+ strand ('.', '+', or '-'): Strandedness of the yielded
1117
+ GenomicInterval. If raw=True, this argument is ignored.
1118
+ raw (bool): If True, return the raw triplet from pyBigWig. If False,
1119
+ return the result wrapped in a GenomicInterval with the
1120
+ appropriate strandedness.
1121
+ '''
1122
+         # pyBigWig.intervals() yields (start, end, value) triplets
+         for (start, end, value) in self.sf.intervals(chrom):
1123
+             if raw:
1124
+                 yield (start, end, value)
1125
+             else:
1126
+                 yield GenomicInterval(chrom, start, end, strand=strand)
1127
+
1128
+
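A reading sketch for BigWig_Reader (placeholder path):

    with BigWig_Reader("coverage.bw") as bw:
        print(bw.chroms())
        for iv in bw.intervals("chr1"):
            print(iv)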
1129
+ # TODO: make a BigWig_Writer class with buffered write operations, i.e. move it
1130
+ # from the .pyx file. One would probably want to lazy out the header by element
1131
+ # as well.