Anchor-annotator 0.0.11-py3-none-any.whl → 0.2.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {Anchor_annotator-0.0.11.dist-info → Anchor_annotator-0.2.0.dist-info}/METADATA +2 -2
- Anchor_annotator-0.2.0.dist-info/RECORD +21 -0
- {Anchor_annotator-0.0.11.dist-info → Anchor_annotator-0.2.0.dist-info}/WHEEL +1 -1
- anchor/_version.py +2 -2
- anchor/main.py +77 -50
- anchor/models.py +827 -455
- anchor/plot.py +471 -412
- anchor/settings.py +9 -1
- anchor/ui_preferences.py +78 -54
- anchor/undo.py +173 -128
- anchor/widgets.py +56 -48
- anchor/workers.py +61 -36
- Anchor_annotator-0.0.11.dist-info/RECORD +0 -21
- {Anchor_annotator-0.0.11.dist-info → Anchor_annotator-0.2.0.dist-info}/LICENSE +0 -0
- {Anchor_annotator-0.0.11.dist-info → Anchor_annotator-0.2.0.dist-info}/top_level.txt +0 -0
anchor/plot.py
CHANGED
@@ -1,5 +1,6 @@
 from __future__ import annotations
 
+import functools
 import logging
 import os.path
 import re
@@ -10,6 +11,7 @@ import numpy as np
 import pyqtgraph as pg
 import sqlalchemy
 from Bio import pairwise2
+from line_profiler_pycharm import profile
 from montreal_forced_aligner.data import CtmInterval
 from montreal_forced_aligner.db import Speaker, Utterance
 from PySide6 import QtCore, QtGui, QtWidgets
@@ -19,6 +21,8 @@ from anchor.models import (
     CorpusModel,
     CorpusSelectionModel,
     DictionaryTableModel,
+    FileSelectionModel,
+    FileUtterancesModel,
     SpeakerModel,
     TextFilterQuery,
 )
@@ -237,6 +241,7 @@ class UtteranceClusterView(pg.PlotWidget):
     def clear_plot(self):
         self.legend_item.clear()
         self.scatter_item.clear()
+        self.getPlotItem().update()
 
     def update_point(self, sender, spots, ev: pg.GraphicsScene.mouseEvents.MouseClickEvent):
         spot = spots[0]
@@ -356,12 +361,20 @@ class UtteranceClusterView(pg.PlotWidget):
         self.scatter_item.setPen(pens)
 
 
+class TimeAxis(pg.AxisItem):
+    def tickStrings(self, values, scale, spacing):
+        strings = super().tickStrings(values, scale, spacing)
+        strings = [x.replace("-", "") for x in strings]
+        return strings
+
+
 class AudioPlotItem(pg.PlotItem):
     def __init__(self, top_point, bottom_point):
-        super().__init__()
+        super().__init__(axisItems={"bottom": TimeAxis("bottom")})
         self.settings = AnchorSettings()
         self.setDefaultPadding(0)
         self.setClipToView(True)
+
         self.getAxis("bottom").setPen(self.settings.value(self.settings.ACCENT_LIGHT_COLOR))
         self.getAxis("bottom").setTextPen(self.settings.value(self.settings.ACCENT_LIGHT_COLOR))
         self.getAxis("bottom").setTickFont(self.settings.small_font)
@@ -399,6 +412,31 @@ class SpeakerTierItem(pg.PlotItem):
         self.setMenuEnabled(False)
         self.hideButtons()
 
+    def contextMenuEvent(self, event: QtWidgets.QGraphicsSceneContextMenuEvent):
+        vb = self.getViewBox()
+        item = self.items[0]
+        x = vb.mapFromItemToView(self, event.pos()).x()
+        begin = max(x - 0.5, 0)
+        end = min(x + 0.5, item.selection_model.model().file.duration)
+        for x in item.visible_utterances.values():
+            if begin >= x.item_min and end <= x.item_max:
+                event.accept()
+                return
+            if begin < x.item_max and begin > x.item_max:
+                begin = x.item_max
+            if end > x.item_min and end < x.item_min:
+                end = x.item_min
+                break
+        if end - begin > 0.001:
+            menu = QtWidgets.QMenu()
+
+            a = QtGui.QAction(menu)
+            a.setText("Create utterance")
+            a.triggered.connect(functools.partial(item.create_utterance, begin=begin, end=end))
+            menu.addAction(a)
+            menu.setStyleSheet(item.settings.menu_style_sheet)
+            menu.exec_(event.screenPos())
+
 
 class UtteranceView(QtWidgets.QWidget):
     undoRequested = QtCore.Signal()
@@ -409,23 +447,15 @@ class UtteranceView(QtWidgets.QWidget):
         super().__init__(*args)
         self.settings = AnchorSettings()
         self.corpus_model: typing.Optional[CorpusModel] = None
+        self.file_model: typing.Optional[FileUtterancesModel] = None
         self.dictionary_model: typing.Optional[DictionaryTableModel] = None
-        self.selection_model: typing.Optional[
+        self.selection_model: typing.Optional[FileSelectionModel] = None
         layout = QtWidgets.QVBoxLayout()
         self.bottom_point = 0
         self.top_point = 8
         self.height = self.top_point - self.bottom_point
         self.separator_point = (self.height / 2) + self.bottom_point
-
-        self.auto_waveform_worker = workers.AutoWaveformWorker()
-        self.spectrogram_worker = workers.SpectrogramWorker()
-        self.pitch_track_worker = workers.PitchWorker()
-        self.speaker_tier_worker = workers.SpeakerTierWorker()
-        self.waveform_worker.signals.result.connect(self.finalize_loading_wave_form)
-        self.auto_waveform_worker.signals.result.connect(self.finalize_loading_auto_wave_form)
-        self.spectrogram_worker.signals.result.connect(self.finalize_loading_spectrogram)
-        self.pitch_track_worker.signals.result.connect(self.finalize_loading_pitch_track)
-        self.speaker_tier_worker.signals.result.connect(self.finalize_loading_utterances)
+
         # self.break_line.setZValue(30)
         self.audio_layout = pg.GraphicsLayoutWidget()
         self.audio_layout.centralWidget.layout.setContentsMargins(0, 0, 0, 0)
@@ -445,7 +475,9 @@ class UtteranceView(QtWidgets.QWidget):
         self.speaker_tier_layout.centralWidget.layout.setContentsMargins(0, 0, 0, 0)
         self.speaker_tier_layout.centralWidget.layout.setSpacing(0)
         self.speaker_tiers: dict[SpeakerTier] = {}
+        self.speaker_tier_items = {}
         self.search_term = None
+        self.default_speaker_id = None
         self.extra_tiers = {}
         self.tier_scroll_area = QtWidgets.QScrollArea()
         self.audio_scroll_area = QtWidgets.QScrollArea()
@@ -467,22 +499,16 @@ class UtteranceView(QtWidgets.QWidget):
         scroll_layout.setSpacing(0)
         self.setLayout(layout)
 
-    def clean_up_for_close(self):
-        self.spectrogram_worker.stop()
-        self.pitch_track_worker.stop()
-        self.waveform_worker.stop()
-        self.auto_waveform_worker.stop()
-        self.speaker_tier_worker.stop()
-
     def set_models(
         self,
         corpus_model: CorpusModel,
-
+        file_model: FileUtterancesModel,
+        selection_model: FileSelectionModel,
         dictionary_model: DictionaryTableModel,
     ):
         self.corpus_model = corpus_model
+        self.file_model = file_model
         self.corpus_model.corpusLoaded.connect(self.set_extra_tiers)
-        self.corpus_model.refreshTiers.connect(self.set_up_new_file)
         self.selection_model = selection_model
         self.dictionary_model = dictionary_model
         for t in self.speaker_tiers.values():
@@ -490,55 +516,61 @@ class UtteranceView(QtWidgets.QWidget):
         self.audio_plot.set_models(self.selection_model)
         self.selection_model.viewChanged.connect(self.update_plot)
         # self.corpus_model.utteranceTextUpdated.connect(self.refresh_utterance_text)
-        self.selection_model.fileChanged.connect(self.set_up_new_file)
-        self.selection_model.channelChanged.connect(self.update_channel)
         self.selection_model.resetView.connect(self.reset_plot)
-
-
-
-
-
-
-
+        self.file_model.utterancesReady.connect(self.finalize_loading_utterances)
+        self.selection_model.spectrogramReady.connect(self.finalize_loading_spectrogram)
+        self.selection_model.pitchTrackReady.connect(self.finalize_loading_pitch_track)
+        self.selection_model.waveformReady.connect(self.finalize_loading_auto_wave_form)
+        self.selection_model.speakerRequested.connect(self.set_default_speaker)
+        self.file_model.selectionRequested.connect(self.finalize_loading_utterances)
+
+    def finalize_loading_utterances(self):
+        if self.file_model.file is None:
            return
+        scroll_to = None
+
         self.speaker_tiers = {}
         self.speaker_tier_items = {}
         self.speaker_tier_layout.clear()
         available_speakers = {}
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        self.
+        speaker_tier_height = self.separator_point - self.bottom_point
+        for i, speaker_id in enumerate(self.file_model.speakers):
+            speaker_name = self.corpus_model.get_speaker_name(speaker_id)
+            top_point = i * speaker_tier_height
+            bottom_point = top_point - speaker_tier_height
+            tier = SpeakerTier(
+                top_point,
+                bottom_point,
+                speaker_id,
+                speaker_name,
+                self.corpus_model,
+                self.file_model,
+                self.selection_model,
+                self.dictionary_model,
+                search_term=self.search_term,
+            )
+            tier.draggingLine.connect(self.audio_plot.update_drag_line)
+            tier.lineDragFinished.connect(self.audio_plot.hide_drag_line)
+            tier.receivedWheelEvent.connect(self.audio_plot.wheelEvent)
+            tier.set_extra_tiers(self.extra_tiers)
+            tier.setZValue(30)
+            available_speakers[speaker_name] = speaker_id
+            self.speaker_tiers[speaker_id] = tier
         for i, (key, tier) in enumerate(self.speaker_tiers.items()):
-            tier.set_speaker_index(0, 1)
             tier.set_available_speakers(available_speakers)
             tier.refresh()
-
+            top_point = i * speaker_tier_height
+            bottom_point = top_point - speaker_tier_height
+            tier_item = SpeakerTierItem(top_point, bottom_point)
             tier_item.setRange(
-                xRange=[self.selection_model.
+                xRange=[self.selection_model.plot_min, self.selection_model.plot_max]
             )
             tier_item.addItem(tier)
             self.speaker_tier_items[key] = tier_item
             self.speaker_tier_layout.addItem(tier_item, i, 0)
+            if tier.speaker_id == self.default_speaker_id:
+                scroll_to = i
         row_height = self.audio_plot_item.height()
-        if len(self.speaker_tiers) > 1 and len(self.extra_tiers) < 2:
-            row_height = int(row_height / 2)
         self.speaker_tier_layout.setFixedHeight(len(self.speaker_tiers) * row_height)
         if len(self.speaker_tiers) > 1:
            self.tier_scroll_area.verticalScrollBar().setSingleStep(row_height)
@@ -553,82 +585,73 @@ class UtteranceView(QtWidgets.QWidget):
             self.audio_layout.centralWidget.layout.setContentsMargins(
                 0, 0, self.settings.scroll_bar_height, 0
             )
+            if scroll_to is not None:
+                # self.tier_scroll_area.scrollContentsBy(0, scroll_to * tier_height)
+                self.tier_scroll_area.verticalScrollBar().setValue(
+                    scroll_to * self.tier_scroll_area.height()
+                )
+            self.default_speaker_id = None
         else:
             self.audio_layout.centralWidget.layout.setContentsMargins(0, 0, 0, 0)
             self.tier_scroll_area.setVerticalScrollBarPolicy(
                 QtCore.Qt.ScrollBarPolicy.ScrollBarAlwaysOff
             )
 
-    def
-
-        if (
-            self.selection_model.current_file is None
-            or file_path != self.selection_model.current_file.sound_file.sound_file_path
-        ):
-            return
-        self.audio_plot.wave_form.y = y
-        self.get_latest_waveform()
+    def set_default_speaker(self, speaker_id):
+        self.default_speaker_id = speaker_id
 
-    def finalize_loading_spectrogram(self
-
-        if
+    def finalize_loading_spectrogram(self):
+        self.audio_plot.spectrogram.hide()
+        if self.selection_model.spectrogram is None:
+            self.audio_plot.spectrogram.clear()
             return
-        self.audio_plot.spectrogram.setData(
+        self.audio_plot.spectrogram.setData(
+            self.selection_model.spectrogram,
+            self.selection_model.selected_channel,
+            self.selection_model.plot_min,
+            self.selection_model.plot_max,
+            self.selection_model.min_db,
+            self.selection_model.max_db,
+        )
 
-    def finalize_loading_pitch_track(self
-        pitch_track
-
-
-        if pitch_track is None:
+    def finalize_loading_pitch_track(self):
+        self.audio_plot.pitch_track.hide()
+        self.audio_plot.pitch_track.clear()
+        if self.selection_model.pitch_track_y is None:
            return
-
-
-
-
+        self.audio_plot.pitch_track.setData(
+            x=self.selection_model.pitch_track_x,
+            y=self.selection_model.pitch_track_y,
+            connect="finite",
+        )
+        self.audio_plot.pitch_track.set_range(
+            self.settings.value(self.settings.PITCH_MIN_F0),
+            self.settings.value(self.settings.PITCH_MAX_F0),
+            self.selection_model.plot_max,
        )
-        self.audio_plot.pitch_track.hide()
-        self.audio_plot.pitch_track.setData(x=x, y=pitch_track, connect="finite")
-        self.audio_plot.pitch_track.set_range(min_f0, max_f0, end)
         self.audio_plot.pitch_track.show()
 
-    def finalize_loading_auto_wave_form(self
-
-        if
+    def finalize_loading_auto_wave_form(self):
+        self.audio_plot.wave_form.hide()
+        if self.selection_model.waveform_y is None:
            return
-
-
+        self.audio_plot_item.setRange(
+            xRange=[self.selection_model.plot_min, self.selection_model.plot_max]
+        )
+        self.audio_plot.update_plot()
+        self.audio_plot.wave_form.setData(
+            x=self.selection_model.waveform_x, y=self.selection_model.waveform_y
        )
-        # self.audio_plot.wave_form.hide()
-        self.audio_plot.wave_form.setData(x=x, y=y)
         self.audio_plot.wave_form.show()
 
-    def get_utterances(self):
-        for tier in self.speaker_tiers.values():
-            tier.reset_tier()
-            self.speaker_tier_layout.removeItem(tier)
-        if self.selection_model.current_file is None:
-            return
-        self.speaker_tier_worker.stop()
-        self.speaker_tier_worker.set_params(self.selection_model.current_file.id)
-        self.speaker_tier_worker.start()
-
     def set_extra_tiers(self):
-        self.speaker_tier_worker.query_alignment = False
-        self.speaker_tier_worker.session = self.corpus_model.session
         self.extra_tiers = {}
-        visible_tiers = self.settings.visible_tiers
         self.extra_tiers["Normalized text"] = "normalized_text"
         if self.corpus_model.has_alignments and "Words" not in self.extra_tiers:
             self.extra_tiers["Words"] = "aligned_word_intervals"
-            if visible_tiers.get("Words", True):
-                self.speaker_tier_worker.query_alignment = True
             self.extra_tiers["Phones"] = "aligned_phone_intervals"
-            if visible_tiers.get("Phones", True):
-                self.speaker_tier_worker.query_alignment = True
         if self.corpus_model.has_reference_alignments and "Reference" not in self.extra_tiers:
             self.extra_tiers["Reference"] = "reference_phone_intervals"
-            if visible_tiers.get("Reference", True):
-                self.speaker_tier_worker.query_alignment = True
         if (
             self.corpus_model.has_transcribed_alignments
             and "Transcription" not in self.extra_tiers
@@ -636,10 +659,6 @@ class UtteranceView(QtWidgets.QWidget):
             self.extra_tiers["Transcription"] = "transcription_text"
             self.extra_tiers["Transcribed words"] = "transcribed_word_intervals"
             self.extra_tiers["Transcribed phones"] = "transcribed_phone_intervals"
-            if visible_tiers.get("Transcribed words", True):
-                self.speaker_tier_worker.query_alignment = True
-            if visible_tiers.get("Transcribed phones", True):
-                self.speaker_tier_worker.query_alignment = True
         if (
             self.corpus_model.has_per_speaker_transcribed_alignments
             and "Transcription" not in self.extra_tiers
@@ -647,29 +666,6 @@ class UtteranceView(QtWidgets.QWidget):
             self.extra_tiers["Transcription"] = "transcription_text"
             self.extra_tiers["Transcribed words"] = "per_speaker_transcribed_word_intervals"
             self.extra_tiers["Transcribed phones"] = "per_speaker_transcribed_phone_intervals"
-            if visible_tiers.get("Transcribed words", True):
-                self.speaker_tier_worker.query_alignment = True
-            if visible_tiers.get("Transcribed phones", True):
-                self.speaker_tier_worker.query_alignment = True
-
-    def update_channel(self):
-        self.get_latest_waveform()
-
-    def set_up_new_file(self, *args):
-        self.audio_plot.spectrogram.cached_begin = None
-        self.audio_plot.spectrogram.cached_end = None
-        self.audio_plot.wave_form.y = None
-        for t in self.speaker_tiers.values():
-            t.visible_utterances = {}
-        self.speaker_tiers = {}
-        if self.selection_model.current_file is None:
-            return
-        self.get_utterances()
-        self.waveform_worker.stop()
-        self.waveform_worker.set_params(
-            self.selection_model.current_file.sound_file.sound_file_path
-        )
-        self.waveform_worker.start()
 
     def set_search_term(self):
         term = self.corpus_model.text_filter
@@ -686,74 +682,23 @@ class UtteranceView(QtWidgets.QWidget):
     def draw_text_grid(self):
         scroll_to = None
         for i, (key, tier) in enumerate(self.speaker_tiers.items()):
+            self.speaker_tier_items[key].hide()
             tier.refresh()
-            if tier.
+            if tier.speaker_id == self.default_speaker_id:
                 scroll_to = i
             tier_height = self.speaker_tier_items[key].height()
             self.speaker_tier_items[key].setRange(
-                xRange=[self.selection_model.
+                xRange=[self.selection_model.plot_min, self.selection_model.plot_max]
            )
+            self.speaker_tier_items[key].show()
         if scroll_to is not None:
-            self.tier_scroll_area.
+            self.tier_scroll_area.verticalScrollBar().setValue(scroll_to * tier_height)
+            self.default_speaker_id = None
 
     def update_show_speakers(self, state):
         self.show_all_speakers = state > 0
         self.update_plot()
 
-    def get_latest_waveform(self):
-        if self.audio_plot.wave_form.y is None:
-            return
-        self.audio_plot.wave_form.hide()
-        # self.audio_plot.spectrogram.hide()
-        self.audio_plot.pitch_track.hide()
-        begin_samp = int(
-            self.selection_model.min_time * self.selection_model.current_file.sample_rate
-        )
-        end_samp = int(
-            self.selection_model.max_time * self.selection_model.current_file.sample_rate
-        )
-        if len(self.audio_plot.wave_form.y.shape) > 1:
-            y = self.audio_plot.wave_form.y[
-                begin_samp:end_samp, self.selection_model.selected_channel
-            ]
-        else:
-            y = self.audio_plot.wave_form.y[begin_samp:end_samp]
-        self.spectrogram_worker.stop()
-        self.spectrogram_worker.set_params(
-            y,
-            self.selection_model.current_file.sound_file.sample_rate,
-            self.selection_model.min_time,
-            self.selection_model.max_time,
-            self.selection_model.selected_channel,
-        )
-        self.spectrogram_worker.start()
-        if self.selection_model.max_time - self.selection_model.min_time <= 10:
-            self.pitch_track_worker.stop()
-            self.pitch_track_worker.set_params(
-                y,
-                self.selection_model.current_file.sound_file.sample_rate,
-                self.selection_model.min_time,
-                self.selection_model.max_time,
-                self.selection_model.selected_channel,
-                self.audio_plot.pitch_track.bottom_point,
-                self.audio_plot.pitch_track.top_point,
-            )
-            self.pitch_track_worker.start()
-        self.auto_waveform_worker.stop()
-        self.auto_waveform_worker.set_params(
-            y,
-            self.audio_plot.wave_form.bottom_point,
-            self.audio_plot.wave_form.top_point,
-            self.selection_model.min_time,
-            self.selection_model.max_time,
-            self.selection_model.selected_channel,
-        )
-        self.auto_waveform_worker.start()
-        self.audio_plot_item.setRange(
-            xRange=[self.selection_model.min_time, self.selection_model.max_time]
-        )
-        self.audio_plot.update_plot()
-
     def reset_plot(self, *args):
         self.reset_text_grid()
         self.audio_plot.wave_form.clear()
@@ -763,9 +708,8 @@ class UtteranceView(QtWidgets.QWidget):
     def update_plot(self, *args):
         if self.corpus_model.rowCount() == 0:
             return
-        if self.
+        if self.file_model.file is None or self.selection_model.min_time is None:
             return
-        self.get_latest_waveform()
         self.audio_plot.update_plot()
         self.draw_text_grid()
 
@@ -780,7 +724,7 @@ class UtteranceView(QtWidgets.QWidget):
             if tier.top_point > pos > tier.bottom_point:
                 new_speaker_id = tier.speaker_id
         if new_speaker_id is not None and new_speaker_id != old_speaker_id:
-            self.
+            self.file_model.update_utterance_speaker(utterance, new_speaker_id)
 
 
 class UtteranceLine(pg.InfiniteLine):
@@ -989,6 +933,8 @@ class UtterancePGTextItem(pg.TextItem):
     ):
         self.anchor = pg.Point(anchor)
         self.rotateAxis = None
+        if selection_model.settings.right_to_left:
+            begin, end = -end, -begin
         self.begin = begin
         self.end = end
         self.selection_model = selection_model
@@ -1016,25 +962,28 @@ class UtterancePGTextItem(pg.TextItem):
         self.top_point = top_point
         self.bottom_point = bottom_point
         self.per_tier_range = per_tier_range
-        self.view_min = self.selection_model.
-        self.view_max = self.selection_model.
+        self.view_min = self.selection_model.plot_min
+        self.view_max = self.selection_model.plot_max
         self.selection_model.viewChanged.connect(self.update_times)
 
     def update_times(self, begin, end):
-        self.hide()
         self.view_min = begin
         self.view_max = end
-
+        if self.end <= self.view_min or self.begin >= self.view_max:
+            return
+        self.hide()
         if (
             self.view_min <= self.begin < self.view_max
             or self.view_max >= self.end > self.view_min
             or (self.begin <= self.view_min and self.end >= self.view_max)
-        )
+        ):
             self.show()
 
     def boundingRect(self):
         br = QtCore.QRectF(self.viewRect()) # bounds of containing ViewBox mapped to local coords.
         vb = self.getViewBox()
+        if self.begin is None or self.view_min is None:
+            return br
         visible_begin = max(self.begin, self.view_min)
         visible_end = min(self.end, self.view_max)
 
@@ -1512,24 +1461,24 @@ class Highlighter(QtGui.QSyntaxHighlighter):
 
 
 class MfaRegion(pg.LinearRegionItem):
-    dragFinished = QtCore.Signal(object)
     textEdited = QtCore.Signal(object, object)
     undoRequested = QtCore.Signal()
     redoRequested = QtCore.Signal()
     playRequested = QtCore.Signal()
-    selectRequested = QtCore.Signal(object, object, object
+    selectRequested = QtCore.Signal(object, object, object)
     audioSelected = QtCore.Signal(object, object)
     viewRequested = QtCore.Signal(object, object)
 
     settings = AnchorSettings()
 
+    @profile
     def __init__(
         self,
         item: CtmInterval,
         corpus_model: CorpusModel,
+        file_model: FileUtterancesModel,
         dictionary_model: typing.Optional[DictionaryTableModel],
-        selection_model:
-        selected: bool = False,
+        selection_model: FileSelectionModel,
         bottom_point: float = 0,
         top_point: float = 1,
     ):
@@ -1538,12 +1487,14 @@ class MfaRegion(pg.LinearRegionItem):
 
         self.item_min = self.item.begin
         self.item_max = self.item.end
+        if selection_model.settings.right_to_left:
+            self.item_min, self.item_max = -self.item_max, -self.item_min
         self.corpus_model = corpus_model
+        self.file_model = file_model
         self.dictionary_model = dictionary_model
         self.selection_model = selection_model
         self.bottom_point = bottom_point
         self.top_point = top_point
-        self.selected = selected
         self.span = (self.bottom_point, self.top_point)
         self.text_margin_pixels = 2
 
@@ -1562,7 +1513,7 @@ class MfaRegion(pg.LinearRegionItem):
         self.border_pen = pg.mkPen(self.break_line_color, width=2)
         self.border_pen.setCapStyle(QtCore.Qt.PenCapStyle.FlatCap)
 
-        if self.
+        if self.selection_model.checkSelected(getattr(self.item, "id", None)):
             self.background_brush = pg.mkBrush(self.selected_interval_color)
         else:
             # self.interval_background_color.setAlpha(0)
@@ -1582,44 +1533,6 @@ class MfaRegion(pg.LinearRegionItem):
         self._boundingRectCache = None
         self.setBrush(self.background_brush)
         self.movable = False
-
-        # note LinearRegionItem.Horizontal and LinearRegionItem.Vertical
-        # are kept for backward compatibility.
-        lineKwds = dict(
-            movable=False,
-            bounds=None,
-            span=self.span,
-            pen=self.pen,
-            hoverPen=self.hoverPen,
-            movingPen=self.movingPen,
-        )
-        self.lines = [
-            UtteranceLine(
-                QtCore.QPointF(self.item_min, 0),
-                angle=90,
-                initial=True,
-                view_min=self.selection_model.min_time,
-                view_max=self.selection_model.max_time,
-                **lineKwds,
-            ),
-            UtteranceLine(
-                QtCore.QPointF(self.item_max, 0),
-                angle=90,
-                initial=False,
-                view_min=self.selection_model.min_time,
-                view_max=self.selection_model.max_time,
-                **lineKwds,
-            ),
-        ]
-
-        for line in self.lines:
-            line.setZValue(30)
-            line.setParentItem(self)
-            line.sigPositionChangeFinished.connect(self.lineMoveFinished)
-        self.lines[0].sigPositionChanged.connect(self._line0Moved)
-        self.lines[1].sigPositionChanged.connect(self._line1Moved)
-        self.lines[0].hoverChanged.connect(self.popup)
-        self.lines[1].hoverChanged.connect(self.popup)
         self.cached_visible_duration = None
         self.cached_view = None
 
@@ -1628,33 +1541,6 @@ class MfaRegion(pg.LinearRegionItem):
         p.setPen(self.border_pen)
         p.drawRect(self.boundingRect())
 
-    def mouseDragEvent(self, ev):
-        if not self.movable or ev.button() != QtCore.Qt.MouseButton.LeftButton:
-            return
-        ev.accept()
-
-        if ev.isStart():
-            bdp = ev.buttonDownPos()
-            self.cursorOffsets = [line.pos() - bdp for line in self.lines]
-            self.startPositions = [line.pos() for line in self.lines]
-            self.moving = True
-
-        if not self.moving:
-            return
-
-        # self.lines[0].blockSignals(True) # only want to update once
-        # for i, l in enumerate(self.lines):
-        # l.setPos(self.cursorOffsets[i] + ev.pos())
-        # self.lines[0].blockSignals(False)
-        self.prepareGeometryChange()
-
-        if ev.isFinish():
-            self.moving = False
-            self.dragFinished.emit(ev.pos())
-            self.sigRegionChangeFinished.emit(self)
-        else:
-            self.sigRegionChanged.emit(self)
-
     def mouseClickEvent(self, ev: QtGui.QMouseEvent):
         if ev.button() != QtCore.Qt.MouseButton.LeftButton:
             ev.ignore()
@@ -1671,26 +1557,14 @@ class MfaRegion(pg.LinearRegionItem):
         self.viewRequested.emit(self.item_min - padding, self.item_max + padding)
         ev.accept()
 
-    def change_editing(self, editable: bool):
-        self.movable = editable
-        self.lines[0].movable = editable
-        self.lines[1].movable = editable
-
     def setSelected(self, selected: bool):
-
-        if self.selected:
+        if selected:
             self.setBrush(pg.mkBrush(self.selected_interval_color))
         else:
             # self.interval_background_color.setAlpha(0)
             self.setBrush(pg.mkBrush(self.interval_background_color))
         self.update()
 
-    def popup(self, hover: bool):
-        if hover or self.moving or self.lines[0].moving or self.lines[1].moving:
-            self.setZValue(30)
-        else:
-            self.setZValue(0)
-
     def setMouseHover(self, hover: bool):
         # Inform the item that the mouse is(not) hovering over it
         if self.mouseHovering == hover:
@@ -1699,30 +1573,38 @@ class MfaRegion(pg.LinearRegionItem):
         self.popup(hover)
         self.update()
 
-    def select_self(self, deselect=False, reset=True
+    def select_self(self, deselect=False, reset=True):
         self.selected = True
         if self.selected and not deselect and not reset:
             return
 
 
 class AlignmentRegion(MfaRegion):
+    @profile
     def __init__(
         self,
         phone_interval: CtmInterval,
         corpus_model: CorpusModel,
+        file_model: FileUtterancesModel,
         selection_model: CorpusSelectionModel,
-        selected: bool = False,
         bottom_point: float = 0,
         top_point: float = 1,
     ):
         super().__init__(
-            phone_interval,
+            phone_interval,
+            corpus_model,
+            file_model,
+            None,
+            selection_model,
+            bottom_point,
+            top_point,
         )
         self.original_text = self.item.label
 
         self.text = pg.TextItem(
             self.item.label, anchor=(0.5, 0.5), color=self.text_color # , border=pg.mkColor("r")
        )
+        self.text.setVisible(False)
 
         self.text.setFont(self.settings.font)
         options = QtGui.QTextOption()
@@ -1731,22 +1613,30 @@ class AlignmentRegion(MfaRegion):
         self.text.setParentItem(self)
         self.per_tier_range = self.top_point - self.bottom_point
 
+    def viewRangeChanged(self):
+        if (self.item_max - self.item_min) / (
+            self.selection_model.max_time - self.selection_model.min_time
+        ) < 0.01:
+            self.hide()
+        else:
+            self.show()
+        super().viewRangeChanged()
+
     def boundingRect(self):
         br = QtCore.QRectF(self.viewRect()) # bounds of containing ViewBox mapped to local coords.
         vb = self.getViewBox()
 
         pixel_size = vb.viewPixelSize()
-        rng = self.getRegion()
 
-        br.setLeft(
-        br.setRight(
+        br.setLeft(self.item_min)
+        br.setRight(self.item_max)
 
         br.setTop(self.top_point)
         # br.setBottom(self.top_point-self.per_tier_range)
         br.setBottom(self.bottom_point + 0.01)
         try:
-            visible_begin = max(
-            visible_end = min(
+            visible_begin = max(self.item_min, self.selection_model.plot_min)
+            visible_end = min(self.item_max, self.selection_model.plot_max)
         except TypeError:
             return br
         visible_duration = visible_end - visible_begin
@@ -1772,39 +1662,47 @@ class PhoneRegion(AlignmentRegion):
         self,
         phone_interval: CtmInterval,
         corpus_model: CorpusModel,
+        file_model: FileUtterancesModel,
         selection_model: CorpusSelectionModel,
-        selected: bool = False,
         bottom_point: float = 0,
         top_point: float = 1,
     ):
         super().__init__(
-            phone_interval, corpus_model,
+            phone_interval, corpus_model, file_model, selection_model, bottom_point, top_point
        )
 
 
 class WordRegion(AlignmentRegion):
+    highlightRequested = QtCore.Signal(object)
+
     def __init__(
         self,
-
+        word_interval: CtmInterval,
         corpus_model: CorpusModel,
+        file_model: FileUtterancesModel,
         selection_model: CorpusSelectionModel,
-        selected: bool = False,
         bottom_point: float = 0,
         top_point: float = 1,
     ):
         super().__init__(
-
+            word_interval, corpus_model, file_model, selection_model, bottom_point, top_point
        )
 
+    def mouseClickEvent(self, ev: QtGui.QMouseEvent):
+        search_term = TextFilterQuery(self.item.label, word=True)
+        self.highlightRequested.emit(search_term)
+        super().mouseClickEvent(ev)
+
 
 class UtteranceRegion(MfaRegion):
+    @profile
     def __init__(
         self,
         utterance: workers.UtteranceData,
         corpus_model: CorpusModel,
+        file_model: FileUtterancesModel,
         dictionary_model: DictionaryTableModel,
-        selection_model:
-        selected: bool = False,
+        selection_model: FileSelectionModel,
         bottom_point: float = 0,
         top_point: float = 1,
         extra_tiers=None,
@@ -1814,9 +1712,9 @@ class UtteranceRegion(MfaRegion):
         super().__init__(
             utterance,
             corpus_model,
+            file_model,
             dictionary_model,
             selection_model,
-            selected,
             bottom_point,
             top_point,
         )
@@ -1830,8 +1728,45 @@ class UtteranceRegion(MfaRegion):
         visible_tiers = self.settings.visible_tiers
         self.num_tiers = len([x for x in extra_tiers if visible_tiers[x]]) + 1
         self.per_tier_range = (top_point - bottom_point) / self.num_tiers
+        self.selected = self.selection_model.checkSelected(self.item.id)
 
-
+        # note LinearRegionItem.Horizontal and LinearRegionItem.Vertical
+        # are kept for backward compatibility.
+        lineKwds = dict(
+            movable=True,
+            bounds=None,
+            span=self.span,
+            pen=self.pen,
+            hoverPen=self.hoverPen,
+            movingPen=self.movingPen,
+        )
+        self.lines = [
+            UtteranceLine(
+                QtCore.QPointF(self.item_min, 0),
+                angle=90,
+                initial=True,
+                view_min=self.selection_model.plot_min,
+                view_max=self.selection_model.plot_max,
+                **lineKwds,
+            ),
+            UtteranceLine(
+                QtCore.QPointF(self.item_max, 0),
+                angle=90,
+                initial=False,
+                view_min=self.selection_model.plot_min,
+                view_max=self.selection_model.plot_max,
+                **lineKwds,
+            ),
+        ]
+
+        for line in self.lines:
+            line.setZValue(30)
+            line.setParentItem(self)
+            line.sigPositionChangeFinished.connect(self.lineMoveFinished)
+        self.lines[0].sigPositionChanged.connect(self._line0Moved)
+        self.lines[1].sigPositionChanged.connect(self._line1Moved)
+        self.lines[0].hoverChanged.connect(self.popup)
+        self.lines[1].hoverChanged.connect(self.popup)
 
         self.corpus_model.utteranceTextUpdated.connect(self.update_text_from_model)
         self.original_text = self.item.text
@@ -1923,6 +1858,8 @@ class UtteranceRegion(MfaRegion):
             if intervals is None:
                 continue
             for interval in intervals:
+                # if (interval.end - interval.begin) /(self.selection_model.max_time -self.selection_model.min_time) < 0.01:
+                # continue
                 if lookup == "transcription_text":
                     interval_reg = TranscriberTextRegion(
                         self,
@@ -1942,8 +1879,8 @@ class UtteranceRegion(MfaRegion):
                     interval_reg = PhoneRegion(
                         interval,
                         self.corpus_model,
+                        self.file_model,
                         selection_model=selection_model,
-                        selected=False,
                         top_point=tier_top_point,
                         bottom_point=tier_bottom_point,
                     )
@@ -1952,12 +1889,13 @@ class UtteranceRegion(MfaRegion):
                     interval_reg = WordRegion(
                         interval,
                         self.corpus_model,
+                        self.file_model,
                         selection_model=selection_model,
-                        selected=False,
                         top_point=tier_top_point,
                         bottom_point=tier_bottom_point,
                     )
                     interval_reg.setParentItem(self)
+                    interval_reg.highlightRequested.connect(self.highlighter.setSearchTerm)
 
                 else:
                     interval_reg = IntervalTextRegion(
@@ -1979,6 +1917,25 @@ class UtteranceRegion(MfaRegion):
         self.show()
         self.available_speakers = available_speakers
 
+    def change_editing(self, editable: bool):
+        self.lines[0].movable = editable
+        self.lines[1].movable = editable
+        self.text_edit.setReadOnly(not editable)
+
+    def popup(self, hover: bool):
+        if hover or self.moving or self.lines[0].moving or self.lines[1].moving:
+            self.setZValue(30)
+        else:
+            self.setZValue(0)
+
+    def setMovable(self, m=True):
+        """Set lines to be movable by the user, or not. If lines are movable, they will
+        also accept HoverEvents."""
+        for line in self.lines:
+            line.setMovable(m)
+        self.movable = False
+        self.setAcceptHoverEvents(False)
+
     def contextMenuEvent(self, ev: QtWidgets.QGraphicsSceneContextMenuEvent):
         menu = QtWidgets.QMenu()
         change_speaker_menu = QtWidgets.QMenu("Change speaker")
@@ -2010,6 +1967,17 @@ class UtteranceRegion(MfaRegion):
             a.toggled.connect(self.update_tier_visibility)
             visible_tiers_menu.addAction(a)
         menu.addMenu(visible_tiers_menu)
+        menu.addSeparator()
+
+        a = QtGui.QAction(menu)
+        a.setText("Split utterance")
+        a.triggered.connect(self.split_utterance)
+        menu.addAction(a)
+
+        a = QtGui.QAction(menu)
+        a.setText("Delete utterance")
+        a.triggered.connect(self.delete_utterance)
+        menu.addAction(a)
         change_speaker_menu.setStyleSheet(self.settings.menu_style_sheet)
         visible_tiers_menu.setStyleSheet(self.settings.menu_style_sheet)
         menu.setStyleSheet(self.settings.menu_style_sheet)
@@ -2028,7 +1996,7 @@ class UtteranceRegion(MfaRegion):
         if dialog.exec_():
             speaker_id = dialog.speaker_dropdown.current_text()
             if isinstance(speaker_id, int):
-                self.
+                self.file_model.update_utterance_speaker(self.item, speaker_id)
 
     def update_speaker(self):
         speaker_name = self.sender().text()
@@ -2036,21 +2004,21 @@ class UtteranceRegion(MfaRegion):
             speaker_id = 0
         else:
             speaker_id = self.available_speakers[speaker_name]
-        self.
+        self.file_model.update_utterance_speaker(self.item, speaker_id)
+
+    def split_utterance(self):
+        self.file_model.split_utterances([self.item])
+
+    def delete_utterance(self):
+        self.file_model.delete_utterances([self.item])
 
     def refresh_timer(self):
         self.timer.start(500)
         self.update()
 
-    def
-
-        self.
-
-    def select_self(self, deselect=False, reset=True, focus=False):
-        self.selected = True
-        if self.selected and not deselect and not reset:
-            return
-        self.selectRequested.emit(self.item.id, deselect, reset, focus)
+    def select_self(self, deselect=False, reset=True):
+        self.setSelected(not deselect)
+        self.selectRequested.emit(self.item.id, deselect, reset)
 
     def mouseDoubleClickEvent(self, ev: QtGui.QMouseEvent):
         if ev.button() != QtCore.Qt.MouseButton.LeftButton:
@@ -2058,7 +2026,10 @@ class UtteranceRegion(MfaRegion):
             return
         deselect = False
         reset = True
-        if ev.modifiers()
+        if ev.modifiers() in [
+            QtCore.Qt.KeyboardModifier.ControlModifier,
+            QtCore.Qt.KeyboardModifier.ShiftModifier,
+        ]:
             reset = False
             if self.selected:
                 deselect = True
@@ -2067,7 +2038,7 @@ class UtteranceRegion(MfaRegion):
                 self.selected = True
         else:
             self.selected = True
-        self.select_self(deselect=deselect, reset=reset
+        self.select_self(deselect=deselect, reset=reset)
         ev.accept()
 
     def mouseClickEvent(self, ev: QtGui.QMouseEvent):
@@ -2076,7 +2047,10 @@ class UtteranceRegion(MfaRegion):
             return
         deselect = False
         reset = True
-        if ev.modifiers()
+        if ev.modifiers() in [
+            ev.modifiers().ControlModifier,
+            ev.modifiers().ShiftModifier,
+        ]:
             reset = False
             if self.selected:
                 deselect = True
@@ -2085,14 +2059,14 @@ class UtteranceRegion(MfaRegion):
                 self.selected = True
         else:
             self.selected = True
-        self.select_self(deselect=deselect, reset=reset
+        self.select_self(deselect=deselect, reset=reset)
         ev.accept()
 
-    def update_view_times(self
-        self.lines[0].view_min =
-        self.lines[0].view_max =
-        self.lines[1].view_min =
-        self.lines[1].view_max =
+    def update_view_times(self):
+        self.lines[0].view_min = self.selection_model.plot_min
+        self.lines[0].view_max = self.selection_model.plot_max
+        self.lines[1].view_min = self.selection_model.plot_min
+        self.lines[1].view_max = self.selection_model.plot_max
         self.update()
 
     def boundingRect(self):
@@ -2313,7 +2287,7 @@ class SelectionArea(pg.LinearRegionItem):
         if (
             begin is None
             or end is None
-            or (begin == self.selection_model.
+            or (begin == self.selection_model.plot_min and end == self.selection_model.plot_max)
        ):
             self.setVisible(False)
         else:
@@ -2327,7 +2301,7 @@ class AudioPlots(pg.GraphicsObject):
     def __init__(self, top_point, separator_point, bottom_point):
         super().__init__()
         self.settings = AnchorSettings()
-        self.selection_model: typing.Optional[
+        self.selection_model: typing.Optional[FileSelectionModel] = None
         self.top_point = top_point
         self.separator_point = separator_point
         self.bottom_point = bottom_point
@@ -2397,11 +2371,11 @@ class AudioPlots(pg.GraphicsObject):
         if ev.button() != QtCore.Qt.MouseButton.LeftButton:
             ev.ignore()
             return
-        if self.selection_model.
+        if self.selection_model.plot_min is None:
             ev.ignore()
             return
-        min_time = max(min(ev.buttonDownPos().x(), ev.pos().x()), self.selection_model.
-        max_time = min(max(ev.buttonDownPos().x(), ev.pos().x()), self.selection_model.
+        min_time = max(min(ev.buttonDownPos().x(), ev.pos().x()), self.selection_model.plot_min)
+        max_time = min(max(ev.buttonDownPos().x(), ev.pos().x()), self.selection_model.plot_max)
         if ev.isStart():
             self.selection_area.setVisible(True)
         if ev.isFinish():
@@ -2412,7 +2386,45 @@ class AudioPlots(pg.GraphicsObject):
         if ev.button() != QtCore.Qt.MouseButton.LeftButton:
             ev.ignore()
             return
-
+        if ev.modifiers() in [
+            QtCore.Qt.KeyboardModifier.ControlModifier,
+            QtCore.Qt.KeyboardModifier.ShiftModifier,
+        ]:
+            time = ev.pos().x()
+            if self.selection_model.selected_max_time is not None:
+                if (
+                    self.selection_model.selected_min_time
+                    < time
+                    < self.selection_model.selected_max_time
+                ):
+                    if (
+                        time - self.selection_model.selected_min_time
+                        < self.selection_model.selected_max_time - time
+                    ):
+                        min_time = time
+                        max_time = self.selection_model.selected_max_time
+                    else:
+                        min_time = self.selection_model.selected_min_time
+                        max_time = time
+                else:
+                    min_time = min(
+                        time,
+                        self.selection_model.selected_min_time,
+                        self.selection_model.selected_max_time,
+                    )
+                    max_time = max(
+                        time,
+                        self.selection_model.selected_min_time,
+                        self.selection_model.selected_max_time,
+                    )
+            else:
+                min_time = min(time, self.selection_model.selected_min_time)
+                max_time = max(time, self.selection_model.selected_min_time)
+            self.selection_area.setRegion((min_time, max_time))
+            self.selection_area.setVisible(True)
+            self.selection_model.select_audio(min_time, max_time)
+        else:
+            self.selection_model.request_start_time(ev.pos().x())
         ev.accept()
 
     def hoverEvent(self, ev):
@@ -2454,62 +2466,82 @@ class AudioPlots(pg.GraphicsObject):
 
     def update_plot(self):
         if (
-            self.selection_model.
-            or self.selection_model.
-            or not os.path.exists(self.selection_model.
+            self.selection_model.model().file is None
+            or self.selection_model.model().file.sound_file is None
+            or not os.path.exists(self.selection_model.model().file.sound_file.sound_file_path)
        ):
             return
-        self.rect.setLeft(self.selection_model.
-        self.rect.setRight(self.selection_model.
+        self.rect.setLeft(self.selection_model.plot_min)
+        self.rect.setRight(self.selection_model.plot_max)
         self._generate_picture()
-        self.update_play_line(self.selection_model.
+        self.update_play_line(self.selection_model.plot_min)
         self.selection_area.update_region()
         self.update()
 
 
 class SpeakerTier(pg.GraphicsObject):
-    dragFinished = QtCore.Signal(object, object)
     receivedWheelEvent = QtCore.Signal(object)
     draggingLine = QtCore.Signal(object)
     lineDragFinished = QtCore.Signal(object)
 
     def __init__(
-        self,
+        self,
+        top_point,
+        bottom_point,
+        speaker_id: int,
+        speaker_name: str,
+        corpus_model: CorpusModel,
+        file_model: FileUtterancesModel,
+        selection_model: FileSelectionModel,
+        dictionary_model: DictionaryTableModel,
+        search_term: str = None,
    ):
         super().__init__()
+        self.file_model = file_model
+        self.corpus_model = corpus_model
+        self.selection_model = selection_model
+        self.dictionary_model = dictionary_model
         self.settings = AnchorSettings()
-        self.corpus_model: Optional[CorpusModel] = None
-        self.selection_model: Optional[CorpusSelectionModel] = None
         self.search_term = search_term
         self.speaker_id = speaker_id
         self.speaker_name = speaker_name
         self.speaker_index = 0
-        self.textgrid_top_point = top_point
         self.top_point = top_point
         self.speaker_label = pg.TextItem(self.speaker_name, color=self.settings.accent_base_color)
         self.speaker_label.setFont(self.settings.font)
         self.speaker_label.setParentItem(self)
         self.speaker_label.setZValue(40)
         self.bottom_point = bottom_point
-        self.textgrid_bottom_point = bottom_point
         self.annotation_range = self.top_point - self.bottom_point
         self.extra_tiers = {}
-        self.utterances = []
         self.visible_utterances: dict[str, UtteranceRegion] = {}
         self.background_brush = pg.mkBrush(self.settings.primary_very_dark_color)
         self.border = pg.mkPen(self.settings.accent_light_color)
         self.picture = QtGui.QPicture()
+        self.has_visible_utterances = False
+        self.has_selected_utterances = False
+        self.rect = QtCore.QRectF(
+            left=self.selection_model.plot_min,
+            riht=self.selection_model.plot_max,
+            top=self.top_point,
+            bottom=self.bottom_point,
+        )
+        self._generate_picture()
+        self.corpus_model.lockCorpus.connect(self.lock)
+        self.corpus_model.refreshUtteranceText.connect(self.refreshTexts)
+        self.selection_model.selectionChanged.connect(self.update_select)
+        self.selection_model.model().utterancesReady.connect(self.refresh)
 
     def wheelEvent(self, ev):
         self.receivedWheelEvent.emit(ev)
 
-    def
-        if ev.button() != QtCore.Qt.MouseButton.
+    def mouseClickEvent(self, ev):
+        if ev.button() != QtCore.Qt.MouseButton.RightButton:
             ev.ignore()
             return
         x = ev.pos().x()
         begin = max(x - 0.5, 0)
-        end = min(x + 0.5, self.selection_model.
+        end = min(x + 0.5, self.selection_model.model().file.duration)
         for x in self.visible_utterances.values():
             if begin >= x.item_min and end <= x.item_max:
                 ev.accept()
@@ -2520,10 +2552,40 @@ class SpeakerTier(pg.GraphicsObject):
                 end = x.item_min
                 break
         if end - begin > 0.001:
-
-
-            )
-
+            menu = QtWidgets.QMenu()
+
+            a = QtGui.QAction(menu)
+            a.setText("Create utterance")
+            a.triggered.connect(functools.partial(self.create_utterance, begin=begin, end=end))
+            menu.addAction(a)
+            menu.setStyleSheet(self.settings.menu_style_sheet)
+            menu.exec_(ev.screenPos())
+
+    def contextMenuEvent(self, ev):
+        x = ev.pos().x()
+        begin = max(x - 0.5, 0)
+        end = min(x + 0.5, self.selection_model.model().file.duration)
+        for x in self.visible_utterances.values():
+            if begin >= x.item_min and end <= x.item_max:
+                ev.accept()
+                return
+            if begin < x.item_max and begin > x.item_max:
+                begin = x.item_max
+            if end > x.item_min and end < x.item_min:
+                end = x.item_min
+                break
+        if end - begin > 0.001:
+            menu = QtWidgets.QMenu()
+
+            a = QtGui.QAction(menu)
+            a.setText("Create utterance")
+            a.triggered.connect(functools.partial(self.create_utterance, begin=begin, end=end))
+            menu.addAction(a)
+            menu.setStyleSheet(self.settings.menu_style_sheet)
+            menu.exec_(ev.screenPos())
+
+    def create_utterance(self, begin, end):
+        self.file_model.create_utterance(self.speaker_id, begin, end)
 
     def setSearchterm(self, term):
         self.search_term = term
@@ -2536,22 +2598,7 @@ class SpeakerTier(pg.GraphicsObject):
     def paint(self, p, *args):
         p.drawPicture(0, 0, self.picture)
 
-    def set_speaker_index(self, index, num_speakers):
-        self.speaker_index = index
-        speaker_tier_range = self.annotation_range / num_speakers
-        self.top_point = self.textgrid_top_point - (speaker_tier_range * self.speaker_index)
-        self.bottom_point = self.top_point - speaker_tier_range
-        self.rect = QtCore.QRectF(
-            left=self.selection_model.min_time,
-            top=self.top_point,
-            width=self.selection_model.max_time - self.selection_model.min_time,
-            height=speaker_tier_range,
-        )
-        self.rect.setHeight(speaker_tier_range)
-        self._generate_picture()
-
     def _generate_picture(self):
-        self.speaker_label.setPos(self.selection_model.min_time, self.top_point)
         self.picture = QtGui.QPicture()
         painter = QtGui.QPainter(self.picture)
         painter.setPen(self.border)
@@ -2566,22 +2613,6 @@ class SpeakerTier(pg.GraphicsObject):
     def set_available_speakers(self, available_speakers):
         self.available_speakers = available_speakers
 
-    def set_models(
-        self,
-        corpus_model: CorpusModel,
-        selection_model: CorpusSelectionModel,
-        dictionary_model: DictionaryTableModel,
-    ):
-        self.corpus_model = corpus_model
-        self.selection_model = selection_model
-        self.dictionary_model = dictionary_model
-        for reg in self.visible_utterances.values():
-            reg.highlighter.set_models(self.dictionary_model)
-        # self.corpus_model.changeCommandFired.connect(self.refresh)
-        self.corpus_model.lockCorpus.connect(self.lock)
-        self.corpus_model.refreshUtteranceText.connect(self.refreshTexts)
-        self.selection_model.selectionChanged.connect(self.update_select)
-
     def lock(self):
         for utt in self.visible_utterances.values():
             utt.setMovable(False)
@@ -2607,36 +2638,55 @@ class SpeakerTier(pg.GraphicsObject):
             if reg.scene() is not None:
                 reg.scene().removeItem(reg)
         self.visible_utterances = {}
-        self.other_intervals = []
 
+    @profile
     def refresh(self, *args):
-
+        self.hide()
+        if self.selection_model.plot_min is None:
            return
-        self.rect.setLeft(self.selection_model.
-        self.rect.setRight(self.selection_model.
-        self._generate_picture()
+        # self.rect.setLeft(self.selection_model.plot_min)
+        # self.rect.setRight(self.selection_model.plot_max)
+        # self._generate_picture()
         self.has_visible_utterances = False
-
-
-
-
-
-
-
-
+        self.has_selected_utterances = False
+        self.speaker_label.setPos(self.selection_model.plot_min, self.top_point)
+        cleanup_ids = []
+        model_visible_utterances = self.selection_model.visible_utterances()
+        visible_ids = [x.id for x in model_visible_utterances]
+        for reg in self.visible_utterances.values():
+            reg.hide()
+            if (
+                self.selection_model.min_time - reg.item.end > 15
+                or reg.item.begin - self.selection_model.max_time > 15
+                or (
+                    reg.item.id not in visible_ids
+                    and (
+                        reg.item.begin < self.selection_model.max_time
+                        or reg.item.end > self.selection_model.min_time
+                    )
+                )
+            ):
+                if reg.scene() is not None:
+                    reg.scene().removeItem(reg)
+                cleanup_ids.append(reg.item.id)
+        self.visible_utterances = {
+            k: v for k, v in self.visible_utterances.items() if k not in cleanup_ids
+        }
+        for u in model_visible_utterances:
+            if u.speaker_id != self.speaker_id:
                 continue
-            self.has_visible_utterances = True
             if u.id in self.visible_utterances:
+                self.visible_utterances[u.id].setSelected(self.selection_model.checkSelected(u.id))
                 self.visible_utterances[u.id].show()
                 continue
-
+            self.has_visible_utterances = True
             # Utterance region always at the top
             reg = UtteranceRegion(
                 u,
                 self.corpus_model,
+                self.file_model,
                 self.dictionary_model,
                 selection_model=self.selection_model,
-                selected=selected,
                 extra_tiers=self.extra_tiers,
                 available_speakers=self.available_speakers,
                 bottom_point=self.bottom_point,
@@ -2645,7 +2695,6 @@ class SpeakerTier(pg.GraphicsObject):
             )
             reg.sigRegionChanged.connect(self.check_utterance_bounds)
             reg.sigRegionChangeFinished.connect(self.update_utterance)
-            reg.dragFinished.connect(self.update_selected_speaker)
             reg.lines[0].sigPositionChanged.connect(self.draggingLine.emit)
             reg.lines[0].sigPositionChangeFinished.connect(self.lineDragFinished.emit)
             reg.lines[1].sigPositionChanged.connect(self.draggingLine.emit)
@@ -2661,17 +2710,13 @@ class SpeakerTier(pg.GraphicsObject):
             reg.setParentItem(self)
             self.visible_utterances[u.id] = reg
 
-
-        self.corpus_model.update_utterance_text(utterance, text=new_text)
+        self.show()
 
-    def
-
-        reg = self.sender()
-        utterance = reg.item
-        self.dragFinished.emit(utterance, pos)
+    def update_utterance_text(self, utterance, new_text):
+        self.selection_model.model().update_utterance_text(utterance, text=new_text)
 
     def update_select(self):
-        selected_rows = {x.id for x in self.selection_model.
+        selected_rows = {x.id for x in self.selection_model.selected_utterances()}
         for r in self.visible_utterances.values():
             if r.item.id in selected_rows:
                 r.setSelected(True)
@@ -2682,12 +2727,26 @@ class SpeakerTier(pg.GraphicsObject):
         reg = self.sender()
         with QtCore.QSignalBlocker(reg):
             beg, end = reg.getRegion()
-            if
-
-
-
-
-
+            if self.settings.right_to_left:
+                if end > 0:
+                    reg.setRegion([beg, 0])
+                    return
+                if (
+                    self.selection_model.model().file is not None
+                    and -end > self.selection_model.model().file.duration
+                ):
+                    reg.setRegion([beg, self.selection_model.model().file.duration])
+                    return
+            else:
+                if beg < 0:
+                    reg.setRegion([0, end])
+                    return
+                if (
+                    self.selection_model.model().file is not None
+                    and end > self.selection_model.model().file.duration
+                ):
+                    reg.setRegion([beg, self.selection_model.model().file.duration])
+                    return
             for r in self.visible_utterances.values():
                 if r == reg:
                     continue
@@ -2699,11 +2758,11 @@ class SpeakerTier(pg.GraphicsObject):
                     reg.setRegion([beg, other_begin])
                     break
             reg.text.begin, reg.text.end = reg.getRegion()
-            reg.text.update_times(self.selection_model.
+            reg.text.update_times(self.selection_model.plot_min, self.selection_model.plot_max)
             if reg.normalized_text is not None:
                 reg.normalized_text.text.begin, reg.normalized_text.text.end = reg.getRegion()
                 reg.normalized_text.text.update_times(
-                    self.selection_model.
+                    self.selection_model.plot_min, self.selection_model.plot_max
                )
             reg.select_self()
             reg.update()
@@ -2717,7 +2776,7 @@ class SpeakerTier(pg.GraphicsObject):
         new_end = round(end, 4)
         if new_begin == utt.begin and new_end == utt.end:
             return
-        self.
+        self.selection_model.model().update_utterance_times(utt, begin=new_begin, end=new_end)
         self.selection_model.select_audio(new_begin, None)
         reg.text.begin = new_begin
         reg.text.end = new_end
|