bloom-player 2.8.11 → 2.9.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,1333 @@
1
+ // This file contains code for playing audio in a bloom page, including a draggable page.
2
+ // The file is designed to be shared between Bloom Desktop and Bloom Player, with the original
3
+ // being in the Bloom Player and provided in its package. See comments in dragActivityRuntime.ts.
4
+
5
+ // It is quite difficult to know how to handle audio in a drag activity page.
6
+ // We need to be able to play it both during "Play" and when showing the page in BP.
7
+ // The code in this file so far represents a reuniting of the original Bloom Player
8
+ // narration code with the code developed for narration in drag activities.
9
+ // There is existing code for playing audio in Bloom Desktop, but it is entangled with the
10
+ // code that manages audio recording and the talking book tool. I hope eventually
11
+ // this code, or parts of it, can be used there as well as in 'Play' mode.
12
+ // The code here has hooks and some methods which are only useful to BP, for things such as
13
+ // autoplay and page advance, and also cases where the text being played is
14
+ // not fully visible. It deals with pause and continue, which also interact with
15
+ // the Bloom Player controls and with video and background music.
16
+ // Drag activities added the need to handle situations where the audio on the page
17
+ // should not all be played in succession, and more complicated sequencing of audio
18
+ // and video
19
+
20
+ import LiteEvent from "./event";
21
+ // Note: trying to avoid other imports, as part of the process of moving this code to a module
22
+ // that can be shared with BloomDesktop.
23
+
24
+ //----This first block is the old narrationUtils.ts. Everything here is now meant to be reusable
25
+ // code connected with narration, not specific to Bloom Player.
26
+ export interface ISetHighlightParams {
27
+ newElement: Element;
28
+ shouldScrollToElement: boolean;
29
+ disableHighlightIfNoAudio?: boolean;
30
+ oldElement?: Element | null | undefined; // Optional. Provides some minor optimization if set.
31
+ }
32
+
33
+ export enum PlaybackMode {
34
+ NewPage, // starting a new page ready to play
35
+ NewPageMediaPaused, // starting a new page in the "paused" state
36
+ VideoPlaying, // video is playing
37
+ VideoPaused, // video is paused
38
+ AudioPlaying, // narration and/or animation are playing (or possibly finished)
39
+ AudioPaused, // narration and/or animation are paused
40
+ MediaFinished, // video, narration, and/or animation has played (possibly no media to play)
41
+ // Note that music can be playing when the state is either AudioPlaying or MediaFinished.
42
+ }
43
+
44
+ export let currentPlaybackMode: PlaybackMode = PlaybackMode.NewPage;
45
+ export function setCurrentPlaybackMode(mode: PlaybackMode) {
46
+ currentPlaybackMode = mode;
47
+ }
48
+
49
+ // These functions support allowing a client (typically BloomPlayerCore) to register as
50
+ // the object that wants to receive notification of how long audio was played.
51
+ // Duration is in seconds.
52
+ export let durationReporter: (duration: number) => void;
53
+ export function listenForPlayDuration(reporter: (duration: number) => void) {
54
+ durationReporter = reporter;
55
+ }
56
+
57
+ // A client may configure a function which can be called to find out whether a swipe
58
+ // is in progress...in BloomPlayerCore, this is implemented by a test on Swiper.
59
+ // It is currently only used when we need to scroll the content of a field we are
60
+ // playing. Bloom Desktop does not need to set it.
61
+ export let isSwipeInProgress: () => boolean;
62
+ export function setTestIsSwipeInProgress(tester: () => boolean) {
63
+ isSwipeInProgress = tester;
64
+ }
65
+
66
+ // A client may configure a function which is passed the URL of each audio file narration plays.
67
+ export let logNarration: (src: string) => void = () => {
68
+ // do nothing by default
69
+ };
70
+ export function setLogNarration(logger: (src: string) => void) {
71
+ logNarration =
72
+ logger ??
73
+ (() => {
74
+ // do nothing by default (so we don't have to check for null)
75
+ });
76
+ }
77
+
78
+ let playerUrlPrefix = "";
79
+ // In bloom player, figuring the url prefix (to put before file JS needs to locate, like
80
+ // sounds to play) is complicated. We pass it in.
81
+ // In Bloom desktop, it's almost more tricky. If we're executing in the page iframe, we can
82
+ // easily derive it from the page's URL. But if we're executing in the toolbox, that doesn't work.
83
+ // So we arrange for newPageReady, called at least as often as changing book, to set it up
84
+ // using setPlayerUrlPrefixFromWindowLocationHref
85
+ export function setPlayerUrlPrefix(prefix: string) {
86
+ playerUrlPrefix = prefix;
87
+ }
88
+
89
+ export function setPlayerUrlPrefixFromWindowLocationHref(bookSrc: string) {
90
+ setPlayerUrlPrefix(getUrlPrefixFromWindowHref(bookSrc));
91
+ }
92
+
93
+ export function getUrlPrefixFromWindowHref(bookSrc: string) {
94
+ const index = bookSrc.lastIndexOf("/");
95
+ return bookSrc.substring(0, index);
96
+ }
97
+
98
+ export function urlPrefix(): string {
99
+ if (playerUrlPrefix) {
100
+ return playerUrlPrefix;
101
+ }
102
+ return getUrlPrefixFromWindowHref(window.location.href);
103
+ }
104
+
105
+ // We need to sort these by the tabindex of the containing bloom-translationGroup element.
106
+ // We need a stable sort, which array.sort() does not provide: elements in the same
107
+ // bloom-translationGroup, or where the translationGroup does not have a tabindex,
108
+ // should remain in their current order.
109
+ // It's not obvious what should happen to TGs with no tabindex when others have it.
110
+ // At this point we're going with the approach that no tabindex is equivalent to tabindex 999.
111
+ // This should cause text with no tabindex to sort to the bottom, if other text has a tabindex;
112
+ // It should also not affect order in situations where no text has a tabindex
113
+ // (An earlier algorithm attempted to preserve document order for the no-tab-index case
114
+ // by comparing any two elements using document order if either lacks tabindex.
115
+ // This works well for many cases, but if there's a no-tabindex element between two
116
+ // that get re-ordered (e.g., ABCDEF where the only tabindexes are C=2 and E=1),
117
+ // the function is not transitive (e.g. C < D < E but E < C) which will produce
118
+ // unpredictable results.
119
+ export function sortAudioElements(input: HTMLElement[]): HTMLElement[] {
120
+ const keyedItems = input.map((item, index) => {
121
+ return { tabindex: getTgTabIndex(item), index, item };
122
+ });
123
+ keyedItems.sort((x, y) => {
124
+ // If either is not in a translation group with a tabindex,
125
+ // order is determined by their original index.
126
+ // Likewise if the tabindexes are the same.
127
+ if (!x.tabindex || !y.tabindex || x.tabindex === y.tabindex) {
128
+ return x.index - y.index;
129
+ }
130
+ // Otherwise, determined by the numerical order of tab indexes.
131
+ return parseInt(x.tabindex, 10) - parseInt(y.tabindex, 10);
132
+ });
133
+ return keyedItems.map((x) => x.item);
134
+ }
135
+
136
+ function getTgTabIndex(input: HTMLElement): string | null {
137
+ let tg: HTMLElement | null = input;
138
+ while (tg && !tg.classList.contains("bloom-translationGroup")) {
139
+ tg = tg.parentElement;
140
+ }
141
+ if (!tg) {
142
+ return "999";
143
+ }
144
+ return tg.getAttribute("tabindex") || "999";
145
+ }
146
+ ///---- end of the bit that ended up in narrationUtils.ts before the merge.
147
+
148
+ export const kHighlightSegmentClass = "bloom-highlightSegment";
149
+
150
+ // Indicates that the element should be highlighted.
151
+ const kEnableHighlightClass = "ui-enableHighlight";
152
+
153
+ // Indicates that the element should NOT be highlighted.
154
+ // For example, some elements have highlighting prevented at this level
155
+ // because its content has been broken into child elements, only some of which show the highlight
156
+ const kDisableHighlightClass = "ui-disableHighlight";
157
+ // Indicates that highlighting is briefly/temporarily suppressed,
158
+ // but may become highlighted later.
159
+ // For example, audio highlighting is suppressed until the related audio starts playing (to avoid flashes)
160
+ const kSuppressHighlightClass = "ui-suppressHighlight";
161
+
162
+ let durationOfPagesWithoutNarration = 3.0; // seconds
163
+ export function setDurationOfPagesWithoutNarration(d: number) {
164
+ durationOfPagesWithoutNarration = d;
165
+ }
166
+
167
+ // Even though these can now encompass more than strict sentences,
168
+ // we continue to use this class name for backwards compatibility reasons.
169
+ export const kAudioSentence = "audio-sentence";
170
+
171
+ const kImageDescriptionClass = "bloom-imageDescription";
172
+
173
+ // The page that is currently being played (or edited, when we use this code in Bloom Desktop).
174
+ let currentPlayPage: HTMLElement | null = null;
175
+ // When we have recently changed pages, stores a value returned from setTimeout, which can be used to cancel the old timeout
176
+ // if we change pages again. After three seconds, the timeout sets it to zero. A non-zero value also prevents the code that
177
+ // tries to scroll the currently-playing text into view from doing so in the early stages of viewing a page, when it
178
+ // can cause problems for swiper.
179
+ let recentPageChange: any = 0; // any because typescript thinks we're in Nodejs and setTimeout will return an object.
180
+ // Unused in Bloom desktop, but in Bloom player, current page might change while a series of sounds
181
+ // is playing. This lets us avoid starting the next sound if the page has changed in the meantime.
182
+ export function setCurrentPage(page: HTMLElement) {
183
+ if (page === currentPlayPage) return;
184
+ if (recentPageChange) {
185
+ clearTimeout(recentPageChange);
186
+ }
187
+ recentPageChange = setTimeout(() => {
188
+ recentPageChange = 0;
189
+ }, 3000);
190
+ currentPlayPage = page;
191
+ }
192
+ // Get the page that the narration system thinks is current.
193
+ export function getCurrentNarrationPage(): HTMLElement {
194
+ return currentPlayPage!;
195
+ }
196
+
197
+ // The time we started playing the current narration. If we pause and resume this is adjusted
198
+ // by the length of the pause, so that "now" minus startPlay is always how much of the sound
199
+ // has actually been played.
200
+ let startPlay: Date;
201
+
202
+ // When the most recent pause happened.
203
+ let startPause: Date;
204
+
205
+ // Timer used to raise PageNarrationComplete after a delay when there is no audio on the page.
206
+ // Gets canceled if we pause and restarted if we resume.
207
+ let fakeNarrationTimer: number;
208
+
209
+ // List of segments that are currently being played, or will be resumed (or restarted) when play resumes.
210
+ // Typically the output of getPageAudioElements, but Bloom Games sometimes gives a different list.
211
+ // Unlike elementsToPlayConsecutivelyStack, this list is not reversed, nor do we remove items from it as we play them.
212
+ let segmentsWeArePlaying: HTMLElement[];
213
+
214
+ let currentAudioId = "";
215
+
216
+ // The first one to play should be at the end for both of these
217
+ let elementsToPlayConsecutivelyStack: HTMLElement[] = [];
218
+ let subElementsWithTimings: Array<[Element, number]> = [];
219
+
220
+ // On a typical page with narration, these are raised at the same time, when the last narration
221
+ // on the page finishes. But if there is no narration at all, PlayCompleted will be raised
222
+ // immediately (useful for example to disable a pause button), but PageNarrationComplete will
223
+ // be raised only after the standard delay for non-audio page (useful for auto-advancing to the next page).
224
+ export const PageNarrationComplete = new LiteEvent<HTMLElement>();
225
+ export const PlayCompleted = new LiteEvent<HTMLElement>();
226
+ // Raised when we can't play narration, specifically because the browser won't allow it until
227
+ // the user has interacted with the page.
228
+ export const PlayFailed = new LiteEvent<HTMLElement>();
229
+
230
+ // This event allows Narration to inform its controllers when we start/stop reading
231
+ // image descriptions. It is raised for each segment we read and passed true if the one
232
+ // we are about to read is an image description, false otherwise.
233
+ // Todo: wants a better name, it's not about toggling whether something is an image description,
234
+ // but about possibly updating the UI to reflect whether we are reading one.
235
+ export const ToggleImageDescription = new LiteEvent<boolean>();
236
+
237
+ // A Session Number that keeps track of each time playAllAudio started.
238
+ // This might be needed to keep track of changing pages, or when we start new audio
239
+ // that will replace something already playing.
240
+ let currentAudioSessionNum: number = 0;
241
+
242
+ let includeImageDescriptions: boolean = true;
243
+ export function setIncludeImageDescriptions(b: boolean) {
244
+ includeImageDescriptions = b;
245
+ }
246
+
247
+ // This represents the start time of the current playing of the audio. If the user presses pause/play, it will be reset.
248
+ // This is used for analytics reporting purposes
249
+ let audioPlayCurrentStartTime: number | null = null; // milliseconds (since 1970/01/01, from new Date().getTime())
250
+
251
+ // Roughly equivalent to BloomDesktop's AudioRecording::listen() function.
252
+ // As long as there is audio on the page, this method will play it.
253
+ export function playAllSentences(page: HTMLElement | null): void {
254
+ if (!page && !currentPlayPage) {
255
+ return; // this shouldn't happen
256
+ }
257
+ const pageToPlay = page ?? currentPlayPage!;
258
+ playAllAudio(getPageAudioElements(pageToPlay), pageToPlay);
259
+ }
260
+
261
+ export function playAllAudio(elements: HTMLElement[], page: HTMLElement): void {
262
+ setCurrentPage(page);
263
+ segmentsWeArePlaying = elements;
264
+ startPlay = new Date();
265
+ const mediaPlayer = getPlayer();
266
+ if (mediaPlayer) {
267
+ // This felt like a good idea to do always. But we are about to set a new src on the media player and play that,
268
+ // which will deal with any sound that is still playing.
269
+ // And if we explicitly pause it now, that actually starts an async process of getting it paused, which
270
+ // may not have completed by the time we attempt to play the new audio. And then play() fails.
271
+ // OTOH, if there is nothing new to play, we should terminate anything that is playing
272
+ // (perhaps from a previous page).
273
+ if (elements.length == 0) {
274
+ mediaPlayer.pause();
275
+ }
276
+ mediaPlayer.currentTime = 0;
277
+ }
278
+
279
+ // Invalidate old ID, even if there's no new audio to play.
280
+ // (Deals with the case where you are on a page with audio, switch to a page without audio, then switch back to original page)
281
+ ++currentAudioSessionNum;
282
+
283
+ fixHighlighting(elements);
284
+
285
+ // Sorted into the order we want to play them, then reversed so we
286
+ // can more conveniently pop the next one to play from the end of the stack.
287
+ elementsToPlayConsecutivelyStack = sortAudioElements(elements).reverse();
288
+
289
+ const stackSize = elementsToPlayConsecutivelyStack.length;
290
+ if (stackSize === 0) {
291
+ // Nothing to play. First, raise the event that indicates nothing is playing.
292
+ // It typically sets mode to MediaFinished, and we want to override that.
293
+ if (PlayCompleted) {
294
+ PlayCompleted?.raise(page);
295
+ }
296
+ // Simulate playing for a fixed amount of time before raising PageNarrationComplete, in case we're autoadvancing.
297
+ // We're not really playing, but we're pretending, so things work best if we go to that mode.
298
+ // For example, if we leave it in MediaFinished from the previous page or from raising PlayCompleted, pause won't work.
299
+ setCurrentPlaybackMode(PlaybackMode.AudioPlaying);
300
+ if (PageNarrationComplete) {
301
+ fakeNarrationTimer = window.setTimeout(() => {
302
+ setCurrentPlaybackMode(PlaybackMode.MediaFinished);
303
+ PageNarrationComplete?.raise(page);
304
+ }, durationOfPagesWithoutNarration * 1000);
305
+ }
306
+ return;
307
+ }
308
+
309
+ const firstElementToPlay = elementsToPlayConsecutivelyStack[stackSize - 1]; // Remember to pop it when you're done playing it. (i.e., in playEnded)
310
+ // At one point it seemed to help something to delete the media player and make a new one each time.
311
+ // I didn't comment this at the time, but my recollection is that this could help with some cases
312
+ // where the old one was in a bad state, such as in the middle of pausing.
313
+ // Currently, though, we're being more careful not to pause except when there is nothing more to play currently,
314
+ // (or when the user clicks the button),
315
+ // since changing the src will stop any old play, but pausing right before setting a new src and calling play()
316
+ // can cause the play() to fail. And somehow, deleting the media player here before we set up for a new play
317
+ // was causing play to fail, reporting an abort because the media was removed from the document.
318
+ // I don't fully understand why that was happening, but for now, things seem to be working best by just
319
+ // continuing to use the same player as long as it can be found.
320
+ // For sure, don't delete the player and make a new one between setting highlight and playing,
321
+ // or the handler that removes the highlight suppression will be lost.
322
+ //mediaPlayer.remove();
323
+
324
+ setSoundAndHighlight(firstElementToPlay, true);
325
+ // Review: do we need to do something to let the rest of the world know about this?
326
+ setCurrentPlaybackMode(PlaybackMode.AudioPlaying);
327
+ playCurrentInternal();
328
+ return;
329
+ }
330
+
331
+ // Match space or &nbsp; (\u00a0) or &ZeroWidthSpace; (\u200b). Must have three or more in a row to match.
332
+ // Geckofx would typically give something like `&nbsp;&nbsp;&nbsp; ` but wv2 usually gives something like `&nbsp; &nbsp; `
333
+ const multiSpaceRegex = /[ \u00a0\u200b]{3,}/;
334
+ const multiSpaceRegexGlobal = new RegExp(multiSpaceRegex, "g");
335
+ /**
336
+ * Finds and fixes any elements on the page that should have their audio-highlighting disabled.
337
+ *
338
+ * Note, all this logic is essentially duplicated from BloomDesktop where there are quite a few unit tests.
339
+ */
340
+ function fixHighlighting(audioElements: HTMLElement[]) {
341
+ // Note: Only relevant when playing by sentence (but note, this can make Record by Text Box -> Split or Record by Sentence, Play by Sentence)
342
+ // Play by Text Box highlights the whole paragraph and none of this really matters.
343
+ // (the span selector won't match anyway)
344
+ audioElements.forEach((audioElement) => {
345
+ // FYI, don't need to process the bloom-linebreak spans. Nothing bad happens, just unnecessary.
346
+ const matches = findAll(
347
+ "span[id]:not(.bloom-linebreak)",
348
+ audioElement,
349
+ true,
350
+ );
351
+ matches.forEach((element) => {
352
+ // Remove all existing highlight classes from element and element's descendants.
353
+ // These shouldn't be in the dom as the editor is supposed to clean them up,
354
+ // but we have seen at least one case where it didn't. BL-13428.
355
+ removeHighlightClasses(element);
356
+
357
+ // Simple check to help ensure that elements that don't need to be modified will remain untouched.
358
+ // This doesn't consider whether text that shouldn't be highlighted is already in inside an
359
+ // element with highlight disabled, but that's ok. The code down the stack checks that.
360
+ const containsNonHighlightText =
361
+ !!element.innerText.match(multiSpaceRegex);
362
+
363
+ if (containsNonHighlightText) {
364
+ fixHighlightingInNode(element, element);
365
+ }
366
+ });
367
+ });
368
+ }
369
+
370
+ // Remove all existing highlight classes from element and element's descendants.
371
+ function removeHighlightClasses(element: HTMLElement) {
372
+ element.classList.remove(kDisableHighlightClass);
373
+ element.classList.remove(kEnableHighlightClass);
374
+
375
+ Array.from(element.children).forEach((child: HTMLElement) => {
376
+ removeHighlightClasses(child);
377
+ });
378
+ }
379
+
380
+ /**
381
+ * Recursively fixes the audio-highlighting within a node (whether element node or text node)
382
+ * @param node The node to recursively fix
383
+ * @param startingSpan The starting span, AKA the one that will receive .ui-audioCurrent in the future.
384
+ */
385
+ function fixHighlightingInNode(node: Node, startingSpan: HTMLSpanElement) {
386
+ if (
387
+ node.nodeType === Node.ELEMENT_NODE &&
388
+ (node as Element).classList.contains(kDisableHighlightClass)
389
+ ) {
390
+ // No need to process bloom-highlightDisabled elements (they've already been processed)
391
+ return;
392
+ } else if (node.nodeType === Node.TEXT_NODE) {
393
+ // Leaf node. Fix the highlighting, then go back up the stack.
394
+ fixHighlightingInTextNode(node, startingSpan);
395
+ return;
396
+ } else {
397
+ // Recursive case
398
+ const childNodesCopy = Array.from(node.childNodes); // Make a copy because node.childNodes is being mutated
399
+ childNodesCopy.forEach((childNode) => {
400
+ fixHighlightingInNode(childNode, startingSpan);
401
+ });
402
+ }
403
+ }
404
+
405
+ /**
406
+ * Analyzes a text node and fixes its highlighting.
407
+ */
408
+ function fixHighlightingInTextNode(
409
+ textNode: Node,
410
+ startingSpan: HTMLSpanElement,
411
+ ) {
412
+ if (textNode.nodeType !== Node.TEXT_NODE) {
413
+ throw new Error(
414
+ "Invalid argument to fixMultiSpaceInTextNode: node must be a TextNode",
415
+ );
416
+ }
417
+
418
+ if (!textNode.nodeValue) {
419
+ return;
420
+ }
421
+
422
+ // string.matchAll would be cleaner, but not supported in all browsers (in particular, FF60)
423
+ // Use RegExp.exec for greater compatibility.
424
+ multiSpaceRegexGlobal.lastIndex = 0; // RegExp.exec is stateful! Need to reset the state.
425
+ const matches: {
426
+ text: string;
427
+ startIndex: number;
428
+ endIndex: number; // the index of the first character to exclude
429
+ }[] = [];
430
+ let regexResult: RegExpExecArray | null;
431
+ while (
432
+ (regexResult = multiSpaceRegexGlobal.exec(textNode.nodeValue)) != null
433
+ ) {
434
+ regexResult.forEach((matchingText) => {
435
+ matches.push({
436
+ text: matchingText,
437
+ startIndex:
438
+ multiSpaceRegexGlobal.lastIndex - matchingText.length,
439
+ endIndex: multiSpaceRegexGlobal.lastIndex, // the index of the first character to exclude
440
+ });
441
+ });
442
+ }
443
+
444
+ // First, generate the new DOM elements with the fixed highlighting.
445
+ const newNodes: Node[] = [];
446
+ if (matches.length === 0) {
447
+ // No matches
448
+ newNodes.push(makeHighlightedSpan(textNode.nodeValue));
449
+ } else {
450
+ let lastMatchEndIndex = 0; // the index of the first character to exclude of the last match
451
+ for (let i = 0; i < matches.length; ++i) {
452
+ const match = matches[i];
453
+
454
+ const preMatchText = textNode.nodeValue.slice(
455
+ lastMatchEndIndex,
456
+ match.startIndex,
457
+ );
458
+ lastMatchEndIndex = match.endIndex;
459
+ if (preMatchText) newNodes.push(makeHighlightedSpan(preMatchText));
460
+
461
+ newNodes.push(document.createTextNode(match.text));
462
+
463
+ if (i === matches.length - 1) {
464
+ const postMatchText = textNode.nodeValue.slice(match.endIndex);
465
+ if (postMatchText) {
466
+ newNodes.push(makeHighlightedSpan(postMatchText));
467
+ }
468
+ }
469
+ }
470
+ }
471
+
472
+ // Next, replace the old DOM element with the new DOM elements
473
+ const oldNode = textNode;
474
+ if (oldNode.parentNode && newNodes && newNodes.length > 0) {
475
+ for (let i = 0; i < newNodes.length; ++i) {
476
+ const nodeToInsert = newNodes[i];
477
+ oldNode.parentNode.insertBefore(nodeToInsert, oldNode);
478
+ }
479
+
480
+ oldNode.parentNode.removeChild(oldNode);
481
+
482
+ // We need to set ancestor's background back to transparent (instead of highlighted),
483
+ // and let each of the newNodes's styles control whether to be highlighted or transparent.
484
+ // If ancestor was highlighted but one of its new descendant nodes was transparent,
485
+ // all that would happen is the descendant would allow the ancestor's highlight color to show through,
486
+ // which doesn't achieve what we want :(
487
+ startingSpan.classList.add(kDisableHighlightClass);
488
+ }
489
+ }
490
+
491
+ function makeHighlightedSpan(textContent: string) {
492
+ const newSpan = document.createElement("span");
493
+ newSpan.classList.add(kEnableHighlightClass);
494
+ newSpan.appendChild(document.createTextNode(textContent));
495
+ return newSpan;
496
+ }
497
+
498
+ function playCurrentInternal() {
499
+ if (currentPlaybackMode === PlaybackMode.AudioPlaying) {
500
+ const mediaPlayer = getPlayer();
501
+ if (mediaPlayer) {
502
+ const element = getCurrentNarrationPage().querySelector(
503
+ `#${currentAudioId}`,
504
+ );
505
+ if (!element || !canPlayAudio(element)) {
506
+ playEnded();
507
+ return;
508
+ }
509
+
510
+ // Regardless of whether we end up using timingsStr or not,
511
+ // we should reset this now in case the previous page used it and was still playing
512
+ // when the user flipped to the next page.
513
+ subElementsWithTimings = [];
514
+
515
+ const timingsStr: string | null = element.getAttribute(
516
+ "data-audioRecordingEndTimes",
517
+ );
518
+ if (timingsStr) {
519
+ const childSpanElements = element.querySelectorAll(
520
+ `span.${kHighlightSegmentClass}`,
521
+ );
522
+ const fields = timingsStr.split(" ");
523
+ const subElementCount = Math.min(
524
+ fields.length,
525
+ childSpanElements.length,
526
+ );
527
+
528
+ for (let i = subElementCount - 1; i >= 0; --i) {
529
+ const durationSecs: number = Number(fields[i]);
530
+ if (isNaN(durationSecs)) {
531
+ continue;
532
+ }
533
+ subElementsWithTimings.push([
534
+ childSpanElements.item(i),
535
+ durationSecs,
536
+ ]);
537
+ }
538
+ } else {
539
+ // No timings string available.
540
+ // No need for us to do anything. The correct element is already highlighted by playAllSentences() (which needed to call setCurrent... anyway to set the audio player source).
541
+ // We'll just proceed along, start playing the audio, and playNextSubElement() will return immediately because there are no sub-elements in this case.
542
+ }
543
+
544
+ const currentSegment = element as HTMLElement;
545
+ if (currentSegment && ToggleImageDescription) {
546
+ ToggleImageDescription?.raise(
547
+ isImageDescriptionSegment(currentSegment),
548
+ );
549
+ }
550
+
551
+ gotErrorPlaying = false;
552
+ const promise = mediaPlayer.play();
553
+ ++currentAudioSessionNum;
554
+ audioPlayCurrentStartTime = new Date().getTime();
555
+ highlightNextSubElement(currentAudioSessionNum);
556
+ handlePlayPromise(promise);
557
+ }
558
+ }
559
+ }
560
+
561
+ function isImageDescriptionSegment(segment: HTMLElement): boolean {
562
+ return segment.closest("." + kImageDescriptionClass) !== null;
563
+ }
564
+
565
+ function handlePlayPromise(promise: Promise<void>, player?: HTMLMediaElement) {
566
+ // In newer browsers, play() returns a promise which fails
567
+ // if the browser disobeys the command to play, as some do
568
+ // if the user hasn't 'interacted' with the page in some
569
+ // way that makes the browser think they are OK with it
570
+ // playing audio. In Gecko45, the return value is undefined,
571
+ // so we mustn't call catch.
572
+ if (promise && promise.catch) {
573
+ promise.catch((reason: any) => {
574
+ // The HTMLMediaElement also has an error handler (which calls playEnded()).
575
+ // We do NOT want to call that here, both to stop it happening twice, but also because
576
+ // we do NOT want to call playEnded (which in autoplay causes advance to next page)
577
+ // when we get NotAllowedError. That error seems to only come here, and not to raise the
578
+ // ended event.
579
+
580
+ // This promise.catch error handler is the only one that handles NotAllowedException (that is, playback not started because user has not interacted with the page yet).
581
+ // However, older versions of browsers don't support promise from HTMLMediaElement.play(). So this cannot be the only error handler.
582
+ // Thus we need both the promise.catch error handler as well as the HTMLMediaElement's error handler.
583
+ //
584
+ // In many cases (such as NotSupportedError, which happens when the audio file isn't found), both error handlers will run.
585
+ // That is a little annoying but if the two don't conflict with each other it's not problematic.
586
+
587
+ const playingWhat = player?.getAttribute("src") ?? "unknown";
588
+ console.log("could not play sound: " + reason + " " + playingWhat);
589
+
590
+ if (
591
+ reason &&
592
+ reason
593
+ .toString()
594
+ .includes(
595
+ "The play() request was interrupted by a call to pause().",
596
+ )
597
+ ) {
598
+ // We were getting this error Aug 2020. I tried wrapping the line above which calls mediaPlayer.play()
599
+ // (currently `promise = mediaPlayer.play();`) in a setTimeout with 0ms. This seemed to fix the bug (with
600
+ // landscape books not having audio play initially -- BL-8887). But the root cause was actually that
601
+ // we ended up calling playAllSentences twice when the book first loaded.
602
+ // I fixed that in bloom-player-core. But I wanted to document the possible setTimeout fix here
603
+ // in case this issue ever comes up for a different reason.
604
+ console.log(
605
+ "See comment in narration.ts for possibly useful information regarding this error.",
606
+ );
607
+ }
608
+
609
+ // Don't call removeAudioCurrent() here. The HTMLMediaElement's error handler will call playEnded() and calling removeAudioCurrent() here will mess up playEnded().
610
+ // removeAudioCurrent();
611
+
612
+ // With some kinds of invalid sound file it keeps trying and plays over and over.
613
+ // But when we move on to play another sound, a pause here will mess things up.
614
+ // So instead I put a pause after we run out of sounds to try to play.
615
+ //getPlayer().pause();
616
+ // if (Pause) {
617
+ // Pause?.raise();
618
+ // }
619
+
620
+ // Get all the state (and UI) set correctly again.
621
+ // Not entirely sure about limiting this to NotAllowedError, but that's
622
+ // the one kind of play error that is fixed by the user just interacting.
623
+ // If there's some other reason we can't play, showing as paused may not
624
+ // be useful. See comments on the similar code in music.ts
625
+ if (reason.name === "NotAllowedError" && PlayFailed) {
626
+ PlayFailed?.raise();
627
+ }
628
+ });
629
+ }
630
+ }
631
+
632
// Moves the highlight to the next sub-element (the top of subElementsWithTimings)
// and schedules a timeout for when that sub-element's highlight window should end.
// originalSessionNum: The value of currentAudioSessionNum at the time when the audio file started playing.
// This is used to check in the future if the timeouts we started are for the right session.
// startTimeInSecs is an optional fallback that will be used in case the currentTime cannot be determined from the audio player element.
function highlightNextSubElement(
    originalSessionNum: number,
    startTimeInSecs: number = 0,
) {
    // the item should not be popped off the stack until it's completely done with.
    const subElementCount = subElementsWithTimings.length;

    if (subElementCount <= 0) {
        return;
    }

    // Peek (don't pop) at the top [element, endTime] tuple.
    const topTuple = subElementsWithTimings[subElementCount - 1];
    const element = topTuple[0];
    const endTimeInSecs: number = topTuple[1];

    setHighlightTo({
        newElement: element,
        shouldScrollToElement: true,
        disableHighlightIfNoAudio: false,
    });

    const mediaPlayer: HTMLMediaElement = document.getElementById(
        "bloom-audio-player",
    )! as HTMLMediaElement;
    let currentTimeInSecs: number = mediaPlayer.currentTime;
    if (currentTimeInSecs <= 0) {
        // Player hasn't reported progress yet; fall back to the caller's estimate.
        currentTimeInSecs = startTimeInSecs;
    }

    // Handle cases where the currentTime has already exceeded the nextStartTime
    // (might happen if you're unlucky in the thread queue... or if in debugger, etc.)
    // But instead of setting time to 0, set the minimum highlight time threshold to 0.1 (this threshold is arbitrary).
    const durationInSecs = Math.max(endTimeInSecs - currentTimeInSecs, 0.1);

    // When the window elapses, decide whether to advance the highlight.
    setTimeout(() => {
        onSubElementHighlightTimeEnded(originalSessionNum);
    }, durationInSecs * 1000);
}
674
+
675
// Handles a timeout indicating that the expected time for highlighting the current subElement has ended.
// If we've really played to the end of that subElement, highlight the next one (if any).
// originalSessionNum: The value of currentAudioSessionNum at the time when the audio file started playing.
// This is used to check in the future if the timeouts we started are for the right session.
function onSubElementHighlightTimeEnded(originalSessionNum: number) {
    // Check if the user has changed pages since the original audio for this started playing.
    // Note: Using the timestamp allows us to detect switching to the next page and then back to this page.
    // Using playerPage (HTMLElement) does not detect that.
    if (originalSessionNum !== currentAudioSessionNum) {
        return;
    }
    // Seems to be needed to prevent jumping to the next subelement when not permitted to play by browser.
    // Not sure why the check below on mediaPlayer.currentTime does not prevent this.
    if (currentPlaybackMode === PlaybackMode.AudioPaused) {
        return;
    }

    // Nothing left to highlight.
    const subElementCount = subElementsWithTimings.length;
    if (subElementCount <= 0) {
        return;
    }

    const mediaPlayer: HTMLMediaElement = document.getElementById(
        "bloom-audio-player",
    )! as HTMLMediaElement;
    if (mediaPlayer.ended || mediaPlayer.error) {
        // audio playback ended. No need to highlight anything else.
        // (No real need to remove the highlights either, because playEnded() is supposed to take care of that.)
        return;
    }
    const playedDurationInSecs: number | undefined | null =
        mediaPlayer.currentTime;

    // Peek at the next sentence and see if we're ready to start that one. (We might not be ready to play the next audio if the current audio got paused).
    const subElementWithTiming = subElementsWithTimings[subElementCount - 1];
    const nextStartTimeInSecs = subElementWithTiming[1];

    if (playedDurationInSecs && playedDurationInSecs < nextStartTimeInSecs) {
        // Still need to wait. Exit this function early and re-check later.
        const minRemainingDurationInSecs =
            nextStartTimeInSecs - playedDurationInSecs;
        setTimeout(() => {
            onSubElementHighlightTimeEnded(originalSessionNum);
        }, minRemainingDurationInSecs * 1000);

        return;
    }

    // This sub-element is genuinely finished; drop it and move the highlight onward.
    subElementsWithTimings.pop();

    highlightNextSubElement(originalSessionNum, nextStartTimeInSecs);
}
727
+
728
+ // Removes the .ui-audioCurrent class from all elements (also ui-audioCurrentImg)
729
+ // Equivalent of removeAudioCurrentFromPageDocBody() in BloomDesktop.
730
+ // "around" might be the element that has the highlight, or the one getting it;
731
+ // the important thing is that it belongs to the right document (which is in question
732
+ // with multiple iframes in Bloom desktop).
733
+ function removeAudioCurrent(around: HTMLElement = document.body) {
734
+ // Note that HTMLCollectionOf's length can change if you change the number of elements matching the selector.
735
+ // For safety we get rid of all existing ones.
736
+ const audioCurrentArray = Array.from(
737
+ around.ownerDocument.getElementsByClassName("ui-audioCurrent"),
738
+ );
739
+
740
+ for (let i = 0; i < audioCurrentArray.length; i++) {
741
+ audioCurrentArray[i].classList.remove("ui-audioCurrent");
742
+ }
743
+ const currentImg =
744
+ around.ownerDocument.getElementsByClassName("ui-audioCurrentImg")[0];
745
+ if (currentImg) {
746
+ currentImg.classList.remove("ui-audioCurrentImg");
747
+ }
748
+ }
749
+
750
// Moves the ui-audioCurrent highlight to newElement and points the shared
// player's sound source at it, in one step.
// disableHighlightIfNoAudio: suppress the highlight until we know a recording
//   actually exists for this element (see setHighlightTo).
// oldElement: the element that previously had the highlight, if known; passing
//   it lets setHighlightTo avoid a needless remove/re-add flicker when unchanged.
function setSoundAndHighlight(
    newElement: Element,
    disableHighlightIfNoAudio: boolean,
    oldElement?: Element | null | undefined,
) {
    setHighlightTo({
        newElement,
        shouldScrollToElement: true, // Always true in bloom-player version
        disableHighlightIfNoAudio,
        oldElement,
    });
    setSoundFrom(newElement);
}
763
+
764
+ function setHighlightTo({
765
+ newElement,
766
+ shouldScrollToElement,
767
+ disableHighlightIfNoAudio,
768
+ oldElement,
769
+ }: ISetHighlightParams) {
770
+ // This should happen even if oldElement and newElement are the same.
771
+ if (shouldScrollToElement) {
772
+ // Wrap it in a try/catch so that if something breaks with this minor/nice-to-have feature of scrolling,
773
+ // the main responsibilities of this method can still proceed
774
+ try {
775
+ scrollElementIntoView(newElement);
776
+ } catch (e) {
777
+ console.error(e);
778
+ }
779
+ }
780
+
781
+ if (oldElement === newElement) {
782
+ // No need to do much, and better not to, so that we can avoid any temporary flashes as the highlight is removed and re-applied
783
+ return;
784
+ }
785
+
786
+ removeAudioCurrent((oldElement || newElement) as HTMLElement);
787
+
788
+ if (disableHighlightIfNoAudio) {
789
+ const mediaPlayer = getPlayer();
790
+ const isAlreadyPlaying = mediaPlayer.currentTime > 0;
791
+
792
+ // If it's already playing, no need to disable (Especially in the Soft Split case, where only one file is playing but multiple sentences need to be highlighted).
793
+ if (!isAlreadyPlaying) {
794
+ // Start off in a highlight-disabled state so we don't display any momentary highlight for cases where there is no audio for this element.
795
+ // In react-based bloom-player, canPlayAudio() can't trivially identify whether or not audio exists,
796
+ // so we need to incorporate a derivative of Bloom Desktop's .ui-suppressHighlight code
797
+ newElement.classList.add(kSuppressHighlightClass);
798
+ // When it starts playing, we know we really have such an audio file, so we can stop
799
+ // suppressing the highlight.
800
+ mediaPlayer.addEventListener("playing", () => {
801
+ newElement.classList.remove(kSuppressHighlightClass);
802
+ });
803
+ }
804
+ }
805
+
806
+ newElement.classList.add("ui-audioCurrent");
807
+ // If the current audio is part of a (currently typically hidden) image description,
808
+ // highlight the image.
809
+ // it's important to check for imageDescription on the translationGroup;
810
+ // we don't want to highlight the image while, for example, playing a TOP box content.
811
+ const translationGroup = newElement.closest(".bloom-translationGroup");
812
+ if (
813
+ translationGroup &&
814
+ translationGroup.classList.contains(kImageDescriptionClass)
815
+ ) {
816
+ const imgContainer = translationGroup.closest(".bloom-imageContainer");
817
+ if (imgContainer) {
818
+ imgContainer.classList.add("ui-audioCurrentImg");
819
+ }
820
+ }
821
+ }
822
+
823
// Scrolls an element into view, avoiding horizontal movement where possible.
function scrollElementIntoView(element: Element) {
    // In Bloom Player, scrollIntoView can interfere with page swipes,
    // so Bloom Player needs some smarts about when to call it...
    if (isSwipeInProgress?.() || recentPageChange) {
        // This alternative implementation doesn't use scrollIntoView (Which interferes with swiper).
        // Since swiping is only active at the beginning (usually while the 1st element is playing)
        // it should generally be good enough just to reset the scroll of the scroll parent to the top.

        // Assumption: Assumes the editable is the scrollbox.
        // If this is not the case, you can use JQuery's scrollParent() function or other equivalent
        const scrollAncestor = getEditable(element);
        if (scrollAncestor) {
            scrollAncestor.scrollTop = 0;
        }
        return;
    }

    let mover = element as HTMLElement; // by default make the element itself scrollIntoView
    if (window.getComputedStyle(element.parentElement!).position !== "static") {
        // We can make a new element absolutely positioned and it will be relative to the parent.
        // The idea is to make an element much narrower than the element we are
        // trying to make visible, since we don't want horizontal movement. Quite possibly,
        // as in BL-11038, only some white space is actually off-screen. But even if the author
        // has positioned a bubble so some text is cut off, we don't want horizontal scrolling,
        // which inside swiper will weirdly pull in part of the next page.
        // (In the pathological case that the bubble is more than half hidden, we'll do the
        // horizontal scroll, despite the ugliness of possibly showing part of the next page.)
        // Note that elt may be a span, when scrolling chunks of text into view to play.
        // I thought about using scrollWidth/Height to include any part of the element
        // that is scrolled out of view, but for some reason these are always zero for spans.
        // OffsetHeight seems to give the full height, though docs seem to indicate that it
        // should not include invisible areas.
        const elt = element as HTMLElement;
        mover = document.createElement("div");
        mover.style.position = "absolute";
        mover.style.top = elt.offsetTop + "px";

        // now we need what for a block would be offsetLeft. However, for a span, that
        // yields the offset of the top left corner, which may be in the middle
        // of a line.
        const bounds = elt.getBoundingClientRect();
        const parent = elt.parentElement;
        const parentBounds = parent?.getBoundingClientRect();
        // Compensate for any CSS scale on the parent: getBoundingClientRect is in
        // screen pixels while offsetWidth is in layout pixels.
        const scale = parentBounds!.width / parent!.offsetWidth;
        const leftRelativeToParent = (bounds.left - parentBounds!.left) / scale;

        // Center the zero-width marker horizontally within the target element.
        mover.style.left = leftRelativeToParent + elt.offsetWidth / 2 + "px";
        mover.style.height = elt.offsetHeight + "px";
        mover.style.width = "0";
        element.parentElement?.insertBefore(mover, element);
    }

    mover.scrollIntoView({
        // Animated instead of sudden
        behavior: "smooth",

        // "nearest" setting does lots of smarts for us (compared to us deciding when to use "start" or "end")
        // Seems to reduce unnecessary scrolling compared to start (aka true) or end (aka false).
        // Refer to https://drafts.csswg.org/cssom-view/#scroll-an-element-into-view,
        // which seems to imply that it won't do any scrolling if the two relevant edges are already inside.
        block: "nearest",

        // horizontal alignment is controlled by "inline". We'll leave it as its default ("nearest")
        // which typically won't move things at all horizontally
    });
    if (mover !== element) {
        // Clean up the temporary zero-width marker element.
        mover.parentElement?.removeChild(mover);
    }
}
893
+
894
+ function getEditable(element: Element): Element | null {
895
+ if (element.classList.contains("bloom-editable")) {
896
+ return element;
897
+ } else {
898
+ return element.closest(".bloom-editable"); // Might be null
899
+ }
900
+ }
901
+
902
// Points the shared audio player at the recording for the given element:
// the first audio-sentence within it if there is one, otherwise the id of
// the element itself.
function setSoundFrom(element: Element) {
    const firstAudioSentence = getFirstAudioSentenceWithinElement(element);
    const id: string = firstAudioSentence ? firstAudioSentence.id : element.id;
    setCurrentAudioId(id);
}
907
+
908
+ function getFirstAudioSentenceWithinElement(
909
+ element: Element | null,
910
+ ): Element | null {
911
+ const audioSentences = getAudioSegmentsWithinElement(element);
912
+ if (!audioSentences || audioSentences.length === 0) {
913
+ return null;
914
+ }
915
+
916
+ return audioSentences[0];
917
+ }
918
+
919
+ function getAudioSegmentsWithinElement(element: Element | null): Element[] {
920
+ const audioSegments: Element[] = [];
921
+
922
+ if (element) {
923
+ if (element.classList.contains(kAudioSentence)) {
924
+ audioSegments.push(element);
925
+ } else {
926
+ const collection = element.getElementsByClassName(kAudioSentence);
927
+ for (let i = 0; i < collection.length; ++i) {
928
+ const audioSentenceElement = collection.item(i);
929
+ if (audioSentenceElement) {
930
+ audioSegments.push(audioSentenceElement);
931
+ }
932
+ }
933
+ }
934
+ }
935
+
936
+ return audioSegments;
937
+ }
938
+
939
+ function setCurrentAudioId(id: string) {
940
+ if (!currentAudioId || currentAudioId !== id) {
941
+ currentAudioId = id;
942
+ updatePlayerStatus();
943
+ }
944
+ }
945
+
946
// Points the shared audio player's src at the URL for currentAudioId.
// If something is playing at the moment, its play duration is reported first,
// since changing src implicitly pauses the player.
function updatePlayerStatus() {
    const player = getPlayer();
    if (!player) {
        return;
    }
    // Any time we change the src, the player will pause.
    // So if we're playing currently, we'd better report whatever time
    // we played.
    if (player.currentTime > 0 && !player.paused && !player.ended) {
        reportPlayDuration();
    }
    const url = currentAudioUrl(currentAudioId);
    logNarration(url);
    // because this code is meant to work in both Bloom and BloomPlayer, we can't call a Bloom API to find
    // out whether we actually have a recording (as we well might not, if we just opened the talking book
    // tool and haven't recorded anything yet). So we just try to play it and see what happens.
    // The optional param tells Bloom not to report an error if the file isn't found, and is ignored in
    // other contexts. The nocache param defeats browser caching of the recording.
    player.setAttribute(
        "src",
        url + "?nocache=" + new Date().getTime() + "&optional=true",
    );
}
969
+
970
+ function currentAudioUrl(id: string): string {
971
+ const result = urlPrefix() + "/audio/" + id + ".mp3";
972
+ return result;
973
+ }
974
+
975
+ function getPlayer(): HTMLMediaElement {
976
+ const audio = getAudio("bloom-audio-player", (a) => {
977
+ a.addEventListener("ended", handlePlayEnded);
978
+ a.addEventListener("error", handlePlayError);
979
+ });
980
+ return audio;
981
+ }
982
+
983
// "ended" listener for the shared audio player; defers to playEnded(), which
// either advances to the next queued element or reports that play is complete.
function handlePlayEnded() {
    playEnded();
}
986
+
987
// Stop any audio that is currently playing.
// This will also raise the PlayCompleted and PageNarrationComplete events
// (via playEnded -> reportPlayEnded).
export function abortNarrationPlayback() {
    if (currentPlaybackMode !== PlaybackMode.AudioPlaying) {
        return; // no need to abort
    }

    // I hesitated to put this comment here because I'm afraid it won't make any sense.
    // But I also don't feel right about not attempting to capture what happened.
    // Previously, we didn't have this "keepPlayingTheStack" parameter, and we just
    // modified the stack to have only the top element in it.
    // But for no reason we can explain, in BloomPUB Viewer, and only there,
    // even if we weren't calling this code, referencing elementsToPlayConsecutivelyStack
    // caused an error to be thrown and a black screen to result.
    // So if you're tempted to modify this code, especially if you need to
    // reference elementsToPlayConsecutivelyStack, be sure to test in BloomPUB Viewer.

    // false => finish the current element but do not continue with the rest of the stack.
    playEnded(false);
}
1006
+
1007
// Handles ending the current playback. If there are more things stacked to play, it plays the next one.
// otherwise, it reports that play ended. Note that the latter raises the PlayCompleted and PageNarrationComplete events.
// keepPlayingTheStack: pass false (see abortNarrationPlayback) to stop after the
// current element even when more elements remain on the stack.
function playEnded(keepPlayingTheStack = true): void {
    // Not sure if this is necessary, since both 'playCurrentInternal()' and 'reportPlayEnded()'
    // will toggle image description already, but if we've just gotten to the end of our "stack",
    // it may be needed.
    if (ToggleImageDescription) {
        ToggleImageDescription?.raise(false);
    }
    reportPlayDuration();
    if (
        elementsToPlayConsecutivelyStack &&
        elementsToPlayConsecutivelyStack.length > 0
    ) {
        const elementJustPlayed = elementsToPlayConsecutivelyStack.pop(); // get rid of the last one we played
        const newStackCount = elementsToPlayConsecutivelyStack.length;
        if (newStackCount > 0 && keepPlayingTheStack) {
            // More items to play
            const nextElement =
                elementsToPlayConsecutivelyStack[newStackCount - 1];
            setSoundAndHighlight(nextElement, true);
            playCurrentInternal();
        } else {
            // Stack exhausted (or we were told to stop): raise completion events
            // and clear the highlight left by the element we just played.
            reportPlayEnded();
            removeAudioCurrent(elementJustPlayed);
            // In some error conditions, we need to stop repeating attempts to play.
            getPlayer().pause();
        }
    }
}
1037
+
1038
// Cleans up at the true end of page narration: clears both play queues,
// removes any remaining highlight, and raises the PageNarrationComplete and
// PlayCompleted events.
function reportPlayEnded() {
    elementsToPlayConsecutivelyStack = [];
    subElementsWithTimings = [];

    removeAudioCurrent();
    PageNarrationComplete?.raise(currentPlayPage!);
    PlayCompleted?.raise();
}
1046
+
1047
+ function reportPlayDuration() {
1048
+ if (!audioPlayCurrentStartTime || !durationReporter) {
1049
+ return;
1050
+ }
1051
+ const currentTime = new Date().getTime();
1052
+ const duration = (currentTime - audioPlayCurrentStartTime) / 1000;
1053
+ durationReporter(duration);
1054
+ }
1055
+
1056
+ function getAudio(id: string, init: (audio: HTMLAudioElement) => void) {
1057
+ let player: HTMLAudioElement | null = document.querySelector(
1058
+ "#" + id,
1059
+ ) as HTMLAudioElement;
1060
+ // If (somehow?) it exists but is not a valid HTMLAudioElement, remove it.
1061
+ if (player && !player.play) {
1062
+ player.remove();
1063
+ player = null;
1064
+ }
1065
+ if (!player) {
1066
+ player = document.createElement("audio") as HTMLAudioElement;
1067
+ player.setAttribute("id", id);
1068
+ document.body.appendChild(player);
1069
+ init(player);
1070
+ }
1071
+ return player as HTMLMediaElement;
1072
+ }
1073
+
1074
+ function canPlayAudio(current: Element): boolean {
1075
+ return true; // currently no way to check
1076
+ }
1077
+
1078
// If something goes wrong playing a media element, typically that we don't actually have a recording
// for a particular one, we seem to sometimes get an error event, while other times, the promise returned
// by play() is rejected. Both cases call handlePlayError, which calls playEnded, but in case we get both,
// we don't want to call playEnded twice.
let gotErrorPlaying = false;

// Shared error path for both the player's "error" event and a rejected play()
// promise; the gotErrorPlaying flag guards against running twice for one failure.
function handlePlayError() {
    if (gotErrorPlaying) {
        console.log("Already got error playing, not handling again");
        return;
    }
    gotErrorPlaying = true;
    console.log("Error playing, handling");
    // Defer slightly so the player settles before we move on to the next item.
    setTimeout(() => {
        playEnded();
    }, 100);
}
1095
+
1096
+ // Returns all elements that match CSS selector {expr} as an array.
1097
+ // Querying can optionally be restricted to {container}'s descendants
1098
+ // If includeSelf is true, it includes both itself as well as its descendants.
1099
+ // Otherwise, it only includes descendants.
1100
+ // Also filters out imageDescriptions if we aren't supposed to be reading them.
1101
+ function findAll(
1102
+ expr: string,
1103
+ container: HTMLElement,
1104
+ includeSelf: boolean = false,
1105
+ ): HTMLElement[] {
1106
+ // querySelectorAll checks all the descendants
1107
+ const allMatches: HTMLElement[] = [].slice.call(
1108
+ (container || document).querySelectorAll(expr),
1109
+ );
1110
+
1111
+ // Now check itself
1112
+ if (includeSelf && container && container.matches(expr)) {
1113
+ allMatches.push(container);
1114
+ }
1115
+
1116
+ return includeImageDescriptions
1117
+ ? allMatches
1118
+ : allMatches.filter((match) => !isImageDescriptionSegment(match));
1119
+ }
1120
+
1121
+ function getPlayableDivs(container: HTMLElement) {
1122
+ // We want to play any audio we have from divs the user can see.
1123
+ // This is a crude test, but currently we always use display:none to hide unwanted languages.
1124
+ return findAll(".bloom-editable", container).filter(
1125
+ (e) => window.getComputedStyle(e).display !== "none",
1126
+ );
1127
+ }
1128
+
1129
+ // Optional param is for use when 'playerPage' has NOT been initialized.
1130
+ // Not using the optional param assumes 'playerPage' has been initialized
1131
+ function getPagePlayableDivs(page?: HTMLElement): HTMLElement[] {
1132
+ return getPlayableDivs(page ? page : currentPlayPage!);
1133
+ }
1134
+
1135
+ // Optional param is for use when 'playerPage' has NOT been initialized.
1136
+ // Not using the optional param assumes 'playerPage' has been initialized
1137
+ function getPageAudioElements(page?: HTMLElement): HTMLElement[] {
1138
+ return [].concat.apply(
1139
+ [],
1140
+ getPagePlayableDivs(page).map((x) =>
1141
+ findAll(".audio-sentence", x, true),
1142
+ ),
1143
+ );
1144
+ }
1145
+
1146
+ export function pageHasAudio(page: HTMLElement): boolean {
1147
+ return getPageAudioElements(page).length ? true : false;
1148
+ }
1149
+
1150
// Called when the user clicks the play/pause button, and we want to resume playing.
// If we're in the middle of playing, we resume it.
// If we have finished playing, we start over.
// If the page has no audio, we assume the user paused as long as wanted on
// the page, and raise the PageNarrationComplete event at once (to move to the
// next page if we are in autoplay).
export function playNarration() {
    if (currentPlaybackMode === PlaybackMode.AudioPlaying) {
        return; // no change.
    }
    setCurrentPlaybackMode(PlaybackMode.AudioPlaying);
    // I'm not sure how getPlayer() can return null/undefined, but have seen it happen
    // typically when doing something odd like trying to go back from the first page.
    if (segmentsWeArePlaying.length && getPlayer()) {
        if (elementsToPlayConsecutivelyStack.length) {
            // Mid-page pause: resume the already-loaded audio.
            handlePlayPromise(getPlayer().play());

            // Resuming play. Only currentStartTime needs to be adjusted, but originalStartTime shouldn't be changed.
            audioPlayCurrentStartTime = new Date().getTime();
            // in case we're resuming play, we need a new timeout when the current subelement is finished
            highlightNextSubElement(currentAudioSessionNum);
            return;
        } else {
            // Pressing the play button in this case is triggering a replay of the current page,
            // so we need to reset the highlighting.
            playAllSentences(null);
            return;
        }
    }
    // Nothing real to play on this page, so PageNarrationComplete depends on a timeout.
    // We only get here following a pause, so assume the reader has paused as long as wanted,
    // and move on.
    PageNarrationComplete?.raise(currentPlayPage!);
}
1184
+
1185
// Called when the user clicks play/pause while narration is playing:
// pauses the player, records when the pause began (startPause), and
// switches to the AudioPaused state.
export function pauseNarration() {
    if (currentPlaybackMode === PlaybackMode.AudioPaused) {
        return;
    }
    pausePlaying();
    startPause = new Date();

    // Note that neither music.pause() nor animations.PauseAnimations() check the state.
    // If that changes, then this state setting might need attention.
    setCurrentPlaybackMode(PlaybackMode.AudioPaused);
}
1196
+
1197
// This pauses the current player without setting the "AudioPaused" state or setting the
// startPause timestamp. If this method is called when resumption is possible, the calling
// method must take care of these values (as in the pause method directly above).
// Note that there's no "stop" method on player, only a "pause" method. This method is
// used both when "pausing" the narration while viewing a page and when stopping narration
// when changing pages.
function pausePlaying() {
    const player = getPlayer();
    // We're paused, so if we have a timer running to switch pages after a certain time, cancel it.
    clearTimeout(fakeNarrationTimer);
    if (segmentsWeArePlaying && segmentsWeArePlaying.length && player) {
        // Before reporting duration, try to check that we really are playing.
        // a separate report is sent if play ends.
        if (player.currentTime > 0 && !player.paused && !player.ended) {
            reportPlayDuration();
        }
        player.pause();
    }
}
1216
+
1217
+ // Figure out the total duration of the audio on the page.
1218
+ // An earlier version of this code (see narration.ts around November 2023)
1219
+ // was designed to run asnychronously so that if we don't have audio
1220
+ // durations in the file, it would try to get the actual duration of the audio
1221
+ // from the server. However, comments indicated that this approach did not
1222
+ // work in mobile apps, and bloompubs have now long shipped with the durations.
1223
+ // So I decided to simplify.
1224
+ export function computeDuration(page: HTMLElement): number {
1225
+ let pageDuration = 0.0;
1226
+ getPageAudioElements(page).forEach((segment) => {
1227
+ const attrDuration = segment.getAttribute("data-duration");
1228
+ if (attrDuration) {
1229
+ pageDuration += parseFloat(attrDuration);
1230
+ }
1231
+ });
1232
+ if (pageDuration < durationOfPagesWithoutNarration) {
1233
+ pageDuration = durationOfPagesWithoutNarration;
1234
+ }
1235
+ return pageDuration;
1236
+ }
1237
+
1238
// Called when the current page is being hidden (e.g., on page change):
// cancels the fake-narration page-advance timer.
export function hidingPage() {
    // This causes problems. When we're hiding one page, we immediately show another.
    // If that page has no audio, we pause the player then.
    // If it DOES have audio, a pause here can interfere with playing it.
    //pausePlaying(); // Doesn't set AudioPaused state. Caller sets NewPage state.
    clearTimeout(fakeNarrationTimer);
}
1245
+
1246
// Play the specified elements, one after the other. When the last completes (or at once if the array is empty),
// perform the 'then' action (typically used to play narration, which we put after videos).
//
// Note, there is a very similar function in video.ts. It would be nice to combine them, but
// there are various reasons that is difficult at the moment. e.g.:
// 1. See comment below about sharing code with Bloom Desktop.
// 2. The other version handles play/pause which doesn't apply in BloomDesktop.
//
// (This function would be more natural in video.ts. But at least for now I'm trying to minimize the
// number of source files shared with Bloom Desktop, and we need this for Bloom Games.)
export function playAllVideo(elements: HTMLVideoElement[], then: () => void) {
    if (elements.length === 0) {
        // Base case of the recursion: nothing (left) to play.
        then();
        return;
    }
    const video = elements[0];

    // If there is an error, try to continue with the next video.
    if (
        video.networkState === HTMLMediaElement.NETWORK_NO_SOURCE &&
        video.readyState === HTMLMediaElement.HAVE_NOTHING
    ) {
        showVideoError(video);
        playAllVideo(elements.slice(1), then);
    } else {
        hideVideoError(video);
        setCurrentPlaybackMode(PlaybackMode.VideoPlaying);
        const promise = video.play();
        promise
            .then(() => {
                // The promise resolves when the video starts playing. We want to know when it ends.
                // Note: in Bloom Desktop, sometimes this event does not fire normally, even when the video is
                // played to the end. I have not figured out why. It may be something to do with how we are
                // trimming the videos.
                // In Bloom Desktop, this is worked around by raising the ended event when we detect that it has
                // paused past the end point in resetToStartAfterPlayingToEndPoint.
                // In BloomPlayer, I don't think this is a problem. Videos are trimmed when published, so we always
                // play to the real end (unless the user pauses). So one way or another, we should get the ended
                // event.
                video.addEventListener(
                    "ended",
                    () => {
                        // Recurse on the remainder of the list.
                        playAllVideo(elements.slice(1), then);
                    },
                    { once: true },
                );
            })
            .catch((reason) => {
                // play() rejected (e.g., autoplay blocked or bad source):
                // show the message and move on to the next video.
                console.error("Video play failed", reason);
                showVideoError(video);
                playAllVideo(elements.slice(1), then);
            });
    }
}
1300
+
1301
// These methods live here instead of video.ts because video.ts is already importing
// from narration.ts, and we don't want to create a circular dependency.

// The user-visible text overlaid on a video that cannot be played.
// We're living with this message not being localized.
const badVideoMessage = "Sorry, this video cannot be played in this browser.";
1306
+
1307
+ export function showVideoError(video: HTMLVideoElement): void {
1308
+ const parent = video.parentElement;
1309
+ if (parent) {
1310
+ const divs = parent.getElementsByClassName("video-error-message");
1311
+ if (divs.length === 0) {
1312
+ const msgDiv = parent.ownerDocument.createElement("div");
1313
+ msgDiv.className = "video-error-message normal-style";
1314
+ msgDiv.textContent = badVideoMessage;
1315
+ msgDiv.style.display = "block";
1316
+ msgDiv.style.color = "black";
1317
+ msgDiv.style.backgroundColor = "rgba(255, 255, 255, 0.5)"; // semi-transparent white
1318
+ msgDiv.style.position = "absolute";
1319
+ msgDiv.style.left = "10%";
1320
+ msgDiv.style.top = "10%";
1321
+ msgDiv.style.width = "80%";
1322
+ msgDiv.style.fontSize = "x-large";
1323
+ parent.appendChild(msgDiv);
1324
+ }
1325
+ }
1326
+ }
1327
+ export function hideVideoError(video: HTMLVideoElement): void {
1328
+ const parent = video.parentElement;
1329
+ if (parent) {
1330
+ const divs = parent.getElementsByClassName("video-error-message");
1331
+ while (divs.length > 1) parent.removeChild(divs[0]);
1332
+ }
1333
+ }