@storyteller-platform/align 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (96):
  1. package/LICENSE.txt +21 -0
  2. package/README.md +3 -0
  3. package/dist/align/align.cjs +525 -0
  4. package/dist/align/align.d.cts +58 -0
  5. package/dist/align/align.d.ts +58 -0
  6. package/dist/align/align.js +458 -0
  7. package/dist/align/fuzzy.cjs +164 -0
  8. package/dist/align/fuzzy.d.cts +6 -0
  9. package/dist/align/fuzzy.d.ts +6 -0
  10. package/dist/align/fuzzy.js +141 -0
  11. package/dist/align/getSentenceRanges.cjs +304 -0
  12. package/dist/align/getSentenceRanges.d.cts +31 -0
  13. package/dist/align/getSentenceRanges.d.ts +31 -0
  14. package/dist/align/getSentenceRanges.js +277 -0
  15. package/dist/align/parse.cjs +63 -0
  16. package/dist/align/parse.d.cts +30 -0
  17. package/dist/align/parse.d.ts +30 -0
  18. package/dist/align/parse.js +51 -0
  19. package/dist/chunk-BIEQXUOY.js +50 -0
  20. package/dist/cli/bin.cjs +368 -0
  21. package/dist/cli/bin.d.cts +1 -0
  22. package/dist/cli/bin.d.ts +1 -0
  23. package/dist/cli/bin.js +319 -0
  24. package/dist/common/ffmpeg.cjs +232 -0
  25. package/dist/common/ffmpeg.d.cts +33 -0
  26. package/dist/common/ffmpeg.d.ts +33 -0
  27. package/dist/common/ffmpeg.js +196 -0
  28. package/dist/common/logging.cjs +45 -0
  29. package/dist/common/logging.d.cts +5 -0
  30. package/dist/common/logging.d.ts +5 -0
  31. package/dist/common/logging.js +12 -0
  32. package/dist/common/parse.cjs +73 -0
  33. package/dist/common/parse.d.cts +28 -0
  34. package/dist/common/parse.d.ts +28 -0
  35. package/dist/common/parse.js +56 -0
  36. package/dist/common/shell.cjs +30 -0
  37. package/dist/common/shell.d.cts +3 -0
  38. package/dist/common/shell.d.ts +3 -0
  39. package/dist/common/shell.js +7 -0
  40. package/dist/index.cjs +37 -0
  41. package/dist/index.d.cts +12 -0
  42. package/dist/index.d.ts +12 -0
  43. package/dist/index.js +11 -0
  44. package/dist/markup/__tests__/markup.test.cjs +464 -0
  45. package/dist/markup/__tests__/markup.test.d.cts +2 -0
  46. package/dist/markup/__tests__/markup.test.d.ts +2 -0
  47. package/dist/markup/__tests__/markup.test.js +441 -0
  48. package/dist/markup/markup.cjs +316 -0
  49. package/dist/markup/markup.d.cts +24 -0
  50. package/dist/markup/markup.d.ts +24 -0
  51. package/dist/markup/markup.js +254 -0
  52. package/dist/markup/parse.cjs +55 -0
  53. package/dist/markup/parse.d.cts +17 -0
  54. package/dist/markup/parse.d.ts +17 -0
  55. package/dist/markup/parse.js +43 -0
  56. package/dist/markup/segmentation.cjs +87 -0
  57. package/dist/markup/segmentation.d.cts +8 -0
  58. package/dist/markup/segmentation.d.ts +8 -0
  59. package/dist/markup/segmentation.js +67 -0
  60. package/dist/markup/semantics.cjs +79 -0
  61. package/dist/markup/semantics.d.cts +6 -0
  62. package/dist/markup/semantics.d.ts +6 -0
  63. package/dist/markup/semantics.js +53 -0
  64. package/dist/process/AudioEncoding.cjs +16 -0
  65. package/dist/process/AudioEncoding.d.cts +8 -0
  66. package/dist/process/AudioEncoding.d.ts +8 -0
  67. package/dist/process/AudioEncoding.js +0 -0
  68. package/dist/process/__tests__/processAudiobook.test.cjs +232 -0
  69. package/dist/process/__tests__/processAudiobook.test.d.cts +2 -0
  70. package/dist/process/__tests__/processAudiobook.test.d.ts +2 -0
  71. package/dist/process/__tests__/processAudiobook.test.js +209 -0
  72. package/dist/process/mime.cjs +43 -0
  73. package/dist/process/mime.d.cts +3 -0
  74. package/dist/process/mime.d.ts +3 -0
  75. package/dist/process/mime.js +24 -0
  76. package/dist/process/parse.cjs +84 -0
  77. package/dist/process/parse.d.cts +28 -0
  78. package/dist/process/parse.d.ts +28 -0
  79. package/dist/process/parse.js +73 -0
  80. package/dist/process/processAudiobook.cjs +220 -0
  81. package/dist/process/processAudiobook.d.cts +24 -0
  82. package/dist/process/processAudiobook.d.ts +24 -0
  83. package/dist/process/processAudiobook.js +166 -0
  84. package/dist/process/ranges.cjs +203 -0
  85. package/dist/process/ranges.d.cts +15 -0
  86. package/dist/process/ranges.d.ts +15 -0
  87. package/dist/process/ranges.js +137 -0
  88. package/dist/transcribe/parse.cjs +149 -0
  89. package/dist/transcribe/parse.d.cts +114 -0
  90. package/dist/transcribe/parse.d.ts +114 -0
  91. package/dist/transcribe/parse.js +143 -0
  92. package/dist/transcribe/transcribe.cjs +400 -0
  93. package/dist/transcribe/transcribe.d.cts +41 -0
  94. package/dist/transcribe/transcribe.d.ts +41 -0
  95. package/dist/transcribe/transcribe.js +330 -0
  96. package/package.json +96 -0
@@ -0,0 +1,277 @@
1
+ import "../chunk-BIEQXUOY.js";
2
+ import { segmentText } from "@echogarden/text-segmentation";
3
+ import { getTrackDuration } from "../common/ffmpeg.js";
4
+ import { findNearestMatch } from "./fuzzy.js";
5
/**
 * Segment `text` into sentences and return them in document order, with any
 * inter-sentence text the segmenter skipped (whitespace, separators)
 * interleaved between them, so that joining the result reproduces `text`.
 * Downstream offset arithmetic relies on that join invariant.
 *
 * @param {string} text - source text to segment
 * @returns {Promise<string[]>} sentences plus the gap strings between them
 */
async function getSentencesWithOffsets(text) {
  const sentences = await segmentText(text).then(
    (r) => r.sentences.map((s) => s.text)
  );
  const sentencesWithOffsets = [];
  let lastSentenceEnd = 0;
  for (const sentence of sentences) {
    const sentenceStart = text.indexOf(sentence, lastSentenceEnd);
    if (sentenceStart === -1) {
      // FIX: previously a sentence that no longer appeared verbatim made
      // indexOf return -1, setting lastSentenceEnd to `-1 + sentence.length`
      // and corrupting every subsequent offset. Skip it instead: its
      // characters are still emitted later as part of a gap or tail slice,
      // so join(output) === text still holds.
      continue;
    }
    // Preserve any text between the previous sentence and this one.
    if (sentenceStart > lastSentenceEnd) {
      sentencesWithOffsets.push(text.slice(lastSentenceEnd, sentenceStart));
    }
    sentencesWithOffsets.push(sentence);
    lastSentenceEnd = sentenceStart + sentence.length;
  }
  // Trailing text after the final sentence.
  if (text.length > lastSentenceEnd) {
    sentencesWithOffsets.push(text.slice(lastSentenceEnd));
  }
  return sentencesWithOffsets;
}
24
/**
 * Locate the first timeline entry whose end offset lies past
 * `matchStartIndex` and return its timing plus source audio file.
 *
 * @param {number} matchStartIndex - UTF-16 offset into the transcript
 * @param {{timeline: Array}} transcription - transcript with a timeline array
 * @returns {{start: number, end: number, audiofile: string} | null}
 *   `null` when no entry extends past the offset
 */
function findStartTimestamp(matchStartIndex, transcription) {
  for (const item of transcription.timeline) {
    // Entries missing endOffsetUtf16 are treated as ending at offset 0.
    if ((item.endOffsetUtf16 ?? 0) > matchStartIndex) {
      const { startTime: start, endTime: end, audiofile } = item;
      return { start, end, audiofile };
    }
  }
  return null;
}
35
/**
 * Locate the last timeline entry that starts before `matchEndIndex` and
 * return its end time.
 *
 * @param {number} matchEndIndex - UTF-16 offset into the transcript
 * @param {{timeline: Array}} transcription - transcript with a timeline array
 * @returns {number | null} end time of that entry, or `null` if none starts
 *   before the offset (or the entry has no end time)
 */
function findEndTimestamp(matchEndIndex, transcription) {
  const { timeline } = transcription;
  // Scan backwards: equivalent to Array.prototype.findLast.
  for (let i = timeline.length - 1; i >= 0; i--) {
    // Entries missing startOffsetUtf16 are treated as starting at offset 0.
    if ((timeline[i].startOffsetUtf16 ?? 0) < matchEndIndex) {
      return timeline[i].endTime ?? null;
    }
  }
  return null;
}
41
/**
 * Convert a character offset into a concatenation of window strings into a
 * (window index, offset-within-window) pair. If the offset runs past the
 * end, it is clamped to the final window with whatever remainder is left.
 *
 * @param {string[]} window - list of strings forming the window
 * @param {number} offset - character offset into the joined window
 * @returns {{index: number, offset: number}}
 */
function getWindowIndexFromOffset(window, offset) {
  let index = 0;
  for (; index < window.length - 1; index++) {
    const size = window[index].length;
    if (offset < size) break;
    offset -= size;
  }
  return { index, offset };
}
49
/**
 * Collapse every run of whitespace (spaces, tabs, newlines) into a single
 * space character.
 *
 * @param {string} input
 * @returns {string}
 */
function collapseWhitespace(input) {
  return input.replace(/\s+/g, " ");
}
52
/**
 * Fuzzy-align chapter sentences against a transcription timeline, producing
 * a time range (start/end/audiofile) for each sentence that could be matched.
 *
 * Matching strategy: slide a 10-sentence window over the transcription,
 * fuzzy-match each book sentence inside it (tolerating up to 25% of the
 * query length in edits), and advance the window past each match. After 3
 * consecutive misses (or at the end of the list) the window is nudged
 * forward; after drifting 30 windows past the last good match it snaps back
 * and gives up on the skipped sentences.
 *
 * @param {number} startSentence - index of the first sentence to align
 * @param {{transcript: string, timeline: Array}} transcription
 * @param {string[]} sentences - chapter sentences, in order
 * @param {number} chapterOffset - offset of this chapter in the transcript
 * @param {{id, start, end, audiofile} | null} lastSentenceRange - final range
 *   of the previous chapter; may be mutated to extend it up to this chapter
 * @returns {Promise<{sentenceRanges: Array, transcriptionOffset: number}>}
 *   the matched ranges plus the transcript offset just past the last match
 */
async function getSentenceRanges(startSentence, transcription, sentences, chapterOffset, lastSentenceRange) {
  const sentenceRanges = [];
  const fullTranscriptionText = transcription.transcript;
  const transcriptionText = fullTranscriptionText.slice(chapterOffset);
  const transcriptionSentences = await getSentencesWithOffsets(
    transcriptionText
  ).then((s) => s.map((sentence) => sentence.toLowerCase()));
  let startSentenceEntry = startSentence;
  // Drop "trivial" sentences (3 or fewer characters once punctuation is
  // removed) — they are too short to fuzzy-match reliably.
  // FIX: the character class previously contained `.-_` which is an
  // unescaped range 0x2E-0x5F (digits, `:;<=>?@`, A-Z, `[\]^`), silently
  // stripping digits and uppercase letters too, plus a doubled `^`. The
  // literal characters `.`, `-`, `_` were almost certainly intended.
  // NOTE(review): this makes short all-caps headings (e.g. "CHAPTER ONE")
  // count as non-trivial where they previously did not — confirm desired.
  const sentenceEntries = sentences.map((sentence, index) => [index, sentence]).filter(([index, sentence]) => {
    if (sentence.replaceAll(/[-._()[\],/?!@#$%^&*`~;:='"<>+ˌˈ]/g, "").length <= 3) {
      // Keep the caller-supplied start index consistent with the filtered list.
      if (index < startSentence) startSentenceEntry--;
      return false;
    }
    return true;
  });
  let transcriptionWindowIndex = 0;
  let transcriptionWindowOffset = 0;
  let lastGoodTranscriptionWindow = 0;
  let notFound = 0;
  let sentenceIndex = startSentenceEntry;
  let lastMatchEnd = chapterOffset;
  while (sentenceIndex < sentenceEntries.length) {
    const [sentenceId, sentence] = sentenceEntries[sentenceIndex];
    // Current search window: up to 10 transcription sentences, minus the
    // part of the first one already consumed by earlier matches.
    const transcriptionWindowList = transcriptionSentences.slice(
      transcriptionWindowIndex,
      transcriptionWindowIndex + 10
    );
    const transcriptionWindow = transcriptionWindowList.join("").slice(transcriptionWindowOffset);
    const query = collapseWhitespace(sentence.trim()).toLowerCase();
    const firstMatch = findNearestMatch(
      query,
      transcriptionWindow,
      Math.max(Math.floor(0.25 * query.length), 1)
    );
    if (!firstMatch) {
      sentenceIndex += 1;
      notFound += 1;
      if (notFound === 3 || sentenceIndex === sentenceEntries.length) {
        // Nudge the window forward and retry the sentences we just missed.
        transcriptionWindowIndex += 1;
        if (transcriptionWindowIndex === lastGoodTranscriptionWindow + 30) {
          // Drifted too far with no match: snap back and move on.
          transcriptionWindowIndex = lastGoodTranscriptionWindow;
          notFound = 0;
          continue;
        }
        sentenceIndex -= notFound;
        notFound = 0;
      }
      continue;
    }
    // Translate the window-relative match position into a transcript offset.
    const transcriptionOffset = transcriptionSentences.slice(0, transcriptionWindowIndex).join("").length;
    const startResult = findStartTimestamp(
      firstMatch.index + transcriptionOffset + transcriptionWindowOffset + chapterOffset,
      transcription
    );
    if (!startResult) {
      sentenceIndex += 1;
      continue;
    }
    let start = startResult.start;
    const audiofile = startResult.audiofile;
    const end = findEndTimestamp(
      firstMatch.index + firstMatch.match.length + transcriptionOffset + transcriptionWindowOffset + chapterOffset,
      transcription
    ) ?? startResult.end;
    // Stitch consecutive sentences together so there are no gaps: the
    // previous sentence is extended to this one's start, or to the end of
    // its audio file when this sentence starts a new file.
    if (sentenceRanges.length > 0) {
      const previousSentenceRange = sentenceRanges[sentenceRanges.length - 1];
      const previousAudiofile = previousSentenceRange.audiofile;
      if (audiofile === previousAudiofile) {
        if (previousSentenceRange.id === sentenceId - 1) {
          previousSentenceRange.end = start;
        }
      } else {
        if (previousSentenceRange.id === sentenceId - 1) {
          const lastTrackDuration = await getTrackDuration(previousAudiofile);
          previousSentenceRange.end = lastTrackDuration;
          start = 0;
        }
      }
    } else if (lastSentenceRange !== null) {
      // First match of this chapter: stitch against the previous chapter.
      if (audiofile === lastSentenceRange.audiofile) {
        if (sentenceId === 0) {
          lastSentenceRange.end = start;
        }
      } else {
        const lastTrackDuration = await getTrackDuration(
          lastSentenceRange.audiofile
        );
        lastSentenceRange.end = lastTrackDuration;
        if (sentenceId === 0) {
          start = 0;
        }
      }
    } else if (sentenceId === 0) {
      // Very first sentence of the book starts at the top of its file.
      start = 0;
    }
    sentenceRanges.push({
      id: sentenceId,
      start,
      end,
      audiofile
    });
    notFound = 0;
    lastMatchEnd = firstMatch.index + firstMatch.match.length + transcriptionOffset + transcriptionWindowOffset + chapterOffset;
    // Advance the window so the next search starts just past this match.
    const windowIndexResult = getWindowIndexFromOffset(
      transcriptionWindowList,
      firstMatch.index + firstMatch.match.length + transcriptionWindowOffset
    );
    transcriptionWindowIndex += windowIndexResult.index;
    transcriptionWindowOffset = windowIndexResult.offset;
    lastGoodTranscriptionWindow = transcriptionWindowIndex;
    sentenceIndex += 1;
  }
  return {
    sentenceRanges,
    transcriptionOffset: lastMatchEnd
  };
}
169
/**
 * Given the last range of one audio file (`trailing`) and the first range of
 * the next (`leading`), return the larger of the two silent gaps — the tail
 * after `trailing` or the lead-in before `leading` — with its audio file.
 *
 * @param {{end: number, audiofile: string}} trailing
 * @param {{start: number, audiofile: string}} leading
 * @returns {Promise<[number, string]>} [gap seconds, audiofile]; ties go to
 *   the leading gap
 */
async function getLargestGap(trailing, leading) {
  const trailingDuration = await getTrackDuration(trailing.audiofile);
  const trailingGap = trailingDuration - trailing.end;
  const leadingGap = leading.start;
  return trailingGap > leadingGap
    ? [trailingGap, trailing.audiofile]
    : [leadingGap, leading.audiofile];
}
175
/**
 * Fill gaps in a matched sentence-range list: any sentence ids missing
 * between two matched ranges (or before the first one) get synthetic ranges
 * that evenly divide the silent gap between their neighbours.
 *
 * Mutates neighbouring ranges (and `lastSentenceRange` from the previous
 * chapter) when the gap is zero or negative, carving out a small 0.25s (or
 * half-range) slot so interpolated ranges never have zero width.
 *
 * @param {Array<{id, start, end, audiofile}>} sentenceRanges - matched
 *   ranges, ascending by id; may contain gaps in the id sequence
 * @param {{id, start, end, audiofile} | null} lastSentenceRange - final
 *   range of the previous chapter, or null for the first chapter
 * @returns {Promise<Array>} ranges with every id from 0 to the last present
 */
async function interpolateSentenceRanges(sentenceRanges, lastSentenceRange) {
  const interpolated = [];
  const [first, ...rest] = sentenceRanges;
  if (!first) return interpolated;
  if (first.id !== 0) {
    // Sentences 0..first.id-1 were never matched: spread them over the gap
    // between the previous chapter's end and the first match.
    const count = first.id;
    const crossesAudioBoundary = !lastSentenceRange || first.audiofile !== lastSentenceRange.audiofile;
    let diff = crossesAudioBoundary ? first.start : first.start - lastSentenceRange.end;
    if (!crossesAudioBoundary && diff <= 0) {
      // No room: shave 0.25s off the previous chapter's final range.
      diff = 0.25;
      lastSentenceRange.end = first.start - diff;
    }
    const interpolatedLength = diff / count;
    const start = crossesAudioBoundary ? 0 : lastSentenceRange.end;
    for (let i = 0; i < count; i++) {
      interpolated.push({
        id: i,
        start: start + interpolatedLength * i,
        end: start + interpolatedLength * (i + 1),
        audiofile: first.audiofile
      });
    }
    interpolated.push(first);
  } else {
    // First sentence was matched; process it in the main loop below.
    rest.unshift(first);
  }
  for (const sentenceRange of rest) {
    if (interpolated.length === 0) {
      interpolated.push(sentenceRange);
      continue;
    }
    const lastSentenceRange2 = interpolated[interpolated.length - 1];
    // Number of missing ids between the previous range and this one.
    const count = sentenceRange.id - lastSentenceRange2.id - 1;
    if (count === 0) {
      interpolated.push(sentenceRange);
      continue;
    }
    const crossesAudioBoundary = sentenceRange.audiofile !== lastSentenceRange2.audiofile;
    // Across a file boundary, place the synthetic ranges in whichever gap is
    // larger (tail of the previous file vs lead-in of the next).
    let [diff, audiofile] = crossesAudioBoundary ? await getLargestGap(lastSentenceRange2, sentenceRange) : [sentenceRange.start - lastSentenceRange2.end, sentenceRange.audiofile];
    if (diff <= 0) {
      if (crossesAudioBoundary) {
        // Carve the slot out of the next range's own head instead.
        const rangeLength = sentenceRange.end - sentenceRange.start;
        diff = rangeLength < 0.5 ? rangeLength / 2 : 0.25;
        sentenceRange.start = diff;
      } else {
        // Shave 0.25s off the previous range to make room.
        diff = 0.25;
        lastSentenceRange2.end = sentenceRange.start - diff;
      }
    }
    const interpolatedLength = diff / count;
    const start = crossesAudioBoundary ? 0 : lastSentenceRange2.end;
    for (let i = 0; i < count; i++) {
      interpolated.push({
        id: lastSentenceRange2.id + i + 1,
        start: start + interpolatedLength * i,
        end: start + interpolatedLength * (i + 1),
        audiofile
      });
    }
    interpolated.push(sentenceRange);
  }
  return interpolated;
}
238
/**
 * Sanitize a range list so playback is monotonic: when a range starts before
 * its predecessor ends (same audio file), nudge its start forward to the
 * predecessor's end; when a range is empty or inverted, extend its end by
 * 1ms past its start. The very first range is passed through untouched.
 * Input objects are not mutated — adjusted ranges are shallow copies.
 *
 * @param {Array<{id, start, end, audiofile}>} sentenceRanges
 * @returns {Array} sanitized ranges
 */
function expandEmptySentenceRanges(sentenceRanges) {
  const result = [];
  for (const range of sentenceRanges) {
    const previous = result[result.length - 1];
    if (!previous) {
      result.push(range);
      continue;
    }
    let current = range;
    // Overlap with the previous range in the same file: push the start out.
    if (previous.audiofile === current.audiofile && previous.end > current.start) {
      current = { ...current, start: previous.end };
    }
    // Zero-width or inverted range: give it a minimal 1ms duration.
    if (current.end <= current.start) {
      current = { ...current, end: current.start + 1e-3 };
    }
    result.push(current);
  }
  return result;
}
252
/**
 * Total spoken duration of a chapter: for each consecutive run of ranges in
 * the same audio file, the span from the run's first start to its last end,
 * summed over all runs.
 *
 * @param {Array<{start: number, end: number, audiofile: string}>} sentenceRanges
 * @returns {number} duration in the same time unit as the ranges
 */
function getChapterDuration(sentenceRanges) {
  let duration = 0;
  let audiofile = null;
  let start = 0;
  let end = 0;
  for (const range of sentenceRanges) {
    if (range.audiofile !== audiofile) {
      // New file run: bank the span covered by the previous run.
      duration += end - start;
      start = range.start;
      audiofile = range.audiofile;
    }
    end = range.end;
  }
  // Bank the final run (contributes 0 for an empty input).
  return duration + (end - start);
}
271
+ export {
272
+ expandEmptySentenceRanges,
273
+ findEndTimestamp,
274
+ getChapterDuration,
275
+ getSentenceRanges,
276
+ interpolateSentenceRanges
277
+ };
@@ -0,0 +1,63 @@
1
+ "use strict";
2
+ var __defProp = Object.defineProperty;
3
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
4
+ var __getOwnPropNames = Object.getOwnPropertyNames;
5
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
6
+ var __export = (target, all) => {
7
+ for (var name in all)
8
+ __defProp(target, name, { get: all[name], enumerable: true });
9
+ };
10
+ var __copyProps = (to, from, except, desc) => {
11
+ if (from && typeof from === "object" || typeof from === "function") {
12
+ for (let key of __getOwnPropNames(from))
13
+ if (!__hasOwnProp.call(to, key) && key !== except)
14
+ __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
15
+ }
16
+ return to;
17
+ };
18
+ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
19
+ var parse_exports = {};
20
+ __export(parse_exports, {
21
+ alignCommand: () => alignCommand,
22
+ alignParser: () => alignParser
23
+ });
24
+ module.exports = __toCommonJS(parse_exports);
25
const import_core = require("@optique/core");
const import_valueparser = require("@optique/run/valueparser");
const import_parse = require("../common/parse.cjs");
const { object, option, optional, command, constant, merge, message } = import_core;
const { path } = import_valueparser;
// Shared alignment inputs: audiobook directory, source EPUB, and an
// optional directory for alignment reports.
const alignParser = object("Alignment", {
  audiobook: option("--audiobook", path({ mustExist: true, type: "directory" })),
  epub: option("--epub", path({ mustExist: true, type: "file", extensions: [".epub"] })),
  reports: optional(option("--reports", path({ type: "directory" })))
});
// The `align` subcommand: the options above merged with the transcriptions
// input, the output EPUB path, and the shared logging/language/granularity
// option groups.
const alignCommand = command(
  "align",
  merge(
    object({
      action: constant("align"),
      transcriptions: option("--transcriptions", path({ mustExist: true, type: "directory" })),
      output: option("--output", path({ type: "file", extensions: [".epub"] }))
    }),
    alignParser,
    import_parse.loggingParser,
    import_parse.languageParser,
    import_parse.granularityParser
  ),
  {
    description: message`Run forced alignment to determine where each sentence|word is spoken in the audiobook and produce a new EPUB package with Media Overlays and embedded audio.`
  }
);
// Annotate the CommonJS export names for ESM import in node:
0 && (module.exports = {
  alignCommand,
  alignParser
});
@@ -0,0 +1,30 @@
1
import * as _optique_core from '@optique/core';

/**
 * Option group for the alignment inputs: `--audiobook` (existing directory),
 * `--epub` (existing .epub file) and an optional `--reports` directory.
 */
declare const alignParser: _optique_core.Parser<"sync", {
    readonly audiobook: string;
    readonly epub: string;
    readonly reports: string | undefined;
}, {
    readonly audiobook: _optique_core.ValueParserResult<string> | undefined;
    readonly epub: _optique_core.ValueParserResult<string> | undefined;
    readonly reports: [_optique_core.ValueParserResult<string> | undefined] | undefined;
}>;
/**
 * The `align` CLI subcommand: the alignment inputs above merged with the
 * transcriptions directory, output EPUB path, and the shared logging,
 * language and granularity option groups.
 */
declare const alignCommand: _optique_core.Parser<"sync", {
    readonly action: "align";
    readonly transcriptions: string;
    readonly output: string;
} & {
    readonly audiobook: string;
    readonly epub: string;
    readonly reports: string | undefined;
} & {
    readonly noProgress: boolean;
    readonly logLevel: "silent" | "debug" | "info" | "warn" | "error";
    readonly time: boolean;
} & {
    readonly language: Intl.Locale | undefined;
} & {
    readonly granularity: "word" | "sentence";
}, ["matched", string] | ["parsing", Record<string | symbol, unknown>] | undefined>;

export { alignCommand, alignParser };
@@ -0,0 +1,30 @@
1
import * as _optique_core from '@optique/core';

/**
 * Option group for the alignment inputs: `--audiobook` (existing directory),
 * `--epub` (existing .epub file) and an optional `--reports` directory.
 */
declare const alignParser: _optique_core.Parser<"sync", {
    readonly audiobook: string;
    readonly epub: string;
    readonly reports: string | undefined;
}, {
    readonly audiobook: _optique_core.ValueParserResult<string> | undefined;
    readonly epub: _optique_core.ValueParserResult<string> | undefined;
    readonly reports: [_optique_core.ValueParserResult<string> | undefined] | undefined;
}>;
/**
 * The `align` CLI subcommand: the alignment inputs above merged with the
 * transcriptions directory, output EPUB path, and the shared logging,
 * language and granularity option groups.
 */
declare const alignCommand: _optique_core.Parser<"sync", {
    readonly action: "align";
    readonly transcriptions: string;
    readonly output: string;
} & {
    readonly audiobook: string;
    readonly epub: string;
    readonly reports: string | undefined;
} & {
    readonly noProgress: boolean;
    readonly logLevel: "silent" | "debug" | "info" | "warn" | "error";
    readonly time: boolean;
} & {
    readonly language: Intl.Locale | undefined;
} & {
    readonly granularity: "word" | "sentence";
}, ["matched", string] | ["parsing", Record<string | symbol, unknown>] | undefined>;

export { alignCommand, alignParser };
@@ -0,0 +1,51 @@
1
+ import "../chunk-BIEQXUOY.js";
2
+ import {
3
+ command,
4
+ constant,
5
+ merge,
6
+ message,
7
+ object,
8
+ option,
9
+ optional
10
+ } from "@optique/core";
11
+ import { path } from "@optique/run/valueparser";
12
+ import {
13
+ granularityParser,
14
+ languageParser,
15
+ loggingParser
16
+ } from "../common/parse.js";
17
+ const alignParser = object("Alignment", {
18
+ audiobook: option(
19
+ "--audiobook",
20
+ path({ mustExist: true, type: "directory" })
21
+ ),
22
+ epub: option(
23
+ "--epub",
24
+ path({ mustExist: true, type: "file", extensions: [".epub"] })
25
+ ),
26
+ reports: optional(option("--reports", path({ type: "directory" })))
27
+ });
28
+ const alignCommand = command(
29
+ "align",
30
+ merge(
31
+ object({
32
+ action: constant("align"),
33
+ transcriptions: option(
34
+ "--transcriptions",
35
+ path({ mustExist: true, type: "directory" })
36
+ ),
37
+ output: option("--output", path({ type: "file", extensions: [".epub"] }))
38
+ }),
39
+ alignParser,
40
+ loggingParser,
41
+ languageParser,
42
+ granularityParser
43
+ ),
44
+ {
45
+ description: message`Run forced alignment to determine where each sentence|word is spoken in the audiobook and produce a new EPUB package with Media Overlays and embedded audio.`
46
+ }
47
+ );
48
+ export {
49
+ alignCommand,
50
+ alignParser
51
+ };
@@ -0,0 +1,50 @@
1
// Bundler-emitted runtime helpers that lower `using` / `await using`
// (explicit resource management) declarations for targets without native
// support.

// Resolve a well-known symbol, falling back to the shared symbol registry
// when the host does not ship it natively.
var __knownSymbol = (name, symbol) => (symbol = Symbol[name]) ? symbol : Symbol.for("Symbol." + name);
var __typeError = (msg) => {
  throw TypeError(msg);
};
// Register `value` for later disposal. When `async` is set, prefer
// Symbol.asyncDispose; a sync dispose found in that case is wrapped so a
// synchronous throw becomes a rejected promise. Returns `value` unchanged so
// the call can be used inline in the declaration's initializer.
var __using = (stack, value, async) => {
  if (value != null) {
    if (typeof value !== "object" && typeof value !== "function") __typeError("Object expected");
    var dispose, inner;
    if (async) dispose = value[__knownSymbol("asyncDispose")];
    if (dispose === void 0) {
      dispose = value[__knownSymbol("dispose")];
      if (async) inner = dispose;
    }
    if (typeof dispose !== "function") __typeError("Object not disposable");
    if (inner) dispose = function() {
      try {
        inner.call(this);
      } catch (e) {
        return Promise.reject(e);
      }
    };
    stack.push([async, dispose, value]);
  } else if (async) {
    // `await using x = null/undefined` still records an await point.
    stack.push([async]);
  }
  return value;
};
// Unwind the disposal stack in LIFO order; errors raised while disposing are
// chained via SuppressedError (or a structural stand-in when the host lacks
// it), and async disposals are awaited before continuing the unwind.
var __callDispose = (stack, error, hasError) => {
  var E = typeof SuppressedError === "function" ? SuppressedError : function(e, s, m, _) {
    return _ = Error(m), _.name = "SuppressedError", _.error = e, _.suppressed = s, _;
  };
  // First failure is kept as-is; later ones wrap it as the suppressed error.
  var fail = (e) => error = hasError ? new E(e, error, "An error was suppressed during disposal") : (hasError = true, e);
  var next = (it) => {
    while (it = stack.pop()) {
      try {
        var result = it[1] && it[1].call(it[2]);
        // Async entry: await the dispose result, folding rejections into
        // the error chain, then resume unwinding.
        if (it[0]) return Promise.resolve(result).then(next, (e) => (fail(e), next()));
      } catch (e) {
        fail(e);
      }
    }
    if (hasError) throw error;
  };
  return next();
};

export {
  __using,
  __callDispose
};