mediabunny 1.7.6 → 1.9.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/bundles/mediabunny.cjs +590 -131
- package/dist/bundles/mediabunny.min.cjs +5 -5
- package/dist/bundles/mediabunny.min.mjs +4 -4
- package/dist/bundles/mediabunny.mjs +590 -131
- package/dist/mediabunny.d.ts +28 -1
- package/dist/modules/src/adts/adts-demuxer.d.ts +38 -0
- package/dist/modules/src/adts/adts-demuxer.d.ts.map +1 -0
- package/dist/modules/src/adts/adts-demuxer.js +221 -0
- package/dist/modules/src/adts/adts-muxer.d.ts +26 -0
- package/dist/modules/src/adts/adts-muxer.d.ts.map +1 -0
- package/dist/modules/src/adts/adts-muxer.js +79 -0
- package/dist/modules/src/adts/adts-reader.d.ts +26 -0
- package/dist/modules/src/adts/adts-reader.d.ts.map +1 -0
- package/dist/modules/src/adts/adts-reader.js +72 -0
- package/dist/modules/src/codec.d.ts +4 -1
- package/dist/modules/src/codec.d.ts.map +1 -1
- package/dist/modules/src/codec.js +8 -16
- package/dist/modules/src/index.d.ts +1 -1
- package/dist/modules/src/index.d.ts.map +1 -1
- package/dist/modules/src/index.js +1 -1
- package/dist/modules/src/input-format.d.ts +13 -0
- package/dist/modules/src/input-format.d.ts.map +1 -1
- package/dist/modules/src/input-format.js +48 -1
- package/dist/modules/src/isobmff/isobmff-demuxer.js +1 -1
- package/dist/modules/src/media-source.d.ts +1 -1
- package/dist/modules/src/media-source.d.ts.map +1 -1
- package/dist/modules/src/media-source.js +7 -6
- package/dist/modules/src/misc.d.ts +1 -0
- package/dist/modules/src/misc.d.ts.map +1 -1
- package/dist/modules/src/misc.js +13 -0
- package/dist/modules/src/mp3/mp3-demuxer.d.ts +1 -1
- package/dist/modules/src/mp3/mp3-demuxer.d.ts.map +1 -1
- package/dist/modules/src/mp3/mp3-demuxer.js +51 -49
- package/dist/modules/src/output-format.d.ts +25 -0
- package/dist/modules/src/output-format.d.ts.map +1 -1
- package/dist/modules/src/output-format.js +45 -0
- package/dist/modules/src/sample.d.ts.map +1 -1
- package/dist/modules/src/sample.js +34 -3
- package/dist/modules/src/tsconfig.tsbuildinfo +1 -1
- package/package.json +1 -1
- package/src/adts/adts-demuxer.ts +312 -0
- package/src/adts/adts-muxer.ts +109 -0
- package/src/adts/adts-reader.ts +96 -0
- package/src/codec.ts +19 -17
- package/src/index.ts +2 -0
- package/src/input-format.ts +56 -1
- package/src/isobmff/isobmff-demuxer.ts +1 -1
- package/src/media-source.ts +8 -7
- package/src/misc.ts +16 -0
- package/src/mp3/mp3-demuxer.ts +64 -62
- package/src/output-format.ts +72 -0
- package/src/sample.ts +46 -3
|
@@ -31,6 +31,7 @@ var Mediabunny = (() => {
|
|
|
31
31
|
ALL_FORMATS: () => ALL_FORMATS,
|
|
32
32
|
ALL_TRACK_TYPES: () => ALL_TRACK_TYPES,
|
|
33
33
|
AUDIO_CODECS: () => AUDIO_CODECS,
|
|
34
|
+
AdtsOutputFormat: () => AdtsOutputFormat,
|
|
34
35
|
AudioBufferSink: () => AudioBufferSink,
|
|
35
36
|
AudioBufferSource: () => AudioBufferSource,
|
|
36
37
|
AudioSample: () => AudioSample,
|
|
@@ -169,6 +170,18 @@ var Mediabunny = (() => {
|
|
|
169
170
|
}
|
|
170
171
|
return result;
|
|
171
172
|
}
|
|
173
|
+
writeBits(n, value) {
|
|
174
|
+
const end = this.pos + n;
|
|
175
|
+
for (let i = this.pos; i < end; i++) {
|
|
176
|
+
const byteIndex = Math.floor(i / 8);
|
|
177
|
+
let byte = this.bytes[byteIndex];
|
|
178
|
+
const bitIndex = 7 - (i & 7);
|
|
179
|
+
byte &= ~(1 << bitIndex);
|
|
180
|
+
byte |= (value & 1 << end - i - 1) >> end - i - 1 << bitIndex;
|
|
181
|
+
this.bytes[byteIndex] = byte;
|
|
182
|
+
}
|
|
183
|
+
this.pos = end;
|
|
184
|
+
}
|
|
172
185
|
readAlignedByte() {
|
|
173
186
|
if (this.pos % 8 !== 0) {
|
|
174
187
|
throw new Error("Bitstream is not byte-aligned.");
|
|
@@ -1098,6 +1111,22 @@ var Mediabunny = (() => {
|
|
|
1098
1111
|
}
|
|
1099
1112
|
throw new TypeError(`Unhandled codec '${codec}'.`);
|
|
1100
1113
|
};
|
|
1114
|
+
var aacFrequencyTable = [
|
|
1115
|
+
96e3,
|
|
1116
|
+
88200,
|
|
1117
|
+
64e3,
|
|
1118
|
+
48e3,
|
|
1119
|
+
44100,
|
|
1120
|
+
32e3,
|
|
1121
|
+
24e3,
|
|
1122
|
+
22050,
|
|
1123
|
+
16e3,
|
|
1124
|
+
12e3,
|
|
1125
|
+
11025,
|
|
1126
|
+
8e3,
|
|
1127
|
+
7350
|
|
1128
|
+
];
|
|
1129
|
+
var aacChannelMap = [-1, 1, 2, 3, 4, 5, 6, 8];
|
|
1101
1130
|
var parseAacAudioSpecificConfig = (bytes2) => {
|
|
1102
1131
|
if (!bytes2 || bytes2.byteLength < 2) {
|
|
1103
1132
|
throw new TypeError("AAC description must be at least 2 bytes long.");
|
|
@@ -1112,38 +1141,14 @@ var Mediabunny = (() => {
|
|
|
1112
1141
|
if (frequencyIndex === 15) {
|
|
1113
1142
|
sampleRate = bitstream.readBits(24);
|
|
1114
1143
|
} else {
|
|
1115
|
-
|
|
1116
|
-
|
|
1117
|
-
88200,
|
|
1118
|
-
64e3,
|
|
1119
|
-
48e3,
|
|
1120
|
-
44100,
|
|
1121
|
-
32e3,
|
|
1122
|
-
24e3,
|
|
1123
|
-
22050,
|
|
1124
|
-
16e3,
|
|
1125
|
-
12e3,
|
|
1126
|
-
11025,
|
|
1127
|
-
8e3,
|
|
1128
|
-
7350
|
|
1129
|
-
];
|
|
1130
|
-
if (frequencyIndex < freqTable.length) {
|
|
1131
|
-
sampleRate = freqTable[frequencyIndex];
|
|
1144
|
+
if (frequencyIndex < aacFrequencyTable.length) {
|
|
1145
|
+
sampleRate = aacFrequencyTable[frequencyIndex];
|
|
1132
1146
|
}
|
|
1133
1147
|
}
|
|
1134
1148
|
const channelConfiguration = bitstream.readBits(4);
|
|
1135
1149
|
let numberOfChannels = null;
|
|
1136
1150
|
if (channelConfiguration >= 1 && channelConfiguration <= 7) {
|
|
1137
|
-
|
|
1138
|
-
1: 1,
|
|
1139
|
-
2: 2,
|
|
1140
|
-
3: 3,
|
|
1141
|
-
4: 4,
|
|
1142
|
-
5: 5,
|
|
1143
|
-
6: 6,
|
|
1144
|
-
7: 8
|
|
1145
|
-
};
|
|
1146
|
-
numberOfChannels = channelMap[channelConfiguration];
|
|
1151
|
+
numberOfChannels = aacChannelMap[channelConfiguration];
|
|
1147
1152
|
}
|
|
1148
1153
|
return {
|
|
1149
1154
|
objectType,
|
|
@@ -1695,6 +1700,119 @@ var Mediabunny = (() => {
|
|
|
1695
1700
|
return null;
|
|
1696
1701
|
};
|
|
1697
1702
|
|
|
1703
|
+
// src/muxer.ts
|
|
1704
|
+
var Muxer = class {
|
|
1705
|
+
constructor(output) {
|
|
1706
|
+
this.mutex = new AsyncMutex();
|
|
1707
|
+
/**
|
|
1708
|
+
* This field is used to synchronize multiple MediaStreamTracks. They use the same time coordinate system across
|
|
1709
|
+
* tracks, and to ensure correct audio-video sync, we must use the same offset for all of them. The reason an offset
|
|
1710
|
+
* is needed at all is because the timestamps typically don't start at zero.
|
|
1711
|
+
*/
|
|
1712
|
+
this.firstMediaStreamTimestamp = null;
|
|
1713
|
+
this.trackTimestampInfo = /* @__PURE__ */ new WeakMap();
|
|
1714
|
+
this.output = output;
|
|
1715
|
+
}
|
|
1716
|
+
// eslint-disable-next-line @typescript-eslint/no-unused-vars
|
|
1717
|
+
onTrackClose(track) {
|
|
1718
|
+
}
|
|
1719
|
+
validateAndNormalizeTimestamp(track, timestampInSeconds, isKeyFrame) {
|
|
1720
|
+
timestampInSeconds += track.source._timestampOffset;
|
|
1721
|
+
let timestampInfo = this.trackTimestampInfo.get(track);
|
|
1722
|
+
if (!timestampInfo) {
|
|
1723
|
+
if (!isKeyFrame) {
|
|
1724
|
+
throw new Error("First frame must be a key frame.");
|
|
1725
|
+
}
|
|
1726
|
+
timestampInfo = {
|
|
1727
|
+
maxTimestamp: timestampInSeconds,
|
|
1728
|
+
maxTimestampBeforeLastKeyFrame: timestampInSeconds
|
|
1729
|
+
};
|
|
1730
|
+
this.trackTimestampInfo.set(track, timestampInfo);
|
|
1731
|
+
}
|
|
1732
|
+
if (timestampInSeconds < 0) {
|
|
1733
|
+
throw new Error(`Timestamps must be non-negative (got ${timestampInSeconds}s).`);
|
|
1734
|
+
}
|
|
1735
|
+
if (isKeyFrame) {
|
|
1736
|
+
timestampInfo.maxTimestampBeforeLastKeyFrame = timestampInfo.maxTimestamp;
|
|
1737
|
+
}
|
|
1738
|
+
if (timestampInSeconds < timestampInfo.maxTimestampBeforeLastKeyFrame) {
|
|
1739
|
+
throw new Error(
|
|
1740
|
+
`Timestamps cannot be smaller than the highest timestamp of the previous run (a run begins with a key frame and ends right before the next key frame). Got ${timestampInSeconds}s, but highest timestamp is ${timestampInfo.maxTimestampBeforeLastKeyFrame}s.`
|
|
1741
|
+
);
|
|
1742
|
+
}
|
|
1743
|
+
timestampInfo.maxTimestamp = Math.max(timestampInfo.maxTimestamp, timestampInSeconds);
|
|
1744
|
+
return timestampInSeconds;
|
|
1745
|
+
}
|
|
1746
|
+
};
|
|
1747
|
+
|
|
1748
|
+
// src/adts/adts-muxer.ts
|
|
1749
|
+
var AdtsMuxer = class extends Muxer {
|
|
1750
|
+
constructor(output, format) {
|
|
1751
|
+
super(output);
|
|
1752
|
+
this.header = new Uint8Array(7);
|
|
1753
|
+
this.headerBitstream = new Bitstream(this.header);
|
|
1754
|
+
this.audioSpecificConfig = null;
|
|
1755
|
+
this.format = format;
|
|
1756
|
+
this.writer = output._writer;
|
|
1757
|
+
}
|
|
1758
|
+
async start() {
|
|
1759
|
+
}
|
|
1760
|
+
async getMimeType() {
|
|
1761
|
+
return "audio/aac";
|
|
1762
|
+
}
|
|
1763
|
+
async addEncodedVideoPacket() {
|
|
1764
|
+
throw new Error("ADTS does not support video.");
|
|
1765
|
+
}
|
|
1766
|
+
async addEncodedAudioPacket(track, packet, meta) {
|
|
1767
|
+
const release = await this.mutex.acquire();
|
|
1768
|
+
try {
|
|
1769
|
+
if (!this.audioSpecificConfig) {
|
|
1770
|
+
validateAudioChunkMetadata(meta);
|
|
1771
|
+
const description = meta?.decoderConfig?.description;
|
|
1772
|
+
assert(description);
|
|
1773
|
+
this.audioSpecificConfig = parseAacAudioSpecificConfig(toUint8Array(description));
|
|
1774
|
+
const { objectType, frequencyIndex, channelConfiguration } = this.audioSpecificConfig;
|
|
1775
|
+
const profile = objectType - 1;
|
|
1776
|
+
this.headerBitstream.writeBits(12, 4095);
|
|
1777
|
+
this.headerBitstream.writeBits(1, 0);
|
|
1778
|
+
this.headerBitstream.writeBits(2, 0);
|
|
1779
|
+
this.headerBitstream.writeBits(1, 1);
|
|
1780
|
+
this.headerBitstream.writeBits(2, profile);
|
|
1781
|
+
this.headerBitstream.writeBits(4, frequencyIndex);
|
|
1782
|
+
this.headerBitstream.writeBits(1, 0);
|
|
1783
|
+
this.headerBitstream.writeBits(3, channelConfiguration);
|
|
1784
|
+
this.headerBitstream.writeBits(1, 0);
|
|
1785
|
+
this.headerBitstream.writeBits(1, 0);
|
|
1786
|
+
this.headerBitstream.writeBits(1, 0);
|
|
1787
|
+
this.headerBitstream.writeBits(1, 0);
|
|
1788
|
+
this.headerBitstream.skipBits(13);
|
|
1789
|
+
this.headerBitstream.writeBits(11, 2047);
|
|
1790
|
+
this.headerBitstream.writeBits(2, 0);
|
|
1791
|
+
}
|
|
1792
|
+
const frameLength = packet.data.byteLength + this.header.byteLength;
|
|
1793
|
+
this.headerBitstream.pos = 30;
|
|
1794
|
+
this.headerBitstream.writeBits(13, frameLength);
|
|
1795
|
+
const startPos = this.writer.getPos();
|
|
1796
|
+
this.writer.write(this.header);
|
|
1797
|
+
this.writer.write(packet.data);
|
|
1798
|
+
if (this.format._options.onFrame) {
|
|
1799
|
+
const frameBytes = new Uint8Array(frameLength);
|
|
1800
|
+
frameBytes.set(this.header, 0);
|
|
1801
|
+
frameBytes.set(packet.data, this.header.byteLength);
|
|
1802
|
+
this.format._options.onFrame(frameBytes, startPos);
|
|
1803
|
+
}
|
|
1804
|
+
await this.writer.flush();
|
|
1805
|
+
} finally {
|
|
1806
|
+
release();
|
|
1807
|
+
}
|
|
1808
|
+
}
|
|
1809
|
+
async addSubtitleCue() {
|
|
1810
|
+
throw new Error("ADTS does not support subtitles.");
|
|
1811
|
+
}
|
|
1812
|
+
async finalize() {
|
|
1813
|
+
}
|
|
1814
|
+
};
|
|
1815
|
+
|
|
1698
1816
|
// src/subtitles.ts
|
|
1699
1817
|
var cueBlockHeaderRegex = /(?:(.+?)\n)?((?:\d{2}:)?\d{2}:\d{2}.\d{3})\s+-->\s+((?:\d{2}:)?\d{2}:\d{2}.\d{3})/g;
|
|
1700
1818
|
var preambleStartRegex = /^WEBVTT(.|\n)*?\n{2}/;
|
|
@@ -4064,51 +4182,6 @@ var Mediabunny = (() => {
|
|
|
4064
4182
|
webvtt: vttC
|
|
4065
4183
|
};
|
|
4066
4184
|
|
|
4067
|
-
// src/muxer.ts
|
|
4068
|
-
var Muxer = class {
|
|
4069
|
-
constructor(output) {
|
|
4070
|
-
this.mutex = new AsyncMutex();
|
|
4071
|
-
/**
|
|
4072
|
-
* This field is used to synchronize multiple MediaStreamTracks. They use the same time coordinate system across
|
|
4073
|
-
* tracks, and to ensure correct audio-video sync, we must use the same offset for all of them. The reason an offset
|
|
4074
|
-
* is needed at all is because the timestamps typically don't start at zero.
|
|
4075
|
-
*/
|
|
4076
|
-
this.firstMediaStreamTimestamp = null;
|
|
4077
|
-
this.trackTimestampInfo = /* @__PURE__ */ new WeakMap();
|
|
4078
|
-
this.output = output;
|
|
4079
|
-
}
|
|
4080
|
-
// eslint-disable-next-line @typescript-eslint/no-unused-vars
|
|
4081
|
-
onTrackClose(track) {
|
|
4082
|
-
}
|
|
4083
|
-
validateAndNormalizeTimestamp(track, timestampInSeconds, isKeyFrame) {
|
|
4084
|
-
timestampInSeconds += track.source._timestampOffset;
|
|
4085
|
-
let timestampInfo = this.trackTimestampInfo.get(track);
|
|
4086
|
-
if (!timestampInfo) {
|
|
4087
|
-
if (!isKeyFrame) {
|
|
4088
|
-
throw new Error("First frame must be a key frame.");
|
|
4089
|
-
}
|
|
4090
|
-
timestampInfo = {
|
|
4091
|
-
maxTimestamp: timestampInSeconds,
|
|
4092
|
-
maxTimestampBeforeLastKeyFrame: timestampInSeconds
|
|
4093
|
-
};
|
|
4094
|
-
this.trackTimestampInfo.set(track, timestampInfo);
|
|
4095
|
-
}
|
|
4096
|
-
if (timestampInSeconds < 0) {
|
|
4097
|
-
throw new Error(`Timestamps must be non-negative (got ${timestampInSeconds}s).`);
|
|
4098
|
-
}
|
|
4099
|
-
if (isKeyFrame) {
|
|
4100
|
-
timestampInfo.maxTimestampBeforeLastKeyFrame = timestampInfo.maxTimestamp;
|
|
4101
|
-
}
|
|
4102
|
-
if (timestampInSeconds < timestampInfo.maxTimestampBeforeLastKeyFrame) {
|
|
4103
|
-
throw new Error(
|
|
4104
|
-
`Timestamps cannot be smaller than the highest timestamp of the previous run (a run begins with a key frame and ends right before the next key frame). Got ${timestampInSeconds}s, but highest timestamp is ${timestampInfo.maxTimestampBeforeLastKeyFrame}s.`
|
|
4105
|
-
);
|
|
4106
|
-
}
|
|
4107
|
-
timestampInfo.maxTimestamp = Math.max(timestampInfo.maxTimestamp, timestampInSeconds);
|
|
4108
|
-
return timestampInSeconds;
|
|
4109
|
-
}
|
|
4110
|
-
};
|
|
4111
|
-
|
|
4112
4185
|
// src/writer.ts
|
|
4113
4186
|
var Writer = class {
|
|
4114
4187
|
constructor() {
|
|
@@ -8164,6 +8237,40 @@ ${cue.notes ?? ""}`;
|
|
|
8164
8237
|
}
|
|
8165
8238
|
this.timestamp = newTimestamp;
|
|
8166
8239
|
}
|
|
8240
|
+
/** @internal */
|
|
8241
|
+
static *_fromAudioBuffer(audioBuffer, timestamp) {
|
|
8242
|
+
if (!(audioBuffer instanceof AudioBuffer)) {
|
|
8243
|
+
throw new TypeError("audioBuffer must be an AudioBuffer.");
|
|
8244
|
+
}
|
|
8245
|
+
const MAX_FLOAT_COUNT = 48e3 * 5;
|
|
8246
|
+
const numberOfChannels = audioBuffer.numberOfChannels;
|
|
8247
|
+
const sampleRate = audioBuffer.sampleRate;
|
|
8248
|
+
const totalFrames = audioBuffer.length;
|
|
8249
|
+
const maxFramesPerChunk = Math.floor(MAX_FLOAT_COUNT / numberOfChannels);
|
|
8250
|
+
let currentRelativeFrame = 0;
|
|
8251
|
+
let remainingFrames = totalFrames;
|
|
8252
|
+
while (remainingFrames > 0) {
|
|
8253
|
+
const framesToCopy = Math.min(maxFramesPerChunk, remainingFrames);
|
|
8254
|
+
const chunkData = new Float32Array(numberOfChannels * framesToCopy);
|
|
8255
|
+
for (let channel = 0; channel < numberOfChannels; channel++) {
|
|
8256
|
+
audioBuffer.copyFromChannel(
|
|
8257
|
+
chunkData.subarray(channel * framesToCopy, (channel + 1) * framesToCopy),
|
|
8258
|
+
channel,
|
|
8259
|
+
currentRelativeFrame
|
|
8260
|
+
);
|
|
8261
|
+
}
|
|
8262
|
+
yield new _AudioSample({
|
|
8263
|
+
format: "f32-planar",
|
|
8264
|
+
sampleRate,
|
|
8265
|
+
numberOfFrames: framesToCopy,
|
|
8266
|
+
numberOfChannels,
|
|
8267
|
+
timestamp: timestamp + currentRelativeFrame / sampleRate,
|
|
8268
|
+
data: chunkData
|
|
8269
|
+
});
|
|
8270
|
+
currentRelativeFrame += framesToCopy;
|
|
8271
|
+
remainingFrames -= framesToCopy;
|
|
8272
|
+
}
|
|
8273
|
+
}
|
|
8167
8274
|
/**
|
|
8168
8275
|
* Creates AudioSamples from an AudioBuffer, starting at the given timestamp in seconds. Typically creates exactly
|
|
8169
8276
|
* one sample, but may create multiple if the AudioBuffer is exceedingly large.
|
|
@@ -8172,7 +8279,7 @@ ${cue.notes ?? ""}`;
|
|
|
8172
8279
|
if (!(audioBuffer instanceof AudioBuffer)) {
|
|
8173
8280
|
throw new TypeError("audioBuffer must be an AudioBuffer.");
|
|
8174
8281
|
}
|
|
8175
|
-
const MAX_FLOAT_COUNT =
|
|
8282
|
+
const MAX_FLOAT_COUNT = 48e3 * 5;
|
|
8176
8283
|
const numberOfChannels = audioBuffer.numberOfChannels;
|
|
8177
8284
|
const sampleRate = audioBuffer.sampleRate;
|
|
8178
8285
|
const totalFrames = audioBuffer.length;
|
|
@@ -8185,7 +8292,7 @@ ${cue.notes ?? ""}`;
|
|
|
8185
8292
|
const chunkData = new Float32Array(numberOfChannels * framesToCopy);
|
|
8186
8293
|
for (let channel = 0; channel < numberOfChannels; channel++) {
|
|
8187
8294
|
audioBuffer.copyFromChannel(
|
|
8188
|
-
chunkData.subarray(channel * framesToCopy, channel
|
|
8295
|
+
chunkData.subarray(channel * framesToCopy, (channel + 1) * framesToCopy),
|
|
8189
8296
|
channel,
|
|
8190
8297
|
currentRelativeFrame
|
|
8191
8298
|
);
|
|
@@ -10591,6 +10698,46 @@ ${cue.notes ?? ""}`;
|
|
|
10591
10698
|
return false;
|
|
10592
10699
|
}
|
|
10593
10700
|
};
|
|
10701
|
+
var AdtsOutputFormat = class extends OutputFormat {
|
|
10702
|
+
constructor(options = {}) {
|
|
10703
|
+
if (!options || typeof options !== "object") {
|
|
10704
|
+
throw new TypeError("options must be an object.");
|
|
10705
|
+
}
|
|
10706
|
+
if (options.onFrame !== void 0 && typeof options.onFrame !== "function") {
|
|
10707
|
+
throw new TypeError("options.onFrame, when provided, must be a function.");
|
|
10708
|
+
}
|
|
10709
|
+
super();
|
|
10710
|
+
this._options = options;
|
|
10711
|
+
}
|
|
10712
|
+
/** @internal */
|
|
10713
|
+
_createMuxer(output) {
|
|
10714
|
+
return new AdtsMuxer(output, this);
|
|
10715
|
+
}
|
|
10716
|
+
/** @internal */
|
|
10717
|
+
get _name() {
|
|
10718
|
+
return "ADTS";
|
|
10719
|
+
}
|
|
10720
|
+
getSupportedTrackCounts() {
|
|
10721
|
+
return {
|
|
10722
|
+
video: { min: 0, max: 0 },
|
|
10723
|
+
audio: { min: 1, max: 1 },
|
|
10724
|
+
subtitle: { min: 0, max: 0 },
|
|
10725
|
+
total: { min: 1, max: 1 }
|
|
10726
|
+
};
|
|
10727
|
+
}
|
|
10728
|
+
get fileExtension() {
|
|
10729
|
+
return ".aac";
|
|
10730
|
+
}
|
|
10731
|
+
get mimeType() {
|
|
10732
|
+
return "audio/aac";
|
|
10733
|
+
}
|
|
10734
|
+
getSupportedCodecs() {
|
|
10735
|
+
return ["aac"];
|
|
10736
|
+
}
|
|
10737
|
+
get supportsVideoRotationMetadata() {
|
|
10738
|
+
return false;
|
|
10739
|
+
}
|
|
10740
|
+
};
|
|
10594
10741
|
|
|
10595
10742
|
// src/media-source.ts
|
|
10596
10743
|
var MediaSource = class {
|
|
@@ -11527,14 +11674,15 @@ ${cue.notes ?? ""}`;
|
|
|
11527
11674
|
* @returns A Promise that resolves once the output is ready to receive more samples. You should await this Promise
|
|
11528
11675
|
* to respect writer and encoder backpressure.
|
|
11529
11676
|
*/
|
|
11530
|
-
add(audioBuffer) {
|
|
11677
|
+
async add(audioBuffer) {
|
|
11531
11678
|
if (!(audioBuffer instanceof AudioBuffer)) {
|
|
11532
11679
|
throw new TypeError("audioBuffer must be an AudioBuffer.");
|
|
11533
11680
|
}
|
|
11534
|
-
const
|
|
11535
|
-
const promises = audioSamples.map((sample) => this._encoder.add(sample, true));
|
|
11681
|
+
const iterator = AudioSample._fromAudioBuffer(audioBuffer, this._accumulatedTime);
|
|
11536
11682
|
this._accumulatedTime += audioBuffer.duration;
|
|
11537
|
-
|
|
11683
|
+
for (const audioSample of iterator) {
|
|
11684
|
+
await this._encoder.add(audioSample, true);
|
|
11685
|
+
}
|
|
11538
11686
|
}
|
|
11539
11687
|
/** @internal */
|
|
11540
11688
|
_flushAndClose(forceClose) {
|
|
@@ -11619,9 +11767,9 @@ ${cue.notes ?? ""}`;
|
|
|
11619
11767
|
let audioReceived = false;
|
|
11620
11768
|
let totalDuration = 0;
|
|
11621
11769
|
this._scriptProcessorNode.onaudioprocess = (event) => {
|
|
11622
|
-
const
|
|
11770
|
+
const iterator = AudioSample._fromAudioBuffer(event.inputBuffer, totalDuration);
|
|
11623
11771
|
totalDuration += event.inputBuffer.duration;
|
|
11624
|
-
for (const audioSample of
|
|
11772
|
+
for (const audioSample of iterator) {
|
|
11625
11773
|
if (!audioReceived) {
|
|
11626
11774
|
audioReceived = true;
|
|
11627
11775
|
const muxer = this._connectedTrack.output._muxer;
|
|
@@ -12919,7 +13067,7 @@ ${cue.notes ?? ""}`;
|
|
|
12919
13067
|
const chromaSubsamplingX = thirdByte >> 3 & 1;
|
|
12920
13068
|
const chromaSubsamplingY = thirdByte >> 2 & 1;
|
|
12921
13069
|
const chromaSamplePosition = thirdByte & 3;
|
|
12922
|
-
const bitDepth = profile
|
|
13070
|
+
const bitDepth = profile === 2 && highBitDepth ? twelveBit ? 12 : 10 : highBitDepth ? 10 : 8;
|
|
12923
13071
|
track.info.av1CodecInfo = {
|
|
12924
13072
|
profile,
|
|
12925
13073
|
level,
|
|
@@ -15729,7 +15877,7 @@ ${cue.notes ?? ""}`;
|
|
|
15729
15877
|
this.loadedSamples = [];
|
|
15730
15878
|
// All samples from the start of the file to lastLoadedPos
|
|
15731
15879
|
this.tracks = [];
|
|
15732
|
-
this.
|
|
15880
|
+
this.readingMutex = new AsyncMutex();
|
|
15733
15881
|
this.lastLoadedPos = 0;
|
|
15734
15882
|
this.fileSize = 0;
|
|
15735
15883
|
this.nextTimestampInSamples = 0;
|
|
@@ -15742,32 +15890,25 @@ ${cue.notes ?? ""}`;
|
|
|
15742
15890
|
while (!this.firstFrameHeader && this.lastLoadedPos < this.fileSize) {
|
|
15743
15891
|
await this.loadNextChunk();
|
|
15744
15892
|
}
|
|
15745
|
-
|
|
15746
|
-
throw new Error("No MP3 frames found.");
|
|
15747
|
-
}
|
|
15893
|
+
assert(this.firstFrameHeader);
|
|
15748
15894
|
this.tracks = [new InputAudioTrack(new Mp3AudioTrackBacking(this))];
|
|
15749
15895
|
})();
|
|
15750
15896
|
}
|
|
15751
15897
|
/** Loads the next 0.5 MiB of frames. */
|
|
15752
15898
|
async loadNextChunk() {
|
|
15753
|
-
|
|
15754
|
-
|
|
15755
|
-
|
|
15756
|
-
|
|
15757
|
-
|
|
15758
|
-
|
|
15759
|
-
|
|
15760
|
-
|
|
15761
|
-
if (
|
|
15762
|
-
|
|
15763
|
-
|
|
15764
|
-
this.reader.pos += id3Tag.size;
|
|
15765
|
-
}
|
|
15766
|
-
}
|
|
15767
|
-
this.parseFramesFromLoadedData();
|
|
15768
|
-
} finally {
|
|
15769
|
-
release();
|
|
15899
|
+
assert(this.lastLoadedPos < this.fileSize);
|
|
15900
|
+
const chunkSize = 0.5 * 1024 * 1024;
|
|
15901
|
+
const endPos = Math.min(this.lastLoadedPos + chunkSize, this.fileSize);
|
|
15902
|
+
await this.reader.reader.loadRange(this.lastLoadedPos, endPos);
|
|
15903
|
+
this.lastLoadedPos = endPos;
|
|
15904
|
+
assert(this.lastLoadedPos <= this.fileSize);
|
|
15905
|
+
if (this.reader.pos === 0) {
|
|
15906
|
+
const id3Tag = this.reader.readId3();
|
|
15907
|
+
if (id3Tag) {
|
|
15908
|
+
this.reader.pos += id3Tag.size;
|
|
15909
|
+
}
|
|
15770
15910
|
}
|
|
15911
|
+
this.parseFramesFromLoadedData();
|
|
15771
15912
|
}
|
|
15772
15913
|
parseFramesFromLoadedData() {
|
|
15773
15914
|
while (true) {
|
|
@@ -15882,43 +16023,50 @@ ${cue.notes ?? ""}`;
|
|
|
15882
16023
|
);
|
|
15883
16024
|
}
|
|
15884
16025
|
async getFirstPacket(options) {
|
|
15885
|
-
while (this.demuxer.loadedSamples.length === 0 && this.demuxer.lastLoadedPos < this.demuxer.fileSize) {
|
|
15886
|
-
await this.demuxer.loadNextChunk();
|
|
15887
|
-
}
|
|
15888
16026
|
return this.getPacketAtIndex(0, options);
|
|
15889
16027
|
}
|
|
15890
16028
|
async getNextPacket(packet, options) {
|
|
15891
|
-
const
|
|
15892
|
-
|
|
15893
|
-
|
|
15894
|
-
(x) => x.timestamp
|
|
15895
|
-
);
|
|
15896
|
-
if (sampleIndex === -1) {
|
|
15897
|
-
throw new Error("Packet was not created from this track.");
|
|
15898
|
-
}
|
|
15899
|
-
const nextIndex = sampleIndex + 1;
|
|
15900
|
-
while (nextIndex >= this.demuxer.loadedSamples.length && this.demuxer.lastLoadedPos < this.demuxer.fileSize) {
|
|
15901
|
-
await this.demuxer.loadNextChunk();
|
|
15902
|
-
}
|
|
15903
|
-
return this.getPacketAtIndex(nextIndex, options);
|
|
15904
|
-
}
|
|
15905
|
-
async getPacket(timestamp, options) {
|
|
15906
|
-
while (true) {
|
|
15907
|
-
const index = binarySearchLessOrEqual(
|
|
16029
|
+
const release = await this.demuxer.readingMutex.acquire();
|
|
16030
|
+
try {
|
|
16031
|
+
const sampleIndex = binarySearchExact(
|
|
15908
16032
|
this.demuxer.loadedSamples,
|
|
15909
|
-
timestamp,
|
|
16033
|
+
packet.timestamp,
|
|
15910
16034
|
(x) => x.timestamp
|
|
15911
16035
|
);
|
|
15912
|
-
if (
|
|
15913
|
-
|
|
16036
|
+
if (sampleIndex === -1) {
|
|
16037
|
+
throw new Error("Packet was not created from this track.");
|
|
15914
16038
|
}
|
|
15915
|
-
|
|
15916
|
-
|
|
16039
|
+
const nextIndex = sampleIndex + 1;
|
|
16040
|
+
while (nextIndex >= this.demuxer.loadedSamples.length && this.demuxer.lastLoadedPos < this.demuxer.fileSize) {
|
|
16041
|
+
await this.demuxer.loadNextChunk();
|
|
15917
16042
|
}
|
|
15918
|
-
|
|
15919
|
-
|
|
16043
|
+
return this.getPacketAtIndex(nextIndex, options);
|
|
16044
|
+
} finally {
|
|
16045
|
+
release();
|
|
16046
|
+
}
|
|
16047
|
+
}
|
|
16048
|
+
async getPacket(timestamp, options) {
|
|
16049
|
+
const release = await this.demuxer.readingMutex.acquire();
|
|
16050
|
+
try {
|
|
16051
|
+
while (true) {
|
|
16052
|
+
const index = binarySearchLessOrEqual(
|
|
16053
|
+
this.demuxer.loadedSamples,
|
|
16054
|
+
timestamp,
|
|
16055
|
+
(x) => x.timestamp
|
|
16056
|
+
);
|
|
16057
|
+
if (index === -1 && this.demuxer.loadedSamples.length > 0) {
|
|
16058
|
+
return null;
|
|
16059
|
+
}
|
|
16060
|
+
if (this.demuxer.lastLoadedPos === this.demuxer.fileSize) {
|
|
16061
|
+
return this.getPacketAtIndex(index, options);
|
|
16062
|
+
}
|
|
16063
|
+
if (index >= 0 && index + 1 < this.demuxer.loadedSamples.length) {
|
|
16064
|
+
return this.getPacketAtIndex(index, options);
|
|
16065
|
+
}
|
|
16066
|
+
await this.demuxer.loadNextChunk();
|
|
15920
16067
|
}
|
|
15921
|
-
|
|
16068
|
+
} finally {
|
|
16069
|
+
release();
|
|
15922
16070
|
}
|
|
15923
16071
|
}
|
|
15924
16072
|
getKeyPacket(timestamp, options) {
|
|
@@ -16624,6 +16772,282 @@ ${cue.notes ?? ""}`;
|
|
|
16624
16772
|
return { page: previousPage, segmentIndex: previousPage.lacingValues.length - 1 };
|
|
16625
16773
|
};
|
|
16626
16774
|
|
|
16775
|
+
// src/adts/adts-reader.ts
|
|
16776
|
+
var MAX_FRAME_HEADER_SIZE = 9;
|
|
16777
|
+
var AdtsReader = class {
|
|
16778
|
+
constructor(reader) {
|
|
16779
|
+
this.reader = reader;
|
|
16780
|
+
this.pos = 0;
|
|
16781
|
+
}
|
|
16782
|
+
readBytes(length) {
|
|
16783
|
+
const { view: view2, offset } = this.reader.getViewAndOffset(this.pos, this.pos + length);
|
|
16784
|
+
this.pos += length;
|
|
16785
|
+
return new Uint8Array(view2.buffer, offset, length);
|
|
16786
|
+
}
|
|
16787
|
+
readFrameHeader() {
|
|
16788
|
+
const startPos = this.pos;
|
|
16789
|
+
const bytes2 = this.readBytes(9);
|
|
16790
|
+
const bitstream = new Bitstream(bytes2);
|
|
16791
|
+
const syncword = bitstream.readBits(12);
|
|
16792
|
+
if (syncword !== 4095) {
|
|
16793
|
+
return null;
|
|
16794
|
+
}
|
|
16795
|
+
bitstream.skipBits(1);
|
|
16796
|
+
const layer = bitstream.readBits(2);
|
|
16797
|
+
if (layer !== 0) {
|
|
16798
|
+
return null;
|
|
16799
|
+
}
|
|
16800
|
+
const protectionAbsence = bitstream.readBits(1);
|
|
16801
|
+
const objectType = bitstream.readBits(2) + 1;
|
|
16802
|
+
const samplingFrequencyIndex = bitstream.readBits(4);
|
|
16803
|
+
if (samplingFrequencyIndex === 15) {
|
|
16804
|
+
return null;
|
|
16805
|
+
}
|
|
16806
|
+
bitstream.skipBits(1);
|
|
16807
|
+
const channelConfiguration = bitstream.readBits(3);
|
|
16808
|
+
if (channelConfiguration === 0) {
|
|
16809
|
+
throw new Error("ADTS frames with channel configuration 0 are not supported.");
|
|
16810
|
+
}
|
|
16811
|
+
bitstream.skipBits(1);
|
|
16812
|
+
bitstream.skipBits(1);
|
|
16813
|
+
bitstream.skipBits(1);
|
|
16814
|
+
bitstream.skipBits(1);
|
|
16815
|
+
const frameLength = bitstream.readBits(13);
|
|
16816
|
+
bitstream.skipBits(11);
|
|
16817
|
+
const numberOfAacFrames = bitstream.readBits(2) + 1;
|
|
16818
|
+
if (numberOfAacFrames !== 1) {
|
|
16819
|
+
throw new Error("ADTS frames with more than one AAC frame are not supported.");
|
|
16820
|
+
}
|
|
16821
|
+
let crcCheck = null;
|
|
16822
|
+
if (protectionAbsence === 1) {
|
|
16823
|
+
this.pos -= 2;
|
|
16824
|
+
} else {
|
|
16825
|
+
crcCheck = bitstream.readBits(16);
|
|
16826
|
+
}
|
|
16827
|
+
return {
|
|
16828
|
+
objectType,
|
|
16829
|
+
samplingFrequencyIndex,
|
|
16830
|
+
channelConfiguration,
|
|
16831
|
+
frameLength,
|
|
16832
|
+
numberOfAacFrames,
|
|
16833
|
+
crcCheck,
|
|
16834
|
+
startPos
|
|
16835
|
+
};
|
|
16836
|
+
}
|
|
16837
|
+
};
|
|
16838
|
+
|
|
16839
|
+
// src/adts/adts-demuxer.ts
|
|
16840
|
+
var SAMPLES_PER_AAC_FRAME = 1024;
|
|
16841
|
+
var AdtsDemuxer = class extends Demuxer {
|
|
16842
|
+
constructor(input) {
|
|
16843
|
+
super(input);
|
|
16844
|
+
this.metadataPromise = null;
|
|
16845
|
+
this.firstFrameHeader = null;
|
|
16846
|
+
this.loadedSamples = [];
|
|
16847
|
+
// All samples from the start of the file to lastLoadedPos
|
|
16848
|
+
this.tracks = [];
|
|
16849
|
+
this.readingMutex = new AsyncMutex();
|
|
16850
|
+
this.lastLoadedPos = 0;
|
|
16851
|
+
this.fileSize = 0;
|
|
16852
|
+
this.nextTimestampInSamples = 0;
|
|
16853
|
+
this.reader = new AdtsReader(input._mainReader);
|
|
16854
|
+
}
|
|
16855
|
+
async readMetadata() {
|
|
16856
|
+
return this.metadataPromise ??= (async () => {
|
|
16857
|
+
this.fileSize = await this.input.source.getSize();
|
|
16858
|
+
await this.loadNextChunk();
|
|
16859
|
+
assert(this.firstFrameHeader);
|
|
16860
|
+
this.tracks = [new InputAudioTrack(new AdtsAudioTrackBacking(this))];
|
|
16861
|
+
})();
|
|
16862
|
+
}
|
|
16863
|
+
async loadNextChunk() {
|
|
16864
|
+
assert(this.lastLoadedPos < this.fileSize);
|
|
16865
|
+
const chunkSize = 0.5 * 1024 * 1024;
|
|
16866
|
+
const endPos = Math.min(this.lastLoadedPos + chunkSize, this.fileSize);
|
|
16867
|
+
await this.reader.reader.loadRange(this.lastLoadedPos, endPos);
|
|
16868
|
+
this.lastLoadedPos = endPos;
|
|
16869
|
+
assert(this.lastLoadedPos <= this.fileSize);
|
|
16870
|
+
this.parseFramesFromLoadedData();
|
|
16871
|
+
}
|
|
16872
|
+
parseFramesFromLoadedData() {
|
|
16873
|
+
while (this.reader.pos <= this.fileSize - MAX_FRAME_HEADER_SIZE) {
|
|
16874
|
+
const startPos = this.reader.pos;
|
|
16875
|
+
const header = this.reader.readFrameHeader();
|
|
16876
|
+
if (!header) {
|
|
16877
|
+
break;
|
|
16878
|
+
}
|
|
16879
|
+
if (startPos + header.frameLength > this.lastLoadedPos) {
|
|
16880
|
+
this.reader.pos = startPos;
|
|
16881
|
+
this.lastLoadedPos = startPos;
|
|
16882
|
+
break;
|
|
16883
|
+
}
|
|
16884
|
+
if (!this.firstFrameHeader) {
|
|
16885
|
+
this.firstFrameHeader = header;
|
|
16886
|
+
}
|
|
16887
|
+
const sampleRate = aacFrequencyTable[header.samplingFrequencyIndex];
|
|
16888
|
+
assert(sampleRate !== void 0);
|
|
16889
|
+
const sampleDuration = SAMPLES_PER_AAC_FRAME / sampleRate;
|
|
16890
|
+
const headerSize = header.crcCheck ? MAX_FRAME_HEADER_SIZE : MAX_FRAME_HEADER_SIZE - 2;
|
|
16891
|
+
const sample = {
|
|
16892
|
+
timestamp: this.nextTimestampInSamples / sampleRate,
|
|
16893
|
+
duration: sampleDuration,
|
|
16894
|
+
dataStart: startPos + headerSize,
|
|
16895
|
+
dataSize: header.frameLength - headerSize
|
|
16896
|
+
};
|
|
16897
|
+
this.loadedSamples.push(sample);
|
|
16898
|
+
this.nextTimestampInSamples += SAMPLES_PER_AAC_FRAME;
|
|
16899
|
+
this.reader.pos = startPos + header.frameLength;
|
|
16900
|
+
}
|
|
16901
|
+
}
|
|
16902
|
+
async getMimeType() {
|
|
16903
|
+
return "audio/aac";
|
|
16904
|
+
}
|
|
16905
|
+
async getTracks() {
|
|
16906
|
+
await this.readMetadata();
|
|
16907
|
+
return this.tracks;
|
|
16908
|
+
}
|
|
16909
|
+
async computeDuration() {
|
|
16910
|
+
await this.readMetadata();
|
|
16911
|
+
const track = this.tracks[0];
|
|
16912
|
+
assert(track);
|
|
16913
|
+
return track.computeDuration();
|
|
16914
|
+
}
|
|
16915
|
+
};
|
|
16916
|
+
// Track backing for the single audio track of an ADTS stream. Packet
// retrieval is driven by the demuxer's incrementally-built sample index
// (demuxer.loadedSamples), loading further chunks on demand under the
// demuxer's reading mutex.
var AdtsAudioTrackBacking = class {
  constructor(demuxer) {
    this.demuxer = demuxer;
  }
  // ADTS streams contain exactly one track; a fixed id suffices.
  getId() {
    return 1;
  }
  // Raw ADTS has no edit lists/offsets; playback always starts at 0.
  async getFirstTimestamp() {
    return 0;
  }
  // Timestamps advance in whole AAC frames, i.e. sampleRate/1024 ticks/sec.
  getTimeResolution() {
    const sampleRate = this.getSampleRate();
    return sampleRate / SAMPLES_PER_AAC_FRAME;
  }
  // Duration = end time of the last packet. Seeking to Infinity forces the
  // demuxer to index the whole file first.
  async computeDuration() {
    const lastPacket = await this.getPacket(Infinity, { metadataOnly: true });
    return (lastPacket?.timestamp ?? 0) + (lastPacket?.duration ?? 0);
  }
  // ADTS carries no language metadata.
  getLanguageCode() {
    return UNDETERMINED_LANGUAGE;
  }
  getCodec() {
    return "aac";
  }
  // Channel count mapped from the first frame header's channel configuration.
  // Requires that at least one frame has been parsed.
  getNumberOfChannels() {
    assert(this.demuxer.firstFrameHeader);
    const numberOfChannels = aacChannelMap[this.demuxer.firstFrameHeader.channelConfiguration];
    assert(numberOfChannels !== void 0);
    return numberOfChannels;
  }
  // Sample rate looked up from the first frame header's frequency index.
  getSampleRate() {
    assert(this.demuxer.firstFrameHeader);
    const sampleRate = aacFrequencyTable[this.demuxer.firstFrameHeader.samplingFrequencyIndex];
    assert(sampleRate !== void 0);
    return sampleRate;
  }
  // Builds a WebCodecs-style AudioDecoderConfig. The `description` bytes are
  // an MPEG-4 AudioSpecificConfig assembled bit-by-bit from the first frame
  // header (object type uses the 31 + 6-bit escape form when > 31).
  async getDecoderConfig() {
    assert(this.demuxer.firstFrameHeader);
    const bytes2 = new Uint8Array(3);
    const bitstream = new Bitstream(bytes2);
    const { objectType, samplingFrequencyIndex, channelConfiguration } = this.demuxer.firstFrameHeader;
    if (objectType > 31) {
      bitstream.writeBits(5, 31);
      bitstream.writeBits(6, objectType - 32);
    } else {
      bitstream.writeBits(5, objectType);
    }
    bitstream.writeBits(4, samplingFrequencyIndex);
    bitstream.writeBits(4, channelConfiguration);
    return {
      codec: `mp4a.40.${this.demuxer.firstFrameHeader.objectType}`,
      numberOfChannels: this.getNumberOfChannels(),
      sampleRate: this.getSampleRate(),
      // Only the bytes actually written are included.
      description: bytes2.subarray(0, Math.ceil((bitstream.pos - 1) / 8))
    };
  }
  // Turns loadedSamples[sampleIndex] into an EncodedPacket, or null when the
  // index is out of range. With metadataOnly, placeholder bytes are returned
  // instead of reading frame data from the file.
  getPacketAtIndex(sampleIndex, options) {
    if (sampleIndex === -1) {
      return null;
    }
    const rawSample = this.demuxer.loadedSamples[sampleIndex];
    if (!rawSample) {
      return null;
    }
    let data;
    if (options.metadataOnly) {
      data = PLACEHOLDER_DATA;
    } else {
      this.demuxer.reader.pos = rawSample.dataStart;
      data = this.demuxer.reader.readBytes(rawSample.dataSize);
    }
    // All AAC frames are independently decodable, hence always "key".
    return new EncodedPacket(
      data,
      "key",
      rawSample.timestamp,
      rawSample.duration,
      sampleIndex,
      rawSample.dataSize
    );
  }
  async getFirstPacket(options) {
    return this.getPacketAtIndex(0, options);
  }
  // Returns the packet following `packet`, loading more chunks until its
  // successor is indexed (or the file is exhausted).
  async getNextPacket(packet, options) {
    const release = await this.demuxer.readingMutex.acquire();
    try {
      // Locate the given packet by its exact timestamp in the sample index.
      const sampleIndex = binarySearchExact(
        this.demuxer.loadedSamples,
        packet.timestamp,
        (x) => x.timestamp
      );
      if (sampleIndex === -1) {
        throw new Error("Packet was not created from this track.");
      }
      const nextIndex = sampleIndex + 1;
      // Keep loading until the next sample exists or we've read the whole file.
      while (nextIndex >= this.demuxer.loadedSamples.length && this.demuxer.lastLoadedPos < this.demuxer.fileSize) {
        await this.demuxer.loadNextChunk();
      }
      return this.getPacketAtIndex(nextIndex, options);
    } finally {
      release();
    }
  }
  // Returns the last packet with timestamp <= `timestamp`. Loads chunks
  // lazily: only returns once the match is certain (either the whole file is
  // indexed, or a later sample already exists so the match can't change).
  async getPacket(timestamp, options) {
    const release = await this.demuxer.readingMutex.acquire();
    try {
      while (true) {
        const index = binarySearchLessOrEqual(
          this.demuxer.loadedSamples,
          timestamp,
          (x) => x.timestamp
        );
        if (index === -1 && this.demuxer.loadedSamples.length > 0) {
          // Requested timestamp precedes the first sample.
          return null;
        }
        if (this.demuxer.lastLoadedPos === this.demuxer.fileSize) {
          // Entire file indexed; the search result is final.
          return this.getPacketAtIndex(index, options);
        }
        if (index >= 0 && index + 1 < this.demuxer.loadedSamples.length) {
          // A later sample exists, so this match cannot be superseded.
          return this.getPacketAtIndex(index, options);
        }
        await this.demuxer.loadNextChunk();
      }
    } finally {
      release();
    }
  }
  // Every AAC packet is a key packet, so these simply delegate.
  getKeyPacket(timestamp, options) {
    return this.getPacket(timestamp, options);
  }
  getNextKeyPacket(packet, options) {
    return this.getNextPacket(packet, options);
  }
};
|
|
17050
|
+
|
|
16627
17051
|
// src/input-format.ts
// Abstract base class for input (container) formats. Concrete formats below
// extend it with _canReadInput/_createDemuxer plus name/mimeType getters.
var InputFormat = class {
};
|
|
@@ -16868,6 +17292,40 @@ ${cue.notes ?? ""}`;
|
|
|
16868
17292
|
return "application/ogg";
|
|
16869
17293
|
}
|
|
16870
17294
|
};
|
|
17295
|
+
// Input format for raw ADTS (AAC) streams. Detection requires two
// consecutive, consistent frame headers to avoid false positives on
// arbitrary binary data.
var AdtsInputFormat = class extends InputFormat {
  /** @internal */
  async _canReadInput(input) {
    const totalSize = await input._mainReader.source.getSize();
    if (totalSize < MAX_FRAME_HEADER_SIZE) {
      return false;
    }
    const reader = new AdtsReader(input._mainReader);
    const first = reader.readFrameHeader();
    if (!first) {
      return false;
    }
    // There must be room for a complete first frame plus a second header.
    if (totalSize < first.frameLength + MAX_FRAME_HEADER_SIZE) {
      return false;
    }
    reader.pos = first.frameLength;
    await reader.reader.loadRange(reader.pos, reader.pos + MAX_FRAME_HEADER_SIZE);
    const second = reader.readFrameHeader();
    if (!second) {
      return false;
    }
    // Both headers must agree on the stream's core parameters.
    const sameObjectType = first.objectType === second.objectType;
    const sameFrequency = first.samplingFrequencyIndex === second.samplingFrequencyIndex;
    const sameChannels = first.channelConfiguration === second.channelConfiguration;
    return sameObjectType && sameFrequency && sameChannels;
  }
  /** @internal */
  _createDemuxer(input) {
    return new AdtsDemuxer(input);
  }
  get name() {
    return "ADTS";
  }
  get mimeType() {
    return "audio/aac";
  }
};
|
|
16871
17329
|
// Shared singleton instances of the supported input formats.
var MP4 = new Mp4InputFormat();
var QTFF = new QuickTimeInputFormat();
var MATROSKA = new MatroskaInputFormat();
|
|
@@ -16875,7 +17333,8 @@ ${cue.notes ?? ""}`;
|
|
|
16875
17333
|
var MP3 = new Mp3InputFormat();
var WAVE = new WaveInputFormat();
var OGG = new OggInputFormat();
var ADTS = new AdtsInputFormat();
// All supported formats. NOTE(review): presumably probed in this order
// during detection, with the loosely-structured MP3/ADTS formats last to
// reduce false positives — confirm against the Input detection logic.
var ALL_FORMATS = [MP4, QTFF, MATROSKA, WEBM, WAVE, OGG, MP3, ADTS];
|
|
16879
17338
|
|
|
16880
17339
|
// src/input.ts
|
|
16881
17340
|
var Input = class {
|