@editframe/assets 0.18.3-beta.0 → 0.18.7-beta.0
This diff shows the changes between publicly released versions of this package as published to a supported public registry. It is provided for informational purposes only.
- package/dist/Probe.d.ts +441 -29
- package/dist/Probe.js +156 -21
- package/dist/generateTrackFragmentIndexMediabunny.d.ts +3 -0
- package/dist/generateTrackFragmentIndexMediabunny.js +343 -0
- package/dist/generateTrackMediabunny.d.ts +8 -0
- package/dist/generateTrackMediabunny.js +69 -0
- package/dist/idempotentTask.js +33 -35
- package/dist/index.d.ts +2 -2
- package/dist/index.js +2 -2
- package/dist/tasks/findOrCreateCaptions.js +1 -1
- package/dist/tasks/generateTrack.d.ts +1 -2
- package/dist/tasks/generateTrack.js +5 -32
- package/dist/tasks/generateTrackFragmentIndex.js +11 -75
- package/dist/truncateDecimal.d.ts +1 -0
- package/dist/truncateDecimal.js +5 -0
- package/package.json +2 -14
- package/src/tasks/generateTrack.test.ts +90 -0
- package/src/tasks/generateTrack.ts +7 -48
- package/src/tasks/generateTrackFragmentIndex.test.ts +115 -0
- package/src/tasks/generateTrackFragmentIndex.ts +27 -98
- package/types.json +1 -1
- package/dist/DecoderManager.d.ts +0 -62
- package/dist/DecoderManager.js +0 -114
- package/dist/EncodedAsset.d.ts +0 -143
- package/dist/EncodedAsset.js +0 -443
- package/dist/FrameBuffer.d.ts +0 -62
- package/dist/FrameBuffer.js +0 -89
- package/dist/MP4File.d.ts +0 -37
- package/dist/MP4File.js +0 -209
- package/dist/MP4SampleAnalyzer.d.ts +0 -59
- package/dist/MP4SampleAnalyzer.js +0 -119
- package/dist/SeekStrategy.d.ts +0 -82
- package/dist/SeekStrategy.js +0 -101
- package/dist/memoize.js +0 -11
- package/dist/mp4FileWritable.d.ts +0 -3
- package/dist/mp4FileWritable.js +0 -19
package/dist/EncodedAsset.js
DELETED
@@ -1,443 +0,0 @@
-import { MP4File } from "./MP4File.js";
-import { memoize } from "./memoize.js";
-import { FrameBuffer } from "./FrameBuffer.js";
-import { SeekStrategy } from "./SeekStrategy.js";
-import { MP4SampleAnalyzer } from "./MP4SampleAnalyzer.js";
-import { DecoderManager } from "./DecoderManager.js";
-import debug from "debug";
-import _decorate from "@oxc-project/runtime/helpers/decorate";
-const log = debug("ef:av");
-const BUFFER_SIZE = 30;
-var AssetNotAvailableLocally = class extends Error {};
-var FileAsset = class {
-  constructor(localName, file) {
-    this.localName = localName;
-    this.file = file;
-  }
-  async arrayBuffer() {
-    return this.file.arrayBuffer();
-  }
-  get byteSize() {
-    return this.file.size;
-  }
-  get fileExtension() {
-    return this.file.name.split(".").pop();
-  }
-  slice(start, end) {
-    return this.file.slice(start, end);
-  }
-};
-var ISOFileAsset = class extends FileAsset {
-  constructor(localName, file, mp4boxFile) {
-    super(localName, file);
-    this.localName = localName;
-    this.file = file;
-    this.mp4boxFile = mp4boxFile;
-  }
-  get fileInfo() {
-    return this.mp4boxFile.getInfo();
-  }
-  get containerFormat() {
-    return "mp4";
-  }
-};
-_decorate([memoize], ISOFileAsset.prototype, "fileInfo", null);
-var VideoAsset = class VideoAsset extends ISOFileAsset {
-  static async createFromReadableStream(id, stream, file, options = {}) {
-    let fileStart = 0;
-    const inputFile = new MP4File();
-    const reader = stream.getReader();
-    const processChunk = ({ done, value }) => {
-      if (done) return;
-      if (!value) return;
-      const mp4buffer = value.buffer;
-      mp4buffer.fileStart = fileStart;
-      const isLast = file.size === fileStart + value.byteLength;
-      inputFile.appendBuffer(mp4buffer, isLast);
-      fileStart += value.byteLength;
-      return reader.read().then(processChunk);
-    };
-    await reader.read().then(processChunk);
-    const asset = new VideoAsset(id, inputFile, file);
-    asset.startTimeOffsetMs = options.startTimeOffsetMs;
-    return asset;
-  }
-  /**
-   * Creates a VideoAsset from a complete MP4 file (like JIT transcoded segments).
-   *
-   * This is used for JIT transcoded segments which are complete MP4 files that always
-   * start on keyframes, unlike fragmented MP4s used in the asset pipeline.
-   */
-  static async createFromCompleteMP4(id, file, options = {}) {
-    const fileBuffer = await file.arrayBuffer();
-    const inputFile = new MP4File();
-    const mp4buffer = fileBuffer;
-    mp4buffer.fileStart = 0;
-    inputFile.appendBuffer(mp4buffer, true);
-    inputFile.flush();
-    await inputFile.readyPromise;
-    const asset = new VideoAsset(id, inputFile, file);
-    asset.isJitSegment = true;
-    asset.startTimeOffsetMs = options.startTimeOffsetMs;
-    return asset;
-  }
-  /**
-   * **Only use this function in tests to reset a VideoAsset to its initial state.**
-   *
-   * @deprecated
-   */
-  async TEST_ONLY_RESET() {
-    if (this.decoderManager.state !== "closed") await this.decoderManager.flush();
-    this.decoderManager.configureDecoder();
-    this.requestedSampleNumber = 0;
-    this.outCursor = 0;
-    this.sampleCursor = 0;
-    this.frameBuffer.clear();
-    this.lastDecodedSample = void 0;
-    this.lastSoughtFrame?.close();
-    this.lastSoughtFrame = void 0;
-  }
-  addEventListener(type, callback) {
-    this.eventListeners[type] ||= /* @__PURE__ */ new Set();
-    this.eventListeners[type]?.add(callback);
-  }
-  removeEventListener(type, callback) {
-    this.eventListeners[type]?.delete(callback);
-  }
-  emit(type, ...args) {
-    for (const listener of this.eventListeners[type] ?? []) listener(...args);
-  }
-  constructor(localName, mp4boxFile, file) {
-    super(localName, file, mp4boxFile);
-    this.requestedSampleNumber = 0;
-    this.outCursor = 0;
-    this.sampleCursor = 0;
-    this.latestSeekCts = 0;
-    this.isJitSegment = false;
-    this.isBeingReplaced = false;
-    this.eventListeners = {};
-    this.frameBuffer = new FrameBuffer(BUFFER_SIZE);
-    this.sampleAnalyzer = new MP4SampleAnalyzer(mp4boxFile, this.defaultVideoTrack);
-    this.seekStrategy = new SeekStrategy();
-    this.decoderManager = new DecoderManager(mp4boxFile, this.defaultVideoTrack, (decodedFrame) => {
-      const clone = decodedFrame.clone();
-      this.frameBuffer.add(clone);
-      decodedFrame.close();
-      this.outCursor = this.samples.findIndex((sample) => sample.cts === decodedFrame.timestamp);
-      this.emit("frame", clone);
-    }, (e) => {
-      console.error("Video Decoder Error", e);
-      throw e;
-    });
-    this.decoderManager.configureDecoder();
-  }
-  get videoDecoder() {
-    return this.decoderManager.videoDecoder;
-  }
-  get decodedFrames() {
-    return this.frameBuffer.frames;
-  }
-  set decodedFrames(frames) {
-    this.frameBuffer.clear();
-    frames.forEach((frame) => this.frameBuffer.add(frame));
-  }
-  get videoCodec() {
-    if (!this.defaultVideoTrack) throw new Error("No default video track found");
-    return this.defaultVideoTrack?.codec;
-  }
-  get fragmentInfo() {
-    const fragments = [];
-    const [first, ...samples] = this.samples;
-    if (!first) return fragments;
-    let currentFragment = {
-      offset: first.offset,
-      size: first.size,
-      start_ms: first.cts,
-      duration_ms: 0
-    };
-    for (const sample of samples) if (sample.is_sync) {
-      if (currentFragment) {
-        currentFragment.duration_ms = sample.cts - currentFragment.start_ms;
-        fragments.push(currentFragment);
-      }
-      currentFragment = {
-        offset: sample.offset,
-        size: sample.size,
-        start_ms: sample.cts,
-        duration_ms: 0
-      };
-    } else currentFragment.size += sample.size;
-    return fragments;
-  }
-  pruneBuffer() {}
-  get editsOffset() {
-    return this.sampleAnalyzer.editsOffset;
-  }
-  async waitUntilVideoQueueDrained() {
-    return this.decoderManager.waitUntilVideoQueueDrained();
-  }
-  get canDecodeNextSample() {
-    return this.sampleCursor < this.samples.length;
-  }
-  async decodeNextSample() {
-    if (!this.canDecodeNextSample) throw new Error("No more samples to decode");
-    await this.decodeSlice(this.sampleCursor, this.sampleCursor);
-    this.sampleCursor++;
-  }
-  async decodeSlice(start, end) {
-    const samples = this.samples.slice(start, end + 1);
-    const firstSample = samples[0];
-    const lastSample = samples[samples.length - 1];
-    if (!firstSample || !lastSample) throw new Error("Samples not found");
-    const sliceStart = firstSample.offset;
-    const sliceEnd = lastSample.offset + lastSample.size;
-    const buffer = await this.file.slice(sliceStart, sliceEnd).arrayBuffer();
-    const firstSampleOffset = firstSample.offset;
-    for (let i = start; i <= end; i++) {
-      await this.waitUntilVideoQueueDrained();
-      const sample = this.getSample(i);
-      log("Decoding sample #", i, `cts=${sample.cts}`);
-      const sampleStart = sample.offset - firstSampleOffset;
-      const sampleEnd = sample.offset + sample.size - firstSampleOffset;
-      const chunk = new EncodedVideoChunk({
-        data: buffer.slice(sampleStart, sampleEnd),
-        timestamp: sample.cts,
-        duration: sample.duration,
-        type: sample.is_sync ? "key" : "delta"
-      });
-      this.decoderManager.decode(chunk);
-      const nextSample = this.defaultVideoTrak?.samples?.[i + 1];
-      if (nextSample === void 0) {
-        log("ENDFLUSH");
-        await this.decoderManager.flush();
-      }
-    }
-  }
-  get decoderConfiguration() {
-    return this.decoderManager.decoderConfiguration;
-  }
-  configureDecoder() {
-    this.decoderManager.configureDecoder();
-  }
-  getSample(index = -1) {
-    return this.sampleAnalyzer.getSample(index);
-  }
-  get timescale() {
-    return this.sampleAnalyzer.timescale;
-  }
-  get samples() {
-    return this.sampleAnalyzer.samples;
-  }
-  get displayOrderedSamples() {
-    return this.sampleAnalyzer.displayOrderedSamples;
-  }
-  getSampleClosetToTime(seconds) {
-    return this.sampleAnalyzer.getSampleClosetToTime(seconds);
-  }
-  seekingWillEmitNewFrame(seconds) {
-    if (!this.lastSoughtFrame) return true;
-    if (this.seekingWillGoBackwards(seconds)) return true;
-    const nextCts = this.getSampleClosetToTime(seconds).cts;
-    return nextCts > this.lastSoughtFrame.timestamp;
-  }
-  seekingWillSkipPictureGroup(seconds) {
-    const targetSample = this.getSampleClosetToTime(seconds);
-    const state = {
-      sampleCursor: this.sampleCursor,
-      outCursor: this.outCursor,
-      frameBuffer: this.frameBuffer
-    };
-    return this.seekStrategy.seekingWillSkipPictureGroup(state, targetSample, this.samples);
-  }
-  seekingWillGoBackwards(seconds) {
-    const targetSample = this.getSampleClosetToTime(seconds);
-    const state = {
-      sampleCursor: this.sampleCursor,
-      outCursor: this.outCursor,
-      frameBuffer: this.frameBuffer
-    };
-    return this.seekStrategy.seekingWillGoBackwards(state, targetSample, this.displayOrderedSamples);
-  }
-  /**
-   * Optimized flush decision for JIT segments that always start on keyframes.
-   * JIT segments have better keyframe distribution and shorter duration,
-   * so we can be less aggressive about flushing.
-   */
-  shouldFlushForJitSegment(seconds, _shouldFlushPictureGroup, shouldFlushBackwards) {
-    if (shouldFlushBackwards) {
-      const targetSample = this.getSampleClosetToTime(seconds);
-      const targetInCache = this.frameBuffer.findByTimestamp(targetSample.cts);
-      if (!targetInCache) {
-        const currentPosition = this.outCursor;
-        const targetPosition = this.samples.findIndex((s) => s.cts === targetSample.cts);
-        const jumpDistance = currentPosition - targetPosition;
-        if (jumpDistance > 10) return true;
-        return false;
-      }
-    }
-    return false;
-  }
-  /**
-   * Finds the optimal sample cursor position for segments with keyframes.
-   * Uses optimal keyframe selection for both single and multi-keyframe segments.
-   */
-  findOptimalSampleCursorForJitSeek(targetSample) {
-    let syncSampleNumber = targetSample.number;
-    while (!this.getSample(syncSampleNumber).is_sync) {
-      syncSampleNumber--;
-      if (syncSampleNumber < 0) throw new Error("No sync sample found when traversing backwards");
-    }
-    return syncSampleNumber;
-  }
-  /**
-   * Marks this VideoAsset as being replaced, which will abort any ongoing seek operations
-   */
-  markAsBeingReplaced() {
-    this.isBeingReplaced = true;
-    if (this.activeSeekAbortController) this.activeSeekAbortController.abort("VideoAsset being replaced");
-  }
-  async seekToTime(seconds) {
-    seconds += (this.startTimeOffsetMs ?? 0) / 1e3;
-    const correctedSeconds = seconds;
-    this.activeSeekAbortController = new AbortController();
-    const abortSignal = this.activeSeekAbortController.signal;
-    if (this.isBeingReplaced) throw new Error("VideoAsset seek aborted - VideoAsset being replaced");
-    const sample = this.getSampleClosetToTime(correctedSeconds);
-    const cts = sample.cts;
-    this.latestSeekCts = cts;
-    const alreadyDecodedFrame = this.frameBuffer.findByTimestamp(cts);
-    if (alreadyDecodedFrame) return alreadyDecodedFrame;
-    const shouldFlushPictureGroup = this.seekingWillSkipPictureGroup(seconds);
-    const shouldFlushBackwards = this.seekingWillGoBackwards(seconds);
-    const shouldFlush = this.isJitSegment ? this.shouldFlushForJitSegment(seconds, shouldFlushPictureGroup, shouldFlushBackwards) : shouldFlushPictureGroup || shouldFlushBackwards;
-    if (shouldFlush) {
-      if (this.isBeingReplaced) throw new Error("VideoAsset seek aborted - VideoAsset being replaced");
-      if (this.decoderManager.state === "closed") throw new Error("VideoAsset decoder closed - recreation in progress");
-      try {
-        await this.decoderManager.flush();
-      } catch (error) {
-        if (error instanceof Error && error.name === "InvalidStateError" && error.message.includes("closed codec")) throw new Error("VideoAsset decoder closed during flush - recreation in progress");
-        throw error;
-      }
-      this.sampleCursor = this.findOptimalSampleCursorForJitSeek(sample);
-    }
-    let frame;
-    const maybeFrame = (_frame) => {
-      if (frame) return;
-      log("Maybe frame", _frame.timestamp, cts);
-      if (_frame.timestamp === cts) {
-        this.removeEventListener("frame", maybeFrame);
-        frame = _frame;
-      }
-    };
-    this.addEventListener("frame", maybeFrame);
-    while (frame === void 0 && this.canDecodeNextSample) {
-      if (abortSignal.aborted) {
-        this.removeEventListener("frame", maybeFrame);
-        throw new Error("VideoAsset seek aborted - VideoAsset being replaced");
-      }
-      if (this.isBeingReplaced) {
-        this.removeEventListener("frame", maybeFrame);
-        throw new Error("VideoAsset seek aborted - VideoAsset being replaced");
-      }
-      if (this.decoderManager.state === "closed") {
-        this.removeEventListener("frame", maybeFrame);
-        throw new Error("VideoAsset decoder closed during seek - recreation in progress");
-      }
-      try {
-        await this.decodeNextSample();
-      } catch (error) {
-        if (error instanceof Error && error.name === "InvalidStateError" && error.message.includes("closed codec")) {
-          console.log("🎬 VideoAsset: Decoder was closed during decode - VideoAsset being replaced");
-          this.removeEventListener("frame", maybeFrame);
-          throw new Error("VideoAsset decoder closed during decode - recreation in progress");
-        }
-        throw error;
-      }
-    }
-    this.removeEventListener("frame", maybeFrame);
-    this.activeSeekAbortController = void 0;
-    if (frame) {
-      if (this.lastSoughtFrame && !this.frameBuffer.frames.includes(this.lastSoughtFrame)) try {
-        this.lastSoughtFrame.close();
-      } catch (error) {}
-      this.lastSoughtFrame = frame;
-    }
-    return frame;
-  }
-  get defaultVideoTrack() {
-    return this.fileInfo.videoTracks?.[0];
-  }
-  get defaultVideoTrak() {
-    return this.mp4boxFile.getTrackById(this.defaultVideoTrack?.id ?? -1);
-  }
-  get duration() {
-    return this.fileInfo.duration / this.fileInfo.timescale;
-  }
-};
-_decorate([memoize], VideoAsset.prototype, "editsOffset", null);
-_decorate([memoize], VideoAsset.prototype, "timescale", null);
-_decorate([memoize], VideoAsset.prototype, "samples", null);
-_decorate([memoize], VideoAsset.prototype, "displayOrderedSamples", null);
-_decorate([memoize], VideoAsset.prototype, "defaultVideoTrack", null);
-_decorate([memoize], VideoAsset.prototype, "defaultVideoTrak", null);
-_decorate([memoize], VideoAsset.prototype, "duration", null);
-var AudioAsset = class AudioAsset extends ISOFileAsset {
-  static async createFromReadableStream(id, stream, file) {
-    let fileStart = 0;
-    const inputFile = new MP4File();
-    const reader = stream.getReader();
-    const processChunk = ({ done, value }) => {
-      if (done) return;
-      if (!value) return;
-      const mp4buffer = value.buffer;
-      mp4buffer.fileStart = fileStart;
-      fileStart += value.byteLength;
-      inputFile.appendBuffer(mp4buffer);
-      return reader.read().then(processChunk);
-    };
-    await reader.read().then(processChunk);
-    return new AudioAsset(id, file, inputFile);
-  }
-  get defaultAudioTrack() {
-    return this.fileInfo.audioTracks[0];
-  }
-  get defaultAudioTrak() {
-    return this.mp4boxFile.getTrackById(this.defaultAudioTrack?.id ?? -1);
-  }
-  get audioCodec() {
-    if (!this.defaultAudioTrack) throw new Error("No default audio track found");
-    return this.defaultAudioTrack.codec;
-  }
-  get samplerate() {
-    if (!this.defaultAudioTrack) throw new Error("No default audio track found");
-    return this.defaultAudioTrack.audio.sample_rate;
-  }
-  get channelCount() {
-    if (!this.defaultAudioTrack) throw new Error("No default audio track found");
-    return this.defaultAudioTrack.audio.channel_count;
-  }
-};
-_decorate([memoize], AudioAsset.prototype, "defaultAudioTrack", null);
-_decorate([memoize], AudioAsset.prototype, "defaultAudioTrak", null);
-_decorate([memoize], AudioAsset.prototype, "audioCodec", null);
-_decorate([memoize], AudioAsset.prototype, "samplerate", null);
-_decorate([memoize], AudioAsset.prototype, "channelCount", null);
-var ImageAsset = class ImageAsset extends FileAsset {
-  static async createFromReadableStream(id, file) {
-    if (file.size === 0) throw new AssetNotAvailableLocally();
-    return new ImageAsset(id, file);
-  }
-  get objectUrl() {
-    return URL.createObjectURL(this.file);
-  }
-  get format() {
-    return this.fileExtension;
-  }
-  get type() {
-    return `image/${this.format}`;
-  }
-};
-_decorate([memoize], ImageAsset.prototype, "objectUrl", null);
-_decorate([memoize], ImageAsset.prototype, "format", null);
-export { AssetNotAvailableLocally, AudioAsset, FileAsset, ISOFileAsset, ImageAsset, VideoAsset };
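
Illustrative only, not part of the published diff: a minimal TypeScript sketch of how the removed seek path was typically driven, based solely on the API shape visible in the deleted file above. The import path and the "segment.mp4" URL are placeholders for this example.

// seek-sketch.ts — exercises the removed VideoAsset API (createFromCompleteMP4 + seekToTime)
import { VideoAsset } from "./EncodedAsset.js"; // path of the deleted dist module; placeholder

async function grabFrameAt(seconds: number): Promise<VideoFrame | undefined> {
  // Load a complete MP4 (e.g. a JIT-transcoded segment) as a File so FileAsset can slice it.
  const response = await fetch("segment.mp4");
  const file = new File([await response.blob()], "segment.mp4");

  // createFromCompleteMP4 feeds the whole buffer to MP4File and flags the asset as a JIT segment.
  const asset = await VideoAsset.createFromCompleteMP4("segment-1", file);

  // seekToTime returns a cached frame if the target cts is already buffered; otherwise it
  // decodes forward, sample by sample, until a frame with the matching timestamp is emitted.
  const frame = await asset.seekToTime(Math.min(seconds, asset.duration));
  if (frame) console.log("decoded frame at timestamp", frame.timestamp);
  return frame;
}

grabFrameAt(1.5).catch(console.error);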
package/dist/FrameBuffer.d.ts
DELETED
@@ -1,62 +0,0 @@
-/**
- * FrameBuffer manages a collection of decoded VideoFrames with automatic LRU eviction.
- *
- * This class extracts the frame buffering logic from VideoAsset to improve
- * testability and separation of concerns while maintaining the exact same
- * memory management behavior.
- */
-export declare class FrameBuffer {
-  private _frames;
-  private readonly _maxSize;
-  /**
-   * Creates a new FrameBuffer with the specified maximum size.
-   *
-   * @param maxSize Maximum number of frames to keep in buffer (default: 30)
-   */
-  constructor(maxSize?: number);
-  /**
-   * Gets the maximum size of the buffer.
-   */
-  get maxSize(): number;
-  /**
-   * Gets the current number of frames in the buffer.
-   */
-  get size(): number;
-  /**
-   * Gets a copy of the frames array for compatibility.
-   * Note: Returns a shallow copy to prevent external modification while allowing access.
-   */
-  get frames(): VideoFrame[];
-  /**
-   * Adds a frame to the buffer. If the buffer exceeds maxSize after adding,
-   * the oldest frames will be automatically pruned and closed.
-   *
-   * @param frame The VideoFrame to add to the buffer
-   */
-  add(frame: VideoFrame): void;
-  /**
-   * Finds a frame in the buffer by its timestamp.
-   *
-   * @param timestamp The timestamp to search for
-   * @returns The VideoFrame with matching timestamp, or undefined if not found
-   */
-  findByTimestamp(timestamp: number): VideoFrame | undefined;
-  /**
-   * Checks if the buffer contains the specified frame instance.
-   *
-   * @param frame The VideoFrame instance to check for
-   * @returns true if the frame is in the buffer, false otherwise
-   */
-  includes(frame: VideoFrame): boolean;
-  /**
-   * Removes all frames from the buffer and closes them to free memory.
-   * This method is safe to call even if some frames are already closed.
-   */
-  clear(): void;
-  /**
-   * Prunes the buffer to the maximum size by removing and closing the oldest frames.
-   * This maintains the LRU (Least Recently Used) eviction policy where oldest
-   * frames are removed first.
-   */
-  pruneToSize(): void;
-}
package/dist/FrameBuffer.js
DELETED
@@ -1,89 +0,0 @@
-/**
- * FrameBuffer manages a collection of decoded VideoFrames with automatic LRU eviction.
- *
- * This class extracts the frame buffering logic from VideoAsset to improve
- * testability and separation of concerns while maintaining the exact same
- * memory management behavior.
- */
-var FrameBuffer = class {
-  /**
-   * Creates a new FrameBuffer with the specified maximum size.
-   *
-   * @param maxSize Maximum number of frames to keep in buffer (default: 30)
-   */
-  constructor(maxSize = 30) {
-    this._frames = [];
-    this._maxSize = maxSize;
-  }
-  /**
-   * Gets the maximum size of the buffer.
-   */
-  get maxSize() {
-    return this._maxSize;
-  }
-  /**
-   * Gets the current number of frames in the buffer.
-   */
-  get size() {
-    return this._frames.length;
-  }
-  /**
-   * Gets a copy of the frames array for compatibility.
-   * Note: Returns a shallow copy to prevent external modification while allowing access.
-   */
-  get frames() {
-    return [...this._frames];
-  }
-  /**
-   * Adds a frame to the buffer. If the buffer exceeds maxSize after adding,
-   * the oldest frames will be automatically pruned and closed.
-   *
-   * @param frame The VideoFrame to add to the buffer
-   */
-  add(frame) {
-    this._frames.push(frame);
-    this.pruneToSize();
-  }
-  /**
-   * Finds a frame in the buffer by its timestamp.
-   *
-   * @param timestamp The timestamp to search for
-   * @returns The VideoFrame with matching timestamp, or undefined if not found
-   */
-  findByTimestamp(timestamp) {
-    return this._frames.find((frame) => frame.timestamp === timestamp);
-  }
-  /**
-   * Checks if the buffer contains the specified frame instance.
-   *
-   * @param frame The VideoFrame instance to check for
-   * @returns true if the frame is in the buffer, false otherwise
-   */
-  includes(frame) {
-    return this._frames.includes(frame);
-  }
-  /**
-   * Removes all frames from the buffer and closes them to free memory.
-   * This method is safe to call even if some frames are already closed.
-   */
-  clear() {
-    for (const frame of this._frames) try {
-      frame.close();
-    } catch (error) {}
-    this._frames = [];
-  }
-  /**
-   * Prunes the buffer to the maximum size by removing and closing the oldest frames.
-   * This maintains the LRU (Least Recently Used) eviction policy where oldest
-   * frames are removed first.
-   */
-  pruneToSize() {
-    while (this._frames.length > this._maxSize) {
-      const oldestFrame = this._frames.shift();
-      if (oldestFrame) try {
-        oldestFrame.close();
-      } catch (error) {}
-    }
-  }
-};
-export { FrameBuffer };
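
Illustrative only, not part of the published diff: a small sketch of the LRU eviction documented above, importing the deleted module directly; the OffscreenCanvas-sourced frames are stand-ins used purely so the example is self-contained.

// frame-buffer-sketch.ts — the oldest frame is closed once the buffer exceeds maxSize
import { FrameBuffer } from "./FrameBuffer.js"; // path of the deleted dist module; placeholder

const canvas = new OffscreenCanvas(16, 16);
canvas.getContext("2d"); // give the canvas a drawable context before capturing frames from it

const buffer = new FrameBuffer(2);
const frames = [0, 1, 2].map((timestamp) => new VideoFrame(canvas, { timestamp }));

for (const frame of frames) buffer.add(frame); // the third add() triggers pruneToSize()

console.log(buffer.size);                // 2
console.log(buffer.findByTimestamp(0));  // undefined — the timestamp=0 frame was evicted and closed
console.log(buffer.includes(frames[2])); // true

buffer.clear(); // closes the two remaining frames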
package/dist/MP4File.d.ts
DELETED
@@ -1,37 +0,0 @@
-import * as MP4Box from "mp4box";
-export interface MP4FileOptions {
-  readyTimeoutMs?: number;
-  sampleWaitTimeoutMs?: number;
-}
-export declare class MP4File extends MP4Box.ISOFile {
-  private timeoutId?;
-  private readonly readyTimeoutMs;
-  private readonly sampleWaitTimeoutMs;
-  readonly readyPromise: Promise<void>;
-  constructor(options?: MP4FileOptions);
-  setSegmentOptions(id: number, user: any, options: MP4Box.SegmentOptions): void;
-  /**
-   * Fragments all tracks in a file into separate array buffers.
-   */
-  fragmentAllTracks(): Promise<Record<number, ArrayBuffer[]>>;
-  fragmentIterator(): AsyncGenerator<{
-    track: number;
-    segment: "init";
-    data: ArrayBuffer;
-    cts?: undefined;
-    dts?: undefined;
-    duration?: undefined;
-  } | {
-    track: number;
-    segment: number;
-    data: ArrayBuffer;
-    cts: number;
-    dts: number;
-    duration: number;
-  }, void, unknown>;
-  waitingForSamples: Array<(last: boolean) => void>;
-  _hasSeenLastSamples: boolean;
-  waitForMoreSamples(): Promise<boolean>;
-  processSamples(last: boolean): void;
-  _arrayBufferFileStart: number;
-}
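
Illustrative only, not part of the published diff: a sketch of consuming the fragmentIterator() shape declared above, assuming an MP4File instance that has already been fed a complete buffer (as the deleted EncodedAsset.js did); the grouping logic here is hypothetical.

// fragment-sketch.ts — collect init and media segments per track as they are yielded
import { MP4File } from "./MP4File.js"; // path of the deleted dist module; placeholder

async function collectFragments(mp4: MP4File): Promise<Map<number, ArrayBuffer[]>> {
  const byTrack = new Map<number, ArrayBuffer[]>();
  for await (const fragment of mp4.fragmentIterator()) {
    const buffers = byTrack.get(fragment.track) ?? [];
    if (fragment.segment === "init") {
      buffers.unshift(fragment.data); // the init segment belongs at the front
    } else {
      buffers.push(fragment.data); // numbered media segment; cts/dts/duration are also available
    }
    byTrack.set(fragment.track, buffers);
  }
  return byTrack;
}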