@lumen5/beamcoder 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.circleci/config.yml +41 -0
- package/.circleci/images/testbeam10-4.1/Dockerfile +12 -0
- package/.circleci/test_image/Dockerfile +14 -0
- package/.circleci/test_image/build.md +13 -0
- package/.eslintrc.js +27 -0
- package/.github/workflows/publish-npm.yml +33 -0
- package/LICENSE +674 -0
- package/README.md +1221 -0
- package/beamstreams.js +692 -0
- package/binding.gyp +103 -0
- package/examples/encode_h264.js +92 -0
- package/examples/jpeg_app.js +55 -0
- package/examples/jpeg_filter_app.js +101 -0
- package/examples/make_mp4.js +123 -0
- package/images/beamcoder_small.jpg +0 -0
- package/index.d.ts +83 -0
- package/index.js +44 -0
- package/install_ffmpeg.js +240 -0
- package/package.json +45 -0
- package/scratch/decode_aac.js +38 -0
- package/scratch/decode_avci.js +50 -0
- package/scratch/decode_hevc.js +38 -0
- package/scratch/decode_pcm.js +39 -0
- package/scratch/make_a_mux.js +68 -0
- package/scratch/muxer.js +74 -0
- package/scratch/read_wav.js +35 -0
- package/scratch/simple_mux.js +39 -0
- package/scratch/stream_avci.js +127 -0
- package/scratch/stream_mp4.js +78 -0
- package/scratch/stream_mux.js +47 -0
- package/scratch/stream_pcm.js +82 -0
- package/scratch/stream_wav.js +62 -0
- package/scripts/install_beamcoder_dependencies.sh +25 -0
- package/src/adaptor.h +202 -0
- package/src/beamcoder.cc +937 -0
- package/src/beamcoder_util.cc +1129 -0
- package/src/beamcoder_util.h +206 -0
- package/src/codec.cc +7386 -0
- package/src/codec.h +44 -0
- package/src/codec_par.cc +1818 -0
- package/src/codec_par.h +40 -0
- package/src/decode.cc +569 -0
- package/src/decode.h +75 -0
- package/src/demux.cc +584 -0
- package/src/demux.h +88 -0
- package/src/encode.cc +496 -0
- package/src/encode.h +72 -0
- package/src/filter.cc +1888 -0
- package/src/filter.h +30 -0
- package/src/format.cc +5287 -0
- package/src/format.h +77 -0
- package/src/frame.cc +2681 -0
- package/src/frame.h +52 -0
- package/src/governor.cc +286 -0
- package/src/governor.h +30 -0
- package/src/hwcontext.cc +378 -0
- package/src/hwcontext.h +35 -0
- package/src/log.cc +186 -0
- package/src/log.h +20 -0
- package/src/mux.cc +834 -0
- package/src/mux.h +106 -0
- package/src/packet.cc +762 -0
- package/src/packet.h +49 -0
- package/test/codecParamsSpec.js +148 -0
- package/test/decoderSpec.js +56 -0
- package/test/demuxerSpec.js +41 -0
- package/test/encoderSpec.js +69 -0
- package/test/filtererSpec.js +47 -0
- package/test/formatSpec.js +343 -0
- package/test/frameSpec.js +145 -0
- package/test/introspectionSpec.js +73 -0
- package/test/muxerSpec.js +34 -0
- package/test/packetSpec.js +122 -0
- package/types/Beamstreams.d.ts +98 -0
- package/types/Codec.d.ts +123 -0
- package/types/CodecContext.d.ts +555 -0
- package/types/CodecPar.d.ts +108 -0
- package/types/Decoder.d.ts +137 -0
- package/types/Demuxer.d.ts +113 -0
- package/types/Encoder.d.ts +94 -0
- package/types/Filter.d.ts +324 -0
- package/types/FormatContext.d.ts +380 -0
- package/types/Frame.d.ts +295 -0
- package/types/HWContext.d.ts +62 -0
- package/types/Muxer.d.ts +121 -0
- package/types/Packet.d.ts +82 -0
- package/types/PrivClass.d.ts +25 -0
- package/types/Stream.d.ts +165 -0
package/types/Decoder.d.ts
@@ -0,0 +1,137 @@
import { CodecPar } from "./CodecPar"
import { Packet } from "./Packet"
import { Frame } from "./Frame"
import { Codec } from "./Codec"
import { CodecContext } from "./CodecContext"
import { Demuxer } from "./Demuxer"

/** The DecodedFrames object is returned as the result of a decode operation */
export interface DecodedFrames {
  /** Object name. */
  readonly type: 'frames'
  /**
   * Decoded frames that are now available. If the array is empty, the decoder has buffered
   * the packet as part of the process of producing future frames
   */
  readonly frames: Array<Frame>
  /** Total time in microseconds that the decode operation took to complete */
  readonly total_time: number
}

export interface Decoder extends Omit<CodecContext,
  'bit_rate_tolerance' | 'global_quality' | 'compression_level' |
  'max_b_frames' | 'b_quant_factor' | 'b_quant_offset' |
  'i_quant_factor' | 'i_quant_offset' | 'lumi_masking' |
  'temporal_cplx_masking' | 'spatial_cplx_masking' | 'p_masking' | 'dark_masking' |
  'me_cmp' | 'me_sub_cmp' | 'mb_cmp' | 'ildct_cmp' | 'dia_size' | 'last_predictor_count' |
  'mb_pre_cmp' | 'pre_dia_size' | 'me_subpel_quality' | 'me_range' |
  'mb_decision' | 'mb_lmin' | 'mb_lmax' | 'bidir_refine' | 'keyint_min' |
  'mv0_threshold' | 'slices' | 'block_align' | 'audio_service_type' |
  'qcompress' | 'qblur' | 'qmin' | 'qmax' | 'max_qdiff' | 'rc_buffer_size' | 'rc_override' |
  'rc_min_rate' | 'rc_max_available_vbv_use' | 'rc_min_vbv_overflow_use' |
  'rc_initial_buffer_occupancy' | 'trellis' | 'stats_out' | 'stats_in' | 'error' | 'dct_algo' |
  'nsse_weight' | 'initial_padding' | 'seek_preroll' | 'chroma_intra_matrix' | 'coded_side_data'
> {
  readonly type: 'decoder'
  readonly time_base: Array<number>
  readonly sample_aspect_ratio: Array<number>
  readonly intra_matrix: Array<number> | null
  readonly inter_matrix: Array<number> | null
  readonly intra_dc_precision: number
  readonly refs: number
  readonly color_primaries?: string
  readonly color_trc: string
  readonly colorspace: string
  readonly color_range: string
  readonly chroma_sample_location: 'unspecified' | 'left' | 'center' | 'topleft' | 'top' | 'bottomleft' | 'bottom'
  readonly field_order: 'progressive' |
    'top coded first, top displayed first' |
    'bottom coded first, bottom displayed first' |
    'top coded first, bottom displayed first' |
    'bottom coded first, top displayed first' |
    'unknown'
  readonly sample_fmt: string | null
  readonly audio_service_type: 'main' | 'effects' | 'visually-impaired' | 'hearing-impaired' | 'dialogue' |
    'commentary' | 'emergency' | 'voice-over' | 'karaoke' | 'nb'
  readonly bits_per_raw_sample: number
  readonly profile: string | number
  readonly level: number
  readonly subtitle_header: Buffer | null
  readonly framerate: Array<number>

  /**
   * Decode an encoded data packet or array of packets and create an uncompressed frame
   * or frames (may be a frames-worth of audio).
   * Decoders may need more than one packet to produce a frame and may subsequently
   * produce more than one frame per packet. This is particularly the case for long-GOP formats.
   * @param packet A packet or an array of packets to be decoded
   * @returns a promise that resolves to a DecodedFrames object when the decode has completed successfully
   */
  decode(packet: Packet | Packet[]): Promise<DecodedFrames>
  /**
   * Decode a number of packets passed as separate parameters and create uncompressed frames
   * (may be a frames-worth of audio).
   * Decoders may need more than one packet to produce a frame and may subsequently
   * produce more than one frame per packet. This is particularly the case for long-GOP formats.
   * @param packets An arbitrary number of packets to be decoded
   * @returns a promise that resolves to a DecodedFrames object when the decode has completed successfully
   */
  decode(...packets: Packet[]): Promise<DecodedFrames>
  /**
   * Once all packets have been passed to the decoder, it is necessary to call its
   * asynchronous flush() method. If any frames are yet to be delivered by the decoder
   * they will be provided in the resolved value.
   *
   * Call the flush operation once and do not use the decoder for further decoding once it has
   * been flushed. The resources held by the decoder will be cleaned up as part of the Javascript
   * garbage collection process, so make sure that the reference to the decoder goes out of scope.
   * @returns a promise that resolves to a DecodedFrames object when the flush has completed successfully
   */
  flush(): Promise<DecodedFrames>
  /**
   * Extract the CodecPar object for the Decoder
   * @returns A CodecPar object
   */
  extractParams(): any
  /**
   * Initialise the decoder with parameters from a CodecPar object
   * @param param The CodecPar object that is to be used to override the current Decoder parameters
   * @returns the modified Decoder object
   */
  useParams(params: CodecPar): Decoder
}

/**
 * Provides a list and details of all the available decoders
 * @returns an object with name and details of each of the available decoders
 */
export function decoders(): { [key: string]: Codec }
/**
 * Create a decoder by name
 * @param name The codec name required
 * @param ... Any non-readonly parameters from the Decoder object as required
 * @returns A Decoder object - note creation is synchronous
 */
export function decoder(options: { name: string, [key: string]: any }): Decoder
/**
 * Create a decoder by codec_id
 * @param codec_id The codec ID from AV_CODEC_ID_xxx
 * @param ... Any non-readonly parameters from the Decoder object as required
 * @returns A Decoder object - note creation is synchronous
 */
export function decoder(options: { codec_id: number, [key: string]: any }): Decoder
/**
 * Create a decoder from a demuxer and a stream_index
 * @param demuxer An initialised Demuxer object
 * @param stream_index The stream number of the demuxer object to be used to initialise the decoder
 * @param ... Any non-readonly parameters from the Decoder object as required
 * @returns A Decoder object - note creation is synchronous
 */
export function decoder(options: { demuxer: Demuxer, stream_index: number, [key: string]: any }): Decoder
/**
 * Create a decoder from a CodecPar object
 * @param params CodecPar object whose codec name or id will be used to initialise the decoder
 * @param ... Any non-readonly parameters from the Decoder object as required
 * @returns A Decoder object - note creation is synchronous
 */
export function decoder(options: { params: CodecPar, [key: string]: any }): Decoder
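
A minimal usage sketch against these declarations. It assumes the package root re-exports decoder() and demuxer() as in its index.d.ts; the import path and the choice of stream 0 are illustrative, not taken from the package itself.

import { demuxer, decoder } from '@lumen5/beamcoder' // assumed root export

async function firstFrames(src: string) {
  const dm = await demuxer(src)                          // resolves once format details are known
  const dec = decoder({ demuxer: dm, stream_index: 0 })  // synchronous creation from a demuxer stream
  const packet = await dm.read()                         // one encoded packet from the source
  const result = await dec.decode(packet)                // frames may be empty while the decoder buffers
  const flushed = await dec.flush()                      // drain buffered frames; decoder is spent afterwards
  return [...result.frames, ...flushed.frames]
}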
package/types/Demuxer.d.ts
@@ -0,0 +1,113 @@
import { Packet } from "./Packet"
import { InputFormat, FormatContext } from "./FormatContext"

export interface SeekOptions {
  /**
   * The stream where to seek
   * Use in conjunction with property frame or timestamp
   */
  stream_index?: number
  /**
   * Seek by the number of frames into a given stream
   * Use in conjunction with stream_index
   */
  frame?: number
  /**
   * Seek forward to a keyframe in a given stream or file at a given timestamp
   * The timestamp is the presentation timestamp of the packet measured in the timebase of the stream
   * Use in conjunction with stream_index
   */
  timestamp?: number
  /**
   * seek based on elapsed time from the beginning of the primary stream
   * (as determined by FFmpeg, normally the first video stream where available)
   */
  time?: number
  /**
   * byte offset position into the file
   */
  pos?: number
  /**
   * The backward Boolean-valued property is interpreted as:
   * true: find the nearest key frame before the timestamp
   * false: find the nearest keyframe after the timestamp
   */
  backward?: boolean
  /**
   * The any Boolean-valued property enables seeking to both key and non-key frames
   */
  any?: boolean
}

/**
 * The process of demuxing (de-multiplexing) extracts time-labelled packets of data
 * contained in a media stream or file.
 */
export interface Demuxer extends Omit<FormatContext,
  'oformat' | 'max_interleave_delta' | 'avoid_negative_ts' | 'audio_preload' |
  'max_chunk_duration' | 'max_chunk_size' | 'flush_packets' | 'metadata_header_padding'
> {
  /** Object name. */
  readonly type: 'demuxer'
  readonly iformat: InputFormat
  readonly url: string
  readonly duration: number

  /**
   * Beam coder offers FFmpeg's many options for seeking a particular frame in a file,
   * either by time reference, frame count or file position.
   * https://github.com/Streampunk/beamcoder#seeking
   * @param options an object that specifies details on how the seek is to be calculated.
   * @returns a promise that resolves when the seek has completed
   */
  seek(options: SeekOptions): Promise<null>
  /**
   * Read the next blob of data from the file or stream at the current position,
   * where that data could be from any of the streams.
   * Typically, a packet is one frame of video data or a data blob representing
   * a codec-dependent number of audio samples.
   * Use the stream_index property of returned packet to find out which stream it is
   * associated with and dimensions including height, width or audio sample rate.
   * https://github.com/Streampunk/beamcoder#reading-data-packets
   * @returns a promise that resolves to a Packet when the read has completed
   */
  read(): Promise<Packet>
  /**
   * Abandon the demuxing process and forcibly close the file or stream without waiting for it to finish
   */
  forceClose(): undefined
}

/**
 * Provides a list and details of all the available demuxer input formats
 * @returns an object with details of all the available demuxer input formats
 */
export function demuxers(): { [key: string]: InputFormat }

/**
 * Create a demuxer to read from a URL or filename
 * @param url a string describing the source to be read from (may contain %d for a sequence of numbered files).
 * @returns a promise that resolves to a Demuxer when it has determined sufficient
 * format details by consuming data from the source. The promise will wait indefinitely
 * until sufficient source data has been read.
 */
export function demuxer(url: string): Promise<Demuxer>

/** Object to provide additional metadata on Demuxer creation */
export interface DemuxerCreateOptions {
  /** String describing the source to be read from (may contain %d for a sequence of numbered files). */
  url?: string
  /** Object that provides format details */
  iformat?: InputFormat
  /** Object allowing additional information to be provided */
  options?: { [key: string]: any }
}
/**
 * For formats that require additional metadata, such as the rawvideo format,
 * it may be necessary to pass additional information such as image size or pixel format to Demuxer creation.
 * @param options a DemuxerCreateOptions object
 * @returns a promise that resolves to a Demuxer when it has determined sufficient
 * format details by consuming data from the source. The promise will wait indefinitely
 * until sufficient source data has been read.
 */
export function demuxer(options: DemuxerCreateOptions): Promise<Demuxer>
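
A short sketch of the seek-then-read cycle these declarations describe. The import path is assumed, and the timestamp value is illustrative only; per the SeekOptions comments it is expressed in the timebase of the chosen stream.

import { demuxer } from '@lumen5/beamcoder' // assumed root export

async function readAfterSeek(src: string) {
  const dm = await demuxer(src)
  // Jump stream 0 to the nearest keyframe at or before the given presentation timestamp
  await dm.seek({ stream_index: 0, timestamp: 90000, backward: true })
  const packet = await dm.read()        // next packet from any stream at the new position
  return packet.stream_index            // identifies which stream the packet belongs to
}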
package/types/Encoder.d.ts
@@ -0,0 +1,94 @@
import { CodecPar } from "./CodecPar"
import { Packet } from "./Packet";
import { Frame } from "./Frame";
import { Codec } from "./Codec"
import { CodecContext } from "./CodecContext"

/** The EncodedPackets object is returned as the result of a encode operation */
export interface EncodedPackets {
  /** Object name. */
  readonly type: 'packets'
  /**
   * Encoded packets that are now available. If the array is empty, the encoder has buffered
   * the frame as part of the process of producing future packets
   */
  readonly packets: Array<Packet>
  /** Total time in microseconds that the encode operation took to complete */
  readonly total_time: number
}
/**
 * Encoder takes a stream of uncompressed data in the form of Frames and converts them into coded Packets.
 * Encoding takes place on a single type of stream, for example audio or video.
 */
export interface Encoder extends Omit<CodecContext,
  'coded_width' | 'coded_height' | 'slice_flags' | 'skip_top' | 'skip_bottom' |
  'request_channel_layout' | 'request_sample_fmt' | 'error_concealment' | 'err_recognition' |
  'reordered_opaque' | 'skip_loop_filter' | 'skip_idct' | 'skip_frame' | 'sw_pix_fmt' |
  'pkt_timebase' | 'codec_descriptor' | 'sub_charenc' | 'sub_charenc_mode' | 'skip_alpha' |
  'codec_whitelist' | 'properties' | 'sub_text_format' | 'hwaccel_flags' | 'apply_cropping' | 'extra_hw_frames'
> {
  readonly type: 'encoder'
  readonly extradata: Buffer | null
  readonly slice_count: number
  readonly slice_offset: Array<number> | null
  readonly bits_per_coded_sample: number

  /**
   * Encode a Frame or array of Frames and create a compressed Packet or Packets.
   * Encoders may need more than one Frame to produce a Packet and may subsequently
   * produce more than one Packet per Frame. This is particularly the case for long-GOP formats.
   * @param frame A Frame or an array of Frames to be encoded
   * @returns a promise that resolves to a EncodedPackets object when the encode has completed successfully
   */
  encode(frame: Frame | Frame[]): Promise<EncodedPackets>
  /**
   * Encode a number of Frames passed as separate parameters and create compressed Packets
   * Encoders may need more than one Frame to produce a Packet and may subsequently
   * produce more than one Packet per Frame. This is particularly the case for long-GOP formats.
   * @param frames An arbitrary number of Frames to be encoded
   * @returns a promise that resolves to a EncodedPackets object when the encode has completed successfully
   */
  encode(...frames: Frame[]): Promise<EncodedPackets>
  /**
   * Once all Frames have been passed to the encoder, it is necessary to call its
   * asynchronous flush() method. If any Packets are yet to be delivered by the encoder
   * they will be provided in the resolved value.
   *
   * Call the flush operation once and do not use the encoder for further encoding once it has
   * been flushed. The resources held by the encoder will be cleaned up as part of the Javascript
   * garbage collection process, so make sure that the reference to the encoder goes out of scope.
   * @returns a promise that resolves to a EncodedPackets object when the flush has completed successfully
   */
  flush(): Promise<EncodedPackets>
  /**
   * Extract the CodecPar object for the Encoder
   * @returns A CodecPar object
   */
  extractParams(): any
  /**
   * Initialise the encoder with parameters from a CodecPar object
   * @param param The CodecPar object that is to be used to override the current Encoder parameters
   * @returns the modified Encoder object
   */
  useParams(params: CodecPar): Encoder
}

/**
 * Provides a list and details of all the available encoders
 * @returns an object with name and details of each of the available encoders
 */
export function encoders(): { [key: string]: Codec }
/**
 * Create an encoder by name
 * @param name The codec name required
 * @param ... Any non-readonly parameters from the Encoder object as required
 * @returns An Encoder object - note creation is synchronous
 */
export function encoder(options: { name: string, [key: string]: any }): Encoder
/**
 * Create an encoder by codec_id
 * @param codec_id The codec ID from AV_CODEC_ID_xxx
 * @param ... Any non-readonly parameters from the Encoder object as required
 * @returns An Encoder object - note creation is synchronous
 */
export function encoder(options: { codec_id: number, [key: string]: any }): Encoder
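
A corresponding sketch of the encode/flush cycle. The import path and re-exported Frame/Packet types are assumptions, and the codec name and extra options (width, height, pix_fmt, time_base) are illustrative values passed through the [key: string]: any index signature rather than names declared in this file.

import { encoder } from '@lumen5/beamcoder' // assumed root export
import type { Frame, Packet } from '@lumen5/beamcoder' // assumed type re-exports

async function encodeAll(frames: Frame[]): Promise<Packet[]> {
  const enc = encoder({ name: 'libx264', width: 1280, height: 720,
    pix_fmt: 'yuv420p', time_base: [1, 25] })       // synchronous creation; values illustrative
  const packets: Packet[] = []
  for (const frame of frames) {
    const result = await enc.encode(frame)           // may buffer and return no packets yet
    packets.push(...result.packets)
  }
  packets.push(...(await enc.flush()).packets)       // flush once; encoder is unusable afterwards
  return packets
}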
package/types/Filter.d.ts
@@ -0,0 +1,324 @@
import { Frame } from "./Frame"
import { PrivClass } from "./PrivClass"
import { HWDeviceContext } from "./HWContext";

export interface Filter {
  readonly type: 'Filter'
  /** Filter name. Must be non-NULL and unique among filters. */
  readonly name: string
  /** A description of the filter. May be NULL. */
  readonly description: string
  /**
   * List of inputs.
   *
   * NULL if there are no (static) inputs. Instances of filters with
   * AVFILTER_FLAG_DYNAMIC_INPUTS set may have more inputs than present in
   * this list.
   */
  readonly inputs: ReadonlyArray<FilterPad>
  /**
   * List of outputs.
   *
   * NULL if there are no (static) outputs. Instances of filters with
   * AVFILTER_FLAG_DYNAMIC_OUTPUTS set may have more outputs than present in
   * this list.
   */
  readonly outputs: ReadonlyArray<FilterPad>
  /**
   * A class for the private data, used to declare filter private AVOptions.
   * This field is NULL for filters that do not declare any options.
   */
  readonly priv_class: PrivClass | null
  /** A combination of AVFILTER_FLAG_* */
  readonly flags: {
    /**
     * The number of the filter inputs is not determined just by AVFilter.inputs.
     * The filter might add additional inputs during initialization depending on the
     * options supplied to it.
     */
    DYNAMIC_INPUTS: boolean
    /**
     * The number of the filter outputs is not determined just by AVFilter.outputs.
     * The filter might add additional outputs during initialization depending on
     * the options supplied to it.
     */
    DYNAMIC_OUTPUTS: boolean
    /**
     * The filter supports multithreading by splitting frames into multiple parts and
     * processing them concurrently.
     */
    SLICE_THREADS: boolean
    /**
     * Some filters support a generic "enable" expression option that can be used
     * to enable or disable a filter in the timeline. Filters supporting this
     * option have this flag set. When the enable expression is false, the default
     * no-op filter_frame() function is called in place of the filter_frame()
     * callback defined on each input pad, thus the frame is passed unchanged to
     * the next filters.
     */
    SUPPORT_TIMELINE_GENERIC: boolean
    /**
     * Same as AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC, except that the filter will
     * have its filter_frame() callback(s) called as usual even when the enable
     * expression is false. The filter will disable filtering within the
     * filter_frame() callback(s) itself, for example executing code depending on
     * the AVFilterContext->is_disabled value.
     */
    SUPPORT_TIMELINE_INTERNAL: boolean
    /**
     * Handy mask to test whether the filter supports or no the timeline feature
     * (internally or generically).
     */
    SUPPORT_TIMELINE: boolean
  }
}

export type MediaType = 'unknown' | 'video' | 'audio' | 'data' | 'subtitle' | 'attachment' | 'nb'

export interface FilterPad {
  name: string
  media_type: MediaType
}

export interface FilterLink {
  /** source filter name */
  readonly src: string
  /** output pad on the source filter */
  readonly srcpad: string
  /** dest filter name */
  readonly dst: string
  /** input pad on the dest filter */
  readonly dstpad: string
  /** filter media type */
  readonly type: MediaType
  /** video only - agreed upon image width */
  readonly w?: number
  /** video only - agreed upon image height */
  readonly h?: number
  /** video only - agreed upon sample aspect ratio */
  readonly sample_aspect_ratio?: ReadonlyArray<number>
  /** audio only - number of channels in the channel layout. */
  readonly channel_count?: number
  /** audio only - channel layout of current buffer */
  readonly channel_layout?: string
  /** audio only - samples per second */
  readonly sample_rate?: number
  /** agreed upon media format */
  readonly format: string
  /**
   * Define the time base used by the PTS of the frames/samples which will pass through this link.
   * During the configuration stage, each filter is supposed to change only the output timebase,
   * while the timebase of the input link is assumed to be an unchangeable property.
   */
  readonly time_base: ReadonlyArray<number>
}

export interface FilterContext {
  readonly type: 'FilterContext'
  /** the AVFilter of which this is an instance */
  readonly filter: Filter
  /** name of this filter instance */
  readonly name: string
  /** array of input pads */
  readonly input_pads: ReadonlyArray<FilterPad>
  /** array of pointers to input links */
  readonly inputs: ReadonlyArray<FilterLink> | null
  /** array of output pads */
  readonly output_pads: ReadonlyArray<FilterPad>
  /** array of pointers to output links */
  readonly outputs: ReadonlyArray<FilterLink> | null
  /** private data for use by the filter */
  priv: { [key: string]: any } | null
  /**
   * Type of multithreading being allowed/used. A combination of
   * AVFILTER_THREAD_* flags.
   *
   * May be set by the caller before initializing the filter to forbid some
   * or all kinds of multithreading for this filter. The default is allowing
   * everything.
   *
   * When the filter is initialized, this field is combined using bit AND with
   * AVFilterGraph.thread_type to get the final mask used for determining
   * allowed threading types. I.e. a threading type needs to be set in both
   * to be allowed.
   *
   * After the filter is initialized, libavfilter sets this field to the
   * threading type that is actually used (0 for no multithreading).
   */
  readonly thread_type: number
  /**
   * Max number of threads allowed in this filter instance.
   * If <= 0, its value is ignored.
   * Overrides global number of threads set per filter graph.
   */
  readonly nb_threads: number
  /**
   * Ready status of the filter.
   * A non-0 value means that the filter needs activating,
   * a higher value suggests a more urgent activation.
   */
  readonly ready: number
  /**
   * Sets the number of extra hardware frames which the filter will
   * allocate on its output links for use in following filters or by
   * the caller.
   *
   * Some hardware filters require all frames that they will use for
   * output to be defined in advance before filtering starts. For such
   * filters, any hardware frame pools used for output must therefore be
   * of fixed size. The extra frames set here are on top of any number
   * that the filter needs internally in order to operate normally.
   *
   * This field must be set before the graph containing this filter is
   * configured.
   */
  readonly extra_hw_frames: number
}

export interface FilterGraph {
  readonly type: 'FilterGraph'

  readonly filters: ReadonlyArray<FilterContext>
  /** sws options to use for the auto-inserted scale filters */
  readonly scale_sws_opts: string | null
  /**
   * Type of multithreading allowed for filters in this graph. A combination of AVFILTER_THREAD_* flags.
   * May be set by the caller at any point, the setting will apply to all filters initialized after that.
   * The default is allowing everything.
   *
   * When a filter in this graph is initialized, this field is combined using bit AND with
   * AVFilterContext.thread_type to get the final mask used for determining allowed threading types.
   * I.e. a threading type needs to be set in both to be allowed.
   */
  readonly thread_type: number
  /**
   * Maximum number of threads used by filters in this graph. May be set by
   * the caller before adding any filters to the filtergraph. Zero (the
   * default) means that the number of threads is determined automatically.
   */
  readonly nb_threads: number
  /**
   * Dump a graph into a human-readable string representation.
   * @returns: String representation of the filter graph
   */
  dump(): string
}

export interface FiltererResult {
  /** Output pad name in the filterSpec string used in the filterer setup. */
  readonly name: string
  /** Array of output frames for the pad */
  readonly frames: Array<Frame>
}

export interface Filterer {
  readonly type: 'Filterer'
  readonly graph: FilterGraph

  /**
   * Filter an array of frames
   * For a filter that has only one input pass an array of frame objects directly
   * and the filter input will have a default name applied.
   * This name will match a filter specification that doesn't name its inputs.
   * @param frames Array of Frame objects to be applied to the single input pad
   * @returns Array of objects containing Frame arrays for each output pad of the filter
   */
  filter(frames: Array<Frame>): Promise<Array<FiltererResult> & { total_time: number }>
  /**
   * Filter an array of frames
   * Pass an array of objects, one per filter input, each with a name string property
   * and a frames property that contains an array of frame objects
   * The name must match the input name in the filter specification
   * @param framesArr Array of objects with name and Frame array for each input pad
   * @returns Array of objects containing Frame arrays for each output pad of the filter
   */
  filter(framesArr: Array<{ name: string, frames: Array<Frame> }>): Promise<Array<FiltererResult> & { total_time: number }>
}

/**
 * Provides a list and details of all the available filters
 * @returns an object with name and details of each of the available filters
 */
export function filters(): { [key: string]: Filter }

/** List the available bitstream filters */
export function bsfs(): {
  [key: string]: {
    name: string
    codec_ids: Array<string>
    priv_class: PrivClass | null }
}

/** The required parameters for setting up filter inputs */
export interface InputParam {
  /**
   * Input pad name that matches the filter specification string
   * For a single input filter without a name in the filter specification the name can be omitted
   */
  name?: string
  /** Define the time base used by the PTS of the frames/samples for this filter. */
  timeBase: Array<number>
}
/** The required parameters for setting up video filter inputs */
export interface VideoInputParam extends InputParam {
  width: number
  height: number
  pixelFormat: string
  pixelAspect: Array<number>
  hw_device_ctx?: HWDeviceContext // Optional
  swPixelFormat?: string // Optional
}
/** The required parameters for setting up audio filter inputs */
export interface AudioInputParam extends InputParam {
  sampleRate: number
  sampleFormat: string
  channelLayout: string
}
/** The required parameters for setting up filter inputs */
export interface OutputParam {
  /**
   * Output pad name that matches the filter specification string
   * For a single output filter without a name in the filter specification the name can be omitted
   */
  name?: string
}
/** The required parameters for setting up video filter outputs */
export interface VideoOutputParam extends OutputParam {
  pixelFormat: string
}
/** The required parameters for setting up audio filter outputs */
export interface AudioOutputParam extends OutputParam {
  sampleRate: number
  sampleFormat: string
  channelLayout: string
}

export interface FiltererOptions {
  /** The filter type - video or audio */
  filterType: MediaType
  filterSpec: string
}

export interface FiltererVideoOptions extends FiltererOptions {
  /** Video filter type */
  filterType: 'video'
  /** Video input parameters for the filter */
  inputParams: Array<VideoInputParam>
  /** Video output parameters for the filter */
  outputParams: Array<VideoOutputParam>
}
export interface FiltererAudioOptions extends FiltererOptions {
  /** Audio filter type */
  filterType: 'audio'
  /** Audio input parameters for the filter */
  inputParams: Array<AudioInputParam>
  /** Audio output parameters for the filter */
  outputParams: Array<AudioOutputParam>
}

/**
 * Create a filterer
 * @param options parameters to set up the type, inputs, outputs and spec of the filter
 * @returns Promise that resolve to a Filterer on success
 */
export function filterer(options: FiltererVideoOptions | FiltererAudioOptions): Promise<Filterer>
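
Finally, a sketch of building and running a single-input video Filterer from the option interfaces above. The import path is assumed, and the dimensions, pixel format and scale parameters are illustrative; with an unnamed input and output in filterSpec the pad names can be omitted.

import { filterer } from '@lumen5/beamcoder' // assumed root export
import type { Frame } from '@lumen5/beamcoder' // assumed type re-export

async function scaleFrames(frames: Frame[]) {
  const filt = await filterer({
    filterType: 'video',
    inputParams: [{ width: 1920, height: 1080, pixelFormat: 'yuv420p',
      pixelAspect: [1, 1], timeBase: [1, 25] }],
    outputParams: [{ pixelFormat: 'yuv420p' }],
    filterSpec: 'scale=1280:720'                // single unnamed input and output pad
  })
  const results = await filt.filter(frames)     // one FiltererResult per output pad, plus total_time
  return results[0].frames                      // scaled frames from the single output pad
}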