@ovencord/voice 0.19.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,296 @@
1
+ import type { Buffer } from 'node:buffer';
2
+ import { pipeline, type Readable } from 'node:stream';
3
+ import prism from 'prism-media';
4
+ import { noop } from '../util/util';
5
+ import { SILENCE_FRAME, type AudioPlayer } from './AudioPlayer';
6
+ import { findPipeline, StreamType, TransformerType, type Edge } from './TransformerGraph';
7
+
8
/**
 * Options that are set when creating a new audio resource.
 *
 * @typeParam Metadata - the type for the metadata (if any) of the audio resource
 */
export interface CreateAudioResourceOptions<Metadata> {
	/**
	 * Whether or not inline volume should be enabled. If enabled, you will be able to change the volume
	 * of the stream on-the-fly. However, this also increases the performance cost of playback. Defaults to `false`.
	 */
	inlineVolume?: boolean;

	/**
	 * The type of the input stream. Defaults to `StreamType.Arbitrary`.
	 *
	 * When not given (and the input is a stream), the type is inferred from the stream's class —
	 * see `inferStreamType`. String inputs always use FFmpeg regardless of this option.
	 */
	inputType?: StreamType;

	/**
	 * Optional metadata that can be attached to the resource (e.g. track title, random id).
	 * This is useful for identification purposes when the resource is passed around in events.
	 * See {@link AudioResource.metadata}
	 */
	metadata?: Metadata;

	/**
	 * The number of silence frames to append to the end of the resource's audio stream, to prevent interpolation glitches.
	 * Defaults to 5 (≈100 ms at the 20 ms per-frame playback rate used by `AudioResource.read`).
	 */
	silencePaddingFrames?: number;
}
38
+
39
+ /**
40
+ * Represents an audio resource that can be played by an audio player.
41
+ *
42
+ * @typeParam Metadata - the type for the metadata (if any) of the audio resource
43
+ */
44
+ export class AudioResource<Metadata = unknown> {
45
+ /**
46
+ * An object-mode Readable stream that emits Opus packets. This is what is played by audio players.
47
+ */
48
+ public readonly playStream: Readable;
49
+
50
+ /**
51
+ * The pipeline used to convert the input stream into a playable format. For example, this may
52
+ * contain an FFmpeg component for arbitrary inputs, and it may contain a VolumeTransformer component
53
+ * for resources with inline volume transformation enabled.
54
+ */
55
+ public readonly edges: readonly Edge[];
56
+
57
+ /**
58
+ * Optional metadata that can be used to identify the resource.
59
+ */
60
+ public metadata: Metadata;
61
+
62
+ /**
63
+ * If the resource was created with inline volume transformation enabled, then this will be a
64
+ * prism-media VolumeTransformer. You can use this to alter the volume of the stream.
65
+ */
66
+ public readonly volume?: prism.VolumeTransformer;
67
+
68
+ /**
69
+ * If using an Opus encoder to create this audio resource, then this will be a prism-media opus.Encoder.
70
+ * You can use this to control settings such as bitrate, FEC, PLP.
71
+ */
72
+ public readonly encoder?: prism.opus.Encoder;
73
+
74
+ /**
75
+ * The audio player that the resource is subscribed to, if any.
76
+ */
77
+ public audioPlayer?: AudioPlayer | undefined;
78
+
79
+ /**
80
+ * The playback duration of this audio resource, given in milliseconds.
81
+ */
82
+ public playbackDuration = 0;
83
+
84
+ /**
85
+ * Whether or not the stream for this resource has started (data has become readable)
86
+ */
87
+ public started = false;
88
+
89
+ /**
90
+ * The number of silence frames to append to the end of the resource's audio stream, to prevent interpolation glitches.
91
+ */
92
+ public readonly silencePaddingFrames: number;
93
+
94
+ /**
95
+ * The number of remaining silence frames to play. If -1, the frames have not yet started playing.
96
+ */
97
+ public silenceRemaining = -1;
98
+
99
+ public constructor(
100
+ edges: readonly Edge[],
101
+ streams: readonly Readable[],
102
+ metadata: Metadata,
103
+ silencePaddingFrames: number,
104
+ ) {
105
+ this.edges = edges;
106
+ this.playStream = streams.length > 1 ? (pipeline(streams, noop) as any as Readable) : streams[0]!;
107
+ this.metadata = metadata;
108
+ this.silencePaddingFrames = silencePaddingFrames;
109
+
110
+ for (const stream of streams) {
111
+ if (stream instanceof prism.VolumeTransformer) {
112
+ this.volume = stream;
113
+ } else if (stream instanceof prism.opus.Encoder) {
114
+ this.encoder = stream;
115
+ }
116
+ }
117
+
118
+ this.playStream.once('readable', () => (this.started = true));
119
+ }
120
+
121
+ /**
122
+ * Whether this resource is readable. If the underlying resource is no longer readable, this will still return true
123
+ * while there are silence padding frames left to play.
124
+ */
125
+ public get readable() {
126
+ if (this.silenceRemaining === 0) return false;
127
+ const real = this.playStream.readable;
128
+ if (!real) {
129
+ if (this.silenceRemaining === -1) this.silenceRemaining = this.silencePaddingFrames;
130
+ return this.silenceRemaining !== 0;
131
+ }
132
+
133
+ return real;
134
+ }
135
+
136
+ /**
137
+ * Whether this resource has ended or not.
138
+ */
139
+ public get ended() {
140
+ return this.playStream.readableEnded || this.playStream.destroyed || this.silenceRemaining === 0;
141
+ }
142
+
143
+ /**
144
+ * Attempts to read an Opus packet from the audio resource. If a packet is available, the playbackDuration
145
+ * is incremented.
146
+ *
147
+ * @remarks
148
+ * It is advisable to check that the playStream is readable before calling this method. While no runtime
149
+ * errors will be thrown, you should check that the resource is still available before attempting to
150
+ * read from it.
151
+ * @internal
152
+ */
153
+ public read(): Buffer | null {
154
+ if (this.silenceRemaining === 0) {
155
+ return null;
156
+ } else if (this.silenceRemaining > 0) {
157
+ this.silenceRemaining--;
158
+ return SILENCE_FRAME;
159
+ }
160
+
161
+ const packet = this.playStream.read() as Buffer | null;
162
+ if (packet) {
163
+ this.playbackDuration += 20;
164
+ }
165
+
166
+ return packet;
167
+ }
168
+ }
169
+
170
+ /**
171
+ * Ensures that a path contains at least one volume transforming component.
172
+ *
173
+ * @param path - The path to validate constraints on
174
+ */
175
+ export const VOLUME_CONSTRAINT = (path: Edge[]) => path.some((edge) => edge.type === TransformerType.InlineVolume);
176
+
177
+ export const NO_CONSTRAINT = () => true;
178
+
179
+ /**
180
+ * Tries to infer the type of a stream to aid with transcoder pipelining.
181
+ *
182
+ * @param stream - The stream to infer the type of
183
+ */
184
+ export function inferStreamType(stream: Readable): {
185
+ hasVolume: boolean;
186
+ streamType: StreamType;
187
+ } {
188
+ if (stream instanceof prism.opus.Encoder) {
189
+ return { streamType: StreamType.Opus, hasVolume: false };
190
+ } else if (stream instanceof prism.opus.Decoder) {
191
+ return { streamType: StreamType.Raw, hasVolume: false };
192
+ } else if (stream instanceof prism.VolumeTransformer) {
193
+ return { streamType: StreamType.Raw, hasVolume: true };
194
+ } else if (stream instanceof prism.opus.OggDemuxer) {
195
+ return { streamType: StreamType.Opus, hasVolume: false };
196
+ } else if (stream instanceof prism.opus.WebmDemuxer) {
197
+ return { streamType: StreamType.Opus, hasVolume: false };
198
+ }
199
+
200
+ return { streamType: StreamType.Arbitrary, hasVolume: false };
201
+ }
202
+
203
+ /**
204
+ * Creates an audio resource that can be played by audio players.
205
+ *
206
+ * @remarks
207
+ * If the input is given as a string, then the inputType option will be overridden and FFmpeg will be used.
208
+ *
209
+ * If the input is not in the correct format, then a pipeline of transcoders and transformers will be created
210
+ * to ensure that the resultant stream is in the correct format for playback. This could involve using FFmpeg,
211
+ * Opus transcoders, and Ogg/WebM demuxers.
212
+ * @param input - The resource to play
213
+ * @param options - Configurable options for creating the resource
214
+ * @typeParam Metadata - the type for the metadata (if any) of the audio resource
215
+ */
216
+ export function createAudioResource<Metadata>(
217
+ input: Readable | string,
218
+ options: CreateAudioResourceOptions<Metadata> &
219
+ Pick<
220
+ Metadata extends null | undefined
221
+ ? CreateAudioResourceOptions<Metadata>
222
+ : Required<CreateAudioResourceOptions<Metadata>>,
223
+ 'metadata'
224
+ >,
225
+ ): AudioResource<Metadata extends null | undefined ? null : Metadata>;
226
+
227
+ /**
228
+ * Creates an audio resource that can be played by audio players.
229
+ *
230
+ * @remarks
231
+ * If the input is given as a string, then the inputType option will be overridden and FFmpeg will be used.
232
+ *
233
+ * If the input is not in the correct format, then a pipeline of transcoders and transformers will be created
234
+ * to ensure that the resultant stream is in the correct format for playback. This could involve using FFmpeg,
235
+ * Opus transcoders, and Ogg/WebM demuxers.
236
+ * @param input - The resource to play
237
+ * @param options - Configurable options for creating the resource
238
+ * @typeParam Metadata - the type for the metadata (if any) of the audio resource
239
+ */
240
+ export function createAudioResource<Metadata extends null | undefined>(
241
+ input: Readable | string,
242
+ options?: Omit<CreateAudioResourceOptions<Metadata>, 'metadata'>,
243
+ ): AudioResource<null>;
244
+
245
+ /**
246
+ * Creates an audio resource that can be played by audio players.
247
+ *
248
+ * @remarks
249
+ * If the input is given as a string, then the inputType option will be overridden and FFmpeg will be used.
250
+ *
251
+ * If the input is not in the correct format, then a pipeline of transcoders and transformers will be created
252
+ * to ensure that the resultant stream is in the correct format for playback. This could involve using FFmpeg,
253
+ * Opus transcoders, and Ogg/WebM demuxers.
254
+ * @param input - The resource to play
255
+ * @param options - Configurable options for creating the resource
256
+ * @typeParam Metadata - the type for the metadata (if any) of the audio resource
257
+ */
258
+ export function createAudioResource<Metadata>(
259
+ input: Readable | string,
260
+ options: CreateAudioResourceOptions<Metadata> = {},
261
+ ): AudioResource<Metadata> {
262
+ let inputType = options.inputType;
263
+ let needsInlineVolume = Boolean(options.inlineVolume);
264
+
265
+ // string inputs can only be used with FFmpeg
266
+ if (typeof input === 'string') {
267
+ inputType = StreamType.Arbitrary;
268
+ } else if (inputType === undefined) {
269
+ const analysis = inferStreamType(input);
270
+ inputType = analysis.streamType;
271
+ needsInlineVolume = needsInlineVolume && !analysis.hasVolume;
272
+ }
273
+
274
+ const transformerPipeline = findPipeline(inputType, needsInlineVolume ? VOLUME_CONSTRAINT : NO_CONSTRAINT);
275
+
276
+ if (transformerPipeline.length === 0) {
277
+ if (typeof input === 'string') throw new Error(`Invalid pipeline constructed for string resource '${input}'`);
278
+ // No adjustments required
279
+ return new AudioResource<Metadata>(
280
+ [],
281
+ [input],
282
+ (options.metadata ?? null) as Metadata,
283
+ options.silencePaddingFrames ?? 5,
284
+ );
285
+ }
286
+
287
+ const streams = transformerPipeline.map((edge) => edge.transformer(input));
288
+ if (typeof input !== 'string') streams.unshift(input);
289
+
290
+ return new AudioResource<Metadata>(
291
+ transformerPipeline,
292
+ streams,
293
+ (options.metadata ?? null) as Metadata,
294
+ options.silencePaddingFrames ?? 5,
295
+ );
296
+ }
@@ -0,0 +1,33 @@
1
+ /* eslint-disable @typescript-eslint/dot-notation */
2
+ import type { VoiceConnection } from '../VoiceConnection';
3
+ import type { AudioPlayer } from './AudioPlayer';
4
+
5
+ /**
6
+ * Represents a subscription of a voice connection to an audio player, allowing
7
+ * the audio player to play audio on the voice connection.
8
+ */
9
+ export class PlayerSubscription {
10
+ /**
11
+ * The voice connection of this subscription.
12
+ */
13
+ public readonly connection: VoiceConnection;
14
+
15
+ /**
16
+ * The audio player of this subscription.
17
+ */
18
+ public readonly player: AudioPlayer;
19
+
20
+ public constructor(connection: VoiceConnection, player: AudioPlayer) {
21
+ this.connection = connection;
22
+ this.player = player;
23
+ }
24
+
25
+ /**
26
+ * Unsubscribes the connection from the audio player, meaning that the
27
+ * audio player cannot stream audio to it until a new subscription is made.
28
+ */
29
+ public unsubscribe() {
30
+ this.connection['onSubscriptionRemoved'](this);
31
+ this.player['unsubscribe'](this);
32
+ }
33
+ }
@@ -0,0 +1,281 @@
1
+ import type { Readable } from 'node:stream';
2
+ import prism from 'prism-media';
3
+
4
+ /**
5
+ * This module creates a Transformer Graph to figure out what the most efficient way
6
+ * of transforming the input stream into something playable would be.
7
+ */
8
+
9
+ const FFMPEG_PCM_ARGUMENTS = ['-analyzeduration', '0', '-loglevel', '0', '-f', 's16le', '-ar', '48000', '-ac', '2'];
10
+ const FFMPEG_OPUS_ARGUMENTS = [
11
+ '-analyzeduration',
12
+ '0',
13
+ '-loglevel',
14
+ '0',
15
+ '-acodec',
16
+ 'libopus',
17
+ '-f',
18
+ 'opus',
19
+ '-ar',
20
+ '48000',
21
+ '-ac',
22
+ '2',
23
+ ];
24
+
25
/**
 * The different types of stream that can exist within the pipeline.
 */
export enum StreamType {
	/**
	 * The type of the stream at this point is unknown.
	 */
	Arbitrary = 'arbitrary',
	/**
	 * The stream at this point is Opus audio encoded in an Ogg wrapper.
	 */
	OggOpus = 'ogg/opus',
	/**
	 * The stream at this point is Opus audio, and the stream is in object-mode. This is ready to play.
	 */
	Opus = 'opus',
	/**
	 * The stream at this point is s16le PCM (48kHz stereo, matching the FFmpeg/encoder
	 * settings used by the graph's transformers).
	 */
	Raw = 'raw',
	/**
	 * The stream at this point is Opus audio encoded in a WebM wrapper.
	 */
	WebmOpus = 'webm/opus',
}
50
+
51
/**
 * The different types of transformers that can exist within the pipeline.
 */
export enum TransformerType {
	/** An FFmpeg process outputting Ogg Opus (only available when FFmpeg was built with libopus). */
	FFmpegOgg = 'ffmpeg ogg',
	/** An FFmpeg process outputting s16le PCM. */
	FFmpegPCM = 'ffmpeg pcm',
	/** A prism-media VolumeTransformer that adjusts the volume of s16le PCM on-the-fly. */
	InlineVolume = 'volume transformer',
	/** Extracts Opus packets from an Ogg container. */
	OggOpusDemuxer = 'ogg/opus demuxer',
	/** Decodes Opus packets into s16le PCM. */
	OpusDecoder = 'opus decoder',
	/** Encodes s16le PCM into Opus packets. */
	OpusEncoder = 'opus encoder',
	/** Extracts Opus packets from a WebM container. */
	WebmOpusDemuxer = 'webm/opus demuxer',
}
63
+
64
/**
 * Represents a pathway from one stream type to another using a transformer.
 */
export interface Edge {
	/**
	 * The relative cost of taking this edge; pathfinding minimizes the total over a path.
	 */
	cost: number;
	/**
	 * The node this edge leads out of.
	 */
	from: Node;
	/**
	 * The node this edge leads into.
	 */
	to: Node;
	/**
	 * Creates the stream that performs this transformation. A string input is only
	 * meaningful for FFmpeg edges, where it is passed as FFmpeg's `-i` argument.
	 */
	transformer(input: Readable | string): Readable;
	/**
	 * The kind of transformation this edge performs.
	 */
	type: TransformerType;
}
74
+
75
+ /**
76
+ * Represents a type of stream within the graph, e.g. an Opus stream, or a stream of raw audio.
77
+ */
78
+ export class Node {
79
+ /**
80
+ * The outbound edges from this node.
81
+ */
82
+ public readonly edges: Edge[] = [];
83
+
84
+ /**
85
+ * The type of stream for this node.
86
+ */
87
+ public readonly type: StreamType;
88
+
89
+ public constructor(type: StreamType) {
90
+ this.type = type;
91
+ }
92
+
93
+ /**
94
+ * Creates an outbound edge from this node.
95
+ *
96
+ * @param edge - The edge to create
97
+ */
98
+ public addEdge(edge: Omit<Edge, 'from'>) {
99
+ this.edges.push({ ...edge, from: this });
100
+ }
101
+ }
102
+
103
+ // Create a node for each stream type
104
+ let NODES: Map<StreamType, Node> | null = null;
105
+
106
+ /**
107
+ * Gets a node from its stream type.
108
+ *
109
+ * @param type - The stream type of the target node
110
+ */
111
+ export function getNode(type: StreamType) {
112
+ const node = (NODES ??= initializeNodes()).get(type);
113
+ if (!node) throw new Error(`Node type '${type}' does not exist!`);
114
+ return node;
115
+ }
116
+
117
+ // Try to enable FFmpeg Ogg optimizations
118
+ function canEnableFFmpegOptimizations(): boolean {
119
+ try {
120
+ return prism.FFmpeg.getInfo().output.includes('--enable-libopus');
121
+ } catch {}
122
+
123
+ return false;
124
+ }
125
+
126
/**
 * Builds the transformer graph: one node per stream type, plus an edge for every
 * known conversion between types.
 *
 * NOTE(review): edge registration order appears significant — findPath keeps the
 * first edge found on a cost tie (strict `<` comparison), so reordering these
 * addEdge calls could change which pipeline wins. Confirm before restructuring.
 */
function initializeNodes(): Map<StreamType, Node> {
	const nodes = new Map<StreamType, Node>();
	for (const streamType of Object.values(StreamType)) {
		nodes.set(streamType, new Node(streamType));
	}

	// Raw PCM -> Opus packets: the format audio players ultimately consume.
	nodes.get(StreamType.Raw)!.addEdge({
		type: TransformerType.OpusEncoder,
		to: nodes.get(StreamType.Opus)!,
		cost: 1.5,
		transformer: () => new prism.opus.Encoder({ rate: 48_000, channels: 2, frameSize: 960 }),
	});

	// Opus packets -> raw PCM, e.g. when an inline volume transformer is required.
	nodes.get(StreamType.Opus)!.addEdge({
		type: TransformerType.OpusDecoder,
		to: nodes.get(StreamType.Raw)!,
		cost: 1.5,
		transformer: () => new prism.opus.Decoder({ rate: 48_000, channels: 2, frameSize: 960 }),
	});

	// Demuxing a container is cheaper (cost 1) than transcoding (cost 1.5+).
	nodes.get(StreamType.OggOpus)!.addEdge({
		type: TransformerType.OggOpusDemuxer,
		to: nodes.get(StreamType.Opus)!,
		cost: 1,
		transformer: () => new prism.opus.OggDemuxer(),
	});

	nodes.get(StreamType.WebmOpus)!.addEdge({
		type: TransformerType.WebmOpusDemuxer,
		to: nodes.get(StreamType.Opus)!,
		cost: 1,
		transformer: () => new prism.opus.WebmDemuxer(),
	});

	// FFmpeg can decode anything to PCM; string inputs become the `-i` argument,
	// stream inputs are piped via stdin ('-').
	const FFMPEG_PCM_EDGE: Omit<Edge, 'from'> = {
		type: TransformerType.FFmpegPCM,
		to: nodes.get(StreamType.Raw)!,
		cost: 2,
		transformer: (input) =>
			new prism.FFmpeg({
				args: ['-i', typeof input === 'string' ? input : '-', ...FFMPEG_PCM_ARGUMENTS],
			}),
	};

	nodes.get(StreamType.Arbitrary)!.addEdge(FFMPEG_PCM_EDGE);
	nodes.get(StreamType.OggOpus)!.addEdge(FFMPEG_PCM_EDGE);
	nodes.get(StreamType.WebmOpus)!.addEdge(FFMPEG_PCM_EDGE);

	// Volume transformation operates on raw PCM and leaves the stream type unchanged
	// (a self-edge on Raw).
	nodes.get(StreamType.Raw)!.addEdge({
		type: TransformerType.InlineVolume,
		to: nodes.get(StreamType.Raw)!,
		cost: 0.5,
		transformer: () => new prism.VolumeTransformer({ type: 's16le' }),
	});

	// Only an FFmpeg built with libopus can output Ogg Opus directly.
	if (canEnableFFmpegOptimizations()) {
		const FFMPEG_OGG_EDGE: Omit<Edge, 'from'> = {
			type: TransformerType.FFmpegOgg,
			to: nodes.get(StreamType.OggOpus)!,
			cost: 2,
			transformer: (input) =>
				new prism.FFmpeg({
					args: ['-i', typeof input === 'string' ? input : '-', ...FFMPEG_OPUS_ARGUMENTS],
				}),
		};
		nodes.get(StreamType.Arbitrary)!.addEdge(FFMPEG_OGG_EDGE);
		// Include Ogg and WebM as well in case they have different sampling rates or are mono instead of stereo
		// at the moment, this will not do anything. However, if/when detection for correct Opus headers is
		// implemented, this will help inform the voice engine that it is able to transcode the audio.
		nodes.get(StreamType.OggOpus)!.addEdge(FFMPEG_OGG_EDGE);
		nodes.get(StreamType.WebmOpus)!.addEdge(FFMPEG_OGG_EDGE);
	}

	return nodes;
}
201
+
202
/**
 * Represents a step in the path from node A to node B.
 */
interface Step {
	/**
	 * The total cost of the remaining path: this step's edge plus every step after it.
	 * (findPath stores `edge.cost + next.cost` here, and 0 / Infinity on terminal steps.)
	 */
	cost: number;

	/**
	 * The edge associated with this step. Absent on the terminal (goal / dead-end) step.
	 */
	edge?: Edge;

	/**
	 * The next step. Absent on the terminal step.
	 */
	next?: Step;
}
221
+
222
+ /**
223
+ * Finds the shortest cost path from node A to node B.
224
+ *
225
+ * @param from - The start node
226
+ * @param constraints - Extra validation for a potential solution. Takes a path, returns true if the path is valid
227
+ * @param goal - The target node
228
+ * @param path - The running path
229
+ * @param depth - The number of remaining recursions
230
+ */
231
+ function findPath(
232
+ from: Node,
233
+ constraints: (path: Edge[]) => boolean,
234
+ goal = getNode(StreamType.Opus),
235
+ path: Edge[] = [],
236
+ depth = 5,
237
+ ): Step {
238
+ if (from === goal && constraints(path)) {
239
+ return { cost: 0 };
240
+ } else if (depth === 0) {
241
+ return { cost: Number.POSITIVE_INFINITY };
242
+ }
243
+
244
+ let currentBest: Step | undefined;
245
+ for (const edge of from.edges) {
246
+ if (currentBest && edge.cost > currentBest.cost) continue;
247
+ const next = findPath(edge.to, constraints, goal, [...path, edge], depth - 1);
248
+ const cost = edge.cost + next.cost;
249
+ if (!currentBest || cost < currentBest.cost) {
250
+ currentBest = { cost, edge, next };
251
+ }
252
+ }
253
+
254
+ return currentBest ?? { cost: Number.POSITIVE_INFINITY };
255
+ }
256
+
257
+ /**
258
+ * Takes the solution from findPath and assembles it into a list of edges.
259
+ *
260
+ * @param step - The first step of the path
261
+ */
262
+ function constructPipeline(step: Step) {
263
+ const edges = [];
264
+ let current: Step | undefined = step;
265
+ while (current?.edge) {
266
+ edges.push(current.edge);
267
+ current = current.next;
268
+ }
269
+
270
+ return edges;
271
+ }
272
+
273
+ /**
274
+ * Finds the lowest-cost pipeline to convert the input stream type into an Opus stream.
275
+ *
276
+ * @param from - The stream type to start from
277
+ * @param constraint - Extra constraints that may be imposed on potential solution
278
+ */
279
+ export function findPipeline(from: StreamType, constraint: (path: Edge[]) => boolean) {
280
+ return constructPipeline(findPath(getNode(from), constraint));
281
+ }
@@ -0,0 +1,20 @@
1
// Public re-exports for the audio subsystem: players, resources, subscriptions,
// and the transformer graph used to construct playback pipelines.
export {
	AudioPlayer,
	AudioPlayerStatus,
	type AudioPlayerState,
	NoSubscriberBehavior,
	createAudioPlayer,
	type AudioPlayerBufferingState,
	type AudioPlayerIdleState,
	type AudioPlayerPausedState,
	type AudioPlayerPlayingState,
	type CreateAudioPlayerOptions,
} from './AudioPlayer';

export { AudioPlayerError } from './AudioPlayerError';

export { AudioResource, type CreateAudioResourceOptions, createAudioResource } from './AudioResource';

export { PlayerSubscription } from './PlayerSubscription';

export { StreamType, type Edge, TransformerType, Node } from './TransformerGraph';
package/src/index.ts ADDED
@@ -0,0 +1,47 @@
1
// Package entry point: re-exports the public surface of @ovencord/voice.
export * from './joinVoiceChannel';
export * from './audio/index';
export * from './util/index';
export * from './receive/index';

// NOTE(review): this file mixes extensionless specifiers with an explicit
// './networking/index.js' path — confirm the configured moduleResolution accepts both.
export {
	Networking,
	type ConnectionData,
	type ConnectionOptions,
	type NetworkingState,
	type NetworkingResumingState,
	type NetworkingSelectingProtocolState,
	type NetworkingUdpHandshakingState,
	type NetworkingClosedState,
	type NetworkingIdentifyingState,
	type NetworkingOpeningWsState,
	type NetworkingReadyState,
	NetworkingStatusCode,
	VoiceUDPSocket,
	VoiceWebSocket,
	type SocketConfig,
	DAVESession,
} from './networking/index.js';

export {
	VoiceConnection,
	type VoiceConnectionState,
	VoiceConnectionStatus,
	type VoiceConnectionConnectingState,
	type VoiceConnectionDestroyedState,
	type VoiceConnectionDisconnectedState,
	type VoiceConnectionDisconnectedBaseState,
	type VoiceConnectionDisconnectedOtherState,
	type VoiceConnectionDisconnectedWebSocketState,
	VoiceConnectionDisconnectReason,
	type VoiceConnectionReadyState,
	type VoiceConnectionSignallingState,
} from './VoiceConnection';

export { type JoinConfig, getVoiceConnection, getVoiceConnections, getGroups } from './DataStore';

/**
 * The {@link https://github.com/ovencord/ovencord/blob/main/packages/voice#readme | @ovencord/voice} version
 * that you are currently using.
 */
// This needs to explicitly be `string` so it is not typed as a "const string" that gets injected by esbuild
export const version = '[VI]{{inject}}[/VI]' as string;