@imcooder/opuslib 0.2.1 → 2.3.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -29,28 +29,37 @@
29
29
  > });
30
30
  > ```
31
31
  >
32
- > **New Fields**
33
- > - **`audioLevel`** Each `audioChunk` event includes a normalized `audioLevel` (0.0~1.0), computed via configurable RMS sliding window (default 360ms) with dBFS-to-linear mapping (IEC 61606).
32
+ > **`framesPerCallback` — batch multiple frames to reduce data transfer overhead**
33
+ > - Multiple independently-encoded Opus frames can be batched into a single `audioChunk` callback via `framesPerCallback`, reducing JS bridge calls and data transfer overhead. Each frame in `frames[]` is a complete, independently decodable Opus packet (with its own TOC byte) — no illegal byte concatenation.
34
+ > - Example: `frameSize=20ms, framesPerCallback=5` → 5 frames encoded individually, returned as `frames: OpusFrame[]` in one `audioChunk` event (80% fewer bridge calls).
35
+ >
36
+ > **New `audioChunk` fields**
37
+ > - **`frames`** — Array of `OpusFrame` objects. Each frame is an independent, decodable Opus packet (with its own TOC byte). No illegal byte concatenation.
38
+ > - **`OpusFrame.audioLevel`** — Per-frame normalized audio level (0.0~1.0), computed via RMS with dBFS-to-linear mapping. Only present when `enableAudioLevel: true`. Consumers can average neighboring frames for smoothing.
39
+ > - **`duration`** — Duration of all frames in milliseconds (`frameSize * frameCount`).
40
+ > - **`frameCount`** — Number of Opus frames in this callback (= `frames.length`).
41
+ > - **`preSkip`** — (in `audioStarted` event) Opus encoder lookahead in samples. Decoders should skip this many samples at the beginning of the stream.
34
42
  > ```typescript
35
43
  > Opuslib.addListener('audioChunk', (event) => {
36
- > // event.data: ArrayBuffer (Opus encoded packet)
44
+ > // event.frames: OpusFrame[] (independent Opus packets)
45
+ > // each frame: { data: ArrayBuffer, audioLevel?: number }
37
46
  > // event.timestamp: 1711000000100 (ms since epoch)
38
- > // event.sequenceNumber: 5 (packet counter)
39
- > // event.audioLevel: 0.72 (0=silence, 1=loud)
47
+ > // event.sequenceNumber: 5 (callback counter)
48
+ > // event.duration: 100 (ms, = frameSize * frameCount)
49
+ > // event.frameCount: 5 (= frames.length)
40
50
  > });
41
51
  > ```
42
- > - **`preSkip`** — Opus encoder lookahead in samples, returned in `audioStarted` event. Decoders should skip this many samples at the beginning of the stream.
43
52
  >
44
53
  > **New Config Options**
45
- > - **`audioLevelWindow`** — RMS window duration in milliseconds for audio level calculation (default: 360ms). Shorter window = more responsive, longer window = smoother.
54
+ > - **`enableAudioLevel`** — Enable per-frame audio level calculation (default: false). When enabled, each `OpusFrame` includes `audioLevel` (0.0~1.0). Disabled by default to save computation.
46
55
  > ```typescript
47
56
  > await Opuslib.startStreaming({
48
57
  > sampleRate: 16000,
49
58
  > channels: 1,
50
59
  > bitrate: 24000,
51
60
  > frameSize: 20,
52
- > packetDuration: 20,
53
- > audioLevelWindow: 200, // 200ms window (default: 360ms)
61
+ > framesPerCallback: 5, // batch 5 independent Opus frames per event
62
+ > enableAudioLevel: true, // enable per-frame audio level
54
63
  > });
55
64
  > ```
56
65
 
@@ -66,7 +75,7 @@ Real-time audio capture and encoding using the latest Opus 1.6 codec, built from
66
75
  - **Low Latency** - Real-time encoding with minimal overhead
67
76
  - **Native Performance** - Direct C/C++ integration, no JavaScript encoding
68
77
  - **Thread-safe Encoding** - Dedicated encoding thread, capture thread never blocked
69
- - **Audio Level Metering** - Real-time 0~1 audio level in each audio chunk (360ms RMS window)
78
+ - **Audio Level Metering** - Optional per-frame 0~1 audio level via RMS (enable with `enableAudioLevel: true`)
70
79
  - **Lifecycle Events** - `audioStarted` / `audioEnd` events with session metadata
71
80
  - **High Quality** - 24kbps achieves excellent speech quality
72
81
  - **Cross-Platform** - iOS and Android with a consistent API
@@ -154,10 +163,11 @@ async function startRecording() {
154
163
 
155
164
  // Listen for encoded audio chunks
156
165
  const subscription = Opuslib.addListener('audioChunk', (event) => {
157
- const { data, timestamp, sequenceNumber, audioLevel } = event;
158
- console.log(`Opus packet: ${data.byteLength} bytes, level=${audioLevel.toFixed(2)}`);
159
-
160
- // Send to your backend, save to file, etc.
166
+ const { frames, timestamp, sequenceNumber } = event;
167
+ for (const frame of frames) {
168
+ console.log(`Opus packet: ${frame.data.byteLength} bytes, level=${frame.audioLevel?.toFixed(2) ?? 'N/A'}`);
169
+ // Send each independent Opus packet to your backend, save to file, etc.
170
+ }
161
171
  });
162
172
 
163
173
  // Start streaming
@@ -166,7 +176,7 @@ async function startRecording() {
166
176
  channels: 1, // Mono
167
177
  bitrate: 24000, // 24 kbps
168
178
  frameSize: 20, // 20ms frames
169
- packetDuration: 20, // 20ms packets
179
+ framesPerCallback: 1, // 1 frame per callback (default)
170
180
  });
171
181
  }
172
182
 
@@ -194,9 +204,9 @@ interface AudioConfig {
194
204
  channels: number; // Number of channels (1 = mono, 2 = stereo)
195
205
  bitrate: number; // Target bitrate in bits/second (e.g., 24000)
196
206
  frameSize: number; // Frame duration in ms (2.5, 5, 10, 20, 40, 60)
197
- packetDuration: number; // Packet duration in ms (multiple of frameSize)
207
+ framesPerCallback?: number; // Frames per callback (default 1), batching reduces bridge calls
198
208
  dredDuration?: number; // Reserved for future DRED support (default: 0)
199
- audioLevelWindow?: number; // RMS window duration in ms for audioLevel (default: 360)
209
+ enableAudioLevel?: boolean; // Enable per-frame audio level (default: false)
200
210
  enableAmplitudeEvents?: boolean; // Enable amplitude monitoring (default: false)
201
211
  amplitudeEventInterval?: number; // Amplitude update interval in ms (default: 16)
202
212
  }
@@ -210,7 +220,7 @@ interface AudioConfig {
210
220
  channels: 1, // Mono - sufficient for voice
211
221
  bitrate: 24000, // 24 kbps - excellent quality
212
222
  frameSize: 20, // 20ms - standard for real-time
213
- packetDuration: 20, // 20ms - low latency
223
+ framesPerCallback: 1, // 1 frame per callback - low latency
214
224
  }
215
225
  ```
216
226
 
@@ -269,19 +279,30 @@ Emitted when an encoded Opus packet is ready.
269
279
 
270
280
  ```typescript
271
281
  Opuslib.addListener('audioChunk', (event: AudioChunkEvent) => {
272
- // event.data: ArrayBuffer - Raw Opus packet (ready to send/save)
273
- // event.audioLevel: number - Audio level 0.0~1.0 (0=silence, 1=loud)
282
+ // event.frames: OpusFrame[] - Independent Opus packets (each decodable on its own)
283
+ // frame.audioLevel?: number - Per-frame level 0.0~1.0 (when enableAudioLevel is true)
284
+ // event.duration: number - Duration in ms (frameSize * frameCount)
285
+ // event.frameCount: number - Number of Opus frames (= frames.length)
286
+ for (const frame of event.frames) {
287
+ websocket.send(frame.data); // each frame is an independent Opus packet
288
+ }
274
289
  });
275
290
  ```
276
291
 
277
292
  **Event Data:**
278
293
 
279
294
  ```typescript
295
+ interface OpusFrame {
296
+ data: ArrayBuffer; // Independent Opus packet (one opus_encode() output with its own TOC byte)
297
+ audioLevel?: number; // Per-frame audio level 0.0~1.0 (only when enableAudioLevel is true)
298
+ }
299
+
280
300
  interface AudioChunkEvent {
281
- data: ArrayBuffer; // Raw Opus-encoded audio packet
301
+ frames: OpusFrame[]; // Array of independent Opus packets
282
302
  timestamp: number; // Milliseconds since epoch
283
- sequenceNumber: number; // Incrementing packet counter
284
- audioLevel: number; // Audio level 0.0~1.0 (360ms RMS window, 0=silence, 1=loud)
303
+ sequenceNumber: number; // Incrementing callback counter
304
+ duration: number; // Total duration in ms (frameSize * frameCount)
305
+ frameCount: number; // Number of Opus frames (= frames.length)
285
306
  }
286
307
  ```
287
308
 
@@ -347,7 +368,7 @@ Capture Thread Encoding Thread (serial queue)
347
368
  |---- post(samples) ----------->| pendingSamples.append(samples)
348
369
  | | while (enough samples) {
349
370
  | | opus_encode()
350
- | | audioLevel calc (360ms RMS)
371
+ | | per-frame audioLevel (if enabled)
351
372
  | | emit audioChunk event
352
373
  | | }
353
374
  | |
@@ -7,6 +7,14 @@ import java.io.File
7
7
  import java.io.FileOutputStream
8
8
  import java.util.concurrent.CountDownLatch
9
9
 
10
+ /**
11
+ * A single encoded Opus frame with optional per-frame audio level.
12
+ */
13
+ data class EncodedFrame(
14
+ val data: ByteArray,
15
+ val audioLevel: Float? // null when enableAudioLevel is false
16
+ )
17
+
10
18
  /**
11
19
  * AudioProcessor - Dedicated encoding thread for Opus encoding and dispatch.
12
20
  *
@@ -31,20 +39,20 @@ class AudioProcessor(private val config: AudioConfig) {
31
39
  private var opusEncoder: OpusEncoder? = null
32
40
  private val pendingSamples = mutableListOf<Short>()
33
41
  private val samplesPerFrame: Int = (config.sampleRate * config.frameSize / 1000.0).toInt()
42
+ private val framesPerPacket: Int = Math.max(1, config.framesPerCallback)
43
+ private var packetFrames = mutableListOf<EncodedFrame>() // independent Opus packets with per-frame level
34
44
  private var sequenceNumber: Int = 0
35
45
  private var startTime: Double = 0.0
36
46
 
37
- // Audio level: accumulate RMS over ~360ms window
38
- private var levelSumSquares: Double = 0.0
39
- private var levelSampleCount: Int = 0
40
- private val levelUpdateSamples: Int = config.sampleRate * config.channels * config.audioLevelWindow / 1000
41
- private var currentLevel: Float = 0.0f
47
+ // Whether to compute per-frame audio level
48
+ private val enableAudioLevel: Boolean = config.enableAudioLevel
42
49
 
43
50
  // Debug file output
44
51
  private var pcmFileOutputStream: FileOutputStream? = null
45
52
 
46
53
  // Event callbacks (all invoked on encoding thread)
47
- private var onAudioChunk: ((ByteArray, Double, Int, Float) -> Unit)? = null
54
+ // onAudioChunk: (frames, timestamp, sequenceNumber, duration, frameCount)
55
+ private var onAudioChunk: ((List<EncodedFrame>, Double, Int, Double, Int) -> Unit)? = null
48
56
  private var onStarted: ((timestamp: Double, sampleRate: Int, channels: Int, bitrate: Int, frameSize: Double, preSkip: Int) -> Unit)? = null
49
57
  private var onEnd: ((timestamp: Double, totalDuration: Double, totalPackets: Int) -> Unit)? = null
50
58
 
@@ -138,7 +146,7 @@ class AudioProcessor(private val config: AudioConfig) {
138
146
 
139
147
  // MARK: - Event callback setters
140
148
 
141
- fun setOnAudioChunk(callback: (ByteArray, Double, Int, Float) -> Unit) {
149
+ fun setOnAudioChunk(callback: (List<EncodedFrame>, Double, Int, Double, Int) -> Unit) {
142
150
  this.onAudioChunk = callback
143
151
  }
144
152
 
@@ -179,6 +187,7 @@ class AudioProcessor(private val config: AudioConfig) {
179
187
  fos.write(bytes)
180
188
  }
181
189
 
190
+ // Encode single frame to Opus
182
191
  val opusData = try {
183
192
  encoder.encode(frameData, samplesPerFrame)
184
193
  } catch (e: Exception) {
@@ -191,37 +200,47 @@ class AudioProcessor(private val config: AudioConfig) {
191
200
  continue
192
201
  }
193
202
 
194
- // Accumulate energy for RMS over ~360ms window
195
- for (sample in frameData) {
196
- val s = sample.toDouble() / 32768.0
197
- levelSumSquares += s * s
198
- }
199
- levelSampleCount += frameData.size
200
-
201
- if (levelSampleCount >= levelUpdateSamples) {
202
- val rms = Math.sqrt(levelSumSquares / levelSampleCount)
203
+ // Per-frame audio level (RMS dBFS → 0~1)
204
+ var frameLevel: Float? = null
205
+ if (enableAudioLevel) {
206
+ var sumSquares = 0.0
207
+ for (sample in frameData) {
208
+ val s = sample.toDouble() / 32768.0
209
+ sumSquares += s * s
210
+ }
211
+ val rms = Math.sqrt(sumSquares / frameData.size)
203
212
  val dB = 20.0 * Math.log10(Math.max(rms, 1e-10))
204
213
  val dbFloor = -35.0
205
214
  val dbCeiling = -6.0
206
- currentLevel = Math.max(0.0, Math.min(1.0, (dB - dbFloor) / (dbCeiling - dbFloor))).toFloat()
207
- levelSumSquares = 0.0
208
- levelSampleCount = 0
215
+ frameLevel = Math.max(0.0, Math.min(1.0, (dB - dbFloor) / (dbCeiling - dbFloor))).toFloat()
209
216
  }
210
217
 
211
- val timestampMs = System.currentTimeMillis().toDouble()
212
- onAudioChunk?.invoke(opusData, timestampMs, sequenceNumber, currentLevel)
213
- sequenceNumber++
218
+ // Accumulate encoded frame as independent packet (no byte concatenation)
219
+ packetFrames.add(EncodedFrame(data = opusData, audioLevel = frameLevel))
220
+
221
+ // Emit when we have enough frames (framesPerCallback)
222
+ if (packetFrames.size >= framesPerPacket) {
223
+ val timestampMs = System.currentTimeMillis().toDouble()
224
+ val frameCount = packetFrames.size
225
+ val duration = frameCount * config.frameSize
226
+ onAudioChunk?.invoke(packetFrames.toList(), timestampMs, sequenceNumber, duration, frameCount)
227
+ sequenceNumber++
228
+ packetFrames.clear()
229
+ }
214
230
  }
215
231
  }
216
232
 
217
233
  private fun _flushRemainingFrames() {
218
234
  val encoder = opusEncoder ?: return
219
- if (pendingSamples.isEmpty()) return
220
235
 
221
- while (pendingSamples.size < samplesPerFrame) {
222
- pendingSamples.add(0)
236
+ // Pad remaining PCM with silence to fill the last frame
237
+ if (pendingSamples.isNotEmpty() && pendingSamples.size < samplesPerFrame) {
238
+ while (pendingSamples.size < samplesPerFrame) {
239
+ pendingSamples.add(0)
240
+ }
223
241
  }
224
242
 
243
+ // Encode remaining frames
225
244
  while (pendingSamples.size >= samplesPerFrame) {
226
245
  val frameData = ShortArray(samplesPerFrame)
227
246
  for (i in 0 until samplesPerFrame) {
@@ -235,10 +254,18 @@ class AudioProcessor(private val config: AudioConfig) {
235
254
  }
236
255
 
237
256
  if (opusData == null || opusData.isEmpty()) continue
257
+ // Flush frames get level 0 (silence-padded)
258
+ packetFrames.add(EncodedFrame(data = opusData, audioLevel = if (enableAudioLevel) 0.0f else null))
259
+ }
238
260
 
261
+ // Flush any remaining frames (even if less than framesPerPacket)
262
+ if (packetFrames.isNotEmpty()) {
239
263
  val timestampMs = System.currentTimeMillis().toDouble()
240
- onAudioChunk?.invoke(opusData, timestampMs, sequenceNumber, currentLevel)
264
+ val frameCount = packetFrames.size
265
+ val duration = frameCount * config.frameSize
266
+ onAudioChunk?.invoke(packetFrames.toList(), timestampMs, sequenceNumber, duration, frameCount)
241
267
  sequenceNumber++
268
+ packetFrames.clear()
242
269
  }
243
270
  }
244
271
  }
@@ -37,7 +37,7 @@ class AudioRecordManager(
37
37
  private var loggedFirstBuffer = false
38
38
 
39
39
  // Event callbacks
40
- private var onAudioChunk: ((ByteArray, Double, Int, Float) -> Unit)? = null
40
+ private var onAudioChunk: ((List<EncodedFrame>, Double, Int, Double, Int) -> Unit)? = null
41
41
  private var onStarted: ((timestamp: Double, sampleRate: Int, channels: Int, bitrate: Int, frameSize: Double, preSkip: Int) -> Unit)? = null
42
42
  private var onEnd: ((timestamp: Double, totalDuration: Double, totalPackets: Int) -> Unit)? = null
43
43
  private var onAmplitude: ((Float, Float, Double) -> Unit)? = null
@@ -91,8 +91,8 @@ class AudioRecordManager(
91
91
 
92
92
  // Create and start AudioProcessor (encoding thread)
93
93
  val proc = AudioProcessor(config)
94
- proc.setOnAudioChunk { data, timestamp, seq, level ->
95
- onAudioChunk?.invoke(data, timestamp, seq, level)
94
+ proc.setOnAudioChunk { frames, timestamp, seq, duration, frameCount ->
95
+ onAudioChunk?.invoke(frames, timestamp, seq, duration, frameCount)
96
96
  }
97
97
  proc.setOnStarted { timestamp, sampleRate, channels, bitrate, frameSize, preSkip ->
98
98
  onStarted?.invoke(timestamp, sampleRate, channels, bitrate, frameSize, preSkip)
@@ -172,7 +172,7 @@ class AudioRecordManager(
172
172
  }
173
173
 
174
174
  // Event handlers
175
- fun setOnAudioChunk(callback: (ByteArray, Double, Int, Float) -> Unit) {
175
+ fun setOnAudioChunk(callback: (List<EncodedFrame>, Double, Int, Double, Int) -> Unit) {
176
176
  this.onAudioChunk = callback
177
177
  }
178
178
 
@@ -81,12 +81,19 @@ class OpuslibModule : Module() {
81
81
 
82
82
  // Set up event callbacks — audioStarted/audioEnd come from encoding thread
83
83
  android.util.Log.d(TAG, "🔗 Setting up event callbacks...")
84
- manager.setOnAudioChunk { data, timestamp, sequenceNumber, audioLevel ->
84
+ manager.setOnAudioChunk { frames, timestamp, sequenceNumber, duration, frameCount ->
85
+ // Each frame is an independent Opus packet wrapped in { data, audioLevel? }
86
+ val frameObjects = frames.map { frame ->
87
+ val obj = mutableMapOf<String, Any>("data" to frame.data)
88
+ frame.audioLevel?.let { obj["audioLevel"] = it }
89
+ obj
90
+ }
85
91
  sendEvent("audioChunk", mapOf(
86
- "data" to data,
92
+ "frames" to frameObjects,
87
93
  "timestamp" to timestamp,
88
94
  "sequenceNumber" to sequenceNumber,
89
- "audioLevel" to audioLevel
95
+ "duration" to duration,
96
+ "frameCount" to frameCount
90
97
  ))
91
98
  }
92
99
 
@@ -191,7 +198,7 @@ class AudioConfig : Record {
191
198
  var frameSize: Double = 20.0
192
199
 
193
200
  @Field
194
- var packetDuration: Double = 20.0
201
+ var framesPerCallback: Int = 1
195
202
 
196
203
  @Field
197
204
  var dredDuration: Int = 100 // NEW: DRED recovery duration in ms
@@ -203,7 +210,7 @@ class AudioConfig : Record {
203
210
  var amplitudeEventInterval: Double = 16.0
204
211
 
205
212
  @Field
206
- var audioLevelWindow: Int = 360 // RMS window duration in ms (default 360)
213
+ var enableAudioLevel: Boolean = false // Enable per-frame audio level calculation
207
214
 
208
215
  @Field
209
216
  var saveDebugAudio: Boolean = false
@@ -10,31 +10,42 @@ export interface AudioConfig {
10
10
  bitrate: number;
11
11
  /** Frame duration in milliseconds (2.5, 5, 10, 20, 40, 60) */
12
12
  frameSize: number;
13
- /** Packet duration in milliseconds (typically 20-100ms) */
14
- packetDuration: number;
13
+ /** Number of Opus frames per callback (default 1). Multiple frames are returned as independent packets in frames[], reducing JS bridge calls */
14
+ framesPerCallback?: number;
15
15
  /** DRED recovery duration in milliseconds (0-100, default 100) - NEW in Opus 1.6 */
16
16
  dredDuration?: number;
17
17
  /** Enable amplitude events for waveform visualization */
18
18
  enableAmplitudeEvents?: boolean;
19
19
  /** Amplitude event interval in milliseconds (default 16) */
20
20
  amplitudeEventInterval?: number;
21
- /** Audio level RMS window duration in milliseconds (default 360) */
22
- audioLevelWindow?: number;
21
+ /** Enable per-frame audio level calculation (default false). When enabled, each OpusFrame includes audioLevel */
22
+ enableAudioLevel?: boolean;
23
23
  /** Save debug PCM audio to file (development only) */
24
24
  saveDebugAudio?: boolean;
25
25
  }
26
+ /**
27
+ * A single Opus frame — one complete opus_encode() output with its own TOC byte
28
+ */
29
+ export interface OpusFrame {
30
+ /** Opus-encoded packet data (independent, decodable) */
31
+ data: ArrayBuffer;
32
+ /** Per-frame audio level 0.0~1.0 (only present when enableAudioLevel is true) */
33
+ audioLevel?: number;
34
+ }
26
35
  /**
27
36
  * Audio chunk event payload (Opus-encoded data)
28
37
  */
29
38
  export interface AudioChunkEvent {
30
- /** Opus-encoded audio data as ArrayBuffer */
31
- data: ArrayBuffer;
39
+ /** Array of independent Opus frames. Each frame is a complete opus_encode() output, decodable on its own */
40
+ frames: OpusFrame[];
32
41
  /** Timestamp in milliseconds */
33
42
  timestamp: number;
34
- /** Sequence number (increments with each packet) */
43
+ /** Sequence number (increments with each callback) */
35
44
  sequenceNumber: number;
36
- /** Audio level normalized to 0.0~1.0 (mapped from dBFS, 0 = silence, 1 = loud) */
37
- audioLevel: number;
45
+ /** Duration of all frames in milliseconds (frameSize * frameCount) */
46
+ duration: number;
47
+ /** Number of Opus frames in this callback (= frames.length) */
48
+ frameCount: number;
38
49
  }
39
50
  /**
40
51
  * Amplitude event payload (for waveform visualization)
@@ -1 +1 @@
1
- {"version":3,"file":"Opuslib.types.d.ts","sourceRoot":"","sources":["../src/Opuslib.types.ts"],"names":[],"mappings":"AAAA;;GAEG;AACH,MAAM,WAAW,WAAW;IAC1B,2DAA2D;IAC3D,UAAU,EAAE,MAAM,CAAA;IAClB,gDAAgD;IAChD,QAAQ,EAAE,MAAM,CAAA;IAChB,6DAA6D;IAC7D,OAAO,EAAE,MAAM,CAAA;IACf,8DAA8D;IAC9D,SAAS,EAAE,MAAM,CAAA;IACjB,2DAA2D;IAC3D,cAAc,EAAE,MAAM,CAAA;IACtB,oFAAoF;IACpF,YAAY,CAAC,EAAE,MAAM,CAAA;IACrB,yDAAyD;IACzD,qBAAqB,CAAC,EAAE,OAAO,CAAA;IAC/B,4DAA4D;IAC5D,sBAAsB,CAAC,EAAE,MAAM,CAAA;IAC/B,oEAAoE;IACpE,gBAAgB,CAAC,EAAE,MAAM,CAAA;IACzB,sDAAsD;IACtD,cAAc,CAAC,EAAE,OAAO,CAAA;CACzB;AAED;;GAEG;AACH,MAAM,WAAW,eAAe;IAC9B,6CAA6C;IAC7C,IAAI,EAAE,WAAW,CAAA;IACjB,gCAAgC;IAChC,SAAS,EAAE,MAAM,CAAA;IACjB,oDAAoD;IACpD,cAAc,EAAE,MAAM,CAAA;IACtB,kFAAkF;IAClF,UAAU,EAAE,MAAM,CAAA;CACnB;AAED;;GAEG;AACH,MAAM,WAAW,cAAc;IAC7B,6CAA6C;IAC7C,GAAG,EAAE,MAAM,CAAA;IACX,iCAAiC;IACjC,IAAI,EAAE,MAAM,CAAA;IACZ,gCAAgC;IAChC,SAAS,EAAE,MAAM,CAAA;CAClB;AAED;;;GAGG;AACH,MAAM,WAAW,iBAAiB;IAChC,uDAAuD;IACvD,SAAS,EAAE,MAAM,CAAA;IACjB,oCAAoC;IACpC,UAAU,EAAE,MAAM,CAAA;IAClB,yBAAyB;IACzB,QAAQ,EAAE,MAAM,CAAA;IAChB,wCAAwC;IACxC,OAAO,EAAE,MAAM,CAAA;IACf,qCAAqC;IACrC,SAAS,EAAE,MAAM,CAAA;IACjB,yFAAyF;IACzF,OAAO,EAAE,MAAM,CAAA;CAChB;AAED;;;GAGG;AACH,MAAM,WAAW,aAAa;IAC5B,uDAAuD;IACvD,SAAS,EAAE,MAAM,CAAA;IACjB,8DAA8D;IAC9D,aAAa,EAAE,MAAM,CAAA;IACrB,yDAAyD;IACzD,YAAY,EAAE,MAAM,CAAA;CACrB;AAED;;GAEG;AACH,MAAM,WAAW,UAAU;IACzB,iBAAiB;IACjB,IAAI,EAAE,MAAM,CAAA;IACZ,oBAAoB;IACpB,OAAO,EAAE,MAAM,CAAA;CAChB;AAED;;GAEG;AACH,MAAM,WAAW,YAAY;IAC3B,MAAM,EAAE,MAAM,IAAI,CAAA;CACnB"}
1
+ {"version":3,"file":"Opuslib.types.d.ts","sourceRoot":"","sources":["../src/Opuslib.types.ts"],"names":[],"mappings":"AAAA;;GAEG;AACH,MAAM,WAAW,WAAW;IAC1B,2DAA2D;IAC3D,UAAU,EAAE,MAAM,CAAA;IAClB,gDAAgD;IAChD,QAAQ,EAAE,MAAM,CAAA;IAChB,6DAA6D;IAC7D,OAAO,EAAE,MAAM,CAAA;IACf,8DAA8D;IAC9D,SAAS,EAAE,MAAM,CAAA;IACjB,gJAAgJ;IAChJ,iBAAiB,CAAC,EAAE,MAAM,CAAA;IAC1B,oFAAoF;IACpF,YAAY,CAAC,EAAE,MAAM,CAAA;IACrB,yDAAyD;IACzD,qBAAqB,CAAC,EAAE,OAAO,CAAA;IAC/B,4DAA4D;IAC5D,sBAAsB,CAAC,EAAE,MAAM,CAAA;IAC/B,iHAAiH;IACjH,gBAAgB,CAAC,EAAE,OAAO,CAAA;IAC1B,sDAAsD;IACtD,cAAc,CAAC,EAAE,OAAO,CAAA;CACzB;AAED;;GAEG;AACH,MAAM,WAAW,SAAS;IACxB,wDAAwD;IACxD,IAAI,EAAE,WAAW,CAAA;IACjB,iFAAiF;IACjF,UAAU,CAAC,EAAE,MAAM,CAAA;CACpB;AAED;;GAEG;AACH,MAAM,WAAW,eAAe;IAC9B,4GAA4G;IAC5G,MAAM,EAAE,SAAS,EAAE,CAAA;IACnB,gCAAgC;IAChC,SAAS,EAAE,MAAM,CAAA;IACjB,sDAAsD;IACtD,cAAc,EAAE,MAAM,CAAA;IACtB,sEAAsE;IACtE,QAAQ,EAAE,MAAM,CAAA;IAChB,+DAA+D;IAC/D,UAAU,EAAE,MAAM,CAAA;CACnB;AAED;;GAEG;AACH,MAAM,WAAW,cAAc;IAC7B,6CAA6C;IAC7C,GAAG,EAAE,MAAM,CAAA;IACX,iCAAiC;IACjC,IAAI,EAAE,MAAM,CAAA;IACZ,gCAAgC;IAChC,SAAS,EAAE,MAAM,CAAA;CAClB;AAED;;;GAGG;AACH,MAAM,WAAW,iBAAiB;IAChC,uDAAuD;IACvD,SAAS,EAAE,MAAM,CAAA;IACjB,oCAAoC;IACpC,UAAU,EAAE,MAAM,CAAA;IAClB,yBAAyB;IACzB,QAAQ,EAAE,MAAM,CAAA;IAChB,wCAAwC;IACxC,OAAO,EAAE,MAAM,CAAA;IACf,qCAAqC;IACrC,SAAS,EAAE,MAAM,CAAA;IACjB,yFAAyF;IACzF,OAAO,EAAE,MAAM,CAAA;CAChB;AAED;;;GAGG;AACH,MAAM,WAAW,aAAa;IAC5B,uDAAuD;IACvD,SAAS,EAAE,MAAM,CAAA;IACjB,8DAA8D;IAC9D,aAAa,EAAE,MAAM,CAAA;IACrB,yDAAyD;IACzD,YAAY,EAAE,MAAM,CAAA;CACrB;AAED;;GAEG;AACH,MAAM,WAAW,UAAU;IACzB,iBAAiB;IACjB,IAAI,EAAE,MAAM,CAAA;IACZ,oBAAoB;IACpB,OAAO,EAAE,MAAM,CAAA;CAChB;AAED;;GAEG;AACH,MAAM,WAAW,YAAY;IAC3B,MAAM,EAAE,MAAM,IAAI,CAAA;CACnB"}
@@ -1 +1 @@
1
- {"version":3,"file":"Opuslib.types.js","sourceRoot":"","sources":["../src/Opuslib.types.ts"],"names":[],"mappings":"","sourcesContent":["/**\n * Audio configuration for Opus encoding\n */\nexport interface AudioConfig {\n /** Sample rate in Hz (8000, 12000, 16000, 24000, 48000) */\n sampleRate: number\n /** Number of channels (1 = mono, 2 = stereo) */\n channels: number\n /** Target bitrate in bits/second (e.g., 24000 for 24kbps) */\n bitrate: number\n /** Frame duration in milliseconds (2.5, 5, 10, 20, 40, 60) */\n frameSize: number\n /** Packet duration in milliseconds (typically 20-100ms) */\n packetDuration: number\n /** DRED recovery duration in milliseconds (0-100, default 100) - NEW in Opus 1.6 */\n dredDuration?: number\n /** Enable amplitude events for waveform visualization */\n enableAmplitudeEvents?: boolean\n /** Amplitude event interval in milliseconds (default 16) */\n amplitudeEventInterval?: number\n /** Audio level RMS window duration in milliseconds (default 360) */\n audioLevelWindow?: number\n /** Save debug PCM audio to file (development only) */\n saveDebugAudio?: boolean\n}\n\n/**\n * Audio chunk event payload (Opus-encoded data)\n */\nexport interface AudioChunkEvent {\n /** Opus-encoded audio data as ArrayBuffer */\n data: ArrayBuffer\n /** Timestamp in milliseconds */\n timestamp: number\n /** Sequence number (increments with each packet) */\n sequenceNumber: number\n /** Audio level normalized to 0.0~1.0 (mapped from dBFS, 0 = silence, 1 = loud) */\n audioLevel: number\n}\n\n/**\n * Amplitude event payload (for waveform visualization)\n */\nexport interface AmplitudeEvent {\n /** Root mean square amplitude (0.0 - 1.0) */\n rms: number\n /** Peak amplitude (0.0 - 1.0) */\n peak: number\n /** Timestamp in milliseconds */\n timestamp: number\n}\n\n/**\n * Audio started event payload\n * Emitted when audio streaming successfully starts\n */\nexport interface AudioStartedEvent {\n /** Timestamp in milliseconds when streaming started */\n 
timestamp: number\n /** Actual sample rate being used */\n sampleRate: number\n /** Number of channels */\n channels: number\n /** Configured bitrate in bits/second */\n bitrate: number\n /** Frame duration in milliseconds */\n frameSize: number\n /** Opus encoder lookahead in samples (decoder should skip this many samples at start) */\n preSkip: number\n}\n\n/**\n * Audio end event payload\n * Emitted when audio streaming stops\n */\nexport interface AudioEndEvent {\n /** Timestamp in milliseconds when streaming stopped */\n timestamp: number\n /** Total duration of the streaming session in milliseconds */\n totalDuration: number\n /** Total number of packets encoded during the session */\n totalPackets: number\n}\n\n/**\n * Error event payload\n */\nexport interface ErrorEvent {\n /** Error code */\n code: string\n /** Error message */\n message: string\n}\n\n/**\n * Event subscription\n */\nexport interface Subscription {\n remove: () => void\n}\n"]}
1
+ {"version":3,"file":"Opuslib.types.js","sourceRoot":"","sources":["../src/Opuslib.types.ts"],"names":[],"mappings":"","sourcesContent":["/**\n * Audio configuration for Opus encoding\n */\nexport interface AudioConfig {\n /** Sample rate in Hz (8000, 12000, 16000, 24000, 48000) */\n sampleRate: number\n /** Number of channels (1 = mono, 2 = stereo) */\n channels: number\n /** Target bitrate in bits/second (e.g., 24000 for 24kbps) */\n bitrate: number\n /** Frame duration in milliseconds (2.5, 5, 10, 20, 40, 60) */\n frameSize: number\n /** Number of Opus frames per callback (default 1). Multiple frames are returned as independent packets in frames[], reducing JS bridge calls */\n framesPerCallback?: number\n /** DRED recovery duration in milliseconds (0-100, default 100) - NEW in Opus 1.6 */\n dredDuration?: number\n /** Enable amplitude events for waveform visualization */\n enableAmplitudeEvents?: boolean\n /** Amplitude event interval in milliseconds (default 16) */\n amplitudeEventInterval?: number\n /** Enable per-frame audio level calculation (default false). When enabled, each OpusFrame includes audioLevel */\n enableAudioLevel?: boolean\n /** Save debug PCM audio to file (development only) */\n saveDebugAudio?: boolean\n}\n\n/**\n * A single Opus frame — one complete opus_encode() output with its own TOC byte\n */\nexport interface OpusFrame {\n /** Opus-encoded packet data (independent, decodable) */\n data: ArrayBuffer\n /** Per-frame audio level 0.0~1.0 (only present when enableAudioLevel is true) */\n audioLevel?: number\n}\n\n/**\n * Audio chunk event payload (Opus-encoded data)\n */\nexport interface AudioChunkEvent {\n /** Array of independent Opus frames. 
Each frame is a complete opus_encode() output, decodable on its own */\n frames: OpusFrame[]\n /** Timestamp in milliseconds */\n timestamp: number\n /** Sequence number (increments with each callback) */\n sequenceNumber: number\n /** Duration of all frames in milliseconds (frameSize * frameCount) */\n duration: number\n /** Number of Opus frames in this callback (= frames.length) */\n frameCount: number\n}\n\n/**\n * Amplitude event payload (for waveform visualization)\n */\nexport interface AmplitudeEvent {\n /** Root mean square amplitude (0.0 - 1.0) */\n rms: number\n /** Peak amplitude (0.0 - 1.0) */\n peak: number\n /** Timestamp in milliseconds */\n timestamp: number\n}\n\n/**\n * Audio started event payload\n * Emitted when audio streaming successfully starts\n */\nexport interface AudioStartedEvent {\n /** Timestamp in milliseconds when streaming started */\n timestamp: number\n /** Actual sample rate being used */\n sampleRate: number\n /** Number of channels */\n channels: number\n /** Configured bitrate in bits/second */\n bitrate: number\n /** Frame duration in milliseconds */\n frameSize: number\n /** Opus encoder lookahead in samples (decoder should skip this many samples at start) */\n preSkip: number\n}\n\n/**\n * Audio end event payload\n * Emitted when audio streaming stops\n */\nexport interface AudioEndEvent {\n /** Timestamp in milliseconds when streaming stopped */\n timestamp: number\n /** Total duration of the streaming session in milliseconds */\n totalDuration: number\n /** Total number of packets encoded during the session */\n totalPackets: number\n}\n\n/**\n * Error event payload\n */\nexport interface ErrorEvent {\n /** Error code */\n code: string\n /** Error message */\n message: string\n}\n\n/**\n * Event subscription\n */\nexport interface Subscription {\n remove: () => void\n}\n"]}
@@ -17,7 +17,7 @@ declare const _default: {
17
17
  * channels: 1,
18
18
  * bitrate: 24000,
19
19
  * frameSize: 20,
20
- * packetDuration: 20,
20
+ * framesPerCallback: 5, // batch 5 independent Opus packets per event
21
21
  * dredDuration: 100, // Enable 100ms DRED recovery
22
22
  * })
23
23
  * ```
@@ -45,8 +45,10 @@ declare const _default: {
45
45
  * ```ts
46
46
  * // Listen for audio chunks
47
47
  * const subscription = Opuslib.addListener('audioChunk', (event) => {
48
- * console.log('Received Opus packet:', event.data.byteLength, 'bytes')
49
- * websocket.send(event.data)
48
+ * for (const frame of event.frames) {
49
+ * console.log('Opus packet:', frame.data.byteLength, 'bytes')
50
+ * websocket.send(frame.data)
51
+ * }
50
52
  * })
51
53
  *
52
54
  * // Listen for errors
@@ -1 +1 @@
1
- {"version":3,"file":"OpuslibModule.d.ts","sourceRoot":"","sources":["../src/OpuslibModule.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,WAAW,EAAE,eAAe,EAAE,cAAc,EAAE,iBAAiB,EAAE,aAAa,EAAE,UAAU,EAAE,YAAY,EAAE,MAAM,iBAAiB,CAAA;AAoC/I;;;;;GAKG;;IAED;;;;;;;;;;;;;;;OAeG;6BACsB,WAAW;IAEpC;;OAEG;;IAGH;;OAEG;;IAGH;;OAEG;;IAGH;;;;;;;;;;;;;;;;;;;;;;;OAuBG;iBAIsE;QACvE,CAAC,SAAS,EAAE,YAAY,EAAE,QAAQ,EAAE,CAAC,KAAK,EAAE,eAAe,KAAK,IAAI,GAAG,YAAY,CAAA;QACnF,CAAC,SAAS,EAAE,WAAW,EAAE,QAAQ,EAAE,CAAC,KAAK,EAAE,cAAc,KAAK,IAAI,GAAG,YAAY,CAAA;QACjF,CAAC,SAAS,EAAE,cAAc,EAAE,QAAQ,EAAE,CAAC,KAAK,EAAE,iBAAiB,KAAK,IAAI,GAAG,YAAY,CAAA;QACvF,CAAC,SAAS,EAAE,UAAU,EAAE,QAAQ,EAAE,CAAC,KAAK,EAAE,aAAa,KAAK,IAAI,GAAG,YAAY,CAAA;QAC/E,CAAC,SAAS,EAAE,OAAO,EAAE,QAAQ,EAAE,CAAC,KAAK,EAAE,UAAU,KAAK,IAAI,GAAG,YAAY,CAAA;KAC1E;IAED;;;;;OAKG;qCAES,CAAC,KAAK,EAAE,cAAc,KAAK,IAAI,KACxC,YAAY;IAEf;;;;;OAKG;iCAES,CAAC,KAAK,EAAE,UAAU,KAAK,IAAI,KACpC,YAAY;;AAvFjB,wBAwFC"}
1
+ {"version":3,"file":"OpuslibModule.d.ts","sourceRoot":"","sources":["../src/OpuslibModule.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,WAAW,EAAE,eAAe,EAAE,cAAc,EAAE,iBAAiB,EAAE,aAAa,EAAE,UAAU,EAAE,YAAY,EAAE,MAAM,iBAAiB,CAAA;AAoC/I;;;;;GAKG;;IAED;;;;;;;;;;;;;;;OAeG;6BACsB,WAAW;IAEpC;;OAEG;;IAGH;;OAEG;;IAGH;;OAEG;;IAGH;;;;;;;;;;;;;;;;;;;;;;;;;OAyBG;iBAIsE;QACvE,CAAC,SAAS,EAAE,YAAY,EAAE,QAAQ,EAAE,CAAC,KAAK,EAAE,eAAe,KAAK,IAAI,GAAG,YAAY,CAAA;QACnF,CAAC,SAAS,EAAE,WAAW,EAAE,QAAQ,EAAE,CAAC,KAAK,EAAE,cAAc,KAAK,IAAI,GAAG,YAAY,CAAA;QACjF,CAAC,SAAS,EAAE,cAAc,EAAE,QAAQ,EAAE,CAAC,KAAK,EAAE,iBAAiB,KAAK,IAAI,GAAG,YAAY,CAAA;QACvF,CAAC,SAAS,EAAE,UAAU,EAAE,QAAQ,EAAE,CAAC,KAAK,EAAE,aAAa,KAAK,IAAI,GAAG,YAAY,CAAA;QAC/E,CAAC,SAAS,EAAE,OAAO,EAAE,QAAQ,EAAE,CAAC,KAAK,EAAE,UAAU,KAAK,IAAI,GAAG,YAAY,CAAA;KAC1E;IAED;;;;;OAKG;qCAES,CAAC,KAAK,EAAE,cAAc,KAAK,IAAI,KACxC,YAAY;IAEf;;;;;OAKG;iCAES,CAAC,KAAK,EAAE,UAAU,KAAK,IAAI,KACpC,YAAY;;AAzFjB,wBA0FC"}
@@ -21,7 +21,7 @@ export default {
21
21
  * channels: 1,
22
22
  * bitrate: 24000,
23
23
  * frameSize: 20,
24
- * packetDuration: 20,
24
+ * framesPerCallback: 5, // batch 5 independent Opus packets per event
25
25
  * dredDuration: 100, // Enable 100ms DRED recovery
26
26
  * })
27
27
  * ```
@@ -49,8 +49,10 @@ export default {
49
49
  * ```ts
50
50
  * // Listen for audio chunks
51
51
  * const subscription = Opuslib.addListener('audioChunk', (event) => {
52
- * console.log('Received Opus packet:', event.data.byteLength, 'bytes')
53
- * websocket.send(event.data)
52
+ * for (const frame of event.frames) {
53
+ * console.log('Opus packet:', frame.data.byteLength, 'bytes')
54
+ * websocket.send(frame.data)
55
+ * }
54
56
  * })
55
57
  *
56
58
  * // Listen for errors
@@ -1 +1 @@
1
- {"version":3,"file":"OpuslibModule.js","sourceRoot":"","sources":["../src/OpuslibModule.ts"],"names":[],"mappings":"AAAA,OAAO,EAAgB,mBAAmB,EAAE,YAAY,EAAE,MAAM,MAAM,CAAA;AA+BtE,kCAAkC;AAClC,MAAM,aAAa,GAAG,mBAAmB,CAAoB,SAAS,CAAC,CAAA;AAEvE,+CAA+C;AAC/C,MAAM,OAAO,GAAG,IAAI,YAAY,CAAC,aAAoB,CAAC,CAAA;AAEtD;;;;;GAKG;AACH,eAAe;IACb;;;;;;;;;;;;;;;OAeG;IACH,cAAc,EAAE,CAAC,MAAmB,EAAE,EAAE,CAAC,aAAa,CAAC,cAAc,CAAC,MAAM,CAAC;IAE7E;;OAEG;IACH,aAAa,EAAE,GAAG,EAAE,CAAC,aAAa,CAAC,aAAa,EAAE;IAElD;;OAEG;IACH,cAAc,EAAE,GAAG,EAAE,CAAC,aAAa,CAAC,cAAc,EAAE;IAEpD;;OAEG;IACH,eAAe,EAAE,GAAG,EAAE,CAAC,aAAa,CAAC,eAAe,EAAE;IAEtD;;;;;;;;;;;;;;;;;;;;;;;OAuBG;IACH,WAAW,EAAE,CAAC,CACZ,SAA6E,EAC7E,QAA4G,EAC9F,EAAE,CAAE,OAAe,CAAC,WAAW,CAAC,SAAS,EAAE,QAAQ,CAAC,CAMnE;IAED;;;;;OAKG;IACH,oBAAoB,EAAE,CACpB,QAAyC,EAC3B,EAAE,CAAE,OAAe,CAAC,WAAW,CAAC,WAAW,EAAE,QAAQ,CAAC;IAEtE;;;;;OAKG;IACH,gBAAgB,EAAE,CAChB,QAAqC,EACvB,EAAE,CAAE,OAAe,CAAC,WAAW,CAAC,OAAO,EAAE,QAAQ,CAAC;CACnE,CAAA","sourcesContent":["import { NativeModule, requireNativeModule, EventEmitter } from 'expo'\nimport type { AudioConfig, AudioChunkEvent, AmplitudeEvent, AudioStartedEvent, AudioEndEvent, ErrorEvent, Subscription } from './Opuslib.types'\n\n/**\n * Opuslib Native Module Interface\n *\n * Provides native audio capture and Opus 1.6 encoding with DRED support\n */\ndeclare class OpuslibModuleType extends NativeModule {\n /**\n * Start audio streaming with Opus encoding\n * @param config Audio configuration\n */\n startStreaming(config: AudioConfig): Promise<void>\n\n /**\n * Stop audio streaming\n */\n stopStreaming(): Promise<void>\n\n /**\n * Pause audio streaming (keeps resources allocated)\n */\n pauseStreaming(): void\n\n /**\n * Resume audio streaming\n */\n resumeStreaming(): void\n}\n\n// Load the native module from JSI\nconst OpuslibModule = requireNativeModule<OpuslibModuleType>('Opuslib')\n\n// Create event emitter for listening to events\nconst emitter = new EventEmitter(OpuslibModule as any)\n\n/**\n * Opuslib - Opus 1.6 
Audio Encoding with DRED Support\n *\n * This module provides real-time audio capture and Opus 1.6 encoding\n * with Deep Redundancy (DRED) for improved quality on lossy networks.\n */\nexport default {\n /**\n * Start audio streaming with Opus encoding\n *\n * @param config Audio configuration\n * @example\n * ```ts\n * await Opuslib.startStreaming({\n * sampleRate: 16000,\n * channels: 1,\n * bitrate: 24000,\n * frameSize: 20,\n * packetDuration: 20,\n * dredDuration: 100, // Enable 100ms DRED recovery\n * })\n * ```\n */\n startStreaming: (config: AudioConfig) => OpuslibModule.startStreaming(config),\n\n /**\n * Stop audio streaming and release resources\n */\n stopStreaming: () => OpuslibModule.stopStreaming(),\n\n /**\n * Pause audio streaming (keeps resources allocated)\n */\n pauseStreaming: () => OpuslibModule.pauseStreaming(),\n\n /**\n * Resume audio streaming\n */\n resumeStreaming: () => OpuslibModule.resumeStreaming(),\n\n /**\n * Listen for events (audioChunk, amplitude, or error)\n *\n * @param eventName Event type to listen for\n * @param listener Event listener callback\n * @returns Subscription object with remove() method\n * @example\n * ```ts\n * // Listen for audio chunks\n * const subscription = Opuslib.addListener('audioChunk', (event) => {\n * console.log('Received Opus packet:', event.data.byteLength, 'bytes')\n * websocket.send(event.data)\n * })\n *\n * // Listen for errors\n * const errorSub = Opuslib.addListener('error', (event) => {\n * console.error('Error:', event.message)\n * })\n *\n * // Later: unsubscribe\n * subscription.remove()\n * errorSub.remove()\n * ```\n */\n addListener: ((\n eventName: 'audioChunk' | 'amplitude' | 'audioStarted' | 'audioEnd' | 'error',\n listener: (event: AudioChunkEvent | AmplitudeEvent | AudioStartedEvent | AudioEndEvent | ErrorEvent) => void\n ): Subscription => (emitter as any).addListener(eventName, listener)) as {\n (eventName: 'audioChunk', listener: (event: AudioChunkEvent) => void): 
Subscription\n (eventName: 'amplitude', listener: (event: AmplitudeEvent) => void): Subscription\n (eventName: 'audioStarted', listener: (event: AudioStartedEvent) => void): Subscription\n (eventName: 'audioEnd', listener: (event: AudioEndEvent) => void): Subscription\n (eventName: 'error', listener: (event: ErrorEvent) => void): Subscription\n },\n\n /**\n * Listen for amplitude events (for waveform visualization)\n *\n * @param listener Event listener callback\n * @returns Subscription object with remove() method\n */\n addAmplitudeListener: (\n listener: (event: AmplitudeEvent) => void\n ): Subscription => (emitter as any).addListener('amplitude', listener),\n\n /**\n * Listen for error events\n *\n * @param listener Event listener callback\n * @returns Subscription object with remove() method\n */\n addErrorListener: (\n listener: (event: ErrorEvent) => void\n ): Subscription => (emitter as any).addListener('error', listener),\n}\n"]}
1
+ {"version":3,"file":"OpuslibModule.js","sourceRoot":"","sources":["../src/OpuslibModule.ts"],"names":[],"mappings":"AAAA,OAAO,EAAgB,mBAAmB,EAAE,YAAY,EAAE,MAAM,MAAM,CAAA;AA+BtE,kCAAkC;AAClC,MAAM,aAAa,GAAG,mBAAmB,CAAoB,SAAS,CAAC,CAAA;AAEvE,+CAA+C;AAC/C,MAAM,OAAO,GAAG,IAAI,YAAY,CAAC,aAAoB,CAAC,CAAA;AAEtD;;;;;GAKG;AACH,eAAe;IACb;;;;;;;;;;;;;;;OAeG;IACH,cAAc,EAAE,CAAC,MAAmB,EAAE,EAAE,CAAC,aAAa,CAAC,cAAc,CAAC,MAAM,CAAC;IAE7E;;OAEG;IACH,aAAa,EAAE,GAAG,EAAE,CAAC,aAAa,CAAC,aAAa,EAAE;IAElD;;OAEG;IACH,cAAc,EAAE,GAAG,EAAE,CAAC,aAAa,CAAC,cAAc,EAAE;IAEpD;;OAEG;IACH,eAAe,EAAE,GAAG,EAAE,CAAC,aAAa,CAAC,eAAe,EAAE;IAEtD;;;;;;;;;;;;;;;;;;;;;;;;;OAyBG;IACH,WAAW,EAAE,CAAC,CACZ,SAA6E,EAC7E,QAA4G,EAC9F,EAAE,CAAE,OAAe,CAAC,WAAW,CAAC,SAAS,EAAE,QAAQ,CAAC,CAMnE;IAED;;;;;OAKG;IACH,oBAAoB,EAAE,CACpB,QAAyC,EAC3B,EAAE,CAAE,OAAe,CAAC,WAAW,CAAC,WAAW,EAAE,QAAQ,CAAC;IAEtE;;;;;OAKG;IACH,gBAAgB,EAAE,CAChB,QAAqC,EACvB,EAAE,CAAE,OAAe,CAAC,WAAW,CAAC,OAAO,EAAE,QAAQ,CAAC;CACnE,CAAA","sourcesContent":["import { NativeModule, requireNativeModule, EventEmitter } from 'expo'\nimport type { AudioConfig, AudioChunkEvent, AmplitudeEvent, AudioStartedEvent, AudioEndEvent, ErrorEvent, Subscription } from './Opuslib.types'\n\n/**\n * Opuslib Native Module Interface\n *\n * Provides native audio capture and Opus 1.6 encoding with DRED support\n */\ndeclare class OpuslibModuleType extends NativeModule {\n /**\n * Start audio streaming with Opus encoding\n * @param config Audio configuration\n */\n startStreaming(config: AudioConfig): Promise<void>\n\n /**\n * Stop audio streaming\n */\n stopStreaming(): Promise<void>\n\n /**\n * Pause audio streaming (keeps resources allocated)\n */\n pauseStreaming(): void\n\n /**\n * Resume audio streaming\n */\n resumeStreaming(): void\n}\n\n// Load the native module from JSI\nconst OpuslibModule = requireNativeModule<OpuslibModuleType>('Opuslib')\n\n// Create event emitter for listening to events\nconst emitter = new EventEmitter(OpuslibModule as any)\n\n/**\n * Opuslib - Opus 
1.6 Audio Encoding with DRED Support\n *\n * This module provides real-time audio capture and Opus 1.6 encoding\n * with Deep Redundancy (DRED) for improved quality on lossy networks.\n */\nexport default {\n /**\n * Start audio streaming with Opus encoding\n *\n * @param config Audio configuration\n * @example\n * ```ts\n * await Opuslib.startStreaming({\n * sampleRate: 16000,\n * channels: 1,\n * bitrate: 24000,\n * frameSize: 20,\n * framesPerCallback: 5, // batch 5 independent Opus packets per event\n * dredDuration: 100, // Enable 100ms DRED recovery\n * })\n * ```\n */\n startStreaming: (config: AudioConfig) => OpuslibModule.startStreaming(config),\n\n /**\n * Stop audio streaming and release resources\n */\n stopStreaming: () => OpuslibModule.stopStreaming(),\n\n /**\n * Pause audio streaming (keeps resources allocated)\n */\n pauseStreaming: () => OpuslibModule.pauseStreaming(),\n\n /**\n * Resume audio streaming\n */\n resumeStreaming: () => OpuslibModule.resumeStreaming(),\n\n /**\n * Listen for events (audioChunk, amplitude, or error)\n *\n * @param eventName Event type to listen for\n * @param listener Event listener callback\n * @returns Subscription object with remove() method\n * @example\n * ```ts\n * // Listen for audio chunks\n * const subscription = Opuslib.addListener('audioChunk', (event) => {\n * for (const frame of event.frames) {\n * console.log('Opus packet:', frame.data.byteLength, 'bytes')\n * websocket.send(frame.data)\n * }\n * })\n *\n * // Listen for errors\n * const errorSub = Opuslib.addListener('error', (event) => {\n * console.error('Error:', event.message)\n * })\n *\n * // Later: unsubscribe\n * subscription.remove()\n * errorSub.remove()\n * ```\n */\n addListener: ((\n eventName: 'audioChunk' | 'amplitude' | 'audioStarted' | 'audioEnd' | 'error',\n listener: (event: AudioChunkEvent | AmplitudeEvent | AudioStartedEvent | AudioEndEvent | ErrorEvent) => void\n ): Subscription => (emitter as any).addListener(eventName, listener)) 
as {\n (eventName: 'audioChunk', listener: (event: AudioChunkEvent) => void): Subscription\n (eventName: 'amplitude', listener: (event: AmplitudeEvent) => void): Subscription\n (eventName: 'audioStarted', listener: (event: AudioStartedEvent) => void): Subscription\n (eventName: 'audioEnd', listener: (event: AudioEndEvent) => void): Subscription\n (eventName: 'error', listener: (event: ErrorEvent) => void): Subscription\n },\n\n /**\n * Listen for amplitude events (for waveform visualization)\n *\n * @param listener Event listener callback\n * @returns Subscription object with remove() method\n */\n addAmplitudeListener: (\n listener: (event: AmplitudeEvent) => void\n ): Subscription => (emitter as any).addListener('amplitude', listener),\n\n /**\n * Listen for error events\n *\n * @param listener Event listener callback\n * @returns Subscription object with remove() method\n */\n addErrorListener: (\n listener: (event: ErrorEvent) => void\n ): Subscription => (emitter as any).addListener('error', listener),\n}\n"]}
package/build/index.d.ts CHANGED
@@ -14,14 +14,15 @@
14
14
  * channels: 1,
15
15
  * bitrate: 24000,
16
16
  * frameSize: 20,
17
- * packetDuration: 20,
17
+ * framesPerCallback: 5, // batch 5 independent Opus frames per event
18
18
  * dredDuration: 100, // 100ms DRED recovery
19
19
  * })
20
20
  *
21
21
  * // Listen for Opus packets
22
22
  * Opuslib.addListener('audioChunk', (event) => {
23
- * // Send to backend
24
- * websocket.send(event.data)
23
+ * for (const frame of event.frames) {
24
+ * websocket.send(frame.data)
25
+ * }
25
26
  * })
26
27
  *
27
28
  * // Stop streaming
@@ -1 +1 @@
1
- {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GA6BG;AACH,OAAO,EAAE,OAAO,EAAE,MAAM,iBAAiB,CAAA;AACzC,cAAc,iBAAiB,CAAA"}
1
+ {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GA8BG;AACH,OAAO,EAAE,OAAO,EAAE,MAAM,iBAAiB,CAAA;AACzC,cAAc,iBAAiB,CAAA"}
package/build/index.js CHANGED
@@ -14,14 +14,15 @@
14
14
  * channels: 1,
15
15
  * bitrate: 24000,
16
16
  * frameSize: 20,
17
- * packetDuration: 20,
17
+ * framesPerCallback: 5, // batch 5 independent Opus frames per event
18
18
  * dredDuration: 100, // 100ms DRED recovery
19
19
  * })
20
20
  *
21
21
  * // Listen for Opus packets
22
22
  * Opuslib.addListener('audioChunk', (event) => {
23
- * // Send to backend
24
- * websocket.send(event.data)
23
+ * for (const frame of event.frames) {
24
+ * websocket.send(frame.data)
25
+ * }
25
26
  * })
26
27
  *
27
28
  * // Stop streaming
@@ -1 +1 @@
1
- {"version":3,"file":"index.js","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GA6BG;AACH,OAAO,EAAE,OAAO,EAAE,MAAM,iBAAiB,CAAA;AACzC,cAAc,iBAAiB,CAAA","sourcesContent":["/**\n * Opuslib - Opus 1.6 Audio Encoding with DRED Support\n *\n * Native audio capture and Opus 1.6 encoding for React Native/Expo\n * with Deep Redundancy (DRED) for improved quality on lossy networks.\n *\n * @example\n * ```ts\n * import Opuslib from 'opuslib'\n *\n * // Start streaming with DRED enabled\n * await Opuslib.startStreaming({\n * sampleRate: 16000,\n * channels: 1,\n * bitrate: 24000,\n * frameSize: 20,\n * packetDuration: 20,\n * dredDuration: 100, // 100ms DRED recovery\n * })\n *\n * // Listen for Opus packets\n * Opuslib.addListener('audioChunk', (event) => {\n * // Send to backend\n * websocket.send(event.data)\n * })\n *\n * // Stop streaming\n * await Opuslib.stopStreaming()\n * ```\n */\nexport { default } from './OpuslibModule'\nexport * from './Opuslib.types'\n"]}
1
+ {"version":3,"file":"index.js","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GA8BG;AACH,OAAO,EAAE,OAAO,EAAE,MAAM,iBAAiB,CAAA;AACzC,cAAc,iBAAiB,CAAA","sourcesContent":["/**\n * Opuslib - Opus 1.6 Audio Encoding with DRED Support\n *\n * Native audio capture and Opus 1.6 encoding for React Native/Expo\n * with Deep Redundancy (DRED) for improved quality on lossy networks.\n *\n * @example\n * ```ts\n * import Opuslib from 'opuslib'\n *\n * // Start streaming with DRED enabled\n * await Opuslib.startStreaming({\n * sampleRate: 16000,\n * channels: 1,\n * bitrate: 24000,\n * frameSize: 20,\n * framesPerCallback: 5, // batch 5 independent Opus frames per event\n * dredDuration: 100, // 100ms DRED recovery\n * })\n *\n * // Listen for Opus packets\n * Opuslib.addListener('audioChunk', (event) => {\n * for (const frame of event.frames) {\n * websocket.send(frame.data)\n * }\n * })\n *\n * // Stop streaming\n * await Opuslib.stopStreaming()\n * ```\n */\nexport { default } from './OpuslibModule'\nexport * from './Opuslib.types'\n"]}
@@ -29,7 +29,7 @@ class AudioEngineManager {
29
29
  private var loggedFirstBuffer = false
30
30
 
31
31
  // Event callbacks
32
- private var onAudioChunk: ((Data, Double, Int, Float) -> Void)?
32
+ private var onAudioChunk: (([EncodedFrame], Double, Int, Double, Int) -> Void)?
33
33
  private var onStarted: ((_ timestamp: Double, _ sampleRate: Int, _ channels: Int, _ bitrate: Int, _ frameSize: Double, _ preSkip: Int) -> Void)?
34
34
  private var onEnd: ((_ timestamp: Double, _ totalDuration: Double, _ totalPackets: Int) -> Void)?
35
35
  private var onAmplitude: ((Float, Float, Double) -> Void)?
@@ -59,8 +59,8 @@ class AudioEngineManager {
59
59
 
60
60
  // Create and start AudioProcessor (encoding thread)
61
61
  let proc = AudioProcessor(config: config)
62
- proc.setOnAudioChunk { [weak self] data, timestamp, seq, level in
63
- self?.onAudioChunk?(data, timestamp, seq, level)
62
+ proc.setOnAudioChunk { [weak self] frames, timestamp, seq, duration, frameCount in
63
+ self?.onAudioChunk?(frames, timestamp, seq, duration, frameCount)
64
64
  }
65
65
  proc.setOnStarted { [weak self] timestamp, sampleRate, channels, bitrate, frameSize, preSkip in
66
66
  self?.onStarted?(timestamp, sampleRate, channels, bitrate, frameSize, preSkip)
@@ -159,7 +159,7 @@ class AudioEngineManager {
159
159
 
160
160
  // MARK: - Event Handlers
161
161
 
162
- func setOnAudioChunk(_ callback: @escaping (Data, Double, Int, Float) -> Void) {
162
+ func setOnAudioChunk(_ callback: @escaping ([EncodedFrame], Double, Int, Double, Int) -> Void) {
163
163
  self.onAudioChunk = callback
164
164
  }
165
165
 
@@ -11,6 +11,12 @@ import Foundation
11
11
  * - audioStarted/audioEnd events are emitted from the encoding queue,
12
12
  * so preSkip/sequenceNumber are read without cross-thread risk
13
13
  */
14
+ /// A single encoded Opus frame with optional per-frame audio level
15
+ struct EncodedFrame {
16
+ let data: Data
17
+ let audioLevel: Float? // nil when enableAudioLevel is false
18
+ }
19
+
14
20
  class AudioProcessor {
15
21
  // Dedicated serial queue — equivalent to boost::asio::io_context + thread / HandlerThread
16
22
  private let queue = DispatchQueue(label: "com.opuslib.encoding", qos: .userInitiated)
@@ -19,20 +25,20 @@ class AudioProcessor {
19
25
  private var opusEncoder: OpusEncoder?
20
26
  private var pendingSamples: [Int16] = []
21
27
  private let samplesPerFrame: Int
28
+ private let framesPerPacket: Int // how many frames to batch before emitting
29
+ private var packetFrames: [EncodedFrame] = [] // independent Opus packets with per-frame level
22
30
  private var sequenceNumber: Int = 0
23
31
  private var startTime: Double = 0
24
32
 
25
- // Audio level: accumulate RMS over ~360ms window
26
- private var levelSumSquares: Double = 0.0
27
- private var levelSampleCount: Int = 0
28
- private let levelUpdateSamples: Int
29
- private var currentLevel: Float = 0.0
33
+ // Whether to compute per-frame audio level
34
+ private let enableAudioLevel: Bool
30
35
 
31
36
  // Debug file
32
37
  private var pcmFileHandle: FileHandle?
33
38
 
34
39
  // Event callbacks (all invoked on encoding queue)
35
- private var onAudioChunk: ((Data, Double, Int, Float) -> Void)?
40
+ // onAudioChunk: (frames, timestamp, sequenceNumber, duration, frameCount)
41
+ private var onAudioChunk: (([EncodedFrame], Double, Int, Double, Int) -> Void)?
36
42
  private var onStarted: ((_ timestamp: Double, _ sampleRate: Int, _ channels: Int, _ bitrate: Int, _ frameSize: Double, _ preSkip: Int) -> Void)?
37
43
  private var onEnd: ((_ timestamp: Double, _ totalDuration: Double, _ totalPackets: Int) -> Void)?
38
44
 
@@ -42,8 +48,9 @@ class AudioProcessor {
42
48
  init(config: AudioConfig) {
43
49
  self.config = config
44
50
  self.samplesPerFrame = Int(Double(config.sampleRate) * config.frameSize / 1000.0)
45
- let windowMs = config.audioLevelWindow ?? 360
46
- self.levelUpdateSamples = config.sampleRate * config.channels * windowMs / 1000
51
+ let framesPerCallback = config.framesPerCallback ?? 1
52
+ self.framesPerPacket = max(1, framesPerCallback)
53
+ self.enableAudioLevel = config.enableAudioLevel ?? false
47
54
  }
48
55
 
49
56
  // MARK: - Public API
@@ -128,7 +135,7 @@ class AudioProcessor {
128
135
 
129
136
  // MARK: - Event callbacks
130
137
 
131
- func setOnAudioChunk(_ callback: @escaping (Data, Double, Int, Float) -> Void) {
138
+ func setOnAudioChunk(_ callback: @escaping ([EncodedFrame], Double, Int, Double, Int) -> Void) {
132
139
  self.onAudioChunk = callback
133
140
  }
134
141
 
@@ -157,7 +164,7 @@ class AudioProcessor {
157
164
  }
158
165
  }
159
166
 
160
- // Encode to Opus
167
+ // Encode single frame to Opus
161
168
  var encodedPacket: Data?
162
169
  frameData.withUnsafeBufferPointer { bufferPointer in
163
170
  guard let baseAddress = bufferPointer.baseAddress else { return }
@@ -169,37 +176,45 @@ class AudioProcessor {
169
176
  continue
170
177
  }
171
178
 
172
- // Accumulate energy for RMS over ~360ms window
173
- for sample in frameData {
174
- let s = Double(sample) / 32768.0
175
- levelSumSquares += s * s
176
- }
177
- levelSampleCount += frameData.count
178
-
179
- if levelSampleCount >= levelUpdateSamples {
180
- let rms = sqrt(levelSumSquares / Double(levelSampleCount))
179
+ // Per-frame audio level (RMS dBFS → 0~1)
180
+ var frameLevel: Float? = nil
181
+ if enableAudioLevel {
182
+ var sumSquares: Double = 0.0
183
+ for sample in frameData {
184
+ let s = Double(sample) / 32768.0
185
+ sumSquares += s * s
186
+ }
187
+ let rms = sqrt(sumSquares / Double(frameData.count))
181
188
  let dB = 20.0 * log10(max(rms, 1e-10))
182
189
  let dbFloor = -35.0
183
190
  let dbCeiling = -6.0
184
- currentLevel = Float(max(0.0, min(1.0, (dB - dbFloor) / (dbCeiling - dbFloor))))
185
- levelSumSquares = 0.0
186
- levelSampleCount = 0
191
+ frameLevel = Float(max(0.0, min(1.0, (dB - dbFloor) / (dbCeiling - dbFloor))))
187
192
  }
188
193
 
189
- let timestampMs = Date().timeIntervalSince1970 * 1000
190
- onAudioChunk?(opusData, timestampMs, sequenceNumber, currentLevel)
191
- sequenceNumber += 1
194
+ // Accumulate encoded frame as independent packet (no byte concatenation)
195
+ packetFrames.append(EncodedFrame(data: opusData, audioLevel: frameLevel))
196
+
197
+ // Emit when we have enough frames (framesPerCallback)
198
+ if packetFrames.count >= framesPerPacket {
199
+ let timestampMs = Date().timeIntervalSince1970 * 1000
200
+ let frameCount = packetFrames.count
201
+ let duration = Double(frameCount) * config.frameSize
202
+ onAudioChunk?(packetFrames, timestampMs, sequenceNumber, duration, frameCount)
203
+ sequenceNumber += 1
204
+ packetFrames.removeAll()
205
+ }
192
206
  }
193
207
  }
194
208
 
195
209
  private func _flushRemainingFrames() {
196
210
  guard let opusEncoder = opusEncoder else { return }
197
- guard !pendingSamples.isEmpty else { return }
198
211
 
199
- if pendingSamples.count < samplesPerFrame {
212
+ // Pad remaining PCM with silence to fill the last frame
213
+ if !pendingSamples.isEmpty && pendingSamples.count < samplesPerFrame {
200
214
  pendingSamples.append(contentsOf: [Int16](repeating: 0, count: samplesPerFrame - pendingSamples.count))
201
215
  }
202
216
 
217
+ // Encode remaining frames
203
218
  while pendingSamples.count >= samplesPerFrame {
204
219
  let frameData = Array(pendingSamples.prefix(samplesPerFrame))
205
220
  pendingSamples.removeFirst(samplesPerFrame)
@@ -211,10 +226,18 @@ class AudioProcessor {
211
226
  }
212
227
 
213
228
  guard let opusData = encodedPacket, !opusData.isEmpty else { continue }
229
+ // Flush frames get level 0 (silence-padded)
230
+ packetFrames.append(EncodedFrame(data: opusData, audioLevel: enableAudioLevel ? 0.0 : nil))
231
+ }
214
232
 
233
+ // Flush any remaining frames (even if less than framesPerPacket)
234
+ if !packetFrames.isEmpty {
215
235
  let timestampMs = Date().timeIntervalSince1970 * 1000
216
- onAudioChunk?(opusData, timestampMs, sequenceNumber, currentLevel)
236
+ let frameCount = packetFrames.count
237
+ let duration = Double(frameCount) * config.frameSize
238
+ onAudioChunk?(packetFrames, timestampMs, sequenceNumber, duration, frameCount)
217
239
  sequenceNumber += 1
240
+ packetFrames.removeAll()
218
241
  }
219
242
  }
220
243
  }
@@ -58,12 +58,21 @@ public class OpuslibModule: Module {
58
58
  print("[OpuslibModule] ✅ AudioEngineManager created")
59
59
 
60
60
  // Set up event callbacks — audioStarted/audioEnd come from encoding thread
61
- manager.setOnAudioChunk { [weak self] data, timestamp, sequenceNumber, audioLevel in
61
+ manager.setOnAudioChunk { [weak self] frames, timestamp, sequenceNumber, duration, frameCount in
62
+ // Each frame is an independent Opus packet wrapped in { data, audioLevel? }
63
+ let frameObjects: [[String: Any]] = frames.map { frame in
64
+ var obj: [String: Any] = ["data": frame.data]
65
+ if let level = frame.audioLevel {
66
+ obj["audioLevel"] = level
67
+ }
68
+ return obj
69
+ }
62
70
  self?.sendEvent("audioChunk", [
63
- "data": data,
71
+ "frames": frameObjects,
64
72
  "timestamp": timestamp,
65
73
  "sequenceNumber": sequenceNumber,
66
- "audioLevel": audioLevel
74
+ "duration": duration,
75
+ "frameCount": frameCount
67
76
  ])
68
77
  }
69
78
 
@@ -184,11 +193,11 @@ struct AudioConfig: Record {
184
193
  @Field var channels: Int = 1
185
194
  @Field var bitrate: Int = 24000
186
195
  @Field var frameSize: Double = 20.0
187
- @Field var packetDuration: Double = 20.0
196
+ @Field var framesPerCallback: Int? = 1
188
197
  @Field var dredDuration: Int? = 100 // NEW: DRED recovery duration in ms
189
198
  @Field var enableAmplitudeEvents: Bool? = false
190
199
  @Field var amplitudeEventInterval: Double? = 16.0
191
- @Field var audioLevelWindow: Int? = 360 // RMS window duration in ms (default 360)
200
+ @Field var enableAudioLevel: Bool? = false // Enable per-frame audio level calculation
192
201
  @Field var saveDebugAudio: Bool? = false
193
202
  }
194
203
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@imcooder/opuslib",
3
- "version": "0.2.1",
3
+ "version": "2.3.1",
4
4
  "description": "Opus 1.6 audio encoding for React Native and Expo with audio level metering and lifecycle events. Forked from Scdales/opuslib.",
5
5
  "main": "build/index.js",
6
6
  "types": "build/index.d.ts",
@@ -10,32 +10,44 @@ export interface AudioConfig {
10
10
  bitrate: number
11
11
  /** Frame duration in milliseconds (2.5, 5, 10, 20, 40, 60) */
12
12
  frameSize: number
13
- /** Packet duration in milliseconds (typically 20-100ms) */
14
- packetDuration: number
13
+ /** Number of Opus frames per callback (default 1). Multiple frames are returned as independent packets in frames[], reducing JS bridge calls */
14
+ framesPerCallback?: number
15
15
  /** DRED recovery duration in milliseconds (0-100, default 100) - NEW in Opus 1.6 */
16
16
  dredDuration?: number
17
17
  /** Enable amplitude events for waveform visualization */
18
18
  enableAmplitudeEvents?: boolean
19
19
  /** Amplitude event interval in milliseconds (default 16) */
20
20
  amplitudeEventInterval?: number
21
- /** Audio level RMS window duration in milliseconds (default 360) */
22
- audioLevelWindow?: number
21
+ /** Enable per-frame audio level calculation (default false). When enabled, each OpusFrame includes audioLevel */
22
+ enableAudioLevel?: boolean
23
23
  /** Save debug PCM audio to file (development only) */
24
24
  saveDebugAudio?: boolean
25
25
  }
26
26
 
27
+ /**
28
+ * A single Opus frame — one complete opus_encode() output with its own TOC byte
29
+ */
30
+ export interface OpusFrame {
31
+ /** Opus-encoded packet data (independent, decodable) */
32
+ data: ArrayBuffer
33
+ /** Per-frame audio level 0.0~1.0 (only present when enableAudioLevel is true) */
34
+ audioLevel?: number
35
+ }
36
+
27
37
  /**
28
38
  * Audio chunk event payload (Opus-encoded data)
29
39
  */
30
40
  export interface AudioChunkEvent {
31
- /** Opus-encoded audio data as ArrayBuffer */
32
- data: ArrayBuffer
41
+ /** Array of independent Opus frames. Each frame is a complete opus_encode() output, decodable on its own */
42
+ frames: OpusFrame[]
33
43
  /** Timestamp in milliseconds */
34
44
  timestamp: number
35
- /** Sequence number (increments with each packet) */
45
+ /** Sequence number (increments with each callback) */
36
46
  sequenceNumber: number
37
- /** Audio level normalized to 0.0~1.0 (mapped from dBFS, 0 = silence, 1 = loud) */
38
- audioLevel: number
47
+ /** Duration of all frames in milliseconds (frameSize * frameCount) */
48
+ duration: number
49
+ /** Number of Opus frames in this callback (= frames.length) */
50
+ frameCount: number
39
51
  }
40
52
 
41
53
  /**
@@ -53,7 +53,7 @@ export default {
53
53
  * channels: 1,
54
54
  * bitrate: 24000,
55
55
  * frameSize: 20,
56
- * packetDuration: 20,
56
+ * framesPerCallback: 5, // batch 5 independent Opus packets per event
57
57
  * dredDuration: 100, // Enable 100ms DRED recovery
58
58
  * })
59
59
  * ```
@@ -85,8 +85,10 @@ export default {
85
85
  * ```ts
86
86
  * // Listen for audio chunks
87
87
  * const subscription = Opuslib.addListener('audioChunk', (event) => {
88
- * console.log('Received Opus packet:', event.data.byteLength, 'bytes')
89
- * websocket.send(event.data)
88
+ * for (const frame of event.frames) {
89
+ * console.log('Opus packet:', frame.data.byteLength, 'bytes')
90
+ * websocket.send(frame.data)
91
+ * }
90
92
  * })
91
93
  *
92
94
  * // Listen for errors
package/src/index.ts CHANGED
@@ -14,14 +14,15 @@
14
14
  * channels: 1,
15
15
  * bitrate: 24000,
16
16
  * frameSize: 20,
17
- * packetDuration: 20,
17
+ * framesPerCallback: 5, // batch 5 independent Opus frames per event
18
18
  * dredDuration: 100, // 100ms DRED recovery
19
19
  * })
20
20
  *
21
21
  * // Listen for Opus packets
22
22
  * Opuslib.addListener('audioChunk', (event) => {
23
- * // Send to backend
24
- * websocket.send(event.data)
23
+ * for (const frame of event.frames) {
24
+ * websocket.send(frame.data)
25
+ * }
25
26
  * })
26
27
  *
27
28
  * // Stop streaming