@imcooder/opuslib 2.2.1 → 2.3.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -29,36 +29,37 @@
29
29
  > });
30
30
  > ```
31
31
  >
32
- > **`packetDuration` now works (was ignored in original)**
33
- > - In the original library, `packetDuration` was accepted but had no effect. Now it controls how many Opus frames are batched before emitting an `audioChunk` event, reducing JS bridge calls.
34
- > - Example: `frameSize=20ms, packetDuration=100ms` → 5 frames encoded individually, batched into one `audioChunk` event (80% fewer bridge calls).
32
+ > **`framesPerCallback` batch multiple frames to reduce data transfer overhead**
33
  + > - Multiple independently-encoded Opus frames can be batched into a single `audioChunk` callback via `framesPerCallback`, reducing JS bridge calls and data transfer overhead. Each frame in `frames[]` is a complete, independently decodable Opus packet (with its own TOC byte) — no illegal byte concatenation.
34
+ > - Example: `frameSize=20ms, framesPerCallback=5` → 5 frames encoded individually, returned as `frames: OpusFrame[]` in one `audioChunk` event (80% fewer bridge calls).
35
35
  >
36
36
  > **New `audioChunk` fields**
37
- > - **`audioLevel`** — Normalized audio level (0.0~1.0), computed via configurable RMS sliding window (default 360ms) with dBFS-to-linear mapping (IEC 61606).
38
- > - **`duration`** — Duration of this packet in milliseconds (`frameSize * frameCount`).
39
- > - **`frameCount`** — Number of Opus frames contained in this packet.
37
+ > - **`frames`** — Array of `OpusFrame` objects. Each frame is an independent, decodable Opus packet (with its own TOC byte). No illegal byte concatenation.
38
+ > - **`OpusFrame.audioLevel`** — Per-frame normalized audio level (0.0~1.0), computed via RMS with dBFS-to-linear mapping. Only present when `enableAudioLevel: true`. Consumers can average neighboring frames for smoothing.
39
+ > - **`duration`** — Duration of all frames in milliseconds (`frameSize * frameCount`).
40
+ > - **`frameCount`** — Number of Opus frames in this callback (= `frames.length`).
40
41
  > - **`preSkip`** — (in `audioStarted` event) Opus encoder lookahead in samples. Decoders should skip this many samples at the beginning of the stream.
41
42
  > ```typescript
42
43
  > Opuslib.addListener('audioChunk', (event) => {
43
- > // event.data: ArrayBuffer (batched Opus encoded frames)
44
+ > // event.frames: OpusFrame[] (independent Opus packets)
45
+ > // each frame: { data: ArrayBuffer, audioLevel?: number }
44
46
  > // event.timestamp: 1711000000100 (ms since epoch)
45
- > // event.sequenceNumber: 5 (packet counter)
46
- > // event.audioLevel: 0.72 (0=silence, 1=loud)
47
+ > // event.sequenceNumber: 5 (callback counter)
47
48
  > // event.duration: 100 (ms, = frameSize * frameCount)
48
- > // event.frameCount: 5 (number of Opus frames in this packet)
49
+ > // event.frameCount: 5 (= frames.length)
49
50
  > });
50
51
  > ```
51
52
  >
52
53
  > **New Config Options**
53
- > - **`audioLevelWindow`** — RMS window duration in milliseconds for audio level calculation (default: 360ms). Shorter = more responsive, longer = smoother.
54
+ > - **`enableAudioLevel`** — Enable per-frame audio level calculation (default: false). When enabled, each `OpusFrame` includes `audioLevel` (0.0~1.0). Disabled by default to save computation.
54
55
  > ```typescript
55
56
  > await Opuslib.startStreaming({
56
57
  > sampleRate: 16000,
57
58
  > channels: 1,
58
59
  > bitrate: 24000,
59
60
  > frameSize: 20,
60
- > packetDuration: 100, // batch 5 frames per event (was ignored before)
61
- > audioLevelWindow: 200, // 200ms RMS window (default: 360ms)
61
+ > framesPerCallback: 5, // batch 5 independent Opus frames per event
62
+ > enableAudioLevel: true, // enable per-frame audio level
62
63
  > });
63
64
  > ```
64
65
 
@@ -74,7 +75,7 @@ Real-time audio capture and encoding using the latest Opus 1.6 codec, built from
74
75
  - **Low Latency** - Real-time encoding with minimal overhead
75
76
  - **Native Performance** - Direct C/C++ integration, no JavaScript encoding
76
77
  - **Thread-safe Encoding** - Dedicated encoding thread, capture thread never blocked
77
- - **Audio Level Metering** - Real-time 0~1 audio level in each audio chunk (360ms RMS window)
78
+ - **Audio Level Metering** - Optional per-frame 0~1 audio level via RMS (enable with `enableAudioLevel: true`)
78
79
  - **Lifecycle Events** - `audioStarted` / `audioEnd` events with session metadata
79
80
  - **High Quality** - 24kbps achieves excellent speech quality
80
81
  - **Cross-Platform** - iOS and Android with a consistent API
@@ -162,10 +163,11 @@ async function startRecording() {
162
163
 
163
164
  // Listen for encoded audio chunks
164
165
  const subscription = Opuslib.addListener('audioChunk', (event) => {
165
- const { data, timestamp, sequenceNumber, audioLevel } = event;
166
- console.log(`Opus packet: ${data.byteLength} bytes, level=${audioLevel.toFixed(2)}`);
167
-
168
- // Send to your backend, save to file, etc.
166
+ const { frames, timestamp, sequenceNumber } = event;
167
+ for (const frame of frames) {
168
+ console.log(`Opus packet: ${frame.data.byteLength} bytes, level=${frame.audioLevel?.toFixed(2) ?? 'N/A'}`);
169
+ // Send each independent Opus packet to your backend, save to file, etc.
170
+ }
169
171
  });
170
172
 
171
173
  // Start streaming
@@ -174,7 +176,7 @@ async function startRecording() {
174
176
  channels: 1, // Mono
175
177
  bitrate: 24000, // 24 kbps
176
178
  frameSize: 20, // 20ms frames
177
- packetDuration: 20, // 20ms packets
179
+ framesPerCallback: 1, // 1 frame per callback (default)
178
180
  });
179
181
  }
180
182
 
@@ -202,9 +204,9 @@ interface AudioConfig {
202
204
  channels: number; // Number of channels (1 = mono, 2 = stereo)
203
205
  bitrate: number; // Target bitrate in bits/second (e.g., 24000)
204
206
  frameSize: number; // Frame duration in ms (2.5, 5, 10, 20, 40, 60)
205
- packetDuration: number; // Packet duration in ms (multiple of frameSize)
207
+ framesPerCallback?: number; // Frames per callback (default 1), batching reduces bridge calls
206
208
  dredDuration?: number; // Reserved for future DRED support (default: 0)
207
- audioLevelWindow?: number; // RMS window duration in ms for audioLevel (default: 360)
209
+ enableAudioLevel?: boolean; // Enable per-frame audio level (default: false)
208
210
  enableAmplitudeEvents?: boolean; // Enable amplitude monitoring (default: false)
209
211
  amplitudeEventInterval?: number; // Amplitude update interval in ms (default: 16)
210
212
  }
@@ -218,7 +220,7 @@ interface AudioConfig {
218
220
  channels: 1, // Mono - sufficient for voice
219
221
  bitrate: 24000, // 24 kbps - excellent quality
220
222
  frameSize: 20, // 20ms - standard for real-time
221
- packetDuration: 20, // 20ms - low latency
223
+ framesPerCallback: 1, // 1 frame per callback - low latency
222
224
  }
223
225
  ```
224
226
 
@@ -277,23 +279,30 @@ Emitted when an encoded Opus packet is ready.
277
279
 
278
280
  ```typescript
279
281
  Opuslib.addListener('audioChunk', (event: AudioChunkEvent) => {
280
- // event.data: ArrayBuffer - Batched Opus frames (ready to send/save)
281
- // event.audioLevel: number - Audio level 0.0~1.0 (0=silence, 1=loud)
282
+ // event.frames: OpusFrame[] - Independent Opus packets (each decodable on its own)
283
+ // frame.audioLevel?: number - Per-frame level 0.0~1.0 (when enableAudioLevel is true)
282
284
  // event.duration: number - Duration in ms (frameSize * frameCount)
283
- // event.frameCount: number - Number of Opus frames in this packet
285
+ // event.frameCount: number - Number of Opus frames (= frames.length)
286
+ for (const frame of event.frames) {
287
+ websocket.send(frame.data); // each frame is an independent Opus packet
288
+ }
284
289
  });
285
290
  ```
286
291
 
287
292
  **Event Data:**
288
293
 
289
294
  ```typescript
295
+ interface OpusFrame {
296
+ data: ArrayBuffer; // Independent Opus packet (one opus_encode() output with its own TOC byte)
297
+ audioLevel?: number; // Per-frame audio level 0.0~1.0 (only when enableAudioLevel is true)
298
+ }
299
+
290
300
  interface AudioChunkEvent {
291
- data: ArrayBuffer; // Batched Opus-encoded frames
301
+ frames: OpusFrame[]; // Array of independent Opus packets
292
302
  timestamp: number; // Milliseconds since epoch
293
- sequenceNumber: number; // Incrementing packet counter
294
- audioLevel: number; // Audio level 0.0~1.0 (360ms RMS window, 0=silence, 1=loud)
295
- duration: number; // Packet duration in ms (frameSize * frameCount)
296
- frameCount: number; // Number of Opus frames in this packet
303
+ sequenceNumber: number; // Incrementing callback counter
304
+ duration: number; // Total duration in ms (frameSize * frameCount)
305
+ frameCount: number; // Number of Opus frames (= frames.length)
297
306
  }
298
307
  ```
299
308
 
@@ -359,7 +368,7 @@ Capture Thread Encoding Thread (serial queue)
359
368
  |---- post(samples) ----------->| pendingSamples.append(samples)
360
369
  | | while (enough samples) {
361
370
  | | opus_encode()
362
- | | audioLevel calc (360ms RMS)
371
+ | | per-frame audioLevel (if enabled)
363
372
  | | emit audioChunk event
364
373
  | | }
365
374
  | |
@@ -7,6 +7,14 @@ import java.io.File
7
7
  import java.io.FileOutputStream
8
8
  import java.util.concurrent.CountDownLatch
9
9
 
10
+ /**
11
+ * A single encoded Opus frame with optional per-frame audio level.
12
+ */
13
+ data class EncodedFrame(
14
+ val data: ByteArray,
15
+ val audioLevel: Float? // null when enableAudioLevel is false
16
+ )
17
+
10
18
  /**
11
19
  * AudioProcessor - Dedicated encoding thread for Opus encoding and dispatch.
12
20
  *
@@ -31,24 +39,20 @@ class AudioProcessor(private val config: AudioConfig) {
31
39
  private var opusEncoder: OpusEncoder? = null
32
40
  private val pendingSamples = mutableListOf<Short>()
33
41
  private val samplesPerFrame: Int = (config.sampleRate * config.frameSize / 1000.0).toInt()
34
- private val framesPerPacket: Int = Math.max(1, (config.packetDuration / config.frameSize).toInt())
35
- private var packetBuffer = java.io.ByteArrayOutputStream() // accumulates encoded frames
36
- private var packetFrameCount: Int = 0
42
+ private val framesPerPacket: Int = Math.max(1, config.framesPerCallback)
43
+ private var packetFrames = mutableListOf<EncodedFrame>() // independent Opus packets with per-frame level
37
44
  private var sequenceNumber: Int = 0
38
45
  private var startTime: Double = 0.0
39
46
 
40
- // Audio level: accumulate RMS over ~360ms window
41
- private var levelSumSquares: Double = 0.0
42
- private var levelSampleCount: Int = 0
43
- private val levelUpdateSamples: Int = config.sampleRate * config.channels * config.audioLevelWindow / 1000
44
- private var currentLevel: Float = 0.0f
47
+ // Whether to compute per-frame audio level
48
+ private val enableAudioLevel: Boolean = config.enableAudioLevel
45
49
 
46
50
  // Debug file output
47
51
  private var pcmFileOutputStream: FileOutputStream? = null
48
52
 
49
53
  // Event callbacks (all invoked on encoding thread)
50
- // onAudioChunk: (data, timestamp, sequenceNumber, audioLevel, duration, frameCount)
51
- private var onAudioChunk: ((ByteArray, Double, Int, Float, Double, Int) -> Unit)? = null
54
+ // onAudioChunk: (frames, timestamp, sequenceNumber, duration, frameCount)
55
+ private var onAudioChunk: ((List<EncodedFrame>, Double, Int, Double, Int) -> Unit)? = null
52
56
  private var onStarted: ((timestamp: Double, sampleRate: Int, channels: Int, bitrate: Int, frameSize: Double, preSkip: Int) -> Unit)? = null
53
57
  private var onEnd: ((timestamp: Double, totalDuration: Double, totalPackets: Int) -> Unit)? = null
54
58
 
@@ -142,7 +146,7 @@ class AudioProcessor(private val config: AudioConfig) {
142
146
 
143
147
  // MARK: - Event callback setters
144
148
 
145
- fun setOnAudioChunk(callback: (ByteArray, Double, Int, Float, Double, Int) -> Unit) {
149
+ fun setOnAudioChunk(callback: (List<EncodedFrame>, Double, Int, Double, Int) -> Unit) {
146
150
  this.onAudioChunk = callback
147
151
  }
148
152
 
@@ -196,35 +200,32 @@ class AudioProcessor(private val config: AudioConfig) {
196
200
  continue
197
201
  }
198
202
 
199
- // Accumulate encoded frame into packet buffer
200
- packetBuffer.write(opusData)
201
- packetFrameCount++
202
-
203
- // Accumulate energy for RMS level
204
- for (sample in frameData) {
205
- val s = sample.toDouble() / 32768.0
206
- levelSumSquares += s * s
207
- }
208
- levelSampleCount += frameData.size
209
-
210
- if (levelSampleCount >= levelUpdateSamples) {
211
- val rms = Math.sqrt(levelSumSquares / levelSampleCount)
203
+ // Per-frame audio level (RMS dBFS → 0~1)
204
+ var frameLevel: Float? = null
205
+ if (enableAudioLevel) {
206
+ var sumSquares = 0.0
207
+ for (sample in frameData) {
208
+ val s = sample.toDouble() / 32768.0
209
+ sumSquares += s * s
210
+ }
211
+ val rms = Math.sqrt(sumSquares / frameData.size)
212
212
  val dB = 20.0 * Math.log10(Math.max(rms, 1e-10))
213
213
  val dbFloor = -35.0
214
214
  val dbCeiling = -6.0
215
- currentLevel = Math.max(0.0, Math.min(1.0, (dB - dbFloor) / (dbCeiling - dbFloor))).toFloat()
216
- levelSumSquares = 0.0
217
- levelSampleCount = 0
215
+ frameLevel = Math.max(0.0, Math.min(1.0, (dB - dbFloor) / (dbCeiling - dbFloor))).toFloat()
218
216
  }
219
217
 
220
- // Emit when we have enough frames for one packet (packetDuration)
221
- if (packetFrameCount >= framesPerPacket) {
218
+ // Accumulate encoded frame as independent packet (no byte concatenation)
219
+ packetFrames.add(EncodedFrame(data = opusData, audioLevel = frameLevel))
220
+
221
+ // Emit when we have enough frames (framesPerCallback)
222
+ if (packetFrames.size >= framesPerPacket) {
222
223
  val timestampMs = System.currentTimeMillis().toDouble()
223
- val duration = packetFrameCount * config.frameSize
224
- onAudioChunk?.invoke(packetBuffer.toByteArray(), timestampMs, sequenceNumber, currentLevel, duration, packetFrameCount)
224
+ val frameCount = packetFrames.size
225
+ val duration = frameCount * config.frameSize
226
+ onAudioChunk?.invoke(packetFrames.toList(), timestampMs, sequenceNumber, duration, frameCount)
225
227
  sequenceNumber++
226
- packetBuffer.reset()
227
- packetFrameCount = 0
228
+ packetFrames.clear()
228
229
  }
229
230
  }
230
231
  }
@@ -253,18 +254,18 @@ class AudioProcessor(private val config: AudioConfig) {
253
254
  }
254
255
 
255
256
  if (opusData == null || opusData.isEmpty()) continue
256
- packetBuffer.write(opusData)
257
- packetFrameCount++
257
+ // Flush frames get level 0 (silence-padded)
258
+ packetFrames.add(EncodedFrame(data = opusData, audioLevel = if (enableAudioLevel) 0.0f else null))
258
259
  }
259
260
 
260
- // Flush any remaining packet buffer (even if less than framesPerPacket)
261
- if (packetBuffer.size() > 0) {
261
+ // Flush any remaining frames (even if less than framesPerPacket)
262
+ if (packetFrames.isNotEmpty()) {
262
263
  val timestampMs = System.currentTimeMillis().toDouble()
263
- val duration = packetFrameCount * config.frameSize
264
- onAudioChunk?.invoke(packetBuffer.toByteArray(), timestampMs, sequenceNumber, currentLevel, duration, packetFrameCount)
264
+ val frameCount = packetFrames.size
265
+ val duration = frameCount * config.frameSize
266
+ onAudioChunk?.invoke(packetFrames.toList(), timestampMs, sequenceNumber, duration, frameCount)
265
267
  sequenceNumber++
266
- packetBuffer.reset()
267
- packetFrameCount = 0
268
+ packetFrames.clear()
268
269
  }
269
270
  }
270
271
  }
@@ -37,7 +37,7 @@ class AudioRecordManager(
37
37
  private var loggedFirstBuffer = false
38
38
 
39
39
  // Event callbacks
40
- private var onAudioChunk: ((ByteArray, Double, Int, Float, Double, Int) -> Unit)? = null
40
+ private var onAudioChunk: ((List<EncodedFrame>, Double, Int, Double, Int) -> Unit)? = null
41
41
  private var onStarted: ((timestamp: Double, sampleRate: Int, channels: Int, bitrate: Int, frameSize: Double, preSkip: Int) -> Unit)? = null
42
42
  private var onEnd: ((timestamp: Double, totalDuration: Double, totalPackets: Int) -> Unit)? = null
43
43
  private var onAmplitude: ((Float, Float, Double) -> Unit)? = null
@@ -91,8 +91,8 @@ class AudioRecordManager(
91
91
 
92
92
  // Create and start AudioProcessor (encoding thread)
93
93
  val proc = AudioProcessor(config)
94
- proc.setOnAudioChunk { data, timestamp, seq, level, duration, frameCount ->
95
- onAudioChunk?.invoke(data, timestamp, seq, level, duration, frameCount)
94
+ proc.setOnAudioChunk { frames, timestamp, seq, duration, frameCount ->
95
+ onAudioChunk?.invoke(frames, timestamp, seq, duration, frameCount)
96
96
  }
97
97
  proc.setOnStarted { timestamp, sampleRate, channels, bitrate, frameSize, preSkip ->
98
98
  onStarted?.invoke(timestamp, sampleRate, channels, bitrate, frameSize, preSkip)
@@ -172,7 +172,7 @@ class AudioRecordManager(
172
172
  }
173
173
 
174
174
  // Event handlers
175
- fun setOnAudioChunk(callback: (ByteArray, Double, Int, Float, Double, Int) -> Unit) {
175
+ fun setOnAudioChunk(callback: (List<EncodedFrame>, Double, Int, Double, Int) -> Unit) {
176
176
  this.onAudioChunk = callback
177
177
  }
178
178
 
@@ -81,12 +81,17 @@ class OpuslibModule : Module() {
81
81
 
82
82
  // Set up event callbacks — audioStarted/audioEnd come from encoding thread
83
83
  android.util.Log.d(TAG, "🔗 Setting up event callbacks...")
84
- manager.setOnAudioChunk { data, timestamp, sequenceNumber, audioLevel, duration, frameCount ->
84
+ manager.setOnAudioChunk { frames, timestamp, sequenceNumber, duration, frameCount ->
85
+ // Each frame is an independent Opus packet wrapped in { data, audioLevel? }
86
+ val frameObjects = frames.map { frame ->
87
+ val obj = mutableMapOf<String, Any>("data" to frame.data)
88
+ frame.audioLevel?.let { obj["audioLevel"] = it }
89
+ obj
90
+ }
85
91
  sendEvent("audioChunk", mapOf(
86
- "data" to data,
92
+ "frames" to frameObjects,
87
93
  "timestamp" to timestamp,
88
94
  "sequenceNumber" to sequenceNumber,
89
- "audioLevel" to audioLevel,
90
95
  "duration" to duration,
91
96
  "frameCount" to frameCount
92
97
  ))
@@ -193,7 +198,7 @@ class AudioConfig : Record {
193
198
  var frameSize: Double = 20.0
194
199
 
195
200
  @Field
196
- var packetDuration: Double = 20.0
201
+ var framesPerCallback: Int = 1
197
202
 
198
203
  @Field
199
204
  var dredDuration: Int = 100 // NEW: DRED recovery duration in ms
@@ -205,7 +210,7 @@ class AudioConfig : Record {
205
210
  var amplitudeEventInterval: Double = 16.0
206
211
 
207
212
  @Field
208
- var audioLevelWindow: Int = 360 // RMS window duration in ms (default 360)
213
+ var enableAudioLevel: Boolean = false // Enable per-frame audio level calculation
209
214
 
210
215
  @Field
211
216
  var saveDebugAudio: Boolean = false
@@ -10,34 +10,41 @@ export interface AudioConfig {
10
10
  bitrate: number;
11
11
  /** Frame duration in milliseconds (2.5, 5, 10, 20, 40, 60) */
12
12
  frameSize: number;
13
- /** Packet duration in milliseconds (typically 20-100ms) */
14
- packetDuration: number;
13
+ /** Number of Opus frames per callback (default 1). Multiple frames are returned as independent packets in frames[], reducing JS bridge calls */
14
+ framesPerCallback?: number;
15
15
  /** DRED recovery duration in milliseconds (0-100, default 100) - NEW in Opus 1.6 */
16
16
  dredDuration?: number;
17
17
  /** Enable amplitude events for waveform visualization */
18
18
  enableAmplitudeEvents?: boolean;
19
19
  /** Amplitude event interval in milliseconds (default 16) */
20
20
  amplitudeEventInterval?: number;
21
- /** Audio level RMS window duration in milliseconds (default 360) */
22
- audioLevelWindow?: number;
21
+ /** Enable per-frame audio level calculation (default false). When enabled, each OpusFrame includes audioLevel */
22
+ enableAudioLevel?: boolean;
23
23
  /** Save debug PCM audio to file (development only) */
24
24
  saveDebugAudio?: boolean;
25
25
  }
26
+ /**
27
+ * A single Opus frame — one complete opus_encode() output with its own TOC byte
28
+ */
29
+ export interface OpusFrame {
30
+ /** Opus-encoded packet data (independent, decodable) */
31
+ data: ArrayBuffer;
32
+ /** Per-frame audio level 0.0~1.0 (only present when enableAudioLevel is true) */
33
+ audioLevel?: number;
34
+ }
26
35
  /**
27
36
  * Audio chunk event payload (Opus-encoded data)
28
37
  */
29
38
  export interface AudioChunkEvent {
30
- /** Opus-encoded audio data as ArrayBuffer */
31
- data: ArrayBuffer;
39
+ /** Array of independent Opus frames. Each frame is a complete opus_encode() output, decodable on its own */
40
+ frames: OpusFrame[];
32
41
  /** Timestamp in milliseconds */
33
42
  timestamp: number;
34
- /** Sequence number (increments with each packet) */
43
+ /** Sequence number (increments with each callback) */
35
44
  sequenceNumber: number;
36
- /** Audio level normalized to 0.0~1.0 (mapped from dBFS, 0 = silence, 1 = loud) */
37
- audioLevel: number;
38
- /** Duration of this packet in milliseconds (frameSize * frameCount) */
45
+ /** Duration of all frames in milliseconds (frameSize * frameCount) */
39
46
  duration: number;
40
- /** Number of Opus frames in this packet */
47
+ /** Number of Opus frames in this callback (= frames.length) */
41
48
  frameCount: number;
42
49
  }
43
50
  /**
@@ -1 +1 @@
1
- {"version":3,"file":"Opuslib.types.d.ts","sourceRoot":"","sources":["../src/Opuslib.types.ts"],"names":[],"mappings":"AAAA;;GAEG;AACH,MAAM,WAAW,WAAW;IAC1B,2DAA2D;IAC3D,UAAU,EAAE,MAAM,CAAA;IAClB,gDAAgD;IAChD,QAAQ,EAAE,MAAM,CAAA;IAChB,6DAA6D;IAC7D,OAAO,EAAE,MAAM,CAAA;IACf,8DAA8D;IAC9D,SAAS,EAAE,MAAM,CAAA;IACjB,2DAA2D;IAC3D,cAAc,EAAE,MAAM,CAAA;IACtB,oFAAoF;IACpF,YAAY,CAAC,EAAE,MAAM,CAAA;IACrB,yDAAyD;IACzD,qBAAqB,CAAC,EAAE,OAAO,CAAA;IAC/B,4DAA4D;IAC5D,sBAAsB,CAAC,EAAE,MAAM,CAAA;IAC/B,oEAAoE;IACpE,gBAAgB,CAAC,EAAE,MAAM,CAAA;IACzB,sDAAsD;IACtD,cAAc,CAAC,EAAE,OAAO,CAAA;CACzB;AAED;;GAEG;AACH,MAAM,WAAW,eAAe;IAC9B,6CAA6C;IAC7C,IAAI,EAAE,WAAW,CAAA;IACjB,gCAAgC;IAChC,SAAS,EAAE,MAAM,CAAA;IACjB,oDAAoD;IACpD,cAAc,EAAE,MAAM,CAAA;IACtB,kFAAkF;IAClF,UAAU,EAAE,MAAM,CAAA;IAClB,uEAAuE;IACvE,QAAQ,EAAE,MAAM,CAAA;IAChB,2CAA2C;IAC3C,UAAU,EAAE,MAAM,CAAA;CACnB;AAED;;GAEG;AACH,MAAM,WAAW,cAAc;IAC7B,6CAA6C;IAC7C,GAAG,EAAE,MAAM,CAAA;IACX,iCAAiC;IACjC,IAAI,EAAE,MAAM,CAAA;IACZ,gCAAgC;IAChC,SAAS,EAAE,MAAM,CAAA;CAClB;AAED;;;GAGG;AACH,MAAM,WAAW,iBAAiB;IAChC,uDAAuD;IACvD,SAAS,EAAE,MAAM,CAAA;IACjB,oCAAoC;IACpC,UAAU,EAAE,MAAM,CAAA;IAClB,yBAAyB;IACzB,QAAQ,EAAE,MAAM,CAAA;IAChB,wCAAwC;IACxC,OAAO,EAAE,MAAM,CAAA;IACf,qCAAqC;IACrC,SAAS,EAAE,MAAM,CAAA;IACjB,yFAAyF;IACzF,OAAO,EAAE,MAAM,CAAA;CAChB;AAED;;;GAGG;AACH,MAAM,WAAW,aAAa;IAC5B,uDAAuD;IACvD,SAAS,EAAE,MAAM,CAAA;IACjB,8DAA8D;IAC9D,aAAa,EAAE,MAAM,CAAA;IACrB,yDAAyD;IACzD,YAAY,EAAE,MAAM,CAAA;CACrB;AAED;;GAEG;AACH,MAAM,WAAW,UAAU;IACzB,iBAAiB;IACjB,IAAI,EAAE,MAAM,CAAA;IACZ,oBAAoB;IACpB,OAAO,EAAE,MAAM,CAAA;CAChB;AAED;;GAEG;AACH,MAAM,WAAW,YAAY;IAC3B,MAAM,EAAE,MAAM,IAAI,CAAA;CACnB"}
1
+ {"version":3,"file":"Opuslib.types.d.ts","sourceRoot":"","sources":["../src/Opuslib.types.ts"],"names":[],"mappings":"AAAA;;GAEG;AACH,MAAM,WAAW,WAAW;IAC1B,2DAA2D;IAC3D,UAAU,EAAE,MAAM,CAAA;IAClB,gDAAgD;IAChD,QAAQ,EAAE,MAAM,CAAA;IAChB,6DAA6D;IAC7D,OAAO,EAAE,MAAM,CAAA;IACf,8DAA8D;IAC9D,SAAS,EAAE,MAAM,CAAA;IACjB,gJAAgJ;IAChJ,iBAAiB,CAAC,EAAE,MAAM,CAAA;IAC1B,oFAAoF;IACpF,YAAY,CAAC,EAAE,MAAM,CAAA;IACrB,yDAAyD;IACzD,qBAAqB,CAAC,EAAE,OAAO,CAAA;IAC/B,4DAA4D;IAC5D,sBAAsB,CAAC,EAAE,MAAM,CAAA;IAC/B,iHAAiH;IACjH,gBAAgB,CAAC,EAAE,OAAO,CAAA;IAC1B,sDAAsD;IACtD,cAAc,CAAC,EAAE,OAAO,CAAA;CACzB;AAED;;GAEG;AACH,MAAM,WAAW,SAAS;IACxB,wDAAwD;IACxD,IAAI,EAAE,WAAW,CAAA;IACjB,iFAAiF;IACjF,UAAU,CAAC,EAAE,MAAM,CAAA;CACpB;AAED;;GAEG;AACH,MAAM,WAAW,eAAe;IAC9B,4GAA4G;IAC5G,MAAM,EAAE,SAAS,EAAE,CAAA;IACnB,gCAAgC;IAChC,SAAS,EAAE,MAAM,CAAA;IACjB,sDAAsD;IACtD,cAAc,EAAE,MAAM,CAAA;IACtB,sEAAsE;IACtE,QAAQ,EAAE,MAAM,CAAA;IAChB,+DAA+D;IAC/D,UAAU,EAAE,MAAM,CAAA;CACnB;AAED;;GAEG;AACH,MAAM,WAAW,cAAc;IAC7B,6CAA6C;IAC7C,GAAG,EAAE,MAAM,CAAA;IACX,iCAAiC;IACjC,IAAI,EAAE,MAAM,CAAA;IACZ,gCAAgC;IAChC,SAAS,EAAE,MAAM,CAAA;CAClB;AAED;;;GAGG;AACH,MAAM,WAAW,iBAAiB;IAChC,uDAAuD;IACvD,SAAS,EAAE,MAAM,CAAA;IACjB,oCAAoC;IACpC,UAAU,EAAE,MAAM,CAAA;IAClB,yBAAyB;IACzB,QAAQ,EAAE,MAAM,CAAA;IAChB,wCAAwC;IACxC,OAAO,EAAE,MAAM,CAAA;IACf,qCAAqC;IACrC,SAAS,EAAE,MAAM,CAAA;IACjB,yFAAyF;IACzF,OAAO,EAAE,MAAM,CAAA;CAChB;AAED;;;GAGG;AACH,MAAM,WAAW,aAAa;IAC5B,uDAAuD;IACvD,SAAS,EAAE,MAAM,CAAA;IACjB,8DAA8D;IAC9D,aAAa,EAAE,MAAM,CAAA;IACrB,yDAAyD;IACzD,YAAY,EAAE,MAAM,CAAA;CACrB;AAED;;GAEG;AACH,MAAM,WAAW,UAAU;IACzB,iBAAiB;IACjB,IAAI,EAAE,MAAM,CAAA;IACZ,oBAAoB;IACpB,OAAO,EAAE,MAAM,CAAA;CAChB;AAED;;GAEG;AACH,MAAM,WAAW,YAAY;IAC3B,MAAM,EAAE,MAAM,IAAI,CAAA;CACnB"}
@@ -1 +1 @@
1
- {"version":3,"file":"Opuslib.types.js","sourceRoot":"","sources":["../src/Opuslib.types.ts"],"names":[],"mappings":"","sourcesContent":["/**\n * Audio configuration for Opus encoding\n */\nexport interface AudioConfig {\n /** Sample rate in Hz (8000, 12000, 16000, 24000, 48000) */\n sampleRate: number\n /** Number of channels (1 = mono, 2 = stereo) */\n channels: number\n /** Target bitrate in bits/second (e.g., 24000 for 24kbps) */\n bitrate: number\n /** Frame duration in milliseconds (2.5, 5, 10, 20, 40, 60) */\n frameSize: number\n /** Packet duration in milliseconds (typically 20-100ms) */\n packetDuration: number\n /** DRED recovery duration in milliseconds (0-100, default 100) - NEW in Opus 1.6 */\n dredDuration?: number\n /** Enable amplitude events for waveform visualization */\n enableAmplitudeEvents?: boolean\n /** Amplitude event interval in milliseconds (default 16) */\n amplitudeEventInterval?: number\n /** Audio level RMS window duration in milliseconds (default 360) */\n audioLevelWindow?: number\n /** Save debug PCM audio to file (development only) */\n saveDebugAudio?: boolean\n}\n\n/**\n * Audio chunk event payload (Opus-encoded data)\n */\nexport interface AudioChunkEvent {\n /** Opus-encoded audio data as ArrayBuffer */\n data: ArrayBuffer\n /** Timestamp in milliseconds */\n timestamp: number\n /** Sequence number (increments with each packet) */\n sequenceNumber: number\n /** Audio level normalized to 0.0~1.0 (mapped from dBFS, 0 = silence, 1 = loud) */\n audioLevel: number\n /** Duration of this packet in milliseconds (frameSize * frameCount) */\n duration: number\n /** Number of Opus frames in this packet */\n frameCount: number\n}\n\n/**\n * Amplitude event payload (for waveform visualization)\n */\nexport interface AmplitudeEvent {\n /** Root mean square amplitude (0.0 - 1.0) */\n rms: number\n /** Peak amplitude (0.0 - 1.0) */\n peak: number\n /** Timestamp in milliseconds */\n timestamp: number\n}\n\n/**\n * Audio started event 
payload\n * Emitted when audio streaming successfully starts\n */\nexport interface AudioStartedEvent {\n /** Timestamp in milliseconds when streaming started */\n timestamp: number\n /** Actual sample rate being used */\n sampleRate: number\n /** Number of channels */\n channels: number\n /** Configured bitrate in bits/second */\n bitrate: number\n /** Frame duration in milliseconds */\n frameSize: number\n /** Opus encoder lookahead in samples (decoder should skip this many samples at start) */\n preSkip: number\n}\n\n/**\n * Audio end event payload\n * Emitted when audio streaming stops\n */\nexport interface AudioEndEvent {\n /** Timestamp in milliseconds when streaming stopped */\n timestamp: number\n /** Total duration of the streaming session in milliseconds */\n totalDuration: number\n /** Total number of packets encoded during the session */\n totalPackets: number\n}\n\n/**\n * Error event payload\n */\nexport interface ErrorEvent {\n /** Error code */\n code: string\n /** Error message */\n message: string\n}\n\n/**\n * Event subscription\n */\nexport interface Subscription {\n remove: () => void\n}\n"]}
1
+ {"version":3,"file":"Opuslib.types.js","sourceRoot":"","sources":["../src/Opuslib.types.ts"],"names":[],"mappings":"","sourcesContent":["/**\n * Audio configuration for Opus encoding\n */\nexport interface AudioConfig {\n /** Sample rate in Hz (8000, 12000, 16000, 24000, 48000) */\n sampleRate: number\n /** Number of channels (1 = mono, 2 = stereo) */\n channels: number\n /** Target bitrate in bits/second (e.g., 24000 for 24kbps) */\n bitrate: number\n /** Frame duration in milliseconds (2.5, 5, 10, 20, 40, 60) */\n frameSize: number\n /** Number of Opus frames per callback (default 1). Multiple frames are returned as independent packets in frames[], reducing JS bridge calls */\n framesPerCallback?: number\n /** DRED recovery duration in milliseconds (0-100, default 100) - NEW in Opus 1.6 */\n dredDuration?: number\n /** Enable amplitude events for waveform visualization */\n enableAmplitudeEvents?: boolean\n /** Amplitude event interval in milliseconds (default 16) */\n amplitudeEventInterval?: number\n /** Enable per-frame audio level calculation (default false). When enabled, each OpusFrame includes audioLevel */\n enableAudioLevel?: boolean\n /** Save debug PCM audio to file (development only) */\n saveDebugAudio?: boolean\n}\n\n/**\n * A single Opus frame — one complete opus_encode() output with its own TOC byte\n */\nexport interface OpusFrame {\n /** Opus-encoded packet data (independent, decodable) */\n data: ArrayBuffer\n /** Per-frame audio level 0.0~1.0 (only present when enableAudioLevel is true) */\n audioLevel?: number\n}\n\n/**\n * Audio chunk event payload (Opus-encoded data)\n */\nexport interface AudioChunkEvent {\n /** Array of independent Opus frames. 
Each frame is a complete opus_encode() output, decodable on its own */\n frames: OpusFrame[]\n /** Timestamp in milliseconds */\n timestamp: number\n /** Sequence number (increments with each callback) */\n sequenceNumber: number\n /** Duration of all frames in milliseconds (frameSize * frameCount) */\n duration: number\n /** Number of Opus frames in this callback (= frames.length) */\n frameCount: number\n}\n\n/**\n * Amplitude event payload (for waveform visualization)\n */\nexport interface AmplitudeEvent {\n /** Root mean square amplitude (0.0 - 1.0) */\n rms: number\n /** Peak amplitude (0.0 - 1.0) */\n peak: number\n /** Timestamp in milliseconds */\n timestamp: number\n}\n\n/**\n * Audio started event payload\n * Emitted when audio streaming successfully starts\n */\nexport interface AudioStartedEvent {\n /** Timestamp in milliseconds when streaming started */\n timestamp: number\n /** Actual sample rate being used */\n sampleRate: number\n /** Number of channels */\n channels: number\n /** Configured bitrate in bits/second */\n bitrate: number\n /** Frame duration in milliseconds */\n frameSize: number\n /** Opus encoder lookahead in samples (decoder should skip this many samples at start) */\n preSkip: number\n}\n\n/**\n * Audio end event payload\n * Emitted when audio streaming stops\n */\nexport interface AudioEndEvent {\n /** Timestamp in milliseconds when streaming stopped */\n timestamp: number\n /** Total duration of the streaming session in milliseconds */\n totalDuration: number\n /** Total number of packets encoded during the session */\n totalPackets: number\n}\n\n/**\n * Error event payload\n */\nexport interface ErrorEvent {\n /** Error code */\n code: string\n /** Error message */\n message: string\n}\n\n/**\n * Event subscription\n */\nexport interface Subscription {\n remove: () => void\n}\n"]}
@@ -17,7 +17,7 @@ declare const _default: {
17
17
  * channels: 1,
18
18
  * bitrate: 24000,
19
19
  * frameSize: 20,
20
- * packetDuration: 20,
20
+ * framesPerCallback: 5, // batch 5 independent Opus packets per event
21
21
  * dredDuration: 100, // Enable 100ms DRED recovery
22
22
  * })
23
23
  * ```
@@ -45,8 +45,10 @@ declare const _default: {
45
45
  * ```ts
46
46
  * // Listen for audio chunks
47
47
  * const subscription = Opuslib.addListener('audioChunk', (event) => {
48
- * console.log('Received Opus packet:', event.data.byteLength, 'bytes')
49
- * websocket.send(event.data)
48
+ * for (const frame of event.frames) {
49
+ * console.log('Opus packet:', frame.data.byteLength, 'bytes')
50
+ * websocket.send(frame.data)
51
+ * }
50
52
  * })
51
53
  *
52
54
  * // Listen for errors
@@ -1 +1 @@
1
- {"version":3,"file":"OpuslibModule.d.ts","sourceRoot":"","sources":["../src/OpuslibModule.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,WAAW,EAAE,eAAe,EAAE,cAAc,EAAE,iBAAiB,EAAE,aAAa,EAAE,UAAU,EAAE,YAAY,EAAE,MAAM,iBAAiB,CAAA;AAoC/I;;;;;GAKG;;IAED;;;;;;;;;;;;;;;OAeG;6BACsB,WAAW;IAEpC;;OAEG;;IAGH;;OAEG;;IAGH;;OAEG;;IAGH;;;;;;;;;;;;;;;;;;;;;;;OAuBG;iBAIsE;QACvE,CAAC,SAAS,EAAE,YAAY,EAAE,QAAQ,EAAE,CAAC,KAAK,EAAE,eAAe,KAAK,IAAI,GAAG,YAAY,CAAA;QACnF,CAAC,SAAS,EAAE,WAAW,EAAE,QAAQ,EAAE,CAAC,KAAK,EAAE,cAAc,KAAK,IAAI,GAAG,YAAY,CAAA;QACjF,CAAC,SAAS,EAAE,cAAc,EAAE,QAAQ,EAAE,CAAC,KAAK,EAAE,iBAAiB,KAAK,IAAI,GAAG,YAAY,CAAA;QACvF,CAAC,SAAS,EAAE,UAAU,EAAE,QAAQ,EAAE,CAAC,KAAK,EAAE,aAAa,KAAK,IAAI,GAAG,YAAY,CAAA;QAC/E,CAAC,SAAS,EAAE,OAAO,EAAE,QAAQ,EAAE,CAAC,KAAK,EAAE,UAAU,KAAK,IAAI,GAAG,YAAY,CAAA;KAC1E;IAED;;;;;OAKG;qCAES,CAAC,KAAK,EAAE,cAAc,KAAK,IAAI,KACxC,YAAY;IAEf;;;;;OAKG;iCAES,CAAC,KAAK,EAAE,UAAU,KAAK,IAAI,KACpC,YAAY;;AAvFjB,wBAwFC"}
1
+ {"version":3,"file":"OpuslibModule.d.ts","sourceRoot":"","sources":["../src/OpuslibModule.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,WAAW,EAAE,eAAe,EAAE,cAAc,EAAE,iBAAiB,EAAE,aAAa,EAAE,UAAU,EAAE,YAAY,EAAE,MAAM,iBAAiB,CAAA;AAoC/I;;;;;GAKG;;IAED;;;;;;;;;;;;;;;OAeG;6BACsB,WAAW;IAEpC;;OAEG;;IAGH;;OAEG;;IAGH;;OAEG;;IAGH;;;;;;;;;;;;;;;;;;;;;;;;;OAyBG;iBAIsE;QACvE,CAAC,SAAS,EAAE,YAAY,EAAE,QAAQ,EAAE,CAAC,KAAK,EAAE,eAAe,KAAK,IAAI,GAAG,YAAY,CAAA;QACnF,CAAC,SAAS,EAAE,WAAW,EAAE,QAAQ,EAAE,CAAC,KAAK,EAAE,cAAc,KAAK,IAAI,GAAG,YAAY,CAAA;QACjF,CAAC,SAAS,EAAE,cAAc,EAAE,QAAQ,EAAE,CAAC,KAAK,EAAE,iBAAiB,KAAK,IAAI,GAAG,YAAY,CAAA;QACvF,CAAC,SAAS,EAAE,UAAU,EAAE,QAAQ,EAAE,CAAC,KAAK,EAAE,aAAa,KAAK,IAAI,GAAG,YAAY,CAAA;QAC/E,CAAC,SAAS,EAAE,OAAO,EAAE,QAAQ,EAAE,CAAC,KAAK,EAAE,UAAU,KAAK,IAAI,GAAG,YAAY,CAAA;KAC1E;IAED;;;;;OAKG;qCAES,CAAC,KAAK,EAAE,cAAc,KAAK,IAAI,KACxC,YAAY;IAEf;;;;;OAKG;iCAES,CAAC,KAAK,EAAE,UAAU,KAAK,IAAI,KACpC,YAAY;;AAzFjB,wBA0FC"}
@@ -21,7 +21,7 @@ export default {
21
21
  * channels: 1,
22
22
  * bitrate: 24000,
23
23
  * frameSize: 20,
24
- * packetDuration: 20,
24
+ * framesPerCallback: 5, // batch 5 independent Opus packets per event
25
25
  * dredDuration: 100, // Enable 100ms DRED recovery
26
26
  * })
27
27
  * ```
@@ -49,8 +49,10 @@ export default {
49
49
  * ```ts
50
50
  * // Listen for audio chunks
51
51
  * const subscription = Opuslib.addListener('audioChunk', (event) => {
52
- * console.log('Received Opus packet:', event.data.byteLength, 'bytes')
53
- * websocket.send(event.data)
52
+ * for (const frame of event.frames) {
53
+ * console.log('Opus packet:', frame.data.byteLength, 'bytes')
54
+ * websocket.send(frame.data)
55
+ * }
54
56
  * })
55
57
  *
56
58
  * // Listen for errors
@@ -1 +1 @@
1
- {"version":3,"file":"OpuslibModule.js","sourceRoot":"","sources":["../src/OpuslibModule.ts"],"names":[],"mappings":"AAAA,OAAO,EAAgB,mBAAmB,EAAE,YAAY,EAAE,MAAM,MAAM,CAAA;AA+BtE,kCAAkC;AAClC,MAAM,aAAa,GAAG,mBAAmB,CAAoB,SAAS,CAAC,CAAA;AAEvE,+CAA+C;AAC/C,MAAM,OAAO,GAAG,IAAI,YAAY,CAAC,aAAoB,CAAC,CAAA;AAEtD;;;;;GAKG;AACH,eAAe;IACb;;;;;;;;;;;;;;;OAeG;IACH,cAAc,EAAE,CAAC,MAAmB,EAAE,EAAE,CAAC,aAAa,CAAC,cAAc,CAAC,MAAM,CAAC;IAE7E;;OAEG;IACH,aAAa,EAAE,GAAG,EAAE,CAAC,aAAa,CAAC,aAAa,EAAE;IAElD;;OAEG;IACH,cAAc,EAAE,GAAG,EAAE,CAAC,aAAa,CAAC,cAAc,EAAE;IAEpD;;OAEG;IACH,eAAe,EAAE,GAAG,EAAE,CAAC,aAAa,CAAC,eAAe,EAAE;IAEtD;;;;;;;;;;;;;;;;;;;;;;;OAuBG;IACH,WAAW,EAAE,CAAC,CACZ,SAA6E,EAC7E,QAA4G,EAC9F,EAAE,CAAE,OAAe,CAAC,WAAW,CAAC,SAAS,EAAE,QAAQ,CAAC,CAMnE;IAED;;;;;OAKG;IACH,oBAAoB,EAAE,CACpB,QAAyC,EAC3B,EAAE,CAAE,OAAe,CAAC,WAAW,CAAC,WAAW,EAAE,QAAQ,CAAC;IAEtE;;;;;OAKG;IACH,gBAAgB,EAAE,CAChB,QAAqC,EACvB,EAAE,CAAE,OAAe,CAAC,WAAW,CAAC,OAAO,EAAE,QAAQ,CAAC;CACnE,CAAA","sourcesContent":["import { NativeModule, requireNativeModule, EventEmitter } from 'expo'\nimport type { AudioConfig, AudioChunkEvent, AmplitudeEvent, AudioStartedEvent, AudioEndEvent, ErrorEvent, Subscription } from './Opuslib.types'\n\n/**\n * Opuslib Native Module Interface\n *\n * Provides native audio capture and Opus 1.6 encoding with DRED support\n */\ndeclare class OpuslibModuleType extends NativeModule {\n /**\n * Start audio streaming with Opus encoding\n * @param config Audio configuration\n */\n startStreaming(config: AudioConfig): Promise<void>\n\n /**\n * Stop audio streaming\n */\n stopStreaming(): Promise<void>\n\n /**\n * Pause audio streaming (keeps resources allocated)\n */\n pauseStreaming(): void\n\n /**\n * Resume audio streaming\n */\n resumeStreaming(): void\n}\n\n// Load the native module from JSI\nconst OpuslibModule = requireNativeModule<OpuslibModuleType>('Opuslib')\n\n// Create event emitter for listening to events\nconst emitter = new EventEmitter(OpuslibModule as any)\n\n/**\n * Opuslib - Opus 1.6 
Audio Encoding with DRED Support\n *\n * This module provides real-time audio capture and Opus 1.6 encoding\n * with Deep Redundancy (DRED) for improved quality on lossy networks.\n */\nexport default {\n /**\n * Start audio streaming with Opus encoding\n *\n * @param config Audio configuration\n * @example\n * ```ts\n * await Opuslib.startStreaming({\n * sampleRate: 16000,\n * channels: 1,\n * bitrate: 24000,\n * frameSize: 20,\n * packetDuration: 20,\n * dredDuration: 100, // Enable 100ms DRED recovery\n * })\n * ```\n */\n startStreaming: (config: AudioConfig) => OpuslibModule.startStreaming(config),\n\n /**\n * Stop audio streaming and release resources\n */\n stopStreaming: () => OpuslibModule.stopStreaming(),\n\n /**\n * Pause audio streaming (keeps resources allocated)\n */\n pauseStreaming: () => OpuslibModule.pauseStreaming(),\n\n /**\n * Resume audio streaming\n */\n resumeStreaming: () => OpuslibModule.resumeStreaming(),\n\n /**\n * Listen for events (audioChunk, amplitude, or error)\n *\n * @param eventName Event type to listen for\n * @param listener Event listener callback\n * @returns Subscription object with remove() method\n * @example\n * ```ts\n * // Listen for audio chunks\n * const subscription = Opuslib.addListener('audioChunk', (event) => {\n * console.log('Received Opus packet:', event.data.byteLength, 'bytes')\n * websocket.send(event.data)\n * })\n *\n * // Listen for errors\n * const errorSub = Opuslib.addListener('error', (event) => {\n * console.error('Error:', event.message)\n * })\n *\n * // Later: unsubscribe\n * subscription.remove()\n * errorSub.remove()\n * ```\n */\n addListener: ((\n eventName: 'audioChunk' | 'amplitude' | 'audioStarted' | 'audioEnd' | 'error',\n listener: (event: AudioChunkEvent | AmplitudeEvent | AudioStartedEvent | AudioEndEvent | ErrorEvent) => void\n ): Subscription => (emitter as any).addListener(eventName, listener)) as {\n (eventName: 'audioChunk', listener: (event: AudioChunkEvent) => void): 
Subscription\n (eventName: 'amplitude', listener: (event: AmplitudeEvent) => void): Subscription\n (eventName: 'audioStarted', listener: (event: AudioStartedEvent) => void): Subscription\n (eventName: 'audioEnd', listener: (event: AudioEndEvent) => void): Subscription\n (eventName: 'error', listener: (event: ErrorEvent) => void): Subscription\n },\n\n /**\n * Listen for amplitude events (for waveform visualization)\n *\n * @param listener Event listener callback\n * @returns Subscription object with remove() method\n */\n addAmplitudeListener: (\n listener: (event: AmplitudeEvent) => void\n ): Subscription => (emitter as any).addListener('amplitude', listener),\n\n /**\n * Listen for error events\n *\n * @param listener Event listener callback\n * @returns Subscription object with remove() method\n */\n addErrorListener: (\n listener: (event: ErrorEvent) => void\n ): Subscription => (emitter as any).addListener('error', listener),\n}\n"]}
1
+ {"version":3,"file":"OpuslibModule.js","sourceRoot":"","sources":["../src/OpuslibModule.ts"],"names":[],"mappings":"AAAA,OAAO,EAAgB,mBAAmB,EAAE,YAAY,EAAE,MAAM,MAAM,CAAA;AA+BtE,kCAAkC;AAClC,MAAM,aAAa,GAAG,mBAAmB,CAAoB,SAAS,CAAC,CAAA;AAEvE,+CAA+C;AAC/C,MAAM,OAAO,GAAG,IAAI,YAAY,CAAC,aAAoB,CAAC,CAAA;AAEtD;;;;;GAKG;AACH,eAAe;IACb;;;;;;;;;;;;;;;OAeG;IACH,cAAc,EAAE,CAAC,MAAmB,EAAE,EAAE,CAAC,aAAa,CAAC,cAAc,CAAC,MAAM,CAAC;IAE7E;;OAEG;IACH,aAAa,EAAE,GAAG,EAAE,CAAC,aAAa,CAAC,aAAa,EAAE;IAElD;;OAEG;IACH,cAAc,EAAE,GAAG,EAAE,CAAC,aAAa,CAAC,cAAc,EAAE;IAEpD;;OAEG;IACH,eAAe,EAAE,GAAG,EAAE,CAAC,aAAa,CAAC,eAAe,EAAE;IAEtD;;;;;;;;;;;;;;;;;;;;;;;;;OAyBG;IACH,WAAW,EAAE,CAAC,CACZ,SAA6E,EAC7E,QAA4G,EAC9F,EAAE,CAAE,OAAe,CAAC,WAAW,CAAC,SAAS,EAAE,QAAQ,CAAC,CAMnE;IAED;;;;;OAKG;IACH,oBAAoB,EAAE,CACpB,QAAyC,EAC3B,EAAE,CAAE,OAAe,CAAC,WAAW,CAAC,WAAW,EAAE,QAAQ,CAAC;IAEtE;;;;;OAKG;IACH,gBAAgB,EAAE,CAChB,QAAqC,EACvB,EAAE,CAAE,OAAe,CAAC,WAAW,CAAC,OAAO,EAAE,QAAQ,CAAC;CACnE,CAAA","sourcesContent":["import { NativeModule, requireNativeModule, EventEmitter } from 'expo'\nimport type { AudioConfig, AudioChunkEvent, AmplitudeEvent, AudioStartedEvent, AudioEndEvent, ErrorEvent, Subscription } from './Opuslib.types'\n\n/**\n * Opuslib Native Module Interface\n *\n * Provides native audio capture and Opus 1.6 encoding with DRED support\n */\ndeclare class OpuslibModuleType extends NativeModule {\n /**\n * Start audio streaming with Opus encoding\n * @param config Audio configuration\n */\n startStreaming(config: AudioConfig): Promise<void>\n\n /**\n * Stop audio streaming\n */\n stopStreaming(): Promise<void>\n\n /**\n * Pause audio streaming (keeps resources allocated)\n */\n pauseStreaming(): void\n\n /**\n * Resume audio streaming\n */\n resumeStreaming(): void\n}\n\n// Load the native module from JSI\nconst OpuslibModule = requireNativeModule<OpuslibModuleType>('Opuslib')\n\n// Create event emitter for listening to events\nconst emitter = new EventEmitter(OpuslibModule as any)\n\n/**\n * Opuslib - Opus 
1.6 Audio Encoding with DRED Support\n *\n * This module provides real-time audio capture and Opus 1.6 encoding\n * with Deep Redundancy (DRED) for improved quality on lossy networks.\n */\nexport default {\n /**\n * Start audio streaming with Opus encoding\n *\n * @param config Audio configuration\n * @example\n * ```ts\n * await Opuslib.startStreaming({\n * sampleRate: 16000,\n * channels: 1,\n * bitrate: 24000,\n * frameSize: 20,\n * framesPerCallback: 5, // batch 5 independent Opus packets per event\n * dredDuration: 100, // Enable 100ms DRED recovery\n * })\n * ```\n */\n startStreaming: (config: AudioConfig) => OpuslibModule.startStreaming(config),\n\n /**\n * Stop audio streaming and release resources\n */\n stopStreaming: () => OpuslibModule.stopStreaming(),\n\n /**\n * Pause audio streaming (keeps resources allocated)\n */\n pauseStreaming: () => OpuslibModule.pauseStreaming(),\n\n /**\n * Resume audio streaming\n */\n resumeStreaming: () => OpuslibModule.resumeStreaming(),\n\n /**\n * Listen for events (audioChunk, amplitude, or error)\n *\n * @param eventName Event type to listen for\n * @param listener Event listener callback\n * @returns Subscription object with remove() method\n * @example\n * ```ts\n * // Listen for audio chunks\n * const subscription = Opuslib.addListener('audioChunk', (event) => {\n * for (const frame of event.frames) {\n * console.log('Opus packet:', frame.data.byteLength, 'bytes')\n * websocket.send(frame.data)\n * }\n * })\n *\n * // Listen for errors\n * const errorSub = Opuslib.addListener('error', (event) => {\n * console.error('Error:', event.message)\n * })\n *\n * // Later: unsubscribe\n * subscription.remove()\n * errorSub.remove()\n * ```\n */\n addListener: ((\n eventName: 'audioChunk' | 'amplitude' | 'audioStarted' | 'audioEnd' | 'error',\n listener: (event: AudioChunkEvent | AmplitudeEvent | AudioStartedEvent | AudioEndEvent | ErrorEvent) => void\n ): Subscription => (emitter as any).addListener(eventName, listener)) 
as {\n (eventName: 'audioChunk', listener: (event: AudioChunkEvent) => void): Subscription\n (eventName: 'amplitude', listener: (event: AmplitudeEvent) => void): Subscription\n (eventName: 'audioStarted', listener: (event: AudioStartedEvent) => void): Subscription\n (eventName: 'audioEnd', listener: (event: AudioEndEvent) => void): Subscription\n (eventName: 'error', listener: (event: ErrorEvent) => void): Subscription\n },\n\n /**\n * Listen for amplitude events (for waveform visualization)\n *\n * @param listener Event listener callback\n * @returns Subscription object with remove() method\n */\n addAmplitudeListener: (\n listener: (event: AmplitudeEvent) => void\n ): Subscription => (emitter as any).addListener('amplitude', listener),\n\n /**\n * Listen for error events\n *\n * @param listener Event listener callback\n * @returns Subscription object with remove() method\n */\n addErrorListener: (\n listener: (event: ErrorEvent) => void\n ): Subscription => (emitter as any).addListener('error', listener),\n}\n"]}
package/build/index.d.ts CHANGED
@@ -14,14 +14,15 @@
14
14
  * channels: 1,
15
15
  * bitrate: 24000,
16
16
  * frameSize: 20,
17
- * packetDuration: 20,
17
+ * framesPerCallback: 5, // batch 5 independent Opus frames per event
18
18
  * dredDuration: 100, // 100ms DRED recovery
19
19
  * })
20
20
  *
21
21
  * // Listen for Opus packets
22
22
  * Opuslib.addListener('audioChunk', (event) => {
23
- * // Send to backend
24
- * websocket.send(event.data)
23
+ * for (const frame of event.frames) {
24
+ * websocket.send(frame.data)
25
+ * }
25
26
  * })
26
27
  *
27
28
  * // Stop streaming
@@ -1 +1 @@
1
- {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GA6BG;AACH,OAAO,EAAE,OAAO,EAAE,MAAM,iBAAiB,CAAA;AACzC,cAAc,iBAAiB,CAAA"}
1
+ {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GA8BG;AACH,OAAO,EAAE,OAAO,EAAE,MAAM,iBAAiB,CAAA;AACzC,cAAc,iBAAiB,CAAA"}
package/build/index.js CHANGED
@@ -14,14 +14,15 @@
14
14
  * channels: 1,
15
15
  * bitrate: 24000,
16
16
  * frameSize: 20,
17
- * packetDuration: 20,
17
+ * framesPerCallback: 5, // batch 5 independent Opus frames per event
18
18
  * dredDuration: 100, // 100ms DRED recovery
19
19
  * })
20
20
  *
21
21
  * // Listen for Opus packets
22
22
  * Opuslib.addListener('audioChunk', (event) => {
23
- * // Send to backend
24
- * websocket.send(event.data)
23
+ * for (const frame of event.frames) {
24
+ * websocket.send(frame.data)
25
+ * }
25
26
  * })
26
27
  *
27
28
  * // Stop streaming
@@ -1 +1 @@
1
- {"version":3,"file":"index.js","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GA6BG;AACH,OAAO,EAAE,OAAO,EAAE,MAAM,iBAAiB,CAAA;AACzC,cAAc,iBAAiB,CAAA","sourcesContent":["/**\n * Opuslib - Opus 1.6 Audio Encoding with DRED Support\n *\n * Native audio capture and Opus 1.6 encoding for React Native/Expo\n * with Deep Redundancy (DRED) for improved quality on lossy networks.\n *\n * @example\n * ```ts\n * import Opuslib from 'opuslib'\n *\n * // Start streaming with DRED enabled\n * await Opuslib.startStreaming({\n * sampleRate: 16000,\n * channels: 1,\n * bitrate: 24000,\n * frameSize: 20,\n * packetDuration: 20,\n * dredDuration: 100, // 100ms DRED recovery\n * })\n *\n * // Listen for Opus packets\n * Opuslib.addListener('audioChunk', (event) => {\n * // Send to backend\n * websocket.send(event.data)\n * })\n *\n * // Stop streaming\n * await Opuslib.stopStreaming()\n * ```\n */\nexport { default } from './OpuslibModule'\nexport * from './Opuslib.types'\n"]}
1
+ {"version":3,"file":"index.js","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GA8BG;AACH,OAAO,EAAE,OAAO,EAAE,MAAM,iBAAiB,CAAA;AACzC,cAAc,iBAAiB,CAAA","sourcesContent":["/**\n * Opuslib - Opus 1.6 Audio Encoding with DRED Support\n *\n * Native audio capture and Opus 1.6 encoding for React Native/Expo\n * with Deep Redundancy (DRED) for improved quality on lossy networks.\n *\n * @example\n * ```ts\n * import Opuslib from 'opuslib'\n *\n * // Start streaming with DRED enabled\n * await Opuslib.startStreaming({\n * sampleRate: 16000,\n * channels: 1,\n * bitrate: 24000,\n * frameSize: 20,\n * framesPerCallback: 5, // batch 5 independent Opus frames per event\n * dredDuration: 100, // 100ms DRED recovery\n * })\n *\n * // Listen for Opus packets\n * Opuslib.addListener('audioChunk', (event) => {\n * for (const frame of event.frames) {\n * websocket.send(frame.data)\n * }\n * })\n *\n * // Stop streaming\n * await Opuslib.stopStreaming()\n * ```\n */\nexport { default } from './OpuslibModule'\nexport * from './Opuslib.types'\n"]}
@@ -29,7 +29,7 @@ class AudioEngineManager {
29
29
  private var loggedFirstBuffer = false
30
30
 
31
31
  // Event callbacks
32
- private var onAudioChunk: ((Data, Double, Int, Float, Double, Int) -> Void)?
32
+ private var onAudioChunk: (([EncodedFrame], Double, Int, Double, Int) -> Void)?
33
33
  private var onStarted: ((_ timestamp: Double, _ sampleRate: Int, _ channels: Int, _ bitrate: Int, _ frameSize: Double, _ preSkip: Int) -> Void)?
34
34
  private var onEnd: ((_ timestamp: Double, _ totalDuration: Double, _ totalPackets: Int) -> Void)?
35
35
  private var onAmplitude: ((Float, Float, Double) -> Void)?
@@ -59,8 +59,8 @@ class AudioEngineManager {
59
59
 
60
60
  // Create and start AudioProcessor (encoding thread)
61
61
  let proc = AudioProcessor(config: config)
62
- proc.setOnAudioChunk { [weak self] data, timestamp, seq, level, duration, frameCount in
63
- self?.onAudioChunk?(data, timestamp, seq, level, duration, frameCount)
62
+ proc.setOnAudioChunk { [weak self] frames, timestamp, seq, duration, frameCount in
63
+ self?.onAudioChunk?(frames, timestamp, seq, duration, frameCount)
64
64
  }
65
65
  proc.setOnStarted { [weak self] timestamp, sampleRate, channels, bitrate, frameSize, preSkip in
66
66
  self?.onStarted?(timestamp, sampleRate, channels, bitrate, frameSize, preSkip)
@@ -159,7 +159,7 @@ class AudioEngineManager {
159
159
 
160
160
  // MARK: - Event Handlers
161
161
 
162
- func setOnAudioChunk(_ callback: @escaping (Data, Double, Int, Float, Double, Int) -> Void) {
162
+ func setOnAudioChunk(_ callback: @escaping ([EncodedFrame], Double, Int, Double, Int) -> Void) {
163
163
  self.onAudioChunk = callback
164
164
  }
165
165
 
@@ -11,6 +11,12 @@ import Foundation
11
11
  * - audioStarted/audioEnd events are emitted from the encoding queue,
12
12
  * so preSkip/sequenceNumber are read without cross-thread risk
13
13
  */
14
+ /// A single encoded Opus frame with optional per-frame audio level
15
+ struct EncodedFrame {
16
+ let data: Data
17
+ let audioLevel: Float? // nil when enableAudioLevel is false
18
+ }
19
+
14
20
  class AudioProcessor {
15
21
  // Dedicated serial queue — equivalent to boost::asio::io_context + thread / HandlerThread
16
22
  private let queue = DispatchQueue(label: "com.opuslib.encoding", qos: .userInitiated)
@@ -20,23 +26,19 @@ class AudioProcessor {
20
26
  private var pendingSamples: [Int16] = []
21
27
  private let samplesPerFrame: Int
22
28
  private let framesPerPacket: Int // how many frames to batch before emitting
23
- private var packetBuffer: Data = Data() // accumulates encoded frames
24
- private var packetFrameCount: Int = 0
29
+ private var packetFrames: [EncodedFrame] = [] // independent Opus packets with per-frame level
25
30
  private var sequenceNumber: Int = 0
26
31
  private var startTime: Double = 0
27
32
 
28
- // Audio level: accumulate RMS over ~360ms window
29
- private var levelSumSquares: Double = 0.0
30
- private var levelSampleCount: Int = 0
31
- private let levelUpdateSamples: Int
32
- private var currentLevel: Float = 0.0
33
+ // Whether to compute per-frame audio level
34
+ private let enableAudioLevel: Bool
33
35
 
34
36
  // Debug file
35
37
  private var pcmFileHandle: FileHandle?
36
38
 
37
39
  // Event callbacks (all invoked on encoding queue)
38
- // onAudioChunk: (data, timestamp, sequenceNumber, audioLevel, duration, frameCount)
39
- private var onAudioChunk: ((Data, Double, Int, Float, Double, Int) -> Void)?
40
+ // onAudioChunk: (frames, timestamp, sequenceNumber, duration, frameCount)
41
+ private var onAudioChunk: (([EncodedFrame], Double, Int, Double, Int) -> Void)?
40
42
  private var onStarted: ((_ timestamp: Double, _ sampleRate: Int, _ channels: Int, _ bitrate: Int, _ frameSize: Double, _ preSkip: Int) -> Void)?
41
43
  private var onEnd: ((_ timestamp: Double, _ totalDuration: Double, _ totalPackets: Int) -> Void)?
42
44
 
@@ -46,9 +48,9 @@ class AudioProcessor {
46
48
  init(config: AudioConfig) {
47
49
  self.config = config
48
50
  self.samplesPerFrame = Int(Double(config.sampleRate) * config.frameSize / 1000.0)
49
- self.framesPerPacket = max(1, Int(config.packetDuration / config.frameSize))
50
- let windowMs = config.audioLevelWindow ?? 360
51
- self.levelUpdateSamples = config.sampleRate * config.channels * windowMs / 1000
51
+ let framesPerCallback = config.framesPerCallback ?? 1
52
+ self.framesPerPacket = max(1, framesPerCallback)
53
+ self.enableAudioLevel = config.enableAudioLevel ?? false
52
54
  }
53
55
 
54
56
  // MARK: - Public API
@@ -133,7 +135,7 @@ class AudioProcessor {
133
135
 
134
136
  // MARK: - Event callbacks
135
137
 
136
- func setOnAudioChunk(_ callback: @escaping (Data, Double, Int, Float, Double, Int) -> Void) {
138
+ func setOnAudioChunk(_ callback: @escaping ([EncodedFrame], Double, Int, Double, Int) -> Void) {
137
139
  self.onAudioChunk = callback
138
140
  }
139
141
 
@@ -174,35 +176,32 @@ class AudioProcessor {
174
176
  continue
175
177
  }
176
178
 
177
- // Accumulate encoded frame into packet buffer
178
- packetBuffer.append(opusData)
179
- packetFrameCount += 1
180
-
181
- // Accumulate energy for RMS level
182
- for sample in frameData {
183
- let s = Double(sample) / 32768.0
184
- levelSumSquares += s * s
185
- }
186
- levelSampleCount += frameData.count
187
-
188
- if levelSampleCount >= levelUpdateSamples {
189
- let rms = sqrt(levelSumSquares / Double(levelSampleCount))
179
+ // Per-frame audio level (RMS dBFS → 0~1)
180
+ var frameLevel: Float? = nil
181
+ if enableAudioLevel {
182
+ var sumSquares: Double = 0.0
183
+ for sample in frameData {
184
+ let s = Double(sample) / 32768.0
185
+ sumSquares += s * s
186
+ }
187
+ let rms = sqrt(sumSquares / Double(frameData.count))
190
188
  let dB = 20.0 * log10(max(rms, 1e-10))
191
189
  let dbFloor = -35.0
192
190
  let dbCeiling = -6.0
193
- currentLevel = Float(max(0.0, min(1.0, (dB - dbFloor) / (dbCeiling - dbFloor))))
194
- levelSumSquares = 0.0
195
- levelSampleCount = 0
191
+ frameLevel = Float(max(0.0, min(1.0, (dB - dbFloor) / (dbCeiling - dbFloor))))
196
192
  }
197
193
 
198
- // Emit when we have enough frames for one packet (packetDuration)
199
- if packetFrameCount >= framesPerPacket {
194
+ // Accumulate encoded frame as independent packet (no byte concatenation)
195
+ packetFrames.append(EncodedFrame(data: opusData, audioLevel: frameLevel))
196
+
197
+ // Emit when we have enough frames (framesPerCallback)
198
+ if packetFrames.count >= framesPerPacket {
200
199
  let timestampMs = Date().timeIntervalSince1970 * 1000
201
- let duration = Double(packetFrameCount) * config.frameSize
202
- onAudioChunk?(packetBuffer, timestampMs, sequenceNumber, currentLevel, duration, packetFrameCount)
200
+ let frameCount = packetFrames.count
201
+ let duration = Double(frameCount) * config.frameSize
202
+ onAudioChunk?(packetFrames, timestampMs, sequenceNumber, duration, frameCount)
203
203
  sequenceNumber += 1
204
- packetBuffer = Data()
205
- packetFrameCount = 0
204
+ packetFrames.removeAll()
206
205
  }
207
206
  }
208
207
  }
@@ -227,18 +226,18 @@ class AudioProcessor {
227
226
  }
228
227
 
229
228
  guard let opusData = encodedPacket, !opusData.isEmpty else { continue }
230
- packetBuffer.append(opusData)
231
- packetFrameCount += 1
229
+ // Flush frames get level 0 (silence-padded)
230
+ packetFrames.append(EncodedFrame(data: opusData, audioLevel: enableAudioLevel ? 0.0 : nil))
232
231
  }
233
232
 
234
- // Flush any remaining packet buffer (even if less than framesPerPacket)
235
- if !packetBuffer.isEmpty {
233
+ // Flush any remaining frames (even if less than framesPerPacket)
234
+ if !packetFrames.isEmpty {
236
235
  let timestampMs = Date().timeIntervalSince1970 * 1000
237
- let duration = Double(packetFrameCount) * config.frameSize
238
- onAudioChunk?(packetBuffer, timestampMs, sequenceNumber, currentLevel, duration, packetFrameCount)
236
+ let frameCount = packetFrames.count
237
+ let duration = Double(frameCount) * config.frameSize
238
+ onAudioChunk?(packetFrames, timestampMs, sequenceNumber, duration, frameCount)
239
239
  sequenceNumber += 1
240
- packetBuffer = Data()
241
- packetFrameCount = 0
240
+ packetFrames.removeAll()
242
241
  }
243
242
  }
244
243
  }
@@ -58,12 +58,19 @@ public class OpuslibModule: Module {
58
58
  print("[OpuslibModule] ✅ AudioEngineManager created")
59
59
 
60
60
  // Set up event callbacks — audioStarted/audioEnd come from encoding thread
61
- manager.setOnAudioChunk { [weak self] data, timestamp, sequenceNumber, audioLevel, duration, frameCount in
61
+ manager.setOnAudioChunk { [weak self] frames, timestamp, sequenceNumber, duration, frameCount in
62
+ // Each frame is an independent Opus packet wrapped in { data, audioLevel? }
63
+ let frameObjects: [[String: Any]] = frames.map { frame in
64
+ var obj: [String: Any] = ["data": frame.data]
65
+ if let level = frame.audioLevel {
66
+ obj["audioLevel"] = level
67
+ }
68
+ return obj
69
+ }
62
70
  self?.sendEvent("audioChunk", [
63
- "data": data,
71
+ "frames": frameObjects,
64
72
  "timestamp": timestamp,
65
73
  "sequenceNumber": sequenceNumber,
66
- "audioLevel": audioLevel,
67
74
  "duration": duration,
68
75
  "frameCount": frameCount
69
76
  ])
@@ -186,11 +193,11 @@ struct AudioConfig: Record {
186
193
  @Field var channels: Int = 1
187
194
  @Field var bitrate: Int = 24000
188
195
  @Field var frameSize: Double = 20.0
189
- @Field var packetDuration: Double = 20.0
196
+ @Field var framesPerCallback: Int? = 1
190
197
  @Field var dredDuration: Int? = 100 // NEW: DRED recovery duration in ms
191
198
  @Field var enableAmplitudeEvents: Bool? = false
192
199
  @Field var amplitudeEventInterval: Double? = 16.0
193
- @Field var audioLevelWindow: Int? = 360 // RMS window duration in ms (default 360)
200
+ @Field var enableAudioLevel: Bool? = false // Enable per-frame audio level calculation
194
201
  @Field var saveDebugAudio: Bool? = false
195
202
  }
196
203
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@imcooder/opuslib",
3
- "version": "2.2.1",
3
+ "version": "2.3.1",
4
4
  "description": "Opus 1.6 audio encoding for React Native and Expo with audio level metering and lifecycle events. Forked from Scdales/opuslib.",
5
5
  "main": "build/index.js",
6
6
  "types": "build/index.d.ts",
@@ -10,35 +10,43 @@ export interface AudioConfig {
10
10
  bitrate: number
11
11
  /** Frame duration in milliseconds (2.5, 5, 10, 20, 40, 60) */
12
12
  frameSize: number
13
- /** Packet duration in milliseconds (typically 20-100ms) */
14
- packetDuration: number
13
+ /** Number of Opus frames per callback (default 1). Multiple frames are returned as independent packets in frames[], reducing JS bridge calls */
14
+ framesPerCallback?: number
15
15
  /** DRED recovery duration in milliseconds (0-100, default 100) - NEW in Opus 1.6 */
16
16
  dredDuration?: number
17
17
  /** Enable amplitude events for waveform visualization */
18
18
  enableAmplitudeEvents?: boolean
19
19
  /** Amplitude event interval in milliseconds (default 16) */
20
20
  amplitudeEventInterval?: number
21
- /** Audio level RMS window duration in milliseconds (default 360) */
22
- audioLevelWindow?: number
21
+ /** Enable per-frame audio level calculation (default false). When enabled, each OpusFrame includes audioLevel */
22
+ enableAudioLevel?: boolean
23
23
  /** Save debug PCM audio to file (development only) */
24
24
  saveDebugAudio?: boolean
25
25
  }
26
26
 
27
+ /**
28
+ * A single Opus frame — one complete opus_encode() output with its own TOC byte
29
+ */
30
+ export interface OpusFrame {
31
+ /** Opus-encoded packet data (independent, decodable) */
32
+ data: ArrayBuffer
33
+ /** Per-frame audio level 0.0~1.0 (only present when enableAudioLevel is true) */
34
+ audioLevel?: number
35
+ }
36
+
27
37
  /**
28
38
  * Audio chunk event payload (Opus-encoded data)
29
39
  */
30
40
  export interface AudioChunkEvent {
31
- /** Opus-encoded audio data as ArrayBuffer */
32
- data: ArrayBuffer
41
+ /** Array of independent Opus frames. Each frame is a complete opus_encode() output, decodable on its own */
42
+ frames: OpusFrame[]
33
43
  /** Timestamp in milliseconds */
34
44
  timestamp: number
35
- /** Sequence number (increments with each packet) */
45
+ /** Sequence number (increments with each callback) */
36
46
  sequenceNumber: number
37
- /** Audio level normalized to 0.0~1.0 (mapped from dBFS, 0 = silence, 1 = loud) */
38
- audioLevel: number
39
- /** Duration of this packet in milliseconds (frameSize * frameCount) */
47
+ /** Duration of all frames in milliseconds (frameSize * frameCount) */
40
48
  duration: number
41
- /** Number of Opus frames in this packet */
49
+ /** Number of Opus frames in this callback (= frames.length) */
42
50
  frameCount: number
43
51
  }
44
52
 
@@ -53,7 +53,7 @@ export default {
53
53
  * channels: 1,
54
54
  * bitrate: 24000,
55
55
  * frameSize: 20,
56
- * packetDuration: 20,
56
+ * framesPerCallback: 5, // batch 5 independent Opus packets per event
57
57
  * dredDuration: 100, // Enable 100ms DRED recovery
58
58
  * })
59
59
  * ```
@@ -85,8 +85,10 @@ export default {
85
85
  * ```ts
86
86
  * // Listen for audio chunks
87
87
  * const subscription = Opuslib.addListener('audioChunk', (event) => {
88
- * console.log('Received Opus packet:', event.data.byteLength, 'bytes')
89
- * websocket.send(event.data)
88
+ * for (const frame of event.frames) {
89
+ * console.log('Opus packet:', frame.data.byteLength, 'bytes')
90
+ * websocket.send(frame.data)
91
+ * }
90
92
  * })
91
93
  *
92
94
  * // Listen for errors
package/src/index.ts CHANGED
@@ -14,14 +14,15 @@
14
14
  * channels: 1,
15
15
  * bitrate: 24000,
16
16
  * frameSize: 20,
17
- * packetDuration: 20,
17
+ * framesPerCallback: 5, // batch 5 independent Opus frames per event
18
18
  * dredDuration: 100, // 100ms DRED recovery
19
19
  * })
20
20
  *
21
21
  * // Listen for Opus packets
22
22
  * Opuslib.addListener('audioChunk', (event) => {
23
- * // Send to backend
24
- * websocket.send(event.data)
23
+ * for (const frame of event.frames) {
24
+ * websocket.send(frame.data)
25
+ * }
25
26
  * })
26
27
  *
27
28
  * // Stop streaming