@edkimmel/expo-audio-stream 0.3.0 → 0.3.2-beta.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/android/src/main/java/expo/modules/audiostream/AudioEffectsManager.kt +35 -10
- package/ios/SharedAudioEngine.swift +18 -6
- package/package.json +3 -2
- package/docs/superpowers/plans/2026-03-13-frequency-band-analysis.md +0 -1006
- package/docs/superpowers/specs/2026-03-13-frequency-band-analysis-design.md +0 -276
|
@@ -7,12 +7,27 @@ import android.media.audiofx.NoiseSuppressor
|
|
|
7
7
|
import android.util.Log
|
|
8
8
|
|
|
9
9
|
/**
|
|
10
|
-
* Manages audio effects for voice recording
|
|
11
|
-
*
|
|
12
|
-
*
|
|
13
|
-
*
|
|
10
|
+
* Manages hardware audio effects for voice recording.
|
|
11
|
+
*
|
|
12
|
+
* We use VOICE_RECOGNITION as our audio source. The Android CDD (Section 5.4)
|
|
13
|
+
* mandates that this source delivers unprocessed audio:
|
|
14
|
+
* [C-1-2] MUST disable noise reduction by default
|
|
15
|
+
* [C-1-3] MUST disable automatic gain control by default
|
|
16
|
+
*
|
|
17
|
+
* NS and AGC are therefore off by default to honor the spec. Enabling them
|
|
18
|
+
* re-introduces the processing the CDD explicitly prohibits for this source
|
|
19
|
+
* and can cause low-volume capture on many OEMs.
|
|
20
|
+
*
|
|
21
|
+
* AEC is the one effect the CDD permits for VOICE_RECOGNITION ("expects a
|
|
22
|
+
* stream that has an echo cancellation effect if available"), so it is
|
|
23
|
+
* enabled by default.
|
|
14
24
|
*/
|
|
15
|
-
class AudioEffectsManager
|
|
25
|
+
class AudioEffectsManager(
|
|
26
|
+
/** Enable hardware noise suppressor. Default false — CDD 5.4 [C-1-2] prohibits it for VOICE_RECOGNITION. */
|
|
27
|
+
private val enableNS: Boolean = false,
|
|
28
|
+
/** Enable hardware AGC. Default false — CDD 5.4 [C-1-3] prohibits it for VOICE_RECOGNITION. */
|
|
29
|
+
private val enableAGC: Boolean = false
|
|
30
|
+
) {
|
|
16
31
|
// Audio effects
|
|
17
32
|
private var acousticEchoCanceler: AcousticEchoCanceler? = null
|
|
18
33
|
private var noiseSuppressor: NoiseSuppressor? = null
|
|
@@ -41,11 +56,21 @@ class AudioEffectsManager {
|
|
|
41
56
|
Log.d(Constants.TAG, "Acoustic Echo Canceler enabled: ${acousticEchoCanceler?.enabled}")
|
|
42
57
|
}
|
|
43
58
|
|
|
44
|
-
//
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
|
|
59
|
+
// NS off by default — CDD 5.4 [C-1-2] prohibits it for VOICE_RECOGNITION.
|
|
60
|
+
// Enabling it can aggressively attenuate speech on many OEMs.
|
|
61
|
+
if (enableNS) {
|
|
62
|
+
enableNoiseSuppression(audioSessionId)
|
|
63
|
+
} else {
|
|
64
|
+
Log.d(Constants.TAG, "Noise Suppressor skipped (CDD 5.4 [C-1-2])")
|
|
65
|
+
}
|
|
66
|
+
|
|
67
|
+
// AGC off by default — CDD 5.4 [C-1-3] prohibits it for VOICE_RECOGNITION.
|
|
68
|
+
// Hardware AGC is also unreliable across devices.
|
|
69
|
+
if (enableAGC) {
|
|
70
|
+
enableAutomaticGainControl(audioSessionId)
|
|
71
|
+
} else {
|
|
72
|
+
Log.d(Constants.TAG, "Hardware AGC skipped (CDD 5.4 [C-1-3])")
|
|
73
|
+
}
|
|
49
74
|
|
|
50
75
|
} catch (e: Exception) {
|
|
51
76
|
Log.e(Constants.TAG, "Error setting up audio effects", e)
|
|
@@ -170,8 +170,14 @@ class SharedAudioEngine {
|
|
|
170
170
|
|
|
171
171
|
node.pause()
|
|
172
172
|
node.stop()
|
|
173
|
-
|
|
174
|
-
engine.
|
|
173
|
+
|
|
174
|
+
// Only disconnect/detach if the node is still attached to this engine.
|
|
175
|
+
// The node may already have been removed (e.g. engine died, concurrent
|
|
176
|
+
// teardown, or duplicate disconnect call).
|
|
177
|
+
if node.engine === engine {
|
|
178
|
+
engine.disconnectNodeOutput(node)
|
|
179
|
+
engine.detach(node)
|
|
180
|
+
}
|
|
175
181
|
attachedNodes.removeAll { $0.node === node }
|
|
176
182
|
|
|
177
183
|
Logger.debug("[\(SharedAudioEngine.TAG)] Node detached")
|
|
@@ -194,8 +200,12 @@ class SharedAudioEngine {
|
|
|
194
200
|
for info in attachedNodes {
|
|
195
201
|
info.node.pause()
|
|
196
202
|
info.node.stop()
|
|
197
|
-
engine
|
|
198
|
-
|
|
203
|
+
// Guard against nodes already removed from engine (e.g. engine
|
|
204
|
+
// died or node was detached by a concurrent disconnect call).
|
|
205
|
+
if info.node.engine === engine {
|
|
206
|
+
engine.disconnectNodeOutput(info.node)
|
|
207
|
+
engine.detach(info.node)
|
|
208
|
+
}
|
|
199
209
|
}
|
|
200
210
|
}
|
|
201
211
|
attachedNodes.removeAll()
|
|
@@ -265,8 +275,10 @@ class SharedAudioEngine {
|
|
|
265
275
|
|
|
266
276
|
// 3. Detach all nodes
|
|
267
277
|
for info in attachedNodes {
|
|
268
|
-
|
|
269
|
-
|
|
278
|
+
if info.node.engine === engine {
|
|
279
|
+
engine.disconnectNodeOutput(info.node)
|
|
280
|
+
engine.detach(info.node)
|
|
281
|
+
}
|
|
270
282
|
}
|
|
271
283
|
Logger.debug("[\(SharedAudioEngine.TAG)] Nodes detached (\(attachedNodes.count))")
|
|
272
284
|
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@edkimmel/expo-audio-stream",
|
|
3
|
-
"version": "0.3.0",
|
|
3
|
+
"version": "0.3.2-beta.0",
|
|
4
4
|
"description": "Expo Play Audio Stream module",
|
|
5
5
|
"main": "build/index.js",
|
|
6
6
|
"types": "build/index.d.ts",
|
|
@@ -45,5 +45,6 @@
|
|
|
45
45
|
"publishConfig": {
|
|
46
46
|
"access": "public",
|
|
47
47
|
"registry": "https://registry.npmjs.org/"
|
|
48
|
-
}
|
|
48
|
+
},
|
|
49
|
+
"stableVersion": "0.3.1"
|
|
49
50
|
}
|
|
@@ -1,1006 +0,0 @@
|
|
|
1
|
-
# Frequency Band Analysis Implementation Plan
|
|
2
|
-
|
|
3
|
-
> **For agentic workers:** REQUIRED: Use superpowers:subagent-driven-development (if subagents available) or superpowers:executing-plans to implement this plan. Steps use checkbox (`- [ ]`) syntax for tracking.
|
|
4
|
-
|
|
5
|
-
**Goal:** Add real-time low/mid/high frequency band analysis to microphone capture and pipeline playback, computed natively using parallel single-pole IIR low-pass filters.
|
|
6
|
-
|
|
7
|
-
**Architecture:** A shared `FrequencyBandAnalyzer` DSP class on each platform (Swift/Kotlin) runs inline in the audio processing loops. For the microphone path, bands are harvested per-callback and added to the existing `AudioData` event. For the pipeline path, bands are accumulated in the write/schedule loop and harvested by a separate timer that emits a new `PipelineFrequencyBands` event at a configurable interval.
|
|
8
|
-
|
|
9
|
-
**Tech Stack:** Swift (iOS), Kotlin (Android), TypeScript (API types + event wiring)
|
|
10
|
-
|
|
11
|
-
**Spec:** `docs/superpowers/specs/2026-03-13-frequency-band-analysis-design.md`
|
|
12
|
-
|
|
13
|
-
---
|
|
14
|
-
|
|
15
|
-
## Chunk 1: FrequencyBandAnalyzer + TypeScript Types
|
|
16
|
-
|
|
17
|
-
### Task 1: Create iOS FrequencyBandAnalyzer
|
|
18
|
-
|
|
19
|
-
**Files:**
|
|
20
|
-
- Create: `ios/FrequencyBandAnalyzer.swift`
|
|
21
|
-
|
|
22
|
-
- [ ] **Step 1: Create the FrequencyBandAnalyzer class**
|
|
23
|
-
|
|
24
|
-
```swift
|
|
25
|
-
import Foundation
|
|
26
|
-
|
|
27
|
-
/// Lightweight IIR-based frequency band analyzer.
|
|
28
|
-
///
|
|
29
|
-
/// Uses two parallel single-pole low-pass filters to split audio into
|
|
30
|
-
/// low / mid / high bands and accumulate RMS energy.
|
|
31
|
-
/// Thread-safe: `processSamples` and `harvest` may be called from
|
|
32
|
-
/// different threads (guarded by an internal lock).
|
|
33
|
-
struct FrequencyBands {
|
|
34
|
-
let low: Float
|
|
35
|
-
let mid: Float
|
|
36
|
-
let high: Float
|
|
37
|
-
|
|
38
|
-
static let zero = FrequencyBands(low: 0, mid: 0, high: 0)
|
|
39
|
-
}
|
|
40
|
-
|
|
41
|
-
class FrequencyBandAnalyzer {
|
|
42
|
-
// ── Coefficients (immutable after init) ──────────────────────────
|
|
43
|
-
private let alphaLow: Float
|
|
44
|
-
private let alphaHigh: Float
|
|
45
|
-
|
|
46
|
-
// ── Filter state ─────────────────────────────────────────────────
|
|
47
|
-
private var lp1: Float = 0 // low-pass accumulator (low crossover)
|
|
48
|
-
private var lp2: Float = 0 // low-pass accumulator (high crossover)
|
|
49
|
-
|
|
50
|
-
// ── Energy accumulators ──────────────────────────────────────────
|
|
51
|
-
private var lowE: Float = 0
|
|
52
|
-
private var midE: Float = 0
|
|
53
|
-
private var highE: Float = 0
|
|
54
|
-
private var count: Int = 0
|
|
55
|
-
|
|
56
|
-
// ── Synchronization ──────────────────────────────────────────────
|
|
57
|
-
private let lock = NSLock()
|
|
58
|
-
|
|
59
|
-
init(sampleRate: Int, lowCrossoverHz: Float = 300, highCrossoverHz: Float = 2000) {
|
|
60
|
-
let sr = Float(sampleRate)
|
|
61
|
-
self.alphaLow = min(1, (2 * Float.pi * lowCrossoverHz) / sr)
|
|
62
|
-
self.alphaHigh = min(1, (2 * Float.pi * highCrossoverHz) / sr)
|
|
63
|
-
}
|
|
64
|
-
|
|
65
|
-
/// Process a batch of PCM16 samples. Accumulates energy — does NOT
|
|
66
|
-
/// produce output. Call `harvest()` to read and reset.
|
|
67
|
-
func processSamples(_ samples: UnsafePointer<Int16>, count sampleCount: Int) {
|
|
68
|
-
lock.lock()
|
|
69
|
-
defer { lock.unlock() }
|
|
70
|
-
|
|
71
|
-
for i in 0..<sampleCount {
|
|
72
|
-
let s = Float(samples[i]) / 32768.0
|
|
73
|
-
|
|
74
|
-
lp1 += alphaLow * (s - lp1)
|
|
75
|
-
lp2 += alphaHigh * (s - lp2)
|
|
76
|
-
|
|
77
|
-
let low = lp1
|
|
78
|
-
let high = s - lp2
|
|
79
|
-
let mid = s - low - high
|
|
80
|
-
|
|
81
|
-
lowE += low * low
|
|
82
|
-
midE += mid * mid
|
|
83
|
-
highE += high * high
|
|
84
|
-
count += 1
|
|
85
|
-
}
|
|
86
|
-
}
|
|
87
|
-
|
|
88
|
-
/// Process PCM16 samples from a Data blob (little-endian Int16).
|
|
89
|
-
func processSamplesFromData(_ data: Data) {
|
|
90
|
-
data.withUnsafeBytes { rawBuffer in
|
|
91
|
-
guard let ptr = rawBuffer.baseAddress?.assumingMemoryBound(to: Int16.self) else { return }
|
|
92
|
-
let sampleCount = data.count / 2
|
|
93
|
-
processSamples(ptr, count: sampleCount)
|
|
94
|
-
}
|
|
95
|
-
}
|
|
96
|
-
|
|
97
|
-
/// Read accumulated RMS energy and reset accumulators.
|
|
98
|
-
func harvest() -> FrequencyBands {
|
|
99
|
-
lock.lock()
|
|
100
|
-
defer { lock.unlock() }
|
|
101
|
-
|
|
102
|
-
guard count > 0 else { return .zero }
|
|
103
|
-
|
|
104
|
-
let n = Float(count)
|
|
105
|
-
let result = FrequencyBands(
|
|
106
|
-
low: sqrt(lowE / n),
|
|
107
|
-
mid: sqrt(midE / n),
|
|
108
|
-
high: sqrt(highE / n)
|
|
109
|
-
)
|
|
110
|
-
|
|
111
|
-
lowE = 0
|
|
112
|
-
midE = 0
|
|
113
|
-
highE = 0
|
|
114
|
-
count = 0
|
|
115
|
-
|
|
116
|
-
return result
|
|
117
|
-
}
|
|
118
|
-
|
|
119
|
-
/// Zero all state (filter accumulators + energy).
|
|
120
|
-
func reset() {
|
|
121
|
-
lock.lock()
|
|
122
|
-
defer { lock.unlock() }
|
|
123
|
-
|
|
124
|
-
lp1 = 0
|
|
125
|
-
lp2 = 0
|
|
126
|
-
lowE = 0
|
|
127
|
-
midE = 0
|
|
128
|
-
highE = 0
|
|
129
|
-
count = 0
|
|
130
|
-
}
|
|
131
|
-
}
|
|
132
|
-
```
|
|
133
|
-
|
|
134
|
-
- [ ] **Step 2: Commit**
|
|
135
|
-
|
|
136
|
-
```bash
|
|
137
|
-
git add ios/FrequencyBandAnalyzer.swift
|
|
138
|
-
git commit -m "feat: add iOS FrequencyBandAnalyzer DSP component"
|
|
139
|
-
```
|
|
140
|
-
|
|
141
|
-
---
|
|
142
|
-
|
|
143
|
-
### Task 2: Create Android FrequencyBandAnalyzer
|
|
144
|
-
|
|
145
|
-
**Files:**
|
|
146
|
-
- Create: `android/src/main/java/expo/modules/audiostream/FrequencyBandAnalyzer.kt`
|
|
147
|
-
|
|
148
|
-
- [ ] **Step 1: Create the FrequencyBandAnalyzer class**
|
|
149
|
-
|
|
150
|
-
```kotlin
|
|
151
|
-
package expo.modules.audiostream
|
|
152
|
-
|
|
153
|
-
import java.util.concurrent.locks.ReentrantLock
|
|
154
|
-
import kotlin.concurrent.withLock
|
|
155
|
-
import kotlin.math.PI
|
|
156
|
-
import kotlin.math.min
|
|
157
|
-
import kotlin.math.sqrt
|
|
158
|
-
|
|
159
|
-
/**
|
|
160
|
-
* RMS energy per frequency band, range [0, 1].
|
|
161
|
-
*/
|
|
162
|
-
data class FrequencyBands(
|
|
163
|
-
val low: Float,
|
|
164
|
-
val mid: Float,
|
|
165
|
-
val high: Float
|
|
166
|
-
) {
|
|
167
|
-
companion object {
|
|
168
|
-
val ZERO = FrequencyBands(0f, 0f, 0f)
|
|
169
|
-
}
|
|
170
|
-
}
|
|
171
|
-
|
|
172
|
-
/**
|
|
173
|
-
* Lightweight IIR-based frequency band analyzer.
|
|
174
|
-
*
|
|
175
|
-
* Uses two parallel single-pole low-pass filters to split audio into
|
|
176
|
-
* low / mid / high bands and accumulate RMS energy.
|
|
177
|
-
* Thread-safe: [processSamples] and [harvest] may be called from
|
|
178
|
-
* different threads (guarded by an internal lock).
|
|
179
|
-
*/
|
|
180
|
-
class FrequencyBandAnalyzer(
|
|
181
|
-
sampleRate: Int,
|
|
182
|
-
lowCrossoverHz: Float = 300f,
|
|
183
|
-
highCrossoverHz: Float = 2000f
|
|
184
|
-
) {
|
|
185
|
-
// ── Coefficients (immutable after init) ──────────────────────────
|
|
186
|
-
private val alphaLow: Float = min(1f, (2f * PI.toFloat() * lowCrossoverHz) / sampleRate)
|
|
187
|
-
private val alphaHigh: Float = min(1f, (2f * PI.toFloat() * highCrossoverHz) / sampleRate)
|
|
188
|
-
|
|
189
|
-
// ── Filter state ─────────────────────────────────────────────────
|
|
190
|
-
private var lp1: Float = 0f
|
|
191
|
-
private var lp2: Float = 0f
|
|
192
|
-
|
|
193
|
-
// ── Energy accumulators ──────────────────────────────────────────
|
|
194
|
-
private var lowE: Float = 0f
|
|
195
|
-
private var midE: Float = 0f
|
|
196
|
-
private var highE: Float = 0f
|
|
197
|
-
private var count: Int = 0
|
|
198
|
-
|
|
199
|
-
// ── Synchronization ──────────────────────────────────────────────
|
|
200
|
-
private val lock = ReentrantLock()
|
|
201
|
-
|
|
202
|
-
/**
|
|
203
|
-
* Process a batch of PCM16 samples. Accumulates energy — does NOT
|
|
204
|
-
* produce output. Call [harvest] to read and reset.
|
|
205
|
-
*/
|
|
206
|
-
fun processSamples(samples: ShortArray, length: Int = samples.size) {
|
|
207
|
-
lock.withLock {
|
|
208
|
-
for (i in 0 until length) {
|
|
209
|
-
val s = samples[i].toFloat() / 32768f
|
|
210
|
-
|
|
211
|
-
lp1 += alphaLow * (s - lp1)
|
|
212
|
-
lp2 += alphaHigh * (s - lp2)
|
|
213
|
-
|
|
214
|
-
val low = lp1
|
|
215
|
-
val high = s - lp2
|
|
216
|
-
val mid = s - low - high
|
|
217
|
-
|
|
218
|
-
lowE += low * low
|
|
219
|
-
midE += mid * mid
|
|
220
|
-
highE += high * high
|
|
221
|
-
count++
|
|
222
|
-
}
|
|
223
|
-
}
|
|
224
|
-
}
|
|
225
|
-
|
|
226
|
-
/**
|
|
227
|
-
* Process PCM16 samples from a ByteArray (little-endian Int16).
|
|
228
|
-
*/
|
|
229
|
-
fun processSamplesFromBytes(data: ByteArray, length: Int = data.size) {
|
|
230
|
-
val sampleCount = length / 2
|
|
231
|
-
val samples = ShortArray(sampleCount)
|
|
232
|
-
val buf = java.nio.ByteBuffer.wrap(data, 0, length)
|
|
233
|
-
.order(java.nio.ByteOrder.LITTLE_ENDIAN)
|
|
234
|
-
.asShortBuffer()
|
|
235
|
-
buf.get(samples)
|
|
236
|
-
processSamples(samples, sampleCount)
|
|
237
|
-
}
|
|
238
|
-
|
|
239
|
-
/**
|
|
240
|
-
* Read accumulated RMS energy and reset accumulators.
|
|
241
|
-
*/
|
|
242
|
-
fun harvest(): FrequencyBands {
|
|
243
|
-
lock.withLock {
|
|
244
|
-
if (count == 0) return FrequencyBands.ZERO
|
|
245
|
-
|
|
246
|
-
val n = count.toFloat()
|
|
247
|
-
val result = FrequencyBands(
|
|
248
|
-
low = sqrt(lowE / n),
|
|
249
|
-
mid = sqrt(midE / n),
|
|
250
|
-
high = sqrt(highE / n)
|
|
251
|
-
)
|
|
252
|
-
|
|
253
|
-
lowE = 0f
|
|
254
|
-
midE = 0f
|
|
255
|
-
highE = 0f
|
|
256
|
-
count = 0
|
|
257
|
-
|
|
258
|
-
return result
|
|
259
|
-
}
|
|
260
|
-
}
|
|
261
|
-
|
|
262
|
-
/**
|
|
263
|
-
* Zero all state (filter accumulators + energy).
|
|
264
|
-
*/
|
|
265
|
-
fun reset() {
|
|
266
|
-
lock.withLock {
|
|
267
|
-
lp1 = 0f
|
|
268
|
-
lp2 = 0f
|
|
269
|
-
lowE = 0f
|
|
270
|
-
midE = 0f
|
|
271
|
-
highE = 0f
|
|
272
|
-
count = 0
|
|
273
|
-
}
|
|
274
|
-
}
|
|
275
|
-
}
|
|
276
|
-
```
|
|
277
|
-
|
|
278
|
-
- [ ] **Step 2: Commit**
|
|
279
|
-
|
|
280
|
-
```bash
|
|
281
|
-
git add android/src/main/java/expo/modules/audiostream/FrequencyBandAnalyzer.kt
|
|
282
|
-
git commit -m "feat: add Android FrequencyBandAnalyzer DSP component"
|
|
283
|
-
```
|
|
284
|
-
|
|
285
|
-
---
|
|
286
|
-
|
|
287
|
-
### Task 3: Add TypeScript Types
|
|
288
|
-
|
|
289
|
-
**Files:**
|
|
290
|
-
- Modify: `src/types.ts` (add FrequencyBands, FrequencyBandConfig; extend AudioDataEvent, RecordingConfig)
|
|
291
|
-
- Modify: `src/pipeline/types.ts` (add PipelineFrequencyBandsEvent; extend ConnectPipelineOptions, PipelineEventMap)
|
|
292
|
-
|
|
293
|
-
- [ ] **Step 1: Add new types to `src/types.ts`**
|
|
294
|
-
|
|
295
|
-
Add after the `Encoding` type definition (after line 88):
|
|
296
|
-
|
|
297
|
-
```typescript
|
|
298
|
-
/** RMS energy per frequency band, range [0, 1]. */
|
|
299
|
-
export interface FrequencyBands {
|
|
300
|
-
low: number;
|
|
301
|
-
mid: number;
|
|
302
|
-
high: number;
|
|
303
|
-
}
|
|
304
|
-
|
|
305
|
-
/** Crossover frequency configuration for band analysis. */
|
|
306
|
-
export interface FrequencyBandConfig {
|
|
307
|
-
/** Low/mid crossover in Hz (default 300). */
|
|
308
|
-
lowCrossoverHz?: number;
|
|
309
|
-
/** Mid/high crossover in Hz (default 2000). */
|
|
310
|
-
highCrossoverHz?: number;
|
|
311
|
-
}
|
|
312
|
-
```
|
|
313
|
-
|
|
314
|
-
- [ ] **Step 2: Extend AudioDataEvent**
|
|
315
|
-
|
|
316
|
-
In `src/types.ts`, add to the `AudioDataEvent` interface (after the `soundLevel` field at line 137):
|
|
317
|
-
|
|
318
|
-
```typescript
|
|
319
|
-
/** Frequency band RMS energy, present when recording is active. */
|
|
320
|
-
frequencyBands?: FrequencyBands;
|
|
321
|
-
```
|
|
322
|
-
|
|
323
|
-
- [ ] **Step 3: Extend RecordingConfig**
|
|
324
|
-
|
|
325
|
-
In `src/types.ts`, add to the `RecordingConfig` interface (after the `onAudioStream` field at line 149):
|
|
326
|
-
|
|
327
|
-
```typescript
|
|
328
|
-
/** Optional frequency band crossover configuration. */
|
|
329
|
-
frequencyBandConfig?: FrequencyBandConfig;
|
|
330
|
-
```
|
|
331
|
-
|
|
332
|
-
- [ ] **Step 4: Extend pipeline types in `src/pipeline/types.ts`**
|
|
333
|
-
|
|
334
|
-
Add to `ConnectPipelineOptions` (after the `playbackMode` field, around line 23):
|
|
335
|
-
|
|
336
|
-
```typescript
|
|
337
|
-
/** Interval in ms for PipelineFrequencyBands events (default 100). */
|
|
338
|
-
frequencyBandIntervalMs?: number;
|
|
339
|
-
/** Optional frequency band crossover configuration. */
|
|
340
|
-
frequencyBandConfig?: FrequencyBandConfig;
|
|
341
|
-
```
|
|
342
|
-
|
|
343
|
-
Also add the import at the top of the file (line 6, extend existing import):
|
|
344
|
-
|
|
345
|
-
```typescript
|
|
346
|
-
import { PlaybackMode, FrequencyBandConfig, FrequencyBands } from "../types";
|
|
347
|
-
```
|
|
348
|
-
|
|
349
|
-
Add new event type (after `PipelineAudioFocusResumedEvent`, around line 117):
|
|
350
|
-
|
|
351
|
-
```typescript
|
|
352
|
-
/** Payload for `PipelineFrequencyBands`. */
|
|
353
|
-
export interface PipelineFrequencyBandsEvent extends FrequencyBands {}
|
|
354
|
-
```
|
|
355
|
-
|
|
356
|
-
Add to `PipelineEventMap` (before the closing brace, around line 131):
|
|
357
|
-
|
|
358
|
-
```typescript
|
|
359
|
-
PipelineFrequencyBands: PipelineFrequencyBandsEvent;
|
|
360
|
-
```
|
|
361
|
-
|
|
362
|
-
- [ ] **Step 5: Commit**
|
|
363
|
-
|
|
364
|
-
```bash
|
|
365
|
-
git add src/types.ts src/pipeline/types.ts
|
|
366
|
-
git commit -m "feat: add FrequencyBands TypeScript types and extend events"
|
|
367
|
-
```
|
|
368
|
-
|
|
369
|
-
---
|
|
370
|
-
|
|
371
|
-
## Chunk 2: Pipeline Integration (iOS + Android)
|
|
372
|
-
|
|
373
|
-
### Task 4: Add onFrequencyBands to PipelineListener and wire iOS pipeline
|
|
374
|
-
|
|
375
|
-
**Files:**
|
|
376
|
-
- Modify: `ios/AudioPipeline.swift` — add `onFrequencyBands` to protocol (line 13-21), add analyzer + harvest timer to class
|
|
377
|
-
- Modify: `ios/PipelineIntegration.swift` — add event constant, implement new listener method, parse new config in connect()
|
|
378
|
-
|
|
379
|
-
- [ ] **Step 1: Add onFrequencyBands to PipelineListener protocol**
|
|
380
|
-
|
|
381
|
-
In `ios/AudioPipeline.swift`, add to the `PipelineListener` protocol (after `onAudioFocusResumed()` at line 21):
|
|
382
|
-
|
|
383
|
-
```swift
|
|
384
|
-
func onFrequencyBands(low: Float, mid: Float, high: Float)
|
|
385
|
-
```
|
|
386
|
-
|
|
387
|
-
- [ ] **Step 2: Add analyzer and config to AudioPipeline**
|
|
388
|
-
|
|
389
|
-
In `ios/AudioPipeline.swift`, add new config fields after `targetBufferMs` (line 58):
|
|
390
|
-
|
|
391
|
-
```swift
|
|
392
|
-
private let frequencyBandIntervalMs: Int
|
|
393
|
-
private let lowCrossoverHz: Float
|
|
394
|
-
private let highCrossoverHz: Float
|
|
395
|
-
```
|
|
396
|
-
|
|
397
|
-
Add analyzer and timer fields after `zombieTimer` (line 85):
|
|
398
|
-
|
|
399
|
-
```swift
|
|
400
|
-
private var frequencyBandTimer: DispatchSourceTimer?
|
|
401
|
-
private var frequencyBandAnalyzer: FrequencyBandAnalyzer?
|
|
402
|
-
```
|
|
403
|
-
|
|
404
|
-
Update `init` (line 99) to accept the new parameters:
|
|
405
|
-
|
|
406
|
-
```swift
|
|
407
|
-
init(sampleRate: Int, channelCount: Int, targetBufferMs: Int,
|
|
408
|
-
frequencyBandIntervalMs: Int = 100,
|
|
409
|
-
lowCrossoverHz: Float = 300, highCrossoverHz: Float = 2000,
|
|
410
|
-
sharedEngine: SharedAudioEngine, listener: PipelineListener) {
|
|
411
|
-
self.sampleRate = sampleRate
|
|
412
|
-
self.channelCount = channelCount
|
|
413
|
-
self.targetBufferMs = targetBufferMs
|
|
414
|
-
self.frequencyBandIntervalMs = frequencyBandIntervalMs
|
|
415
|
-
self.lowCrossoverHz = lowCrossoverHz
|
|
416
|
-
self.highCrossoverHz = highCrossoverHz
|
|
417
|
-
self.sharedEngine = sharedEngine
|
|
418
|
-
self.listener = listener
|
|
419
|
-
self.frameSizeSamples = max(1, sampleRate * channelCount / 50)
|
|
420
|
-
}
|
|
421
|
-
```
|
|
422
|
-
|
|
423
|
-
- [ ] **Step 3: Create analyzer in connect(), destroy in disconnect()**
|
|
424
|
-
|
|
425
|
-
In `connect()`, after `resetTelemetry()` (line 171), add:
|
|
426
|
-
|
|
427
|
-
```swift
|
|
428
|
-
// ── 8. Frequency band analyzer ───────────────────────────────
|
|
429
|
-
frequencyBandAnalyzer = FrequencyBandAnalyzer(
|
|
430
|
-
sampleRate: sampleRate,
|
|
431
|
-
lowCrossoverHz: lowCrossoverHz,
|
|
432
|
-
highCrossoverHz: highCrossoverHz
|
|
433
|
-
)
|
|
434
|
-
startFrequencyBandTimer()
|
|
435
|
-
```
|
|
436
|
-
|
|
437
|
-
In `disconnect()`, after cancelling `zombieTimer` (line 193-194), add:
|
|
438
|
-
|
|
439
|
-
```swift
|
|
440
|
-
frequencyBandTimer?.cancel()
|
|
441
|
-
frequencyBandTimer = nil
|
|
442
|
-
frequencyBandAnalyzer = nil
|
|
443
|
-
```
|
|
444
|
-
|
|
445
|
-
Also add to `engineDidDie` (after `zombieTimer = nil` at line 292):
|
|
446
|
-
|
|
447
|
-
```swift
|
|
448
|
-
frequencyBandTimer?.cancel()
|
|
449
|
-
frequencyBandTimer = nil
|
|
450
|
-
frequencyBandAnalyzer = nil
|
|
451
|
-
```
|
|
452
|
-
|
|
453
|
-
- [ ] **Step 4: Call processSamples in scheduleNextBuffer**
|
|
454
|
-
|
|
455
|
-
In `scheduleNextBuffer()`, after `buf.read(dest: &renderSamples, length: frameSizeSamples)` (line 422) and before the Float32 conversion, add:
|
|
456
|
-
|
|
457
|
-
```swift
|
|
458
|
-
// Analyze frequency bands on the raw Int16 samples
|
|
459
|
-
if !isInterrupted {
|
|
460
|
-
renderSamples.withUnsafeBufferPointer { bufferPtr in
|
|
461
|
-
if let baseAddress = bufferPtr.baseAddress {
|
|
462
|
-
frequencyBandAnalyzer?.processSamples(baseAddress, count: frameSizeSamples)
|
|
463
|
-
}
|
|
464
|
-
}
|
|
465
|
-
}
|
|
466
|
-
```
|
|
467
|
-
|
|
468
|
-
- [ ] **Step 5: Reset analyzer on turn boundary**
|
|
469
|
-
|
|
470
|
-
In `pushAudio()`, inside the turn boundary block (after `setState(.streaming)` at line 338), add:
|
|
471
|
-
|
|
472
|
-
```swift
|
|
473
|
-
frequencyBandAnalyzer?.reset()
|
|
474
|
-
```
|
|
475
|
-
|
|
476
|
-
In `invalidateTurn()`, after `setState(.idle)` (line 379), add:
|
|
477
|
-
|
|
478
|
-
```swift
|
|
479
|
-
frequencyBandAnalyzer?.reset()
|
|
480
|
-
```
|
|
481
|
-
|
|
482
|
-
- [ ] **Step 6: Add frequency band timer method**
|
|
483
|
-
|
|
484
|
-
Add after `startZombieDetection()` (before the "Internal helpers" section):
|
|
485
|
-
|
|
486
|
-
```swift
|
|
487
|
-
// ════════════════════════════════════════════════════════════════════
|
|
488
|
-
// Frequency band emission
|
|
489
|
-
// ════════════════════════════════════════════════════════════════════
|
|
490
|
-
|
|
491
|
-
private func startFrequencyBandTimer() {
|
|
492
|
-
let intervalSec = TimeInterval(frequencyBandIntervalMs) / 1000.0
|
|
493
|
-
let timer = DispatchSource.makeTimerSource(queue: .main)
|
|
494
|
-
timer.schedule(deadline: .now() + intervalSec, repeating: intervalSec)
|
|
495
|
-
timer.setEventHandler { [weak self] in
|
|
496
|
-
guard let self = self, self.running,
|
|
497
|
-
let analyzer = self.frequencyBandAnalyzer else { return }
|
|
498
|
-
let bands = analyzer.harvest()
|
|
499
|
-
self.listener?.onFrequencyBands(low: bands.low, mid: bands.mid, high: bands.high)
|
|
500
|
-
}
|
|
501
|
-
timer.resume()
|
|
502
|
-
frequencyBandTimer = timer
|
|
503
|
-
}
|
|
504
|
-
```
|
|
505
|
-
|
|
506
|
-
- [ ] **Step 7: Wire PipelineIntegration**
|
|
507
|
-
|
|
508
|
-
In `ios/PipelineIntegration.swift`:
|
|
509
|
-
|
|
510
|
-
Add event constant after `EVENT_AUDIO_FOCUS_RESUMED` (line 24):
|
|
511
|
-
|
|
512
|
-
```swift
|
|
513
|
-
static let EVENT_FREQUENCY_BANDS = "PipelineFrequencyBands"
|
|
514
|
-
```
|
|
515
|
-
|
|
516
|
-
Update `connect()` method to parse new options. After the `targetBufferMs` parsing (line 58), add:
|
|
517
|
-
|
|
518
|
-
```swift
|
|
519
|
-
let frequencyBandIntervalMs = (options["frequencyBandIntervalMs"] as? NSNumber)?.intValue ?? 100
|
|
520
|
-
let bandConfig = options["frequencyBandConfig"] as? [String: Any]
|
|
521
|
-
let lowCrossoverHz = (bandConfig?["lowCrossoverHz"] as? NSNumber)?.floatValue ?? 300
|
|
522
|
-
let highCrossoverHz = (bandConfig?["highCrossoverHz"] as? NSNumber)?.floatValue ?? 2000
|
|
523
|
-
```
|
|
524
|
-
|
|
525
|
-
Update the `AudioPipeline` constructor call (line 60-66) to pass the new params:
|
|
526
|
-
|
|
527
|
-
```swift
|
|
528
|
-
let p = AudioPipeline(
|
|
529
|
-
sampleRate: sampleRate,
|
|
530
|
-
channelCount: channelCount,
|
|
531
|
-
targetBufferMs: targetBufferMs,
|
|
532
|
-
frequencyBandIntervalMs: frequencyBandIntervalMs,
|
|
533
|
-
lowCrossoverHz: lowCrossoverHz,
|
|
534
|
-
highCrossoverHz: highCrossoverHz,
|
|
535
|
-
sharedEngine: sharedEngine,
|
|
536
|
-
listener: self
|
|
537
|
-
)
|
|
538
|
-
```
|
|
539
|
-
|
|
540
|
-
Add `onFrequencyBands` implementation (after `onAudioFocusResumed`, around line 207):
|
|
541
|
-
|
|
542
|
-
```swift
|
|
543
|
-
func onFrequencyBands(low: Float, mid: Float, high: Float) {
|
|
544
|
-
sendEvent(PipelineIntegration.EVENT_FREQUENCY_BANDS, [
|
|
545
|
-
"low": low, "mid": mid, "high": high
|
|
546
|
-
])
|
|
547
|
-
}
|
|
548
|
-
```
|
|
549
|
-
|
|
550
|
-
- [ ] **Step 8: Register event in ExpoPlayAudioStreamModule.swift**
|
|
551
|
-
|
|
552
|
-
In `ios/ExpoPlayAudioStreamModule.swift`, add to the Events block (after the last `PipelineIntegration.EVENT_*` entry, around line 66):
|
|
553
|
-
|
|
554
|
-
```swift
|
|
555
|
-
PipelineIntegration.EVENT_FREQUENCY_BANDS,
|
|
556
|
-
```
|
|
557
|
-
|
|
558
|
-
- [ ] **Step 9: Commit**
|
|
559
|
-
|
|
560
|
-
```bash
|
|
561
|
-
git add ios/AudioPipeline.swift ios/PipelineIntegration.swift ios/ExpoPlayAudioStreamModule.swift
|
|
562
|
-
git commit -m "feat: wire frequency band analysis into iOS pipeline"
|
|
563
|
-
```
|
|
564
|
-
|
|
565
|
-
---
|
|
566
|
-
|
|
567
|
-
### Task 5: Wire Android pipeline
|
|
568
|
-
|
|
569
|
-
**Files:**
|
|
570
|
-
- Modify: `android/src/main/java/expo/modules/audiostream/pipeline/AudioPipeline.kt` — add to PipelineListener, add analyzer + harvest timer
|
|
571
|
-
- Modify: `android/src/main/java/expo/modules/audiostream/pipeline/PipelineIntegration.kt` — add event constant, implement listener, parse config
|
|
572
|
-
- Modify: `android/src/main/java/expo/modules/audiostream/ExpoPlayAudioStreamModule.kt` — register event
|
|
573
|
-
|
|
574
|
-
- [ ] **Step 1: Add onFrequencyBands to PipelineListener interface**
|
|
575
|
-
|
|
576
|
-
In `android/.../pipeline/AudioPipeline.kt`, add to the `PipelineListener` interface (after `onAudioFocusResumed()` at line 51):
|
|
577
|
-
|
|
578
|
-
```kotlin
|
|
579
|
-
fun onFrequencyBands(low: Float, mid: Float, high: Float)
|
|
580
|
-
```
|
|
581
|
-
|
|
582
|
-
- [ ] **Step 2: Add analyzer config to AudioPipeline constructor and fields**
|
|
583
|
-
|
|
584
|
-
Update the `AudioPipeline` class constructor (line 82) to accept new params:
|
|
585
|
-
|
|
586
|
-
```kotlin
|
|
587
|
-
class AudioPipeline(
|
|
588
|
-
private val context: Context,
|
|
589
|
-
private val sampleRate: Int,
|
|
590
|
-
private val channelCount: Int,
|
|
591
|
-
private val targetBufferMs: Int,
|
|
592
|
-
private val frequencyBandIntervalMs: Int = 100,
|
|
593
|
-
private val lowCrossoverHz: Float = 300f,
|
|
594
|
-
private val highCrossoverHz: Float = 2000f,
|
|
595
|
-
private val listener: PipelineListener
|
|
596
|
-
)
|
|
597
|
-
```
|
|
598
|
-
|
|
599
|
-
Add analyzer and timer fields after `lastReportedUnderrunCount` (line 185):
|
|
600
|
-
|
|
601
|
-
```kotlin
|
|
602
|
-
// ── Frequency band analysis ──────────────────────────────────────
|
|
603
|
-
private var frequencyBandAnalyzer: FrequencyBandAnalyzer? = null
|
|
604
|
-
private var frequencyBandThread: Thread? = null
|
|
605
|
-
```
|
|
606
|
-
|
|
607
|
-
- [ ] **Step 3: Create analyzer in connect(), destroy in disconnect()**
|
|
608
|
-
|
|
609
|
-
In `connect()`, after `resetTelemetry()` (line 261), add:
|
|
610
|
-
|
|
611
|
-
```kotlin
|
|
612
|
-
// ── 8. Frequency band analyzer ──────────────────────────
|
|
613
|
-
frequencyBandAnalyzer = FrequencyBandAnalyzer(
|
|
614
|
-
sampleRate = sampleRate,
|
|
615
|
-
lowCrossoverHz = lowCrossoverHz,
|
|
616
|
-
highCrossoverHz = highCrossoverHz
|
|
617
|
-
)
|
|
618
|
-
startFrequencyBandTimer()
|
|
619
|
-
```
|
|
620
|
-
|
|
621
|
-
In `disconnect()`, after `zombieThread = null` (line 285), add:
|
|
622
|
-
|
|
623
|
-
```kotlin
|
|
624
|
-
// Stop frequency band timer
|
|
625
|
-
frequencyBandThread?.interrupt()
|
|
626
|
-
frequencyBandThread = null
|
|
627
|
-
frequencyBandAnalyzer = null
|
|
628
|
-
```
|
|
629
|
-
|
|
630
|
-
- [ ] **Step 4: Call processSamples in writeLoop**
|
|
631
|
-
|
|
632
|
-
In `writeLoop()`, after `buf.read(frame)` (line 454) and the audio focus silence check, but before `track.write()` (line 463), add:
|
|
633
|
-
|
|
634
|
-
```kotlin
|
|
635
|
-
// Analyze frequency bands on the raw Int16 samples
|
|
636
|
-
if (!audioFocusLost.get()) {
|
|
637
|
-
frequencyBandAnalyzer?.processSamples(frame, frame.size)
|
|
638
|
-
}
|
|
639
|
-
```
|
|
640
|
-
|
|
641
|
-
- [ ] **Step 5: Reset analyzer on turn boundary**
|
|
642
|
-
|
|
643
|
-
In `pushAudio()`, inside the turn boundary block (after `setState(PipelineState.STREAMING)` at line 359), add:
|
|
644
|
-
|
|
645
|
-
```kotlin
|
|
646
|
-
frequencyBandAnalyzer?.reset()
|
|
647
|
-
```
|
|
648
|
-
|
|
649
|
-
In `invalidateTurn()`, **inside** the `turnLock.withLock` block, after `setState(PipelineState.IDLE)` (line 402) and before the closing brace of the `withLock` block, add:
|
|
650
|
-
|
|
651
|
-
```kotlin
|
|
652
|
-
frequencyBandAnalyzer?.reset()
|
|
653
|
-
```
|
|
654
|
-
|
|
655
|
-
- [ ] **Step 6: Add frequency band timer method**
|
|
656
|
-
|
|
657
|
-
Add after `startZombieDetection()` (before the VolumeGuard section):
|
|
658
|
-
|
|
659
|
-
```kotlin
|
|
660
|
-
// ════════════════════════════════════════════════════════════════════
|
|
661
|
-
// Frequency band emission
|
|
662
|
-
// ════════════════════════════════════════════════════════════════════
|
|
663
|
-
|
|
664
|
-
private fun startFrequencyBandTimer() {
|
|
665
|
-
frequencyBandThread = Thread({
|
|
666
|
-
while (running.get() && !Thread.currentThread().isInterrupted) {
|
|
667
|
-
try {
|
|
668
|
-
Thread.sleep(frequencyBandIntervalMs.toLong())
|
|
669
|
-
} catch (_: InterruptedException) {
|
|
670
|
-
break
|
|
671
|
-
}
|
|
672
|
-
|
|
673
|
-
val analyzer = frequencyBandAnalyzer ?: continue
|
|
674
|
-
val bands = analyzer.harvest()
|
|
675
|
-
listener.onFrequencyBands(bands.low, bands.mid, bands.high)
|
|
676
|
-
}
|
|
677
|
-
}, "AudioPipeline-FreqBands").apply {
|
|
678
|
-
isDaemon = true
|
|
679
|
-
start()
|
|
680
|
-
}
|
|
681
|
-
}
|
|
682
|
-
```
|
|
683
|
-
|
|
684
|
-
- [ ] **Step 7: Wire PipelineIntegration**
|
|
685
|
-
|
|
686
|
-
In `android/.../pipeline/PipelineIntegration.kt`:
|
|
687
|
-
|
|
688
|
-
Add event constant (after the last `EVENT_*` constant, around line 94):
|
|
689
|
-
|
|
690
|
-
```kotlin
|
|
691
|
-
private const val EVENT_FREQUENCY_BANDS = "PipelineFrequencyBands"
|
|
692
|
-
```
|
|
693
|
-
|
|
694
|
-
Update `connect()` method to parse new options. After `targetBufferMs` parsing (around line 122), add:
|
|
695
|
-
|
|
696
|
-
```kotlin
|
|
697
|
-
val frequencyBandIntervalMs = (options["frequencyBandIntervalMs"] as? Number)?.toInt() ?: 100
|
|
698
|
-
val bandConfig = options["frequencyBandConfig"] as? Map<*, *>
|
|
699
|
-
val lowCrossoverHz = (bandConfig?.get("lowCrossoverHz") as? Number)?.toFloat() ?: 300f
|
|
700
|
-
val highCrossoverHz = (bandConfig?.get("highCrossoverHz") as? Number)?.toFloat() ?: 2000f
|
|
701
|
-
```
|
|
702
|
-
|
|
703
|
-
Update the `AudioPipeline` constructor call (around line 125) to pass new params:
|
|
704
|
-
|
|
705
|
-
```kotlin
|
|
706
|
-
pipeline = AudioPipeline(
|
|
707
|
-
context = context,
|
|
708
|
-
sampleRate = sampleRate,
|
|
709
|
-
channelCount = channelCount,
|
|
710
|
-
targetBufferMs = targetBufferMs,
|
|
711
|
-
frequencyBandIntervalMs = frequencyBandIntervalMs,
|
|
712
|
-
lowCrossoverHz = lowCrossoverHz,
|
|
713
|
-
highCrossoverHz = highCrossoverHz,
|
|
714
|
-
listener = this
|
|
715
|
-
)
|
|
716
|
-
```
|
|
717
|
-
|
|
718
|
-
Add `onFrequencyBands` implementation (after `onAudioFocusResumed`, around line 304):
|
|
719
|
-
|
|
720
|
-
```kotlin
|
|
721
|
-
override fun onFrequencyBands(low: Float, mid: Float, high: Float) {
|
|
722
|
-
sendEvent(EVENT_FREQUENCY_BANDS, bundleOf(
|
|
723
|
-
"low" to low, "mid" to mid, "high" to high
|
|
724
|
-
))
|
|
725
|
-
}
|
|
726
|
-
```
|
|
727
|
-
|
|
728
|
-
- [ ] **Step 8: Register event in ExpoPlayAudioStreamModule.kt**
|
|
729
|
-
|
|
730
|
-
In `android/.../ExpoPlayAudioStreamModule.kt`, add to the Events block (after the last pipeline event, around line 110):
|
|
731
|
-
|
|
732
|
-
```kotlin
|
|
733
|
-
"PipelineFrequencyBands",
|
|
734
|
-
```
|
|
735
|
-
|
|
736
|
-
- [ ] **Step 9: Commit**
|
|
737
|
-
|
|
738
|
-
```bash
|
|
739
|
-
git add android/src/main/java/expo/modules/audiostream/pipeline/AudioPipeline.kt \
|
|
740
|
-
android/src/main/java/expo/modules/audiostream/pipeline/PipelineIntegration.kt \
|
|
741
|
-
android/src/main/java/expo/modules/audiostream/ExpoPlayAudioStreamModule.kt
|
|
742
|
-
git commit -m "feat: wire frequency band analysis into Android pipeline"
|
|
743
|
-
```
|
|
744
|
-
|
|
745
|
-
---
|
|
746
|
-
|
|
747
|
-
## Chunk 3: Microphone Integration (iOS + Android)
|
|
748
|
-
|
|
749
|
-
### Task 6: Wire iOS microphone frequency bands
|
|
750
|
-
|
|
751
|
-
**Files:**
|
|
752
|
-
- Modify: `ios/MicrophoneDataDelegate.swift` — extend delegate signature
|
|
753
|
-
- Modify: `ios/Microphone.swift` — add analyzer, compute bands, pass to delegate
|
|
754
|
-
- Modify: `ios/ExpoPlayAudioStreamModule.swift` — update delegate implementation to include bands in event
|
|
755
|
-
|
|
756
|
-
- [ ] **Step 1: Update MicrophoneDataDelegate protocol**
|
|
757
|
-
|
|
758
|
-
In `ios/MicrophoneDataDelegate.swift`, change the `onMicrophoneData` signature from:
|
|
759
|
-
|
|
760
|
-
```swift
|
|
761
|
-
func onMicrophoneData(_ microphoneData: Data, _ soundLevel: Float?)
|
|
762
|
-
```
|
|
763
|
-
|
|
764
|
-
to:
|
|
765
|
-
|
|
766
|
-
```swift
|
|
767
|
-
func onMicrophoneData(_ microphoneData: Data, _ soundLevel: Float?, _ frequencyBands: FrequencyBands?)
|
|
768
|
-
```
|
|
769
|
-
|
|
770
|
-
- [ ] **Step 2: Add analyzer to Microphone**
|
|
771
|
-
|
|
772
|
-
In `ios/Microphone.swift`, add a field after `isSilent` (line 29):
|
|
773
|
-
|
|
774
|
-
```swift
|
|
775
|
-
private var frequencyBandAnalyzer: FrequencyBandAnalyzer?
|
|
776
|
-
private var frequencyBandConfig: (lowCrossoverHz: Float, highCrossoverHz: Float)?
|
|
777
|
-
```
|
|
778
|
-
|
|
779
|
-
- [ ] **Step 3: Create analyzer in startRecording, destroy in stopRecording**
|
|
780
|
-
|
|
781
|
-
In `startRecording()`, pass config through. Add a parameter:
|
|
782
|
-
|
|
783
|
-
```swift
|
|
784
|
-
func startRecording(settings: RecordingSettings, intervalMilliseconds: Int,
|
|
785
|
-
frequencyBandConfig: (lowCrossoverHz: Float, highCrossoverHz: Float)? = nil) -> StartRecordingResult? {
|
|
786
|
-
```
|
|
787
|
-
|
|
788
|
-
After `recordingSettings = newSettings` (line 98), add:
|
|
789
|
-
|
|
790
|
-
```swift
|
|
791
|
-
self.frequencyBandConfig = frequencyBandConfig
|
|
792
|
-
// Analyzer uses the desired (target) sample rate, not hardware rate
|
|
793
|
-
let targetRate = Int(settings.desiredSampleRate ?? settings.sampleRate)
|
|
794
|
-
let fbConfig = frequencyBandConfig ?? (lowCrossoverHz: 300, highCrossoverHz: 2000)
|
|
795
|
-
frequencyBandAnalyzer = FrequencyBandAnalyzer(
|
|
796
|
-
sampleRate: targetRate,
|
|
797
|
-
lowCrossoverHz: fbConfig.lowCrossoverHz,
|
|
798
|
-
highCrossoverHz: fbConfig.highCrossoverHz
|
|
799
|
-
)
|
|
800
|
-
```
|
|
801
|
-
|
|
802
|
-
In `stopRecording()`, after `audioEngine.stop()` (line 153), add:
|
|
803
|
-
|
|
804
|
-
```swift
|
|
805
|
-
frequencyBandAnalyzer = nil
|
|
806
|
-
```
|
|
807
|
-
|
|
808
|
-
- [ ] **Step 4: Compute bands in processAudioBuffer**
|
|
809
|
-
|
|
810
|
-
In `processAudioBuffer()`, after the `data` variable is assigned (after the format conversion block ending around line 213) and before `totalDataSize += ...` (line 215), add:
|
|
811
|
-
|
|
812
|
-
```swift
|
|
813
|
-
// Compute frequency bands from the Int16 PCM data
|
|
814
|
-
let bands: FrequencyBands?
|
|
815
|
-
if isSilent {
|
|
816
|
-
bands = .zero
|
|
817
|
-
} else if let analyzer = frequencyBandAnalyzer {
|
|
818
|
-
analyzer.processSamplesFromData(data)
|
|
819
|
-
bands = analyzer.harvest()
|
|
820
|
-
} else {
|
|
821
|
-
bands = nil
|
|
822
|
-
}
|
|
823
|
-
```
|
|
824
|
-
|
|
825
|
-
Update the delegate call (line 218) from:
|
|
826
|
-
|
|
827
|
-
```swift
|
|
828
|
-
self.delegate?.onMicrophoneData(data, powerLevel)
|
|
829
|
-
```
|
|
830
|
-
|
|
831
|
-
to:
|
|
832
|
-
|
|
833
|
-
```swift
|
|
834
|
-
self.delegate?.onMicrophoneData(data, powerLevel, bands)
|
|
835
|
-
```
|
|
836
|
-
|
|
837
|
-
- [ ] **Step 5: Update ExpoPlayAudioStreamModule delegate implementation**
|
|
838
|
-
|
|
839
|
-
In `ios/ExpoPlayAudioStreamModule.swift`, update the `onMicrophoneData` implementation (around line 387).
|
|
840
|
-
|
|
841
|
-
Change the method signature from:
|
|
842
|
-
|
|
843
|
-
```swift
|
|
844
|
-
func onMicrophoneData(_ microphoneData: Data, _ soundLevel: Float?) {
|
|
845
|
-
```
|
|
846
|
-
|
|
847
|
-
to:
|
|
848
|
-
|
|
849
|
-
```swift
|
|
850
|
-
func onMicrophoneData(_ microphoneData: Data, _ soundLevel: Float?, _ frequencyBands: FrequencyBands?) {
|
|
851
|
-
```
|
|
852
|
-
|
|
853
|
-
**Important:** The existing `eventBody` is declared with `let` (immutable). Change `let eventBody: [String: Any]` to `var eventBody: [String: Any]` on line 390. Then after the `soundLevel` entry, add:
|
|
854
|
-
|
|
855
|
-
```swift
|
|
856
|
-
if let bands = frequencyBands {
|
|
857
|
-
eventBody["frequencyBands"] = [
|
|
858
|
-
"low": bands.low,
|
|
859
|
-
"mid": bands.mid,
|
|
860
|
-
"high": bands.high
|
|
861
|
-
]
|
|
862
|
-
}
|
|
863
|
-
```
|
|
864
|
-
|
|
865
|
-
- [ ] **Step 6: Pass frequency band config from module to microphone**
|
|
866
|
-
|
|
867
|
-
In `ios/ExpoPlayAudioStreamModule.swift`, in the `startMicrophone` AsyncFunction (line 153), after the `interval` extraction (line 159), add:
|
|
868
|
-
|
|
869
|
-
```swift
|
|
870
|
-
let fbConfigDict = options["frequencyBandConfig"] as? [String: Any]
|
|
871
|
-
let fbConfig: (lowCrossoverHz: Float, highCrossoverHz: Float)? = fbConfigDict.map {
|
|
872
|
-
(
|
|
873
|
-
lowCrossoverHz: ($0["lowCrossoverHz"] as? NSNumber)?.floatValue ?? 300,
|
|
874
|
-
highCrossoverHz: ($0["highCrossoverHz"] as? NSNumber)?.floatValue ?? 2000
|
|
875
|
-
)
|
|
876
|
-
}
|
|
877
|
-
```
|
|
878
|
-
|
|
879
|
-
Then update the `startRecording` call at line 179 from:
|
|
880
|
-
|
|
881
|
-
```swift
|
|
882
|
-
if let result = self.microphone.startRecording(settings: settings, intervalMilliseconds: interval) {
|
|
883
|
-
```
|
|
884
|
-
|
|
885
|
-
to:
|
|
886
|
-
|
|
887
|
-
```swift
|
|
888
|
-
if let result = self.microphone.startRecording(settings: settings, intervalMilliseconds: interval, frequencyBandConfig: fbConfig) {
|
|
889
|
-
```
|
|
890
|
-
|
|
891
|
-
Also update the route change handler in `ios/Microphone.swift` (line 57) to pass the stored config:
|
|
892
|
-
|
|
893
|
-
```swift
|
|
894
|
-
_ = startRecording(settings: self.recordingSettings!, intervalMilliseconds: 100, frequencyBandConfig: self.frequencyBandConfig)
|
|
895
|
-
```
|
|
896
|
-
|
|
897
|
-
- [ ] **Step 7: Commit**
|
|
898
|
-
|
|
899
|
-
```bash
|
|
900
|
-
git add ios/MicrophoneDataDelegate.swift ios/Microphone.swift ios/ExpoPlayAudioStreamModule.swift
|
|
901
|
-
git commit -m "feat: wire frequency band analysis into iOS microphone"
|
|
902
|
-
```
|
|
903
|
-
|
|
904
|
-
---
|
|
905
|
-
|
|
906
|
-
### Task 7: Wire Android microphone frequency bands
|
|
907
|
-
|
|
908
|
-
**Files:**
|
|
909
|
-
- Modify: `android/src/main/java/expo/modules/audiostream/AudioRecorderManager.kt` — add analyzer, compute bands, emit in event
|
|
910
|
-
|
|
911
|
-
- [ ] **Step 1: Add analyzer field and config**
|
|
912
|
-
|
|
913
|
-
In `AudioRecorderManager.kt`, add fields after `isSilent` (line 38):
|
|
914
|
-
|
|
915
|
-
```kotlin
|
|
916
|
-
private var frequencyBandAnalyzer: FrequencyBandAnalyzer? = null
|
|
917
|
-
```
|
|
918
|
-
|
|
919
|
-
- [ ] **Step 2: Create analyzer in startRecording, destroy in cleanup**
|
|
920
|
-
|
|
921
|
-
In `startRecording()`, after `audioRecord?.startRecording()` (line 155) and effect setup, add:
|
|
922
|
-
|
|
923
|
-
```kotlin
|
|
924
|
-
// Create frequency band analyzer
|
|
925
|
-
val bandConfig = options["frequencyBandConfig"] as? Map<*, *>
|
|
926
|
-
frequencyBandAnalyzer = FrequencyBandAnalyzer(
|
|
927
|
-
sampleRate = recordingConfig.sampleRate,
|
|
928
|
-
lowCrossoverHz = (bandConfig?.get("lowCrossoverHz") as? Number)?.toFloat() ?: 300f,
|
|
929
|
-
highCrossoverHz = (bandConfig?.get("highCrossoverHz") as? Number)?.toFloat() ?: 2000f
|
|
930
|
-
)
|
|
931
|
-
```
|
|
932
|
-
|
|
933
|
-
In `cleanupResources()`, after `streamUuid = null` (line 220), add:
|
|
934
|
-
|
|
935
|
-
```kotlin
|
|
936
|
-
frequencyBandAnalyzer = null
|
|
937
|
-
```
|
|
938
|
-
|
|
939
|
-
- [ ] **Step 3: Compute bands in emitAudioData**
|
|
940
|
-
|
|
941
|
-
In `emitAudioData()`, after `soundLevel` calculation (line 374), add:
|
|
942
|
-
|
|
943
|
-
```kotlin
|
|
944
|
-
// Compute frequency bands
|
|
945
|
-
val bands = if (isSilent) {
|
|
946
|
-
FrequencyBands.ZERO
|
|
947
|
-
} else {
|
|
948
|
-
frequencyBandAnalyzer?.let { analyzer ->
|
|
949
|
-
analyzer.processSamplesFromBytes(audioData, length)
|
|
950
|
-
analyzer.harvest()
|
|
951
|
-
}
|
|
952
|
-
}
|
|
953
|
-
```
|
|
954
|
-
|
|
955
|
-
In the event Bundle construction (inside `mainHandler.post`, around line 388), after the `soundLevel` entry, add:
|
|
956
|
-
|
|
957
|
-
```kotlin
|
|
958
|
-
"frequencyBandsLow" to (bands?.low ?: 0f),
|
|
959
|
-
"frequencyBandsMid" to (bands?.mid ?: 0f),
|
|
960
|
-
"frequencyBandsHigh" to (bands?.high ?: 0f),
|
|
961
|
-
```
|
|
962
|
-
|
|
963
|
-
Use the nested `bundleOf` pattern for consistency with the iOS side and the TypeScript `FrequencyBands` interface structure:
|
|
964
|
-
|
|
965
|
-
```kotlin
|
|
966
|
-
"frequencyBands" to bundleOf(
|
|
967
|
-
"low" to (bands?.low ?: 0f),
|
|
968
|
-
"mid" to (bands?.mid ?: 0f),
|
|
969
|
-
"high" to (bands?.high ?: 0f)
|
|
970
|
-
),
|
|
971
|
-
```
|
|
972
|
-
|
|
973
|
-
Remove the flat key alternative shown above — use only the nested pattern.
|
|
974
|
-
|
|
975
|
-
- [ ] **Step 4: Commit**
|
|
976
|
-
|
|
977
|
-
```bash
|
|
978
|
-
git add android/src/main/java/expo/modules/audiostream/AudioRecorderManager.kt
|
|
979
|
-
git commit -m "feat: wire frequency band analysis into Android microphone"
|
|
980
|
-
```
|
|
981
|
-
|
|
982
|
-
---
|
|
983
|
-
|
|
984
|
-
### Task 8: Build verification
|
|
985
|
-
|
|
986
|
-
- [ ] **Step 1: Verify TypeScript compiles**
|
|
987
|
-
|
|
988
|
-
```bash
|
|
989
|
-
cd "$(git rev-parse --show-toplevel)"  # repo root
|
|
990
|
-
npx tsc --noEmit
|
|
991
|
-
```
|
|
992
|
-
|
|
993
|
-
Expected: No errors.
|
|
994
|
-
|
|
995
|
-
- [ ] **Step 2: Verify the module builds**
|
|
996
|
-
|
|
997
|
-
If an example app exists, build it. Otherwise verify the TypeScript compilation is clean and the native code compiles by checking for syntax errors.
|
|
998
|
-
|
|
999
|
-
- [ ] **Step 3: Final commit**
|
|
1000
|
-
|
|
1001
|
-
```bash
|
|
1002
|
-
git add -A
|
|
1003
|
-
git status
|
|
1004
|
-
# Only commit if there are remaining changes
|
|
1005
|
-
git commit -m "feat: frequency band analysis - build verification clean"
|
|
1006
|
-
```
|
|
@@ -1,276 +0,0 @@
|
|
|
1
|
-
# Frequency Band Analysis — Design Spec
|
|
2
|
-
|
|
3
|
-
## Overview
|
|
4
|
-
|
|
5
|
-
Add real-time low/mid/high frequency band analysis to both microphone capture and pipeline playback in the expo-audio-stream native module. The analysis uses two parallel single-pole IIR (Infinite Impulse Response) low-pass filters — O(1) per sample (O(n) per buffer), no FFT — to split audio into three bands and compute RMS energy for each.
|
|
6
|
-
|
|
7
|
-
## Motivation
|
|
8
|
-
|
|
9
|
-
The JavaScript side of the module does not have visibility into what the native audio engine is actually playing or capturing at the PCM level. Frequency band data must be computed natively where the raw samples are available.
|
|
10
|
-
|
|
11
|
-
## Requirements
|
|
12
|
-
|
|
13
|
-
1. **Microphone**: Emit `{low, mid, high}` frequency band RMS values alongside each `AudioData` event (same interval as mic capture).
|
|
14
|
-
2. **Pipeline playback**: Emit `{low, mid, high}` via a new `PipelineFrequencyBands` event at a configurable interval, independent of chunk size or mic config.
|
|
15
|
-
3. When the pipeline is connected but not actively playing audio, emit near-zero values (not silence/no-emit).
|
|
16
|
-
4. Band crossover frequencies (default 300 Hz low/mid, 2000 Hz mid/high) are configurable at connect/start time.
|
|
17
|
-
5. OTA-safe — no native API surface changes that would require a new binary build beyond the module update itself.
|
|
18
|
-
|
|
19
|
-
## Non-Goals
|
|
20
|
-
|
|
21
|
-
- FFT-based spectral analysis or arbitrary band counts.
|
|
22
|
-
- Frequency analysis for the SoundPlayer (chunk-based) path — pipeline only.
|
|
23
|
-
- Web platform support (native-only feature).
|
|
24
|
-
|
|
25
|
-
---
|
|
26
|
-
|
|
27
|
-
## Architecture
|
|
28
|
-
|
|
29
|
-
### FrequencyBandAnalyzer — Shared DSP Component
|
|
30
|
-
|
|
31
|
-
A pure-math class on each platform (Swift / Kotlin) encapsulating the IIR filter state and energy accumulation. No threading or platform audio dependencies.
|
|
32
|
-
|
|
33
|
-
#### State
|
|
34
|
-
|
|
35
|
-
| Field | Type | Description |
|
|
36
|
-
|-------|------|-------------|
|
|
37
|
-
| `lp1` | Float | Low-pass accumulator 1 (low crossover) |
|
|
38
|
-
| `lp2` | Float | Low-pass accumulator 2 (high crossover) |
|
|
39
|
-
| `alphaLow` | Float | Precomputed coefficient: `min(1, 2*PI*lowCrossover/sampleRate)` — forward-Euler approximation of the one-pole coefficient; accurate while the crossover is well below the sample rate |
|
|
40
|
-
| `alphaHigh` | Float | Precomputed coefficient: `min(1, 2*PI*highCrossover/sampleRate)` |
|
|
41
|
-
| `lowE` | Float | Accumulated low-band energy (sum of squares) |
|
|
42
|
-
| `midE` | Float | Accumulated mid-band energy |
|
|
43
|
-
| `highE` | Float | Accumulated high-band energy |
|
|
44
|
-
| `count` | Int | Number of samples processed since last harvest |
|
|
45
|
-
|
|
46
|
-
#### Methods
|
|
47
|
-
|
|
48
|
-
| Method | Description |
|
|
49
|
-
|--------|-------------|
|
|
50
|
-
| `init(sampleRate, lowCrossoverHz=300, highCrossoverHz=2000)` | Precompute alpha coefficients from sample rate and crossover frequencies |
|
|
51
|
-
| `processSamples(samples: Int16[], length: Int)` | Run IIR filter over a batch of PCM16 samples, accumulate energy. Called from write/schedule loop. |
|
|
52
|
-
| `harvest() -> {low, mid, high}` | Return RMS values (`sqrt(energy/count)`), reset accumulators to zero. Called from timer or inline. |
|
|
53
|
-
| `reset()` | Zero all state (filter accumulators + energy). Called on turn change / disconnect / stop. |
|
|
54
|
-
|
|
55
|
-
#### DSP Algorithm (per sample)
|
|
56
|
-
|
|
57
|
-
```
|
|
58
|
-
s = int16sample / 32768.0 // normalize to [-1, 1) (matches existing codebase convention)
|
|
59
|
-
lp1 += alphaLow * (s - lp1) // independent low-pass at 300 Hz
|
|
60
|
-
lp2 += alphaHigh * (s - lp2) // independent low-pass at 2000 Hz
|
|
61
|
-
low = lp1 // below 300 Hz
|
|
62
|
-
high = s - lp2 // above 2000 Hz
|
|
63
|
-
mid = s - low - high // 300–2000 Hz (= lp2 - lp1)
|
|
64
|
-
lowE += low * low
|
|
65
|
-
midE += mid * mid
|
|
66
|
-
highE += high * high
|
|
67
|
-
count += 1
|
|
68
|
-
```
|
|
69
|
-
|
|
70
|
-
Note: The two filters operate in parallel on the same input `s` (not cascaded).
|
|
71
|
-
For stereo audio, interleaved samples are processed as a single stream — the filter
|
|
72
|
-
state spans both channels, which is intentional since we only need aggregate energy
|
|
73
|
-
levels, not per-channel analysis.
|
|
74
|
-
|
|
75
|
-
#### Synchronization
|
|
76
|
-
|
|
77
|
-
- **Pipeline analyzer**: `processSamples()` runs on write/schedule thread; `harvest()` runs on timer thread. A simple mutex (NSLock / ReentrantLock) guards the accumulator read/reset. Contention is negligible — harvest fires every 50–100ms and takes microseconds.
|
|
78
|
-
- **Microphone analyzer**: No synchronization needed — both `processSamples()` and `harvest()` run in the same callback/thread.
|
|
79
|
-
|
|
80
|
-
---
|
|
81
|
-
|
|
82
|
-
### Pipeline Integration (Playback Bands)
|
|
83
|
-
|
|
84
|
-
#### Injection Point — processSamples
|
|
85
|
-
|
|
86
|
-
- **iOS** (`AudioPipeline.scheduleNextBuffer`): After `buf.read(dest: &renderSamples, ...)` populates the Int16 array, call `analyzer.processSamples(renderSamples, frameSizeSamples)` before the Float32 conversion.
|
|
87
|
-
- **Android** (`AudioPipeline.writeLoop`): After `buf.read(frame)` populates the ShortArray, call `analyzer.processSamples(frame, frame.size)` before `track.write()`.
|
|
88
|
-
|
|
89
|
-
Both paths already have the Int16 samples in a local variable — zero additional allocation.
|
|
90
|
-
|
|
91
|
-
#### Harvest Timer
|
|
92
|
-
|
|
93
|
-
A new timer created in `connect()`, destroyed in `disconnect()`:
|
|
94
|
-
|
|
95
|
-
- **iOS**: `DispatchSourceTimer` on `.main`, interval from config.
|
|
96
|
-
- **Android**: Daemon thread with `Thread.sleep(intervalMs)`.
|
|
97
|
-
|
|
98
|
-
The timer calls `analyzer.harvest()` and emits a `PipelineFrequencyBands` event through the `PipelineListener` -> `PipelineIntegration` -> Expo event bridge. This requires adding a new method to the `PipelineListener` protocol (iOS) and interface (Android):
|
|
99
|
-
|
|
100
|
-
- iOS: `func onFrequencyBands(low: Float, mid: Float, high: Float)`
|
|
101
|
-
- Android: `fun onFrequencyBands(low: Float, mid: Float, high: Float)`
|
|
102
|
-
|
|
103
|
-
**Default interval**: `frequencyBandIntervalMs` defaults to 100ms. The default is applied in the native `PipelineIntegration.connect()` on both platforms when parsing options (same pattern as `targetBufferMs` defaulting to 80).
|
|
104
|
-
|
|
105
|
-
#### Idle/Drained Behavior
|
|
106
|
-
|
|
107
|
-
When the pipeline is connected but idle/drained, the write loop continues running (writing silence from the jitter buffer). The analyzer accumulates near-zero energy from silence samples, so `harvest()` naturally returns values near zero without special-casing.
|
|
108
|
-
|
|
109
|
-
#### Turn Boundary
|
|
110
|
-
|
|
111
|
-
On turn change (`pushAudio` with `isFirstChunk` or `invalidateTurn`), call `analyzer.reset()` to clear stale filter state from the previous turn.
|
|
112
|
-
|
|
113
|
-
#### Lifecycle
|
|
114
|
-
|
|
115
|
-
| Event | Action |
|
|
116
|
-
|-------|--------|
|
|
117
|
-
| `connect()` | Create analyzer, start harvest timer |
|
|
118
|
-
| `disconnect()` | Stop harvest timer, destroy analyzer |
|
|
119
|
-
| Turn boundary | `analyzer.reset()` |
|
|
120
|
-
| Route change / engine rebuild | Analyzer state preserved (continues accumulating) |
|
|
121
|
-
|
|
122
|
-
---
|
|
123
|
-
|
|
124
|
-
### Microphone Integration
|
|
125
|
-
|
|
126
|
-
#### Injection Point
|
|
127
|
-
|
|
128
|
-
- **iOS** (`Microphone.processAudioBuffer`): After resampling and Int16 conversion (which produces a `Data` object), extract the Int16 samples into a temporary `[Int16]` array (or provide a `processSamplesFromData(_ data: Data)` convenience method on the analyzer that interprets the bytes as little-endian Int16). Call `processSamples()` then `harvest()` inline — each tap callback = one interval.
|
|
129
|
-
- **Android** (`AudioRecorderManager.emitAudioData`): Wrap the raw `audioData: ByteArray` in a `ByteBuffer` with `LITTLE_ENDIAN` order and read as a `ShortArray` (same pattern used in `AudioPipeline.pushAudio`). Call `processSamples()` then `harvest()` inline — each `read()` = one interval.
|
|
130
|
-
|
|
131
|
-
#### Event Delivery
|
|
132
|
-
|
|
133
|
-
Add `frequencyBands` fields to the existing `AudioData` event payload:
|
|
134
|
-
|
|
135
|
-
- iOS: Add to the dictionary passed to `onMicrophoneData` delegate callback
|
|
136
|
-
- Android: Add to the Bundle in `emitAudioData`
|
|
137
|
-
|
|
138
|
-
#### Silent Mode
|
|
139
|
-
|
|
140
|
-
When `isSilent` is true, skip frequency analysis and emit `{low: 0, mid: 0, high: 0}`.
|
|
141
|
-
|
|
142
|
-
#### Lifecycle
|
|
143
|
-
|
|
144
|
-
| Event | Action |
|
|
145
|
-
|-------|--------|
|
|
146
|
-
| `startRecording()` | Create analyzer with target sample rate and config |
|
|
147
|
-
| `stopRecording()` | Destroy analyzer |
|
|
148
|
-
|
|
149
|
-
---
|
|
150
|
-
|
|
151
|
-
## TypeScript API Changes
|
|
152
|
-
|
|
153
|
-
### New Types
|
|
154
|
-
|
|
155
|
-
```typescript
|
|
156
|
-
/** RMS energy per frequency band, range [0, 1]. */
|
|
157
|
-
export interface FrequencyBands {
|
|
158
|
-
low: number;
|
|
159
|
-
mid: number;
|
|
160
|
-
high: number;
|
|
161
|
-
}
|
|
162
|
-
|
|
163
|
-
/** Crossover frequency configuration. */
|
|
164
|
-
export interface FrequencyBandConfig {
|
|
165
|
-
/** Low/mid crossover in Hz (default 300). */
|
|
166
|
-
lowCrossoverHz?: number;
|
|
167
|
-
/** Mid/high crossover in Hz (default 2000). */
|
|
168
|
-
highCrossoverHz?: number;
|
|
169
|
-
}
|
|
170
|
-
```
|
|
171
|
-
|
|
172
|
-
### Modified Types
|
|
173
|
-
|
|
174
|
-
#### AudioDataEvent (microphone)
|
|
175
|
-
|
|
176
|
-
```typescript
|
|
177
|
-
export interface AudioDataEvent {
|
|
178
|
-
// ... existing fields ...
|
|
179
|
-
/** Frequency band RMS energy, present when recording is active. */
|
|
180
|
-
frequencyBands?: FrequencyBands;
|
|
181
|
-
}
|
|
182
|
-
```
|
|
183
|
-
|
|
184
|
-
#### RecordingConfig (microphone)
|
|
185
|
-
|
|
186
|
-
```typescript
|
|
187
|
-
export interface RecordingConfig {
|
|
188
|
-
// ... existing fields ...
|
|
189
|
-
/** Optional frequency band crossover configuration. */
|
|
190
|
-
frequencyBandConfig?: FrequencyBandConfig;
|
|
191
|
-
}
|
|
192
|
-
```
|
|
193
|
-
|
|
194
|
-
#### ConnectPipelineOptions (pipeline)
|
|
195
|
-
|
|
196
|
-
```typescript
|
|
197
|
-
export interface ConnectPipelineOptions {
|
|
198
|
-
// ... existing fields ...
|
|
199
|
-
/** Interval in ms for PipelineFrequencyBands events (default 100). */
|
|
200
|
-
frequencyBandIntervalMs?: number;
|
|
201
|
-
/** Optional frequency band crossover configuration. */
|
|
202
|
-
frequencyBandConfig?: FrequencyBandConfig;
|
|
203
|
-
}
|
|
204
|
-
```
|
|
205
|
-
|
|
206
|
-
### New Event
|
|
207
|
-
|
|
208
|
-
```typescript
|
|
209
|
-
/** Payload for PipelineFrequencyBands event. */
|
|
210
|
-
export interface PipelineFrequencyBandsEvent extends FrequencyBands {}
|
|
211
|
-
|
|
212
|
-
// Added to PipelineEventMap:
|
|
213
|
-
export interface PipelineEventMap {
|
|
214
|
-
// ... existing events ...
|
|
215
|
-
PipelineFrequencyBands: PipelineFrequencyBandsEvent;
|
|
216
|
-
}
|
|
217
|
-
```
|
|
218
|
-
|
|
219
|
-
### Usage Example
|
|
220
|
-
|
|
221
|
-
```typescript
|
|
222
|
-
// Microphone — bands arrive with each AudioData event
|
|
223
|
-
stream.subscribeToAudioEvents(async (event) => {
|
|
224
|
-
if (event.frequencyBands) {
|
|
225
|
-
updateMicVisualization(event.frequencyBands);
|
|
226
|
-
}
|
|
227
|
-
});
|
|
228
|
-
|
|
229
|
-
// Pipeline — bands arrive at configured interval
|
|
230
|
-
pipeline.subscribe('PipelineFrequencyBands', (bands) => {
|
|
231
|
-
updatePlaybackVisualization(bands);
|
|
232
|
-
});
|
|
233
|
-
|
|
234
|
-
// Configuration
|
|
235
|
-
await pipeline.connect({
|
|
236
|
-
sampleRate: 24000,
|
|
237
|
-
frequencyBandIntervalMs: 50,
|
|
238
|
-
frequencyBandConfig: { lowCrossoverHz: 250, highCrossoverHz: 2500 },
|
|
239
|
-
});
|
|
240
|
-
```
|
|
241
|
-
|
|
242
|
-
---
|
|
243
|
-
|
|
244
|
-
## File Changes Summary
|
|
245
|
-
|
|
246
|
-
### New Files
|
|
247
|
-
|
|
248
|
-
| File | Description |
|
|
249
|
-
|------|-------------|
|
|
250
|
-
| `ios/FrequencyBandAnalyzer.swift` | iOS DSP component |
|
|
251
|
-
| `android/.../FrequencyBandAnalyzer.kt` | Android DSP component |
|
|
252
|
-
|
|
253
|
-
### Modified Files
|
|
254
|
-
|
|
255
|
-
| File | Change |
|
|
256
|
-
|------|--------|
|
|
257
|
-
| `ios/AudioPipeline.swift` | Add analyzer field, accept frequency band config in init, call processSamples in scheduleNextBuffer, add harvest timer, reset on turn boundary, add `onFrequencyBands` to PipelineListener protocol |
|
|
258
|
-
| `ios/PipelineIntegration.swift` | Add `PipelineFrequencyBands` event constant, parse new config options in `connect()`, forward from new listener method |
|
|
259
|
-
| `ios/Microphone.swift` | Add analyzer field, compute bands in processAudioBuffer, pass to delegate |
|
|
260
|
-
| `ios/MicrophoneDataDelegate.swift` | Update delegate method signature to include frequency bands |
|
|
261
|
-
| `android/.../pipeline/AudioPipeline.kt` | Add analyzer field, accept frequency band config in constructor, call processSamples in writeLoop, add harvest timer thread, reset on turn boundary, add `onFrequencyBands` to PipelineListener interface |
|
|
262
|
-
| `android/.../pipeline/PipelineIntegration.kt` | Add event constant, parse new config options in `connect()`, forward frequency bands event |
|
|
263
|
-
| `android/.../AudioRecorderManager.kt` | Add analyzer field, compute bands in emitAudioData |
|
|
264
|
-
| `src/types.ts` | Add FrequencyBands, FrequencyBandConfig types; extend AudioDataEvent, RecordingConfig |
|
|
265
|
-
| `src/pipeline/types.ts` | Add PipelineFrequencyBandsEvent; extend ConnectPipelineOptions, PipelineEventMap |
|
|
266
|
-
| `ios/ExpoPlayAudioStreamModule.swift` | Register new PipelineFrequencyBands event name |
|
|
267
|
-
| `android/.../ExpoPlayAudioStreamModule.kt` | Register new PipelineFrequencyBands event name |
|
|
268
|
-
|
|
269
|
-
---
|
|
270
|
-
|
|
271
|
-
## Edge Cases
|
|
272
|
-
|
|
273
|
-
1. **Very short buffers** (< 10 samples): `harvest()` returns zeros if `count == 0` to avoid division by zero.
|
|
274
|
-
2. **Sample rate change**: Analyzer is destroyed and recreated — alpha coefficients depend on sample rate.
|
|
275
|
-
3. **Mono vs stereo**: For stereo pipeline audio, process interleaved samples treating L+R as a single stream. The IIR filter state will span both channels, which causes slight cross-channel smearing — acceptable since we need aggregate energy levels, not per-channel analysis.
|
|
276
|
-
4. **Float overflow**: Energy accumulation uses Float (32-bit). At 48kHz with 100ms harvest interval, that's ~4800 samples — well within Float32 precision for sum-of-squares accumulation.
|