@edkimmel/expo-audio-stream 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.eslintrc.js +5 -0
- package/.yarnrc.yml +8 -0
- package/NATIVE_EVENTS.md +270 -0
- package/README.md +289 -0
- package/android/build.gradle +92 -0
- package/android/src/main/AndroidManifest.xml +4 -0
- package/android/src/main/java/expo/modules/audiostream/AudioDataEncoder.kt +178 -0
- package/android/src/main/java/expo/modules/audiostream/AudioEffectsManager.kt +107 -0
- package/android/src/main/java/expo/modules/audiostream/AudioPlaybackManager.kt +651 -0
- package/android/src/main/java/expo/modules/audiostream/AudioRecorderManager.kt +509 -0
- package/android/src/main/java/expo/modules/audiostream/Constants.kt +21 -0
- package/android/src/main/java/expo/modules/audiostream/EventSender.kt +7 -0
- package/android/src/main/java/expo/modules/audiostream/ExpoAudioStreamView.kt +7 -0
- package/android/src/main/java/expo/modules/audiostream/ExpoPlayAudioStreamModule.kt +280 -0
- package/android/src/main/java/expo/modules/audiostream/PermissionUtils.kt +16 -0
- package/android/src/main/java/expo/modules/audiostream/RecordingConfig.kt +60 -0
- package/android/src/main/java/expo/modules/audiostream/SoundConfig.kt +46 -0
- package/android/src/main/java/expo/modules/audiostream/pipeline/AudioPipeline.kt +685 -0
- package/android/src/main/java/expo/modules/audiostream/pipeline/JitterBuffer.kt +227 -0
- package/android/src/main/java/expo/modules/audiostream/pipeline/PipelineIntegration.kt +315 -0
- package/app.plugin.js +1 -0
- package/build/ExpoPlayAudioStreamModule.d.ts +3 -0
- package/build/ExpoPlayAudioStreamModule.d.ts.map +1 -0
- package/build/ExpoPlayAudioStreamModule.js +5 -0
- package/build/ExpoPlayAudioStreamModule.js.map +1 -0
- package/build/events.d.ts +36 -0
- package/build/events.d.ts.map +1 -0
- package/build/events.js +25 -0
- package/build/events.js.map +1 -0
- package/build/index.d.ts +125 -0
- package/build/index.d.ts.map +1 -0
- package/build/index.js +222 -0
- package/build/index.js.map +1 -0
- package/build/pipeline/index.d.ts +81 -0
- package/build/pipeline/index.d.ts.map +1 -0
- package/build/pipeline/index.js +140 -0
- package/build/pipeline/index.js.map +1 -0
- package/build/pipeline/types.d.ts +132 -0
- package/build/pipeline/types.d.ts.map +1 -0
- package/build/pipeline/types.js +5 -0
- package/build/pipeline/types.js.map +1 -0
- package/build/types.d.ts +221 -0
- package/build/types.d.ts.map +1 -0
- package/build/types.js +10 -0
- package/build/types.js.map +1 -0
- package/expo-module.config.json +9 -0
- package/ios/AudioPipeline.swift +562 -0
- package/ios/AudioUtils.swift +356 -0
- package/ios/ExpoPlayAudioStream.podspec +27 -0
- package/ios/ExpoPlayAudioStreamModule.swift +436 -0
- package/ios/ExpoPlayAudioStreamView.swift +7 -0
- package/ios/JitterBuffer.swift +208 -0
- package/ios/Logger.swift +7 -0
- package/ios/Microphone.swift +221 -0
- package/ios/MicrophoneDataDelegate.swift +4 -0
- package/ios/PipelineIntegration.swift +214 -0
- package/ios/RecordingResult.swift +10 -0
- package/ios/RecordingSettings.swift +11 -0
- package/ios/SharedAudioEngine.swift +484 -0
- package/ios/SoundConfig.swift +45 -0
- package/ios/SoundPlayer.swift +408 -0
- package/ios/SoundPlayerDelegate.swift +7 -0
- package/package.json +49 -0
- package/plugin/build/index.d.ts +5 -0
- package/plugin/build/index.js +28 -0
- package/plugin/src/index.ts +53 -0
- package/plugin/tsconfig.json +9 -0
- package/plugin/tsconfig.tsbuildinfo +1 -0
- package/src/ExpoPlayAudioStreamModule.ts +5 -0
- package/src/events.ts +66 -0
- package/src/index.ts +359 -0
- package/src/pipeline/index.ts +216 -0
- package/src/pipeline/types.ts +169 -0
- package/src/types.ts +270 -0
- package/tsconfig.json +9 -0
|
@@ -0,0 +1,685 @@
|
|
|
1
|
+
package expo.modules.audiostream.pipeline
|
|
2
|
+
|
|
3
|
+
import android.content.ContentResolver
|
|
4
|
+
import android.content.Context
|
|
5
|
+
import android.database.ContentObserver
|
|
6
|
+
import android.media.AudioAttributes
|
|
7
|
+
import android.media.AudioFormat
|
|
8
|
+
import android.media.AudioManager
|
|
9
|
+
import android.media.AudioTrack
|
|
10
|
+
import android.os.Bundle
|
|
11
|
+
import android.os.Handler
|
|
12
|
+
import android.os.Looper
|
|
13
|
+
import android.provider.Settings
|
|
14
|
+
import android.util.Base64
|
|
15
|
+
import android.util.Log
|
|
16
|
+
import java.nio.ByteBuffer
|
|
17
|
+
import java.nio.ByteOrder
|
|
18
|
+
import java.util.concurrent.atomic.AtomicBoolean
|
|
19
|
+
import java.util.concurrent.atomic.AtomicInteger
|
|
20
|
+
import java.util.concurrent.atomic.AtomicLong
|
|
21
|
+
import java.util.concurrent.locks.ReentrantLock
|
|
22
|
+
import kotlin.concurrent.withLock
|
|
23
|
+
|
|
24
|
+
// ────────────────────────────────────────────────────────────────────────────
|
|
25
|
+
// Public contracts
|
|
26
|
+
// ────────────────────────────────────────────────────────────────────────────
|
|
27
|
+
|
|
28
|
+
/** Pipeline states reported to JS via [PipelineListener.onStateChanged]. */
enum class PipelineState(val value: String) {
    IDLE("idle"),
    CONNECTING("connecting"),
    STREAMING("streaming"),
    DRAINING("draining"),
    ERROR("error");

    companion object {
        /**
         * Maps a wire-format string back to a state.
         * Unknown or unrecognized values fall back to [IDLE].
         */
        fun fromValue(value: String): PipelineState {
            for (candidate in entries) {
                if (candidate.value == value) {
                    return candidate
                }
            }
            return IDLE
        }
    }
}
|
|
41
|
+
|
|
42
|
+
/** Listener interface — implemented by [PipelineIntegration] to bridge events to JS. */
interface PipelineListener {
    /** Invoked whenever the pipeline transitions to a new [PipelineState]. */
    fun onStateChanged(state: PipelineState)

    /**
     * Invoked once per turn, the first time the jitter buffer reports primed
     * while the write loop is feeding audio for [turnId].
     */
    fun onPlaybackStarted(turnId: String)

    /**
     * Invoked on pipeline failures (e.g. "NOT_CONNECTED", "DECODE_ERROR",
     * "WRITE_ERROR", "CONNECT_FAILED") with a short machine-readable [code]
     * and a human-readable [message].
     */
    fun onError(code: String, message: String)

    /**
     * Invoked by the zombie-detection daemon when the AudioTrack's playback
     * head has not advanced for [stalledMs] while the pipeline believes it is
     * streaming or draining. [playbackHead] is the stalled head position.
     */
    fun onZombieDetected(playbackHead: Long, stalledMs: Long)

    /**
     * Invoked (debounced — once per newly observed underrun, not per silence
     * frame) with the cumulative underrun [count] from the jitter buffer.
     */
    fun onUnderrun(count: Int)

    /** Invoked when the jitter buffer fully drains after the final chunk of [turnId]. */
    fun onDrained(turnId: String)

    /** Invoked when Android audio focus is lost; playback is silenced but the track stays alive. */
    fun onAudioFocusLost()

    /** Invoked when Android audio focus is regained after a loss. */
    fun onAudioFocusResumed()
}
|
|
53
|
+
|
|
54
|
+
// ────────────────────────────────────────────────────────────────────────────
|
|
55
|
+
// AudioPipeline
|
|
56
|
+
// ────────────────────────────────────────────────────────────────────────────
|
|
57
|
+
|
|
58
|
+
/**
 * Core orchestrator for the native audio pipeline.
 *
 * Creates an [AudioTrack] whose buffer size is derived from the device HAL's
 * `getMinBufferSize` (never hardcoded), a [JitterBuffer] ring, and a
 * **MAX_PRIORITY write thread** that loops `buffer.read() → track.write(BLOCKING)`.
 *
 * Key design points:
 * - AudioTrack uses **USAGE_MEDIA + CONTENT_TYPE_SPEECH** (not
 *   VOICE_COMMUNICATION — avoids earpiece routing).
 * - AudioTrack stays alive for the entire session, writing silence when idle.
 *   This avoids 50–100 ms restart latency.
 * - Config is **immutable per session** — tear down and rebuild to change
 *   sample rate.
 * - [turnLock] synchronizes [pushAudio] and [invalidateTurn] to prevent
 *   interleaved buffer.reset + buffer.write.
 * - [disconnect] calls `track.stop()` to unblock WRITE_BLOCKING before
 *   joining the write thread, preventing the race where cleanup releases a
 *   track the write thread still holds.
 * - [setState] dispatches listener callbacks to the main thread when called
 *   from the bridge thread.
 * - Underrun events are debounced (fire once per new underrun, not per
 *   silence frame).
 */
class AudioPipeline(
    private val context: Context,
    private val sampleRate: Int,
    private val channelCount: Int,
    private val targetBufferMs: Int,
    private val listener: PipelineListener
) {
    companion object {
        private const val TAG = "AudioPipeline"

        /** Track buffer = 4× frame size for scheduling headroom. */
        private const val TRACK_BUFFER_MULTIPLIER = 4

        /** How often (ms) the zombie-detection daemon checks playbackHeadPosition. */
        private const val ZOMBIE_POLL_INTERVAL_MS = 2000L

        /** If playback head hasn't moved for this long, declare zombie. */
        private const val ZOMBIE_STALL_THRESHOLD_MS = 5000L

        /** Minimum volume level (0–15) enforced by VolumeGuard on STREAM_MUSIC. */
        private const val MIN_VOLUME_LEVEL = 1
    }

    // ── Derived audio constants ─────────────────────────────────────────
    private val channelMask =
        if (channelCount == 1) AudioFormat.CHANNEL_OUT_MONO
        else AudioFormat.CHANNEL_OUT_STEREO

    /** Minimum buffer size in bytes reported by the device HAL. */
    private val minBufferBytes: Int = run {
        val size = AudioTrack.getMinBufferSize(
            sampleRate,
            channelMask,
            AudioFormat.ENCODING_PCM_16BIT
        )
        if (size <= 0) {
            // getMinBufferSize returns ERROR/ERROR_BAD_VALUE (<= 0) when the
            // HAL rejects the configuration; keep going with a sane default.
            Log.e(TAG, "getMinBufferSize returned $size " +
                "(sampleRate=$sampleRate, channels=$channelCount). " +
                "Falling back to 20ms frame.")
            // Fallback: 20ms worth of 16-bit samples
            (sampleRate * channelCount * 2) / 50 // 2 bytes per sample, 50 = 1000/20
        } else {
            size
        }
    }

    /** Number of 16-bit samples per "frame" (one HAL buffer). */
    val frameSizeSamples: Int = minBufferBytes / 2 // 2 bytes per short

    /** Track buffer in bytes — 4× frame for scheduling headroom. */
    private val trackBufferBytes = minBufferBytes * TRACK_BUFFER_MULTIPLIER

    // ── Core components ─────────────────────────────────────────────────
    private var audioTrack: AudioTrack? = null
    private var jitterBuffer: JitterBuffer? = null

    // ── Threading ───────────────────────────────────────────────────────
    private var writeThread: Thread? = null
    private val running = AtomicBoolean(false)

    // ── Turn management ─────────────────────────────────────────────────
    private val turnLock = ReentrantLock()
    @Volatile private var currentTurnId: String? = null
    @Volatile private var playbackStartedForTurn = false

    /** Set by pushAudio on first chunk; consumed by writeLoop to flush stale silence from AudioTrack. */
    private val pendingFlush = AtomicBoolean(false)

    // ── Audio focus ─────────────────────────────────────────────────────
    private val audioManager: AudioManager =
        context.getSystemService(Context.AUDIO_SERVICE) as AudioManager
    private val hasAudioFocus = AtomicBoolean(false)
    private val audioFocusLost = AtomicBoolean(false)

    private val focusChangeListener = AudioManager.OnAudioFocusChangeListener { focusChange ->
        when (focusChange) {
            AudioManager.AUDIOFOCUS_GAIN -> {
                Log.d(TAG, "Audio focus gained")
                audioFocusLost.set(false)
                hasAudioFocus.set(true)
                listener.onAudioFocusResumed()
            }
            AudioManager.AUDIOFOCUS_LOSS,
            AudioManager.AUDIOFOCUS_LOSS_TRANSIENT,
            AudioManager.AUDIOFOCUS_LOSS_TRANSIENT_CAN_DUCK -> {
                Log.d(TAG, "Audio focus lost: $focusChange")
                audioFocusLost.set(true)
                // Don't release focus — keep writing silence so track stays alive
                listener.onAudioFocusLost()
            }
        }
    }

    // ── Zombie detection ────────────────────────────────────────────────
    // NOTE(review): lastPlaybackHead/lastHeadChangeTime are only touched by
    // startZombieDetection() and the zombie thread itself; confirm no other
    // thread is added before relying on that.
    private var zombieThread: Thread? = null
    private var lastPlaybackHead: Long = 0
    private var lastHeadChangeTime: Long = System.currentTimeMillis()

    // ── VolumeGuard ─────────────────────────────────────────────────────
    private var volumeObserver: ContentObserver? = null

    // ── Underrun debounce ───────────────────────────────────────────────
    // NOTE(review): written by the write thread and reset under turnLock from
    // the bridge thread without volatile — worst case is one duplicate or
    // missed onUnderrun event, which is tolerable for telemetry.
    private var lastReportedUnderrunCount = 0

    // ── State ───────────────────────────────────────────────────────────
    @Volatile private var state: PipelineState = PipelineState.IDLE
    private val mainHandler = Handler(Looper.getMainLooper())

    // ── Telemetry (atomics — safe to read from any thread) ──────────────
    val totalPushCalls = AtomicLong(0)
    val totalPushBytes = AtomicLong(0)
    val totalWriteLoops = AtomicLong(0)

    // ════════════════════════════════════════════════════════════════════
    // Connect / Disconnect
    // ════════════════════════════════════════════════════════════════════

    /**
     * Build the AudioTrack, JitterBuffer, start the write thread, request
     * audio focus, and install VolumeGuard + zombie detection.
     *
     * On failure, partially-built components are torn down and the pipeline
     * ends in [PipelineState.ERROR] after emitting a "CONNECT_FAILED" error.
     */
    fun connect() {
        if (running.get()) {
            Log.w(TAG, "connect() called while already running — ignoring")
            return
        }
        setState(PipelineState.CONNECTING)

        try {
            // ── 1. JitterBuffer ─────────────────────────────────────────
            jitterBuffer = JitterBuffer(
                sampleRate = sampleRate,
                channels = channelCount,
                targetBufferMs = targetBufferMs
            )

            // ── 2. AudioTrack ───────────────────────────────────────────
            val audioAttributes = AudioAttributes.Builder()
                .setUsage(AudioAttributes.USAGE_MEDIA)
                .setContentType(AudioAttributes.CONTENT_TYPE_SPEECH)
                .build()

            val audioFormat = AudioFormat.Builder()
                .setSampleRate(sampleRate)
                .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
                .setChannelMask(channelMask)
                .build()

            audioTrack = AudioTrack.Builder()
                .setAudioAttributes(audioAttributes)
                .setAudioFormat(audioFormat)
                .setBufferSizeInBytes(trackBufferBytes)
                .setTransferMode(AudioTrack.MODE_STREAM)
                .build()

            audioTrack!!.play()
            Log.d(TAG, "AudioTrack created and started — playState=${audioTrack!!.playState}, " +
                "state=${audioTrack!!.state}, sampleRate=$sampleRate, " +
                "bufferBytes=$trackBufferBytes, minBufferBytes=$minBufferBytes")

            // ── 3. Audio focus ──────────────────────────────────────────
            requestAudioFocus()

            // ── 4. Write thread ─────────────────────────────────────────
            running.set(true)
            writeThread = Thread(::writeLoop, "AudioPipeline-Writer").apply {
                priority = Thread.MAX_PRIORITY
                isDaemon = false
                start()
            }

            // ── 5. Zombie detection daemon ──────────────────────────────
            startZombieDetection()

            // ── 6. VolumeGuard ──────────────────────────────────────────
            installVolumeGuard()

            // ── 7. Reset telemetry ──────────────────────────────────────
            resetTelemetry()

            setState(PipelineState.IDLE)
            Log.d(TAG, "Connected — sampleRate=$sampleRate ch=$channelCount " +
                "frameSamples=$frameSizeSamples targetBuffer=${targetBufferMs}ms")
        } catch (e: Exception) {
            Log.e(TAG, "connect() failed", e)
            listener.onError("CONNECT_FAILED", e.message ?: "Unknown error")
            // Tear down first, then set ERROR: disconnect() unconditionally
            // ends with setState(IDLE), so setting ERROR before it (as the
            // previous version did) left the pipeline reporting IDLE after a
            // failed connect. This order guarantees the path ends in ERROR.
            disconnect()
            setState(PipelineState.ERROR)
        }
    }

    /**
     * Tear down the pipeline.
     *
     * Calls `track.stop()` **first** to unblock the write thread's
     * `WRITE_BLOCKING` call, then joins the thread.
     */
    fun disconnect() {
        running.set(false)

        // Stop zombie detection
        zombieThread?.interrupt()
        zombieThread = null

        // Remove VolumeGuard
        removeVolumeGuard()

        // Abandon audio focus
        abandonAudioFocus()

        // Stop AudioTrack to unblock WRITE_BLOCKING
        try {
            audioTrack?.stop()
        } catch (e: IllegalStateException) {
            Log.w(TAG, "AudioTrack.stop() failed — may already be stopped", e)
        }

        // Join write thread (now unblocked)
        writeThread?.let { thread ->
            try {
                thread.join(2000)
                if (thread.isAlive) {
                    Log.w(TAG, "Write thread did not exit in time — interrupting")
                    thread.interrupt()
                    thread.join(1000)
                }
            } catch (_: InterruptedException) {
                // Preserve this thread's interrupt status for callers.
                Thread.currentThread().interrupt()
            }
        }
        writeThread = null

        // Release AudioTrack
        try {
            audioTrack?.release()
        } catch (e: Exception) {
            Log.w(TAG, "AudioTrack.release() failed", e)
        }
        audioTrack = null

        jitterBuffer = null
        currentTurnId = null

        setState(PipelineState.IDLE)
        Log.d(TAG, "Disconnected")
    }

    // ════════════════════════════════════════════════════════════════════
    // Push audio (bridge thread → jitter buffer)
    // ════════════════════════════════════════════════════════════════════

    /**
     * Decode a base64-encoded PCM16 chunk and write it into the jitter buffer.
     *
     * @param base64Audio Base64-encoded PCM 16-bit LE audio data.
     * @param turnId Conversation turn identifier.
     * @param isFirstChunk True if this is the first chunk of a new turn.
     * @param isLastChunk True if this is the final chunk of the current turn.
     */
    fun pushAudio(base64Audio: String, turnId: String, isFirstChunk: Boolean, isLastChunk: Boolean) {
        val buf = jitterBuffer ?: run {
            listener.onError("NOT_CONNECTED", "Pipeline not connected")
            return
        }

        turnLock.withLock {
            // ── Turn boundary handling ──────────────────────────────────
            if (isFirstChunk || currentTurnId != turnId) {
                buf.reset()
                currentTurnId = turnId
                playbackStartedForTurn = false
                lastReportedUnderrunCount = 0
                // Signal write loop to flush stale silence from AudioTrack
                // so real audio plays immediately without waiting behind queued silence.
                pendingFlush.set(true)
                setState(PipelineState.STREAMING)
            }

            // ── Decode base64 → PCM shorts ──────────────────────────────
            val bytes: ByteArray = try {
                Base64.decode(base64Audio, Base64.DEFAULT)
            } catch (e: Exception) {
                listener.onError("DECODE_ERROR", "Base64 decode failed: ${e.message}")
                return
            }

            val shortBuffer = ByteBuffer.wrap(bytes)
                .order(ByteOrder.LITTLE_ENDIAN)
                .asShortBuffer()
            val samples = ShortArray(shortBuffer.remaining())
            shortBuffer.get(samples)

            // ── Write into jitter buffer ────────────────────────────────
            buf.write(samples)

            // ── Telemetry ───────────────────────────────────────────────
            totalPushCalls.incrementAndGet()
            totalPushBytes.addAndGet(bytes.size.toLong())

            // ── End-of-stream ───────────────────────────────────────────
            if (isLastChunk) {
                buf.markEndOfStream()
                setState(PipelineState.DRAINING)
            }
        }
    }

    /**
     * Invalidate the current turn. Resets the jitter buffer so stale audio
     * is discarded immediately. Safe to call from any thread.
     */
    fun invalidateTurn(newTurnId: String) {
        turnLock.withLock {
            jitterBuffer?.reset()
            currentTurnId = newTurnId
            playbackStartedForTurn = false
            lastReportedUnderrunCount = 0
            setState(PipelineState.IDLE)
        }
    }

    // ════════════════════════════════════════════════════════════════════
    // State & Telemetry
    // ════════════════════════════════════════════════════════════════════

    /** Current pipeline state (volatile read — safe from any thread). */
    fun getState(): PipelineState = state

    /**
     * Snapshot of pipeline + jitter-buffer telemetry as a [Bundle] for the JS
     * bridge. Missing components (pipeline not connected) report zeros.
     */
    fun getTelemetry(): Bundle {
        val buf = jitterBuffer
        val bundle = Bundle().apply {
            putString("state", state.value)
            putInt("bufferMs", buf?.bufferedMs() ?: 0)
            putInt("bufferSamples", buf?.availableSamples() ?: 0)
            putBoolean("primed", buf?.isPrimed() ?: false)
            putLong("totalWritten", buf?.totalWritten?.get() ?: 0)
            putLong("totalRead", buf?.totalRead?.get() ?: 0)
            putInt("underrunCount", buf?.underrunCount?.get() ?: 0)
            putInt("peakLevel", buf?.peakLevel?.get() ?: 0)
            putLong("totalPushCalls", totalPushCalls.get())
            putLong("totalPushBytes", totalPushBytes.get())
            putLong("totalWriteLoops", totalWriteLoops.get())
            putString("turnId", currentTurnId ?: "")
        }
        return bundle
    }

    // ════════════════════════════════════════════════════════════════════
    // Write loop (runs on MAX_PRIORITY thread)
    // ════════════════════════════════════════════════════════════════════

    private fun writeLoop() {
        Log.d(TAG, "Write thread started — frameSizeSamples=$frameSizeSamples, trackBufferBytes=$trackBufferBytes")
        val frame = ShortArray(frameSizeSamples)

        while (running.get()) {
            val track = audioTrack ?: break
            val buf = jitterBuffer ?: break

            // Flush stale silence from AudioTrack when a new turn starts.
            // This prevents the real audio from queuing behind silence frames
            // that were written while idle.
            if (pendingFlush.compareAndSet(true, false)) {
                Log.d(TAG, "Flushing AudioTrack for new turn (head=${track.playbackHeadPosition})")
                track.pause()
                track.flush()
                track.play()
            }

            // Read from jitter buffer (silence if not primed or underrun)
            buf.read(frame)

            // If audio focus is lost, overwrite with silence
            if (audioFocusLost.get()) {
                frame.fill(0)
            }

            // Write to AudioTrack (BLOCKING — will park thread until space available)
            try {
                val written = track.write(frame, 0, frame.size, AudioTrack.WRITE_BLOCKING)

                if (written < 0) {
                    val errorName = when (written) {
                        AudioTrack.ERROR_INVALID_OPERATION -> "ERROR_INVALID_OPERATION"
                        AudioTrack.ERROR_BAD_VALUE -> "ERROR_BAD_VALUE"
                        AudioTrack.ERROR_DEAD_OBJECT -> "ERROR_DEAD_OBJECT"
                        AudioTrack.ERROR -> "ERROR"
                        else -> "UNKNOWN($written)"
                    }
                    Log.e(TAG, "AudioTrack.write returned error: $errorName ($written), " +
                        "playState=${track.playState}, trackState=${track.state}")
                    setState(PipelineState.ERROR)
                    listener.onError("WRITE_ERROR", "AudioTrack.write returned $errorName ($written)")
                    break
                }
            } catch (e: IllegalStateException) {
                // Track was stopped/released — expected during disconnect
                if (running.get()) {
                    Log.e(TAG, "AudioTrack.write threw in running state", e)
                    setState(PipelineState.ERROR)
                    listener.onError("WRITE_ERROR", e.message ?: "AudioTrack write error")
                }
                break
            }

            totalWriteLoops.incrementAndGet()

            // ── Playback-started event (once per turn) ──────────────────
            if (!playbackStartedForTurn && buf.isPrimed() && currentTurnId != null) {
                playbackStartedForTurn = true
                listener.onPlaybackStarted(currentTurnId!!)
            }

            // ── Underrun debounce ───────────────────────────────────────
            val currentUnderruns = buf.underrunCount.get()
            if (currentUnderruns > lastReportedUnderrunCount) {
                lastReportedUnderrunCount = currentUnderruns
                listener.onUnderrun(currentUnderruns)
            }

            // ── Drain detection ─────────────────────────────────────────
            if (buf.isDrained() && state == PipelineState.DRAINING) {
                currentTurnId?.let { listener.onDrained(it) }
                setState(PipelineState.IDLE)
            }
        }

        Log.d(TAG, "Write thread exiting")
    }

    // ════════════════════════════════════════════════════════════════════
    // Audio focus
    // ════════════════════════════════════════════════════════════════════

    // NOTE(review): this requestAudioFocus overload is deprecated since API 26
    // in favor of AudioFocusRequest; migrating requires a minSdk/compat
    // decision, so it is left as-is and only flagged here.
    private fun requestAudioFocus() {
        val result = audioManager.requestAudioFocus(
            focusChangeListener,
            AudioManager.STREAM_MUSIC,
            AudioManager.AUDIOFOCUS_GAIN
        )
        hasAudioFocus.set(result == AudioManager.AUDIOFOCUS_REQUEST_GRANTED)
        if (!hasAudioFocus.get()) {
            Log.w(TAG, "Audio focus request denied")
        }
    }

    private fun abandonAudioFocus() {
        audioManager.abandonAudioFocus(focusChangeListener)
        hasAudioFocus.set(false)
        audioFocusLost.set(false)
    }

    // ════════════════════════════════════════════════════════════════════
    // Zombie AudioTrack detection
    // ════════════════════════════════════════════════════════════════════

    private fun startZombieDetection() {
        lastPlaybackHead = audioTrack?.playbackHeadPosition?.toLong() ?: 0
        lastHeadChangeTime = System.currentTimeMillis()

        zombieThread = Thread({
            while (running.get() && !Thread.currentThread().isInterrupted) {
                try {
                    Thread.sleep(ZOMBIE_POLL_INTERVAL_MS)
                } catch (_: InterruptedException) {
                    break
                }

                val track = audioTrack ?: break
                val head = track.playbackHeadPosition.toLong()
                val now = System.currentTimeMillis()

                if (head != lastPlaybackHead) {
                    lastPlaybackHead = head
                    lastHeadChangeTime = now
                } else {
                    val stalledMs = now - lastHeadChangeTime
                    // Only flag zombie if we think we're actively streaming
                    if (stalledMs >= ZOMBIE_STALL_THRESHOLD_MS &&
                        (state == PipelineState.STREAMING || state == PipelineState.DRAINING)
                    ) {
                        Log.w(TAG, "Zombie AudioTrack detected! head=$head stalledMs=$stalledMs " +
                            "playState=${track.playState} trackState=${track.state} " +
                            "writeLoops=${totalWriteLoops.get()}")
                        listener.onZombieDetected(head, stalledMs)
                        // Reset the timer so we don't spam
                        lastHeadChangeTime = now
                    }
                }
            }
        }, "AudioPipeline-Zombie").apply {
            isDaemon = true
            start()
        }
    }

    // ════════════════════════════════════════════════════════════════════
    // VolumeGuard
    // ════════════════════════════════════════════════════════════════════

    private fun installVolumeGuard() {
        volumeObserver = object : ContentObserver(mainHandler) {
            override fun onChange(selfChange: Boolean) {
                val current = audioManager.getStreamVolume(AudioManager.STREAM_MUSIC)
                if (current < MIN_VOLUME_LEVEL) {
                    Log.d(TAG, "VolumeGuard: raising STREAM_MUSIC from $current to $MIN_VOLUME_LEVEL")
                    try {
                        audioManager.setStreamVolume(
                            AudioManager.STREAM_MUSIC,
                            MIN_VOLUME_LEVEL,
                            0 // no flags — silent raise
                        )
                    } catch (e: SecurityException) {
                        Log.w(TAG, "VolumeGuard: setStreamVolume denied", e)
                    }
                }
            }
        }

        try {
            context.contentResolver.registerContentObserver(
                Settings.System.CONTENT_URI,
                true,
                volumeObserver!!
            )
        } catch (e: Exception) {
            Log.w(TAG, "VolumeGuard: failed to register ContentObserver", e)
            volumeObserver = null
        }
    }

    private fun removeVolumeGuard() {
        volumeObserver?.let {
            try {
                context.contentResolver.unregisterContentObserver(it)
            } catch (e: Exception) {
                Log.w(TAG, "VolumeGuard: failed to unregister", e)
            }
        }
        volumeObserver = null
    }

    // ════════════════════════════════════════════════════════════════════
    // Diagnostics (called from device callback via PipelineIntegration)
    // ════════════════════════════════════════════════════════════════════

    /**
     * Snapshot AudioTrack state at the moment of a route change.
     * This tells us whether the track survives the switch or silently dies.
     */
    fun logTrackHealth(trigger: String) {
        val track = audioTrack
        if (track == null) {
            Log.d(TAG, "[$trigger] AudioTrack health: track is null (pipeline not connected)")
            return
        }

        val playState = when (track.playState) {
            AudioTrack.PLAYSTATE_STOPPED -> "STOPPED"
            AudioTrack.PLAYSTATE_PAUSED -> "PAUSED"
            AudioTrack.PLAYSTATE_PLAYING -> "PLAYING"
            else -> "UNKNOWN(${track.playState})"
        }
        val trackState = when (track.state) {
            AudioTrack.STATE_UNINITIALIZED -> "UNINITIALIZED"
            AudioTrack.STATE_INITIALIZED -> "INITIALIZED"
            AudioTrack.STATE_NO_STATIC_DATA -> "NO_STATIC_DATA"
            else -> "UNKNOWN(${track.state})"
        }
        val head = track.playbackHeadPosition
        val buf = jitterBuffer
        val bufMs = buf?.bufferedMs() ?: -1
        val bufPrimed = buf?.isPrimed() ?: false

        Log.d(TAG, "[$trigger] AudioTrack health: playState=$playState, trackState=$trackState, " +
            "head=$head, pipelineState=${state.value}, running=${running.get()}, " +
            "bufferMs=$bufMs, primed=$bufPrimed, audioFocusLost=${audioFocusLost.get()}, " +
            "writeLoops=${totalWriteLoops.get()}")
    }

    // ════════════════════════════════════════════════════════════════════
    // Internal helpers
    // ════════════════════════════════════════════════════════════════════

    /** Transition to [newState] (no-op if unchanged) and notify on the main thread. */
    private fun setState(newState: PipelineState) {
        if (state == newState) return
        state = newState
        // Dispatch to main thread if called from bridge/write thread
        if (Looper.myLooper() == Looper.getMainLooper()) {
            listener.onStateChanged(newState)
        } else {
            mainHandler.post { listener.onStateChanged(newState) }
        }
    }

    /** Zero all pipeline counters and delegate to the jitter buffer's reset. */
    private fun resetTelemetry() {
        totalPushCalls.set(0)
        totalPushBytes.set(0)
        totalWriteLoops.set(0)
        jitterBuffer?.resetTelemetry()
    }
}
|