@edkimmel/expo-audio-stream 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.eslintrc.js +5 -0
- package/.yarnrc.yml +8 -0
- package/NATIVE_EVENTS.md +270 -0
- package/README.md +289 -0
- package/android/build.gradle +92 -0
- package/android/src/main/AndroidManifest.xml +4 -0
- package/android/src/main/java/expo/modules/audiostream/AudioDataEncoder.kt +178 -0
- package/android/src/main/java/expo/modules/audiostream/AudioEffectsManager.kt +107 -0
- package/android/src/main/java/expo/modules/audiostream/AudioPlaybackManager.kt +651 -0
- package/android/src/main/java/expo/modules/audiostream/AudioRecorderManager.kt +509 -0
- package/android/src/main/java/expo/modules/audiostream/Constants.kt +21 -0
- package/android/src/main/java/expo/modules/audiostream/EventSender.kt +7 -0
- package/android/src/main/java/expo/modules/audiostream/ExpoAudioStreamView.kt +7 -0
- package/android/src/main/java/expo/modules/audiostream/ExpoPlayAudioStreamModule.kt +280 -0
- package/android/src/main/java/expo/modules/audiostream/PermissionUtils.kt +16 -0
- package/android/src/main/java/expo/modules/audiostream/RecordingConfig.kt +60 -0
- package/android/src/main/java/expo/modules/audiostream/SoundConfig.kt +46 -0
- package/android/src/main/java/expo/modules/audiostream/pipeline/AudioPipeline.kt +685 -0
- package/android/src/main/java/expo/modules/audiostream/pipeline/JitterBuffer.kt +227 -0
- package/android/src/main/java/expo/modules/audiostream/pipeline/PipelineIntegration.kt +315 -0
- package/app.plugin.js +1 -0
- package/build/ExpoPlayAudioStreamModule.d.ts +3 -0
- package/build/ExpoPlayAudioStreamModule.d.ts.map +1 -0
- package/build/ExpoPlayAudioStreamModule.js +5 -0
- package/build/ExpoPlayAudioStreamModule.js.map +1 -0
- package/build/events.d.ts +36 -0
- package/build/events.d.ts.map +1 -0
- package/build/events.js +25 -0
- package/build/events.js.map +1 -0
- package/build/index.d.ts +125 -0
- package/build/index.d.ts.map +1 -0
- package/build/index.js +222 -0
- package/build/index.js.map +1 -0
- package/build/pipeline/index.d.ts +81 -0
- package/build/pipeline/index.d.ts.map +1 -0
- package/build/pipeline/index.js +140 -0
- package/build/pipeline/index.js.map +1 -0
- package/build/pipeline/types.d.ts +132 -0
- package/build/pipeline/types.d.ts.map +1 -0
- package/build/pipeline/types.js +5 -0
- package/build/pipeline/types.js.map +1 -0
- package/build/types.d.ts +221 -0
- package/build/types.d.ts.map +1 -0
- package/build/types.js +10 -0
- package/build/types.js.map +1 -0
- package/expo-module.config.json +9 -0
- package/ios/AudioPipeline.swift +562 -0
- package/ios/AudioUtils.swift +356 -0
- package/ios/ExpoPlayAudioStream.podspec +27 -0
- package/ios/ExpoPlayAudioStreamModule.swift +436 -0
- package/ios/ExpoPlayAudioStreamView.swift +7 -0
- package/ios/JitterBuffer.swift +208 -0
- package/ios/Logger.swift +7 -0
- package/ios/Microphone.swift +221 -0
- package/ios/MicrophoneDataDelegate.swift +4 -0
- package/ios/PipelineIntegration.swift +214 -0
- package/ios/RecordingResult.swift +10 -0
- package/ios/RecordingSettings.swift +11 -0
- package/ios/SharedAudioEngine.swift +484 -0
- package/ios/SoundConfig.swift +45 -0
- package/ios/SoundPlayer.swift +408 -0
- package/ios/SoundPlayerDelegate.swift +7 -0
- package/package.json +49 -0
- package/plugin/build/index.d.ts +5 -0
- package/plugin/build/index.js +28 -0
- package/plugin/src/index.ts +53 -0
- package/plugin/tsconfig.json +9 -0
- package/plugin/tsconfig.tsbuildinfo +1 -0
- package/src/ExpoPlayAudioStreamModule.ts +5 -0
- package/src/events.ts +66 -0
- package/src/index.ts +359 -0
- package/src/pipeline/index.ts +216 -0
- package/src/pipeline/types.ts +169 -0
- package/src/types.ts +270 -0
- package/tsconfig.json +9 -0
|
@@ -0,0 +1,509 @@
|
|
|
1
|
+
package expo.modules.audiostream
|
|
2
|
+
|
|
3
|
+
import android.media.AudioFormat
|
|
4
|
+
import android.media.AudioRecord
|
|
5
|
+
import android.media.MediaRecorder
|
|
6
|
+
import android.os.Build
|
|
7
|
+
import android.os.Handler
|
|
8
|
+
import android.os.Looper
|
|
9
|
+
import android.util.Log
|
|
10
|
+
import androidx.annotation.RequiresApi
|
|
11
|
+
import androidx.core.os.bundleOf
|
|
12
|
+
import expo.modules.kotlin.Promise
|
|
13
|
+
import java.util.concurrent.atomic.AtomicBoolean
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
/**
 * Streams raw PCM audio from the device microphone to JavaScript.
 *
 * Audio is captured with [AudioRecord]; each read() requests exactly one
 * "interval" of audio (the emission cadence the caller asked for), which is
 * base64-encoded and sent as an Expo event. Nothing is written to disk, so
 * every `fileUri` in results/events is the empty string.
 *
 * Threading model: capture runs on a dedicated [recordingThread]; events are
 * posted to the main looper via [mainHandler]; access to [audioRecord] during
 * reads and stop is serialized with [audioRecordLock].
 */
class AudioRecorderManager(
    private val permissionUtils: PermissionUtils,
    private val audioDataEncoder: AudioDataEncoder,
    private val eventSender: EventSender,
    private val audioEffectsManager: AudioEffectsManager
) {
    private var audioRecord: AudioRecord? = null
    private var bufferSizeInBytes = 0 // AudioRecord internal ring buffer (>= getMinBufferSize)
    private var readSizeInBytes = 0 // Bytes to read per call (exactly one interval of audio)
    private val isRecording = AtomicBoolean(false)
    private val isPaused = AtomicBoolean(false)
    private var streamUuid: String? = null
    private var recordingThread: Thread? = null
    private var recordingStartTime: Long = 0
    private var totalRecordedTime: Long = 0 // NOTE(review): reset in cleanup but never advanced — confirm before relying on it
    private var totalDataSize = 0
    private var pausedDuration = 0L // NOTE(review): reset in cleanup but never advanced — confirm before relying on it
    private var lastEmittedSize = 0L
    private val mainHandler = Handler(Looper.getMainLooper())
    private val audioRecordLock = Any()

    // Flag to control whether actual audio data or silence is sent to JS.
    private var isSilent = false

    private lateinit var recordingConfig: RecordingConfig
    private var mimeType = "audio/wav"
    private var audioFormat: Int = AudioFormat.ENCODING_PCM_16BIT

    /** Bytes per sample for an encoding string; unrecognized encodings default to 16-bit. */
    private fun bytesPerSample(encoding: String): Int = when (encoding) {
        "pcm_8bit" -> 1
        "pcm_32bit" -> 4
        else -> 2 // pcm_16bit and anything unrecognized
    }

    /** Bit depth for an encoding string; unrecognized encodings default to 16. */
    private fun bitDepth(encoding: String): Int = bytesPerSample(encoding) * 8

    /**
     * Validates the recording state by checking permission and recording status.
     *
     * @param promise Promise to reject if validation fails; when null, a
     *   [SecurityException] is thrown on missing permission instead.
     * @param checkRecordingState Whether to check if recording is in progress.
     * @param shouldRejectIfRecording Whether an active recording counts as failure.
     *   Note: an already-active recording RESOLVES the promise (with a message)
     *   rather than rejecting it, so callers treat "already recording" as benign.
     * @return True if validation passes, false otherwise.
     */
    private fun validateRecordingState(
        promise: Promise? = null,
        checkRecordingState: Boolean = false,
        shouldRejectIfRecording: Boolean = true
    ): Boolean {
        // Permission must be granted before anything else.
        if (!permissionUtils.checkRecordingPermission()) {
            if (promise != null) {
                promise.reject("PERMISSION_DENIED", "Recording permission has not been granted", null)
            } else {
                throw SecurityException("Recording permission has not been granted")
            }
            return false
        }

        if (checkRecordingState) {
            val isActive = isRecording.get() && !isPaused.get()

            if (isActive && shouldRejectIfRecording && promise != null) {
                promise.resolve("Recording is already in progress")
                return false
            }

            return !isActive // Validation passes only when not actively recording
        }

        return true // Permission check passed
    }

    /**
     * Starts (or resumes) microphone capture and begins emitting audio events.
     *
     * Resolves the promise with a bundle describing the stream
     * (fileUri/channels/bitDepth/sampleRate/mimeType); rejects on invalid
     * config, unsupported format, or AudioRecord initialization failure.
     */
    @RequiresApi(Build.VERSION_CODES.R)
    fun startRecording(options: Map<String, Any?>, promise: Promise) {
        // Check permission and recording state.
        if (!validateRecordingState(promise, checkRecordingState = true, shouldRejectIfRecording = true)) {
            return
        }

        // Initialize the recording configuration using the factory method.
        val tempRecordingConfig = RecordingConfig.fromOptions(options)
        Log.d(Constants.TAG, "Initial recording configuration: $tempRecordingConfig")

        // Validate the recording configuration.
        val configValidationResult = tempRecordingConfig.validate()
        if (configValidationResult != null) {
            promise.reject(configValidationResult.code, configValidationResult.message, null)
            return
        }

        // Resolve the platform audio format for the requested encoding.
        val formatConfig = audioDataEncoder.getAudioFormatConfig(tempRecordingConfig.encoding)
        if (formatConfig.error != null) {
            promise.reject("UNSUPPORTED_FORMAT", formatConfig.error, null)
            return
        }
        audioFormat = formatConfig.audioFormat

        // Validate the audio format; may fall back to 16-bit PCM (updated config).
        val formatValidationResult = validateAudioFormat(tempRecordingConfig, audioFormat, promise) ?: return
        audioFormat = formatValidationResult.first
        recordingConfig = formatValidationResult.second

        // Keep the MIME type used by events and stopRecording() in sync with
        // the negotiated format (previously the field stayed at its default).
        mimeType = formatConfig.mimeType

        // Compute how many bytes correspond to the requested interval.
        val intervalBytes = (recordingConfig.interval * recordingConfig.sampleRate *
                recordingConfig.channels * bytesPerSample(recordingConfig.encoding) / 1000).toInt()

        // readSizeInBytes = exactly one interval of audio; this is what we request
        // per read() call, giving us the cadence the caller asked for.
        readSizeInBytes = intervalBytes

        // AudioRecord's internal ring buffer must be >= getMinBufferSize, and
        // large enough to hold at least one full read.
        val channelConfig = if (recordingConfig.channels == 1) AudioFormat.CHANNEL_IN_MONO
        else AudioFormat.CHANNEL_IN_STEREO
        val minBuf = AudioRecord.getMinBufferSize(recordingConfig.sampleRate, channelConfig, audioFormat)
        bufferSizeInBytes = maxOf(intervalBytes, minBuf)
        Log.d(Constants.TAG, "Interval: ${recordingConfig.interval}ms, readSize: $readSizeInBytes, ringBuffer: $bufferSizeInBytes (minBuf=$minBuf)")

        // Capture the pause state BEFORE clearing it below; the original code
        // checked isPaused after isPaused.set(false), so the "resume" branch
        // could never trigger and the start time was always reset.
        val wasPaused = isPaused.get()

        // Initialize the AudioRecord for a fresh recording (not a resume).
        if (audioRecord == null || !wasPaused) {
            Log.d(Constants.TAG, "AudioFormat: $audioFormat, BufferSize: $bufferSizeInBytes")

            // Use the validated config (encoding may have been downgraded).
            audioRecord = createAudioRecord(recordingConfig, audioFormat, promise)
            if (audioRecord == null) {
                return
            }
        }

        // Generate a unique ID for this recording stream.
        streamUuid = java.util.UUID.randomUUID().toString()

        audioRecord?.startRecording()
        // Apply audio effects after starting recording using the manager.
        audioRecord?.let { audioEffectsManager.setupAudioEffects(it) }

        isPaused.set(false)
        isRecording.set(true)

        if (!wasPaused) {
            recordingStartTime = System.currentTimeMillis() // Only reset start time if it's not a resume
        }

        recordingThread = Thread { recordingProcess() }.apply { start() }

        val result = bundleOf(
            "fileUri" to "",
            "channels" to recordingConfig.channels,
            "bitDepth" to bitDepth(recordingConfig.encoding),
            "sampleRate" to recordingConfig.sampleRate,
            "mimeType" to formatConfig.mimeType
        )
        promise.resolve(result)
    }

    /**
     * Common resource cleanup logic: releases effects and AudioRecord,
     * interrupts the capture thread, and resets all counters/flags.
     * Never throws; errors are logged.
     */
    private fun cleanupResources() {
        try {
            // Release audio effects first.
            audioEffectsManager.releaseAudioEffects()

            // Stop and release AudioRecord if it exists; release even if stop fails.
            audioRecord?.let { record ->
                try {
                    if (record.state == AudioRecord.STATE_INITIALIZED) {
                        record.stop()
                    }
                } catch (e: Exception) {
                    Log.e(Constants.TAG, "Error stopping AudioRecord", e)
                } finally {
                    try {
                        record.release()
                    } catch (e: Exception) {
                        Log.e(Constants.TAG, "Error releasing AudioRecord", e)
                    }
                }
            }
            audioRecord = null

            // Interrupt and clear the recording thread.
            recordingThread?.interrupt()
            recordingThread = null

            // Always reset state.
            isRecording.set(false)
            isPaused.set(false)
            totalRecordedTime = 0
            pausedDuration = 0
            totalDataSize = 0
            streamUuid = null
            lastEmittedSize = 0

            Log.d(Constants.TAG, "Audio resources cleaned up")
        } catch (e: Exception) {
            Log.e(Constants.TAG, "Error during resource cleanup", e)
        }
    }

    /**
     * Stops capture, drains and emits any final chunk, resolves the promise
     * with a summary bundle (durationMs/size/format), and releases resources.
     * Resolves null when no recording is active.
     */
    fun stopRecording(promise: Promise) {
        synchronized(audioRecordLock) {
            if (!isRecording.get()) {
                Log.e(Constants.TAG, "Recording is not active")
                promise.resolve(null)
                return
            }

            try {
                // Read any final audio data still buffered in AudioRecord.
                val audioData = ByteArray(bufferSizeInBytes)
                val bytesRead = audioRecord?.read(audioData, 0, bufferSizeInBytes) ?: -1
                Log.d(Constants.TAG, "Last Read $bytesRead bytes")
                if (bytesRead > 0) {
                    // Count the final chunk BEFORE emitting, matching
                    // recordingProcess(); otherwise the reported size and
                    // duration below silently exclude it.
                    totalDataSize += bytesRead
                    emitAudioData(audioData, bytesRead)
                }

                // Generate the result before cleanup resets the counters.
                val byteRate = recordingConfig.sampleRate * recordingConfig.channels *
                        bytesPerSample(recordingConfig.encoding)
                val duration = if (byteRate > 0) (totalDataSize.toLong() * 1000 / byteRate) else 0

                val result = bundleOf(
                    "fileUri" to "",
                    "filename" to "",
                    "durationMs" to duration,
                    "channels" to recordingConfig.channels,
                    "bitDepth" to bitDepth(recordingConfig.encoding),
                    "sampleRate" to recordingConfig.sampleRate,
                    "size" to totalDataSize.toLong(),
                    "mimeType" to mimeType
                )

                // Clean up all resources, then report success.
                cleanupResources()
                promise.resolve(result)
            } catch (e: Exception) {
                Log.d(Constants.TAG, "Failed to stop recording", e)
                // Make sure to clean up even if there's an error.
                cleanupResources()
                promise.reject("STOP_FAILED", "Failed to stop recording", e)
            }
        }
    }

    /**
     * Capture loop run on [recordingThread]: reads one interval of audio per
     * iteration and emits it. Sleeps while paused; aborts after 10 consecutive
     * read errors or if the thread is interrupted.
     */
    private fun recordingProcess() {
        Log.i(Constants.TAG, "Starting recording process, readSize=$readSizeInBytes, ringBuffer=$bufferSizeInBytes")
        val audioData = ByteArray(readSizeInBytes)
        var consecutiveErrors = 0

        try {
            while (isRecording.get() && !Thread.currentThread().isInterrupted) {
                if (isPaused.get()) {
                    try {
                        Thread.sleep(10) // Idle-wait while paused
                    } catch (_: InterruptedException) {
                        Thread.currentThread().interrupt()
                        break
                    }
                    continue
                }

                val bytesRead = synchronized(audioRecordLock) {
                    audioRecord?.let {
                        if (it.state != AudioRecord.STATE_INITIALIZED) {
                            Log.e(Constants.TAG, "AudioRecord not initialized")
                            return@let -1
                        }
                        // Read exactly one interval's worth of audio.
                        // AudioRecord.read() blocks until readSizeInBytes are available.
                        it.read(audioData, 0, readSizeInBytes).also { bytes ->
                            if (bytes < 0) {
                                Log.e(Constants.TAG, "AudioRecord read error: $bytes")
                            }
                        }
                    } ?: -1
                }

                if (bytesRead > 0) {
                    consecutiveErrors = 0
                    totalDataSize += bytesRead
                    // Emit immediately — each read is one interval of audio.
                    emitAudioData(audioData, bytesRead)
                } else if (bytesRead < 0) {
                    consecutiveErrors++
                    if (consecutiveErrors >= 10) {
                        Log.e(Constants.TAG, "Too many consecutive read errors ($consecutiveErrors), stopping")
                        emitRecordingError("READ_ERROR", "AudioRecord read failed after $consecutiveErrors consecutive errors")
                        break
                    }
                }
            }
        } catch (e: Exception) {
            Log.e(Constants.TAG, "Recording thread crashed", e)
            emitRecordingError("RECORDING_CRASH", e.message ?: "Recording thread unexpected error")
        }
    }

    /**
     * Sends a recording error event to JS so the caller can react.
     */
    private fun emitRecordingError(code: String, message: String) {
        mainHandler.post {
            try {
                eventSender.sendExpoEvent(
                    Constants.AUDIO_EVENT_NAME, bundleOf(
                        "error" to code,
                        "errorMessage" to message,
                        "streamUuid" to streamUuid
                    )
                )
            } catch (e: Exception) {
                Log.e(Constants.TAG, "Failed to send error event", e)
            }
        }
    }

    /**
     * Base64-encodes one chunk and posts it to JS as an AudioData event.
     * In silent mode a zeroed buffer of the same length is sent and the
     * sound level is pinned at -160 dB.
     */
    private fun emitAudioData(audioData: ByteArray, length: Int) {
        // If silent mode is active, replace audioData with zeros.
        val dataToEncode = if (isSilent) ByteArray(length) else audioData

        val encodedBuffer = audioDataEncoder.encodeToBase64(dataToEncode)

        val from = lastEmittedSize
        lastEmittedSize = totalDataSize.toLong()

        // Position (ms) of this chunk's start = byte offset / byte rate.
        // Uses the encoding's real bytes-per-sample: the previous hard-coded
        // 8/16-bit choice reported doubled positions for pcm_32bit streams.
        val byteRate = recordingConfig.sampleRate * recordingConfig.channels *
                bytesPerSample(recordingConfig.encoding)
        val positionInMs = if (byteRate > 0) (from * 1000) / byteRate else 0L

        // Power level: fixed floor in silent mode, otherwise computed from the real data.
        val soundLevel = if (isSilent) -160.0f else audioDataEncoder.calculatePowerLevel(audioData, length)

        mainHandler.post {
            try {
                eventSender.sendExpoEvent(
                    Constants.AUDIO_EVENT_NAME, bundleOf(
                        "fileUri" to "",
                        "lastEmittedSize" to from,
                        "encoded" to encodedBuffer,
                        "deltaSize" to length,
                        "position" to positionInMs,
                        "mimeType" to mimeType,
                        "soundLevel" to soundLevel,
                        "totalSize" to totalDataSize.toLong(),
                        "streamUuid" to streamUuid
                    )
                )
            } catch (e: Exception) {
                Log.e(Constants.TAG, "Failed to send event", e)
            }
        }
    }

    /**
     * Releases all resources used by the recorder.
     * Should be called when the module is being destroyed.
     */
    fun release() {
        try {
            // If recording is active, stop it properly via stopRecording().
            if (isRecording.get()) {
                // No caller is waiting, so use a log-only promise.
                val dummyPromise = object : Promise {
                    override fun resolve(value: Any?) {
                        Log.d(Constants.TAG, "Recording stopped during release")
                    }

                    override fun reject(code: String, message: String?, cause: Throwable?) {
                        Log.e(Constants.TAG, "Error stopping recording during release: $message", cause)
                    }
                }

                // stopRecording handles the full cleanup path.
                stopRecording(dummyPromise)
            } else {
                // Not recording, just clean up resources.
                cleanupResources()
            }

            Log.d(Constants.TAG, "AudioRecorderManager fully released")
        } catch (e: Exception) {
            Log.e(Constants.TAG, "Error during AudioRecorderManager release", e)
        }
    }

    /**
     * Toggles between sending actual audio data and silence.
     */
    fun toggleSilence(isSilent: Boolean) {
        this.isSilent = isSilent
        Log.d(Constants.TAG, "Silence mode toggled: $isSilent")
    }

    /**
     * Creates an AudioRecord instance with the given configuration.
     *
     * @param config The recording configuration (sample rate / channels used here).
     * @param audioFormat The platform audio format to use.
     * @param promise Promise to reject if permission is missing or initialization fails.
     * @return The created AudioRecord instance, or null on failure (promise already rejected).
     */
    private fun createAudioRecord(
        config: RecordingConfig,
        audioFormat: Int,
        promise: Promise
    ): AudioRecord? {
        // Double check permission directly before creating AudioRecord.
        if (!permissionUtils.checkRecordingPermission()) {
            promise.reject("PERMISSION_DENIED", "Recording permission has not been granted", null)
            return null
        }

        // Use VOICE_RECOGNITION for far-field/speakerphone use — higher mic gain,
        // no near-field gain reduction. AEC/NS/AGC are applied separately via AudioEffectsManager.
        val audioSource = MediaRecorder.AudioSource.VOICE_RECOGNITION

        val record = AudioRecord(
            audioSource,
            config.sampleRate,
            if (config.channels == 1) AudioFormat.CHANNEL_IN_MONO else AudioFormat.CHANNEL_IN_STEREO,
            audioFormat,
            bufferSizeInBytes
        )

        if (record.state != AudioRecord.STATE_INITIALIZED) {
            promise.reject(
                "INITIALIZATION_FAILED",
                "Failed to initialize the audio recorder",
                null
            )
            return null
        }

        return record
    }

    /**
     * Validates the audio format for the given recording configuration,
     * falling back to 16-bit PCM when the requested format is unsupported.
     *
     * @param config The recording configuration.
     * @param initialFormat The initial audio format to validate.
     * @param promise Promise to reject if no supported format is found.
     * @return A pair of (validated audio format, possibly-updated config),
     *   or null when even the fallback is unsupported (promise already rejected).
     */
    private fun validateAudioFormat(
        config: RecordingConfig,
        initialFormat: Int,
        promise: Promise
    ): Pair<Int, RecordingConfig>? {
        var audioFormat = initialFormat
        var updatedConfig = config

        // Check whether the selected audio format is supported on this device.
        if (!audioDataEncoder.isAudioFormatSupported(config.sampleRate, config.channels, audioFormat, permissionUtils)) {
            Log.e(Constants.TAG, "Selected audio format not supported, falling back to 16-bit PCM")
            audioFormat = AudioFormat.ENCODING_PCM_16BIT

            if (!audioDataEncoder.isAudioFormatSupported(config.sampleRate, config.channels, audioFormat, permissionUtils)) {
                promise.reject("INITIALIZATION_FAILED", "Failed to initialize audio recorder with any supported format", null)
                return null
            }

            // Record the downgrade so downstream byte math matches reality.
            updatedConfig = config.copy(encoding = "pcm_16bit")
        }

        return Pair(audioFormat, updatedConfig)
    }
}
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
package expo.modules.audiostream
|
|
2
|
+
|
|
3
|
+
/**
 * Shared constants for the audio-stream module: Expo event names,
 * recording defaults, and WAV container chunk identifiers.
 */
object Constants {
    // Event names emitted to JS (must match the names subscribed on the TS side).
    const val AUDIO_EVENT_NAME = "AudioData"
    const val AUDIO_ANALYSIS_EVENT_NAME = "AudioAnalysis"
    const val SOUND_CHUNK_PLAYED_EVENT_NAME = "SoundChunkPlayed"
    const val SOUND_STARTED_EVENT_NAME = "SoundStarted"
    const val DEVICE_RECONNECTED_EVENT_NAME = "DeviceReconnected"
    const val DEFAULT_SAMPLE_RATE = 16000 // Default sample rate (Hz) for audio recording
    const val DEFAULT_CHANNEL_CONFIG = 1 // Mono
    const val DEFAULT_AUDIO_FORMAT = 16 // 16-bit PCM
    const val DEFAULT_INTERVAL = 100L // Default emission interval in ms
    const val MIN_INTERVAL = 100L // Minimum interval in ms for emitting audio data
    const val WAV_HEADER_SIZE = 44 // Canonical size of a PCM WAV header in bytes
    // WAV/RIFF chunk identifiers as big-endian ASCII packed into 32-bit ints.
    const val RIFF_HEADER = 0x52494646 // "RIFF"
    const val WAVE_HEADER = 0x57415645 // "WAVE"
    const val FMT_CHUNK_ID = 0x666d7420 // "fmt "
    const val DATA_CHUNK_ID = 0x64617461 // "data"
    const val INFO_CHUNK_ID = 0x494E464F // "INFO" (uppercase: 0x49 0x4E 0x46 0x4F)
    const val TAG = "AudioRecorderModule" // Logcat tag used throughout the module
}
|