react-native-audio-concat 0.2.3 → 0.4.0
- package/AudioConcat.podspec +2 -21
- package/README.md +3 -7
- package/android/build.gradle +2 -54
- package/android/src/main/java/com/audioconcat/AudioConcatModule.kt +1178 -0
- package/android/src/main/java/com/audioconcat/AudioConcatPackage.kt +33 -0
- package/ios/AudioConcat.h +5 -0
- package/ios/AudioConcat.mm +104 -0
- package/lib/module/NativeAudioConcat.js +5 -0
- package/lib/module/NativeAudioConcat.js.map +1 -0
- package/lib/module/index.js +2 -28
- package/lib/module/index.js.map +1 -1
- package/lib/typescript/src/NativeAudioConcat.d.ts +12 -0
- package/lib/typescript/src/NativeAudioConcat.d.ts.map +1 -0
- package/lib/typescript/src/index.d.ts +6 -27
- package/lib/typescript/src/index.d.ts.map +1 -1
- package/package.json +14 -18
- package/src/NativeAudioConcat.ts +12 -0
- package/src/index.tsx +4 -32
- package/android/CMakeLists.txt +0 -24
- package/android/src/main/cpp/cpp-adapter.cpp +0 -6
- package/android/src/main/java/com/margelo/nitro/audioconcat/AudioConcat.kt +0 -349
- package/android/src/main/java/com/margelo/nitro/audioconcat/AudioConcatPackage.kt +0 -22
- package/ios/AudioConcat.swift +0 -75
- package/lib/module/AudioConcat.nitro.js +0 -4
- package/lib/module/AudioConcat.nitro.js.map +0 -1
- package/lib/typescript/src/AudioConcat.nitro.d.ts +0 -16
- package/lib/typescript/src/AudioConcat.nitro.d.ts.map +0 -1
- package/nitro.json +0 -17
- package/nitrogen/generated/android/audioconcat+autolinking.cmake +0 -82
- package/nitrogen/generated/android/audioconcat+autolinking.gradle +0 -27
- package/nitrogen/generated/android/audioconcatOnLoad.cpp +0 -44
- package/nitrogen/generated/android/audioconcatOnLoad.hpp +0 -25
- package/nitrogen/generated/android/c++/JAudioData.hpp +0 -53
- package/nitrogen/generated/android/c++/JAudioDataOrSilence.cpp +0 -26
- package/nitrogen/generated/android/c++/JAudioDataOrSilence.hpp +0 -72
- package/nitrogen/generated/android/c++/JHybridAudioConcatSpec.cpp +0 -77
- package/nitrogen/generated/android/c++/JHybridAudioConcatSpec.hpp +0 -64
- package/nitrogen/generated/android/c++/JSilentData.hpp +0 -53
- package/nitrogen/generated/android/kotlin/com/margelo/nitro/audioconcat/AudioData.kt +0 -29
- package/nitrogen/generated/android/kotlin/com/margelo/nitro/audioconcat/AudioDataOrSilence.kt +0 -42
- package/nitrogen/generated/android/kotlin/com/margelo/nitro/audioconcat/HybridAudioConcatSpec.kt +0 -52
- package/nitrogen/generated/android/kotlin/com/margelo/nitro/audioconcat/SilentData.kt +0 -29
- package/nitrogen/generated/android/kotlin/com/margelo/nitro/audioconcat/audioconcatOnLoad.kt +0 -35
- package/nitrogen/generated/ios/AudioConcat+autolinking.rb +0 -60
- package/nitrogen/generated/ios/AudioConcat-Swift-Cxx-Bridge.cpp +0 -48
- package/nitrogen/generated/ios/AudioConcat-Swift-Cxx-Bridge.hpp +0 -160
- package/nitrogen/generated/ios/AudioConcat-Swift-Cxx-Umbrella.hpp +0 -53
- package/nitrogen/generated/ios/AudioConcatAutolinking.mm +0 -33
- package/nitrogen/generated/ios/AudioConcatAutolinking.swift +0 -25
- package/nitrogen/generated/ios/c++/HybridAudioConcatSpecSwift.cpp +0 -11
- package/nitrogen/generated/ios/c++/HybridAudioConcatSpecSwift.hpp +0 -81
- package/nitrogen/generated/ios/swift/AudioData.swift +0 -35
- package/nitrogen/generated/ios/swift/AudioDataOrSilence.swift +0 -18
- package/nitrogen/generated/ios/swift/Func_void_std__exception_ptr.swift +0 -47
- package/nitrogen/generated/ios/swift/Func_void_std__string.swift +0 -47
- package/nitrogen/generated/ios/swift/HybridAudioConcatSpec.swift +0 -49
- package/nitrogen/generated/ios/swift/HybridAudioConcatSpec_cxx.swift +0 -142
- package/nitrogen/generated/ios/swift/SilentData.swift +0 -35
- package/nitrogen/generated/shared/c++/AudioData.hpp +0 -67
- package/nitrogen/generated/shared/c++/HybridAudioConcatSpec.cpp +0 -21
- package/nitrogen/generated/shared/c++/HybridAudioConcatSpec.hpp +0 -70
- package/nitrogen/generated/shared/c++/SilentData.hpp +0 -67
- package/src/AudioConcat.nitro.ts +0 -19

package/android/src/main/java/com/margelo/nitro/audioconcat/AudioConcat.kt
DELETED
@@ -1,349 +0,0 @@
-package com.margelo.nitro.audioconcat
-
-import com.facebook.proguard.annotations.DoNotStrip
-import com.margelo.nitro.core.Promise
-import android.media.MediaCodec
-import android.media.MediaCodecInfo
-import android.media.MediaExtractor
-import android.media.MediaFormat
-import android.media.MediaMuxer
-import java.io.File
-import java.nio.ByteBuffer
-import android.util.Log
-
-@DoNotStrip
-class AudioConcat : HybridAudioConcatSpec() {
-  private data class AudioConfig(
-    val sampleRate: Int,
-    val channelCount: Int,
-    val bitRate: Int
-  )
-
-  private fun extractAudioConfig(filePath: String): AudioConfig {
-    val extractor = MediaExtractor()
-    try {
-      extractor.setDataSource(filePath)
-      for (i in 0 until extractor.trackCount) {
-        val format = extractor.getTrackFormat(i)
-        val mime = format.getString(MediaFormat.KEY_MIME) ?: continue
-        if (mime.startsWith("audio/")) {
-          val sampleRate = format.getInteger(MediaFormat.KEY_SAMPLE_RATE)
-          val channelCount = format.getInteger(MediaFormat.KEY_CHANNEL_COUNT)
-          val bitRate = if (format.containsKey(MediaFormat.KEY_BIT_RATE)) {
-            format.getInteger(MediaFormat.KEY_BIT_RATE)
-          } else {
-            128000 // Default 128kbps
-          }
-          return AudioConfig(sampleRate, channelCount, bitRate)
-        }
-      }
-      throw Exception("No audio track found in $filePath")
-    } finally {
-      extractor.release()
-    }
-  }
-
-  private class StreamingEncoder(
-    sampleRate: Int,
-    channelCount: Int,
-    bitRate: Int,
-    outputPath: String
-  ) {
-    private val encoder: MediaCodec
-    private val muxer: MediaMuxer
-    private var audioTrackIndex = -1
-    private var muxerStarted = false
-    private val bufferInfo = MediaCodec.BufferInfo()
-    private var totalPresentationTimeUs = 0L
-    private val sampleRate: Int
-    private val channelCount: Int
-
-    init {
-      this.sampleRate = sampleRate
-      this.channelCount = channelCount
-
-      val outputFormat = MediaFormat.createAudioFormat(
-        MediaFormat.MIMETYPE_AUDIO_AAC,
-        sampleRate,
-        channelCount
-      )
-      outputFormat.setInteger(MediaFormat.KEY_AAC_PROFILE, MediaCodecInfo.CodecProfileLevel.AACObjectLC)
-      outputFormat.setInteger(MediaFormat.KEY_BIT_RATE, bitRate)
-      outputFormat.setInteger(MediaFormat.KEY_MAX_INPUT_SIZE, 16384)
-
-      encoder = MediaCodec.createEncoderByType(MediaFormat.MIMETYPE_AUDIO_AAC)
-      encoder.configure(outputFormat, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE)
-      encoder.start()
-
-      muxer = MediaMuxer(outputPath, MediaMuxer.OutputFormat.MUXER_OUTPUT_MPEG_4)
-    }
-
-    fun encodePCMChunk(pcmData: ByteArray, isLast: Boolean = false): Boolean {
-      // Feed PCM data to encoder
-      val inputBufferIndex = encoder.dequeueInputBuffer(10000)
-      if (inputBufferIndex >= 0) {
-        val inputBuffer = encoder.getInputBuffer(inputBufferIndex)!!
-        inputBuffer.clear()
-        inputBuffer.put(pcmData)
-
-        val presentationTimeUs = totalPresentationTimeUs
-        totalPresentationTimeUs += (pcmData.size.toLong() * 1_000_000) / (sampleRate * channelCount * 2)
-
-        val flags = if (isLast) MediaCodec.BUFFER_FLAG_END_OF_STREAM else 0
-        encoder.queueInputBuffer(inputBufferIndex, 0, pcmData.size, presentationTimeUs, flags)
-      }
-
-      // Drain encoder output
-      drainEncoder(isLast)
-
-      return true
-    }
-
-    private fun drainEncoder(endOfStream: Boolean) {
-      while (true) {
-        val outputBufferIndex = encoder.dequeueOutputBuffer(bufferInfo, if (endOfStream) 10000 else 0)
-
-        when (outputBufferIndex) {
-          MediaCodec.INFO_OUTPUT_FORMAT_CHANGED -> {
-            if (muxerStarted) {
-              throw RuntimeException("Format changed twice")
-            }
-            val newFormat = encoder.outputFormat
-            audioTrackIndex = muxer.addTrack(newFormat)
-            muxer.start()
-            muxerStarted = true
-            Log.d("AwesomeLibrary", "Encoder started, format: $newFormat")
-          }
-          MediaCodec.INFO_TRY_AGAIN_LATER -> {
-            if (!endOfStream) {
-              break
-            }
-            // Continue draining when end of stream
-          }
-          else -> {
-            if (outputBufferIndex >= 0) {
-              val outputBuffer = encoder.getOutputBuffer(outputBufferIndex)!!
-
-              if ((bufferInfo.flags and MediaCodec.BUFFER_FLAG_CODEC_CONFIG) != 0) {
-                bufferInfo.size = 0
-              }
-
-              if (bufferInfo.size > 0 && muxerStarted) {
-                outputBuffer.position(bufferInfo.offset)
-                outputBuffer.limit(bufferInfo.offset + bufferInfo.size)
-                muxer.writeSampleData(audioTrackIndex, outputBuffer, bufferInfo)
-              }
-
-              encoder.releaseOutputBuffer(outputBufferIndex, false)
-
-              if ((bufferInfo.flags and MediaCodec.BUFFER_FLAG_END_OF_STREAM) != 0) {
-                break
-              }
-            }
-          }
-        }
-      }
-    }
-
-    fun finish() {
-      // Signal end of stream
-      val inputBufferIndex = encoder.dequeueInputBuffer(10000)
-      if (inputBufferIndex >= 0) {
-        encoder.queueInputBuffer(inputBufferIndex, 0, 0, 0, MediaCodec.BUFFER_FLAG_END_OF_STREAM)
-      }
-
-      // Drain remaining data
-      drainEncoder(true)
-
-      encoder.stop()
-      encoder.release()
-
-      if (muxerStarted) {
-        muxer.stop()
-      }
-      muxer.release()
-    }
-  }
-
-  private fun streamDecodeAudioFile(
-    filePath: String,
-    encoder: StreamingEncoder,
-    isLastFile: Boolean
-  ) {
-    val extractor = MediaExtractor()
-    var decoder: MediaCodec? = null
-
-    try {
-      extractor.setDataSource(filePath)
-
-      var audioTrackIndex = -1
-      var audioFormat: MediaFormat? = null
-
-      for (i in 0 until extractor.trackCount) {
-        val format = extractor.getTrackFormat(i)
-        val mime = format.getString(MediaFormat.KEY_MIME) ?: continue
-        if (mime.startsWith("audio/")) {
-          audioTrackIndex = i
-          audioFormat = format
-          break
-        }
-      }
-
-      if (audioTrackIndex == -1 || audioFormat == null) {
-        throw Exception("No audio track found in $filePath")
-      }
-
-      extractor.selectTrack(audioTrackIndex)
-
-      val mime = audioFormat.getString(MediaFormat.KEY_MIME)!!
-      decoder = MediaCodec.createDecoderByType(mime)
-      decoder.configure(audioFormat, null, null, 0)
-      decoder.start()
-
-      val bufferInfo = MediaCodec.BufferInfo()
-      var isEOS = false
-      val pcmChunkSize = 8192 // Process in 8KB chunks
-
-      while (!isEOS) {
-        // Feed input to decoder
-        val inputBufferIndex = decoder.dequeueInputBuffer(10000)
-        if (inputBufferIndex >= 0) {
-          val inputBuffer = decoder.getInputBuffer(inputBufferIndex)!!
-          val sampleSize = extractor.readSampleData(inputBuffer, 0)
-
-          if (sampleSize < 0) {
-            decoder.queueInputBuffer(inputBufferIndex, 0, 0, 0, MediaCodec.BUFFER_FLAG_END_OF_STREAM)
-          } else {
-            val presentationTimeUs = extractor.sampleTime
-            decoder.queueInputBuffer(inputBufferIndex, 0, sampleSize, presentationTimeUs, 0)
-            extractor.advance()
-          }
-        }
-
-        // Get PCM output from decoder and feed to encoder
-        val outputBufferIndex = decoder.dequeueOutputBuffer(bufferInfo, 10000)
-        if (outputBufferIndex >= 0) {
-          val outputBuffer = decoder.getOutputBuffer(outputBufferIndex)!!
-
-          if (bufferInfo.size > 0) {
-            val pcmData = ByteArray(bufferInfo.size)
-            outputBuffer.get(pcmData)
-
-            // Stream to encoder immediately
-            encoder.encodePCMChunk(pcmData, false)
-          }
-
-          decoder.releaseOutputBuffer(outputBufferIndex, false)
-
-          if ((bufferInfo.flags and MediaCodec.BUFFER_FLAG_END_OF_STREAM) != 0) {
-            isEOS = true
-          }
-        }
-      }
-
-    } finally {
-      decoder?.stop()
-      decoder?.release()
-      extractor.release()
-    }
-  }
-
-  private fun streamEncodeSilence(
-    durationMs: Double,
-    encoder: StreamingEncoder,
-    sampleRate: Int,
-    channelCount: Int
-  ) {
-    val totalSamples = ((durationMs / 1000.0) * sampleRate).toInt()
-    val chunkSamples = 4096 // Process in chunks
-    val bytesPerSample = channelCount * 2 // 16-bit stereo
-
-    var samplesRemaining = totalSamples
-
-    while (samplesRemaining > 0) {
-      val currentChunkSamples = minOf(chunkSamples, samplesRemaining)
-      val chunkBytes = currentChunkSamples * bytesPerSample
-      val silenceChunk = ByteArray(chunkBytes) // All zeros = silence
-
-      encoder.encodePCMChunk(silenceChunk, false)
-      samplesRemaining -= currentChunkSamples
-    }
-  }
-
-  override fun concatAudioFiles(data: Array<AudioDataOrSilence>, outputPath: String): Promise<String> {
-    return Promise.async {
-      if (data.isEmpty()) {
-        throw Exception("Data array is empty")
-      }
-
-      Log.d("AwesomeLibrary", "Streaming merge of ${data.size} items")
-      Log.d("AwesomeLibrary", "Output: $outputPath")
-
-      // Get audio config from first audio file
-      var audioConfig: AudioConfig? = null
-      for (item in data) {
-        if (item is AudioDataOrSilence.First) {
-          audioConfig = extractAudioConfig(item.value.filePath)
-          break
-        }
-      }
-
-      if (audioConfig == null) {
-        throw Exception("No audio files found in data array")
-      }
-
-      Log.d("AwesomeLibrary", "Audio config: ${audioConfig.sampleRate}Hz, ${audioConfig.channelCount}ch, ${audioConfig.bitRate}bps")
-
-      // Delete existing output file
-      val outputFile = File(outputPath)
-      if (outputFile.exists()) {
-        outputFile.delete()
-      }
-
-      // Create streaming encoder
-      val encoder = StreamingEncoder(
-        audioConfig.sampleRate,
-        audioConfig.channelCount,
-        audioConfig.bitRate,
-        outputPath
-      )
-
-      try {
-        // Process each item
-        for ((index, item) in data.withIndex()) {
-          when (item) {
-            is AudioDataOrSilence.First -> {
-              val filePath = item.value.filePath
-              Log.d("AwesomeLibrary", "Item $index: Streaming decode $filePath")
-
-              val isLastFile = (index == data.size - 1)
-              streamDecodeAudioFile(filePath, encoder, isLastFile)
-            }
-
-            is AudioDataOrSilence.Second -> {
-              val durationMs = item.value.durationMs
-              Log.d("AwesomeLibrary", "Item $index: Streaming silence ${durationMs}ms")
-
-              streamEncodeSilence(
-                durationMs,
-                encoder,
-                audioConfig.sampleRate,
-                audioConfig.channelCount
-              )
-            }
-          }
-        }
-
-        // Finish encoding
-        encoder.finish()
-        Log.d("AwesomeLibrary", "Successfully merged audio to $outputPath")
-
-      } catch (e: Exception) {
-        Log.e("AwesomeLibrary", "Error during streaming merge: ${e.message}", e)
-        throw e
-      }
-
-      outputPath
-    }
-  }
-}
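
Note on the timing math above: the deleted `StreamingEncoder` derives each buffer's `presentationTimeUs` from the byte count of 16-bit PCM, and `streamEncodeSilence` sizes its zero-filled chunks from the same frame size. A minimal sketch of that arithmetic (TypeScript is used here purely for illustration; the helper names are not part of the package):

```ts
// 16-bit PCM: each frame is 2 bytes per channel.
const bytesPerFrame = (channelCount: number): number => channelCount * 2;

// Microseconds contributed by a PCM chunk, mirroring
// `totalPresentationTimeUs += bytes * 1_000_000 / (sampleRate * channelCount * 2)`.
const chunkDurationUs = (chunkBytes: number, sampleRate: number, channelCount: number): number =>
  Math.floor((chunkBytes * 1_000_000) / (sampleRate * bytesPerFrame(channelCount)));

// Byte size of a silence buffer: `durationMs` worth of zeroed 16-bit PCM frames,
// mirroring `totalSamples * bytesPerSample` in `streamEncodeSilence`.
const silenceBytes = (durationMs: number, sampleRate: number, channelCount: number): number =>
  Math.floor((durationMs / 1000) * sampleRate) * bytesPerFrame(channelCount);

// An 8192-byte chunk of 44.1 kHz stereo PCM advances the timestamp by ~46,439 µs,
// and 500 ms of 44.1 kHz stereo silence is 88,200 zero bytes.
console.log(chunkDurationUs(8192, 44100, 2), silenceBytes(500, 44100, 2));
```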

package/android/src/main/java/com/margelo/nitro/audioconcat/AudioConcatPackage.kt
DELETED
@@ -1,22 +0,0 @@
-package com.margelo.nitro.audioconcat
-
-import com.facebook.react.TurboReactPackage
-import com.facebook.react.bridge.NativeModule
-import com.facebook.react.bridge.ReactApplicationContext
-import com.facebook.react.module.model.ReactModuleInfoProvider
-
-class AudioConcatPackage : TurboReactPackage() {
-  override fun getModule(name: String, reactContext: ReactApplicationContext): NativeModule? {
-    return null
-  }
-
-  override fun getReactModuleInfoProvider(): ReactModuleInfoProvider {
-    return ReactModuleInfoProvider { HashMap() }
-  }
-
-  companion object {
-    init {
-      System.loadLibrary("audioconcat")
-    }
-  }
-}

package/ios/AudioConcat.swift
DELETED
@@ -1,75 +0,0 @@
-import AVFoundation
-import NitroModules
-
-class AudioConcat: HybridAudioConcatSpec {
-  public func concatAudioFiles(data: [AudioDataOrSilence], outputPath: String) throws -> Promise<String> {
-    return Promise.async { [weak self] in
-      guard !data.isEmpty else {
-        throw NSError(domain: "AwesomeLibrary", code: 1, userInfo: [NSLocalizedDescriptionKey: "Data array is empty"])
-      }
-
-      let composition = AVMutableComposition()
-      guard let audioTrack = composition.addMutableTrack(withMediaType: .audio, preferredTrackID: kCMPersistentTrackID_Invalid) else {
-        throw NSError(domain: "AwesomeLibrary", code: 2, userInfo: [NSLocalizedDescriptionKey: "Failed to create audio track"])
-      }
-
-      var currentTime = CMTime.zero
-
-      for (index, item) in data.enumerated() {
-        switch item {
-        case .first(let audioData):
-          // Audio file
-          let filePath = audioData.filePath
-          let fileURL = URL(fileURLWithPath: filePath)
-          let asset = AVAsset(url: fileURL)
-
-          guard let sourceTrack = asset.tracks(withMediaType: .audio).first else {
-            throw NSError(domain: "AwesomeLibrary", code: 3, userInfo: [NSLocalizedDescriptionKey: "No audio track found in \(filePath)"])
-          }
-
-          let duration = asset.duration
-          let timeRange = CMTimeRange(start: .zero, duration: duration)
-
-          try audioTrack.insertTimeRange(timeRange, of: sourceTrack, at: currentTime)
-          currentTime = CMTimeAdd(currentTime, duration)
-
-        case .second(let silentData):
-          // Silence
-          let durationMs = silentData.durationMs
-          let silenceDuration = CMTime(seconds: durationMs / 1000.0, preferredTimescale: 600)
-
-          // In AVMutableComposition, leaving a gap (not inserting anything)
-          // automatically creates silence
-          currentTime = CMTimeAdd(currentTime, silenceDuration)
-        }
-      }
-
-      let outputURL = URL(fileURLWithPath: outputPath)
-
-      // Remove existing file if present
-      try? FileManager.default.removeItem(at: outputURL)
-
-      guard let exportSession = AVAssetExportSession(asset: composition, presetName: AVAssetExportPresetAppleM4A) else {
-        throw NSError(domain: "AwesomeLibrary", code: 4, userInfo: [NSLocalizedDescriptionKey: "Failed to create export session"])
-      }
-
-      exportSession.outputURL = outputURL
-      exportSession.outputFileType = .m4a
-
-      return try await withCheckedThrowingContinuation { continuation in
-        exportSession.exportAsynchronously {
-          switch exportSession.status {
-          case .completed:
-            continuation.resume(returning: outputPath)
-          case .failed:
-            continuation.resume(throwing: exportSession.error ?? NSError(domain: "AwesomeLibrary", code: 5, userInfo: [NSLocalizedDescriptionKey: "Export failed"]))
-          case .cancelled:
-            continuation.resume(throwing: NSError(domain: "AwesomeLibrary", code: 6, userInfo: [NSLocalizedDescriptionKey: "Export cancelled"]))
-          default:
-            continuation.resume(throwing: NSError(domain: "AwesomeLibrary", code: 7, userInfo: [NSLocalizedDescriptionKey: "Unknown export status"]))
-          }
-        }
-      }
-    }
-  }
-}

package/lib/module/AudioConcat.nitro.js.map
DELETED
@@ -1 +0,0 @@
-{"version":3,"names":[],"sourceRoot":"../../src","sources":["AudioConcat.nitro.ts"],"mappings":"","ignoreList":[]}

package/lib/typescript/src/AudioConcat.nitro.d.ts
DELETED
@@ -1,16 +0,0 @@
-import type { HybridObject } from 'react-native-nitro-modules';
-type SilentData = {
-  durationMs: number;
-};
-type AudioData = {
-  filePath: string;
-};
-export type AudioDataOrSilence = AudioData | SilentData;
-export interface AudioConcat extends HybridObject<{
-  ios: 'swift';
-  android: 'kotlin';
-}> {
-  concatAudioFiles(data: AudioDataOrSilence[], outputPath: string): Promise<string>;
-}
-export {};
-//# sourceMappingURL=AudioConcat.nitro.d.ts.map
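
Note on the spec above: a 0.2.3 caller passed `concatAudioFiles` a mixed array of `{ filePath }` and `{ durationMs }` entries. Below is a hypothetical usage sketch against the deleted interface. The `NitroModules.createHybridObject` lookup follows react-native-nitro-modules' documented pattern, and the import path and file paths are illustrative assumptions; the package's own `src/index.tsx` wrapper (also removed in this diff) may have exposed a different helper.

```ts
import { NitroModules } from 'react-native-nitro-modules';
import type { AudioConcat, AudioDataOrSilence } from './AudioConcat.nitro';

// Assumed: look up the hybrid object registered under "AudioConcat"
// (the same name used by nitro.json and audioconcatOnLoad.cpp below).
const audioConcat = NitroModules.createHybridObject<AudioConcat>('AudioConcat');

// Audio files interleaved with generated silence (paths are placeholders).
const data: AudioDataOrSilence[] = [
  { filePath: '/path/to/intro.m4a' },
  { durationMs: 500 }, // 500 ms gap rendered as silence
  { filePath: '/path/to/outro.m4a' },
];

// Resolves with the output path once the merged file has been written.
audioConcat
  .concatAudioFiles(data, '/path/to/merged.m4a')
  .then((outputPath) => console.log('merged audio at', outputPath));
```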

package/lib/typescript/src/AudioConcat.nitro.d.ts.map
DELETED
@@ -1 +0,0 @@
-{"version":3,"file":"AudioConcat.nitro.d.ts","sourceRoot":"","sources":["../../../src/AudioConcat.nitro.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,4BAA4B,CAAC;AAE/D,KAAK,UAAU,GAAG;IAChB,UAAU,EAAE,MAAM,CAAC;CACpB,CAAC;AAEF,KAAK,SAAS,GAAG;IACf,QAAQ,EAAE,MAAM,CAAC;CAClB,CAAC;AAEF,MAAM,MAAM,kBAAkB,GAAG,SAAS,GAAG,UAAU,CAAC;AAExD,MAAM,WAAW,WACf,SAAQ,YAAY,CAAC;IAAE,GAAG,EAAE,OAAO,CAAC;IAAC,OAAO,EAAE,QAAQ,CAAA;CAAE,CAAC;IACzD,gBAAgB,CACd,IAAI,EAAE,kBAAkB,EAAE,EAC1B,UAAU,EAAE,MAAM,GACjB,OAAO,CAAC,MAAM,CAAC,CAAC;CACpB"}
package/nitro.json
DELETED
@@ -1,17 +0,0 @@
-{
-  "cxxNamespace": ["audioconcat"],
-  "ios": {
-    "iosModuleName": "AudioConcat"
-  },
-  "android": {
-    "androidNamespace": ["audioconcat"],
-    "androidCxxLibName": "audioconcat"
-  },
-  "autolinking": {
-    "AudioConcat": {
-      "swift": "AudioConcat",
-      "kotlin": "AudioConcat"
-    }
-  },
-  "ignorePaths": ["node_modules"]
-}

package/nitrogen/generated/android/audioconcat+autolinking.cmake
DELETED
@@ -1,82 +0,0 @@
-#
-# audioconcat+autolinking.cmake
-# This file was generated by nitrogen. DO NOT MODIFY THIS FILE.
-# https://github.com/mrousavy/nitro
-# Copyright © 2025 Marc Rousavy @ Margelo
-#
-
-# This is a CMake file that adds all files generated by Nitrogen
-# to the current CMake project.
-#
-# To use it, add this to your CMakeLists.txt:
-# ```cmake
-# include(${CMAKE_SOURCE_DIR}/../nitrogen/generated/android/audioconcat+autolinking.cmake)
-# ```
-
-# Define a flag to check if we are building properly
-add_definitions(-DBUILDING_AUDIOCONCAT_WITH_GENERATED_CMAKE_PROJECT)
-
-# Enable Raw Props parsing in react-native (for Nitro Views)
-add_definitions(-DRN_SERIALIZABLE_STATE)
-
-# Add all headers that were generated by Nitrogen
-include_directories(
-  "../nitrogen/generated/shared/c++"
-  "../nitrogen/generated/android/c++"
-  "../nitrogen/generated/android/"
-)
-
-# Add all .cpp sources that were generated by Nitrogen
-target_sources(
-  # CMake project name (Android C++ library name)
-  audioconcat PRIVATE
-  # Autolinking Setup
-  ../nitrogen/generated/android/audioconcatOnLoad.cpp
-  # Shared Nitrogen C++ sources
-  ../nitrogen/generated/shared/c++/HybridAudioConcatSpec.cpp
-  # Android-specific Nitrogen C++ sources
-  ../nitrogen/generated/android/c++/JHybridAudioConcatSpec.cpp
-  ../nitrogen/generated/android/c++/JAudioDataOrSilence.cpp
-)
-
-# From node_modules/react-native/ReactAndroid/cmake-utils/folly-flags.cmake
-# Used in node_modules/react-native/ReactAndroid/cmake-utils/ReactNative-application.cmake
-target_compile_definitions(
-  audioconcat PRIVATE
-  -DFOLLY_NO_CONFIG=1
-  -DFOLLY_HAVE_CLOCK_GETTIME=1
-  -DFOLLY_USE_LIBCPP=1
-  -DFOLLY_CFG_NO_COROUTINES=1
-  -DFOLLY_MOBILE=1
-  -DFOLLY_HAVE_RECVMMSG=1
-  -DFOLLY_HAVE_PTHREAD=1
-  # Once we target android-23 above, we can comment
-  # the following line. NDK uses GNU style stderror_r() after API 23.
-  -DFOLLY_HAVE_XSI_STRERROR_R=1
-)
-
-# Add all libraries required by the generated specs
-find_package(fbjni REQUIRED) # <-- Used for communication between Java <-> C++
-find_package(ReactAndroid REQUIRED) # <-- Used to set up React Native bindings (e.g. CallInvoker/TurboModule)
-find_package(react-native-nitro-modules REQUIRED) # <-- Used to create all HybridObjects and use the Nitro core library
-
-# Link all libraries together
-target_link_libraries(
-  audioconcat
-  fbjni::fbjni # <-- Facebook C++ JNI helpers
-  ReactAndroid::jsi # <-- RN: JSI
-  react-native-nitro-modules::NitroModules # <-- NitroModules Core :)
-)
-
-# Link react-native (different prefab between RN 0.75 and RN 0.76)
-if(ReactAndroid_VERSION_MINOR GREATER_EQUAL 76)
-  target_link_libraries(
-    audioconcat
-    ReactAndroid::reactnative # <-- RN: Native Modules umbrella prefab
-  )
-else()
-  target_link_libraries(
-    audioconcat
-    ReactAndroid::react_nativemodule_core # <-- RN: TurboModules Core
-  )
-endif()

package/nitrogen/generated/android/audioconcat+autolinking.gradle
DELETED
@@ -1,27 +0,0 @@
-///
-/// audioconcat+autolinking.gradle
-/// This file was generated by nitrogen. DO NOT MODIFY THIS FILE.
-/// https://github.com/mrousavy/nitro
-/// Copyright © 2025 Marc Rousavy @ Margelo
-///
-
-/// This is a Gradle file that adds all files generated by Nitrogen
-/// to the current Gradle project.
-///
-/// To use it, add this to your build.gradle:
-/// ```gradle
-/// apply from: '../nitrogen/generated/android/audioconcat+autolinking.gradle'
-/// ```
-
-logger.warn("[NitroModules] 🔥 audioconcat is boosted by nitro!")
-
-android {
-  sourceSets {
-    main {
-      java.srcDirs += [
-        // Nitrogen files
-        "${project.projectDir}/../nitrogen/generated/android/kotlin"
-      ]
-    }
-  }
-}

package/nitrogen/generated/android/audioconcatOnLoad.cpp
DELETED
@@ -1,44 +0,0 @@
-///
-/// audioconcatOnLoad.cpp
-/// This file was generated by nitrogen. DO NOT MODIFY THIS FILE.
-/// https://github.com/mrousavy/nitro
-/// Copyright © 2025 Marc Rousavy @ Margelo
-///
-
-#ifndef BUILDING_AUDIOCONCAT_WITH_GENERATED_CMAKE_PROJECT
-#error audioconcatOnLoad.cpp is not being built with the autogenerated CMakeLists.txt project. Is a different CMakeLists.txt building this?
-#endif
-
-#include "audioconcatOnLoad.hpp"
-
-#include <jni.h>
-#include <fbjni/fbjni.h>
-#include <NitroModules/HybridObjectRegistry.hpp>
-
-#include "JHybridAudioConcatSpec.hpp"
-#include <NitroModules/DefaultConstructableObject.hpp>
-
-namespace margelo::nitro::audioconcat {
-
-int initialize(JavaVM* vm) {
-  using namespace margelo::nitro;
-  using namespace margelo::nitro::audioconcat;
-  using namespace facebook;
-
-  return facebook::jni::initialize(vm, [] {
-    // Register native JNI methods
-    margelo::nitro::audioconcat::JHybridAudioConcatSpec::registerNatives();
-
-    // Register Nitro Hybrid Objects
-    HybridObjectRegistry::registerHybridObjectConstructor(
-      "AudioConcat",
-      []() -> std::shared_ptr<HybridObject> {
-        static DefaultConstructableObject<JHybridAudioConcatSpec::javaobject> object("com/margelo/nitro/audioconcat/AudioConcat");
-        auto instance = object.create();
-        return instance->cthis()->shared();
-      }
-    );
-  });
-}
-
-} // namespace margelo::nitro::audioconcat

package/nitrogen/generated/android/audioconcatOnLoad.hpp
DELETED
@@ -1,25 +0,0 @@
-///
-/// audioconcatOnLoad.hpp
-/// This file was generated by nitrogen. DO NOT MODIFY THIS FILE.
-/// https://github.com/mrousavy/nitro
-/// Copyright © 2025 Marc Rousavy @ Margelo
-///
-
-#include <jni.h>
-#include <NitroModules/NitroDefines.hpp>
-
-namespace margelo::nitro::audioconcat {
-
-/**
- * Initializes the native (C++) part of audioconcat, and autolinks all Hybrid Objects.
- * Call this in your `JNI_OnLoad` function (probably inside `cpp-adapter.cpp`).
- * Example:
- * ```cpp (cpp-adapter.cpp)
- * JNIEXPORT jint JNICALL JNI_OnLoad(JavaVM* vm, void*) {
- *   return margelo::nitro::audioconcat::initialize(vm);
- * }
- * ```
- */
-int initialize(JavaVM* vm);
-
-} // namespace margelo::nitro::audioconcat