@gmessier/nitro-speech 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (85) hide show
  1. package/NitroSpeech.podspec +31 -0
  2. package/README.md +55 -0
  3. package/android/CMakeLists.txt +29 -0
  4. package/android/build.gradle +148 -0
  5. package/android/fix-prefab.gradle +51 -0
  6. package/android/gradle.properties +5 -0
  7. package/android/src/main/AndroidManifest.xml +3 -0
  8. package/android/src/main/cpp/cpp-adapter.cpp +6 -0
  9. package/android/src/main/java/com/margelo/nitro/nitrospeech/HybridNitroSpeech.kt +12 -0
  10. package/android/src/main/java/com/margelo/nitro/nitrospeech/NitroSpeechPackage.kt +20 -0
  11. package/android/src/main/java/com/margelo/nitro/nitrospeech/recognizer/AudioPermissionRequester.kt +39 -0
  12. package/android/src/main/java/com/margelo/nitro/nitrospeech/recognizer/AutoStopper.kt +35 -0
  13. package/android/src/main/java/com/margelo/nitro/nitrospeech/recognizer/HybridRecognizer.kt +181 -0
  14. package/android/src/main/java/com/margelo/nitro/nitrospeech/recognizer/RecognitionListenerSession.kt +106 -0
  15. package/ios/AppStateObserver.swift +31 -0
  16. package/ios/AutoStopper.swift +57 -0
  17. package/ios/Bridge.h +8 -0
  18. package/ios/HybridNitroSpeech.swift +6 -0
  19. package/ios/HybridRecognizer.swift +201 -0
  20. package/lib/commonjs/index.js +10 -0
  21. package/lib/commonjs/index.js.map +1 -0
  22. package/lib/commonjs/package.json +1 -0
  23. package/lib/commonjs/specs/NitroSpeech.nitro.js +6 -0
  24. package/lib/commonjs/specs/NitroSpeech.nitro.js.map +1 -0
  25. package/lib/module/index.js +6 -0
  26. package/lib/module/index.js.map +1 -0
  27. package/lib/module/package.json +1 -0
  28. package/lib/module/specs/NitroSpeech.nitro.js +4 -0
  29. package/lib/module/specs/NitroSpeech.nitro.js.map +1 -0
  30. package/lib/tsconfig.tsbuildinfo +1 -0
  31. package/lib/typescript/index.d.ts +3 -0
  32. package/lib/typescript/index.d.ts.map +1 -0
  33. package/lib/typescript/specs/NitroSpeech.nitro.d.ts +108 -0
  34. package/lib/typescript/specs/NitroSpeech.nitro.d.ts.map +1 -0
  35. package/nitro.json +24 -0
  36. package/nitrogen/generated/.gitattributes +1 -0
  37. package/nitrogen/generated/android/NitroSpeech+autolinking.cmake +83 -0
  38. package/nitrogen/generated/android/NitroSpeech+autolinking.gradle +27 -0
  39. package/nitrogen/generated/android/NitroSpeechOnLoad.cpp +54 -0
  40. package/nitrogen/generated/android/NitroSpeechOnLoad.hpp +25 -0
  41. package/nitrogen/generated/android/c++/JFunc_void.hpp +75 -0
  42. package/nitrogen/generated/android/c++/JFunc_void_double.hpp +75 -0
  43. package/nitrogen/generated/android/c++/JFunc_void_std__string.hpp +76 -0
  44. package/nitrogen/generated/android/c++/JFunc_void_std__vector_std__string_.hpp +95 -0
  45. package/nitrogen/generated/android/c++/JHybridNitroSpeechSpec.cpp +59 -0
  46. package/nitrogen/generated/android/c++/JHybridNitroSpeechSpec.hpp +66 -0
  47. package/nitrogen/generated/android/c++/JHybridRecognizerSpec.cpp +167 -0
  48. package/nitrogen/generated/android/c++/JHybridRecognizerSpec.hpp +77 -0
  49. package/nitrogen/generated/android/c++/JSpeechToTextParams.hpp +109 -0
  50. package/nitrogen/generated/android/kotlin/com/margelo/nitro/nitrospeech/Func_void.kt +80 -0
  51. package/nitrogen/generated/android/kotlin/com/margelo/nitro/nitrospeech/Func_void_double.kt +80 -0
  52. package/nitrogen/generated/android/kotlin/com/margelo/nitro/nitrospeech/Func_void_std__string.kt +80 -0
  53. package/nitrogen/generated/android/kotlin/com/margelo/nitro/nitrospeech/Func_void_std__vector_std__string_.kt +80 -0
  54. package/nitrogen/generated/android/kotlin/com/margelo/nitro/nitrospeech/HybridNitroSpeechSpec.kt +59 -0
  55. package/nitrogen/generated/android/kotlin/com/margelo/nitro/nitrospeech/HybridRecognizerSpec.kt +143 -0
  56. package/nitrogen/generated/android/kotlin/com/margelo/nitro/nitrospeech/NitroSpeechOnLoad.kt +35 -0
  57. package/nitrogen/generated/android/kotlin/com/margelo/nitro/nitrospeech/SpeechToTextParams.kt +62 -0
  58. package/nitrogen/generated/ios/NitroSpeech+autolinking.rb +60 -0
  59. package/nitrogen/generated/ios/NitroSpeech-Swift-Cxx-Bridge.cpp +82 -0
  60. package/nitrogen/generated/ios/NitroSpeech-Swift-Cxx-Bridge.hpp +291 -0
  61. package/nitrogen/generated/ios/NitroSpeech-Swift-Cxx-Umbrella.hpp +55 -0
  62. package/nitrogen/generated/ios/NitroSpeechAutolinking.mm +33 -0
  63. package/nitrogen/generated/ios/NitroSpeechAutolinking.swift +25 -0
  64. package/nitrogen/generated/ios/c++/HybridNitroSpeechSpecSwift.cpp +11 -0
  65. package/nitrogen/generated/ios/c++/HybridNitroSpeechSpecSwift.hpp +77 -0
  66. package/nitrogen/generated/ios/c++/HybridRecognizerSpecSwift.cpp +11 -0
  67. package/nitrogen/generated/ios/c++/HybridRecognizerSpecSwift.hpp +126 -0
  68. package/nitrogen/generated/ios/swift/Func_void.swift +47 -0
  69. package/nitrogen/generated/ios/swift/Func_void_double.swift +47 -0
  70. package/nitrogen/generated/ios/swift/Func_void_std__string.swift +47 -0
  71. package/nitrogen/generated/ios/swift/Func_void_std__vector_std__string_.swift +47 -0
  72. package/nitrogen/generated/ios/swift/HybridNitroSpeechSpec.swift +56 -0
  73. package/nitrogen/generated/ios/swift/HybridNitroSpeechSpec_cxx.swift +137 -0
  74. package/nitrogen/generated/ios/swift/HybridRecognizerSpec.swift +62 -0
  75. package/nitrogen/generated/ios/swift/HybridRecognizerSpec_cxx.swift +337 -0
  76. package/nitrogen/generated/ios/swift/SpeechToTextParams.swift +300 -0
  77. package/nitrogen/generated/shared/c++/HybridNitroSpeechSpec.cpp +22 -0
  78. package/nitrogen/generated/shared/c++/HybridNitroSpeechSpec.hpp +65 -0
  79. package/nitrogen/generated/shared/c++/HybridRecognizerSpec.cpp +34 -0
  80. package/nitrogen/generated/shared/c++/HybridRecognizerSpec.hpp +79 -0
  81. package/nitrogen/generated/shared/c++/SpeechToTextParams.hpp +109 -0
  82. package/package.json +123 -0
  83. package/react-native.config.js +16 -0
  84. package/src/index.ts +8 -0
  85. package/src/specs/NitroSpeech.nitro.ts +113 -0
@@ -0,0 +1,106 @@
1
package com.margelo.nitro.nitrospeech.recognizer

import android.os.Bundle
import android.speech.RecognitionListener
import android.speech.SpeechRecognizer
import android.util.Log
import com.margelo.nitro.nitrospeech.SpeechToTextParams

/**
 * Wraps one Android [RecognitionListener] session: accumulates partial-result
 * batches, optionally collapses repeated words, and reports progress, final
 * results, or errors through [onFinishRecognition].
 */
class RecognitionListenerSession(
    private val autoStopper: AutoStopper?,
    private val config: SpeechToTextParams?,
    private val onFinishRecognition: (result: ArrayList<String>?, errorMessage: String?, recordingStopped: Boolean) -> Unit,
) {
    companion object {
        private const val TAG = "HybridRecognizer"
    }

    // Partial-result batches accumulated for the current session.
    private var resultBatches: ArrayList<String>? = null

    /** Builds a fresh [RecognitionListener], discarding any previous batches. */
    fun createRecognitionListener(): RecognitionListener {
        resultBatches = null
        return object : RecognitionListener {
            override fun onReadyForSpeech(params: Bundle?) {}

            override fun onBeginningOfSpeech() {}

            override fun onRmsChanged(rmsdB: Float) {
                // Any audio-level change counts as activity for the auto-stop timer.
                autoStopper?.indicateRecordingActivity()
            }

            override fun onBufferReceived(buffer: ByteArray?) {}

            override fun onEndOfSpeech() {}

            override fun onError(error: Int) {
                onFinishRecognition(
                    null,
                    "Error at RecognitionListener: ${describeError(error)}",
                    true
                )
                autoStopper?.stop()
                autoStopper?.forceStopRecording()
            }

            override fun onResults(results: Bundle?) {
                Log.d(TAG, "onResults: $resultBatches")
                onFinishRecognition(resultBatches, null, true)
                autoStopper?.stop()
                autoStopper?.forceStopRecording()
            }

            override fun onPartialResults(partialResults: Bundle?) {
                autoStopper?.indicateRecordingActivity()
                val matches = partialResults?.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION)

                if (matches.isNullOrEmpty() || matches[0] == "") {
                    Log.d(TAG, "onPartialResults[0], skip, NO RECOGNIZE")
                    return
                }

                Log.d(TAG, "onPartialResults[0], add ${matches[0]}")
                val existing = resultBatches
                val updated: ArrayList<String>
                if (existing.isNullOrEmpty()) {
                    Log.d(TAG, "onPartialResults[1], NO BATCHES YET | add first")
                    updated = arrayListOf(matches[0])
                } else {
                    Log.d(TAG, "onPartialResults[1], current batches $existing")
                    val prevBatchLength = existing[existing.lastIndex].length
                    val match = if (config?.disableRepeatingFilter == true) matches[0] else repeatingFilter(matches[0])
                    // A match noticeably shorter than the last batch means the
                    // recognizer restarted; treat it as a new batch.
                    if (config?.androidDisableBatchHandling == true || match.length + 3 < prevBatchLength) {
                        Log.d(TAG, "onPartialResults[2], append new batch")
                        existing.add(match)
                    } else {
                        Log.d(TAG, "onPartialResults[2], update batch, replace #${existing.lastIndex}")
                        existing[existing.lastIndex] = match
                    }
                    updated = existing
                }
                resultBatches = updated
                onFinishRecognition(updated, null, false)
            }

            override fun onEvent(eventType: Int, params: Bundle?) {}
        }
    }

    /** Maps a [SpeechRecognizer] error code to a human-readable message. */
    private fun describeError(error: Int): String = when (error) {
        SpeechRecognizer.ERROR_AUDIO -> "Audio recording error"
        SpeechRecognizer.ERROR_CLIENT -> "Client side error"
        SpeechRecognizer.ERROR_INSUFFICIENT_PERMISSIONS -> "Insufficient permissions"
        SpeechRecognizer.ERROR_NETWORK -> "Network error"
        SpeechRecognizer.ERROR_NETWORK_TIMEOUT -> "Network timeout"
        SpeechRecognizer.ERROR_NO_MATCH -> "No match"
        SpeechRecognizer.ERROR_RECOGNIZER_BUSY -> "Recognizer busy"
        SpeechRecognizer.ERROR_SERVER -> "Server error"
        SpeechRecognizer.ERROR_SPEECH_TIMEOUT -> "No speech input"
        else -> "Unknown error"
    }

    // Collapses runs of 2+ identical adjacent words, e.g. "and and" -> "and".
    private fun repeatingFilter(text: String): String {
        val words = text.split(Regex("\\s+"))
        val joined = StringBuilder(words[0])
        for (i in 1 until words.size) {
            if (words[i] != words[i - 1]) {
                joined.append(' ').append(words[i])
            }
        }
        return joined.toString()
    }
}
@@ -0,0 +1,31 @@
1
import Foundation
import UIKit

/// Invokes a callback when the application is about to resign the active
/// state (e.g. moves to background), so a recognition session can be
/// stopped cleanly before audio access is suspended.
class AppStateObserver {
  private var observer: NSObjectProtocol?
  private let onResignActive: () -> Void

  init(onResignActive: @escaping () -> Void) {
    self.onResignActive = onResignActive
    observer = NotificationCenter.default.addObserver(
      forName: UIApplication.willResignActiveNotification,
      object: nil,
      queue: .main
    ) { [weak self] _ in
      self?.onResignActive()
    }
  }

  /// Detaches from NotificationCenter. Idempotent; also called from deinit.
  func stop() {
    guard let token = observer else { return }
    NotificationCenter.default.removeObserver(token)
    observer = nil
  }

  deinit {
    stop()
  }
}
31
+
@@ -0,0 +1,57 @@
1
import Foundation
import os.log

/// Counts down from `silenceThresholdMs` in one-second ticks, reporting the
/// remaining time via `onProgress` and firing `onTimeout` when the countdown
/// reaches zero. Calling `indicateRecordingActivity` resets the countdown.
class AutoStopper {
  private let silenceThresholdMs: Double
  private let onTimeout: () -> Void
  private let onProgress: (Double) -> Void
  private var progressWorkItem: DispatchWorkItem?
  private var elapsedSeconds: Int = 0
  private var isStopped = false
  private let logger = Logger(subsystem: "com.margelo.nitro.nitrospeech", category: "AutoStopper")

  init(silenceThresholdMs: Double, onProgress: @escaping (Double) -> Void, onTimeout: @escaping () -> Void) {
    self.silenceThresholdMs = silenceThresholdMs
    self.onProgress = onProgress
    self.onTimeout = onTimeout
  }

  /// Resets the silence countdown; `from` is used only for logging.
  /// Once `stop()` has been called, the countdown is never rescheduled.
  func indicateRecordingActivity(from: String) {
    logger.info("indicateRecordingActivity: \(from)")
    onProgress(silenceThresholdMs)
    progressWorkItem?.cancel()
    elapsedSeconds = 0
    guard !isStopped else { return }
    scheduleNextTick()
  }

  // Schedules a one-second tick that either reports remaining time or,
  // when the threshold has elapsed, fires the timeout callback.
  private func scheduleNextTick() {
    let tick = DispatchWorkItem { [weak self] in
      guard let self = self, !self.isStopped else { return }

      self.elapsedSeconds += 1
      let remainingMs = self.silenceThresholdMs - Double(self.elapsedSeconds) * 1000

      if remainingMs <= 0 {
        self.onTimeout()
      } else {
        self.onProgress(remainingMs)
        self.scheduleNextTick()
      }
    }
    progressWorkItem = tick
    DispatchQueue.main.asyncAfter(deadline: .now() + 1.0, execute: tick)
  }

  /// Permanently stops the countdown and cancels any pending tick.
  func stop() {
    isStopped = true
    progressWorkItem?.cancel()
    progressWorkItem = nil
  }

  deinit {
    stop()
  }
}
package/ios/Bridge.h ADDED
@@ -0,0 +1,8 @@
1
+ //
2
+ // Bridge.h
3
+ // NitroSpeech
4
+ //
5
+ // Created by Marc Rousavy on 22.07.24.
6
+ //
7
+
8
+ #pragma once
@@ -0,0 +1,6 @@
1
import Foundation
import NitroModules

/// Root hybrid object exposed to JavaScript; owns the recognizer instance.
class HybridNitroSpeech: HybridNitroSpeechSpec {
  var recognizer: HybridRecognizerSpec = HybridRecognizer()
}
@@ -0,0 +1,201 @@
1
import Foundation
import Speech
import NitroModules

/// iOS speech-to-text recognizer backed by SFSpeechRecognizer + AVAudioEngine.
///
/// A session is started with `startListening`, which requests speech and
/// microphone permissions, then streams microphone buffers into a recognition
/// request. The session auto-stops after silence (`AutoStopper`) and when the
/// app resigns active (`AppStateObserver`). Results and errors are delivered
/// through the optional callback properties below.
class HybridRecognizer: HybridRecognizerSpec {
  /// Fallback silence timeout used when `params.autoFinishRecognitionMs` is unset.
  private static let defaultAutoFinishRecognitionMs = 8000.0

  var onReadyForSpeech: (() -> Void)?
  var onRecordingStopped: (() -> Void)?
  var onResult: (([String]) -> Void)?
  var onAutoFinishProgress: ((Double) -> Void)?
  var onError: ((String) -> Void)?
  var onPermissionDenied: (() -> Void)?

  private var recognitionRequest: SFSpeechAudioBufferRecognitionRequest?
  private var recognitionTask: SFSpeechRecognitionTask?
  private var audioEngine: AVAudioEngine?
  private var autoStopper: AutoStopper?
  private var appStateObserver: AppStateObserver?
  private var isActive: Bool = false

  /// Entry point: requests speech-recognition authorization, then microphone
  /// permission, then starts recognition. Overlapping sessions are rejected.
  func startListening(params: SpeechToTextParams) {
    if isActive {
      onError?("Previous recognition session is still active")
      return
    }

    SFSpeechRecognizer.requestAuthorization { [weak self] authStatus in
      // The authorization callback may arrive on a background queue; hop to main.
      DispatchQueue.main.async {
        guard let self = self else { return }

        switch authStatus {
        case .authorized:
          self.requestMicrophonePermission(params: params)
        case .denied, .restricted:
          self.onPermissionDenied?()
        case .notDetermined:
          self.onError?("Speech recognition not determined")
        @unknown default:
          self.onError?("Unknown authorization status")
        }
      }
    }
  }

  /// Tears down the active session and notifies listeners. No-op when idle.
  func stopListening() {
    guard isActive else { return }
    cleanup()
    onRecordingStopped?()
  }

  /// Second permission step: microphone access.
  private func requestMicrophonePermission(params: SpeechToTextParams) {
    AVAudioSession.sharedInstance().requestRecordPermission { [weak self] granted in
      DispatchQueue.main.async {
        guard let self = self else { return }

        if granted {
          self.startRecognition(params: params)
        } else {
          self.onPermissionDenied?()
        }
      }
    }
  }

  /// Configures the audio session, recognition request, and microphone tap,
  /// then starts the audio engine. All failures are reported via `onError`.
  private func startRecognition(params: SpeechToTextParams) {
    let locale = Locale(identifier: params.locale ?? "en-US")
    guard let speechRecognizer = SFSpeechRecognizer(locale: locale), speechRecognizer.isAvailable else {
      onError?("Speech recognizer not available")
      return
    }

    autoStopper = AutoStopper(
      silenceThresholdMs: params.autoFinishRecognitionMs ?? Self.defaultAutoFinishRecognitionMs,
      onProgress: { [weak self] timeLeftMs in
        self?.onAutoFinishProgress?(timeLeftMs)
      },
      onTimeout: { [weak self] in
        self?.stopListening()
      }
    )

    do {
      let audioSession = AVAudioSession.sharedInstance()
      try audioSession.setCategory(.record, mode: .measurement, options: .duckOthers)
      try audioSession.setActive(true, options: .notifyOthersOnDeactivation)
    } catch {
      onError?("Failed to set up audio session: \(error.localizedDescription)")
      return
    }

    audioEngine = AVAudioEngine()
    recognitionRequest = SFSpeechAudioBufferRecognitionRequest()

    guard let recognitionRequest = recognitionRequest, let audioEngine = audioEngine else {
      onError?("Failed to create recognition request or audio engine")
      return
    }

    recognitionRequest.shouldReportPartialResults = true

    if let contextualStrings = params.contextualStrings, !contextualStrings.isEmpty {
      recognitionRequest.contextualStrings = contextualStrings
    }

    if #available(iOS 16, *) {
      // Punctuation is enabled unless the caller explicitly passes `false`.
      recognitionRequest.addsPunctuation = params.iosAddPunctuation ?? true
    }

    let disableRepeatingFilter = params.disableRepeatingFilter ?? false

    recognitionTask = speechRecognizer.recognitionTask(with: recognitionRequest) { [weak self] result, error in
      guard let self = self else { return }

      if let result = result {
        self.autoStopper?.indicateRecordingActivity(from: "partial results")

        var transcription = result.bestTranscription.formattedString
        if !transcription.isEmpty {
          if !disableRepeatingFilter {
            transcription = self.repeatingFilter(text: transcription)
          }
          self.onResult?([transcription])
        }

        if result.isFinal {
          self.stopListening()
        }
      }

      if let error = error {
        self.onError?("Recognition error: \(error.localizedDescription)")
        self.stopListening()
      }
    }

    let inputNode = audioEngine.inputNode
    let recordingFormat = inputNode.outputFormat(forBus: 0)

    // Feed microphone buffers into the recognition request.
    inputNode.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat) { [weak self] buffer, _ in
      self?.recognitionRequest?.append(buffer)
    }

    // Observe app going to background and stop the session when it does.
    appStateObserver = AppStateObserver { [weak self] in
      guard let self = self, self.isActive else { return }
      self.stopListening()
    }

    do {
      audioEngine.prepare()
      try audioEngine.start()
      isActive = true
      autoStopper?.indicateRecordingActivity(from: "startListening")
      onReadyForSpeech?()
      // Emit an initial empty result so consumers can clear previous text.
      onResult?([])
    } catch {
      cleanup()
      onError?("Failed to start audio engine: \(error.localizedDescription)")
    }
  }

  /// Releases every resource of the current session and resets state.
  private func cleanup() {
    autoStopper?.stop()
    autoStopper = nil

    appStateObserver?.stop()
    appStateObserver = nil

    recognitionRequest?.endAudio()
    recognitionTask?.cancel()

    if let audioEngine = audioEngine {
      if audioEngine.isRunning {
        audioEngine.stop()
      }
      audioEngine.inputNode.removeTap(onBus: 0)
    }

    try? AVAudioSession.sharedInstance().setActive(false, options: .notifyOthersOnDeactivation)

    recognitionRequest = nil
    recognitionTask = nil
    audioEngine = nil
    isActive = false
  }

  /// Collapses immediately repeated words, e.g. "and and" -> "and".
  private func repeatingFilter(text: String) -> String {
    let words = text.split { $0.isWhitespace }.map { String($0) }
    // FIX: a non-empty but whitespace-only transcription passes the caller's
    // `!transcription.isEmpty` check yet splits into an empty array; the
    // previous unconditional `words[0]` access crashed here.
    guard var joiner = words.first else { return text }
    for i in 1..<words.count where words[i] != words[i - 1] {
      joiner += " \(words[i])"
    }
    return joiner
  }
}
@@ -0,0 +1,10 @@
1
"use strict";

// CommonJS entry point (compiled from src/index.ts). Creates the NitroSpeech
// hybrid object once at module load and re-exports its `recognizer` property.
Object.defineProperty(exports, "__esModule", { value: true });
exports.Recognizer = void 0;

var _reactNativeNitroModules = require("react-native-nitro-modules");

const NitroSpeech = _reactNativeNitroModules.NitroModules.createHybridObject('NitroSpeech');
const Recognizer = exports.Recognizer = NitroSpeech.recognizer;
//# sourceMappingURL=index.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"names":["_reactNativeNitroModules","require","NitroSpeech","NitroModules","createHybridObject","Recognizer","exports","recognizer"],"sourceRoot":"../../src","sources":["index.ts"],"mappings":";;;;;;AAAA,IAAAA,wBAAA,GAAAC,OAAA;AAGA,MAAMC,WAAW,GACfC,qCAAY,CAACC,kBAAkB,CAAkB,aAAa,CAAC;AAE1D,MAAMC,UAAU,GAAAC,OAAA,CAAAD,UAAA,GAAGH,WAAW,CAACK,UAAU","ignoreList":[]}
@@ -0,0 +1 @@
1
+ {"type":"commonjs"}
@@ -0,0 +1,6 @@
1
"use strict";

// Type-only spec module: compiles to an empty CommonJS module at build time.
Object.defineProperty(exports, "__esModule", { value: true });
//# sourceMappingURL=NitroSpeech.nitro.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"names":[],"sourceRoot":"../../../src","sources":["specs/NitroSpeech.nitro.ts"],"mappings":"","ignoreList":[]}
@@ -0,0 +1,6 @@
1
"use strict";

// ES module entry point (compiled from src/index.ts). Creates the NitroSpeech
// hybrid object once at module load and re-exports its `recognizer` property.
import { NitroModules } from 'react-native-nitro-modules';

const NitroSpeech = NitroModules.createHybridObject('NitroSpeech');
export const Recognizer = NitroSpeech.recognizer;
//# sourceMappingURL=index.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"names":["NitroModules","NitroSpeech","createHybridObject","Recognizer","recognizer"],"sourceRoot":"../../src","sources":["index.ts"],"mappings":";;AAAA,SAASA,YAAY,QAAQ,4BAA4B;AAGzD,MAAMC,WAAW,GACfD,YAAY,CAACE,kBAAkB,CAAkB,aAAa,CAAC;AAEjE,OAAO,MAAMC,UAAU,GAAGF,WAAW,CAACG,UAAU","ignoreList":[]}
@@ -0,0 +1 @@
1
+ {"type":"module"}
@@ -0,0 +1,4 @@
1
"use strict";

// Type-only spec module: erased to an empty ES module at build time.
export {};
//# sourceMappingURL=NitroSpeech.nitro.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"names":[],"sourceRoot":"../../../src","sources":["specs/NitroSpeech.nitro.ts"],"mappings":"","ignoreList":[]}