react-native-sherpa-onnx 0.3.2 → 0.3.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +28 -15
- package/SherpaOnnx.podspec +13 -5
- package/android/prebuilt-download.gradle +18 -5
- package/android/prebuilt-versions.gradle +8 -4
- package/android/src/main/cpp/jni/model_detect/sherpa-onnx-model-detect-helper.cpp +43 -142
- package/android/src/main/cpp/jni/model_detect/sherpa-onnx-model-detect-helper.h +12 -4
- package/android/src/main/cpp/jni/model_detect/sherpa-onnx-model-detect-stt.cpp +694 -307
- package/android/src/main/cpp/jni/model_detect/sherpa-onnx-model-detect-tts.cpp +194 -99
- package/android/src/main/cpp/jni/model_detect/sherpa-onnx-model-detect.h +90 -0
- package/android/src/main/cpp/jni/model_detect/sherpa-onnx-stt-wrapper.cpp +3 -0
- package/android/src/main/java/com/sherpaonnx/SherpaOnnxModule.kt +70 -0
- package/android/src/main/java/com/sherpaonnx/SherpaOnnxPcmCapture.kt +150 -0
- package/android/src/main/java/com/sherpaonnx/SherpaOnnxSttHelper.kt +39 -19
- package/ios/SherpaOnnx+PcmLiveStream.mm +288 -0
- package/ios/SherpaOnnx+STT.mm +2 -0
- package/ios/SherpaOnnx.mm +1 -1
- package/ios/model_detect/sherpa-onnx-model-detect-helper.h +9 -3
- package/ios/model_detect/sherpa-onnx-model-detect-helper.mm +38 -54
- package/ios/model_detect/sherpa-onnx-model-detect-stt.mm +620 -267
- package/ios/model_detect/sherpa-onnx-model-detect-tts.mm +131 -28
- package/ios/model_detect/sherpa-onnx-model-detect.h +70 -0
- package/ios/stt/sherpa-onnx-stt-wrapper.mm +4 -0
- package/lib/module/NativeSherpaOnnx.js.map +1 -1
- package/lib/module/audio/index.js +52 -0
- package/lib/module/audio/index.js.map +1 -1
- package/lib/module/stt/streaming.js +6 -3
- package/lib/module/stt/streaming.js.map +1 -1
- package/lib/typescript/src/NativeSherpaOnnx.d.ts +16 -2
- package/lib/typescript/src/NativeSherpaOnnx.d.ts.map +1 -1
- package/lib/typescript/src/audio/index.d.ts +17 -0
- package/lib/typescript/src/audio/index.d.ts.map +1 -1
- package/lib/typescript/src/stt/streaming.d.ts.map +1 -1
- package/lib/typescript/src/stt/streamingTypes.d.ts +1 -1
- package/lib/typescript/src/stt/streamingTypes.d.ts.map +1 -1
- package/package.json +6 -1
- package/scripts/check-model-csvs.sh +72 -0
- package/scripts/setup-ios-framework.sh +48 -48
- package/src/NativeSherpaOnnx.ts +18 -2
- package/src/audio/index.ts +81 -0
- package/src/stt/streaming.ts +10 -5
- package/src/stt/streamingTypes.ts +1 -1
- package/third_party/sherpa-onnx-prebuilt/ANDROID_RELEASE_TAG +1 -1
- package/third_party/sherpa-onnx-prebuilt/IOS_RELEASE_TAG +1 -1
|
@@ -1,23 +1,82 @@
|
|
|
1
1
|
/**
|
|
2
2
|
* sherpa-onnx-model-detect-stt.cpp
|
|
3
3
|
*
|
|
4
|
-
* Purpose: Detects STT model type and fills SttModelPaths from a model directory.
|
|
5
|
-
* transducer, paraformer, whisper,
|
|
4
|
+
* Purpose: Detects STT model type and fills SttModelPaths from a model directory. Used by
|
|
5
|
+
* nativeDetectSttModel (module-jni). Supports transducer, paraformer, whisper, moonshine, etc.
|
|
6
|
+
*
|
|
7
|
+
* --- Detection pipeline (overview) ---
|
|
8
|
+
*
|
|
9
|
+
* 1. Gather files in modelDir (recursive), then:
|
|
10
|
+
* - SttCandidatePaths: map file names to logical paths (encoder, decoder, joiner, moonshine
|
|
11
|
+
* preprocessor/encoder/mergedDecoder, paraformer/ctc model, tokens, etc.).
|
|
12
|
+
* - SttPathHints: from directory name only (isLikelyMoonshine, isLikelyNemo, ...).
|
|
13
|
+
* - SttCapabilities: which model types are *possible* given paths + hints (hasWhisper,
|
|
14
|
+
* hasMoonshineV2, hasTransducer, ...). Multiple can be true at once (e.g. same files
|
|
15
|
+
* can satisfy both Whisper and Moonshine v2).
|
|
16
|
+
*
|
|
17
|
+
* 2. detectedModels (for UI "Select model type"): built from capabilities only. Every kind
|
|
18
|
+
* with has* == true is added. So the list shows all types that could work with the files,
|
|
19
|
+
* not the single chosen type.
|
|
20
|
+
*
|
|
21
|
+
* 3. selectedKind (which type we actually use): from ResolveSttKind():
|
|
22
|
+
* - If modelType is explicit (e.g. "whisper"): use it if capabilities allow.
|
|
23
|
+
* - If modelType == "auto": Priority 1 = folder name (GetKindsFromDirName: tokens like
|
|
24
|
+
* "moonshine", "whisper" in dir name → candidate kinds). Priority 2 = among those
|
|
25
|
+
* candidates, pick the first that CapabilitySupportsKind(). Fallback = if no name
|
|
26
|
+
* candidates, use file-only order (transducer → moonshine v2/v1 → CTC → paraformer →
|
|
27
|
+
* whisper → ...).
|
|
28
|
+
*
|
|
29
|
+
* 4. paths: ApplyPathsForSttKind(selectedKind) copies the relevant candidate paths into
|
|
30
|
+
* SttModelPaths (encoder/decoder, moonshine encoder/mergedDecoder, etc.) for the chosen kind.
|
|
31
|
+
*
|
|
32
|
+
* Result to caller: ok, error, detectedModels (list), selectedKind (single), paths (for selectedKind).
|
|
6
33
|
*/
|
|
7
34
|
#include "sherpa-onnx-model-detect.h"
|
|
8
35
|
#include "sherpa-onnx-model-detect-helper.h"
|
|
9
|
-
#include <android/log.h>
|
|
10
36
|
#include <cstdlib>
|
|
11
37
|
#include <string>
|
|
12
38
|
#include <algorithm>
|
|
13
|
-
|
|
39
|
+
#ifdef __ANDROID__
|
|
40
|
+
#include <android/log.h>
|
|
14
41
|
#define LOG_TAG "SttModelDetect"
|
|
15
42
|
#define LOGI(...) __android_log_print(ANDROID_LOG_INFO, LOG_TAG, __VA_ARGS__)
|
|
16
43
|
#define LOGE(...) __android_log_print(ANDROID_LOG_ERROR, LOG_TAG, __VA_ARGS__)
|
|
44
|
+
#else
|
|
45
|
+
#define LOGI(...) ((void)0)
|
|
46
|
+
#define LOGE(...) ((void)0)
|
|
47
|
+
#endif
|
|
17
48
|
|
|
18
49
|
namespace sherpaonnx {
|
|
19
50
|
namespace {
|
|
20
51
|
|
|
52
|
+
/** Maps an SttModelKind value to its canonical wire-format name; "unknown" for unmapped kinds. */
static const char* KindToName(SttModelKind k) {
  if (k == SttModelKind::kTransducer) return "transducer";
  if (k == SttModelKind::kNemoTransducer) return "nemo_transducer";
  if (k == SttModelKind::kParaformer) return "paraformer";
  if (k == SttModelKind::kNemoCtc) return "nemo_ctc";
  if (k == SttModelKind::kWenetCtc) return "wenet_ctc";
  if (k == SttModelKind::kSenseVoice) return "sense_voice";
  if (k == SttModelKind::kZipformerCtc) return "zipformer_ctc";
  if (k == SttModelKind::kWhisper) return "whisper";
  if (k == SttModelKind::kFunAsrNano) return "funasr_nano";
  if (k == SttModelKind::kFireRedAsr) return "fire_red_asr";
  if (k == SttModelKind::kMoonshine) return "moonshine";
  if (k == SttModelKind::kMoonshineV2) return "moonshine_v2";
  if (k == SttModelKind::kDolphin) return "dolphin";
  if (k == SttModelKind::kCanary) return "canary";
  if (k == SttModelKind::kOmnilingual) return "omnilingual";
  if (k == SttModelKind::kMedAsr) return "medasr";
  if (k == SttModelKind::kTeleSpeechCtc) return "telespeech_ctc";
  if (k == SttModelKind::kToneCtc) return "tone_ctc";
  return "unknown";
}
|
|
75
|
+
|
|
76
|
+
/**
 * Log-friendly view of a path string: "(empty)" when \p s is empty, otherwise
 * the underlying C string (valid only while \p s is alive).
 */
static const char* EmptyOrPath(const std::string& s) {
  if (s.empty()) {
    return "(empty)";
  }
  return s.c_str();
}
|
|
79
|
+
|
|
21
80
|
SttModelKind ParseSttModelType(const std::string& modelType) {
|
|
22
81
|
if (modelType == "transducer") return SttModelKind::kTransducer;
|
|
23
82
|
if (modelType == "nemo_transducer") return SttModelKind::kNemoTransducer;
|
|
@@ -30,6 +89,7 @@ SttModelKind ParseSttModelType(const std::string& modelType) {
|
|
|
30
89
|
if (modelType == "funasr_nano") return SttModelKind::kFunAsrNano;
|
|
31
90
|
if (modelType == "fire_red_asr") return SttModelKind::kFireRedAsr;
|
|
32
91
|
if (modelType == "moonshine") return SttModelKind::kMoonshine;
|
|
92
|
+
if (modelType == "moonshine_v2") return SttModelKind::kMoonshineV2;
|
|
33
93
|
if (modelType == "dolphin") return SttModelKind::kDolphin;
|
|
34
94
|
if (modelType == "canary") return SttModelKind::kCanary;
|
|
35
95
|
if (modelType == "omnilingual") return SttModelKind::kOmnilingual;
|
|
@@ -39,6 +99,502 @@ SttModelKind ParseSttModelType(const std::string& modelType) {
|
|
|
39
99
|
return SttModelKind::kUnknown;
|
|
40
100
|
}
|
|
41
101
|
|
|
102
|
+
/** Returns true if \p cap and hints/paths support the given \p kind (required files present). */
static bool CapabilitySupportsKind(
    SttModelKind kind,
    const SttCapabilities& cap,
    const SttPathHints& hints,
    const SttCandidatePaths& paths
) {
  switch (kind) {
    case SttModelKind::kTransducer:
      // Plain transducer only when files exist AND the dir name does not point at NeMo/TDT,
      // which use the NemoTransducer loader instead.
      return cap.hasTransducer && !(hints.isLikelyNemo || hints.isLikelyTdt);
    case SttModelKind::kNemoTransducer:
      // Same file layout as kTransducer; the hint split happens in kTransducer above.
      return cap.hasTransducer;
    case SttModelKind::kParaformer:
      return cap.hasParaformer;
    // The CTC variants all share a single ctcModel file; the directory-name hint is what
    // disambiguates which CTC flavor it actually is.
    case SttModelKind::kNemoCtc:
      return !paths.ctcModel.empty() && hints.isLikelyNemo;
    case SttModelKind::kWenetCtc:
      return !paths.ctcModel.empty() && hints.isLikelyWenetCtc;
    case SttModelKind::kSenseVoice:
      return !paths.ctcModel.empty() && hints.isLikelySenseVoice;
    case SttModelKind::kZipformerCtc:
      return !paths.ctcModel.empty() && hints.isLikelyZipformer;
    case SttModelKind::kWhisper:
      return cap.hasWhisper;
    case SttModelKind::kFunAsrNano:
      return cap.hasFunAsrNano;
    case SttModelKind::kFireRedAsr:
      return cap.hasFireRedAsr;
    case SttModelKind::kMoonshine:
      return cap.hasMoonshine;
    case SttModelKind::kMoonshineV2:
      return cap.hasMoonshineV2;
    case SttModelKind::kDolphin:
      return cap.hasDolphin;
    case SttModelKind::kCanary:
      return cap.hasCanary;
    case SttModelKind::kOmnilingual:
      return cap.hasOmnilingual;
    case SttModelKind::kMedAsr:
      return cap.hasMedAsr;
    case SttModelKind::kTeleSpeechCtc:
      return cap.hasTeleSpeechCtc;
    case SttModelKind::kToneCtc:
      return cap.hasToneCtc;
    default:
      // kUnknown (and any future kinds not wired up here) are never auto-selected.
      return false;
  }
}
|
|
150
|
+
|
|
151
|
+
/**
 * Priority 1: Collect candidate STT kinds from the model directory name (last path component).
 * Tokens like "moonshine", "whisper", "paraformer" are matched case-insensitively. Returns
 * candidates in a fixed priority order so that when multiple kinds match the name, file-based
 * disambiguation picks the first supported one.
 */
static std::vector<SttModelKind> GetKindsFromDirName(const std::string& modelDir) {
  using namespace model_detect;
  // Only the final path component carries the model name; parent dirs are irrelevant.
  size_t pos = modelDir.find_last_of("/\\");
  std::string base = (pos == std::string::npos) ? modelDir : modelDir.substr(pos + 1);
  std::string lower = ToLower(base);

  std::vector<SttModelKind> out;
  // Append-if-absent: first insertion wins, so the check order below IS the priority order.
  auto add = [&out](SttModelKind k) {
    if (std::find(out.begin(), out.end(), k) == out.end())
      out.push_back(k);
  };

  if (lower.find("moonshine") != std::string::npos) {
    // Same token for both generations; prefer the v2 layout when its files are present.
    add(SttModelKind::kMoonshineV2);
    add(SttModelKind::kMoonshine);
  }
  if (lower.find("whisper") != std::string::npos)
    add(SttModelKind::kWhisper);
  if (lower.find("paraformer") != std::string::npos)
    add(SttModelKind::kParaformer);
  if (lower.find("nemo") != std::string::npos || lower.find("parakeet") != std::string::npos) {
    // NeMo/Parakeet ships both transducer and CTC exports; transducer is tried first.
    add(SttModelKind::kNemoTransducer);
    add(SttModelKind::kNemoCtc);
  }
  if (lower.find("tdt") != std::string::npos)
    add(SttModelKind::kNemoTransducer);
  if (lower.find("wenet") != std::string::npos)
    add(SttModelKind::kWenetCtc);
  if (lower.find("sense") != std::string::npos || lower.find("sensevoice") != std::string::npos)
    add(SttModelKind::kSenseVoice);
  if (lower.find("zipformer") != std::string::npos) {
    add(SttModelKind::kTransducer);
    add(SttModelKind::kZipformerCtc);
  }
  if (lower.find("funasr") != std::string::npos)
    add(SttModelKind::kFunAsrNano);
  if (lower.find("canary") != std::string::npos)
    add(SttModelKind::kCanary);
  if (lower.find("fire_red") != std::string::npos || lower.find("fire-red") != std::string::npos)
    add(SttModelKind::kFireRedAsr);
  if (lower.find("dolphin") != std::string::npos)
    add(SttModelKind::kDolphin);
  if (lower.find("omnilingual") != std::string::npos)
    add(SttModelKind::kOmnilingual);
  if (lower.find("medasr") != std::string::npos)
    add(SttModelKind::kMedAsr);
  if (lower.find("telespeech") != std::string::npos)
    add(SttModelKind::kTeleSpeechCtc);
  // "tone" must match as a whole word (ContainsWord) so names like "milestone" don't trigger it.
  if (lower.find("t-one") != std::string::npos || lower.find("t_one") != std::string::npos ||
      model_detect::ContainsWord(lower, "tone"))
    add(SttModelKind::kToneCtc);
  if (lower.find("transducer") != std::string::npos) {
    add(SttModelKind::kTransducer);
    add(SttModelKind::kNemoTransducer);
  }

  return out;
}
|
|
215
|
+
|
|
216
|
+
/**
 * Scans the flat file listing of \p modelDir and fills an SttCandidatePaths with every
 * logical model file we can identify (encoder/decoder/joiner, moonshine parts, FunASR parts,
 * paraformer/CTC single-file model, tokens, bpe vocab).
 *
 * @param files      recursive listing of modelDir (project FileEntry: path + lowercased name)
 * @param modelDir   root directory being scanned (used to locate a direct-child vocab.json)
 * @param maxDepth   unused here; kept for interface compatibility with callers — TODO confirm
 * @param preferInt8 when set, token matchers prefer (or avoid) int8-quantized .onnx files
 */
static SttCandidatePaths GatherSttCandidatePaths(
    const std::vector<model_detect::FileEntry>& files,
    const std::string& modelDir,
    int maxDepth,
    const std::optional<bool>& preferInt8
) {
  using namespace model_detect;
  SttCandidatePaths p;
  p.encoder = FindOnnxByAnyToken(files, {"encoder"}, preferInt8);
  p.decoder = FindOnnxByAnyToken(files, {"decoder"}, preferInt8);
  p.joiner = FindOnnxByAnyToken(files, {"joiner"}, preferInt8);
  p.funasrEncoderAdaptor = FindOnnxByAnyToken(files, {"encoder_adaptor", "encoder-adaptor"}, preferInt8);
  p.funasrLLM = FindOnnxByAnyToken(files, {"llm"}, preferInt8);
  p.funasrEmbedding = FindOnnxByAnyToken(files, {"embedding"}, preferInt8);
  {
    // FunASR tokenizer dir: prefer a vocab.json directly inside modelDir; otherwise fall back
    // to the directory of the first vocab.json found anywhere under it.
    std::string vocabInSubdir;
    const std::string vocabName = "vocab.json";
    const std::string directSuffix = "/vocab.json";  // 11 chars
    for (const auto& entry : files) {
      if (entry.nameLower != vocabName) continue;
      const std::string& path = entry.path;
      if (path.size() >= modelDir.size() && path.compare(0, modelDir.size(), modelDir) == 0 &&
          (modelDir.empty() || path[modelDir.size()] == '/')) {
        // BUG FIX: "/vocab.json" is 11 characters; the previous check used 12 for both the
        // size and compare length, so it could never match and a vocab.json directly in
        // modelDir lost to whichever subdirectory vocab.json appeared first in `files`.
        if (path.size() == modelDir.size() + directSuffix.size() &&
            path.compare(modelDir.size(), directSuffix.size(), directSuffix) == 0) {
          p.funasrTokenizerDir = modelDir;
          break;
        }
        if (vocabInSubdir.empty())
          vocabInSubdir = path;
      }
    }
    if (p.funasrTokenizerDir.empty() && !vocabInSubdir.empty()) {
      size_t lastSlash = vocabInSubdir.find_last_of("/\\");
      if (lastSlash != std::string::npos)
        p.funasrTokenizerDir = vocabInSubdir.substr(0, lastSlash);
    }
  }
  p.moonshinePreprocessor = FindOnnxByAnyToken(files, {"preprocess", "preprocessor"}, preferInt8);
  p.moonshineEncoder = FindOnnxByAnyToken(files, {"encode", "encoder_model"}, preferInt8);
  p.moonshineUncachedDecoder = FindOnnxByAnyToken(files, {"uncached_decode", "uncached"}, preferInt8);
  // Cached decoder must exclude "uncached", since "cached" is a substring of "uncached".
  p.moonshineCachedDecoder = FindOnnxByAnyTokenExcluding(
      files, std::vector<std::string>{"cached_decode", "cached"}, std::vector<std::string>{"uncached"}, preferInt8);
  p.moonshineMergedDecoder = FindOnnxByAnyToken(files, {"merged_decode", "merged_decoder", "decoder_model_merged", "merged"}, preferInt8);
  // Tokens that mark a file as a *component* rather than a standalone single-file model.
  static const std::vector<std::string> modelExcludes = {
      "encoder", "decoder", "joiner", "vocoder", "acoustic", "embedding", "llm",
      "encoder_adaptor", "encoder-adaptor", "encoder_model", "decoder_model",
      "merged_decoder", "decoder_model_merged", "preprocess", "encode", "uncached", "cached"
  };
  // Single-file models ("model.onnx"): reject matches that are really encoder/decoder parts,
  // then fall back to the largest .onnx that is not a known component.
  p.paraformerModel = FindOnnxByAnyToken(files, {"model"}, preferInt8);
  if (!p.paraformerModel.empty()) {
    std::string lower = ToLower(p.paraformerModel);
    if (lower.find("encoder_model") != std::string::npos ||
        lower.find("decoder_model") != std::string::npos ||
        lower.find("merged_decoder") != std::string::npos)
      p.paraformerModel.clear();
  }
  if (p.paraformerModel.empty())
    p.paraformerModel = FindLargestOnnxExcludingTokens(files, modelExcludes);
  p.ctcModel = FindOnnxByAnyToken(files, {"model"}, preferInt8);
  if (!p.ctcModel.empty()) {
    std::string lower = ToLower(p.ctcModel);
    if (lower.find("encoder_model") != std::string::npos ||
        lower.find("decoder_model") != std::string::npos ||
        lower.find("merged_decoder") != std::string::npos)
      p.ctcModel.clear();
  }
  if (p.ctcModel.empty())
    p.ctcModel = FindLargestOnnxExcludingTokens(files, modelExcludes);
  // A "model" hit that is actually the transducer encoder/decoder/joiner is not a single-file model.
  if (!p.paraformerModel.empty() &&
      (p.paraformerModel == p.encoder || p.paraformerModel == p.decoder || p.paraformerModel == p.joiner))
    p.paraformerModel.clear();
  if (!p.ctcModel.empty() &&
      (p.ctcModel == p.encoder || p.ctcModel == p.decoder || p.ctcModel == p.joiner))
    p.ctcModel.clear();
  p.tokens = FindFileEndingWith(files, "tokens.txt");
  p.bpeVocab = FindFileByName(files, "bpe.vocab");
  // Moonshine v2 may name its encoder "encoder_model"; fall back to that when plain "encoder" missed.
  p.encoderForV2 = p.encoder.empty() ? FindOnnxByAnyToken(files, {"encoder", "encoder_model"}, preferInt8) : p.encoder;
  return p;
}
|
|
294
|
+
|
|
295
|
+
/**
 * Builds directory-name hints (case-insensitive substring matches on the FULL path, not just
 * the last component) that the capability and resolution steps use to disambiguate model kinds.
 */
static SttPathHints GetSttPathHints(const std::string& modelDir) {
  using namespace model_detect;
  SttPathHints h;
  const std::string lower = ToLower(modelDir);
  const auto has = [&lower](const char* token) {
    return lower.find(token) != std::string::npos;
  };
  h.isLikelyNemo = has("nemo") || has("parakeet");
  h.isLikelyTdt = has("tdt");
  h.isLikelyWenetCtc = has("wenet");
  h.isLikelySenseVoice = has("sense") || has("sensevoice");
  h.isLikelyFunAsrNano = has("funasr") || has("funasr-nano");
  h.isLikelyZipformer = has("zipformer");
  h.isLikelyMoonshine = has("moonshine");
  h.isLikelyDolphin = has("dolphin");
  h.isLikelyFireRedAsr = has("fire_red") || has("fire-red");
  h.isLikelyCanary = has("canary");
  h.isLikelyOmnilingual = has("omnilingual");
  h.isLikelyMedAsr = has("medasr");
  h.isLikelyTeleSpeech = has("telespeech");
  // tone_ctc is for T-One models only (e.g. streaming-t-one-russian). WeNetSpeech CTC
  // (yue, wu, etc.) uses wenet_ctc per sherpa-onnx docs. "tone" must match as a whole word.
  h.isLikelyToneCtc = has("t-one") || has("t_one") || ContainsWord(lower, "tone");
  h.isLikelyParaformer = has("paraformer");
  h.isLikelyVad = has("vad") || has("silero") || has("ten-vad");
  h.isLikelyTdnn = has("tdnn");
  return h;
}
|
|
321
|
+
|
|
322
|
+
/** Error message when model is for unsupported hardware (RK35xx, Ascend, etc.). */
|
|
323
|
+
static const char* kHardwareSpecificUnsupportedMessage =
|
|
324
|
+
"This model is built for hardware-specific acceleration (e.g. RK35xx, Ascend, CANN) and is not supported by the React Native SDK. Use an ONNX model for CPU/GPU or a QNN-capable model on supported devices.";
|
|
325
|
+
|
|
326
|
+
/** True if model dir name indicates a hardware-specific build (e.g. RK3588, Ascend). Not runnable on generic host. QNN is supported by the SDK. */
|
|
327
|
+
static bool IsHardwareSpecificModelDir(const std::string& modelDir) {
|
|
328
|
+
using namespace model_detect;
|
|
329
|
+
std::string lower = ToLower(modelDir);
|
|
330
|
+
const char* tokens[] = {
|
|
331
|
+
"rk3588", "rk3576", "rk3568", "rk3566", "rk3562", "rknn",
|
|
332
|
+
"ascend", "cann", "910b", "910b2", "310p3"
|
|
333
|
+
};
|
|
334
|
+
for (const char* t : tokens) {
|
|
335
|
+
if (lower.find(t) != std::string::npos)
|
|
336
|
+
return true;
|
|
337
|
+
}
|
|
338
|
+
return false;
|
|
339
|
+
}
|
|
340
|
+
|
|
341
|
+
/**
 * Derives which STT model kinds are *possible* given the gathered candidate paths and
 * directory-name hints. Multiple capabilities can be true simultaneously (e.g. the same
 * encoder/decoder pair can satisfy both Whisper and Moonshine v2).
 */
static SttCapabilities ComputeSttCapabilities(const SttCandidatePaths& paths, const SttPathHints& hints) {
  using namespace model_detect;
  SttCapabilities c;
  // Transducer needs the full encoder/decoder/joiner triple.
  c.hasTransducer = !paths.encoder.empty() && !paths.decoder.empty() && !paths.joiner.empty();
  bool hasWhisperEnc = !paths.encoder.empty();
  bool hasWhisperDec = !paths.decoder.empty();
  // Whisper layout is encoder+decoder WITHOUT a joiner (joiner would imply transducer).
  c.hasWhisper = hasWhisperEnc && hasWhisperDec && paths.joiner.empty();
  bool hasFunAsrTok = !paths.funasrTokenizerDir.empty();
  // FunASR Nano needs all four pieces: adaptor, LLM, embedding, and the vocab.json dir.
  c.hasFunAsrNano = !paths.funasrEncoderAdaptor.empty() && !paths.funasrLLM.empty() &&
                    !paths.funasrEmbedding.empty() && hasFunAsrTok;
  // Moonshine v1: four-file layout (preprocessor + encoder + cached/uncached decoders).
  c.hasMoonshine = !paths.moonshinePreprocessor.empty() && !paths.moonshineUncachedDecoder.empty() &&
                   !paths.moonshineCachedDecoder.empty() && !paths.moonshineEncoder.empty();
  // Moonshine v2: encoder + merged decoder, no joiner.
  c.hasMoonshineV2 = !paths.moonshineMergedDecoder.empty() && !paths.encoderForV2.empty() && paths.joiner.empty();
  // Streaming paraformer uses encoder.onnx + decoder.onnx (no joiner, no single "model.onnx").
  c.hasParaformer = !paths.paraformerModel.empty() ||
                    (hints.isLikelyParaformer && hasWhisperEnc && hasWhisperDec && paths.joiner.empty());
  c.hasDolphin = hints.isLikelyDolphin && !paths.ctcModel.empty();
  // Fire Red ASR: only encoder+decoder (two files). Single-file Fire Red (e.g. fire-red-asr2-ctc) uses CTC path to avoid native crash.
  c.hasFireRedAsr = (c.hasTransducer || (hasWhisperEnc && hasWhisperDec && paths.joiner.empty())) && hints.isLikelyFireRedAsr;
  c.hasFireRedCtc = hints.isLikelyFireRedAsr && paths.encoder.empty() && paths.decoder.empty() &&
                    (!paths.ctcModel.empty() || !paths.paraformerModel.empty());
  c.hasCanary = hasWhisperEnc && hasWhisperDec && paths.joiner.empty() && hints.isLikelyCanary;
  c.hasOmnilingual = !paths.ctcModel.empty() && hints.isLikelyOmnilingual;
  c.hasMedAsr = !paths.ctcModel.empty() && hints.isLikelyMedAsr;
  c.hasTeleSpeechCtc = (!paths.ctcModel.empty() || !paths.paraformerModel.empty()) && hints.isLikelyTeleSpeech;
  c.hasToneCtc = !paths.ctcModel.empty() && hints.isLikelyToneCtc;
  return c;
}
|
|
369
|
+
|
|
370
|
+
/**
 * Appends every model kind the capabilities allow to \p out (feeds the UI's
 * "Select model type" list). This is intentionally capability-driven — it lists every
 * kind that *could* work with the files, not just the one ResolveSttKind() picks.
 */
static void CollectDetectedModels(
    std::vector<DetectedModel>& out,
    const SttCapabilities& cap,
    const SttPathHints& hints,
    const SttCandidatePaths& paths,
    const std::string& modelDir
) {
  if (cap.hasTransducer) {
    // NeMo/TDT hints flip the label; same underlying encoder/decoder/joiner files.
    out.push_back({(hints.isLikelyNemo || hints.isLikelyTdt) ? "nemo_transducer" : "transducer", modelDir});
  }
  // CTC vs paraformer share the single-file "model.onnx" slot; a hinted CTC flavor wins,
  // otherwise a standalone model file is reported as paraformer.
  if (!paths.ctcModel.empty() && (hints.isLikelyNemo || hints.isLikelyWenetCtc || hints.isLikelySenseVoice || hints.isLikelyZipformer)) {
    if (hints.isLikelyNemo) out.push_back({"nemo_ctc", modelDir});
    else if (hints.isLikelyWenetCtc) out.push_back({"wenet_ctc", modelDir});
    else if (hints.isLikelySenseVoice) out.push_back({"sense_voice", modelDir});
    else out.push_back({"zipformer_ctc", modelDir});
  } else if (!paths.paraformerModel.empty()) {
    out.push_back({"paraformer", modelDir});
  }
  if (cap.hasWhisper) out.push_back({"whisper", modelDir});
  if (cap.hasFunAsrNano) out.push_back({"funasr_nano", modelDir});
  if (cap.hasMoonshine) out.push_back({"moonshine", modelDir});
  if (cap.hasMoonshineV2) out.push_back({"moonshine_v2", modelDir});
  if (cap.hasDolphin) out.push_back({"dolphin", modelDir});
  if (cap.hasFireRedAsr) out.push_back({"fire_red_asr", modelDir});
  if (cap.hasCanary) out.push_back({"canary", modelDir});
  if (cap.hasOmnilingual) out.push_back({"omnilingual", modelDir});
  if (cap.hasMedAsr) out.push_back({"medasr", modelDir});
  if (cap.hasTeleSpeechCtc) out.push_back({"telespeech_ctc", modelDir});
  if (cap.hasToneCtc) out.push_back({"tone_ctc", modelDir});
}
|
|
400
|
+
|
|
401
|
+
/**
 * Decides which single STT kind to actually load.
 *
 * - Explicit \p modelType (anything other than "auto"): parse it and verify the required
 *   files/capabilities exist; on failure, set \p outError and return kUnknown.
 * - "auto": Priority 1 = directory-name candidates (GetKindsFromDirName) filtered through
 *   CapabilitySupportsKind; Priority 2 = a fixed file-only fallback order.
 *
 * @param modelType caller-requested type, or "auto"/nullopt for detection
 * @param outError  cleared on entry; set with a human-readable message when kUnknown is returned
 * @return the selected kind, or kUnknown on error (outError explains why)
 */
static SttModelKind ResolveSttKind(
    const std::optional<std::string>& modelType,
    const SttCapabilities& cap,
    const SttPathHints& hints,
    const SttCandidatePaths& paths,
    const std::string& modelDir,
    std::string& outError
) {
  outError.clear();
  // Unsupported families are rejected up front regardless of requested type.
  if (hints.isLikelyVad) {
    outError = "VAD models are not yet supported by the React Native SDK.";
    return SttModelKind::kUnknown;
  }
  if (hints.isLikelyTdnn) {
    outError = "TDNN (keyword/yesno) models are not yet supported by the React Native SDK.";
    return SttModelKind::kUnknown;
  }
  if (modelType.has_value() && modelType.value() != "auto") {
    // Explicit type: validate each kind against its specific file requirements and fail
    // with a kind-specific message rather than silently falling back to auto-detection.
    SttModelKind selected = ParseSttModelType(modelType.value());
    if (selected == SttModelKind::kUnknown) {
      outError = "Unknown model type: " + modelType.value();
      return SttModelKind::kUnknown;
    }
    if (selected == SttModelKind::kTransducer && !cap.hasTransducer) {
      outError = "Transducer model requested but files not found in " + modelDir;
      return SttModelKind::kUnknown;
    }
    if (selected == SttModelKind::kNemoTransducer && !cap.hasTransducer) {
      outError = "NeMo Transducer model requested but encoder/decoder/joiner not found in " + modelDir;
      return SttModelKind::kUnknown;
    }
    if (selected == SttModelKind::kParaformer && !cap.hasParaformer) {
      outError = "Paraformer model requested but model file (or encoder+decoder for streaming) not found in " + modelDir;
      return SttModelKind::kUnknown;
    }
    // All CTC flavors share the same single-file requirement.
    if ((selected == SttModelKind::kNemoCtc || selected == SttModelKind::kWenetCtc ||
         selected == SttModelKind::kSenseVoice || selected == SttModelKind::kZipformerCtc ||
         selected == SttModelKind::kToneCtc) && paths.ctcModel.empty()) {
      outError = "CTC model requested but model file not found in " + modelDir;
      return SttModelKind::kUnknown;
    }
    if (selected == SttModelKind::kWhisper && !cap.hasWhisper) {
      outError = "Whisper model requested but encoder/decoder not found in " + modelDir;
      return SttModelKind::kUnknown;
    }
    if (selected == SttModelKind::kFunAsrNano && !cap.hasFunAsrNano) {
      outError = "FunASR Nano model requested but required files not found in " + modelDir;
      return SttModelKind::kUnknown;
    }
    if (selected == SttModelKind::kMoonshine && !cap.hasMoonshine) {
      outError = "Moonshine v1 model requested but preprocess/encode/uncached_decode/cached_decode not found in " + modelDir;
      return SttModelKind::kUnknown;
    }
    if (selected == SttModelKind::kMoonshineV2 && !cap.hasMoonshineV2) {
      outError = "Moonshine v2 model requested but encoder/merged_decode not found in " + modelDir;
      return SttModelKind::kUnknown;
    }
    if (selected == SttModelKind::kDolphin && !cap.hasDolphin) {
      outError = "Dolphin model requested but model not found in " + modelDir;
      return SttModelKind::kUnknown;
    }
    if (selected == SttModelKind::kFireRedAsr && !cap.hasFireRedAsr) {
      outError = "FireRed ASR model requested but encoder/decoder not found in " + modelDir;
      return SttModelKind::kUnknown;
    }
    if (selected == SttModelKind::kCanary && !cap.hasCanary) {
      outError = "Canary model requested but encoder/decoder not found in " + modelDir;
      return SttModelKind::kUnknown;
    }
    if (selected == SttModelKind::kOmnilingual && !cap.hasOmnilingual) {
      outError = "Omnilingual model requested but model not found in " + modelDir;
      return SttModelKind::kUnknown;
    }
    if (selected == SttModelKind::kMedAsr && !cap.hasMedAsr) {
      outError = "MedASR model requested but model not found in " + modelDir;
      return SttModelKind::kUnknown;
    }
    if (selected == SttModelKind::kTeleSpeechCtc && !cap.hasTeleSpeechCtc) {
      outError = "TeleSpeech CTC model requested but model not found in " + modelDir;
      return SttModelKind::kUnknown;
    }
    if (selected == SttModelKind::kToneCtc && !cap.hasToneCtc) {
      outError = "Tone CTC model requested but path does not contain 'tone' (as a word), 't-one', or 't_one' (e.g. sherpa-onnx-streaming-t-one-*) in " + modelDir;
      return SttModelKind::kUnknown;
    }
    return selected;
  }

  // Auto: Priority 1 – resolve from folder name candidates; Priority 2 – file-based disambiguation.
  std::vector<SttModelKind> nameCandidates = GetKindsFromDirName(modelDir);
  if (!nameCandidates.empty()) {
    for (SttModelKind k : nameCandidates) {
      if (CapabilitySupportsKind(k, cap, hints, paths))
        return k;
    }
    // Name hinted at a model type but no candidate had required files; fall through to file-only.
  }

  // Fallback: no name-based candidates, or none supported – use file-only detection order.
  // The order below is deliberate: transducer first (most specific triple), then moonshine,
  // hinted CTC flavors, FunASR, single-file models, whisper, and finally a bare CTC model.
  if (cap.hasTransducer) {
    return (hints.isLikelyNemo || hints.isLikelyTdt) ? SttModelKind::kNemoTransducer : SttModelKind::kTransducer;
  }
  if (hints.isLikelyMoonshine && cap.hasMoonshineV2) return SttModelKind::kMoonshineV2;
  if (hints.isLikelyMoonshine && cap.hasMoonshine) return SttModelKind::kMoonshine;
  if (!paths.ctcModel.empty() && (hints.isLikelyToneCtc || hints.isLikelyNemo || hints.isLikelyWenetCtc || hints.isLikelySenseVoice)) {
    if (hints.isLikelyToneCtc) return SttModelKind::kToneCtc;
    if (hints.isLikelyNemo) return SttModelKind::kNemoCtc;
    if (hints.isLikelyWenetCtc) return SttModelKind::kWenetCtc;
    return SttModelKind::kSenseVoice;
  }
  if (cap.hasFunAsrNano && hints.isLikelyFunAsrNano) return SttModelKind::kFunAsrNano;
  // Single-file Fire Red is routed through the CTC loader (see ComputeSttCapabilities).
  if (cap.hasFireRedCtc) return SttModelKind::kZipformerCtc;
  if (!paths.paraformerModel.empty()) return SttModelKind::kParaformer;
  if (cap.hasCanary) return SttModelKind::kCanary;
  if (cap.hasFireRedAsr) return SttModelKind::kFireRedAsr;
  if (cap.hasWhisper) return SttModelKind::kWhisper;
  if (cap.hasFunAsrNano) return SttModelKind::kFunAsrNano;
  if (cap.hasMoonshineV2) return SttModelKind::kMoonshineV2;
  if (cap.hasDolphin) return SttModelKind::kDolphin;
  if (cap.hasOmnilingual) return SttModelKind::kOmnilingual;
  if (cap.hasMedAsr) return SttModelKind::kMedAsr;
  if (cap.hasTeleSpeechCtc) return SttModelKind::kTeleSpeechCtc;
  if (cap.hasToneCtc) return SttModelKind::kToneCtc;
  if (!paths.ctcModel.empty()) return SttModelKind::kZipformerCtc;
  return SttModelKind::kUnknown;
}
|
|
527
|
+
|
|
528
|
+
static void ApplyPathsForSttKind(SttModelKind kind, const SttCandidatePaths& candidate, SttModelPaths& resultPaths) {
|
|
529
|
+
switch (kind) {
|
|
530
|
+
case SttModelKind::kTransducer:
|
|
531
|
+
case SttModelKind::kNemoTransducer:
|
|
532
|
+
resultPaths.encoder = candidate.encoder;
|
|
533
|
+
resultPaths.decoder = candidate.decoder;
|
|
534
|
+
resultPaths.joiner = candidate.joiner;
|
|
535
|
+
break;
|
|
536
|
+
case SttModelKind::kParaformer:
|
|
537
|
+
resultPaths.paraformerModel = candidate.paraformerModel;
|
|
538
|
+
// Streaming paraformer: encoder.onnx + decoder.onnx (no single model.onnx).
|
|
539
|
+
if (resultPaths.paraformerModel.empty() && !candidate.encoder.empty() && !candidate.decoder.empty()) {
|
|
540
|
+
resultPaths.encoder = candidate.encoder;
|
|
541
|
+
resultPaths.decoder = candidate.decoder;
|
|
542
|
+
}
|
|
543
|
+
break;
|
|
544
|
+
case SttModelKind::kNemoCtc:
|
|
545
|
+
case SttModelKind::kWenetCtc:
|
|
546
|
+
case SttModelKind::kSenseVoice:
|
|
547
|
+
case SttModelKind::kZipformerCtc:
|
|
548
|
+
case SttModelKind::kToneCtc:
|
|
549
|
+
resultPaths.ctcModel = candidate.ctcModel;
|
|
550
|
+
break;
|
|
551
|
+
case SttModelKind::kWhisper:
|
|
552
|
+
resultPaths.whisperEncoder = candidate.encoder;
|
|
553
|
+
resultPaths.whisperDecoder = candidate.decoder;
|
|
554
|
+
break;
|
|
555
|
+
case SttModelKind::kFunAsrNano:
|
|
556
|
+
resultPaths.funasrEncoderAdaptor = candidate.funasrEncoderAdaptor;
|
|
557
|
+
resultPaths.funasrLLM = candidate.funasrLLM;
|
|
558
|
+
resultPaths.funasrEmbedding = candidate.funasrEmbedding;
|
|
559
|
+
resultPaths.funasrTokenizer = candidate.funasrTokenizerDir;
|
|
560
|
+
break;
|
|
561
|
+
case SttModelKind::kMoonshine:
|
|
562
|
+
resultPaths.moonshinePreprocessor = candidate.moonshinePreprocessor;
|
|
563
|
+
resultPaths.moonshineEncoder = candidate.moonshineEncoder;
|
|
564
|
+
resultPaths.moonshineUncachedDecoder = candidate.moonshineUncachedDecoder;
|
|
565
|
+
resultPaths.moonshineCachedDecoder = candidate.moonshineCachedDecoder;
|
|
566
|
+
break;
|
|
567
|
+
case SttModelKind::kMoonshineV2:
|
|
568
|
+
resultPaths.moonshineEncoder = candidate.encoderForV2;
|
|
569
|
+
resultPaths.moonshineMergedDecoder = candidate.moonshineMergedDecoder;
|
|
570
|
+
break;
|
|
571
|
+
case SttModelKind::kDolphin:
|
|
572
|
+
resultPaths.dolphinModel = candidate.ctcModel.empty() ? candidate.paraformerModel : candidate.ctcModel;
|
|
573
|
+
break;
|
|
574
|
+
case SttModelKind::kFireRedAsr: {
|
|
575
|
+
std::string singleModel = candidate.paraformerModel.empty() ? candidate.ctcModel : candidate.paraformerModel;
|
|
576
|
+
resultPaths.fireRedEncoder = candidate.encoder.empty() ? singleModel : candidate.encoder;
|
|
577
|
+
resultPaths.fireRedDecoder = candidate.decoder.empty() ? singleModel : candidate.decoder;
|
|
578
|
+
break;
|
|
579
|
+
}
|
|
580
|
+
case SttModelKind::kCanary:
|
|
581
|
+
resultPaths.canaryEncoder = candidate.encoder;
|
|
582
|
+
resultPaths.canaryDecoder = candidate.decoder;
|
|
583
|
+
break;
|
|
584
|
+
case SttModelKind::kOmnilingual:
|
|
585
|
+
resultPaths.omnilingualModel = candidate.ctcModel;
|
|
586
|
+
break;
|
|
587
|
+
case SttModelKind::kMedAsr:
|
|
588
|
+
resultPaths.medasrModel = candidate.ctcModel;
|
|
589
|
+
break;
|
|
590
|
+
case SttModelKind::kTeleSpeechCtc:
|
|
591
|
+
resultPaths.telespeechCtcModel = candidate.ctcModel.empty() ? candidate.paraformerModel : candidate.ctcModel;
|
|
592
|
+
break;
|
|
593
|
+
default:
|
|
594
|
+
break;
|
|
595
|
+
}
|
|
596
|
+
}
|
|
597
|
+
|
|
42
598
|
} // namespace
|
|
43
599
|
|
|
44
600
|
SttDetectResult DetectSttModel(
|
|
@@ -81,334 +637,165 @@ SttDetectResult DetectSttModel(
|
|
|
81
637
|
LOGI("(detailed file listing suppressed; enable by passing debug=true to initialize())");
|
|
82
638
|
}
|
|
83
639
|
|
|
84
|
-
|
|
85
|
-
|
|
86
|
-
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
|
|
90
|
-
|
|
91
|
-
|
|
92
|
-
|
|
93
|
-
|
|
94
|
-
|
|
95
|
-
|
|
96
|
-
|
|
97
|
-
|
|
98
|
-
|
|
99
|
-
|
|
100
|
-
|
|
101
|
-
|
|
102
|
-
|
|
103
|
-
|
|
104
|
-
"
|
|
105
|
-
|
|
106
|
-
|
|
107
|
-
|
|
108
|
-
|
|
109
|
-
"embedding",
|
|
110
|
-
"llm",
|
|
111
|
-
"encoder_adaptor",
|
|
112
|
-
"encoder-adaptor"
|
|
113
|
-
};
|
|
114
|
-
|
|
115
|
-
std::string paraformerModelPath = FindOnnxByAnyToken(files, {"model"}, preferInt8);
|
|
116
|
-
if (paraformerModelPath.empty()) {
|
|
117
|
-
paraformerModelPath = FindLargestOnnxExcludingTokens(files, modelExcludes);
|
|
118
|
-
}
|
|
119
|
-
|
|
120
|
-
std::string ctcModelPath = FindOnnxByAnyToken(files, {"model"}, preferInt8);
|
|
121
|
-
if (ctcModelPath.empty()) {
|
|
122
|
-
ctcModelPath = FindLargestOnnxExcludingTokens(files, modelExcludes);
|
|
640
|
+
SttCandidatePaths candidate = GatherSttCandidatePaths(files, modelDir, kMaxSearchDepth, preferInt8);
|
|
641
|
+
SttPathHints hints = GetSttPathHints(modelDir);
|
|
642
|
+
SttCapabilities cap = ComputeSttCapabilities(candidate, hints);
|
|
643
|
+
if (debug) {
|
|
644
|
+
LOGI("DetectSttModel: tokens=%s", EmptyOrPath(candidate.tokens));
|
|
645
|
+
LOGI("DetectSttModel: transducer encoder=%s decoder=%s joiner=%s",
|
|
646
|
+
EmptyOrPath(candidate.encoder), EmptyOrPath(candidate.decoder), EmptyOrPath(candidate.joiner));
|
|
647
|
+
LOGI("DetectSttModel: paraformerModel=%s ctcModel=%s tokens=%s bpeVocab=%s",
|
|
648
|
+
EmptyOrPath(candidate.paraformerModel), EmptyOrPath(candidate.ctcModel), EmptyOrPath(candidate.tokens), EmptyOrPath(candidate.bpeVocab));
|
|
649
|
+
LOGI("DetectSttModel: moonshine preprocessor=%s encoder=%s uncachedDecoder=%s cachedDecoder=%s mergedDecoder=%s",
|
|
650
|
+
EmptyOrPath(candidate.moonshinePreprocessor), EmptyOrPath(candidate.moonshineEncoder), EmptyOrPath(candidate.moonshineUncachedDecoder),
|
|
651
|
+
EmptyOrPath(candidate.moonshineCachedDecoder), EmptyOrPath(candidate.moonshineMergedDecoder));
|
|
652
|
+
LOGI("DetectSttModel: whisper encoder=%s decoder=%s (same as transducer; joiner empty => whisper)",
|
|
653
|
+
EmptyOrPath(candidate.encoder), EmptyOrPath(candidate.decoder));
|
|
654
|
+
LOGI("DetectSttModel: funasr encoderAdaptor=%s llm=%s embedding=%s tokenizerDir=%s",
|
|
655
|
+
EmptyOrPath(candidate.funasrEncoderAdaptor), EmptyOrPath(candidate.funasrLLM), EmptyOrPath(candidate.funasrEmbedding), EmptyOrPath(candidate.funasrTokenizerDir));
|
|
656
|
+
LOGI("DetectSttModel: hasTransducer=%d hasWhisper=%d hasMoonshine=%d hasMoonshineV2=%d hasParaformer=%d hasFunAsrNano=%d hasDolphin=%d hasFireRedAsr=%d hasFireRedCtc=%d hasCanary=%d hasOmnilingual=%d hasMedAsr=%d hasTeleSpeechCtc=%d hasToneCtc=%d",
|
|
657
|
+
(int)cap.hasTransducer, (int)cap.hasWhisper, (int)cap.hasMoonshine, (int)cap.hasMoonshineV2,
|
|
658
|
+
(int)cap.hasParaformer, (int)cap.hasFunAsrNano, (int)cap.hasDolphin, (int)cap.hasFireRedAsr, (int)cap.hasFireRedCtc,
|
|
659
|
+
(int)cap.hasCanary, (int)cap.hasOmnilingual, (int)cap.hasMedAsr, (int)cap.hasTeleSpeechCtc, (int)cap.hasToneCtc);
|
|
660
|
+
LOGI("DetectSttModel: hints isLikelyNemo=%d isLikelyTdt=%d isLikelyWenetCtc=%d isLikelySenseVoice=%d isLikelyFunAsrNano=%d isLikelyZipformer=%d isLikelyMoonshine=%d isLikelyDolphin=%d isLikelyFireRedAsr=%d isLikelyCanary=%d isLikelyOmnilingual=%d isLikelyMedAsr=%d isLikelyTeleSpeech=%d isLikelyToneCtc=%d isLikelyParaformer=%d isLikelyVad=%d isLikelyTdnn=%d",
|
|
661
|
+
(int)hints.isLikelyNemo, (int)hints.isLikelyTdt, (int)hints.isLikelyWenetCtc, (int)hints.isLikelySenseVoice,
|
|
662
|
+
(int)hints.isLikelyFunAsrNano, (int)hints.isLikelyZipformer, (int)hints.isLikelyMoonshine, (int)hints.isLikelyDolphin,
|
|
663
|
+
(int)hints.isLikelyFireRedAsr, (int)hints.isLikelyCanary, (int)hints.isLikelyOmnilingual, (int)hints.isLikelyMedAsr,
|
|
664
|
+
(int)hints.isLikelyTeleSpeech, (int)hints.isLikelyToneCtc, (int)hints.isLikelyParaformer, (int)hints.isLikelyVad, (int)hints.isLikelyTdnn);
|
|
123
665
|
}
|
|
124
666
|
|
|
125
|
-
|
|
126
|
-
// (e.g. "tiny-tokens.txt" for Whisper models). Use same depth as file list
|
|
127
|
-
// so layouts like root/data/lang_bpe_500/tokens.txt (icefall) are found.
|
|
128
|
-
std::string tokensPath = FindFileEndingWith(modelDir, "tokens.txt", kMaxSearchDepth);
|
|
129
|
-
LOGI("DetectSttModel: tokens=%s", tokensPath.c_str());
|
|
667
|
+
CollectDetectedModels(result.detectedModels, cap, hints, candidate, modelDir);
|
|
130
668
|
|
|
131
|
-
|
|
132
|
-
|
|
133
|
-
|
|
134
|
-
|
|
135
|
-
|
|
136
|
-
|
|
137
|
-
|
|
138
|
-
|
|
139
|
-
bool hasWhisperEncoder = !encoderPath.empty();
|
|
140
|
-
bool hasWhisperDecoder = !decoderPath.empty();
|
|
141
|
-
bool hasWhisper = hasWhisperEncoder && hasWhisperDecoder && joinerPath.empty();
|
|
142
|
-
|
|
143
|
-
bool hasFunAsrEncoderAdaptor = !funasrEncoderAdaptor.empty();
|
|
144
|
-
bool hasFunAsrLLM = !funasrLLM.empty();
|
|
145
|
-
bool hasFunAsrEmbedding = !funasrEmbedding.empty();
|
|
146
|
-
bool hasFunAsrTokenizer = !funasrTokenizerDir.empty() && FileExists(funasrTokenizerDir + "/vocab.json");
|
|
147
|
-
bool hasFunAsrNano = hasFunAsrEncoderAdaptor && hasFunAsrLLM && hasFunAsrEmbedding && hasFunAsrTokenizer;
|
|
148
|
-
|
|
149
|
-
// Case-insensitive path hints so "Nemo parakeet Tdt CTC 110m EN" etc. are recognized
|
|
150
|
-
std::string modelDirLower = model_detect::ToLower(modelDir);
|
|
151
|
-
bool isLikelyNemo = modelDirLower.find("nemo") != std::string::npos ||
|
|
152
|
-
modelDirLower.find("parakeet") != std::string::npos;
|
|
153
|
-
bool isLikelyTdt = modelDirLower.find("tdt") != std::string::npos;
|
|
154
|
-
bool isLikelyWenetCtc = modelDirLower.find("wenet") != std::string::npos;
|
|
155
|
-
bool isLikelySenseVoice = modelDirLower.find("sense") != std::string::npos ||
|
|
156
|
-
modelDirLower.find("sensevoice") != std::string::npos;
|
|
157
|
-
bool isLikelyFunAsrNano = modelDirLower.find("funasr") != std::string::npos ||
|
|
158
|
-
modelDirLower.find("funasr-nano") != std::string::npos;
|
|
159
|
-
bool isLikelyMoonshine = modelDirLower.find("moonshine") != std::string::npos;
|
|
160
|
-
bool isLikelyDolphin = modelDirLower.find("dolphin") != std::string::npos;
|
|
161
|
-
bool isLikelyFireRedAsr = modelDirLower.find("fire_red") != std::string::npos ||
|
|
162
|
-
modelDirLower.find("fire-red") != std::string::npos;
|
|
163
|
-
bool isLikelyCanary = modelDirLower.find("canary") != std::string::npos;
|
|
164
|
-
bool isLikelyOmnilingual = modelDirLower.find("omnilingual") != std::string::npos;
|
|
165
|
-
bool isLikelyMedAsr = modelDirLower.find("medasr") != std::string::npos;
|
|
166
|
-
bool isLikelyTeleSpeech = modelDirLower.find("telespeech") != std::string::npos;
|
|
167
|
-
// Tone CTC: match "tone" only as standalone word (not e.g. "cantonese"); also accept "t-one" / "t_one"
|
|
168
|
-
bool isLikelyToneCtc = modelDirLower.find("t-one") != std::string::npos ||
|
|
169
|
-
modelDirLower.find("t_one") != std::string::npos ||
|
|
170
|
-
model_detect::ContainsWord(modelDirLower, "tone");
|
|
171
|
-
|
|
172
|
-
bool hasMoonshine = !moonshinePreprocessor.empty() && !moonshineUncachedDecoder.empty() &&
|
|
173
|
-
!moonshineCachedDecoder.empty() && !moonshineEncoder.empty();
|
|
174
|
-
bool hasDolphin = isLikelyDolphin && !ctcModelPath.empty();
|
|
175
|
-
bool hasFireRedAsr = hasTransducer && isLikelyFireRedAsr;
|
|
176
|
-
// Canary (NeMo Canary) uses encoder + decoder without joiner; same file pattern as Whisper but path contains "canary"
|
|
177
|
-
bool hasCanary = hasWhisperEncoder && hasWhisperDecoder && joinerPath.empty() && isLikelyCanary;
|
|
178
|
-
bool hasOmnilingual = !ctcModelPath.empty() && isLikelyOmnilingual;
|
|
179
|
-
bool hasMedAsr = !ctcModelPath.empty() && isLikelyMedAsr;
|
|
180
|
-
bool hasTeleSpeechCtc = (!ctcModelPath.empty() || !paraformerModelPath.empty()) && isLikelyTeleSpeech;
|
|
181
|
-
bool hasToneCtc = !ctcModelPath.empty() && isLikelyToneCtc;
|
|
182
|
-
|
|
183
|
-
if (hasTransducer) {
|
|
184
|
-
if (isLikelyNemo || isLikelyTdt) {
|
|
185
|
-
result.detectedModels.push_back({"nemo_transducer", modelDir});
|
|
186
|
-
} else {
|
|
187
|
-
result.detectedModels.push_back({"transducer", modelDir});
|
|
669
|
+
result.selectedKind = ResolveSttKind(modelType, cap, hints, candidate, modelDir, result.error);
|
|
670
|
+
if (result.selectedKind == SttModelKind::kUnknown) {
|
|
671
|
+
if (IsHardwareSpecificModelDir(modelDir)) {
|
|
672
|
+
result.ok = false;
|
|
673
|
+
result.isHardwareSpecificUnsupported = true;
|
|
674
|
+
result.error = kHardwareSpecificUnsupportedMessage;
|
|
675
|
+
LOGE("%s", result.error.c_str());
|
|
676
|
+
return result;
|
|
188
677
|
}
|
|
189
|
-
|
|
190
|
-
|
|
191
|
-
|
|
192
|
-
if (isLikelyNemo) {
|
|
193
|
-
result.detectedModels.push_back({"nemo_ctc", modelDir});
|
|
194
|
-
} else if (isLikelyWenetCtc) {
|
|
195
|
-
result.detectedModels.push_back({"wenet_ctc", modelDir});
|
|
196
|
-
} else if (isLikelySenseVoice) {
|
|
197
|
-
result.detectedModels.push_back({"sense_voice", modelDir});
|
|
198
|
-
} else {
|
|
199
|
-
result.detectedModels.push_back({"ctc", modelDir});
|
|
678
|
+
if (!result.error.empty()) {
|
|
679
|
+
LOGE("%s", result.error.c_str());
|
|
680
|
+
return result;
|
|
200
681
|
}
|
|
201
|
-
|
|
202
|
-
|
|
682
|
+
result.error = "No compatible model type detected in " + modelDir;
|
|
683
|
+
LOGE("%s", result.error.c_str());
|
|
684
|
+
return result;
|
|
203
685
|
}
|
|
204
686
|
|
|
205
|
-
|
|
206
|
-
|
|
207
|
-
|
|
687
|
+
LOGI("DetectSttModel: selected kind=%d (%s)", static_cast<int>(result.selectedKind), KindToName(result.selectedKind));
|
|
688
|
+
result.tokensRequired = (result.selectedKind != SttModelKind::kFunAsrNano);
|
|
689
|
+
ApplyPathsForSttKind(result.selectedKind, candidate, result.paths);
|
|
208
690
|
|
|
209
|
-
if (
|
|
210
|
-
result.
|
|
211
|
-
}
|
|
212
|
-
|
|
213
|
-
|
|
214
|
-
|
|
215
|
-
if (hasDolphin) {
|
|
216
|
-
result.detectedModels.push_back({"dolphin", modelDir});
|
|
217
|
-
}
|
|
218
|
-
if (hasFireRedAsr) {
|
|
219
|
-
result.detectedModels.push_back({"fire_red_asr", modelDir});
|
|
220
|
-
}
|
|
221
|
-
if (hasCanary) {
|
|
222
|
-
result.detectedModels.push_back({"canary", modelDir});
|
|
223
|
-
}
|
|
224
|
-
if (hasOmnilingual) {
|
|
225
|
-
result.detectedModels.push_back({"omnilingual", modelDir});
|
|
691
|
+
if (!candidate.tokens.empty() && FileExists(candidate.tokens)) {
|
|
692
|
+
result.paths.tokens = candidate.tokens;
|
|
693
|
+
} else if (result.tokensRequired) {
|
|
694
|
+
result.error = "Tokens file not found in " + modelDir;
|
|
695
|
+
LOGE("%s", result.error.c_str());
|
|
696
|
+
return result;
|
|
226
697
|
}
|
|
227
|
-
if (
|
|
228
|
-
result.
|
|
698
|
+
if (!candidate.bpeVocab.empty() && FileExists(candidate.bpeVocab)) {
|
|
699
|
+
result.paths.bpeVocab = candidate.bpeVocab;
|
|
229
700
|
}
|
|
230
|
-
|
|
231
|
-
|
|
701
|
+
|
|
702
|
+
// Log paths actually set for the selected kind (so we can verify nothing is missing).
|
|
703
|
+
switch (result.selectedKind) {
|
|
704
|
+
case SttModelKind::kTransducer:
|
|
705
|
+
case SttModelKind::kNemoTransducer:
|
|
706
|
+
LOGI("DetectSttModel: paths set encoder=%s decoder=%s joiner=%s",
|
|
707
|
+
EmptyOrPath(result.paths.encoder), EmptyOrPath(result.paths.decoder), EmptyOrPath(result.paths.joiner));
|
|
708
|
+
break;
|
|
709
|
+
case SttModelKind::kParaformer:
|
|
710
|
+
LOGI("DetectSttModel: paths set paraformerModel=%s", EmptyOrPath(result.paths.paraformerModel));
|
|
711
|
+
break;
|
|
712
|
+
case SttModelKind::kWhisper:
|
|
713
|
+
LOGI("DetectSttModel: paths set whisperEncoder=%s whisperDecoder=%s",
|
|
714
|
+
EmptyOrPath(result.paths.whisperEncoder), EmptyOrPath(result.paths.whisperDecoder));
|
|
715
|
+
break;
|
|
716
|
+
case SttModelKind::kMoonshine:
|
|
717
|
+
LOGI("DetectSttModel: paths set moonshine preprocessor=%s encoder=%s uncachedDecoder=%s cachedDecoder=%s",
|
|
718
|
+
EmptyOrPath(result.paths.moonshinePreprocessor), EmptyOrPath(result.paths.moonshineEncoder),
|
|
719
|
+
EmptyOrPath(result.paths.moonshineUncachedDecoder), EmptyOrPath(result.paths.moonshineCachedDecoder));
|
|
720
|
+
break;
|
|
721
|
+
case SttModelKind::kMoonshineV2:
|
|
722
|
+
LOGI("DetectSttModel: paths set moonshine_v2 encoder=%s mergedDecoder=%s",
|
|
723
|
+
EmptyOrPath(result.paths.moonshineEncoder), EmptyOrPath(result.paths.moonshineMergedDecoder));
|
|
724
|
+
break;
|
|
725
|
+
case SttModelKind::kNemoCtc:
|
|
726
|
+
case SttModelKind::kWenetCtc:
|
|
727
|
+
case SttModelKind::kSenseVoice:
|
|
728
|
+
case SttModelKind::kZipformerCtc:
|
|
729
|
+
case SttModelKind::kToneCtc:
|
|
730
|
+
LOGI("DetectSttModel: paths set ctcModel=%s", EmptyOrPath(result.paths.ctcModel));
|
|
731
|
+
break;
|
|
732
|
+
case SttModelKind::kFireRedAsr:
|
|
733
|
+
LOGI("DetectSttModel: paths set fireRedEncoder=%s fireRedDecoder=%s",
|
|
734
|
+
EmptyOrPath(result.paths.fireRedEncoder), EmptyOrPath(result.paths.fireRedDecoder));
|
|
735
|
+
break;
|
|
736
|
+
case SttModelKind::kFunAsrNano:
|
|
737
|
+
LOGI("DetectSttModel: paths set funasr adaptor=%s llm=%s embedding=%s tokenizer=%s",
|
|
738
|
+
EmptyOrPath(result.paths.funasrEncoderAdaptor), EmptyOrPath(result.paths.funasrLLM),
|
|
739
|
+
EmptyOrPath(result.paths.funasrEmbedding), EmptyOrPath(result.paths.funasrTokenizer));
|
|
740
|
+
break;
|
|
741
|
+
default:
|
|
742
|
+
break;
|
|
232
743
|
}
|
|
233
|
-
|
|
234
|
-
|
|
744
|
+
LOGI("DetectSttModel: tokens=%s (required=%d)", EmptyOrPath(result.paths.tokens), (int)result.tokensRequired);
|
|
745
|
+
LOGI("DetectSttModel: detection OK for %s", modelDir.c_str());
|
|
746
|
+
result.ok = true;
|
|
747
|
+
return result;
|
|
748
|
+
}
|
|
749
|
+
|
|
750
|
+
// Test-only: used by host-side model_detect_test; not used in production (Android/iOS use DetectSttModel).
|
|
751
|
+
SttDetectResult DetectSttModelFromFileList(
|
|
752
|
+
const std::vector<model_detect::FileEntry>& files,
|
|
753
|
+
const std::string& modelDir,
|
|
754
|
+
const std::optional<bool>& preferInt8,
|
|
755
|
+
const std::optional<std::string>& modelType
|
|
756
|
+
) {
|
|
757
|
+
using namespace model_detect;
|
|
758
|
+
|
|
759
|
+
SttDetectResult result;
|
|
760
|
+
const int kMaxSearchDepth = 4;
|
|
761
|
+
|
|
762
|
+
if (modelDir.empty()) {
|
|
763
|
+
result.error = "Model directory is empty";
|
|
764
|
+
return result;
|
|
235
765
|
}
|
|
236
766
|
|
|
237
|
-
|
|
767
|
+
SttCandidatePaths candidate = GatherSttCandidatePaths(files, modelDir, kMaxSearchDepth, preferInt8);
|
|
768
|
+
SttPathHints hints = GetSttPathHints(modelDir);
|
|
769
|
+
SttCapabilities cap = ComputeSttCapabilities(candidate, hints);
|
|
238
770
|
|
|
239
|
-
|
|
240
|
-
selected = ParseSttModelType(modelType.value());
|
|
241
|
-
if (selected == SttModelKind::kUnknown) {
|
|
242
|
-
result.error = "Unknown model type: " + modelType.value();
|
|
243
|
-
return result;
|
|
244
|
-
}
|
|
771
|
+
CollectDetectedModels(result.detectedModels, cap, hints, candidate, modelDir);
|
|
245
772
|
|
|
246
|
-
|
|
247
|
-
|
|
248
|
-
|
|
249
|
-
|
|
250
|
-
|
|
251
|
-
result.error =
|
|
252
|
-
return result;
|
|
253
|
-
}
|
|
254
|
-
if (selected == SttModelKind::kParaformer && paraformerModelPath.empty()) {
|
|
255
|
-
result.error = "Paraformer model requested but model file not found in " + modelDir;
|
|
256
|
-
return result;
|
|
257
|
-
}
|
|
258
|
-
if ((selected == SttModelKind::kNemoCtc || selected == SttModelKind::kWenetCtc ||
|
|
259
|
-
selected == SttModelKind::kSenseVoice || selected == SttModelKind::kZipformerCtc ||
|
|
260
|
-
selected == SttModelKind::kToneCtc) &&
|
|
261
|
-
ctcModelPath.empty()) {
|
|
262
|
-
result.error = "CTC model requested but model file not found in " + modelDir;
|
|
773
|
+
result.selectedKind = ResolveSttKind(modelType, cap, hints, candidate, modelDir, result.error);
|
|
774
|
+
if (result.selectedKind == SttModelKind::kUnknown) {
|
|
775
|
+
if (IsHardwareSpecificModelDir(modelDir)) {
|
|
776
|
+
result.ok = false;
|
|
777
|
+
result.isHardwareSpecificUnsupported = true;
|
|
778
|
+
result.error = kHardwareSpecificUnsupportedMessage;
|
|
263
779
|
return result;
|
|
264
780
|
}
|
|
265
|
-
if (
|
|
266
|
-
result.error = "
|
|
267
|
-
|
|
268
|
-
}
|
|
269
|
-
if (selected == SttModelKind::kFunAsrNano && !hasFunAsrNano) {
|
|
270
|
-
result.error = "FunASR Nano model requested but required files not found in " + modelDir;
|
|
271
|
-
return result;
|
|
272
|
-
}
|
|
273
|
-
if (selected == SttModelKind::kMoonshine && !hasMoonshine) {
|
|
274
|
-
result.error = "Moonshine model requested but preprocess/encode/uncached_decode/cached_decode not found in " + modelDir;
|
|
275
|
-
return result;
|
|
276
|
-
}
|
|
277
|
-
if (selected == SttModelKind::kDolphin && !hasDolphin) {
|
|
278
|
-
result.error = "Dolphin model requested but model not found in " + modelDir;
|
|
279
|
-
return result;
|
|
280
|
-
}
|
|
281
|
-
if (selected == SttModelKind::kFireRedAsr && !hasFireRedAsr) {
|
|
282
|
-
result.error = "FireRed ASR model requested but encoder/decoder not found in " + modelDir;
|
|
283
|
-
return result;
|
|
284
|
-
}
|
|
285
|
-
if (selected == SttModelKind::kCanary && !hasCanary) {
|
|
286
|
-
result.error = "Canary model requested but encoder/decoder not found in " + modelDir;
|
|
287
|
-
return result;
|
|
288
|
-
}
|
|
289
|
-
if (selected == SttModelKind::kOmnilingual && !hasOmnilingual) {
|
|
290
|
-
result.error = "Omnilingual model requested but model not found in " + modelDir;
|
|
291
|
-
return result;
|
|
292
|
-
}
|
|
293
|
-
if (selected == SttModelKind::kMedAsr && !hasMedAsr) {
|
|
294
|
-
result.error = "MedASR model requested but model not found in " + modelDir;
|
|
295
|
-
return result;
|
|
296
|
-
}
|
|
297
|
-
if (selected == SttModelKind::kTeleSpeechCtc && !hasTeleSpeechCtc) {
|
|
298
|
-
result.error = "TeleSpeech CTC model requested but model not found in " + modelDir;
|
|
299
|
-
return result;
|
|
300
|
-
}
|
|
301
|
-
if (selected == SttModelKind::kToneCtc && !hasToneCtc) {
|
|
302
|
-
result.error = "Tone CTC model requested but path does not contain 'tone' (as a word), 't-one', or 't_one' (e.g. sherpa-onnx-streaming-t-one-*) in " + modelDir;
|
|
303
|
-
return result;
|
|
304
|
-
}
|
|
305
|
-
} else {
|
|
306
|
-
if (hasTransducer) {
|
|
307
|
-
selected = (isLikelyNemo || isLikelyTdt) ? SttModelKind::kNemoTransducer : SttModelKind::kTransducer;
|
|
308
|
-
} else if (!ctcModelPath.empty() && (isLikelyNemo || isLikelyWenetCtc || isLikelySenseVoice)) {
|
|
309
|
-
if (isLikelyNemo) {
|
|
310
|
-
selected = SttModelKind::kNemoCtc;
|
|
311
|
-
} else if (isLikelyWenetCtc) {
|
|
312
|
-
selected = SttModelKind::kWenetCtc;
|
|
313
|
-
} else {
|
|
314
|
-
selected = SttModelKind::kSenseVoice;
|
|
315
|
-
}
|
|
316
|
-
} else if (hasFunAsrNano && isLikelyFunAsrNano) {
|
|
317
|
-
selected = SttModelKind::kFunAsrNano;
|
|
318
|
-
} else if (!paraformerModelPath.empty()) {
|
|
319
|
-
selected = SttModelKind::kParaformer;
|
|
320
|
-
} else if (hasCanary) {
|
|
321
|
-
selected = SttModelKind::kCanary;
|
|
322
|
-
} else if (hasFireRedAsr) {
|
|
323
|
-
selected = SttModelKind::kFireRedAsr;
|
|
324
|
-
} else if (hasWhisper) {
|
|
325
|
-
selected = SttModelKind::kWhisper;
|
|
326
|
-
} else if (hasFunAsrNano) {
|
|
327
|
-
selected = SttModelKind::kFunAsrNano;
|
|
328
|
-
} else if (hasMoonshine && isLikelyMoonshine) {
|
|
329
|
-
selected = SttModelKind::kMoonshine;
|
|
330
|
-
} else if (hasDolphin) {
|
|
331
|
-
selected = SttModelKind::kDolphin;
|
|
332
|
-
} else if (hasOmnilingual) {
|
|
333
|
-
selected = SttModelKind::kOmnilingual;
|
|
334
|
-
} else if (hasMedAsr) {
|
|
335
|
-
selected = SttModelKind::kMedAsr;
|
|
336
|
-
} else if (hasTeleSpeechCtc) {
|
|
337
|
-
selected = SttModelKind::kTeleSpeechCtc;
|
|
338
|
-
} else if (hasToneCtc) {
|
|
339
|
-
selected = SttModelKind::kToneCtc;
|
|
340
|
-
} else if (!ctcModelPath.empty()) {
|
|
341
|
-
selected = SttModelKind::kZipformerCtc;
|
|
342
|
-
}
|
|
343
|
-
}
|
|
344
|
-
|
|
345
|
-
if (selected == SttModelKind::kUnknown) {
|
|
346
|
-
result.error = "No compatible model type detected in " + modelDir;
|
|
347
|
-
LOGE("%s", result.error.c_str());
|
|
781
|
+
if (result.error.empty())
|
|
782
|
+
result.error = "No compatible model type detected in " + modelDir;
|
|
783
|
+
result.ok = false;
|
|
348
784
|
return result;
|
|
349
785
|
}
|
|
350
786
|
|
|
351
|
-
|
|
352
|
-
result.selectedKind
|
|
353
|
-
// sherpa-onnx's OfflineModelConfig::Validate() requires tokens for ALL models
|
|
354
|
-
// except FunASR-nano (which uses its own tokenizer directory).
|
|
355
|
-
// Whisper models also need tokens.txt despite seeming self-contained.
|
|
356
|
-
result.tokensRequired = (selected != SttModelKind::kFunAsrNano);
|
|
357
|
-
|
|
358
|
-
if (selected == SttModelKind::kTransducer || selected == SttModelKind::kNemoTransducer) {
|
|
359
|
-
result.paths.encoder = encoderPath;
|
|
360
|
-
result.paths.decoder = decoderPath;
|
|
361
|
-
result.paths.joiner = joinerPath;
|
|
362
|
-
} else if (selected == SttModelKind::kParaformer) {
|
|
363
|
-
result.paths.paraformerModel = paraformerModelPath;
|
|
364
|
-
} else if (selected == SttModelKind::kNemoCtc || selected == SttModelKind::kWenetCtc ||
|
|
365
|
-
selected == SttModelKind::kSenseVoice || selected == SttModelKind::kZipformerCtc ||
|
|
366
|
-
selected == SttModelKind::kToneCtc) {
|
|
367
|
-
result.paths.ctcModel = ctcModelPath;
|
|
368
|
-
} else if (selected == SttModelKind::kWhisper) {
|
|
369
|
-
result.paths.whisperEncoder = encoderPath;
|
|
370
|
-
result.paths.whisperDecoder = decoderPath;
|
|
371
|
-
} else if (selected == SttModelKind::kFunAsrNano) {
|
|
372
|
-
result.paths.funasrEncoderAdaptor = funasrEncoderAdaptor;
|
|
373
|
-
result.paths.funasrLLM = funasrLLM;
|
|
374
|
-
result.paths.funasrEmbedding = funasrEmbedding;
|
|
375
|
-
// FunASR Nano C++ expects tokenizer directory (e.g. .../Qwen3-0.6B), not path to vocab.json
|
|
376
|
-
result.paths.funasrTokenizer = funasrTokenizerDir;
|
|
377
|
-
} else if (selected == SttModelKind::kMoonshine) {
|
|
378
|
-
result.paths.moonshinePreprocessor = moonshinePreprocessor;
|
|
379
|
-
result.paths.moonshineEncoder = moonshineEncoder;
|
|
380
|
-
result.paths.moonshineUncachedDecoder = moonshineUncachedDecoder;
|
|
381
|
-
result.paths.moonshineCachedDecoder = moonshineCachedDecoder;
|
|
382
|
-
} else if (selected == SttModelKind::kDolphin) {
|
|
383
|
-
result.paths.dolphinModel = ctcModelPath.empty() ? paraformerModelPath : ctcModelPath;
|
|
384
|
-
} else if (selected == SttModelKind::kFireRedAsr) {
|
|
385
|
-
result.paths.fireRedEncoder = encoderPath;
|
|
386
|
-
result.paths.fireRedDecoder = decoderPath;
|
|
387
|
-
} else if (selected == SttModelKind::kCanary) {
|
|
388
|
-
result.paths.canaryEncoder = encoderPath;
|
|
389
|
-
result.paths.canaryDecoder = decoderPath;
|
|
390
|
-
} else if (selected == SttModelKind::kOmnilingual) {
|
|
391
|
-
result.paths.omnilingualModel = ctcModelPath;
|
|
392
|
-
} else if (selected == SttModelKind::kMedAsr) {
|
|
393
|
-
result.paths.medasrModel = ctcModelPath;
|
|
394
|
-
} else if (selected == SttModelKind::kTeleSpeechCtc) {
|
|
395
|
-
result.paths.telespeechCtcModel = ctcModelPath.empty() ? paraformerModelPath : ctcModelPath;
|
|
396
|
-
}
|
|
787
|
+
result.tokensRequired = (result.selectedKind != SttModelKind::kFunAsrNano);
|
|
788
|
+
ApplyPathsForSttKind(result.selectedKind, candidate, result.paths);
|
|
397
789
|
|
|
398
|
-
|
|
399
|
-
|
|
400
|
-
|
|
790
|
+
result.paths.tokens = candidate.tokens;
|
|
791
|
+
result.paths.bpeVocab = candidate.bpeVocab;
|
|
792
|
+
|
|
793
|
+
if (result.tokensRequired && candidate.tokens.empty()) {
|
|
401
794
|
result.error = "Tokens file not found in " + modelDir;
|
|
402
|
-
|
|
795
|
+
result.ok = false;
|
|
403
796
|
return result;
|
|
404
797
|
}
|
|
405
798
|
|
|
406
|
-
if (!bpeVocabPath.empty() && FileExists(bpeVocabPath)) {
|
|
407
|
-
result.paths.bpeVocab = bpeVocabPath;
|
|
408
|
-
}
|
|
409
|
-
|
|
410
|
-
LOGI("DetectSttModel: detection OK for %s — tokens=%s",
|
|
411
|
-
modelDir.c_str(), result.paths.tokens.c_str());
|
|
412
799
|
result.ok = true;
|
|
413
800
|
return result;
|
|
414
801
|
}
|