ailia-speech 1.3.0.5.tar.gz → 1.3.2.1.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of ailia-speech might be problematic.
- ailia_speech-1.3.2.1/PKG-INFO +123 -0
- ailia_speech-1.3.2.1/README.md +110 -0
- {ailia_speech-1.3.0.5 → ailia_speech-1.3.2.1}/ailia_speech/__init__.py +50 -0
- ailia_speech-1.3.2.1/ailia_speech/linux/arm64-v8a/libailia_speech.so +0 -0
- {ailia_speech-1.3.0.5 → ailia_speech-1.3.2.1}/ailia_speech/linux/x64/libailia_speech.so +0 -0
- {ailia_speech-1.3.0.5 → ailia_speech-1.3.2.1}/ailia_speech/mac/libailia_speech.dylib +0 -0
- ailia_speech-1.3.2.1/ailia_speech/windows/x64/ailia_speech.dll +0 -0
- ailia_speech-1.3.2.1/ailia_speech.egg-info/PKG-INFO +123 -0
- {ailia_speech-1.3.0.5 → ailia_speech-1.3.2.1}/setup.py +1 -1
- ailia_speech-1.3.0.5/PKG-INFO +0 -71
- ailia_speech-1.3.0.5/README.md +0 -58
- ailia_speech-1.3.0.5/ailia_speech/linux/arm64-v8a/libailia_speech.so +0 -0
- ailia_speech-1.3.0.5/ailia_speech/windows/x64/ailia_speech.dll +0 -0
- ailia_speech-1.3.0.5/ailia_speech.egg-info/PKG-INFO +0 -71
- {ailia_speech-1.3.0.5 → ailia_speech-1.3.2.1}/ailia_speech/LICENSE_AILIA_EN.pdf +0 -0
- {ailia_speech-1.3.0.5 → ailia_speech-1.3.2.1}/ailia_speech/LICENSE_AILIA_JA.pdf +0 -0
- {ailia_speech-1.3.0.5 → ailia_speech-1.3.2.1}/ailia_speech.egg-info/SOURCES.txt +0 -0
- {ailia_speech-1.3.0.5 → ailia_speech-1.3.2.1}/ailia_speech.egg-info/dependency_links.txt +0 -0
- {ailia_speech-1.3.0.5 → ailia_speech-1.3.2.1}/ailia_speech.egg-info/requires.txt +0 -0
- {ailia_speech-1.3.0.5 → ailia_speech-1.3.2.1}/ailia_speech.egg-info/top_level.txt +0 -0
- {ailia_speech-1.3.0.5 → ailia_speech-1.3.2.1}/setup.cfg +0 -0
ailia_speech-1.3.2.1/PKG-INFO
@@ -0,0 +1,123 @@
+Metadata-Version: 2.1
+Name: ailia_speech
+Version: 1.3.2.1
+Summary: ailia AI Speech
+Home-page: https://ailia.jp/
+Author: ax Inc.
+Author-email: contact@axinc.jp
+License: https://ailia.ai/en/license/
+Requires-Python: >3.6
+Description-Content-Type: text/markdown
+Requires-Dist: ailia
+Requires-Dist: ailia_tokenizer
+
+# ailia AI Speech Python API
+
+!! CAUTION !!
+“ailia” IS NOT OPEN SOURCE SOFTWARE (OSS).
+As long as user complies with the conditions stated in [License Document](https://ailia.ai/license/), user may use the Software for free of charge, but the Software is basically paid software.
+
+## About ailia AI Speech
+
+ailia AI Speech is a library to perform speech recognition using AI. It provides a C API for native applications, as well as a C# API well suited for Unity applications. Using ailia AI Speech, you can easily integrate AI powered speech recognition into your applications.
+
+## Install from pip
+
+You can install the ailia AI Speech free evaluation package with the following command.
+
+```
+pip3 install ailia_speech
+```
+
+## Install from package
+
+You can install ailia AI Speech from the package with the following command.
+
+```
+python3 bootstrap.py
+pip3 install ./
+```
+
+## Usage
+
+### Batch mode
+
+In batch mode, the entire audio is transcribed at once.
+
+```python
+import ailia_speech
+
+import librosa
+
+import os
+import urllib.request
+
+# Load target audio
+input_file_path = "demo.wav"
+if not os.path.exists(input_file_path):
+    urllib.request.urlretrieve(
+        "https://github.com/axinc-ai/ailia-models/raw/refs/heads/master/audio_processing/whisper/demo.wav",
+        "demo.wav"
+    )
+audio_waveform, sampling_rate = librosa.load(input_file_path, mono = True)
+
+# Infer
+speech = ailia_speech.Whisper()
+speech.initialize_model(model_path = "./models/", model_type = ailia_speech.AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_LARGE_V3_TURBO)
+recognized_text = speech.transcribe(audio_waveform, sampling_rate)
+for text in recognized_text:
+    print(text)
+```
+
+### Step mode
+
+In step mode, the audio is input in chunks and transcribed sequentially.
+
+```python
+import ailia_speech
+
+import librosa
+
+import os
+import urllib.request
+
+# Load target audio
+input_file_path = "demo.wav"
+if not os.path.exists(input_file_path):
+    urllib.request.urlretrieve(
+        "https://github.com/axinc-ai/ailia-models/raw/refs/heads/master/audio_processing/whisper/demo.wav",
+        "demo.wav"
+    )
+audio_waveform, sampling_rate = librosa.load(input_file_path, mono = True)
+
+# Infer
+speech = ailia_speech.Whisper()
+speech.initialize_model(model_path = "./models/", model_type = ailia_speech.AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_LARGE_V3_TURBO)
+speech.set_silent_threshold(silent_threshold = 0.5, speech_sec = 1.0, no_speech_sec = 0.5)
+for i in range(0, audio_waveform.shape[0], sampling_rate):
+    complete = False
+    if i + sampling_rate >= audio_waveform.shape[0]:
+        complete = True
+    recognized_text = speech.transcribe_step(audio_waveform[i:min(audio_waveform.shape[0], i + sampling_rate)], sampling_rate, complete)
+    for text in recognized_text:
+        print(text)
+```
+
+### Available model types
+
+Multiple models are available, offering different trade-offs between accuracy and speed. LARGE_V3_TURBO is the recommended choice.
+
+```
+ailia_speech.AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_TINY
+ailia_speech.AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_BASE
+ailia_speech.AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_SMALL
+ailia_speech.AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_MEDIUM
+ailia_speech.AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_LARGE
+ailia_speech.AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_LARGE_V3
+ailia_speech.AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_LARGE_V3_TURBO
+```
+
+## API specification
+
+https://github.com/axinc-ai/ailia-sdk
+
ailia_speech-1.3.2.1/README.md
@@ -0,0 +1,110 @@
+# ailia AI Speech Python API
+
+!! CAUTION !!
+“ailia” IS NOT OPEN SOURCE SOFTWARE (OSS).
+As long as user complies with the conditions stated in [License Document](https://ailia.ai/license/), user may use the Software for free of charge, but the Software is basically paid software.
+
+## About ailia AI Speech
+
+ailia AI Speech is a library to perform speech recognition using AI. It provides a C API for native applications, as well as a C# API well suited for Unity applications. Using ailia AI Speech, you can easily integrate AI powered speech recognition into your applications.
+
+## Install from pip
+
+You can install the ailia AI Speech free evaluation package with the following command.
+
+```
+pip3 install ailia_speech
+```
+
+## Install from package
+
+You can install ailia AI Speech from the package with the following command.
+
+```
+python3 bootstrap.py
+pip3 install ./
+```
+
+## Usage
+
+### Batch mode
+
+In batch mode, the entire audio is transcribed at once.
+
+```python
+import ailia_speech
+
+import librosa
+
+import os
+import urllib.request
+
+# Load target audio
+input_file_path = "demo.wav"
+if not os.path.exists(input_file_path):
+    urllib.request.urlretrieve(
+        "https://github.com/axinc-ai/ailia-models/raw/refs/heads/master/audio_processing/whisper/demo.wav",
+        "demo.wav"
+    )
+audio_waveform, sampling_rate = librosa.load(input_file_path, mono = True)
+
+# Infer
+speech = ailia_speech.Whisper()
+speech.initialize_model(model_path = "./models/", model_type = ailia_speech.AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_LARGE_V3_TURBO)
+recognized_text = speech.transcribe(audio_waveform, sampling_rate)
+for text in recognized_text:
+    print(text)
+```
+
+### Step mode
+
+In step mode, the audio is input in chunks and transcribed sequentially.
+
+```python
+import ailia_speech
+
+import librosa
+
+import os
+import urllib.request
+
+# Load target audio
+input_file_path = "demo.wav"
+if not os.path.exists(input_file_path):
+    urllib.request.urlretrieve(
+        "https://github.com/axinc-ai/ailia-models/raw/refs/heads/master/audio_processing/whisper/demo.wav",
+        "demo.wav"
+    )
+audio_waveform, sampling_rate = librosa.load(input_file_path, mono = True)
+
+# Infer
+speech = ailia_speech.Whisper()
+speech.initialize_model(model_path = "./models/", model_type = ailia_speech.AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_LARGE_V3_TURBO)
+speech.set_silent_threshold(silent_threshold = 0.5, speech_sec = 1.0, no_speech_sec = 0.5)
+for i in range(0, audio_waveform.shape[0], sampling_rate):
+    complete = False
+    if i + sampling_rate >= audio_waveform.shape[0]:
+        complete = True
+    recognized_text = speech.transcribe_step(audio_waveform[i:min(audio_waveform.shape[0], i + sampling_rate)], sampling_rate, complete)
+    for text in recognized_text:
+        print(text)
+```
+
+### Available model types
+
+Multiple models are available, offering different trade-offs between accuracy and speed. LARGE_V3_TURBO is the recommended choice.
+
+```
+ailia_speech.AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_TINY
+ailia_speech.AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_BASE
+ailia_speech.AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_SMALL
+ailia_speech.AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_MEDIUM
+ailia_speech.AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_LARGE
+ailia_speech.AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_LARGE_V3
+ailia_speech.AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_LARGE_V3_TURBO
+```
+
+## API specification
+
+https://github.com/axinc-ai/ailia-sdk
+
ailia_speech/__init__.py
@@ -67,6 +67,7 @@ AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_SMALL = (2)
 AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_MEDIUM = (3)
 AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_LARGE = (4)
 AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_LARGE_V3 = (5)
+AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_LARGE_V3_TURBO = (6)
 
 AILIA_SPEECH_TASK_TRANSCRIBE = (0)
 AILIA_SPEECH_TASK_TRANSLATE = (1)
@@ -260,6 +261,9 @@ dll.ailiaSpeechSetIntermediateCallback.argtypes = (c_void_p, AILIA_SPEECH_USER_A
 dll.ailiaSpeechSetLanguage.restype = c_int
 dll.ailiaSpeechSetLanguage.argtypes = (c_void_p, c_char_p)
 
+dll.ailiaSpeechSetSilentThreshold.restype = c_int
+dll.ailiaSpeechSetSilentThreshold.argtypes = (c_void_p, c_float, c_float, c_float)
+
 # ==============================================================================
 # model download
 # ==============================================================================
@@ -425,6 +429,12 @@ class Whisper(AiliaSpeechModel):
             decoder_path = "decoder_large_v3_fix_kv_cache.onnx"
             encoder_pb_path = "encoder_large_v3_weights.pb"
             decoder_pb_path = "decoder_large_v3_fix_kv_cache_weights.pb"
+        elif model_type == AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_LARGE_V3_TURBO:
+            encoder_path = "encoder_turbo.onnx"
+            decoder_path = "decoder_turbo_fix_kv_cache.onnx"
+            encoder_pb_path = "encoder_turbo_weights.pb"
+            decoder_pb_path = None
+            model_type = AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_LARGE_V3
         self._download_model(model_path, encoder_path, decoder_path, encoder_pb_path, decoder_pb_path)
         self._open_model(model_path + encoder_path, model_path + decoder_path, model_type)
         self._open_vad(model_path + "silero_vad.onnx", AILIA_SPEECH_VAD_TYPE_SILERO)
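The hunk above wires the new TURBO type into Whisper.initialize_model: it downloads encoder_turbo.onnx, decoder_turbo_fix_kv_cache.onnx and encoder_turbo_weights.pb (there is no separate decoder weight file), then remaps model_type to LARGE_V3 before opening the models and the Silero VAD. A minimal sketch of what this looks like from the caller's side, assuming the evaluation package is installed and "./models/" is a writable example directory:

```python
# Sketch only: first use downloads the turbo model files listed in the hunk
# above into model_path, after which inference runs through the LARGE_V3 path.
import ailia_speech

speech = ailia_speech.Whisper()
speech.initialize_model(
    model_path = "./models/",
    model_type = ailia_speech.AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_LARGE_V3_TURBO,
)
```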
@@ -459,6 +469,9 @@ class Whisper(AiliaSpeechModel):
         else:
             self._check(dll.ailiaSpeechOpenVadFileA(self._instance, p1, vad_type))
 
+    def set_silent_threshold(self, silent_threshold, speech_sec, no_speech_sec):
+        self._check(dll.ailiaSpeechSetSilentThreshold(self._instance, silent_threshold, speech_sec, no_speech_sec))
+
     def transcribe(self, audio_waveform, sampling_rate, lang = None):
         if len(audio_waveform.shape) == 1:
             channels = 1
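The new set_silent_threshold method simply forwards its three floats to ailiaSpeechSetSilentThreshold, bound earlier in this diff with argtypes (c_void_p, c_float, c_float, c_float). A hedged sketch of a call, reusing the values from the step-mode README example; the comments describing each parameter are assumptions inferred from the names, not documented behaviour:

```python
import ailia_speech

# Continuing the initialization sketch above (turbo model under ./models/).
speech = ailia_speech.Whisper()
speech.initialize_model(model_path = "./models/",
                        model_type = ailia_speech.AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_LARGE_V3_TURBO)

speech.set_silent_threshold(
    silent_threshold = 0.5,  # VAD score above which audio is treated as speech (assumed)
    speech_sec = 1.0,        # seconds of detected speech before a segment is transcribed (assumed)
    no_speech_sec = 0.5,     # seconds of silence that close the current segment (assumed)
)
```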
@@ -494,6 +507,43 @@ class Whisper(AiliaSpeechModel):
 
         self._check(dll.ailiaSpeechResetTranscribeState(self._instance))
 
+    def transcribe_step(self, audio_waveform, sampling_rate, complete, lang = None):
+        if len(audio_waveform.shape) == 1:
+            channels = 1
+        elif len(audio_waveform.shape) == 2:
+            channels = audio_waveform.shape[0]
+            audio_waveform = numpy.transpose(audio_waveform, (1, 0)).flatten()
+        else:
+            raise AiliaSpeechError(f"audio_waveform must be 1 channel or 2 channel", -1)
+
+        audio_waveform = numpy.ascontiguousarray(audio_waveform.astype(numpy.float32))
+
+        if lang is not None:
+            self._check(dll.ailiaSpeechSetLanguage(self._instance, self._string_buffer(lang)))
+
+        self._check(dll.ailiaSpeechPushInputData(self._instance, audio_waveform, channels, audio_waveform.shape[0] // channels, sampling_rate))
+        if complete:
+            self._check(dll.ailiaSpeechFinalizeInputData(self._instance))
+
+        while True:
+            buffered = ctypes.c_uint(0)
+            self._check(dll.ailiaSpeechBuffered(self._instance, ctypes.byref(buffered)))
+            if buffered.value == 0:
+                break
+
+            self._check(dll.ailiaSpeechTranscribe(self._instance))
+
+            count = ctypes.c_uint(0)
+            self._check(dll.ailiaSpeechGetTextCount(self._instance, ctypes.byref(count)))
+            results = []
+            for i in range(count.value):
+                text = AILIASpeechText()
+                self._check(dll.ailiaSpeechGetText(self._instance, ctypes.byref(text), AILIA_SPEECH_TEXT_VERSION, i))
+                yield {"text" : text.text.decode(), "time_stamp_begin" : text.time_stamp_begin, "time_stamp_end" : text.time_stamp_end, "person_id" : text.person_id, "language" : text.language.decode(), "confidence" : text.confidence}
+
+        if complete:
+            self._check(dll.ailiaSpeechResetTranscribeState(self._instance))
+
     def __del__(self):
         if self._instance:
             dll.ailiaSpeechDestroy(cast(self._instance, c_void_p))
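transcribe_step is a generator: each yielded item is a dict with the keys shown in the yield statement above (text, time_stamp_begin, time_stamp_end, person_id, language, confidence), and the optional lang argument is forwarded to ailiaSpeechSetLanguage. Below is a sketch of a streaming consumer that feeds one-second chunks, as in the README step-mode example, and prints timestamped segments; the chunking, the output formatting, and the local demo.wav / ./models/ paths are illustrative assumptions:

```python
# Sketch: streaming consumption of transcribe_step. Assumes librosa and the
# ailia_speech evaluation package are installed and demo.wav exists locally.
import librosa
import ailia_speech

audio_waveform, sampling_rate = librosa.load("demo.wav", mono = True)

speech = ailia_speech.Whisper()
speech.initialize_model(model_path = "./models/",
                        model_type = ailia_speech.AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_LARGE_V3_TURBO)
speech.set_silent_threshold(silent_threshold = 0.5, speech_sec = 1.0, no_speech_sec = 0.5)

# Feed one second of samples at a time, as in the README step-mode example.
for i in range(0, audio_waveform.shape[0], sampling_rate):
    chunk = audio_waveform[i:i + sampling_rate]
    complete = i + sampling_rate >= audio_waveform.shape[0]
    # Each yielded item is a dict: text, time_stamp_begin, time_stamp_end,
    # person_id, language and confidence (see the yield statement above).
    for segment in speech.transcribe_step(chunk, sampling_rate, complete):
        print("[%.2f - %.2f] (%s, conf %.2f) %s" % (
            segment["time_stamp_begin"], segment["time_stamp_end"],
            segment["language"], segment["confidence"], segment["text"]))
```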
Binary file: ailia_speech-1.3.2.1/ailia_speech/linux/arm64-v8a/libailia_speech.so
Binary file: ailia_speech/linux/x64/libailia_speech.so
Binary file: ailia_speech/mac/libailia_speech.dylib
Binary file: ailia_speech-1.3.2.1/ailia_speech/windows/x64/ailia_speech.dll
ailia_speech-1.3.2.1/ailia_speech.egg-info/PKG-INFO
@@ -0,0 +1,123 @@
+Metadata-Version: 2.1
+Name: ailia_speech
+Version: 1.3.2.1
+Summary: ailia AI Speech
+Home-page: https://ailia.jp/
+Author: ax Inc.
+Author-email: contact@axinc.jp
+License: https://ailia.ai/en/license/
+Requires-Python: >3.6
+Description-Content-Type: text/markdown
+Requires-Dist: ailia
+Requires-Dist: ailia_tokenizer
+
+# ailia AI Speech Python API
+
+!! CAUTION !!
+“ailia” IS NOT OPEN SOURCE SOFTWARE (OSS).
+As long as user complies with the conditions stated in [License Document](https://ailia.ai/license/), user may use the Software for free of charge, but the Software is basically paid software.
+
+## About ailia AI Speech
+
+ailia AI Speech is a library to perform speech recognition using AI. It provides a C API for native applications, as well as a C# API well suited for Unity applications. Using ailia AI Speech, you can easily integrate AI powered speech recognition into your applications.
+
+## Install from pip
+
+You can install the ailia AI Speech free evaluation package with the following command.
+
+```
+pip3 install ailia_speech
+```
+
+## Install from package
+
+You can install ailia AI Speech from the package with the following command.
+
+```
+python3 bootstrap.py
+pip3 install ./
+```
+
+## Usage
+
+### Batch mode
+
+In batch mode, the entire audio is transcribed at once.
+
+```python
+import ailia_speech
+
+import librosa
+
+import os
+import urllib.request
+
+# Load target audio
+input_file_path = "demo.wav"
+if not os.path.exists(input_file_path):
+    urllib.request.urlretrieve(
+        "https://github.com/axinc-ai/ailia-models/raw/refs/heads/master/audio_processing/whisper/demo.wav",
+        "demo.wav"
+    )
+audio_waveform, sampling_rate = librosa.load(input_file_path, mono = True)
+
+# Infer
+speech = ailia_speech.Whisper()
+speech.initialize_model(model_path = "./models/", model_type = ailia_speech.AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_LARGE_V3_TURBO)
+recognized_text = speech.transcribe(audio_waveform, sampling_rate)
+for text in recognized_text:
+    print(text)
+```
+
+### Step mode
+
+In step mode, the audio is input in chunks and transcribed sequentially.
+
+```python
+import ailia_speech
+
+import librosa
+
+import os
+import urllib.request
+
+# Load target audio
+input_file_path = "demo.wav"
+if not os.path.exists(input_file_path):
+    urllib.request.urlretrieve(
+        "https://github.com/axinc-ai/ailia-models/raw/refs/heads/master/audio_processing/whisper/demo.wav",
+        "demo.wav"
+    )
+audio_waveform, sampling_rate = librosa.load(input_file_path, mono = True)
+
+# Infer
+speech = ailia_speech.Whisper()
+speech.initialize_model(model_path = "./models/", model_type = ailia_speech.AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_LARGE_V3_TURBO)
+speech.set_silent_threshold(silent_threshold = 0.5, speech_sec = 1.0, no_speech_sec = 0.5)
+for i in range(0, audio_waveform.shape[0], sampling_rate):
+    complete = False
+    if i + sampling_rate >= audio_waveform.shape[0]:
+        complete = True
+    recognized_text = speech.transcribe_step(audio_waveform[i:min(audio_waveform.shape[0], i + sampling_rate)], sampling_rate, complete)
+    for text in recognized_text:
+        print(text)
+```
+
+### Available model types
+
+Multiple models are available, offering different trade-offs between accuracy and speed. LARGE_V3_TURBO is the recommended choice.
+
+```
+ailia_speech.AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_TINY
+ailia_speech.AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_BASE
+ailia_speech.AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_SMALL
+ailia_speech.AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_MEDIUM
+ailia_speech.AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_LARGE
+ailia_speech.AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_LARGE_V3
+ailia_speech.AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_LARGE_V3_TURBO
+```
+
+## API specification
+
+https://github.com/axinc-ai/ailia-sdk
+
ailia_speech-1.3.0.5/PKG-INFO DELETED
@@ -1,71 +0,0 @@
-Metadata-Version: 2.1
-Name: ailia_speech
-Version: 1.3.0.5
-Summary: ailia AI Speech
-Home-page: https://ailia.jp/
-Author: ax Inc.
-Author-email: contact@axinc.jp
-License: https://ailia.ai/en/license/
-Requires-Python: >3.6
-Description-Content-Type: text/markdown
-Requires-Dist: ailia
-Requires-Dist: ailia_tokenizer
-
-# ailia AI Speech Python API
-
-!! CAUTION !!
-“ailia” IS NOT OPEN SOURCE SOFTWARE (OSS).
-As long as user complies with the conditions stated in [License Document](https://ailia.ai/license/), user may use the Software for free of charge, but the Software is basically paid software.
-
-## About ailia AI Speech
-
-ailia AI Speech is a library to perform speech recognition using AI. It provides a C API for native applications, as well as a C# API well suited for Unity applications. Using ailia AI Speech, you can easily integrate AI powered speech recognition into your applications.
-
-## Install from pip
-
-You can install the ailia AI Speech free evaluation package with the following command.
-
-```
-pip3 install ailia_speech
-```
-
-## Install from package
-
-You can install the ailia AI Speech from Package with the following command.
-
-```
-python3 bootstrap.py
-pip3 install ./
-```
-
-## Usage
-
-```python
-import ailia_speech
-
-import librosa
-
-import os
-import urllib.request
-
-# Load target audio
-input_file_path = "demo.wav"
-if not os.path.exists(input_file_path):
-    urllib.request.urlretrieve(
-        "https://github.com/axinc-ai/ailia-models/raw/refs/heads/master/audio_processing/whisper/demo.wa",
-        "demo.wav"
-    )
-audio_waveform, sampling_rate = librosa.load(input_file_path, mono=True)
-
-# Infer
-speech = ailia_speech.Whisper()
-speech.initialize_model(model_path = "./models/", model_type = ailia_speech.AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_SMALL)
-recognized_text = speech.transcribe(audio_waveform, sampling_rate)
-for text in recognized_text:
-    print(text)
-```
-
-## API specification
-
-https://github.com/axinc-ai/ailia-sdk
-
ailia_speech-1.3.0.5/README.md DELETED
@@ -1,58 +0,0 @@
-# ailia AI Speech Python API
-
-!! CAUTION !!
-“ailia” IS NOT OPEN SOURCE SOFTWARE (OSS).
-As long as user complies with the conditions stated in [License Document](https://ailia.ai/license/), user may use the Software for free of charge, but the Software is basically paid software.
-
-## About ailia AI Speech
-
-ailia AI Speech is a library to perform speech recognition using AI. It provides a C API for native applications, as well as a C# API well suited for Unity applications. Using ailia AI Speech, you can easily integrate AI powered speech recognition into your applications.
-
-## Install from pip
-
-You can install the ailia AI Speech free evaluation package with the following command.
-
-```
-pip3 install ailia_speech
-```
-
-## Install from package
-
-You can install the ailia AI Speech from Package with the following command.
-
-```
-python3 bootstrap.py
-pip3 install ./
-```
-
-## Usage
-
-```python
-import ailia_speech
-
-import librosa
-
-import os
-import urllib.request
-
-# Load target audio
-input_file_path = "demo.wav"
-if not os.path.exists(input_file_path):
-    urllib.request.urlretrieve(
-        "https://github.com/axinc-ai/ailia-models/raw/refs/heads/master/audio_processing/whisper/demo.wa",
-        "demo.wav"
-    )
-audio_waveform, sampling_rate = librosa.load(input_file_path, mono=True)
-
-# Infer
-speech = ailia_speech.Whisper()
-speech.initialize_model(model_path = "./models/", model_type = ailia_speech.AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_SMALL)
-recognized_text = speech.transcribe(audio_waveform, sampling_rate)
-for text in recognized_text:
-    print(text)
-```
-
-## API specification
-
-https://github.com/axinc-ai/ailia-sdk
-
Binary file: ailia_speech-1.3.0.5/ailia_speech/linux/arm64-v8a/libailia_speech.so (deleted)
Binary file: ailia_speech-1.3.0.5/ailia_speech/windows/x64/ailia_speech.dll (deleted)
ailia_speech-1.3.0.5/ailia_speech.egg-info/PKG-INFO DELETED
@@ -1,71 +0,0 @@
-Metadata-Version: 2.1
-Name: ailia_speech
-Version: 1.3.0.5
-Summary: ailia AI Speech
-Home-page: https://ailia.jp/
-Author: ax Inc.
-Author-email: contact@axinc.jp
-License: https://ailia.ai/en/license/
-Requires-Python: >3.6
-Description-Content-Type: text/markdown
-Requires-Dist: ailia
-Requires-Dist: ailia_tokenizer
-
-# ailia AI Speech Python API
-
-!! CAUTION !!
-“ailia” IS NOT OPEN SOURCE SOFTWARE (OSS).
-As long as user complies with the conditions stated in [License Document](https://ailia.ai/license/), user may use the Software for free of charge, but the Software is basically paid software.
-
-## About ailia AI Speech
-
-ailia AI Speech is a library to perform speech recognition using AI. It provides a C API for native applications, as well as a C# API well suited for Unity applications. Using ailia AI Speech, you can easily integrate AI powered speech recognition into your applications.
-
-## Install from pip
-
-You can install the ailia AI Speech free evaluation package with the following command.
-
-```
-pip3 install ailia_speech
-```
-
-## Install from package
-
-You can install the ailia AI Speech from Package with the following command.
-
-```
-python3 bootstrap.py
-pip3 install ./
-```
-
-## Usage
-
-```python
-import ailia_speech
-
-import librosa
-
-import os
-import urllib.request
-
-# Load target audio
-input_file_path = "demo.wav"
-if not os.path.exists(input_file_path):
-    urllib.request.urlretrieve(
-        "https://github.com/axinc-ai/ailia-models/raw/refs/heads/master/audio_processing/whisper/demo.wa",
-        "demo.wav"
-    )
-audio_waveform, sampling_rate = librosa.load(input_file_path, mono=True)
-
-# Infer
-speech = ailia_speech.Whisper()
-speech.initialize_model(model_path = "./models/", model_type = ailia_speech.AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_SMALL)
-recognized_text = speech.transcribe(audio_waveform, sampling_rate)
-for text in recognized_text:
-    print(text)
-```
-
-## API specification
-
-https://github.com/axinc-ai/ailia-sdk
-
File without changes: ailia_speech/LICENSE_AILIA_EN.pdf
File without changes: ailia_speech/LICENSE_AILIA_JA.pdf
File without changes: ailia_speech.egg-info/SOURCES.txt
File without changes: ailia_speech.egg-info/dependency_links.txt
File without changes: ailia_speech.egg-info/requires.txt
File without changes: ailia_speech.egg-info/top_level.txt
File without changes: setup.cfg