ailia-speech 1.3.0.4__py3-none-any.whl → 1.3.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of ailia-speech might be problematic. Click here for more details.
- ailia_speech/__init__.py +58 -9
- {ailia_speech-1.3.0.4.data → ailia_speech-1.3.1.0.data}/scripts/__init__.py +58 -9
- ailia_speech-1.3.1.0.dist-info/METADATA +122 -0
- {ailia_speech-1.3.0.4.dist-info → ailia_speech-1.3.1.0.dist-info}/RECORD +6 -6
- ailia_speech-1.3.0.4.dist-info/METADATA +0 -70
- {ailia_speech-1.3.0.4.dist-info → ailia_speech-1.3.1.0.dist-info}/WHEEL +0 -0
- {ailia_speech-1.3.0.4.dist-info → ailia_speech-1.3.1.0.dist-info}/top_level.txt +0 -0
ailia_speech/__init__.py
CHANGED
|
@@ -67,6 +67,7 @@ AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_SMALL = (2)
|
|
|
67
67
|
AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_MEDIUM = (3)
|
|
68
68
|
AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_LARGE = (4)
|
|
69
69
|
AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_LARGE_V3 = (5)
|
|
70
|
+
AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_LARGE_V3_TURBO = (6)
|
|
70
71
|
|
|
71
72
|
AILIA_SPEECH_TASK_TRANSCRIBE = (0)
|
|
72
73
|
AILIA_SPEECH_TASK_TRANSLATE = (1)
|
|
@@ -425,6 +426,12 @@ class Whisper(AiliaSpeechModel):
|
|
|
425
426
|
decoder_path = "decoder_large_v3_fix_kv_cache.onnx"
|
|
426
427
|
encoder_pb_path = "encoder_large_v3_weights.pb"
|
|
427
428
|
decoder_pb_path = "decoder_large_v3_fix_kv_cache_weights.pb"
|
|
429
|
+
elif model_type == AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_LARGE_V3_TURBO:
|
|
430
|
+
encoder_path = "encoder_turbo.onnx"
|
|
431
|
+
decoder_path = "decoder_turbo_fix_kv_cache.onnx"
|
|
432
|
+
encoder_pb_path = "encoder_turbo_weights.pb"
|
|
433
|
+
decoder_pb_path = None
|
|
434
|
+
model_type = AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_LARGE_V3
|
|
428
435
|
self._download_model(model_path, encoder_path, decoder_path, encoder_pb_path, decoder_pb_path)
|
|
429
436
|
self._open_model(model_path + encoder_path, model_path + decoder_path, model_type)
|
|
430
437
|
self._open_vad(model_path + "silero_vad.onnx", AILIA_SPEECH_VAD_TYPE_SILERO)
|
|
@@ -475,19 +482,61 @@ class Whisper(AiliaSpeechModel):
|
|
|
475
482
|
|
|
476
483
|
self._check(dll.ailiaSpeechPushInputData(self._instance, audio_waveform, channels, audio_waveform.shape[0] // channels, sampling_rate))
|
|
477
484
|
self._check(dll.ailiaSpeechFinalizeInputData(self._instance))
|
|
478
|
-
self._check(dll.ailiaSpeechTranscribe(self._instance))
|
|
479
485
|
|
|
480
|
-
|
|
481
|
-
|
|
482
|
-
|
|
483
|
-
|
|
484
|
-
|
|
485
|
-
|
|
486
|
-
|
|
486
|
+
while True:
|
|
487
|
+
complete = ctypes.c_uint(0)
|
|
488
|
+
self._check(dll.ailiaSpeechComplete(self._instance, ctypes.byref(complete)))
|
|
489
|
+
if complete.value == 1:
|
|
490
|
+
break
|
|
491
|
+
|
|
492
|
+
self._check(dll.ailiaSpeechTranscribe(self._instance))
|
|
493
|
+
|
|
494
|
+
count = ctypes.c_uint(0)
|
|
495
|
+
self._check(dll.ailiaSpeechGetTextCount(self._instance, ctypes.byref(count)))
|
|
496
|
+
results = []
|
|
497
|
+
for i in range(count.value):
|
|
498
|
+
text = AILIASpeechText()
|
|
499
|
+
self._check(dll.ailiaSpeechGetText(self._instance, ctypes.byref(text), AILIA_SPEECH_TEXT_VERSION, i))
|
|
500
|
+
yield {"text" : text.text.decode(), "time_stamp_begin" : text.time_stamp_begin, "time_stamp_end" : text.time_stamp_end, "person_id" : text.person_id, "language" : text.language.decode(), "confidence" : text.confidence}
|
|
487
501
|
|
|
488
502
|
self._check(dll.ailiaSpeechResetTranscribeState(self._instance))
|
|
489
503
|
|
|
490
|
-
|
|
504
|
+
def transcribe_step(self, audio_waveform, sampling_rate, complete, lang = None):
|
|
505
|
+
if len(audio_waveform.shape) == 1:
|
|
506
|
+
channels = 1
|
|
507
|
+
elif len(audio_waveform.shape) == 2:
|
|
508
|
+
channels = audio_waveform.shape[0]
|
|
509
|
+
audio_waveform = numpy.transpose(audio_waveform, (1, 0)).flatten()
|
|
510
|
+
else:
|
|
511
|
+
raise AiliaSpeechError(f"audio_waveform must be 1 channel or 2 channel", -1)
|
|
512
|
+
|
|
513
|
+
audio_waveform = numpy.ascontiguousarray(audio_waveform.astype(numpy.float32))
|
|
514
|
+
|
|
515
|
+
if lang is not None:
|
|
516
|
+
self._check(dll.ailiaSpeechSetLanguage(self._instance, self._string_buffer(lang)))
|
|
517
|
+
|
|
518
|
+
self._check(dll.ailiaSpeechPushInputData(self._instance, audio_waveform, channels, audio_waveform.shape[0] // channels, sampling_rate))
|
|
519
|
+
if complete:
|
|
520
|
+
self._check(dll.ailiaSpeechFinalizeInputData(self._instance))
|
|
521
|
+
|
|
522
|
+
while True:
|
|
523
|
+
buffered = ctypes.c_uint(0)
|
|
524
|
+
self._check(dll.ailiaSpeechBuffered(self._instance, ctypes.byref(buffered)))
|
|
525
|
+
if buffered.value == 0:
|
|
526
|
+
break
|
|
527
|
+
|
|
528
|
+
self._check(dll.ailiaSpeechTranscribe(self._instance))
|
|
529
|
+
|
|
530
|
+
count = ctypes.c_uint(0)
|
|
531
|
+
self._check(dll.ailiaSpeechGetTextCount(self._instance, ctypes.byref(count)))
|
|
532
|
+
results = []
|
|
533
|
+
for i in range(count.value):
|
|
534
|
+
text = AILIASpeechText()
|
|
535
|
+
self._check(dll.ailiaSpeechGetText(self._instance, ctypes.byref(text), AILIA_SPEECH_TEXT_VERSION, i))
|
|
536
|
+
yield {"text" : text.text.decode(), "time_stamp_begin" : text.time_stamp_begin, "time_stamp_end" : text.time_stamp_end, "person_id" : text.person_id, "language" : text.language.decode(), "confidence" : text.confidence}
|
|
537
|
+
|
|
538
|
+
if complete:
|
|
539
|
+
self._check(dll.ailiaSpeechResetTranscribeState(self._instance))
|
|
491
540
|
|
|
492
541
|
def __del__(self):
|
|
493
542
|
if self._instance:
|
|
@@ -67,6 +67,7 @@ AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_SMALL = (2)
|
|
|
67
67
|
AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_MEDIUM = (3)
|
|
68
68
|
AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_LARGE = (4)
|
|
69
69
|
AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_LARGE_V3 = (5)
|
|
70
|
+
AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_LARGE_V3_TURBO = (6)
|
|
70
71
|
|
|
71
72
|
AILIA_SPEECH_TASK_TRANSCRIBE = (0)
|
|
72
73
|
AILIA_SPEECH_TASK_TRANSLATE = (1)
|
|
@@ -425,6 +426,12 @@ class Whisper(AiliaSpeechModel):
|
|
|
425
426
|
decoder_path = "decoder_large_v3_fix_kv_cache.onnx"
|
|
426
427
|
encoder_pb_path = "encoder_large_v3_weights.pb"
|
|
427
428
|
decoder_pb_path = "decoder_large_v3_fix_kv_cache_weights.pb"
|
|
429
|
+
elif model_type == AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_LARGE_V3_TURBO:
|
|
430
|
+
encoder_path = "encoder_turbo.onnx"
|
|
431
|
+
decoder_path = "decoder_turbo_fix_kv_cache.onnx"
|
|
432
|
+
encoder_pb_path = "encoder_turbo_weights.pb"
|
|
433
|
+
decoder_pb_path = None
|
|
434
|
+
model_type = AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_LARGE_V3
|
|
428
435
|
self._download_model(model_path, encoder_path, decoder_path, encoder_pb_path, decoder_pb_path)
|
|
429
436
|
self._open_model(model_path + encoder_path, model_path + decoder_path, model_type)
|
|
430
437
|
self._open_vad(model_path + "silero_vad.onnx", AILIA_SPEECH_VAD_TYPE_SILERO)
|
|
@@ -475,19 +482,61 @@ class Whisper(AiliaSpeechModel):
|
|
|
475
482
|
|
|
476
483
|
self._check(dll.ailiaSpeechPushInputData(self._instance, audio_waveform, channels, audio_waveform.shape[0] // channels, sampling_rate))
|
|
477
484
|
self._check(dll.ailiaSpeechFinalizeInputData(self._instance))
|
|
478
|
-
self._check(dll.ailiaSpeechTranscribe(self._instance))
|
|
479
485
|
|
|
480
|
-
|
|
481
|
-
|
|
482
|
-
|
|
483
|
-
|
|
484
|
-
|
|
485
|
-
|
|
486
|
-
|
|
486
|
+
while True:
|
|
487
|
+
complete = ctypes.c_uint(0)
|
|
488
|
+
self._check(dll.ailiaSpeechComplete(self._instance, ctypes.byref(complete)))
|
|
489
|
+
if complete.value == 1:
|
|
490
|
+
break
|
|
491
|
+
|
|
492
|
+
self._check(dll.ailiaSpeechTranscribe(self._instance))
|
|
493
|
+
|
|
494
|
+
count = ctypes.c_uint(0)
|
|
495
|
+
self._check(dll.ailiaSpeechGetTextCount(self._instance, ctypes.byref(count)))
|
|
496
|
+
results = []
|
|
497
|
+
for i in range(count.value):
|
|
498
|
+
text = AILIASpeechText()
|
|
499
|
+
self._check(dll.ailiaSpeechGetText(self._instance, ctypes.byref(text), AILIA_SPEECH_TEXT_VERSION, i))
|
|
500
|
+
yield {"text" : text.text.decode(), "time_stamp_begin" : text.time_stamp_begin, "time_stamp_end" : text.time_stamp_end, "person_id" : text.person_id, "language" : text.language.decode(), "confidence" : text.confidence}
|
|
487
501
|
|
|
488
502
|
self._check(dll.ailiaSpeechResetTranscribeState(self._instance))
|
|
489
503
|
|
|
490
|
-
|
|
504
|
+
def transcribe_step(self, audio_waveform, sampling_rate, complete, lang = None):
|
|
505
|
+
if len(audio_waveform.shape) == 1:
|
|
506
|
+
channels = 1
|
|
507
|
+
elif len(audio_waveform.shape) == 2:
|
|
508
|
+
channels = audio_waveform.shape[0]
|
|
509
|
+
audio_waveform = numpy.transpose(audio_waveform, (1, 0)).flatten()
|
|
510
|
+
else:
|
|
511
|
+
raise AiliaSpeechError(f"audio_waveform must be 1 channel or 2 channel", -1)
|
|
512
|
+
|
|
513
|
+
audio_waveform = numpy.ascontiguousarray(audio_waveform.astype(numpy.float32))
|
|
514
|
+
|
|
515
|
+
if lang is not None:
|
|
516
|
+
self._check(dll.ailiaSpeechSetLanguage(self._instance, self._string_buffer(lang)))
|
|
517
|
+
|
|
518
|
+
self._check(dll.ailiaSpeechPushInputData(self._instance, audio_waveform, channels, audio_waveform.shape[0] // channels, sampling_rate))
|
|
519
|
+
if complete:
|
|
520
|
+
self._check(dll.ailiaSpeechFinalizeInputData(self._instance))
|
|
521
|
+
|
|
522
|
+
while True:
|
|
523
|
+
buffered = ctypes.c_uint(0)
|
|
524
|
+
self._check(dll.ailiaSpeechBuffered(self._instance, ctypes.byref(buffered)))
|
|
525
|
+
if buffered.value == 0:
|
|
526
|
+
break
|
|
527
|
+
|
|
528
|
+
self._check(dll.ailiaSpeechTranscribe(self._instance))
|
|
529
|
+
|
|
530
|
+
count = ctypes.c_uint(0)
|
|
531
|
+
self._check(dll.ailiaSpeechGetTextCount(self._instance, ctypes.byref(count)))
|
|
532
|
+
results = []
|
|
533
|
+
for i in range(count.value):
|
|
534
|
+
text = AILIASpeechText()
|
|
535
|
+
self._check(dll.ailiaSpeechGetText(self._instance, ctypes.byref(text), AILIA_SPEECH_TEXT_VERSION, i))
|
|
536
|
+
yield {"text" : text.text.decode(), "time_stamp_begin" : text.time_stamp_begin, "time_stamp_end" : text.time_stamp_end, "person_id" : text.person_id, "language" : text.language.decode(), "confidence" : text.confidence}
|
|
537
|
+
|
|
538
|
+
if complete:
|
|
539
|
+
self._check(dll.ailiaSpeechResetTranscribeState(self._instance))
|
|
491
540
|
|
|
492
541
|
def __del__(self):
|
|
493
542
|
if self._instance:
|
|
@@ -0,0 +1,122 @@
|
|
|
1
|
+
Metadata-Version: 2.1
|
|
2
|
+
Name: ailia_speech
|
|
3
|
+
Version: 1.3.1.0
|
|
4
|
+
Summary: ailia AI Speech
|
|
5
|
+
Home-page: https://ailia.jp/
|
|
6
|
+
Author: ax Inc.
|
|
7
|
+
Author-email: contact@axinc.jp
|
|
8
|
+
License: https://ailia.ai/en/license/
|
|
9
|
+
Requires-Python: >3.6
|
|
10
|
+
Description-Content-Type: text/markdown
|
|
11
|
+
Requires-Dist: ailia
|
|
12
|
+
Requires-Dist: ailia-tokenizer
|
|
13
|
+
|
|
14
|
+
# ailia AI Speech Python API
|
|
15
|
+
|
|
16
|
+
!! CAUTION !!
|
|
17
|
+
“ailia” IS NOT OPEN SOURCE SOFTWARE (OSS).
|
|
18
|
+
As long as user complies with the conditions stated in [License Document](https://ailia.ai/license/), user may use the Software for free of charge, but the Software is basically paid software.
|
|
19
|
+
|
|
20
|
+
## About ailia AI Speech
|
|
21
|
+
|
|
22
|
+
ailia AI Speech is a library to perform speech recognition using AI. It provides a C API for native applications, as well as a C# API well suited for Unity applications. Using ailia AI Speech, you can easily integrate AI powered speech recognition into your applications.
|
|
23
|
+
|
|
24
|
+
## Install from pip
|
|
25
|
+
|
|
26
|
+
You can install the ailia AI Speech free evaluation package with the following command.
|
|
27
|
+
|
|
28
|
+
```
|
|
29
|
+
pip3 install ailia_speech
|
|
30
|
+
```
|
|
31
|
+
|
|
32
|
+
## Install from package
|
|
33
|
+
|
|
34
|
+
You can install the ailia AI Speech from Package with the following command.
|
|
35
|
+
|
|
36
|
+
```
|
|
37
|
+
python3 bootstrap.py
|
|
38
|
+
pip3 install ./
|
|
39
|
+
```
|
|
40
|
+
|
|
41
|
+
## Usage
|
|
42
|
+
|
|
43
|
+
### Batch mode
|
|
44
|
+
|
|
45
|
+
In batch mode, the entire audio is transcribed at once.
|
|
46
|
+
|
|
47
|
+
```python
|
|
48
|
+
import ailia_speech
|
|
49
|
+
|
|
50
|
+
import librosa
|
|
51
|
+
|
|
52
|
+
import os
|
|
53
|
+
import urllib.request
|
|
54
|
+
|
|
55
|
+
# Load target audio
|
|
56
|
+
input_file_path = "demo.wav"
|
|
57
|
+
if not os.path.exists(input_file_path):
|
|
58
|
+
urllib.request.urlretrieve(
|
|
59
|
+
"https://github.com/axinc-ai/ailia-models/raw/refs/heads/master/audio_processing/whisper/demo.wa",
|
|
60
|
+
"demo.wav"
|
|
61
|
+
)
|
|
62
|
+
audio_waveform, sampling_rate = librosa.load(input_file_path, mono = True)
|
|
63
|
+
|
|
64
|
+
# Infer
|
|
65
|
+
speech = ailia_speech.Whisper()
|
|
66
|
+
speech.initialize_model(model_path = "./models/", model_type = ailia_speech.AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_LARGE_V3_TURBO)
|
|
67
|
+
recognized_text = speech.transcribe(audio_waveform, sampling_rate)
|
|
68
|
+
for text in recognized_text:
|
|
69
|
+
print(text)
|
|
70
|
+
```
|
|
71
|
+
|
|
72
|
+
### Step mode
|
|
73
|
+
|
|
74
|
+
In step mode, the audio is input in chunks and transcribed sequentially.
|
|
75
|
+
|
|
76
|
+
```python
|
|
77
|
+
import ailia_speech
|
|
78
|
+
|
|
79
|
+
import librosa
|
|
80
|
+
|
|
81
|
+
import os
|
|
82
|
+
import urllib.request
|
|
83
|
+
|
|
84
|
+
# Load target audio
|
|
85
|
+
input_file_path = "demo.wav"
|
|
86
|
+
if not os.path.exists(input_file_path):
|
|
87
|
+
urllib.request.urlretrieve(
|
|
88
|
+
"https://github.com/axinc-ai/ailia-models/raw/refs/heads/master/audio_processing/whisper/demo.wa",
|
|
89
|
+
"demo.wav"
|
|
90
|
+
)
|
|
91
|
+
audio_waveform, sampling_rate = librosa.load(input_file_path, mono = True)
|
|
92
|
+
|
|
93
|
+
# Infer
|
|
94
|
+
speech = ailia_speech.Whisper()
|
|
95
|
+
speech.initialize_model(model_path = "./models/", model_type = ailia_speech.AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_LARGE_V3_TURBO)
|
|
96
|
+
for i in range(0, audio_waveform.shape[0], sampling_rate):
|
|
97
|
+
complete = False
|
|
98
|
+
if i + sampling_rate >= audio_waveform.shape[0]:
|
|
99
|
+
complete = True
|
|
100
|
+
recognized_text = speech.transcribe_step(audio_waveform[i:min(audio_waveform.shape[0], i + sampling_rate)], sampling_rate, complete)
|
|
101
|
+
for text in recognized_text:
|
|
102
|
+
print(text)
|
|
103
|
+
```
|
|
104
|
+
|
|
105
|
+
### Available model types
|
|
106
|
+
|
|
107
|
+
It is possible to select multiple models according to accuracy and speed. LARGE_V3_TURBO is the most recommended.
|
|
108
|
+
|
|
109
|
+
```
|
|
110
|
+
ailia_speech.AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_TINY
|
|
111
|
+
ailia_speech.AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_BASE
|
|
112
|
+
ailia_speech.AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_SMALL
|
|
113
|
+
ailia_speech.AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_MEDIUM
|
|
114
|
+
ailia_speech.AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_LARGE
|
|
115
|
+
ailia_speech.AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_LARGE_V3
|
|
116
|
+
ailia_speech.AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_LARGE_V3_TURBO
|
|
117
|
+
```
|
|
118
|
+
|
|
119
|
+
## API specification
|
|
120
|
+
|
|
121
|
+
https://github.com/axinc-ai/ailia-sdk
|
|
122
|
+
|
|
@@ -1,12 +1,12 @@
|
|
|
1
1
|
ailia_speech/LICENSE_AILIA_EN.pdf,sha256=1DzVViPnw1uAS8gJ5a8uN3iZNNR5I1ItIXmezHfUpeM,70149
|
|
2
2
|
ailia_speech/LICENSE_AILIA_JA.pdf,sha256=s628QN47S2bNqIfuSjm2LBf0vIluv2df6MSemn6Ksmw,174134
|
|
3
|
-
ailia_speech/__init__.py,sha256=
|
|
3
|
+
ailia_speech/__init__.py,sha256=NMFB_mXj9ed9YcbEXrjvm-82OOYRxzSBENKCuxbtz6M,27915
|
|
4
4
|
ailia_speech/linux/arm64-v8a/libailia_speech.so,sha256=JAOwnBr7lbiMZmPCM99pd4vJQ08ZuXDPpq-FurrXSnE,166096
|
|
5
5
|
ailia_speech/linux/x64/libailia_speech.so,sha256=WbFvA5wKTgS_Zx8ErT7WBKJbzOUexavr4nP4EkLNawQ,171360
|
|
6
6
|
ailia_speech/mac/libailia_speech.dylib,sha256=-JAC40yLslAVMvfh6LhDvP3Zyt3hIT3WZc7wa9-07zU,317112
|
|
7
7
|
ailia_speech/windows/x64/ailia_speech.dll,sha256=WJCOHi0Na4tdMG1RT7dA7yAoWumiGSWeW1vxUtiXDS8,126464
|
|
8
|
-
ailia_speech-1.3.0.
|
|
9
|
-
ailia_speech-1.3.0.
|
|
10
|
-
ailia_speech-1.3.0.
|
|
11
|
-
ailia_speech-1.3.0.
|
|
12
|
-
ailia_speech-1.3.0.
|
|
8
|
+
ailia_speech-1.3.1.0.data/scripts/__init__.py,sha256=NMFB_mXj9ed9YcbEXrjvm-82OOYRxzSBENKCuxbtz6M,27915
|
|
9
|
+
ailia_speech-1.3.1.0.dist-info/METADATA,sha256=x5WBVorX7b9Mubn69FVh75XS2b3iDIy7ZM38F9yyGRo,3610
|
|
10
|
+
ailia_speech-1.3.1.0.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
|
|
11
|
+
ailia_speech-1.3.1.0.dist-info/top_level.txt,sha256=Ou9XeJ9AvdK8eutw07oosCthftD1tRYzAgNY2BrYhDc,13
|
|
12
|
+
ailia_speech-1.3.1.0.dist-info/RECORD,,
|
|
@@ -1,70 +0,0 @@
|
|
|
1
|
-
Metadata-Version: 2.1
|
|
2
|
-
Name: ailia_speech
|
|
3
|
-
Version: 1.3.0.4
|
|
4
|
-
Summary: ailia AI Speech
|
|
5
|
-
Home-page: https://ailia.jp/
|
|
6
|
-
Author: ax Inc.
|
|
7
|
-
Author-email: contact@axinc.jp
|
|
8
|
-
License: https://ailia.ai/en/license/
|
|
9
|
-
Requires-Python: >3.6
|
|
10
|
-
Description-Content-Type: text/markdown
|
|
11
|
-
Requires-Dist: ailia
|
|
12
|
-
Requires-Dist: ailia-tokenizer
|
|
13
|
-
|
|
14
|
-
# ailia AI Speech Python API
|
|
15
|
-
|
|
16
|
-
!! CAUTION !!
|
|
17
|
-
“ailia” IS NOT OPEN SOURCE SOFTWARE (OSS).
|
|
18
|
-
As long as user complies with the conditions stated in [License Document](https://ailia.ai/license/), user may use the Software for free of charge, but the Software is basically paid software.
|
|
19
|
-
|
|
20
|
-
## About ailia AI Speech
|
|
21
|
-
|
|
22
|
-
ailia AI Speech is a library to perform speech recognition using AI. It provides a C API for native applications, as well as a C# API well suited for Unity applications. Using ailia AI Speech, you can easily integrate AI powered speech recognition into your applications.
|
|
23
|
-
|
|
24
|
-
## Install from pip
|
|
25
|
-
|
|
26
|
-
You can install the ailia AI Speech free evaluation package with the following command.
|
|
27
|
-
|
|
28
|
-
```
|
|
29
|
-
pip3 install ailia_speech
|
|
30
|
-
```
|
|
31
|
-
|
|
32
|
-
## Install from package
|
|
33
|
-
|
|
34
|
-
You can install the ailia AI Speech from Package with the following command.
|
|
35
|
-
|
|
36
|
-
```
|
|
37
|
-
python3 bootstrap.py
|
|
38
|
-
pip3 install ./
|
|
39
|
-
```
|
|
40
|
-
|
|
41
|
-
## Usage
|
|
42
|
-
|
|
43
|
-
```python
|
|
44
|
-
import ailia_speech
|
|
45
|
-
|
|
46
|
-
import librosa
|
|
47
|
-
|
|
48
|
-
import os
|
|
49
|
-
import urllib.request
|
|
50
|
-
|
|
51
|
-
# Load target audio
|
|
52
|
-
ref_file_path = "demo.wav"
|
|
53
|
-
if not os.path.exists(ref_file_path):
|
|
54
|
-
urllib.request.urlretrieve(
|
|
55
|
-
"https://github.com/axinc-ai/ailia-models/raw/refs/heads/master/audio_processing/whisper/demo.wa",
|
|
56
|
-
"demo.wav"
|
|
57
|
-
)
|
|
58
|
-
audio_waveform, sampling_rate = librosa.load(ref_file_path, mono=True)
|
|
59
|
-
|
|
60
|
-
# Infer
|
|
61
|
-
speech = ailia_speech.Whisper()
|
|
62
|
-
speech.initialize_model(model_path = "./models/", model_type = ailia_speech.AILIA_SPEECH_MODEL_TYPE_WHISPER_MULTILINGUAL_SMALL)
|
|
63
|
-
recognized_text = speech.transcribe(audio_waveform, sampling_rate)
|
|
64
|
-
print(recognized_text)
|
|
65
|
-
```
|
|
66
|
-
|
|
67
|
-
## API specification
|
|
68
|
-
|
|
69
|
-
https://github.com/axinc-ai/ailia-sdk
|
|
70
|
-
|
|
File without changes
|
|
File without changes
|