videosdk-plugins-assemblyai 0.0.30__tar.gz → 0.0.31__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of videosdk-plugins-assemblyai might be problematic.
- {videosdk_plugins_assemblyai-0.0.30 → videosdk_plugins_assemblyai-0.0.31}/PKG-INFO +3 -2
- {videosdk_plugins_assemblyai-0.0.30 → videosdk_plugins_assemblyai-0.0.31}/pyproject.toml +1 -1
- videosdk_plugins_assemblyai-0.0.31/videosdk/plugins/assemblyai/stt.py +288 -0
- videosdk_plugins_assemblyai-0.0.31/videosdk/plugins/assemblyai/version.py +1 -0
- videosdk_plugins_assemblyai-0.0.30/videosdk/plugins/assemblyai/stt.py +0 -149
- videosdk_plugins_assemblyai-0.0.30/videosdk/plugins/assemblyai/version.py +0 -1
- {videosdk_plugins_assemblyai-0.0.30 → videosdk_plugins_assemblyai-0.0.31}/.gitignore +0 -0
- {videosdk_plugins_assemblyai-0.0.30 → videosdk_plugins_assemblyai-0.0.31}/README.md +0 -0
- {videosdk_plugins_assemblyai-0.0.30 → videosdk_plugins_assemblyai-0.0.31}/videosdk/plugins/assemblyai/__init__.py +0 -0
{videosdk_plugins_assemblyai-0.0.30 → videosdk_plugins_assemblyai-0.0.31}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: videosdk-plugins-assemblyai
-Version: 0.0.30
+Version: 0.0.31
 Summary: VideoSDK Agent Framework plugin for AssemblyAI
 Author: videosdk
 License-Expression: Apache-2.0
@@ -12,7 +12,8 @@ Classifier: Topic :: Multimedia :: Sound/Audio
 Classifier: Topic :: Multimedia :: Video
 Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
 Requires-Python: >=3.11
-Requires-Dist: videosdk-agents>=0.0.30
+Requires-Dist: aiohttp
+Requires-Dist: videosdk-agents>=0.0.31
 Description-Content-Type: text/markdown
 
 # VideoSDK Assembly AI Plugin
{videosdk_plugins_assemblyai-0.0.30 → videosdk_plugins_assemblyai-0.0.31}/pyproject.toml
@@ -20,7 +20,7 @@ classifiers = [
     "Topic :: Multimedia :: Video",
     "Topic :: Scientific/Engineering :: Artificial Intelligence",
 ]
-dependencies = ["videosdk-agents>=0.0.30"]
+dependencies = ["videosdk-agents>=0.0.31","aiohttp"]
 
 [tool.hatch.version]
 path = "videosdk/plugins/assemblyai/version.py"
videosdk_plugins_assemblyai-0.0.31/videosdk/plugins/assemblyai/stt.py
@@ -0,0 +1,288 @@
+from __future__ import annotations
+
+import asyncio
+import json
+import os
+from typing import Any, Optional
+from urllib.parse import urlencode
+import logging
+
+import numpy as np
+import aiohttp
+from videosdk.agents import STT as BaseSTT, STTResponse, SpeechData, SpeechEventType, global_event_emitter
+
+try:
+    from scipy import signal
+    SCIPY_AVAILABLE = True
+except ImportError:
+    SCIPY_AVAILABLE = False
+
+logger = logging.getLogger(__name__)
+
+class AssemblyAISTT(BaseSTT):
+    """
+    VideoSDK Agent Framework STT plugin for AssemblyAI Streaming API.
+    Real-time speech-to-text using WebSocket connection.
+    """
+
+    def __init__(
+        self,
+        *,
+        api_key: str | None = None,
+        input_sample_rate: int = 48000,
+        target_sample_rate: int = 16000,
+        format_turns: bool = True,
+        word_boost: list[str] | None = None,
+        end_of_turn_confidence_threshold: float = 0.5,
+        min_end_of_turn_silence_when_confident: int = 800,
+        max_turn_silence: int = 2000,
+    ) -> None:
+        super().__init__()
+
+        if not SCIPY_AVAILABLE:
+            raise ImportError("scipy is not installed. Please install it with 'pip install scipy'")
+
+        self.api_key = api_key or os.getenv("ASSEMBLYAI_API_KEY")
+        if not self.api_key:
+            raise ValueError(
+                "AssemblyAI API key must be provided either through the 'api_key' parameter "
+                "or the 'ASSEMBLYAI_API_KEY' environment variable."
+            )
+        self.input_sample_rate = input_sample_rate
+        self.target_sample_rate = target_sample_rate
+        self.format_turns = format_turns
+        self.word_boost = word_boost or []
+        self.end_of_turn_confidence_threshold = end_of_turn_confidence_threshold
+        self.min_end_of_turn_silence_when_confident = min_end_of_turn_silence_when_confident
+        self.max_turn_silence = max_turn_silence
+
+        connection_params = {
+            "sample_rate": self.target_sample_rate,
+            "format_turns": self.format_turns,
+        }
+
+
+        if self.end_of_turn_confidence_threshold != 0.7:
+            connection_params["end_of_turn_confidence_threshold"] = self.end_of_turn_confidence_threshold
+        if self.min_end_of_turn_silence_when_confident != 1500:
+            connection_params["min_end_of_turn_silence_when_confident"] = self.min_end_of_turn_silence_when_confident
+        if self.max_turn_silence != 3000:
+            connection_params["max_turn_silence"] = self.max_turn_silence
+
+        if self.word_boost:
+            connection_params["word_boost"] = json.dumps(self.word_boost)
+
+        self.ws_url = f"wss://streaming.assemblyai.com/v3/ws?{urlencode(connection_params)}"
+        logger.info(f"[AssemblyAI] WebSocket URL: {self.ws_url}")
+
+        self._session: Optional[aiohttp.ClientSession] = None
+        self._ws: Optional[aiohttp.ClientWebSocketResponse] = None
+        self._ws_task: Optional[asyncio.Task] = None
+
+        self._stream_buffer = bytearray()
+        self._target_chunk_size = int(0.1 * self.target_sample_rate * 2)
+        self._min_chunk_size = int(0.05 * self.target_sample_rate * 2)
+
+        self._last_speech_event_time = 0.0
+        self._last_transcript = ""
+        self._is_speaking = False
+
+    async def process_audio(
+        self,
+        audio_frames: bytes,
+        **kwargs: Any
+    ) -> None:
+        """Process audio frames and send to AssemblyAI's Streaming API"""
+
+        if not self._ws:
+            await self._connect_ws()
+            self._ws_task = asyncio.create_task(self._listen_for_responses())
+
+        try:
+            resampled_audio = self._resample_audio(audio_frames)
+            if not resampled_audio:
+                return
+
+            self._stream_buffer.extend(resampled_audio)
+
+            while len(self._stream_buffer) >= self._target_chunk_size:
+                chunk_to_send = bytes(self._stream_buffer[:self._target_chunk_size])
+                self._stream_buffer = self._stream_buffer[self._target_chunk_size:]
+
+                await self._ws.send_bytes(chunk_to_send)
+
+        except Exception as e:
+            logger.error(f"Error in process_audio: {str(e)}")
+            self.emit("error", str(e))
+            if self._ws:
+                await self._ws.close()
+                self._ws = None
+            if self._ws_task:
+                self._ws_task.cancel()
+                self._ws_task = None
+
+    async def _listen_for_responses(self) -> None:
+        """Background task to listen for WebSocket responses"""
+        if not self._ws:
+            return
+
+        try:
+            async for msg in self._ws:
+                if msg.type == aiohttp.WSMsgType.TEXT:
+                    data = msg.json()
+                    responses = self._handle_ws_message(data)
+                    for response in responses:
+                        if self._transcript_callback:
+                            await self._transcript_callback(response)
+                elif msg.type == aiohttp.WSMsgType.ERROR:
+                    logger.error(f"WebSocket error: {self._ws.exception()}")
+                    self.emit("error", f"WebSocket error: {self._ws.exception()}")
+                    break
+        except Exception as e:
+            logger.error(f"Error in WebSocket listener: {str(e)}")
+            self.emit("error", f"Error in WebSocket listener: {str(e)}")
+        finally:
+            if self._ws:
+                await self._ws.close()
+                self._ws = None
+
+    async def _connect_ws(self) -> None:
+        """Establish WebSocket connection with AssemblyAI's Streaming API"""
+
+        if not self._session:
+            self._session = aiohttp.ClientSession()
+
+        headers = {
+            "Authorization": self.api_key,
+        }
+
+        try:
+            self._ws = await self._session.ws_connect(self.ws_url, headers=headers)
+            logger.info("[AssemblyAI] WebSocket connection opened")
+        except Exception as e:
+            logger.error(f"Error connecting to WebSocket: {str(e)}")
+            raise
+
+    def _handle_ws_message(self, msg: dict) -> list[STTResponse]:
+        """Handle incoming WebSocket messages and generate STT responses"""
+        responses = []
+
+        try:
+            msg_type = msg.get('type')
+            logger.info(f"[AssemblyAI] Message type: {msg_type}")
+
+            if msg_type == "Begin":
+                session_id = msg.get('id')
+                logger.info(f"[AssemblyAI] Session began: ID={session_id}")
+
+            elif msg_type == "Turn":
+                transcript = msg.get('transcript', '')
+                formatted = msg.get('turn_is_formatted', False)
+                confidence = msg.get('confidence', 1.0)
+
+                if transcript and transcript.strip():
+                    self._last_transcript = transcript.strip()
+
+                    event_type = SpeechEventType.FINAL if formatted else SpeechEventType.INTERIM
+
+                    response = STTResponse(
+                        event_type=event_type,
+                        data=SpeechData(
+                            text=transcript.strip(),
+                            confidence=confidence
+                        )
+                    )
+
+                    responses.append(response)
+
+                    if not self._is_speaking:
+                        self._is_speaking = True
+                        global_event_emitter.emit("speech_started")
+
+                    if formatted:
+                        self._is_speaking = False
+                        self._last_transcript = ""
+
+            elif msg_type == "Termination":
+                if self._last_transcript and self._is_speaking:
+                    final_response = STTResponse(
+                        event_type=SpeechEventType.FINAL,
+                        data=SpeechData(
+                            text=self._last_transcript,
+                            confidence=1.0
+                        )
+                    )
+                    responses.append(final_response)
+                    self._last_transcript = ""
+                    self._is_speaking = False
+
+            elif msg_type == "Error":
+                error_msg = msg.get('error', 'Unknown error')
+                logger.error(f"AssemblyAI Error: {error_msg}")
+
+        except Exception as e:
+            logger.error(f"Error handling WebSocket message: {str(e)}")
+
+        return responses
+
+    def _resample_audio(self, audio_bytes: bytes) -> bytes:
+        """Resample audio from input sample rate to target sample rate and convert to mono."""
+        try:
+            if not audio_bytes:
+                return b''
+
+            raw_audio = np.frombuffer(audio_bytes, dtype=np.int16)
+            if raw_audio.size == 0:
+                return b''
+
+            if raw_audio.size % 2 == 0:
+                stereo_audio = raw_audio.reshape(-1, 2)
+                mono_audio = stereo_audio.astype(np.float32).mean(axis=1)
+            else:
+                mono_audio = raw_audio.astype(np.float32)
+
+            if self.input_sample_rate != self.target_sample_rate:
+                target_length = int(len(mono_audio) * self.target_sample_rate / self.input_sample_rate)
+                resampled_data = signal.resample(mono_audio, target_length)
+            else:
+                resampled_data = mono_audio
+
+            resampled_data = np.clip(resampled_data, -32767, 32767)
+            return resampled_data.astype(np.int16).tobytes()
+
+        except Exception as e:
+            logger.error(f"Error resampling audio: {e}")
+            return b''
+
+    async def aclose(self) -> None:
+        """Cleanup resources"""
+
+        if len(self._stream_buffer) >= self._min_chunk_size and self._ws:
+            try:
+                final_chunk = bytes(self._stream_buffer)
+                await self._ws.send_bytes(final_chunk)
+            except Exception as e:
+                logger.error(f"Error sending final audio: {e}")
+
+        if self._ws:
+            try:
+                await self._ws.send_str(json.dumps({"type": "Terminate"}))
+                await asyncio.sleep(0.5)
+            except Exception as e:
+                logger.error(f"Error sending termination: {e}")
+
+        if self._ws_task:
+            self._ws_task.cancel()
+            try:
+                await self._ws_task
+            except asyncio.CancelledError:
+                pass
+            self._ws_task = None
+
+        if self._ws:
+            await self._ws.close()
+            self._ws = None
+
+        if self._session:
+            await self._session.close()
+            self._session = None
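For orientation, here is a minimal standalone sketch of how the new streaming class above might be driven. It assumes only the names visible in the diff; assigning the private _transcript_callback attribute and feeding silent test audio are illustrative stand-ins for the wiring the videosdk-agents pipeline normally performs, not the framework's documented API.

    # Hypothetical standalone driver for the new AssemblyAISTT class; in
    # practice the videosdk-agents pipeline calls process_audio() and
    # registers the transcript callback itself.
    import asyncio

    from videosdk.plugins.assemblyai import AssemblyAISTT


    async def main() -> None:
        # With no api_key argument, the constructor reads ASSEMBLYAI_API_KEY
        # from the environment and raises ValueError if it is missing.
        stt = AssemblyAISTT(format_turns=True)

        async def on_transcript(response) -> None:
            # STTResponse carries event_type (INTERIM while a turn is open,
            # FINAL once the turn is formatted) and data.text.
            print(response.event_type, response.data.text)

        # The framework normally sets this private attribute; assigning it
        # directly is only for the sake of a self-contained sketch.
        stt._transcript_callback = on_transcript

        # 10 ms of 48 kHz, 16-bit stereo silence: 480 samples x 2 channels
        # x 2 bytes = 1920 bytes. process_audio() downmixes to mono,
        # resamples to 16 kHz, and buffers until a 100 ms chunk is ready.
        silence = b"\x00" * 1920
        for _ in range(200):
            await stt.process_audio(silence)

        await stt.aclose()


    if __name__ == "__main__":
        asyncio.run(main())

The 100 ms chunking comes straight from the constructor: _target_chunk_size = int(0.1 * 16000 * 2) = 3200 bytes of 16 kHz, 16-bit mono PCM per WebSocket frame, and aclose() flushes any buffered remainder of at least _min_chunk_size (1600 bytes, 50 ms) before sending the Terminate message.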
videosdk_plugins_assemblyai-0.0.31/videosdk/plugins/assemblyai/version.py
@@ -0,0 +1 @@
+__version__ = "0.0.31"
videosdk_plugins_assemblyai-0.0.30/videosdk/plugins/assemblyai/stt.py
@@ -1,149 +0,0 @@
-from __future__ import annotations
-
-import asyncio
-import io
-import os
-import wave
-from typing import Any
-
-import aiohttp
-import numpy as np
-from videosdk.agents import STT, STTResponse, SpeechData, SpeechEventType, global_event_emitter
-
-try:
-    from scipy import signal
-    SCIPY_AVAILABLE = True
-except ImportError:
-    SCIPY_AVAILABLE = False
-
-ASSEMBLYAI_API_URL = "https://api.assemblyai.com/v2"
-
-class AssemblyAISTT(STT):
-    """
-    VideoSDK Agent Framework STT plugin for AssemblyAI.
-    """
-
-    def __init__(
-        self,
-        *,
-        api_key: str | None = None,
-        language_code: str = "en_us",
-        input_sample_rate: int = 48000,
-        target_sample_rate: int = 16000,
-        silence_threshold: float = 0.015,
-        silence_duration: float = 0.8,
-    ) -> None:
-        super().__init__()
-        if not SCIPY_AVAILABLE:
-            raise ImportError("scipy is not installed. Please install it with 'pip install scipy'")
-
-        self.api_key = api_key or os.getenv("ASSEMBLYAI_API_KEY")
-        if not self.api_key:
-            raise ValueError("AssemblyAI API key must be provided either through the 'api_key' parameter or the 'ASSEMBLYAI_API_KEY' environment variable.")
-
-        self.language_code = language_code
-        self.input_sample_rate = input_sample_rate
-        self.target_sample_rate = target_sample_rate
-        self.silence_threshold_bytes = int(silence_threshold * 32767)
-        self.silence_duration_frames = int(silence_duration * self.input_sample_rate)
-
-        self._session = aiohttp.ClientSession(headers={"Authorization": self.api_key})
-        self._audio_buffer = bytearray()
-        self._is_speaking = False
-        self._silence_frames = 0
-        self._lock = asyncio.Lock()
-
-    async def process_audio(self, audio_frames: bytes, **kwargs: Any) -> None:
-        async with self._lock:
-            is_silent_chunk = self._is_silent(audio_frames)
-
-            if not is_silent_chunk:
-                if not self._is_speaking:
-                    self._is_speaking = True
-                    global_event_emitter.emit("speech_started")
-                self._audio_buffer.extend(audio_frames)
-                self._silence_frames = 0
-            else:
-                if self._is_speaking:
-                    self._silence_frames += len(audio_frames) // 4
-                    if self._silence_frames > self.silence_duration_frames:
-                        global_event_emitter.emit("speech_stopped")
-                        asyncio.create_task(self._transcribe_buffer())
-                        self._is_speaking = False
-                        self._silence_frames = 0
-
-    def _is_silent(self, audio_chunk: bytes) -> bool:
-        audio_data = np.frombuffer(audio_chunk, dtype=np.int16)
-        return np.max(np.abs(audio_data)) < self.silence_threshold_bytes
-
-    async def _transcribe_buffer(self):
-        async with self._lock:
-            if not self._audio_buffer:
-                return
-            audio_to_send = self._audio_buffer
-            self._audio_buffer = bytearray()
-
-        try:
-            resampled_audio_bytes = self._resample_audio(audio_to_send)
-            wav_audio = self._create_wav_in_memory(resampled_audio_bytes)
-
-            upload_url = f"{ASSEMBLYAI_API_URL}/upload"
-            async with self._session.post(upload_url, data=wav_audio) as response:
-                response.raise_for_status()
-                upload_data = await response.json()
-                audio_url = upload_data["upload_url"]
-
-            transcript_url = f"{ASSEMBLYAI_API_URL}/transcript"
-            payload = {"audio_url": audio_url, "language_code": self.language_code}
-            async with self._session.post(transcript_url, json=payload) as response:
-                response.raise_for_status()
-                transcript_data = await response.json()
-                transcript_id = transcript_data["id"]
-
-            poll_url = f"{ASSEMBLYAI_API_URL}/transcript/{transcript_id}"
-            while True:
-                await asyncio.sleep(1)
-                async with self._session.get(poll_url) as response:
-                    response.raise_for_status()
-                    result = await response.json()
-                if result["status"] == "completed":
-                    if result.get("text") and self._transcript_callback:
-                        event = STTResponse(
-                            event_type=SpeechEventType.FINAL,
-                            data=SpeechData(text=result["text"], language=self.language_code, confidence=result.get("confidence", 1.0))
-                        )
-                        await self._transcript_callback(event)
-                    break
-                elif result["status"] == "error":
-                    raise Exception(f"AssemblyAI transcription failed: {result.get('error')}")
-
-        except Exception as e:
-            print(f"!!! ASSEMBLYAI PLUGIN FATAL ERROR: {e} ({type(e).__name__}) !!!")
-            self.emit("error", f"AssemblyAI transcription error: {e}")
-
-    def _resample_audio(self, audio_bytes: bytes) -> bytes:
-        raw_audio = np.frombuffer(audio_bytes, dtype=np.int16)
-        if raw_audio.size == 0: return b''
-        stereo_audio = raw_audio.reshape(-1, 2)
-        mono_audio = stereo_audio.astype(np.float32).mean(axis=1)
-        resampled_data = signal.resample(mono_audio, int(len(mono_audio) * self.target_sample_rate / self.input_sample_rate))
-        return resampled_data.astype(np.int16).tobytes()
-
-    def _create_wav_in_memory(self, pcm_data: bytes) -> io.BytesIO:
-        """Creates a WAV file in memory from raw PCM data."""
-        wav_buffer = io.BytesIO()
-        with wave.open(wav_buffer, 'wb') as wf:
-            wf.setnchannels(1) # Mono
-            wf.setsampwidth(2) # 16-bit
-            wf.setframerate(self.target_sample_rate)
-            wf.writeframes(pcm_data)
-        wav_buffer.seek(0)
-        return wav_buffer
-
-    async def aclose(self) -> None:
-        if self._is_speaking and self._audio_buffer:
-            await self._transcribe_buffer()
-            await asyncio.sleep(1)
-
-        if self._session and not self._session.closed:
-            await self._session.close()
videosdk_plugins_assemblyai-0.0.30/videosdk/plugins/assemblyai/version.py
@@ -1 +0,0 @@
-__version__ = "0.0.30"
{videosdk_plugins_assemblyai-0.0.30 → videosdk_plugins_assemblyai-0.0.31}/.gitignore: file without changes
{videosdk_plugins_assemblyai-0.0.30 → videosdk_plugins_assemblyai-0.0.31}/README.md: file without changes
{videosdk_plugins_assemblyai-0.0.30 → videosdk_plugins_assemblyai-0.0.31}/videosdk/plugins/assemblyai/__init__.py: file without changes