livekit-plugins-google 0.5.1__py3-none-any.whl → 0.6.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- livekit/plugins/google/stt.py +44 -154
- livekit/plugins/google/tts.py +41 -70
- livekit/plugins/google/version.py +1 -1
- {livekit_plugins_google-0.5.1.dist-info → livekit_plugins_google-0.6.0.dist-info}/METADATA +2 -2
- livekit_plugins_google-0.6.0.dist-info/RECORD +11 -0
- {livekit_plugins_google-0.5.1.dist-info → livekit_plugins_google-0.6.0.dist-info}/WHEEL +1 -1
- livekit_plugins_google-0.5.1.dist-info/RECORD +0 -11
- {livekit_plugins_google-0.5.1.dist-info → livekit_plugins_google-0.6.0.dist-info}/top_level.txt +0 -0
livekit/plugins/google/stt.py
CHANGED
@@ -15,15 +15,13 @@
 from __future__ import annotations
 
 import asyncio
-import contextlib
 import dataclasses
 import os
 from dataclasses import dataclass
-from typing import AsyncIterable, List,
+from typing import AsyncIterable, List, Union
 
-from livekit import agents
-from livekit.agents import stt
-from livekit.agents.utils import AudioBuffer
+from livekit import agents
+from livekit.agents import stt, utils
 
 from google.cloud.speech_v2 import SpeechAsyncClient
 from google.cloud.speech_v2.types import cloud_speech
@@ -63,7 +61,9 @@ class STT(stt.STT):
         if no credentials is provided, it will use the credentials on the environment
         GOOGLE_APPLICATION_CREDENTIALS (default behavior of Google SpeechAsyncClient)
         """
-        super().__init__(
+        super().__init__(
+            capabilities=stt.STTCapabilities(streaming=True, interim_results=True)
+        )
 
         self._client: SpeechAsyncClient | None = None
         self._credentials_info = credentials_info
@@ -112,11 +112,7 @@ class STT(stt.STT):
         project_id = self._ensure_client().transport._credentials.project_id  # type: ignore
         return f"projects/{project_id}/locations/global/recognizers/_"
 
-    def _sanitize_options(
-        self,
-        *,
-        language: str | None = None,
-    ) -> STTOptions:
+    def _sanitize_options(self, *, language: str | None = None) -> STTOptions:
         config = dataclasses.replace(self._config)
 
         if language:
@@ -135,8 +131,8 @@
 
     async def recognize(
         self,
+        buffer: utils.AudioBuffer,
         *,
-        buffer: AudioBuffer,
         language: SpeechLanguages | str | None = None,
     ) -> stt.SpeechEvent:
         config = self._sanitize_options(language=language)
@@ -159,24 +155,16 @@
 
         raw = await self._ensure_client().recognize(
             cloud_speech.RecognizeRequest(
-                recognizer=self._recognizer,
-                config=config,
-                content=frame.data.tobytes(),
+                recognizer=self._recognizer, config=config, content=frame.data.tobytes()
             )
         )
         return _recognize_response_to_speech_event(raw)
 
     def stream(
-        self,
-        *,
-        language: SpeechLanguages | str | None = None,
+        self, *, language: SpeechLanguages | str | None = None
     ) -> "SpeechStream":
         config = self._sanitize_options(language=language)
-        return SpeechStream(
-            self._ensure_client(),
-            self._recognizer,
-            config,
-        )
+        return SpeechStream(self._ensure_client(), self._recognizer, config)
 
 
 class SpeechStream(stt.SpeechStream):
@@ -196,15 +184,7 @@ class SpeechStream(stt.SpeechStream):
         self._config = config
         self._sample_rate = sample_rate
         self._num_channels = num_channels
-
-        self._queue = asyncio.Queue[Optional[rtc.AudioFrame]]()
-        self._event_queue = asyncio.Queue[Optional[stt.SpeechEvent]]()
-        self._closed = False
-        self._main_task = asyncio.create_task(self._run(max_retry=max_retry))
-
-        self._final_events: List[stt.SpeechEvent] = []
-        self._need_bos = True
-        self._need_eos = False
+        self._max_retry = max_retry
 
         self._streaming_config = cloud_speech.StreamingRecognitionConfig(
             config=cloud_speech.RecognitionConfig(
@@ -226,30 +206,13 @@
             ),
         )
 
-
-
-
-
-        self._main_task.add_done_callback(log_exception)
-
-    def push_frame(self, frame: rtc.AudioFrame) -> None:
-        if self._closed:
-            raise ValueError("cannot push frame to closed stream")
-
-        self._queue.put_nowait(frame)
-
-    async def aclose(self, *, wait: bool = True) -> None:
-        self._closed = True
-        if not wait:
-            self._main_task.cancel()
-
-        self._queue.put_nowait(None)
-        with contextlib.suppress(asyncio.CancelledError):
-            await self._main_task
+    @utils.log_exceptions(logger=logger)
+    async def _main_task(self) -> None:
+        await self._run(self._max_retry)
 
     async def _run(self, max_retry: int) -> None:
         retry_count = 0
-        while not self.
+        while not self._input_ch.closed:
             try:
                 # google requires a async generator when calling streaming_recognize
                 # this function basically convert the queue into a async generator
@@ -260,19 +223,19 @@
                             recognizer=self._recognizer,
                             streaming_config=self._streaming_config,
                         )
-                        while True:
-                            frame = await self._queue.get()
-                            if frame is None:
-                                break
 
+                        async for frame in self._input_ch:
                             frame = frame.remix_and_resample(
                                 self._sample_rate, self._num_channels
                             )
                             yield cloud_speech.StreamingRecognizeRequest(
-                                audio=frame.data.tobytes()
+                                audio=frame.data.tobytes()
                             )
-
-
+
+                    except Exception:
+                        logger.exception(
+                            "an error occurred while streaming input to google STT"
+                        )
 
                 # try to connect
                 stream = await self._client.streaming_recognize(
@@ -297,8 +260,6 @@
                 )
                 await asyncio.sleep(retry_delay)
 
-        self._event_queue.put_nowait(None)
-
     async def _run_stream(
         self, stream: AsyncIterable[cloud_speech.StreamingRecognizeResponse]
     ):
@@ -307,11 +268,9 @@
                 resp.speech_event_type
                 == cloud_speech.StreamingRecognizeResponse.SpeechEventType.SPEECH_ACTIVITY_BEGIN
             ):
-
-
-
-                if self._need_bos:
-                    self._send_bos()
+                self._event_ch.send_nowait(
+                    stt.SpeechEvent(type=stt.SpeechEventType.START_OF_SPEECH)
+                )
 
             if (
                 resp.speech_event_type
@@ -319,96 +278,31 @@
             ):
                 result = resp.results[0]
                 if not result.is_final:
-
-
-
-
-
+                    self._event_ch.send_nowait(
+                        stt.SpeechEvent(
+                            type=stt.SpeechEventType.INTERIM_TRANSCRIPT,
+                            alternatives=[
+                                _streaming_recognize_response_to_speech_data(resp)
+                            ],
+                        )
                     )
-                    self._event_queue.put_nowait(iterim_event)
-
                 else:
-
-
-
-
-
+                    self._event_ch.send_nowait(
+                        stt.SpeechEvent(
+                            type=stt.SpeechEventType.FINAL_TRANSCRIPT,
+                            alternatives=[
+                                _streaming_recognize_response_to_speech_data(resp)
+                            ],
+                        )
                     )
-                    self._final_events.append(final_event)
-                    self._event_queue.put_nowait(final_event)
-
-                    if self._need_eos:
-                        self._send_eos()
 
             if (
                 resp.speech_event_type
                 == cloud_speech.StreamingRecognizeResponse.SpeechEventType.SPEECH_ACTIVITY_END
             ):
-                self.
-
-
-                self._send_eos()
-
-    def _send_bos(self) -> None:
-        self._need_bos = False
-        start_event = stt.SpeechEvent(
-            type=stt.SpeechEventType.START_OF_SPEECH,
-        )
-        self._event_queue.put_nowait(start_event)
-
-    def _send_eos(self) -> None:
-        self._need_eos = False
-        self._need_bos = True
-
-        if self._final_events:
-            lg = self._final_events[0].alternatives[0].language
-
-            sentence = ""
-            confidence = 0.0
-            for alt in self._final_events:
-                sentence += f"{alt.alternatives[0].text.strip()} "
-                confidence += alt.alternatives[0].confidence
-
-            sentence = sentence.rstrip()
-            confidence /= len(self._final_events)  # avg. of confidence
-
-            end_event = stt.SpeechEvent(
-                type=stt.SpeechEventType.END_OF_SPEECH,
-                alternatives=[
-                    stt.SpeechData(
-                        language=lg,
-                        start_time=self._final_events[0].alternatives[0].start_time,
-                        end_time=self._final_events[-1].alternatives[0].end_time,
-                        confidence=confidence,
-                        text=sentence,
-                    )
-                ],
-            )
-
-            self._final_events = []
-            self._event_queue.put_nowait(end_event)
-        else:
-            end_event = stt.SpeechEvent(
-                type=stt.SpeechEventType.END_OF_SPEECH,
-                alternatives=[
-                    stt.SpeechData(
-                        language="",
-                        start_time=0,
-                        end_time=0,
-                        confidence=0,
-                        text="",
-                    )
-                ],
-            )
-
-            self._event_queue.put_nowait(end_event)
-
-    async def __anext__(self) -> stt.SpeechEvent:
-        evt = await self._event_queue.get()
-        if evt is None:
-            raise StopAsyncIteration
-
-        return evt
+                self._event_ch.send_nowait(
+                    stt.SpeechEvent(type=stt.SpeechEventType.END_OF_SPEECH)
+                )
 
 
 def _recognize_response_to_speech_event(
@@ -453,11 +347,7 @@ def _streaming_recognize_response_to_speech_data(
     lg = resp.results[0].language_code
 
     data = stt.SpeechData(
-        language=lg,
-        start_time=0,
-        end_time=0,
-        confidence=confidence,
-        text=text,
+        language=lg, start_time=0, end_time=0, confidence=confidence, text=text
     )
 
     return data
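
A note on the stt.py change above: in 0.6.0 the STT constructor declares its capabilities via stt.STTCapabilities, recognize() takes the audio buffer as its first positional argument (0.5.1 used a keyword-only buffer=), and SpeechStream drops its hand-rolled queue, push_frame and aclose logic in favor of the base class's _input_ch/_event_ch plumbing. A minimal caller-side sketch of the 0.6.0 shape; it assumes the class is re-exported as livekit.plugins.google.STT and uses a placeholder audio_buffer, neither of which is shown in this diff:

    # Sketch only: `audio_buffer` stands in for an existing utils.AudioBuffer (placeholder).
    from livekit.plugins import google

    async def transcribe(audio_buffer) -> None:
        stt_engine = google.STT()  # no credentials given -> GOOGLE_APPLICATION_CREDENTIALS

        # 0.5.1: event = await stt_engine.recognize(buffer=audio_buffer, language="en-US")
        # 0.6.0: the buffer is now the first positional parameter
        event = await stt_engine.recognize(audio_buffer, language="en-US")
        print(event.alternatives[0].text)

        # Streaming recognition: the returned SpeechStream emits stt.SpeechEvent objects
        # (START_OF_SPEECH, INTERIM_TRANSCRIPT, FINAL_TRANSCRIPT, END_OF_SPEECH).
        stream = stt_engine.stream(language="en-US")
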
livekit/plugins/google/tts.py
CHANGED
@@ -14,20 +14,14 @@
 
 from __future__ import annotations
 
-import asyncio
-import contextlib
 from dataclasses import dataclass
-from typing import
+from typing import Union
 
 from livekit import rtc
-from livekit.agents import tts
-from livekit.agents.utils import codecs
+from livekit.agents import tts, utils
 
 from google.cloud import texttospeech
-from google.cloud.texttospeech_v1.types import (
-    SsmlVoiceGender,
-    SynthesizeSpeechResponse,
-)
+from google.cloud.texttospeech_v1.types import SsmlVoiceGender, SynthesizeSpeechResponse
 
 from .log import logger
 from .models import AudioEncoding, Gender, SpeechLanguages
@@ -61,7 +55,11 @@ class TTS(tts.TTS):
         GOOGLE_APPLICATION_CREDENTIALS (default behavior of Google TextToSpeechAsyncClient)
         """
         super().__init__(
-
+            capabilities=tts.TTSCapabilities(
+                streaming=True,
+            ),
+            sample_rate=sample_rate,
+            num_channels=1,
         )
 
         self._client: texttospeech.TextToSpeechAsyncClient | None = None
@@ -75,9 +73,7 @@ class TTS(tts.TTS):
             ssml_gender = SsmlVoiceGender.FEMALE
 
         voice = texttospeech.VoiceSelectionParams(
-            name=voice_name,
-            language_code=language,
-            ssml_gender=ssml_gender,
+            name=voice_name, language_code=language, ssml_gender=ssml_gender
         )
 
         if encoding == "linear16" or encoding == "wav":
@@ -117,10 +113,7 @@ class TTS(tts.TTS):
         assert self._client is not None
         return self._client
 
-    def synthesize(
-        self,
-        text: str,
-    ) -> "ChunkedStream":
+    def synthesize(self, text: str) -> "ChunkedStream":
         return ChunkedStream(text, self._opts, self._ensure_client())
 
 
@@ -128,60 +121,38 @@ class ChunkedStream(tts.ChunkedStream):
     def __init__(
         self, text: str, opts: _TTSOptions, client: texttospeech.TextToSpeechAsyncClient
     ) -> None:
-
-        self._opts = opts
-
-
-
-
-
-
-
-
-
-        )
+        super().__init__()
+        self._text, self._opts, self._client = text, opts, client
+
+    @utils.log_exceptions(logger=logger)
+    async def _main_task(self) -> None:
+        request_id = utils.shortuuid()
+        segment_id = utils.shortuuid()
+        response: SynthesizeSpeechResponse = await self._client.synthesize_speech(
+            input=texttospeech.SynthesisInput(text=self._text),
+            voice=self._opts.voice,
+            audio_config=self._opts.audio_config,
+        )
 
-
-
-
-
-
-            self._queue.put_nowait(
-                tts.SynthesizedAudio(text=self._text, data=frame)
-            )
-        else:
-            self._queue.put_nowait(
+        data = response.audio_content
+        if self._opts.audio_config.audio_encoding == "mp3":
+            decoder = utils.codecs.Mp3StreamDecoder()
+            for frame in decoder.decode_chunk(data):
+                self._event_ch.send_nowait(
                     tts.SynthesizedAudio(
-
-                        data=rtc.AudioFrame(
-                            data=data,
-                            sample_rate=self._opts.audio_config.sample_rate_hertz,
-                            num_channels=1,
-                            samples_per_channel=len(data) // 2,  # 16-bit
-                        ),
+                        request_id=request_id, segment_id=segment_id, frame=frame
                     )
                 )
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            return frame
-
-    async def aclose(self) -> None:
-        if not self._main_task:
-            return
-
-        self._main_task.cancel()
-        with contextlib.suppress(asyncio.CancelledError):
-            await self._main_task
+        else:
+            self._event_ch.send_nowait(
+                tts.SynthesizedAudio(
+                    request_id=request_id,
+                    segment_id=segment_id,
+                    frame=rtc.AudioFrame(
+                        data=data,
+                        sample_rate=self._opts.audio_config.sample_rate_hertz,
+                        num_channels=1,
+                        samples_per_channel=len(data) // 2,  # 16-bit
+                    ),
+                )
+            )
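
A note on the tts.py change above: ChunkedStream now runs synthesis inside an _main_task coroutine and emits tts.SynthesizedAudio events carrying request_id, segment_id and a frame (0.5.1 populated text/data instead), with MP3 output decoded through utils.codecs.Mp3StreamDecoder. A consumer-side sketch of the 0.6.0 shape; it assumes ChunkedStream is async-iterable via its livekit-agents base class, which this diff does not show:

    # Sketch only: iteration over ChunkedStream comes from the base class, not from this diff.
    from livekit.plugins import google

    async def speak() -> None:
        tts_engine = google.TTS()
        chunked = tts_engine.synthesize("Hello from LiveKit")
        async for ev in chunked:      # ev: tts.SynthesizedAudio
            frame = ev.frame          # 0.6.0 field; 0.5.1 exposed `data`
            print(ev.request_id, ev.segment_id, frame.sample_rate)
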

{livekit_plugins_google-0.5.1.dist-info → livekit_plugins_google-0.6.0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: livekit-plugins-google
-Version: 0.5.1
+Version: 0.6.0
 Summary: Agent Framework plugin for services from Google Cloud
 Home-page: https://github.com/livekit/agents
 License: Apache-2.0
@@ -21,7 +21,7 @@ Requires-Python: >=3.9.0
 Description-Content-Type: text/markdown
 Requires-Dist: google-cloud-speech <3,>=2
 Requires-Dist: google-cloud-texttospeech <3,>=2
-Requires-Dist: livekit-agents
+Requires-Dist: livekit-agents >=0.8.0.dev0
 
 # LiveKit Plugins Google
 

livekit_plugins_google-0.6.0.dist-info/RECORD
ADDED
@@ -0,0 +1,11 @@
+livekit/plugins/google/__init__.py,sha256=DlQC5cosMFyQlM8_vFvJGoZiziFkd0Sa4mutnsxXyZM,959
+livekit/plugins/google/log.py,sha256=GI3YWN5YzrafnUccljzPRS_ZALkMNk1i21IRnTl2vNA,69
+livekit/plugins/google/models.py,sha256=n8pgTJ7xyJpPCZJ_y0GzaQq6LqYknL6K6trpi07-AxM,1307
+livekit/plugins/google/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+livekit/plugins/google/stt.py,sha256=bqXaoi5trER7PE45axfEpHwReElmf7yl38RpK1iJsdc,12849
+livekit/plugins/google/tts.py,sha256=KUw826CK3yt5meGVj0TKkueQ8o_gaXbc1Rtvdv2yF5M,5548
+livekit/plugins/google/version.py,sha256=Z62pORgDetwUvtfZOgPeIzXJugcrpDAOzC876rjCR0o,600
+livekit_plugins_google-0.6.0.dist-info/METADATA,sha256=Gb5O82GO4CpSvNHeYs4kD2K-neRklRGXaEQwOSQ8SpM,1584
+livekit_plugins_google-0.6.0.dist-info/WHEEL,sha256=Wyh-_nZ0DJYolHNn1_hMa4lM7uDedD_RGVwbmTjyItk,91
+livekit_plugins_google-0.6.0.dist-info/top_level.txt,sha256=OoDok3xUmXbZRvOrfvvXB-Juu4DX79dlq188E19YHoo,8
+livekit_plugins_google-0.6.0.dist-info/RECORD,,

livekit_plugins_google-0.5.1.dist-info/RECORD
REMOVED
@@ -1,11 +0,0 @@
-livekit/plugins/google/__init__.py,sha256=DlQC5cosMFyQlM8_vFvJGoZiziFkd0Sa4mutnsxXyZM,959
-livekit/plugins/google/log.py,sha256=GI3YWN5YzrafnUccljzPRS_ZALkMNk1i21IRnTl2vNA,69
-livekit/plugins/google/models.py,sha256=n8pgTJ7xyJpPCZJ_y0GzaQq6LqYknL6K6trpi07-AxM,1307
-livekit/plugins/google/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-livekit/plugins/google/stt.py,sha256=GfWita3mgLZG2KpS9WYMCL8jwCNN5qukicpI58zPCcY,16058
-livekit/plugins/google/tts.py,sha256=vYOPDebz5YUzQqlT1lqF0h_liP-1S38lCffdk1ETOg0,6214
-livekit/plugins/google/version.py,sha256=lnLU02KpruwZLXljnGCTau4BB-Ycv9A1dIKvj053gCg,600
-livekit_plugins_google-0.5.1.dist-info/METADATA,sha256=V4_6mKQZbvSotoU00pQpDX-t3UxMy_9ZuUO6xbFw9nE,1577
-livekit_plugins_google-0.5.1.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
-livekit_plugins_google-0.5.1.dist-info/top_level.txt,sha256=OoDok3xUmXbZRvOrfvvXB-Juu4DX79dlq188E19YHoo,8
-livekit_plugins_google-0.5.1.dist-info/RECORD,,
{livekit_plugins_google-0.5.1.dist-info → livekit_plugins_google-0.6.0.dist-info}/top_level.txt
RENAMED
File without changes