livekit-plugins-google 0.9.1 (py3-none-any.whl) → 0.10.1 (py3-none-any.whl)

This diff shows the contents of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
--- /dev/null
+++ livekit/plugins/google/beta/realtime/transcriber.py
@@ -0,0 +1,173 @@
+from __future__ import annotations
+
+import asyncio
+import re
+from dataclasses import dataclass
+from typing import Literal
+
+import websockets
+from livekit import rtc
+from livekit.agents import utils
+
+from google import genai
+from google.genai import types
+
+from ...log import logger
+from .api_proto import ClientEvents, LiveAPIModels
+
+EventTypes = Literal[
+    "input_speech_started",
+    "input_speech_done",
+]
+
+DEFAULT_LANGUAGE = "English"
+
+SYSTEM_INSTRUCTIONS = f"""
+You are an **Audio Transcriber**. Your task is to convert audio content into accurate and precise text.
+
+- Transcribe verbatim; exclude non-speech sounds.
+- Provide only transcription; no extra text or explanations.
+- If audio is unclear, respond with: `...`
+- Ensure error-free transcription, preserving meaning and context.
+- Use proper punctuation and formatting.
+- Do not add explanations, comments, or extra information.
+- Do not include timestamps, speaker labels, or annotations unless specified.
+
+- Audio Language: {DEFAULT_LANGUAGE}
+"""
+
+
+@dataclass
+class TranscriptionContent:
+    response_id: str
+    text: str
+
+
+class TranscriberSession(utils.EventEmitter[EventTypes]):
+    def __init__(
+        self,
+        *,
+        client: genai.Client,
+        model: LiveAPIModels | str,
+    ):
+        """
+        Initializes a TranscriberSession instance for interacting with Google's Realtime API.
+        """
+        super().__init__()
+        self._client = client
+        self._model = model
+        self._closed = False
+        system_instructions = types.Content(
+            parts=[types.Part(text=SYSTEM_INSTRUCTIONS)]
+        )
+
+        self._config = types.LiveConnectConfig(
+            response_modalities=["TEXT"],
+            system_instruction=system_instructions,
+            generation_config=types.GenerationConfig(
+                temperature=0.0,
+            ),
+        )
+        self._main_atask = asyncio.create_task(
+            self._main_task(), name="gemini-realtime-transcriber"
+        )
+        self._send_ch = utils.aio.Chan[ClientEvents]()
+        self._active_response_id = None
+
+    def _push_audio(self, frame: rtc.AudioFrame) -> None:
+        if self._closed:
+            return
+        self._queue_msg(
+            types.LiveClientRealtimeInput(
+                media_chunks=[
+                    types.Blob(data=frame.data.tobytes(), mime_type="audio/pcm")
+                ]
+            )
+        )
+
+    def _queue_msg(self, msg: ClientEvents) -> None:
+        if not self._closed:
+            self._send_ch.send_nowait(msg)
+
+    async def aclose(self) -> None:
+        if self._send_ch.closed:
+            return
+        self._closed = True
+        self._send_ch.close()
+        await self._main_atask
+
+    @utils.log_exceptions(logger=logger)
+    async def _main_task(self):
+        @utils.log_exceptions(logger=logger)
+        async def _send_task():
+            try:
+                async for msg in self._send_ch:
+                    if self._closed:
+                        break
+                    await self._session.send(input=msg)
+            except websockets.exceptions.ConnectionClosedError as e:
+                logger.exception(f"Transcriber session closed in _send_task: {e}")
+                self._closed = True
+            except Exception as e:
+                logger.exception(f"Uncaught error in transcriber _send_task: {e}")
+                self._closed = True
+
+        @utils.log_exceptions(logger=logger)
+        async def _recv_task():
+            try:
+                while not self._closed:
+                    async for response in self._session.receive():
+                        if self._closed:
+                            break
+                        if self._active_response_id is None:
+                            self._active_response_id = utils.shortuuid()
+                            content = TranscriptionContent(
+                                response_id=self._active_response_id,
+                                text="",
+                            )
+                            self.emit("input_speech_started", content)
+
+                        server_content = response.server_content
+                        if server_content:
+                            model_turn = server_content.model_turn
+                            if model_turn:
+                                for part in model_turn.parts:
+                                    if part.text:
+                                        content.text += part.text
+
+                            if server_content.turn_complete:
+                                content.text = clean_transcription(content.text)
+                                self.emit("input_speech_done", content)
+                                self._active_response_id = None
+
+            except websockets.exceptions.ConnectionClosedError as e:
+                logger.exception(f"Transcriber session closed in _recv_task: {e}")
+                self._closed = True
+            except Exception as e:
+                logger.exception(f"Uncaught error in transcriber _recv_task: {e}")
+                self._closed = True
+
+        async with self._client.aio.live.connect(
+            model=self._model, config=self._config
+        ) as session:
+            self._session = session
+            tasks = [
+                asyncio.create_task(
+                    _send_task(), name="gemini-realtime-transcriber-send"
+                ),
+                asyncio.create_task(
+                    _recv_task(), name="gemini-realtime-transcriber-recv"
+                ),
+            ]
+
+            try:
+                await asyncio.gather(*tasks)
+            finally:
+                await utils.aio.gracefully_cancel(*tasks)
+                await self._session.close()
+
+
+def clean_transcription(text: str) -> str:
+    text = text.replace("\n", " ")
+    text = re.sub(r"\s+", " ", text)
+    return text.strip()
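
Note on the new transcriber module: TranscriberSession wraps a Gemini Live session as an event emitter. A minimal usage sketch, not part of the diff; it relies on the internal _push_audio helper, assumes raw PCM audio frames, and takes the import path from the RECORD entries below:

    import asyncio

    from google import genai
    from livekit import rtc
    from livekit.plugins.google.beta.realtime.transcriber import TranscriberSession


    async def transcribe(frames: list[rtc.AudioFrame]) -> None:
        # genai.Client() reads GOOGLE_API_KEY from the environment.
        client = genai.Client()
        session = TranscriberSession(client=client, model="gemini-2.0-flash-exp")

        # "input_speech_started" fires when a response turn begins;
        # "input_speech_done" fires after turn_complete, with whitespace cleaned.
        session.on("input_speech_done", lambda content: print(content.text))

        for frame in frames:
            session._push_audio(frame)  # internal helper; frames assumed to be raw PCM

        await asyncio.sleep(2.0)  # give the receive task time to drain the turn
        await session.aclose()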
--- /dev/null
+++ livekit/plugins/google/llm.py
@@ -0,0 +1,414 @@
+# Copyright 2023 LiveKit, Inc.
+#
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import annotations
+
+import asyncio
+import json
+import os
+from dataclasses import dataclass
+from typing import Any, Literal, MutableSet, Union, cast
+
+from livekit.agents import (
+    APIConnectionError,
+    APIStatusError,
+    llm,
+    utils,
+)
+from livekit.agents.llm import ToolChoice, _create_ai_function_info
+from livekit.agents.types import DEFAULT_API_CONNECT_OPTIONS, APIConnectOptions
+
+from google import genai
+from google.auth._default_async import default_async
+from google.genai import types
+from google.genai.errors import APIError, ClientError, ServerError
+
+from ._utils import _build_gemini_ctx, _build_tools
+from .log import logger
+from .models import ChatModels
+
+
+@dataclass
+class LLMOptions:
+    model: ChatModels | str
+    temperature: float | None
+    tool_choice: Union[ToolChoice, Literal["auto", "required", "none"]] = "auto"
+    vertexai: bool = False
+    project: str | None = None
+    location: str | None = None
+    candidate_count: int = 1
+    max_output_tokens: int | None = None
+    top_p: float | None = None
+    top_k: float | None = None
+    presence_penalty: float | None = None
+    frequency_penalty: float | None = None
+
+
+class LLM(llm.LLM):
+    def __init__(
+        self,
+        *,
+        model: ChatModels | str = "gemini-2.0-flash-exp",
+        api_key: str | None = None,
+        vertexai: bool = False,
+        project: str | None = None,
+        location: str | None = None,
+        candidate_count: int = 1,
+        temperature: float = 0.8,
+        max_output_tokens: int | None = None,
+        top_p: float | None = None,
+        top_k: float | None = None,
+        presence_penalty: float | None = None,
+        frequency_penalty: float | None = None,
+        tool_choice: Union[ToolChoice, Literal["auto", "required", "none"]] = "auto",
+    ) -> None:
+        """
+        Create a new instance of Google GenAI LLM.
+
+        Environment Requirements:
+        - For VertexAI: Set the `GOOGLE_APPLICATION_CREDENTIALS` environment variable to the path of the service account key file.
+        The Google Cloud project and location can be set via `project` and `location` arguments or the environment variables
+        `GOOGLE_CLOUD_PROJECT` and `GOOGLE_CLOUD_LOCATION`. By default, the project is inferred from the service account key file,
+        and the location defaults to "us-central1".
+        - For Google Gemini API: Set the `api_key` argument or the `GOOGLE_API_KEY` environment variable.
+
+        Args:
+            model (ChatModels | str, optional): The model name to use. Defaults to "gemini-2.0-flash-exp".
+            api_key (str, optional): The API key for Google Gemini. If not provided, it attempts to read from the `GOOGLE_API_KEY` environment variable.
+            vertexai (bool, optional): Whether to use VertexAI. Defaults to False.
+            project (str, optional): The Google Cloud project to use (only for VertexAI). Defaults to None.
+            location (str, optional): The location to use for VertexAI API requests. Default value is "us-central1".
+            candidate_count (int, optional): Number of candidate responses to generate. Defaults to 1.
+            temperature (float, optional): Sampling temperature for response generation. Defaults to 0.8.
+            max_output_tokens (int, optional): Maximum number of tokens to generate in the output. Defaults to None.
+            top_p (float, optional): The nucleus sampling probability for response generation. Defaults to None.
+            top_k (float, optional): The top-k sampling value for response generation. Defaults to None.
+            presence_penalty (float, optional): Penalizes the model for generating previously mentioned concepts. Defaults to None.
+            frequency_penalty (float, optional): Penalizes the model for repeating words. Defaults to None.
+            tool_choice (ToolChoice or Literal["auto", "required", "none"], optional): Specifies whether to use tools during response generation. Defaults to "auto".
+        """
+        super().__init__()
+        self._capabilities = llm.LLMCapabilities(supports_choices_on_int=False)
+        self._project_id = project or os.environ.get("GOOGLE_CLOUD_PROJECT", None)
+        self._location = location or os.environ.get(
+            "GOOGLE_CLOUD_LOCATION", "us-central1"
+        )
+        self._api_key = api_key or os.environ.get("GOOGLE_API_KEY", None)
+        _gac = os.environ.get("GOOGLE_APPLICATION_CREDENTIALS")
+        if _gac is None:
+            raise ValueError(
+                "`GOOGLE_APPLICATION_CREDENTIALS` environment variable is not set. please set it to the path of the service account key file."
+            )
+
+        if vertexai:
+            if not self._project_id:
+                _, self._project_id = default_async(
+                    scopes=["https://www.googleapis.com/auth/cloud-platform"]
+                )
+            self._api_key = None  # VertexAI does not require an API key
+
+        else:
+            self._project_id = None
+            self._location = None
+            if not self._api_key:
+                raise ValueError(
+                    "API key is required for Google API either via api_key or GOOGLE_API_KEY environment variable"
+                )
+
+        self._opts = LLMOptions(
+            model=model,
+            temperature=temperature,
+            tool_choice=tool_choice,
+            vertexai=vertexai,
+            project=project,
+            location=location,
+            candidate_count=candidate_count,
+            max_output_tokens=max_output_tokens,
+            top_p=top_p,
+            top_k=top_k,
+            presence_penalty=presence_penalty,
+            frequency_penalty=frequency_penalty,
+        )
+        self._client = genai.Client(
+            api_key=self._api_key,
+            vertexai=vertexai,
+            project=self._project_id,
+            location=self._location,
+        )
+        self._running_fncs: MutableSet[asyncio.Task[Any]] = set()
+
+    def chat(
+        self,
+        *,
+        chat_ctx: llm.ChatContext,
+        conn_options: APIConnectOptions = DEFAULT_API_CONNECT_OPTIONS,
+        fnc_ctx: llm.FunctionContext | None = None,
+        temperature: float | None = None,
+        n: int | None = 1,
+        parallel_tool_calls: bool | None = None,
+        tool_choice: Union[ToolChoice, Literal["auto", "required", "none"]]
+        | None = None,
+    ) -> "LLMStream":
+        if tool_choice is None:
+            tool_choice = self._opts.tool_choice
+
+        if temperature is None:
+            temperature = self._opts.temperature
+
+        return LLMStream(
+            self,
+            client=self._client,
+            model=self._opts.model,
+            max_output_tokens=self._opts.max_output_tokens,
+            top_p=self._opts.top_p,
+            top_k=self._opts.top_k,
+            presence_penalty=self._opts.presence_penalty,
+            frequency_penalty=self._opts.frequency_penalty,
+            chat_ctx=chat_ctx,
+            fnc_ctx=fnc_ctx,
+            conn_options=conn_options,
+            n=n,
+            temperature=temperature,
+            tool_choice=tool_choice,
+        )
+
+
+class LLMStream(llm.LLMStream):
+    def __init__(
+        self,
+        llm: LLM,
+        *,
+        client: genai.Client,
+        model: str | ChatModels,
+        chat_ctx: llm.ChatContext,
+        conn_options: APIConnectOptions,
+        fnc_ctx: llm.FunctionContext | None,
+        temperature: float | None,
+        n: int | None,
+        max_output_tokens: int | None,
+        top_p: float | None,
+        top_k: float | None,
+        presence_penalty: float | None,
+        frequency_penalty: float | None,
+        tool_choice: Union[ToolChoice, Literal["auto", "required", "none"]],
+    ) -> None:
+        super().__init__(
+            llm, chat_ctx=chat_ctx, fnc_ctx=fnc_ctx, conn_options=conn_options
+        )
+        self._client = client
+        self._model = model
+        self._llm: LLM = llm
+        self._max_output_tokens = max_output_tokens
+        self._top_p = top_p
+        self._top_k = top_k
+        self._presence_penalty = presence_penalty
+        self._frequency_penalty = frequency_penalty
+        self._temperature = temperature
+        self._n = n
+        self._tool_choice = tool_choice
+
+    async def _run(self) -> None:
+        retryable = True
+        request_id = utils.shortuuid()
+
+        try:
+            opts: dict[str, Any] = dict()
+            turns, system_instruction = _build_gemini_ctx(self._chat_ctx, id(self))
+
+            if self._fnc_ctx and len(self._fnc_ctx.ai_functions) > 0:
+                functions = _build_tools(self._fnc_ctx)
+                opts["tools"] = [types.Tool(function_declarations=functions)]
+
+                if self._tool_choice is not None:
+                    if isinstance(self._tool_choice, ToolChoice):
+                        # specific function
+                        tool_config = types.ToolConfig(
+                            function_calling_config=types.FunctionCallingConfig(
+                                mode="ANY",
+                                allowed_function_names=[self._tool_choice.name],
+                            )
+                        )
+                    elif self._tool_choice == "required":
+                        # model must call any function
+                        tool_config = types.ToolConfig(
+                            function_calling_config=types.FunctionCallingConfig(
+                                mode="ANY",
+                                allowed_function_names=[
+                                    fnc.name
+                                    for fnc in self._fnc_ctx.ai_functions.values()
+                                ],
+                            )
+                        )
+                    elif self._tool_choice == "auto":
+                        # model can call any function
+                        tool_config = types.ToolConfig(
+                            function_calling_config=types.FunctionCallingConfig(
+                                mode="AUTO"
+                            )
+                        )
+                    elif self._tool_choice == "none":
+                        # model cannot call any function
+                        tool_config = types.ToolConfig(
+                            function_calling_config=types.FunctionCallingConfig(
+                                mode="NONE",
+                            )
+                        )
+                    opts["tool_config"] = tool_config
+
+            config = types.GenerateContentConfig(
+                candidate_count=self._n,
+                temperature=self._temperature,
+                max_output_tokens=self._max_output_tokens,
+                top_p=self._top_p,
+                top_k=self._top_k,
+                presence_penalty=self._presence_penalty,
+                frequency_penalty=self._frequency_penalty,
+                system_instruction=system_instruction,
+                **opts,
+            )
+            async for response in self._client.aio.models.generate_content_stream(
+                model=self._model,
+                contents=cast(types.ContentListUnion, turns),
+                config=config,
+            ):
+                if response.prompt_feedback:
+                    raise APIStatusError(
+                        response.prompt_feedback.json(),
+                        retryable=False,
+                        request_id=request_id,
+                    )
+
+                if (
+                    not response.candidates
+                    or not response.candidates[0].content
+                    or not response.candidates[0].content.parts
+                ):
+                    raise APIStatusError(
+                        "No candidates in the response",
+                        retryable=True,
+                        request_id=request_id,
+                    )
+
+                if len(response.candidates) > 1:
+                    logger.warning(
+                        "gemini llm: there are multiple candidates in the response, returning response from the first one."
+                    )
+
+                for index, part in enumerate(response.candidates[0].content.parts):
+                    chat_chunk = self._parse_part(request_id, index, part)
+                    if chat_chunk is not None:
+                        retryable = False
+                        self._event_ch.send_nowait(chat_chunk)
+
+                if response.usage_metadata is not None:
+                    usage = response.usage_metadata
+                    self._event_ch.send_nowait(
+                        llm.ChatChunk(
+                            request_id=request_id,
+                            usage=llm.CompletionUsage(
+                                completion_tokens=usage.candidates_token_count or 0,
+                                prompt_tokens=usage.prompt_token_count or 0,
+                                total_tokens=usage.total_token_count or 0,
+                            ),
+                        )
+                    )
+        except ClientError as e:
+            raise APIStatusError(
+                "gemini llm: client error",
+                status_code=e.code,
+                body=e.message,
+                request_id=request_id,
+                retryable=False if e.code != 429 else True,
+            ) from e
+        except ServerError as e:
+            raise APIStatusError(
+                "gemini llm: server error",
+                status_code=e.code,
+                body=e.message,
+                request_id=request_id,
+                retryable=retryable,
+            ) from e
+        except APIError as e:
+            raise APIStatusError(
+                "gemini llm: api error",
+                status_code=e.code,
+                body=e.message,
+                request_id=request_id,
+                retryable=retryable,
+            ) from e
+        except Exception as e:
+            raise APIConnectionError(
+                "gemini llm: error generating content",
+                retryable=retryable,
+            ) from e
+
+    def _parse_part(
+        self, id: str, index: int, part: types.Part
+    ) -> llm.ChatChunk | None:
+        if part.function_call:
+            return self._try_build_function(id, index, part)
+
+        return llm.ChatChunk(
+            request_id=id,
+            choices=[
+                llm.Choice(
+                    delta=llm.ChoiceDelta(content=part.text, role="assistant"),
+                    index=index,
+                )
+            ],
+        )
+
+    def _try_build_function(
+        self, id: str, index: int, part: types.Part
+    ) -> llm.ChatChunk | None:
+        if part.function_call is None:
+            logger.warning("gemini llm: no function call in the response")
+            return None
+
+        if part.function_call.name is None:
+            logger.warning("gemini llm: no function name in the response")
+            return None
+
+        if part.function_call.id is None:
+            part.function_call.id = utils.shortuuid()
+
+        if self._fnc_ctx is None:
+            logger.warning(
+                "google stream tried to run function without function context"
+            )
+            return None
+
+        fnc_info = _create_ai_function_info(
+            self._fnc_ctx,
+            part.function_call.id,
+            part.function_call.name,
+            json.dumps(part.function_call.args),
+        )
+
+        self._function_calls_info.append(fnc_info)
+
+        return llm.ChatChunk(
+            request_id=id,
+            choices=[
+                llm.Choice(
+                    delta=llm.ChoiceDelta(
+                        role="assistant",
+                        tool_calls=[fnc_info],
+                        content=part.text,
+                    ),
+                    index=index,
+                )
+            ],
+        )
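
Note on the new llm.py: it streams chat completions through google-genai's generate_content_stream. A minimal usage sketch, not part of the diff; the ChatContext helpers follow the livekit-agents 0.12.x API. Note that, as written in the hunk above, __init__ raises unless GOOGLE_APPLICATION_CREDENTIALS is set, even when only a Gemini API key is used:

    import asyncio

    from livekit.agents import llm as allm
    from livekit.plugins.google import LLM


    async def main() -> None:
        # Requires GOOGLE_API_KEY; per the unconditional check above,
        # GOOGLE_APPLICATION_CREDENTIALS must also point at a key file.
        gemini = LLM(model="gemini-2.0-flash-exp", temperature=0.8)

        chat_ctx = allm.ChatContext().append(
            role="user", text="Give me a one-sentence summary of WebRTC."
        )

        stream = gemini.chat(chat_ctx=chat_ctx)
        async for chunk in stream:  # LLMStream yields llm.ChatChunk values
            for choice in chunk.choices:
                if choice.delta.content:
                    print(choice.delta.content, end="", flush=True)


    asyncio.run(main())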
--- livekit/plugins/google/models.py
+++ livekit/plugins/google/models.py
@@ -93,3 +93,5 @@ SpeechLanguages = Literal[
 Gender = Literal["male", "female", "neutral"]
 
 AudioEncoding = Literal["wav", "mp3", "ogg", "mulaw", "alaw", "linear16"]
+
+ChatModels = Literal["gemini-2.0-flash-exp", "gemini-1.5-pro"]
--- livekit/plugins/google/stt.py
+++ livekit/plugins/google/stt.py
@@ -89,9 +89,9 @@ class STT(stt.STT):
         detect_language: bool = True,
         interim_results: bool = True,
         punctuate: bool = True,
-        spoken_punctuation: bool = True,
-        model: SpeechModels = "long",
-        location: str = "global",
+        spoken_punctuation: bool = False,
+        model: SpeechModels = "chirp_2",
+        location: str = "us-central1",
         sample_rate: int = 16000,
         credentials_info: dict | None = None,
         credentials_file: str | None = None,
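
Note on the STT defaults: anyone constructing STT() with no arguments now gets Chirp 2 in us-central1 with spoken punctuation disabled. A short sketch, assuming the remaining parameters are unchanged from 0.9.1:

    from livekit.plugins.google import STT

    # 0.10.1 defaults: model="chirp_2", location="us-central1", spoken_punctuation=False
    stt = STT()

    # Restore the 0.9.1 behavior explicitly:
    stt_legacy = STT(model="long", location="global", spoken_punctuation=True)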
--- livekit/plugins/google/version.py
+++ livekit/plugins/google/version.py
@@ -12,4 +12,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-__version__ = "0.9.1"
+__version__ = "0.10.1"
--- livekit_plugins_google-0.9.1.dist-info/METADATA
+++ livekit_plugins_google-0.10.1.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: livekit-plugins-google
-Version: 0.9.1
+Version: 0.10.1
 Summary: Agent Framework plugin for services from Google Cloud
 Home-page: https://github.com/livekit/agents
 License: Apache-2.0
@@ -22,7 +22,7 @@ Description-Content-Type: text/markdown
 Requires-Dist: google-auth<3,>=2
 Requires-Dist: google-cloud-speech<3,>=2
 Requires-Dist: google-cloud-texttospeech<3,>=2
-Requires-Dist: google-genai>=0.3.0
+Requires-Dist: google-genai==0.5.0
 Requires-Dist: livekit-agents>=0.12.3
 Dynamic: classifier
 Dynamic: description
--- /dev/null
+++ livekit_plugins_google-0.10.1.dist-info/RECORD
@@ -0,0 +1,18 @@
+livekit/plugins/google/__init__.py,sha256=e_kSlFNmKhyyeliz7f4WOKc_Y0-y39QjO5nCWuguhss,1171
+livekit/plugins/google/_utils.py,sha256=mjsqblhGMgAZ2MNPisAVkNsqq4gfO6vvprEKzAGoVwE,7248
+livekit/plugins/google/llm.py,sha256=vL8iyRqWVPT0wCDeXTlybytlyJ-J-VolVQYqP-ZVlb0,16388
+livekit/plugins/google/log.py,sha256=GI3YWN5YzrafnUccljzPRS_ZALkMNk1i21IRnTl2vNA,69
+livekit/plugins/google/models.py,sha256=w_qmOk5y86vjtszDiGpP9p0ctjQeaB8-UzqprxgpvCY,1407
+livekit/plugins/google/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+livekit/plugins/google/stt.py,sha256=FA6Lpeb8QvRXLzkQ7cjsoMxHdtEGwHWkpN_TKqAdKAQ,21097
+livekit/plugins/google/tts.py,sha256=95qXCigVQYWNbcN3pIKBpIah4b31U_MWtXv5Ji0AMc4,9229
+livekit/plugins/google/version.py,sha256=byify7f_Iz8-hX3925GgkmUjX78ckJT4IS6Klbgy6WQ,601
+livekit/plugins/google/beta/__init__.py,sha256=AxRYc7NGG62Tv1MmcZVCDHNvlhbC86hM-_yP01Qb28k,47
+livekit/plugins/google/beta/realtime/__init__.py,sha256=sGTn6JFNyA30QUXBZ_BV3l2eHpGAzR35ByXxg77vWNU,205
+livekit/plugins/google/beta/realtime/api_proto.py,sha256=9EhmwgeIgKDqdSijv5Q9pgx7UhAakK02ZDwbnUsra_o,657
+livekit/plugins/google/beta/realtime/realtime_api.py,sha256=579kextvlPbBPV6Zoo3Mkqk0C0UN-Ybfb-gTiR7-f9E,21060
+livekit/plugins/google/beta/realtime/transcriber.py,sha256=3TaYbtvPWHkxKlDSZSMLWBbR7KewBRg3HcdIxuGhl9c,5880
+livekit_plugins_google-0.10.1.dist-info/METADATA,sha256=7XQzkOSP0LocoGcx4TX6VLIUsEvVIvQ1J6wLF7kLocE,2057
+livekit_plugins_google-0.10.1.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+livekit_plugins_google-0.10.1.dist-info/top_level.txt,sha256=OoDok3xUmXbZRvOrfvvXB-Juu4DX79dlq188E19YHoo,8
+livekit_plugins_google-0.10.1.dist-info/RECORD,,
--- livekit_plugins_google-0.9.1.dist-info/RECORD
+++ /dev/null
@@ -1,15 +0,0 @@
-livekit/plugins/google/__init__.py,sha256=TY-5FwEX4Vs7GLO1wSegIxC5W4UPkHBthlr-__yuE4w,1143
-livekit/plugins/google/log.py,sha256=GI3YWN5YzrafnUccljzPRS_ZALkMNk1i21IRnTl2vNA,69
-livekit/plugins/google/models.py,sha256=cBXhZGY9bFaSCyL9VeSng9wsxhf3peJi3AUYBKV-8GQ,1343
-livekit/plugins/google/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-livekit/plugins/google/stt.py,sha256=E5kXPbicH4FEXBjyBzfqQWA-nPhKkojzcc-cbtWdmNs,21088
-livekit/plugins/google/tts.py,sha256=95qXCigVQYWNbcN3pIKBpIah4b31U_MWtXv5Ji0AMc4,9229
-livekit/plugins/google/version.py,sha256=4GcbYy7J7gvPMEA4wlPB0BJqg8CjF7HRVjQ-i1EH7M8,600
-livekit/plugins/google/beta/__init__.py,sha256=AxRYc7NGG62Tv1MmcZVCDHNvlhbC86hM-_yP01Qb28k,47
-livekit/plugins/google/beta/realtime/__init__.py,sha256=XnJpNIN6NRm7Y4hH2RNA8Xt-tTmkZEKCs_zzU3_koBI,251
-livekit/plugins/google/beta/realtime/api_proto.py,sha256=IHYBryuzpfGQD86Twlfq6qxrBhFHptf_IvOk36Wxo1M,2156
-livekit/plugins/google/beta/realtime/realtime_api.py,sha256=YUEf3iR9dIctnXRqev_qKSBM_plqcYKudodFO8nADJY,15966
-livekit_plugins_google-0.9.1.dist-info/METADATA,sha256=y5d0OEdbkoGk0IPGURiDZbt6e6sWhsxOU2cioNrPu7w,2056
-livekit_plugins_google-0.9.1.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
-livekit_plugins_google-0.9.1.dist-info/top_level.txt,sha256=OoDok3xUmXbZRvOrfvvXB-Juu4DX79dlq188E19YHoo,8
-livekit_plugins_google-0.9.1.dist-info/RECORD,,