livekit-plugins-google 1.2.9__py3-none-any.whl → 1.2.12__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- livekit/plugins/google/beta/gemini_tts.py +11 -0
- livekit/plugins/google/beta/realtime/realtime_api.py +58 -6
- livekit/plugins/google/llm.py +7 -0
- livekit/plugins/google/stt.py +13 -0
- livekit/plugins/google/tts.py +8 -0
- livekit/plugins/google/utils.py +31 -9
- livekit/plugins/google/version.py +1 -1
- {livekit_plugins_google-1.2.9.dist-info → livekit_plugins_google-1.2.12.dist-info}/METADATA +2 -2
- livekit_plugins_google-1.2.12.dist-info/RECORD +18 -0
- livekit_plugins_google-1.2.9.dist-info/RECORD +0 -18
- {livekit_plugins_google-1.2.9.dist-info → livekit_plugins_google-1.2.12.dist-info}/WHEEL +0 -0
livekit/plugins/google/beta/gemini_tts.py
CHANGED
@@ -148,6 +148,17 @@ class TTS(tts.TTS):
             location=gcp_location,
         )

+    @property
+    def model(self) -> str:
+        return self._opts.model
+
+    @property
+    def provider(self) -> str:
+        if self._client.vertexai:
+            return "Vertex AI"
+        else:
+            return "Gemini"
+
     def synthesize(
         self, text: str, *, conn_options: APIConnectOptions = DEFAULT_API_CONNECT_OPTIONS
     ) -> ChunkedStream:
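Aside, not part of the diff: this release adds matching `model` / `provider` properties across the plugin's TTS, STT, LLM, and realtime classes, so downstream code can label usage uniformly. A minimal sketch of how a logging or metrics layer might consume them; the `ProviderInfo` protocol and `describe` helper are illustrative, not part of the package:

from typing import Protocol


class ProviderInfo(Protocol):
    """Anything exposing the new read-only properties added in this release."""

    @property
    def model(self) -> str: ...

    @property
    def provider(self) -> str: ...


def describe(component: ProviderInfo) -> str:
    # e.g. "Gemini / <model name>" or "Google Cloud Platform / latest_long"
    return f"{component.provider} / {component.model}"
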
livekit/plugins/google/beta/realtime/realtime_api.py
CHANGED
@@ -10,12 +10,12 @@ from collections.abc import Iterator
 from dataclasses import dataclass, field
 from typing import Literal

-from google import genai
-from google.genai import types
+from google.genai import Client as GenAIClient, types
 from google.genai.live import AsyncSession
 from livekit import rtc
 from livekit.agents import APIConnectionError, llm, utils
 from livekit.agents.metrics import RealtimeModelMetrics
+from livekit.agents.metrics.base import Metadata
 from livekit.agents.types import (
     DEFAULT_API_CONNECT_OPTIONS,
     NOT_GIVEN,
@@ -76,6 +76,8 @@ class _RealtimeOptions:
     context_window_compression: NotGivenOr[types.ContextWindowCompressionConfig] = NOT_GIVEN
     api_version: NotGivenOr[str] = NOT_GIVEN
     gemini_tools: NotGivenOr[list[_LLMTool]] = NOT_GIVEN
+    tool_behavior: NotGivenOr[types.Behavior] = NOT_GIVEN
+    tool_response_scheduling: NotGivenOr[types.FunctionResponseScheduling] = NOT_GIVEN


 @dataclass
@@ -136,6 +138,8 @@ class RealtimeModel(llm.RealtimeModel):
         proactivity: NotGivenOr[bool] = NOT_GIVEN,
         realtime_input_config: NotGivenOr[types.RealtimeInputConfig] = NOT_GIVEN,
         context_window_compression: NotGivenOr[types.ContextWindowCompressionConfig] = NOT_GIVEN,
+        tool_behavior: NotGivenOr[types.Behavior] = NOT_GIVEN,
+        tool_response_scheduling: NotGivenOr[types.FunctionResponseScheduling] = NOT_GIVEN,
         api_version: NotGivenOr[str] = NOT_GIVEN,
         conn_options: APIConnectOptions = DEFAULT_API_CONNECT_OPTIONS,
         http_options: NotGivenOr[types.HttpOptions] = NOT_GIVEN,
@@ -174,6 +178,8 @@ class RealtimeModel(llm.RealtimeModel):
             proactivity (bool, optional): Whether to enable proactive audio. Defaults to False.
             realtime_input_config (RealtimeInputConfig, optional): The configuration for realtime input. Defaults to None.
             context_window_compression (ContextWindowCompressionConfig, optional): The configuration for context window compression. Defaults to None.
+            tool_behavior (Behavior, optional): The behavior for tool call. Default behavior is BLOCK in Gemini Realtime API.
+            tool_response_scheduling (FunctionResponseScheduling, optional): The scheduling for tool response. Default scheduling is WHEN_IDLE.
             conn_options (APIConnectOptions, optional): The configuration for the API connection. Defaults to DEFAULT_API_CONNECT_OPTIONS.
             _gemini_tools (list[LLMTool], optional): Gemini-specific tools to use for the session. This parameter is experimental and may change.

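Not part of the diff: the two new constructor arguments map directly onto google-genai's live-tool types. A minimal usage sketch, assuming the usual `google.beta.realtime.RealtimeModel` entry point and Gemini credentials available in the environment:

from google.genai import types

from livekit.plugins import google

# Sketch only: run tool calls without blocking generation, and inject tool
# results as soon as they are ready instead of waiting for the model to idle.
model = google.beta.realtime.RealtimeModel(
    tool_behavior=types.Behavior.NON_BLOCKING,
    tool_response_scheduling=types.FunctionResponseScheduling.INTERRUPT,
)
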
@@ -265,12 +271,24 @@ class RealtimeModel(llm.RealtimeModel):
             context_window_compression=context_window_compression,
             api_version=api_version,
             gemini_tools=_gemini_tools,
+            tool_behavior=tool_behavior,
             conn_options=conn_options,
             http_options=http_options,
         )

         self._sessions = weakref.WeakSet[RealtimeSession]()

+    @property
+    def model(self) -> str:
+        return self._opts.model
+
+    @property
+    def provider(self) -> str:
+        if self._opts.vertexai:
+            return "Vertex AI"
+        else:
+            return "Gemini"
+
     def session(self) -> RealtimeSession:
         sess = RealtimeSession(self)
         self._sessions.add(sess)
@@ -281,6 +299,8 @@ class RealtimeModel(llm.RealtimeModel):
         *,
         voice: NotGivenOr[str] = NOT_GIVEN,
         temperature: NotGivenOr[float] = NOT_GIVEN,
+        tool_behavior: NotGivenOr[types.Behavior] = NOT_GIVEN,
+        tool_response_scheduling: NotGivenOr[types.FunctionResponseScheduling] = NOT_GIVEN,
     ) -> None:
         """
         Update the options for the RealtimeModel.
@@ -296,10 +316,18 @@ class RealtimeModel(llm.RealtimeModel):
         if is_given(temperature):
             self._opts.temperature = temperature

+        if is_given(tool_behavior):
+            self._opts.tool_behavior = tool_behavior
+
+        if is_given(tool_response_scheduling):
+            self._opts.tool_response_scheduling = tool_response_scheduling
+
         for sess in self._sessions:
             sess.update_options(
                 voice=self._opts.voice,
                 temperature=self._opts.temperature,
+                tool_behavior=self._opts.tool_behavior,
+                tool_response_scheduling=self._opts.tool_response_scheduling,
             )

     async def aclose(self) -> None:
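As an aside (not from the package source): both options can also be changed after construction; `RealtimeModel.update_options` fans the new values out to every live session, as the hunk above shows. A short sketch reusing the `model` instance from the earlier example:

from google.genai import types

# Sketch only: fall back to the default WHEN_IDLE scheduling at runtime.
# Per the session-level hunk further down, changing the scheduling alone
# does not force a session restart.
model.update_options(
    tool_response_scheduling=types.FunctionResponseScheduling.WHEN_IDLE,
)
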
@@ -333,7 +361,7 @@ class RealtimeSession(llm.RealtimeSession):
         if api_version:
             http_options.api_version = api_version

-        self._client = genai.Client(
+        self._client = GenAIClient(
             api_key=self._opts.api_key,
             vertexai=self._opts.vertexai,
             project=self._opts.project,
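A side note, not part of the diff: the session now imports the client class directly (`Client as GenAIClient`) rather than going through the `genai` module namespace. A standalone sketch of the same construction pattern, assuming the API key is read from a `GOOGLE_API_KEY` environment variable; the keyword arguments mirror what the session forwards from its options:

import os

from google.genai import Client as GenAIClient

# Sketch only: api_key targets the Gemini API; vertexai=True together with
# project/location would target Vertex AI instead.
client = GenAIClient(api_key=os.environ["GOOGLE_API_KEY"])
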
@@ -377,6 +405,8 @@ class RealtimeSession(llm.RealtimeSession):
         voice: NotGivenOr[str] = NOT_GIVEN,
         temperature: NotGivenOr[float] = NOT_GIVEN,
         tool_choice: NotGivenOr[llm.ToolChoice | None] = NOT_GIVEN,
+        tool_behavior: NotGivenOr[types.Behavior] = NOT_GIVEN,
+        tool_response_scheduling: NotGivenOr[types.FunctionResponseScheduling] = NOT_GIVEN,
     ) -> None:
         should_restart = False
         if is_given(voice) and self._opts.voice != voice:
@@ -387,6 +417,20 @@ class RealtimeSession(llm.RealtimeSession):
             self._opts.temperature = temperature if is_given(temperature) else NOT_GIVEN
             should_restart = True

+        if is_given(tool_behavior) and self._opts.tool_behavior != tool_behavior:
+            self._opts.tool_behavior = tool_behavior
+            should_restart = True
+
+        if (
+            is_given(tool_response_scheduling)
+            and self._opts.tool_response_scheduling != tool_response_scheduling
+        ):
+            self._opts.tool_response_scheduling = tool_response_scheduling
+            # no need to restart
+
+        if is_given(tool_choice):
+            logger.warning("tool_choice is not supported by the Google Realtime API.")
+
         if should_restart:
             self._mark_restart_needed()

@@ -418,7 +462,11 @@ class RealtimeSession(llm.RealtimeSession):
         ).to_provider_format(format="google", inject_dummy_user_message=False)
         # we are not generating, and do not need to inject
         turns = [types.Content.model_validate(turn) for turn in turns_dict]
-        tool_results = get_tool_results_for_realtime(
+        tool_results = get_tool_results_for_realtime(
+            append_ctx,
+            vertexai=self._opts.vertexai,
+            tool_response_scheduling=self._opts.tool_response_scheduling,
+        )
         if turns:
             self._send_client_event(types.LiveClientContent(turns=turns, turn_complete=False))
         if tool_results:
@@ -430,7 +478,7 @@ class RealtimeSession(llm.RealtimeSession):

     async def update_tools(self, tools: list[llm.FunctionTool | llm.RawFunctionTool]) -> None:
         new_declarations: list[types.FunctionDeclaration] = to_fnc_ctx(
-            tools, use_parameters_json_schema=False
+            tools, use_parameters_json_schema=False, tool_behavior=self._opts.tool_behavior
         )
         current_tool_names = {f.name for f in self._gemini_declarations}
         new_tool_names = {f.name for f in new_declarations}
@@ -830,6 +878,7 @@ class RealtimeSession(llm.RealtimeSession):
             message_stream=self._current_generation.message_ch,
             function_stream=self._current_generation.function_ch,
             user_initiated=False,
+            response_id=self._current_generation.response_id,
         )

         if self._pending_generation_fut and not self._pending_generation_fut.done():
@@ -1019,7 +1068,7 @@ class RealtimeSession(llm.RealtimeSession):
             return token_details_map

         metrics = RealtimeModelMetrics(
-            label=self._realtime_model.
+            label=self._realtime_model.label,
             request_id=current_gen.response_id,
             timestamp=current_gen._created_timestamp,
             duration=duration,
@@ -1044,6 +1093,9 @@ class RealtimeSession(llm.RealtimeSession):
             output_token_details=RealtimeModelMetrics.OutputTokenDetails(
                 **_token_details_map(usage_metadata.response_tokens_details),
             ),
+            metadata=Metadata(
+                model_name=self._realtime_model.model, model_provider=self._realtime_model.provider
+            ),
         )
         self.emit("metrics_collected", metrics)

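Illustrative usage, not from the package: with `Metadata` attached, metrics consumers can tell which model and provider produced each `RealtimeModelMetrics` event. The `metrics_collected` event name and the metadata fields come from the hunks above, while the handler itself is a sketch:

from livekit.agents.metrics import RealtimeModelMetrics


def _on_metrics(metrics: RealtimeModelMetrics) -> None:
    # Sketch only: metadata carries the new model_name / model_provider fields.
    if metrics.metadata is not None:
        print(metrics.metadata.model_provider, metrics.metadata.model_name)


session = model.session()
session.on("metrics_collected", _on_metrics)
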
livekit/plugins/google/llm.py
CHANGED
@@ -187,6 +187,13 @@ class LLM(llm.LLM):
     def model(self) -> str:
         return self._opts.model

+    @property
+    def provider(self) -> str:
+        if self._client.vertexai:
+            return "Vertex AI"
+        else:
+            return "Gemini"
+
     def chat(
         self,
         *,
livekit/plugins/google/stt.py
CHANGED
@@ -70,6 +70,7 @@ class STTOptions:
     spoken_punctuation: bool
     enable_word_time_offsets: bool
     enable_word_confidence: bool
+    enable_voice_activity_events: bool
     model: SpeechModels | str
     sample_rate: int
     min_confidence_threshold: float
@@ -103,6 +104,7 @@ class STT(stt.STT):
         spoken_punctuation: bool = False,
         enable_word_time_offsets: bool = True,
         enable_word_confidence: bool = False,
+        enable_voice_activity_events: bool = False,
         model: SpeechModels | str = "latest_long",
         location: str = "global",
         sample_rate: int = 16000,
@@ -127,6 +129,7 @@ class STT(stt.STT):
             spoken_punctuation(bool): whether to use spoken punctuation (default: False)
             enable_word_time_offsets(bool): whether to enable word time offsets (default: True)
             enable_word_confidence(bool): whether to enable word confidence (default: False)
+            enable_voice_activity_events(bool): whether to enable voice activity events (default: False)
             model(SpeechModels): the model to use for recognition default: "latest_long"
             location(str): the location to use for recognition default: "global"
             sample_rate(int): the sample rate of the audio default: 16000
@@ -168,6 +171,7 @@ class STT(stt.STT):
             spoken_punctuation=spoken_punctuation,
             enable_word_time_offsets=enable_word_time_offsets,
             enable_word_confidence=enable_word_confidence,
+            enable_voice_activity_events=enable_voice_activity_events,
             model=model,
             sample_rate=sample_rate,
             min_confidence_threshold=min_confidence_threshold,
@@ -179,6 +183,14 @@ class STT(stt.STT):
             connect_cb=self._create_client,
         )

+    @property
+    def model(self) -> str:
+        return self._config.model
+
+    @property
+    def provider(self) -> str:
+        return "Google Cloud Platform"
+
     async def _create_client(self, timeout: float) -> SpeechAsyncClient:
         # Add support for passing a specific location that matches recognizer
         # see: https://cloud.google.com/speech-to-text/v2/docs/speech-to-text-supported-languages
@@ -507,6 +519,7 @@ class SpeechStream(stt.SpeechStream):
             ),
             streaming_features=cloud_speech.StreamingRecognitionFeatures(
                 interim_results=self._config.interim_results,
+                enable_voice_activity_events=self._config.enable_voice_activity_events,
             ),
         )

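Not part of the diff: the new flag is threaded from the constructor into `StreamingRecognitionFeatures`, so Google Cloud can emit speech begin/end activity events on the stream. A minimal sketch, assuming Google Cloud credentials are already configured in the environment:

from livekit.plugins import google

# Sketch only: opt in to voice activity (speech start/end) events.
stt = google.STT(
    model="latest_long",
    enable_voice_activity_events=True,
)
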
livekit/plugins/google/tts.py
CHANGED
@@ -155,6 +155,14 @@ class TTS(tts.TTS):
         )
         self._streams = weakref.WeakSet[SynthesizeStream]()

+    @property
+    def model(self) -> str:
+        return "Chirp3"
+
+    @property
+    def provider(self) -> str:
+        return "Google Cloud Platform"
+
     def update_options(
         self,
         *,
livekit/plugins/google/utils.py
CHANGED
@@ -16,6 +16,8 @@ from livekit.agents.llm.tool_context import (
     is_function_tool,
     is_raw_function_tool,
 )
+from livekit.agents.types import NOT_GIVEN, NotGivenOr
+from livekit.agents.utils import is_given

 from .log import logger
 from .tools import _LLMTool
@@ -24,7 +26,10 @@ __all__ = ["to_fnc_ctx"]


 def to_fnc_ctx(
-    fncs: list[FunctionTool | RawFunctionTool],
+    fncs: list[FunctionTool | RawFunctionTool],
+    *,
+    use_parameters_json_schema: bool = True,
+    tool_behavior: NotGivenOr[types.Behavior] = NOT_GIVEN,
 ) -> list[types.FunctionDeclaration]:
     tools: list[types.FunctionDeclaration] = []
     for fnc in fncs:
@@ -43,10 +48,14 @@ def to_fnc_ctx(
                     info.raw_schema.get("parameters", {})
                 )
             )
+
+            if is_given(tool_behavior):
+                fnc_kwargs["behavior"] = tool_behavior
+
             tools.append(types.FunctionDeclaration(**fnc_kwargs))

         elif is_function_tool(fnc):
-            tools.append(_build_gemini_fnc(fnc))
+            tools.append(_build_gemini_fnc(fnc, tool_behavior=tool_behavior))

     return tools

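Aside, not part of the diff: when a `tool_behavior` is supplied, it ends up on each generated declaration's `behavior` field. An illustrative hand-built equivalent using google-genai types; the function name and description are made up for the example:

from google.genai import types

# Sketch only: NON_BLOCKING lets the Live API keep generating while the tool
# runs; BLOCKING (the Realtime API default) pauses generation until it returns.
declaration = types.FunctionDeclaration(
    name="lookup_weather",
    description="Look up the current weather for a city.",
    behavior=types.Behavior.NON_BLOCKING,
)
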
@@ -88,7 +97,10 @@ def create_tools_config(


 def get_tool_results_for_realtime(
-    chat_ctx: llm.ChatContext,
+    chat_ctx: llm.ChatContext,
+    *,
+    vertexai: bool = False,
+    tool_response_scheduling: NotGivenOr[types.FunctionResponseScheduling] = NOT_GIVEN,
 ) -> types.LiveClientToolResponse | None:
     function_responses: list[types.FunctionResponse] = []
     for msg in chat_ctx.items:
@@ -96,6 +108,9 @@ def get_tool_results_for_realtime(
             res = types.FunctionResponse(
                 name=msg.name,
                 response={"output": msg.output},
+                scheduling=tool_response_scheduling
+                if is_given(tool_response_scheduling)
+                else types.FunctionResponseScheduling.WHEN_IDLE,
             )
             if not vertexai:
                 # vertexai does not support id in FunctionResponse
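Illustrative only, not from the package: every tool result forwarded to the Live API now carries an explicit `scheduling`, falling back to `WHEN_IDLE` when none was configured. A standalone construction with placeholder name and payload:

from google.genai import types

# Sketch only: WHEN_IDLE queues the result until the model stops speaking;
# INTERRUPT would inject it immediately.
result = types.FunctionResponse(
    name="lookup_weather",
    response={"output": "sunny, 22C"},
    scheduling=types.FunctionResponseScheduling.WHEN_IDLE,
)
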
@@ -109,14 +124,21 @@ def get_tool_results_for_realtime(
     )


-def _build_gemini_fnc(
+def _build_gemini_fnc(
+    function_tool: FunctionTool, *, tool_behavior: NotGivenOr[types.Behavior] = NOT_GIVEN
+) -> types.FunctionDeclaration:
     fnc = llm.utils.build_legacy_openai_schema(function_tool, internally_tagged=True)
     json_schema = _GeminiJsonSchema(fnc["parameters"]).simplify()
-
-
-
-
-
+
+    kwargs = {
+        "name": fnc["name"],
+        "description": fnc["description"],
+        "parameters": types.Schema.model_validate(json_schema) if json_schema else None,
+    }
+    if is_given(tool_behavior):
+        kwargs["behavior"] = tool_behavior
+
+    return types.FunctionDeclaration(**kwargs)


 def to_response_format(response_format: type | dict) -> types.SchemaUnion:
{livekit_plugins_google-1.2.9.dist-info → livekit_plugins_google-1.2.12.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: livekit-plugins-google
-Version: 1.2.9
+Version: 1.2.12
 Summary: Agent Framework plugin for services from Google Cloud
 Project-URL: Documentation, https://docs.livekit.io
 Project-URL: Website, https://livekit.io/
@@ -22,7 +22,7 @@ Requires-Dist: google-auth<3,>=2
 Requires-Dist: google-cloud-speech<3,>=2
 Requires-Dist: google-cloud-texttospeech<3,>=2.27
 Requires-Dist: google-genai>=v1.23.0
-Requires-Dist: livekit-agents>=1.2.9
+Requires-Dist: livekit-agents>=1.2.12
 Description-Content-Type: text/markdown

 # Google AI plugin for LiveKit Agents
livekit_plugins_google-1.2.12.dist-info/RECORD
ADDED
@@ -0,0 +1,18 @@
+livekit/plugins/google/__init__.py,sha256=XIyZ-iFnRBpaLtOJgVwojlB-a8GjdDugVFcjBpMEww8,1412
+livekit/plugins/google/llm.py,sha256=u9ZSSkdouPk0018UdiLfgthgTjjLLrXgseX1zrkeg64,18962
+livekit/plugins/google/log.py,sha256=GI3YWN5YzrafnUccljzPRS_ZALkMNk1i21IRnTl2vNA,69
+livekit/plugins/google/models.py,sha256=poOvUBvgpqmmQV5EUQsq0RgNIRAq7nH-_IZIcIfPSBI,2801
+livekit/plugins/google/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+livekit/plugins/google/stt.py,sha256=i99gqXAvYeuhdJ8wh6UlOqLXj6f5_cIni71EwSR4FGw,26467
+livekit/plugins/google/tools.py,sha256=tD5HVDHO5JfUF029Cx3axHMJec0Gxalkl7s1FDgxLzI,259
+livekit/plugins/google/tts.py,sha256=2Ba4HjAc9RWYL3W4Z2586Ir3bYQGdSH2gfxSR7VsyY4,17454
+livekit/plugins/google/utils.py,sha256=tFByjJ357A1WdCPwBQC4JABR9G5kxX0g7_FuWAIxix4,10002
+livekit/plugins/google/version.py,sha256=NhtcZ3HrHC6TVbG58IieJaw1vTkwefziFdDCa5-FYxs,601
+livekit/plugins/google/beta/__init__.py,sha256=RvAUdvEiRN-fe4JrgPcN0Jkw1kZR9wPerGMFVjS1Cc0,270
+livekit/plugins/google/beta/gemini_tts.py,sha256=SpKorOteQ7GYoGWsxV5YPuGeMexoosmtDXQVz_1ZeLA,8743
+livekit/plugins/google/beta/realtime/__init__.py,sha256=_fW2NMN22F-hnQ4xAJ_g5lPbR7CvM_xXzSWlUQY-E-U,188
+livekit/plugins/google/beta/realtime/api_proto.py,sha256=nb_QkVQDEH7h0SKA9vdS3JaL12a6t2Z1ja4SdnxE6a8,814
+livekit/plugins/google/beta/realtime/realtime_api.py,sha256=bvGLk75j6mO870PYLTZh2W3xY5IxuFkjGevltY2BhQA,50294
+livekit_plugins_google-1.2.12.dist-info/METADATA,sha256=K3DT1L2Klv00Qj7Dbg-OQm9cmz_yc70G3NZhg8VZhAY,1909
+livekit_plugins_google-1.2.12.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+livekit_plugins_google-1.2.12.dist-info/RECORD,,
livekit_plugins_google-1.2.9.dist-info/RECORD
REMOVED
@@ -1,18 +0,0 @@
-livekit/plugins/google/__init__.py,sha256=XIyZ-iFnRBpaLtOJgVwojlB-a8GjdDugVFcjBpMEww8,1412
-livekit/plugins/google/llm.py,sha256=aeeGqhbEScbEs-GKp1T8rLocNqmvG4UBj6diekYe4FU,18809
-livekit/plugins/google/log.py,sha256=GI3YWN5YzrafnUccljzPRS_ZALkMNk1i21IRnTl2vNA,69
-livekit/plugins/google/models.py,sha256=poOvUBvgpqmmQV5EUQsq0RgNIRAq7nH-_IZIcIfPSBI,2801
-livekit/plugins/google/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-livekit/plugins/google/stt.py,sha256=gRhVRsfg3BPNkBJGG78QOxEia0mF1jBnI_Ckq1jxqIs,25938
-livekit/plugins/google/tools.py,sha256=tD5HVDHO5JfUF029Cx3axHMJec0Gxalkl7s1FDgxLzI,259
-livekit/plugins/google/tts.py,sha256=LBLP3pEq1iCCgfidpRTtpeoDKYmXh8PKeJf1llAsybQ,17302
-livekit/plugins/google/utils.py,sha256=z0iCP6-hYix3JRm2RM5JOBEJCICehUe5N4FTl-JpXLc,9269
-livekit/plugins/google/version.py,sha256=qBF6bhFO57YNRku03dnWNtaFtkRztcLr4rdWnggtS84,600
-livekit/plugins/google/beta/__init__.py,sha256=RvAUdvEiRN-fe4JrgPcN0Jkw1kZR9wPerGMFVjS1Cc0,270
-livekit/plugins/google/beta/gemini_tts.py,sha256=esWjr0Xf95tl0_AB7MXiFZ_VCORWgcWjzvLvRa3t0FQ,8515
-livekit/plugins/google/beta/realtime/__init__.py,sha256=_fW2NMN22F-hnQ4xAJ_g5lPbR7CvM_xXzSWlUQY-E-U,188
-livekit/plugins/google/beta/realtime/api_proto.py,sha256=nb_QkVQDEH7h0SKA9vdS3JaL12a6t2Z1ja4SdnxE6a8,814
-livekit/plugins/google/beta/realtime/realtime_api.py,sha256=wmQQZB8lwreUxZ2ReMmWOutW1Hc6TiPTm53DBqdOJG0,47830
-livekit_plugins_google-1.2.9.dist-info/METADATA,sha256=77Ltyq9ZFibheExEbFyZbFrs5ATnmEETBHn353cuxUk,1907
-livekit_plugins_google-1.2.9.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-livekit_plugins_google-1.2.9.dist-info/RECORD,,
{livekit_plugins_google-1.2.9.dist-info → livekit_plugins_google-1.2.12.dist-info}/WHEEL
File without changes