livekit-plugins-google 1.0.19__py3-none-any.whl → 1.0.20__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
--- a/livekit/plugins/google/__init__.py
+++ b/livekit/plugins/google/__init__.py
@@ -12,6 +12,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+"""Google AI plugin for LiveKit Agents
+
+Supports Gemini, Cloud Speech-to-Text, and Cloud Text-to-Speech.
+
+See https://docs.livekit.io/agents/integrations/stt/google/ for more information.
+"""
+
 from . import beta
 from .llm import LLM
 from .stt import STT, SpeechStream
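The new module docstring above summarizes what the package exports. For orientation, here is a minimal sketch of how those exports are typically constructed from an agents project; the no-argument defaults are assumptions (credentials coming from `GOOGLE_API_KEY` or Application Default Credentials), not something this diff verifies:

```python
from livekit.plugins import google

# Cloud Speech-to-Text / Text-to-Speech wrappers exported at the package root.
stt = google.STT()
tts = google.TTS()

# Gemini through the standard LLM interface, and Gemini Live through the beta realtime API.
gemini_llm = google.LLM()
realtime = google.beta.realtime.RealtimeModel()
```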
--- a/livekit/plugins/google/beta/__init__.py
+++ b/livekit/plugins/google/beta/__init__.py
@@ -1,3 +1,12 @@
 from . import realtime
 
 __all__ = ["realtime"]
+
+# Cleanup docs of unexported modules
+_module = dir()
+NOT_IN_ALL = [m for m in _module if m not in __all__]
+
+__pdoc__ = {}
+
+for n in NOT_IN_ALL:
+    __pdoc__[n] = False
--- a/livekit/plugins/google/beta/realtime/realtime_api.py
+++ b/livekit/plugins/google/beta/realtime/realtime_api.py
@@ -102,7 +102,7 @@ class RealtimeModel(llm.RealtimeModel):
         self,
         *,
         instructions: NotGivenOr[str] = NOT_GIVEN,
-        model: LiveAPIModels | str = "gemini-2.0-flash-live-001",
+        model: NotGivenOr[LiveAPIModels | str] = NOT_GIVEN,
         api_key: NotGivenOr[str] = NOT_GIVEN,
         voice: Voice | str = "Puck",
         language: NotGivenOr[str] = NOT_GIVEN,
@@ -134,7 +134,7 @@ class RealtimeModel(llm.RealtimeModel):
             instructions (str, optional): Initial system instructions for the model. Defaults to "".
             api_key (str, optional): Google Gemini API key. If None, will attempt to read from the environment variable GOOGLE_API_KEY.
             modalities (list[Modality], optional): Modalities to use, such as ["TEXT", "AUDIO"]. Defaults to ["AUDIO"].
-            model (str, optional): The name of the model to use. Defaults to "gemini-2.0-flash-live-001".
+            model (str, optional): The name of the model to use. Defaults to "gemini-2.0-flash-live-001" or "gemini-2.0-flash-exp" (vertexai).
             voice (api_proto.Voice, optional): Voice setting for audio outputs. Defaults to "Puck".
             language (str, optional): The language(BCP-47 Code) to use for the API. supported languages - https://ai.google.dev/gemini-api/docs/live#supported-languages
             temperature (float, optional): Sampling temperature for response generation. Defaults to 0.8.
@@ -160,14 +160,24 @@ class RealtimeModel(llm.RealtimeModel):
             )
         )
 
+        if not is_given(model):
+            if vertexai:
+                model = "gemini-2.0-flash-exp"
+            else:
+                model = "gemini-2.0-flash-live-001"
+
         gemini_api_key = api_key if is_given(api_key) else os.environ.get("GOOGLE_API_KEY")
         gcp_project = project if is_given(project) else os.environ.get("GOOGLE_CLOUD_PROJECT")
-        gcp_location = location if is_given(location) else os.environ.get("GOOGLE_CLOUD_LOCATION")
+        gcp_location = (
+            location
+            if is_given(location)
+            else os.environ.get("GOOGLE_CLOUD_LOCATION") or "us-central1"
+        )
 
         if vertexai:
             if not gcp_project or not gcp_location:
                 raise ValueError(
-                    "Project and location are required for VertexAI either via project and location or GOOGLE_CLOUD_PROJECT and GOOGLE_CLOUD_LOCATION environment variables"  # noqa: E501
+                    "Project is required for VertexAI via project kwarg or GOOGLE_CLOUD_PROJECT environment variable"  # noqa: E501
                 )
             gemini_api_key = None  # VertexAI does not require an API key
         else:
@@ -311,7 +321,9 @@ class RealtimeSession(llm.RealtimeSession):
         async with self._update_lock:
             self._chat_ctx = chat_ctx.copy()
             turns, _ = to_chat_ctx(self._chat_ctx, id(self), ignore_functions=True)
-            tool_results = get_tool_results_for_realtime(self._chat_ctx)
+            tool_results = get_tool_results_for_realtime(
+                self._chat_ctx, vertexai=self._opts.vertexai
+            )
             # TODO(dz): need to compute delta and then either append or recreate session
             if turns:
                 self._send_client_event(LiveClientContent(turns=turns, turn_complete=False))
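The RealtimeModel hunks above change how defaults are resolved: with no explicit model, the Gemini API path now picks "gemini-2.0-flash-live-001" while Vertex AI picks "gemini-2.0-flash-exp", and the Vertex location falls back to "us-central1" when GOOGLE_CLOUD_LOCATION is unset. A rough sketch of the two construction paths; only the keyword names come from this diff, and the project id is a placeholder:

```python
from livekit.plugins import google

# Gemini API: reads GOOGLE_API_KEY if api_key isn't passed;
# with model omitted, this now resolves to "gemini-2.0-flash-live-001".
model = google.beta.realtime.RealtimeModel(voice="Puck")

# Vertex AI: a project is required (kwarg or GOOGLE_CLOUD_PROJECT), an API key is not;
# location now defaults to "us-central1" and the model to "gemini-2.0-flash-exp".
vertex_model = google.beta.realtime.RealtimeModel(
    vertexai=True,
    project="my-gcp-project",  # placeholder project id
)
```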
--- a/livekit/plugins/google/llm.py
+++ b/livekit/plugins/google/llm.py
@@ -241,7 +241,7 @@ class LLM(llm.LLM):
             client=self._client,
             model=self._opts.model,
             chat_ctx=chat_ctx,
-            tools=tools,
+            tools=tools or [],
             conn_options=conn_options,
             extra_kwargs=extra,
         )
@@ -256,7 +256,7 @@ class LLMStream(llm.LLMStream):
         model: str | ChatModels,
         chat_ctx: llm.ChatContext,
         conn_options: APIConnectOptions,
-        tools: list[FunctionTool] | None,
+        tools: list[FunctionTool],
         extra_kwargs: dict[str, Any],
     ) -> None:
         super().__init__(llm, chat_ctx=chat_ctx, tools=tools, conn_options=conn_options)
--- a/livekit/plugins/google/stt.py
+++ b/livekit/plugins/google/stt.py
@@ -103,6 +103,7 @@ class STT(stt.STT):
         credentials_info: NotGivenOr[dict] = NOT_GIVEN,
         credentials_file: NotGivenOr[str] = NOT_GIVEN,
         keywords: NotGivenOr[list[tuple[str, float]]] = NOT_GIVEN,
+        use_streaming: NotGivenOr[bool] = NOT_GIVEN,
     ):
         """
         Create a new instance of Google STT.
@@ -125,8 +126,13 @@ class STT(stt.STT):
             credentials_info(dict): the credentials info to use for recognition (default: None)
             credentials_file(str): the credentials file to use for recognition (default: None)
             keywords(List[tuple[str, float]]): list of keywords to recognize (default: None)
+            use_streaming(bool): whether to use streaming for recognition (default: True)
         """
-        super().__init__(capabilities=stt.STTCapabilities(streaming=True, interim_results=True))
+        if not is_given(use_streaming):
+            use_streaming = True
+        super().__init__(
+            capabilities=stt.STTCapabilities(streaming=use_streaming, interim_results=True)
+        )
 
         self._location = location
         self._credentials_info = credentials_info
@@ -251,7 +257,7 @@ class STT(stt.STT):
         except DeadlineExceeded:
             raise APITimeoutError() from None
         except GoogleAPICallError as e:
-            raise APIStatusError(e.message, status_code=e.code or -1) from None
+            raise APIStatusError(f"{e.message} {e.details}", status_code=e.code or -1) from e
         except Exception as e:
             raise APIConnectionError() from e
 
@@ -472,6 +478,7 @@ class SpeechStream(stt.SpeechStream):
                 features=cloud_speech.RecognitionFeatures(
                     enable_automatic_punctuation=self._config.punctuate,
                     enable_word_time_offsets=True,
+                    enable_spoken_punctuation=self._config.spoken_punctuation,
                 ),
             ),
             streaming_features=cloud_speech.StreamingRecognitionFeatures(
@@ -505,7 +512,12 @@ class SpeechStream(stt.SpeechStream):
             except DeadlineExceeded:
                 raise APITimeoutError() from None
             except GoogleAPICallError as e:
-                raise APIStatusError(e.message, status_code=e.code or -1) from None
+                if e.code == 409:
+                    logger.debug("stream timed out, restarting.")
+                else:
+                    raise APIStatusError(
+                        f"{e.message} {e.details}", status_code=e.code or -1
+                    ) from e
             except Exception as e:
                 raise APIConnectionError() from e
 
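The STT hunks above add a `use_streaming` switch, forward `spoken_punctuation` to the streaming recognizer, and treat a 409 from the streaming API as a recoverable stream timeout rather than a hard error. A hedged usage sketch of the constructor flag; the other keyword defaults and the non-streaming fallback behavior are assumptions about the surrounding framework, not shown in this diff:

```python
from livekit.plugins import google

# Default behavior is unchanged: the instance advertises streaming recognition.
streaming_stt = google.STT(spoken_punctuation=True)

# Opting out of streaming makes the capability report streaming=False, so the agents
# framework would fall back to non-streaming recognize() calls (assumed behavior).
batch_stt = google.STT(use_streaming=False)
```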
--- a/livekit/plugins/google/utils.py
+++ b/livekit/plugins/google/utils.py
@@ -20,17 +20,21 @@ def to_fnc_ctx(fncs: list[FunctionTool]) -> list[types.FunctionDeclaration]:
     return [_build_gemini_fnc(fnc) for fnc in fncs]
 
 
-def get_tool_results_for_realtime(chat_ctx: llm.ChatContext) -> types.LiveClientToolResponse | None:
+def get_tool_results_for_realtime(
+    chat_ctx: llm.ChatContext, *, vertexai: bool = False
+) -> types.LiveClientToolResponse | None:
     function_responses: list[types.FunctionResponse] = []
     for msg in chat_ctx.items:
         if msg.type == "function_call_output":
-            function_responses.append(
-                types.FunctionResponse(
-                    id=msg.call_id,
-                    name=msg.name,
-                    response={"output": msg.output},
-                )
+            res = types.FunctionResponse(
+                name=msg.name,
+                response={"output": msg.output},
             )
+            if not vertexai:
+                # vertexai does not support id in FunctionResponse
+                # see: https://github.com/googleapis/python-genai/blob/85e00bc/google/genai/_live_converters.py#L1435
+                res.id = msg.call_id
+            function_responses.append(res)
     return (
         types.LiveClientToolResponse(function_responses=function_responses)
         if function_responses
@@ -175,6 +179,15 @@ class _GeminiJsonSchema:
         schema.pop("title", None)
         schema.pop("default", None)
         schema.pop("additionalProperties", None)
+        schema.pop("$schema", None)
+
+        if (const := schema.pop("const", None)) is not None:
+            # Gemini doesn't support const, but it does support enum with a single value
+            schema["enum"] = [const]
+
+        schema.pop("discriminator", None)
+        schema.pop("examples", None)
+
         if ref := schema.pop("$ref", None):
             key = re.sub(r"^#/\$defs/", "", ref)
             if key in refs_stack:
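Gemini's function-declaration schema format has no `const` keyword, so the sanitizer above rewrites it as a single-value `enum` and drops `$schema`, `discriminator`, and `examples`. A self-contained sketch of that transformation on a plain dict, mimicking the steps added in the diff (this is not the `_GeminiJsonSchema` class itself):

```python
schema = {
    "$schema": "https://json-schema.org/draft/2020-12/schema",
    "type": "string",
    "const": "celsius",       # e.g. what a Literal["celsius"] field produces
    "examples": ["celsius"],
}

# Strip keywords Gemini rejects and convert const into a one-element enum.
schema.pop("$schema", None)
schema.pop("discriminator", None)
schema.pop("examples", None)
if (const := schema.pop("const", None)) is not None:
    schema["enum"] = [const]

assert schema == {"type": "string", "enum": ["celsius"]}
```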
--- a/livekit/plugins/google/version.py
+++ b/livekit/plugins/google/version.py
@@ -12,4 +12,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-__version__ = "1.0.19"
+__version__ = "1.0.20"
--- /dev/null
+++ b/livekit_plugins_google-1.0.20.dist-info/METADATA
@@ -0,0 +1,47 @@
+Metadata-Version: 2.4
+Name: livekit-plugins-google
+Version: 1.0.20
+Summary: Agent Framework plugin for services from Google Cloud
+Project-URL: Documentation, https://docs.livekit.io
+Project-URL: Website, https://livekit.io/
+Project-URL: Source, https://github.com/livekit/agents
+Author: LiveKit
+License-Expression: Apache-2.0
+Keywords: audio,livekit,realtime,video,webrtc
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Topic :: Multimedia :: Sound/Audio
+Classifier: Topic :: Multimedia :: Video
+Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+Requires-Python: >=3.9.0
+Requires-Dist: google-auth<3,>=2
+Requires-Dist: google-cloud-speech<3,>=2
+Requires-Dist: google-cloud-texttospeech<3,>=2
+Requires-Dist: google-genai>=1.12.1
+Requires-Dist: livekit-agents>=1.0.20
+Description-Content-Type: text/markdown
+
+# Google AI plugin for LiveKit Agents
+
+Support for Gemini, Gemini Live, Cloud Speech-to-Text, and Cloud Text-to-Speech.
+
+See [https://docs.livekit.io/agents/integrations/google/](https://docs.livekit.io/agents/integrations/google/) for more information.
+
+## Installation
+
+```bash
+pip install livekit-plugins-google
+```
+
+## Pre-requisites
+
+For credentials, you'll need a Google Cloud account and obtain the correct credentials. Credentials can be passed directly or via Application Default Credentials as specified in [How Application Default Credentials works](https://cloud.google.com/docs/authentication/application-default-credentials).
+
+To use the STT and TTS API, you'll need to enable the respective services for your Google Cloud project.
+
+- Cloud Speech-to-Text API
+- Cloud Text-to-Speech API
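The README above says credentials can be passed directly or resolved through Application Default Credentials. A brief sketch of both options for the STT class; the file path is a placeholder, and the `credentials_file` keyword appears in the STT constructor earlier in this diff:

```python
from livekit.plugins import google

# Option 1: Application Default Credentials, e.g. after
#   gcloud auth application-default login
# or with GOOGLE_APPLICATION_CREDENTIALS pointing at a service-account key file.
stt = google.STT()

# Option 2: pass a service-account key file explicitly (placeholder path).
stt = google.STT(credentials_file="/path/to/service-account.json")
```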
--- /dev/null
+++ b/livekit_plugins_google-1.0.20.dist-info/RECORD
@@ -0,0 +1,16 @@
+livekit/plugins/google/__init__.py,sha256=xain2qUzU-YWhYWsLBkW8Q-szV-htpnzHTqymMPo-j0,1364
+livekit/plugins/google/llm.py,sha256=m_lRoUw4RIO1d-LtNYugl99LUNcA1y4NQ17wX7Vv5j0,16189
+livekit/plugins/google/log.py,sha256=GI3YWN5YzrafnUccljzPRS_ZALkMNk1i21IRnTl2vNA,69
+livekit/plugins/google/models.py,sha256=SGjAumdDK97NNLwMFcqZdKR68f1NoGB2Rk1UP2-imG0,1457
+livekit/plugins/google/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+livekit/plugins/google/stt.py,sha256=2jk-1fHiBT8UW_n3CZsIEdMp2iBnUAlTnmefdUd8rAM,23620
+livekit/plugins/google/tts.py,sha256=29R0ieV5sRPBf5Yi0SPFQk7ZZMbELF30bIL9K_j_Wcg,9100
+livekit/plugins/google/utils.py,sha256=zPzmnR-Rs2I87mT_k5S-PVbbuJMH8S-Hp5QcM4wv8vA,10067
+livekit/plugins/google/version.py,sha256=t4KmPVTpEy1pOJ2GRCA-GNJfCQq_-zHNDBxGj4GKfVk,601
+livekit/plugins/google/beta/__init__.py,sha256=5PnoG3Ux24bjzMSzmTeSVljE9EINivGcbWUEV6egGnM,216
+livekit/plugins/google/beta/realtime/__init__.py,sha256=_fW2NMN22F-hnQ4xAJ_g5lPbR7CvM_xXzSWlUQY-E-U,188
+livekit/plugins/google/beta/realtime/api_proto.py,sha256=Fyrejs3SG0EjOPCCFLEnWXKEUxCff47PMWk2VsKJm5E,594
+livekit/plugins/google/beta/realtime/realtime_api.py,sha256=K_YD2CND3PMGV7c3gJY2UdReeLfsOPtIWDys5EU2T_A,31699
+livekit_plugins_google-1.0.20.dist-info/METADATA,sha256=govmSaj6few3t11vreVNKlH9Ki2YzbRGnN3b3il2f20,1905
+livekit_plugins_google-1.0.20.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+livekit_plugins_google-1.0.20.dist-info/RECORD,,
--- a/livekit_plugins_google-1.0.19.dist-info/METADATA
+++ /dev/null
@@ -1,99 +0,0 @@
-Metadata-Version: 2.4
-Name: livekit-plugins-google
-Version: 1.0.19
-Summary: Agent Framework plugin for services from Google Cloud
-Project-URL: Documentation, https://docs.livekit.io
-Project-URL: Website, https://livekit.io/
-Project-URL: Source, https://github.com/livekit/agents
-Author: LiveKit
-License-Expression: Apache-2.0
-Keywords: audio,livekit,realtime,video,webrtc
-Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: Apache Software License
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3 :: Only
-Classifier: Programming Language :: Python :: 3.9
-Classifier: Programming Language :: Python :: 3.10
-Classifier: Topic :: Multimedia :: Sound/Audio
-Classifier: Topic :: Multimedia :: Video
-Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
-Requires-Python: >=3.9.0
-Requires-Dist: google-auth<3,>=2
-Requires-Dist: google-cloud-speech<3,>=2
-Requires-Dist: google-cloud-texttospeech<3,>=2
-Requires-Dist: google-genai>=1.12.1
-Requires-Dist: livekit-agents>=1.0.19
-Description-Content-Type: text/markdown
-
-# LiveKit Plugins Google
-
-Agent Framework plugin for services from Google Cloud. Currently supporting Google's [Speech-to-Text](https://cloud.google.com/speech-to-text) API.
-
-## Installation
-
-```bash
-pip install livekit-plugins-google
-```
-
-## Pre-requisites
-
-For credentials, you'll need a Google Cloud account and obtain the correct credentials. Credentials can be passed directly or via Application Default Credentials as specified in [How Application Default Credentials works](https://cloud.google.com/docs/authentication/application-default-credentials).
-
-To use the STT and TTS API, you'll need to enable the respective services for your Google Cloud project.
-
-- Cloud Speech-to-Text API
-- Cloud Text-to-Speech API
-
-
-## Gemini Multimodal Live
-
-Gemini Multimodal Live can be used with the `MultimodalAgent` class. See examples/multimodal_agent/gemini_agent.py for an example.
-
-### Live Video Input (experimental)
-
-You can push video frames to your Gemini Multimodal Live session alongside the audio automatically handled by the `MultimodalAgent`. The basic approach is to subscribe to the video track, create a video stream, sample frames at a suitable frame rate, and push them into the RealtimeSession:
-
-```
-# Make sure you subscribe to audio and video tracks
-await ctx.connect(auto_subscribe=AutoSubscribe.SUBSCRIBE_ALL)
-
-# Create your RealtimeModel and store a reference
-model = google.beta.realtime.RealtimeModel(
-    # ...
-)
-
-# Create your MultimodalAgent as usual
-agent = MultimodalAgent(
-    model=model,
-    # ...
-)
-
-# Async method to process the video track and push frames to Gemini
-async def _process_video_track(self, track: Track):
-    video_stream = VideoStream(track)
-    last_frame_time = 0
-
-    async for event in video_stream:
-        current_time = asyncio.get_event_loop().time()
-
-        # Sample at 1 FPS
-        if current_time - last_frame_time < 1.0:
-            continue
-
-        last_frame_time = current_time
-        frame = event.frame
-
-        # Push the frame into the RealtimeSession
-        model.sessions[0].push_video(frame)
-
-    await video_stream.aclose()
-
-# Subscribe to new tracks and process them
-@ctx.room.on("track_subscribed")
-def _on_track_subscribed(track: Track, pub, participant):
-    if track.kind == TrackKind.KIND_VIDEO:
-        asyncio.create_task(self._process_video_track(track))
-```
-
-
-
--- a/livekit_plugins_google-1.0.19.dist-info/RECORD
+++ /dev/null
@@ -1,16 +0,0 @@
-livekit/plugins/google/__init__.py,sha256=e_kSlFNmKhyyeliz7f4WOKc_Y0-y39QjO5nCWuguhss,1171
-livekit/plugins/google/llm.py,sha256=NaaT4Zaw6o98VcUHNrQcZZRkD7DPREd76O8fG9IOpXQ,16190
-livekit/plugins/google/log.py,sha256=GI3YWN5YzrafnUccljzPRS_ZALkMNk1i21IRnTl2vNA,69
-livekit/plugins/google/models.py,sha256=SGjAumdDK97NNLwMFcqZdKR68f1NoGB2Rk1UP2-imG0,1457
-livekit/plugins/google/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-livekit/plugins/google/stt.py,sha256=MADnkh0YKWY4bLRgBwFv4emu4YFO-7EVnhxO--dPTlI,23082
-livekit/plugins/google/tts.py,sha256=29R0ieV5sRPBf5Yi0SPFQk7ZZMbELF30bIL9K_j_Wcg,9100
-livekit/plugins/google/utils.py,sha256=sPZZg5VHf60kSILUIHGIZyN2CWYwnCGNYICn8Mhcv9g,9534
-livekit/plugins/google/version.py,sha256=UDC8ahmGgRkv-qMQUY3QibuuVevGMQ9Fd4yIhcQBZwA,601
-livekit/plugins/google/beta/__init__.py,sha256=AxRYc7NGG62Tv1MmcZVCDHNvlhbC86hM-_yP01Qb28k,47
-livekit/plugins/google/beta/realtime/__init__.py,sha256=_fW2NMN22F-hnQ4xAJ_g5lPbR7CvM_xXzSWlUQY-E-U,188
-livekit/plugins/google/beta/realtime/api_proto.py,sha256=Fyrejs3SG0EjOPCCFLEnWXKEUxCff47PMWk2VsKJm5E,594
-livekit/plugins/google/beta/realtime/realtime_api.py,sha256=yk202S604Eogp_ssBX2BSbAXV67uUyQzVO-bzLnScrs,31423
-livekit_plugins_google-1.0.19.dist-info/METADATA,sha256=HuRBvpT9dX3Mz7YOVhZhgQLm3-qQa2vAf2SRDQ5u1vM,3492
-livekit_plugins_google-1.0.19.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-livekit_plugins_google-1.0.19.dist-info/RECORD,,