google-genai 1.28.0__py3-none-any.whl → 1.30.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
google/genai/caches.py CHANGED
@@ -231,6 +231,11 @@ def _GoogleSearch_to_mldev(
         _Interval_to_mldev(getv(from_object, ['time_range_filter']), to_object),
     )
 
+  if getv(from_object, ['exclude_domains']) is not None:
+    raise ValueError(
+        'exclude_domains parameter is not supported in Gemini API.'
+    )
+
   return to_object
 
 
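With this change, setting `exclude_domains` on the Gemini Developer API ("mldev") path fails fast with a `ValueError` instead of the field being silently dropped. A minimal sketch of what a caller would see, assuming `GoogleSearch` exposes an `exclude_domains` field in this release:

    from google import genai
    from google.genai import types

    client = genai.Client()  # Gemini Developer API client, not Vertex AI

    try:
      client.caches.create(
          model='gemini-2.0-flash',
          config=types.CreateCachedContentConfig(
              tools=[types.Tool(google_search=types.GoogleSearch(
                  exclude_domains=['example.com']))],
          ),
      )
    except ValueError as e:
      print(e)  # exclude_domains parameter is not supported in Gemini API.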
@@ -278,6 +283,17 @@ def _UrlContext_to_mldev(
   return to_object
 
 
+def _ToolComputerUse_to_mldev(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['environment']) is not None:
+    setv(to_object, ['environment'], getv(from_object, ['environment']))
+
+  return to_object
+
+
 def _Tool_to_mldev(
     from_object: Union[dict[str, Any], object],
     parent_object: Optional[dict[str, Any]] = None,
@@ -327,12 +343,18 @@ def _Tool_to_mldev(
         _UrlContext_to_mldev(getv(from_object, ['url_context']), to_object),
     )
 
+  if getv(from_object, ['computer_use']) is not None:
+    setv(
+        to_object,
+        ['computerUse'],
+        _ToolComputerUse_to_mldev(
+            getv(from_object, ['computer_use']), to_object
+        ),
+    )
+
   if getv(from_object, ['code_execution']) is not None:
     setv(to_object, ['codeExecution'], getv(from_object, ['code_execution']))
 
-  if getv(from_object, ['computer_use']) is not None:
-    setv(to_object, ['computerUse'], getv(from_object, ['computer_use']))
-
   return to_object
 
 
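Previously `_Tool_to_mldev` copied `computer_use` through verbatim; it now routes it through the dedicated `_ToolComputerUse_to_mldev` converter above, which carries over only the `environment` field. (The Vertex converters later in this diff mirror the same change.) A rough sketch of the mapping, assuming `ToolComputerUse` and the `Environment` enum are exported from `google.genai.types`:

    from google.genai import types

    # snake_case SDK config...
    tool = types.Tool(
        computer_use=types.ToolComputerUse(
            environment=types.Environment.ENVIRONMENT_BROWSER))

    # ...is serialized by _Tool_to_mldev (per this diff) roughly as:
    # {'computerUse': {'environment': 'ENVIRONMENT_BROWSER'}}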
@@ -811,6 +833,9 @@ def _GoogleSearch_to_vertex(
         ),
     )
 
+  if getv(from_object, ['exclude_domains']) is not None:
+    setv(to_object, ['excludeDomains'], getv(from_object, ['exclude_domains']))
+
   return to_object
 
 
@@ -854,6 +879,8 @@ def _EnterpriseWebSearch_to_vertex(
     parent_object: Optional[dict[str, Any]] = None,
 ) -> dict[str, Any]:
   to_object: dict[str, Any] = {}
+  if getv(from_object, ['exclude_domains']) is not None:
+    setv(to_object, ['excludeDomains'], getv(from_object, ['exclude_domains']))
 
   return to_object
 
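On Vertex AI, by contrast, `exclude_domains` is forwarded as camelCase `excludeDomains` for both `GoogleSearch` and `EnterpriseWebSearch` rather than rejected. A hedged sketch of the accepting path (project and location are placeholders, and the `exclude_domains` field is the same assumption as above):

    from google import genai
    from google.genai import types

    client = genai.Client(
        vertexai=True, project='your-project', location='us-central1')

    cache = client.caches.create(
        model='gemini-2.0-flash',
        config=types.CreateCachedContentConfig(
            tools=[types.Tool(google_search=types.GoogleSearch(
                exclude_domains=['example.com']))],  # sent as excludeDomains
        ),
    )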
@@ -933,6 +960,17 @@ def _UrlContext_to_vertex(
   return to_object
 
 
+def _ToolComputerUse_to_vertex(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['environment']) is not None:
+    setv(to_object, ['environment'], getv(from_object, ['environment']))
+
+  return to_object
+
+
 def _Tool_to_vertex(
     from_object: Union[dict[str, Any], object],
     parent_object: Optional[dict[str, Any]] = None,
@@ -992,12 +1030,18 @@ def _Tool_to_vertex(
         _UrlContext_to_vertex(getv(from_object, ['url_context']), to_object),
     )
 
+  if getv(from_object, ['computer_use']) is not None:
+    setv(
+        to_object,
+        ['computerUse'],
+        _ToolComputerUse_to_vertex(
+            getv(from_object, ['computer_use']), to_object
+        ),
+    )
+
   if getv(from_object, ['code_execution']) is not None:
     setv(to_object, ['codeExecution'], getv(from_object, ['code_execution']))
 
-  if getv(from_object, ['computer_use']) is not None:
-    setv(to_object, ['computerUse'], getv(from_object, ['computer_use']))
-
   return to_object
 
 
google/genai/chats.py CHANGED
@@ -454,6 +454,7 @@ class AsyncChat(_BaseChat):
     Usage:
 
     .. code-block:: python
+
       chat = client.aio.chats.create(model='gemini-2.0-flash')
       async for chunk in await chat.send_message_stream('tell me a story'):
         print(chunk.text)
google/genai/errors.py CHANGED
@@ -172,18 +172,21 @@ class ServerError(APIError):
 
 class UnknownFunctionCallArgumentError(ValueError):
   """Raised when the function call argument cannot be converted to the parameter annotation."""
-
   pass
 
 
 class UnsupportedFunctionError(ValueError):
   """Raised when the function is not supported."""
+  pass
 
 
 class FunctionInvocationError(ValueError):
   """Raised when the function cannot be invoked with the given arguments."""
-
   pass
 
 
+class UnknownApiResponseError(ValueError):
+  """Raised when the response from the API cannot be parsed as JSON."""
+  pass
+
 ExperimentalWarning = _common.ExperimentalWarning
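`UnknownApiResponseError` is new in this release; this hunk shows only the definition, so the raise sites are not visible here. A speculative sketch of handling it, assuming it surfaces when a response body fails JSON parsing as the docstring says (note it subclasses `ValueError`, not `APIError`):

    from google import genai
    from google.genai import errors

    client = genai.Client()

    try:
      response = client.models.generate_content(
          model='gemini-2.0-flash', contents='hello')
    except errors.UnknownApiResponseError as e:
      # Per the docstring: the API response could not be parsed as JSON.
      print(f'unparseable response: {e}')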
google/genai/live.py CHANGED
@@ -187,29 +187,30 @@ class AsyncSession:
     and will not return until you send `turn_complete=True`.
 
     Example:
-    ```
-    import google.genai
-    from google.genai import types
-    import os
 
-    if os.environ.get('GOOGLE_GENAI_USE_VERTEXAI'):
-      MODEL_NAME = 'gemini-2.0-flash-live-preview-04-09'
-    else:
-      MODEL_NAME = 'gemini-live-2.5-flash-preview';
-
-    client = genai.Client()
-    async with client.aio.live.connect(
-        model=MODEL_NAME,
-        config={"response_modalities": ["TEXT"]}
-    ) as session:
-      await session.send_client_content(
-          turns=types.Content(
-              role='user',
-              parts=[types.Part(text="Hello world!")]))
-      async for msg in session.receive():
-        if msg.text:
-          print(msg.text)
-    ```
+    .. code-block:: python
+
+      import google.genai
+      from google.genai import types
+      import os
+
+      if os.environ.get('GOOGLE_GENAI_USE_VERTEXAI'):
+        MODEL_NAME = 'gemini-2.0-flash-live-preview-04-09'
+      else:
+        MODEL_NAME = 'gemini-live-2.5-flash-preview';
+
+      client = genai.Client()
+      async with client.aio.live.connect(
+          model=MODEL_NAME,
+          config={"response_modalities": ["TEXT"]}
+      ) as session:
+        await session.send_client_content(
+            turns=types.Content(
+                role='user',
+                parts=[types.Part(text="Hello world!")]))
+        async for msg in session.receive():
+          if msg.text:
+            print(msg.text)
     """
     client_content = t.t_client_content(turns, turn_complete).model_dump(
         mode='json', exclude_none=True
@@ -253,39 +254,40 @@ class AsyncSession:
       media: A `Blob`-like object, the realtime media to send.
 
     Example:
-    ```
-    from pathlib import Path
 
-    from google import genai
-    from google.genai import types
+    .. code-block:: python
 
-    import PIL.Image
+      from pathlib import Path
 
-    import os
+      from google import genai
+      from google.genai import types
 
-    if os.environ.get('GOOGLE_GENAI_USE_VERTEXAI'):
-      MODEL_NAME = 'gemini-2.0-flash-live-preview-04-09'
-    else:
-      MODEL_NAME = 'gemini-live-2.5-flash-preview';
+      import PIL.Image
 
+      import os
 
-    client = genai.Client()
+      if os.environ.get('GOOGLE_GENAI_USE_VERTEXAI'):
+        MODEL_NAME = 'gemini-2.0-flash-live-preview-04-09'
+      else:
+        MODEL_NAME = 'gemini-live-2.5-flash-preview';
 
-    async with client.aio.live.connect(
-        model=MODEL_NAME,
-        config={"response_modalities": ["TEXT"]},
-    ) as session:
-      await session.send_realtime_input(
-          media=PIL.Image.open('image.jpg'))
 
-      audio_bytes = Path('audio.pcm').read_bytes()
-      await session.send_realtime_input(
-          media=types.Blob(data=audio_bytes, mime_type='audio/pcm;rate=16000'))
+      client = genai.Client()
 
-      async for msg in session.receive():
-        if msg.text is not None:
-          print(f'{msg.text}')
-    ```
+      async with client.aio.live.connect(
+          model=MODEL_NAME,
+          config={"response_modalities": ["TEXT"]},
+      ) as session:
+        await session.send_realtime_input(
+            media=PIL.Image.open('image.jpg'))
+
+        audio_bytes = Path('audio.pcm').read_bytes()
+        await session.send_realtime_input(
+            media=types.Blob(data=audio_bytes, mime_type='audio/pcm;rate=16000'))
+
+        async for msg in session.receive():
+          if msg.text is not None:
+            print(f'{msg.text}')
     """
     kwargs: _common.StringDict = {}
     if media is not None:
@@ -351,52 +353,54 @@ class AsyncSession:
       `FunctionResponse`-like objects.
 
     Example:
-    ```
-    from google import genai
-    from google.genai import types
 
-    import os
+    .. code-block:: python
 
-    if os.environ.get('GOOGLE_GENAI_USE_VERTEXAI'):
-      MODEL_NAME = 'gemini-2.0-flash-live-preview-04-09'
-    else:
-      MODEL_NAME = 'gemini-live-2.5-flash-preview';
-
-    client = genai.Client()
-
-    tools = [{'function_declarations': [{'name': 'turn_on_the_lights'}]}]
-    config = {
-      "tools": tools,
-      "response_modalities": ['TEXT']
-    }
-
-    async with client.aio.live.connect(
-        model='models/gemini-live-2.5-flash-preview',
-        config=config
-    ) as session:
-      prompt = "Turn on the lights please"
-      await session.send_client_content(
-          turns={"parts": [{'text': prompt}]}
-      )
+      from google import genai
+      from google.genai import types
 
-      async for chunk in session.receive():
-        if chunk.server_content:
-          if chunk.text is not None:
-            print(chunk.text)
-        elif chunk.tool_call:
-          print(chunk.tool_call)
-          print('_'*80)
-          function_response=types.FunctionResponse(
-              name='turn_on_the_lights',
-              response={'result': 'ok'},
-              id=chunk.tool_call.function_calls[0].id,
-          )
-          print(function_response)
-          await session.send_tool_response(
-              function_responses=function_response
-          )
+      import os
+
+      if os.environ.get('GOOGLE_GENAI_USE_VERTEXAI'):
+        MODEL_NAME = 'gemini-2.0-flash-live-preview-04-09'
+      else:
+        MODEL_NAME = 'gemini-live-2.5-flash-preview';
+
+      client = genai.Client()
+
+      tools = [{'function_declarations': [{'name': 'turn_on_the_lights'}]}]
+      config = {
+        "tools": tools,
+        "response_modalities": ['TEXT']
+      }
+
+      async with client.aio.live.connect(
+          model='models/gemini-live-2.5-flash-preview',
+          config=config
+      ) as session:
+        prompt = "Turn on the lights please"
+        await session.send_client_content(
+            turns={"parts": [{'text': prompt}]}
+        )
+
+        async for chunk in session.receive():
+          if chunk.server_content:
+            if chunk.text is not None:
+              print(chunk.text)
+          elif chunk.tool_call:
+            print(chunk.tool_call)
+            print('_'*80)
+            function_response=types.FunctionResponse(
+                name='turn_on_the_lights',
+                response={'result': 'ok'},
+                id=chunk.tool_call.function_calls[0].id,
+            )
+            print(function_response)
+            await session.send_tool_response(
+                function_responses=function_response
+            )
 
-    print('_'*80)
+        print('_'*80)
     """
     tool_response = t.t_tool_response(function_responses)
     if self._api_client.vertexai: