google-genai 1.1.0__py3-none-any.whl → 1.3.0__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
google/genai/caches.py CHANGED
@@ -15,6 +15,7 @@
 
 # Code generated by the Google Gen AI SDK generator DO NOT EDIT.
 
+import logging
 from typing import Optional, Union
 from urllib.parse import urlencode
 from . import _api_module
@@ -26,6 +27,8 @@ from ._common import get_value_by_path as getv
 from ._common import set_value_by_path as setv
 from .pagers import AsyncPager, Pager
 
+logger = logging.getLogger('google_genai.caches')
+
 
 def _Part_to_mldev(
     api_client: ApiClient,
@@ -172,8 +175,10 @@ def _Schema_to_mldev(
     raise ValueError('example parameter is not supported in Gemini API.')
 
   if getv(from_object, ['property_ordering']) is not None:
-    raise ValueError(
-        'property_ordering parameter is not supported in Gemini API.'
+    setv(
+        to_object,
+        ['propertyOrdering'],
+        getv(from_object, ['property_ordering']),
     )
 
   if getv(from_object, ['pattern']) is not None:
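With this hunk, `property_ordering` stops raising on the Gemini API path and is instead serialized as `propertyOrdering`. A hedged sketch of what that unlocks for structured output (the model name and schema fields are illustrative, not taken from the diff):

from google import genai
from google.genai import types

client = genai.Client()  # assumes GOOGLE_API_KEY is set in the environment

# In 1.1.0 this raised: 'property_ordering parameter is not supported in
# Gemini API.'  It is now forwarded on the wire as 'propertyOrdering'.
schema = types.Schema(
    type='OBJECT',
    properties={
        'name': types.Schema(type='STRING'),
        'age': types.Schema(type='INTEGER'),
    },
    property_ordering=['name', 'age'],
)
response = client.models.generate_content(
    model='gemini-2.0-flash',  # illustrative model name
    contents='Invent a person.',
    config=types.GenerateContentConfig(
        response_mime_type='application/json',
        response_schema=schema,
    ),
)
print(response.text)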
@@ -1181,7 +1186,7 @@ class Caches(_api_module.BaseModule):
     .. code-block:: python
 
       contents = ... // Initialize the content to cache.
-      response = await client.aio.caches.create(
+      response = client.caches.create(
           model= ... // The publisher model id
           contents=contents,
           config={
@@ -1249,8 +1254,7 @@ class Caches(_api_module.BaseModule):
 
     .. code-block:: python
 
-      await client.aio.caches.get(name= ... ) // The server-generated resource
-      name.
+      client.caches.get(name= ... ) // The server-generated resource name.
     """
 
     parameter_model = types._GetCachedContentParameters(
@@ -1312,8 +1316,7 @@ class Caches(_api_module.BaseModule):
 
     .. code-block:: python
 
-      await client.aio.caches.delete(name= ... ) // The server-generated
-      resource name.
+      client.caches.delete(name= ... ) // The server-generated resource name.
     """
 
     parameter_model = types._DeleteCachedContentParameters(
@@ -1375,7 +1378,7 @@ class Caches(_api_module.BaseModule):
 
     .. code-block:: python
 
-      response = await client.aio.caches.update(
+      response = client.caches.update(
           name= ... // The server-generated resource name.
           config={
               'ttl': '7600s',
@@ -1437,8 +1440,8 @@ class Caches(_api_module.BaseModule):
 
     .. code-block:: python
 
-      cached_contents = await client.aio.caches.list(config={'page_size': 2})
-      async for cached_content in cached_contents:
+      cached_contents = client.caches.list(config={'page_size': 2})
+      for cached_content in cached_contents:
         print(cached_content)
     """
 
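The remaining caches.py hunks fix copy-paste errors in the synchronous methods' docstrings, which previously showed `await client.aio...` calls. For reference, a hedged end-to-end sketch of the sync cache lifecycle those docstrings now describe (model name and contents are placeholders):

from google import genai
from google.genai import types

client = genai.Client()  # assumes GOOGLE_API_KEY is set in the environment

cache = client.caches.create(
    model='gemini-1.5-flash-002',  # placeholder; any caching-capable model
    config=types.CreateCachedContentConfig(
        contents=['A very long document to cache...'],
        ttl='3600s',
    ),
)
cache = client.caches.get(name=cache.name)  # fetch by server-generated name
client.caches.update(
    name=cache.name,
    config=types.UpdateCachedContentConfig(ttl='7600s'),
)
for cached_content in client.caches.list(config={'page_size': 2}):
    print(cached_content.name)
client.caches.delete(name=cache.name)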
google/genai/client.py CHANGED
@@ -27,6 +27,7 @@ from .chats import AsyncChats, Chats
 from .files import AsyncFiles, Files
 from .live import AsyncLive
 from .models import AsyncModels, Models
+from .operations import AsyncOperations, Operations
 from .tunings import AsyncTunings, Tunings
 
 
@@ -42,6 +43,7 @@ class AsyncClient:
     self._batches = AsyncBatches(self._api_client)
     self._files = AsyncFiles(self._api_client)
     self._live = AsyncLive(self._api_client)
+    self._operations = AsyncOperations(self._api_client)
 
   @property
   def models(self) -> AsyncModels:
@@ -71,6 +73,9 @@ class AsyncClient:
   def live(self) -> AsyncLive:
     return self._live
 
+  @property
+  def operations(self) -> AsyncOperations:
+    return self._operations
 
 class DebugConfig(pydantic.BaseModel):
   """Configuration options that change client network behavior when testing."""
@@ -94,6 +99,17 @@ class Client:
   Use this client to make a request to the Gemini Developer API or Vertex AI
   API and then wait for the response.
 
+  To initialize the client, provide the required arguments either directly
+  or by using environment variables. Gemini API users and Vertex AI users in
+  express mode can provide API key by providing input argument
+  `api_key="your-api-key"` or by defining `GOOGLE_API_KEY="your-api-key"` as an
+  environment variable
+
+  Vertex AI API users can provide inputs argument as `vertexai=True,
+  project="your-project-id", location="us-central1"` or by defining
+  `GOOGLE_GENAI_USE_VERTEXAI=true`, `GOOGLE_CLOUD_PROJECT` and
+  `GOOGLE_CLOUD_LOCATION` environment variables.
+
   Attributes:
     api_key: The `API key <https://ai.google.dev/gemini-api/docs/api-key>`_ to
       use for authentication. Applies to the Gemini Developer API only.
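The new docstring text maps directly onto the two initialization paths. A short illustration (the project and location values are the docstring's own placeholders):

from google import genai

# Gemini Developer API (or Vertex AI express mode): pass the key directly,
# or export GOOGLE_API_KEY and call genai.Client() with no arguments.
client = genai.Client(api_key='your-api-key')

# Full Vertex AI: pass project/location directly, or export
# GOOGLE_GENAI_USE_VERTEXAI=true, GOOGLE_CLOUD_PROJECT and
# GOOGLE_CLOUD_LOCATION and call genai.Client() with no arguments.
vertex_client = genai.Client(
    vertexai=True,
    project='your-project-id',
    location='us-central1',
)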
@@ -173,21 +189,11 @@ class Client:
       debug_config (DebugConfig): Config settings that control network behavior
         of the client. This is typically used when running test code.
       http_options (Union[HttpOptions, HttpOptionsDict]): Http options to use
-        for the client. The field deprecated_response_payload should not be set
-        in http_options.
+        for the client.
     """
 
     self._debug_config = debug_config or DebugConfig()
 
-    # Throw ValueError if deprecated_response_payload is set in http_options
-    # due to unpredictable behavior when running multiple coroutines through
-    # client.aio.
-    if http_options and 'deprecated_response_payload' in http_options:
-      raise ValueError(
-          'Setting deprecated_response_payload in http_options is not'
-          ' supported.'
-      )
-
     self._api_client = self._get_api_client(
         vertexai=vertexai,
         api_key=api_key,
@@ -204,6 +210,7 @@ class Client:
     self._caches = Caches(self._api_client)
     self._batches = Batches(self._api_client)
     self._files = Files(self._api_client)
+    self._operations = Operations(self._api_client)
 
   @staticmethod
   def _get_api_client(
@@ -269,6 +276,10 @@ class Client:
   def files(self) -> Files:
     return self._files
 
+  @property
+  def operations(self) -> Operations:
+    return self._operations
+
   @property
   def vertexai(self) -> bool:
     """Returns whether the client is using the Vertex AI API."""
google/genai/errors.py CHANGED
@@ -16,7 +16,8 @@
 """Error classes for the GenAI SDK."""
 
 from typing import Any, Optional, TYPE_CHECKING, Union
-
+import httpx
+import json
 import requests
 
 
@@ -34,7 +35,9 @@ class APIError(Exception):
   response: Optional[Any] = None
 
   def __init__(
-      self, code: int, response: Union[requests.Response, 'ReplayResponse']
+      self,
+      code: int,
+      response: Union[requests.Response, 'ReplayResponse', httpx.Response],
   ):
     self.response = response
 
@@ -48,6 +51,18 @@ class APIError(Exception):
           'message': response.text,
           'status': response.reason,
       }
+    elif isinstance(response, httpx.Response):
+      try:
+        response_json = response.json()
+      except (json.decoder.JSONDecodeError, httpx.ResponseNotRead):
+        try:
+          message = response.text
+        except httpx.ResponseNotRead:
+          message = None
+        response_json = {
+            'message': message,
+            'status': response.reason_phrase,
+        }
     else:
       response_json = response.body_segments[0].get('error', {})
 
@@ -89,7 +104,7 @@ class APIError(Exception):
 
   @classmethod
   def raise_for_response(
-      cls, response: Union[requests.Response, 'ReplayResponse']
+      cls, response: Union[requests.Response, 'ReplayResponse', httpx.Response]
   ):
     """Raises an error with detailed error message if the response has an error status."""
     if response.status_code == 200:
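`APIError` can now be built from an `httpx.Response`, degrading gracefully when the body is not JSON (or has not been read) by falling back to the response text and reason phrase. A small sketch exercising that branch with a hand-built response, purely for illustration:

import httpx
from google.genai import errors

# An httpx.Response with a non-JSON body, as the new fallback branch handles.
response = httpx.Response(
    status_code=429,
    text='quota exhausted',
    request=httpx.Request('GET', 'https://example.invalid'),
)
try:
    errors.APIError.raise_for_response(response)
except errors.APIError as e:
    # Falls back to {'message': <text>, 'status': <reason_phrase>}.
    print(e.code, e.status, e.message)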
google/genai/files.py CHANGED
@@ -16,6 +16,7 @@
 # Code generated by the Google Gen AI SDK generator DO NOT EDIT.
 
 import io
+import logging
 import mimetypes
 import os
 import pathlib
@@ -30,6 +31,8 @@ from ._common import get_value_by_path as getv
 from ._common import set_value_by_path as setv
 from .pagers import AsyncPager, Pager
 
+logger = logging.getLogger('google_genai.files')
+
 
 def _ListFilesConfig_to_mldev(
     api_client: ApiClient,
@@ -981,7 +984,7 @@ class AsyncFiles(_api_module.BaseModule):
 
     .. code-block:: python
 
-      pager = client.files.list(config={'page_size': 10})
+      pager = await client.aio.files.list(config={'page_size': 10})
      for file in pager.page:
        print(file.name)
    """
@@ -1101,7 +1104,7 @@ class AsyncFiles(_api_module.BaseModule):
 
     .. code-block:: python
 
-      file = client.files.get(name='files/...')
+      file = await client.aio.files.get(name='files/...')
      print(file.uri)
    """
 
@@ -1164,7 +1167,7 @@ class AsyncFiles(_api_module.BaseModule):
 
     .. code-block:: python
 
-      client.files.delete(name='files/...')
+      await client.aio.files.delete(name='files/...')
    """
 
    parameter_model = types._DeleteFileParameters(
@@ -1296,13 +1299,13 @@ class AsyncFiles(_api_module.BaseModule):
 
     if (
         response.http_headers is None
-        or 'X-Goog-Upload-URL' not in response.http_headers
+        or 'x-goog-upload-url' not in response.http_headers
     ):
       raise KeyError(
           'Failed to create file. Upload URL did not returned from the create'
           ' file request.'
      )
-    upload_url = response.http_headers['X-Goog-Upload-URL']
+    upload_url = response.http_headers['x-goog-upload-url']
 
     if isinstance(file, io.IOBase):
       return_file = await self._api_client.async_upload_file(
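The last files.py hunk switches the upload-URL lookup to lowercase `x-goog-upload-url`, which matters once response headers come back from an HTTP stack that lower-cases header names before storing them in a plain dict. A hedged sketch of a fully case-insensitive version of that lookup (the helper name and error text are mine, not the SDK's):

from typing import Mapping, Optional

def find_upload_url(http_headers: Optional[Mapping[str, str]]) -> str:
    """Case-insensitive lookup mirroring the fixed behavior in files.py."""
    if http_headers is not None:
        for key, value in http_headers.items():
            if key.lower() == 'x-goog-upload-url':
                return value
    raise KeyError('upload URL missing from the create-file response')

# Works regardless of how the transport cased the header:
print(find_upload_url({'X-Goog-Upload-URL': 'https://upload.example/...'}))
print(find_upload_url({'x-goog-upload-url': 'https://upload.example/...'}))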
google/genai/live.py CHANGED
@@ -20,7 +20,7 @@ import base64
 import contextlib
 import json
 import logging
-from typing import AsyncIterator, Optional, Sequence, Union
+from typing import Any, AsyncIterator, Dict, Optional, Sequence, Union
 
 import google.auth
 from websockets import ConnectionClosed
@@ -55,6 +55,7 @@ except ModuleNotFoundError:
   from websockets.client import ClientConnection
   from websockets.client import connect
 
+logger = logging.getLogger('google_genai.live')
 
 _FUNCTION_RESPONSE_REQUIRES_ID = (
     'FunctionResponse request must have an `id` field from the'
@@ -72,15 +73,17 @@ class AsyncSession:
   async def send(
       self,
       *,
-      input: Union[
-          types.ContentListUnion,
-          types.ContentListUnionDict,
-          types.LiveClientContentOrDict,
-          types.LiveClientRealtimeInputOrDict,
-          types.LiveClientToolResponseOrDict,
-          types.FunctionResponseOrDict,
-          Sequence[types.FunctionResponseOrDict],
-      ],
+      input: Optional[
+          Union[
+              types.ContentListUnion,
+              types.ContentListUnionDict,
+              types.LiveClientContentOrDict,
+              types.LiveClientRealtimeInputOrDict,
+              types.LiveClientToolResponseOrDict,
+              types.FunctionResponseOrDict,
+              Sequence[types.FunctionResponseOrDict],
+          ]
+      ] = None,
       end_of_turn: Optional[bool] = False,
   ):
     """Send input to the model.
@@ -234,7 +237,7 @@ class AsyncSession:
   def _LiveServerContent_from_mldev(
       self,
       from_object: Union[dict, object],
-  ) -> dict:
+  ) -> Dict[str, Any]:
     to_object = {}
     if getv(from_object, ['modelTurn']) is not None:
       setv(
@@ -254,7 +257,7 @@
   def _LiveToolCall_from_mldev(
       self,
       from_object: Union[dict, object],
-  ) -> dict:
+  ) -> Dict[str, Any]:
     to_object = {}
     if getv(from_object, ['functionCalls']) is not None:
       setv(
@@ -267,7 +270,7 @@
   def _LiveToolCall_from_vertex(
       self,
       from_object: Union[dict, object],
-  ) -> dict:
+  ) -> Dict[str, Any]:
     to_object = {}
     if getv(from_object, ['functionCalls']) is not None:
       setv(
@@ -280,7 +283,7 @@
   def _LiveServerMessage_from_mldev(
       self,
       from_object: Union[dict, object],
-  ) -> dict:
+  ) -> Dict[str, Any]:
     to_object = {}
     if getv(from_object, ['serverContent']) is not None:
       setv(
@@ -307,7 +310,7 @@
   def _LiveServerContent_from_vertex(
       self,
       from_object: Union[dict, object],
-  ) -> dict:
+  ) -> Dict[str, Any]:
     to_object = {}
     if getv(from_object, ['modelTurn']) is not None:
       setv(
@@ -327,7 +330,7 @@
   def _LiveServerMessage_from_vertex(
       self,
       from_object: Union[dict, object],
-  ) -> dict:
+  ) -> Dict[str, Any]:
     to_object = {}
     if getv(from_object, ['serverContent']) is not None:
       setv(
@@ -354,18 +357,23 @@ class AsyncSession:
 
   def _parse_client_message(
       self,
-      input: Union[
-          types.ContentListUnion,
-          types.ContentListUnionDict,
-          types.LiveClientContentOrDict,
-          types.LiveClientRealtimeInputOrDict,
-          types.LiveClientRealtimeInputOrDict,
-          types.LiveClientToolResponseOrDict,
-          types.FunctionResponseOrDict,
-          Sequence[types.FunctionResponseOrDict],
-      ],
+      input: Optional[
+          Union[
+              types.ContentListUnion,
+              types.ContentListUnionDict,
+              types.LiveClientContentOrDict,
+              types.LiveClientRealtimeInputOrDict,
+              types.LiveClientToolResponseOrDict,
+              types.FunctionResponseOrDict,
+              Sequence[types.FunctionResponseOrDict],
+          ]
+      ] = None,
       end_of_turn: Optional[bool] = False,
-  ) -> dict:
+  ) -> Dict[str, Any]:
+
+    if not input:
+      logging.info('No input provided. Assume it is the end of turn.')
+      return {'client_content': {'turn_complete': True}}
     if isinstance(input, str):
       input = [input]
     elif isinstance(input, dict) and 'data' in input:
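Because `input` is now `Optional`, calling `send` with nothing is treated as end of turn instead of failing type checks. A hedged live-session sketch using that shortcut (the model name is a placeholder):

import asyncio
from google import genai

async def main() -> None:
    client = genai.Client()  # assumes GOOGLE_API_KEY is set in the environment
    async with client.aio.live.connect(model='gemini-2.0-flash-exp') as session:
        await session.send(input='Hello from the live API')
        # No input: _parse_client_message now emits
        # {'client_content': {'turn_complete': True}} on our behalf.
        await session.send(end_of_turn=True)
        async for message in session.receive():
            if message.text:
                print(message.text)

asyncio.run(main())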
@@ -374,7 +382,6 @@ class AsyncSession:
       input['data'] = decoded_data
       input = [input]
     elif isinstance(input, types.Blob):
-      input.data = base64.b64encode(input.data).decode('utf-8')
       input = [input]
     elif isinstance(input, dict) and 'name' in input and 'response' in input:
       # ToolResponse.FunctionResponse
@@ -411,7 +418,7 @@
       if any((isinstance(b, dict) and 'data' in b) for b in input):
         pass
       elif any(isinstance(b, types.Blob) for b in input):
-        input = [b.model_dump(exclude_none=True) for b in input]
+        input = [b.model_dump(exclude_none=True, mode='json') for b in input]
       else:
         raise ValueError(
             f'Unsupported input type "{type(input)}" or input content "{input}"'
@@ -419,11 +426,21 @@ class AsyncSession:
 
       client_message = {'realtime_input': {'media_chunks': input}}
 
-    elif isinstance(input, dict) and 'content' in input:
-      # TODO(b/365983264) Add validation checks for content_update input_dict.
-      client_message = {'client_content': input}
+    elif isinstance(input, dict):
+      if 'content' in input or 'turns' in input:
+        # TODO(b/365983264) Add validation checks for content_update input_dict.
+        client_message = {'client_content': input}
+      elif 'media_chunks' in input:
+        client_message = {'realtime_input': input}
+      elif 'function_responses' in input:
+        client_message = {'tool_response': input}
+      else:
+        raise ValueError(
+            f'Unsupported input type "{type(input)}" or input content "{input}"')
     elif isinstance(input, types.LiveClientRealtimeInput):
-      client_message = {'realtime_input': input.model_dump(exclude_none=True)}
+      client_message = {
+          'realtime_input': input.model_dump(exclude_none=True, mode='json')
+      }
       if isinstance(
           client_message['realtime_input']['media_chunks'][0]['data'], bytes
       ):
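Dict inputs are now dispatched by key (`turns`/`content` → `client_content`, `media_chunks` → `realtime_input`, `function_responses` → `tool_response`) instead of requiring a `content` key. Illustrative calls against an open `session` as in the sketch above (`audio_b64` and `call_id` are placeholders):

# Routed to client_content:
await session.send(input={
    'turns': [{'role': 'user', 'parts': [{'text': 'hi'}]}],
    'turn_complete': True,
})
# Routed to realtime_input:
await session.send(input={
    'media_chunks': [{'data': audio_b64, 'mime_type': 'audio/pcm'}],
})
# Routed to tool_response:
await session.send(input={
    'function_responses': [
        {'id': call_id, 'name': 'get_weather', 'response': {'temp_c': 21}}
    ],
})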
@@ -436,20 +453,26 @@ class AsyncSession:
           ]
 
     elif isinstance(input, types.LiveClientContent):
-      client_message = {'client_content': input.model_dump(exclude_none=True)}
+      client_message = {
+          'client_content': input.model_dump(exclude_none=True, mode='json')
+      }
     elif isinstance(input, types.LiveClientToolResponse):
       # ToolResponse.FunctionResponse
       if not (self._api_client.vertexai) and not (
           input.function_responses[0].id
       ):
         raise ValueError(_FUNCTION_RESPONSE_REQUIRES_ID)
-      client_message = {'tool_response': input.model_dump(exclude_none=True)}
+      client_message = {
+          'tool_response': input.model_dump(exclude_none=True, mode='json')
+      }
     elif isinstance(input, types.FunctionResponse):
       if not (self._api_client.vertexai) and not (input.id):
         raise ValueError(_FUNCTION_RESPONSE_REQUIRES_ID)
       client_message = {
           'tool_response': {
-              'function_responses': [input.model_dump(exclude_none=True)]
+              'function_responses': [
+                  input.model_dump(exclude_none=True, mode='json')
+              ]
           }
       }
     elif isinstance(input, Sequence) and isinstance(
@@ -460,7 +483,7 @@
       client_message = {
           'tool_response': {
               'function_responses': [
-                  c.model_dump(exclude_none=True) for c in input
+                  c.model_dump(exclude_none=True, mode='json') for c in input
               ]
           }
       }
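The repeated `mode='json'` additions make Pydantic emit JSON-serializable values, which is also why the manual `base64.b64encode` call on `Blob.data` could be deleted above. A minimal illustration of the difference:

from google.genai import types

blob = types.Blob(data=b'\x00\x01\x02', mime_type='audio/pcm')

# Default (python) mode keeps raw bytes, which json.dumps cannot serialize:
print(type(blob.model_dump(exclude_none=True)['data']))               # <class 'bytes'>

# mode='json' renders the field as a JSON-safe string instead:
print(type(blob.model_dump(exclude_none=True, mode='json')['data']))  # <class 'str'>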
@@ -682,10 +705,10 @@ class AsyncLive(_api_module.BaseModule):
       auth_req = google.auth.transport.requests.Request()
       creds.refresh(auth_req)
       bearer_token = creds.token
-      headers = {
-          'Content-Type': 'application/json',
+      headers = self._api_client._http_options['headers']
+      headers.update({
           'Authorization': 'Bearer {}'.format(bearer_token),
-      }
+      })
       version = self._api_client._http_options['api_version']
       uri = f'{base_url}/ws/google.cloud.aiplatform.{version}.LlmBidiService/BidiGenerateContent'
       location = self._api_client.location
@@ -702,6 +725,6 @@
 
     async with connect(uri, additional_headers=headers) as ws:
       await ws.send(request)
-      logging.info(await ws.recv(decode=False))
+      logger.info(await ws.recv(decode=False))
 
       yield AsyncSession(api_client=self._api_client, websocket=ws)
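Two behavioral notes fall out of this final pair of hunks: the Vertex AI live websocket handshake now starts from the headers configured in the client's `http_options` (previously a hard-coded `Content-Type`/`Authorization` pair), and live-module log records move from the root logger to the namespaced `google_genai.live`. A hedged sketch relying on both (the header name and project values are illustrative):

import logging
from google import genai

# Custom headers configured here are now forwarded to the Vertex AI live
# websocket handshake alongside the Authorization header.
client = genai.Client(
    vertexai=True,
    project='your-project-id',
    location='us-central1',
    http_options={'headers': {'x-custom-tag': 'my-app'}},
)

# Namespaced loggers can be tuned per module without touching the root logger:
logging.getLogger('google_genai.live').setLevel(logging.DEBUG)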