google-genai 1.18.0__py3-none-any.whl → 1.20.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
google/genai/chats.py CHANGED
@@ -63,13 +63,8 @@ def _extract_curated_history(
   """Extracts the curated (valid) history from a comprehensive history.
 
   The comprehensive history contains all turns (user input and model responses),
-  including any invalid or rejected model outputs. This function filters
-  that history to return only the valid turns.
-
-  A "turn" starts with one user input (a single content) and then follows by
-  corresponding model response (which may consist of multiple contents).
-  Turns are assumed to alternate: user input, model output, user input, model
-  output, etc.
+  including any invalid or rejected model outputs. This function filters that
+  history to return only the valid turns.
 
   Args:
     comprehensive_history: A list representing the complete chat history.
@@ -84,8 +79,6 @@ def _extract_curated_history(
   length = len(comprehensive_history)
   i = 0
   current_input = comprehensive_history[i]
-  if current_input.role != "user":
-    raise ValueError("History must start with a user turn.")
   while i < length:
     if comprehensive_history[i].role not in ["user", "model"]:
       raise ValueError(
@@ -94,6 +87,7 @@ def _extract_curated_history(
 
     if comprehensive_history[i].role == "user":
       current_input = comprehensive_history[i]
+      curated_history.append(current_input)
       i += 1
     else:
       current_output = []
@@ -104,8 +98,9 @@ def _extract_curated_history(
           is_valid = False
         i += 1
       if is_valid:
-        curated_history.append(current_input)
         curated_history.extend(current_output)
+      elif curated_history:
+        curated_history.pop()
   return curated_history
 
 
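The practical effect: a user turn is now appended to the curated history as soon as it is seen and popped again if the model output that follows fails validation, and a history no longer has to start with a user turn. The following standalone sketch (not part of the diff; it uses a stand-in dataclass rather than types.Content) mirrors the new control flow:

```python
from dataclasses import dataclass


@dataclass
class FakeContent:
  """Stand-in for types.Content with only the fields the sketch needs."""
  role: str
  text: str
  valid: bool = True  # whether this turn would pass response validation


def extract_curated(history: list[FakeContent]) -> list[FakeContent]:
  """Mirrors the new flow: append user turns eagerly, pop on invalid output."""
  curated: list[FakeContent] = []
  i = 0
  while i < len(history):
    if history[i].role == "user":
      curated.append(history[i])
      i += 1
    else:
      output, is_valid = [], True
      while i < len(history) and history[i].role == "model":
        output.append(history[i])
        is_valid = is_valid and history[i].valid
        i += 1
      if is_valid:
        curated.extend(output)
      elif curated:
        curated.pop()  # drop the user turn paired with the invalid output
  return curated


history = [
    FakeContent("user", "hi"),
    FakeContent("model", "hello"),
    FakeContent("user", "tell me a story"),
    FakeContent("model", "", valid=False),  # e.g. an empty or rejected reply
]
print([c.text for c in extract_curated(history)])  # ['hi', 'hello']
```

The same filtering is what chat sessions expose through their curated history accessor (get_history(curated=True)).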
google/genai/errors.py CHANGED
@@ -22,6 +22,7 @@ import json
 
 if TYPE_CHECKING:
   from .replay_api_client import ReplayResponse
+  import aiohttp
 
 
 class APIError(Exception):
@@ -36,7 +37,9 @@ class APIError(Exception):
       self,
       code: int,
       response_json: Any,
-      response: Optional[Union['ReplayResponse', httpx.Response]] = None,
+      response: Optional[
+          Union['ReplayResponse', httpx.Response, 'aiohttp.ClientResponse']
+      ] = None,
   ):
     self.response = response
     self.details = response_json
@@ -106,12 +109,17 @@ class APIError(Exception):
 
   @classmethod
   async def raise_for_async_response(
-      cls, response: Union['ReplayResponse', httpx.Response]
+      cls,
+      response: Union[
+          'ReplayResponse', httpx.Response, 'aiohttp.ClientResponse'
+      ],
   ) -> None:
     """Raises an error with detailed error message if the response has an error status."""
-    if response.status_code == 200:
-      return
+    status_code = 0
+    response_json = None
     if isinstance(response, httpx.Response):
+      if response.status_code == 200:
+        return
       try:
         await response.aread()
         response_json = response.json()
@@ -121,10 +129,28 @@ class APIError(Exception):
             'message': message,
             'status': response.reason_phrase,
         }
+      status_code = response.status_code
     else:
-      response_json = response.body_segments[0].get('error', {})
+      try:
+        import aiohttp  # pylint: disable=g-import-not-at-top
+
+        if isinstance(response, aiohttp.ClientResponse):
+          if response.status == 200:
+            return
+          try:
+            response_json = await response.json()
+          except aiohttp.client_exceptions.ContentTypeError:
+            message = await response.text()
+            response_json = {
+                'message': message,
+                'status': response.reason,
+            }
+          status_code = response.status
+        else:
+          response_json = response.body_segments[0].get('error', {})
+      except ImportError:
+        response_json = response.body_segments[0].get('error', {})
 
-    status_code = response.status_code
     if 400 <= status_code < 500:
       raise ClientError(status_code, response_json, response)
     elif 500 <= status_code < 600:
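With this change, APIError.raise_for_async_response can parse error bodies from aiohttp.ClientResponse objects as well as httpx.Response, so error handling looks the same regardless of which async transport produced the response. A minimal caller-side sketch (assumes GOOGLE_API_KEY is set; the model name is deliberately bogus just to trigger an error):

```python
import asyncio

from google import genai
from google.genai import errors


async def main() -> None:
  client = genai.Client()  # assumes GOOGLE_API_KEY is set in the environment
  try:
    await client.aio.models.generate_content(
        model="no-such-model",  # hypothetical name, used only to force an error
        contents="hello",
    )
  except errors.APIError as e:
    # code and details are filled from the parsed error body, whether the
    # underlying transport was httpx or the optional aiohttp client.
    print(e.code, e.details)


asyncio.run(main())
```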
google/genai/files.py CHANGED
@@ -22,6 +22,7 @@ import os
 import pathlib
 from typing import Any, Optional, Union
 from urllib.parse import urlencode
+
 from . import _api_module
 from . import _common
 from . import _transformers as t
@@ -1152,15 +1153,18 @@ class AsyncFiles(_api_module.BaseModule):
     response = await self._create(
         file=file_obj, config=types.CreateFileConfig(http_options=http_options)
     )
-    if (
-        response.http_headers is None
-        or 'x-goog-upload-url' not in response.http_headers
+    if response.http_headers is None or (
+        'x-goog-upload-url' not in response.http_headers
+        and 'X-Goog-Upload-URL' not in response.http_headers
     ):
       raise KeyError(
           'Failed to create file. Upload URL did not returned from the create'
           ' file request.'
       )
-    upload_url = response.http_headers['x-goog-upload-url']
+    elif 'x-goog-upload-url' in response.http_headers:
+      upload_url = response.http_headers['x-goog-upload-url']
+    else:
+      upload_url = response.http_headers['X-Goog-Upload-URL']
 
     if isinstance(file, io.IOBase):
       return_file = await self._api_client.async_upload_file(
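The async upload path now accepts the upload URL header in either casing ('x-goog-upload-url' or 'X-Goog-Upload-URL'), which matters when a proxy or HTTP client canonicalizes header names. Usage is unchanged; a minimal sketch, assuming a local sample.pdf and an API key in the environment:

```python
import asyncio

from google import genai


async def main() -> None:
  client = genai.Client()  # assumes GOOGLE_API_KEY is set in the environment
  # The create-file handshake now finds the upload URL under either header casing.
  uploaded = await client.aio.files.upload(file="sample.pdf")  # hypothetical local file
  print(uploaded.name, uploaded.uri)


asyncio.run(main())
```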
google/genai/models.py CHANGED
@@ -18,6 +18,7 @@
 import logging
 from typing import Any, AsyncIterator, Awaitable, Iterator, Optional, Union
 from urllib.parse import urlencode
+
 from . import _api_module
 from . import _common
 from . import _extra_utils
@@ -1364,6 +1365,28 @@ def _Image_to_mldev(
   return to_object
 
 
+def _Video_to_mldev(
+    api_client: BaseApiClient,
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['uri']) is not None:
+    setv(to_object, ['video', 'uri'], getv(from_object, ['uri']))
+
+  if getv(from_object, ['video_bytes']) is not None:
+    setv(
+        to_object,
+        ['video', 'encodedVideo'],
+        t.t_bytes(api_client, getv(from_object, ['video_bytes'])),
+    )
+
+  if getv(from_object, ['mime_type']) is not None:
+    setv(to_object, ['encoding'], getv(from_object, ['mime_type']))
+
+  return to_object
+
+
 def _GenerateVideosConfig_to_mldev(
     api_client: BaseApiClient,
     from_object: Union[dict[str, Any], object],
@@ -1422,11 +1445,18 @@ def _GenerateVideosConfig_to_mldev(
     )
 
   if getv(from_object, ['enhance_prompt']) is not None:
-    raise ValueError('enhance_prompt parameter is not supported in Gemini API.')
+    setv(
+        parent_object,
+        ['parameters', 'enhancePrompt'],
+        getv(from_object, ['enhance_prompt']),
+    )
 
   if getv(from_object, ['generate_audio']) is not None:
     raise ValueError('generate_audio parameter is not supported in Gemini API.')
 
+  if getv(from_object, ['last_frame']) is not None:
+    raise ValueError('last_frame parameter is not supported in Gemini API.')
+
   return to_object
 
 
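For the Gemini Developer API, enhance_prompt is now forwarded as parameters.enhancePrompt instead of raising ValueError, while last_frame (and, below, video) still raise. A minimal sketch, assuming a Veo model name and an API key in the environment:

```python
from google import genai
from google.genai import types

client = genai.Client()  # Gemini Developer API; assumes GOOGLE_API_KEY is set

operation = client.models.generate_videos(
    model="veo-2.0-generate-001",  # model name is an assumption
    prompt="A timelapse of clouds rolling over a mountain lake",
    config=types.GenerateVideosConfig(
        number_of_videos=1,
        enhance_prompt=True,  # no longer rejected on the Gemini API
    ),
)
# Poll the returned long-running operation with client.operations.get(...)
# as in the SDK's generate_videos usage example.
```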
@@ -1453,6 +1483,9 @@ def _GenerateVideosParameters_to_mldev(
         _Image_to_mldev(api_client, getv(from_object, ['image']), to_object),
     )
 
+  if getv(from_object, ['video']) is not None:
+    raise ValueError('video parameter is not supported in Gemini API.')
+
   if getv(from_object, ['config']) is not None:
     setv(
         to_object,
@@ -1981,7 +2014,13 @@ def _Tool_to_vertex(
     )
 
   if getv(from_object, ['url_context']) is not None:
-    raise ValueError('url_context parameter is not supported in Vertex AI.')
+    setv(
+        to_object,
+        ['urlContext'],
+        _UrlContext_to_vertex(
+            api_client, getv(from_object, ['url_context']), to_object
+        ),
+    )
 
   if getv(from_object, ['code_execution']) is not None:
     setv(to_object, ['codeExecution'], getv(from_object, ['code_execution']))
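url_context tools are now translated for Vertex AI requests (previously they raised ValueError), and the urlContextMetadata mapped back from Vertex candidates further down surfaces the retrieval details. A minimal sketch, assuming a Vertex AI project/location and a Gemini model that supports the URL context tool:

```python
from google import genai
from google.genai import types

# project and location are placeholders; vertexai=True selects the Vertex AI backend
client = genai.Client(vertexai=True, project="my-project", location="us-central1")

response = client.models.generate_content(
    model="gemini-2.5-flash",  # model name is an assumption
    contents="Summarize https://example.com in one sentence.",
    config=types.GenerateContentConfig(
        tools=[types.Tool(url_context=types.UrlContext())],
    ),
)
print(response.text)
# URL retrieval details are now surfaced on Vertex responses as well.
print(response.candidates[0].url_context_metadata)
```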
@@ -3223,6 +3262,28 @@ def _ComputeTokensParameters_to_vertex(
   return to_object
 
 
+def _Video_to_vertex(
+    api_client: BaseApiClient,
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['uri']) is not None:
+    setv(to_object, ['gcsUri'], getv(from_object, ['uri']))
+
+  if getv(from_object, ['video_bytes']) is not None:
+    setv(
+        to_object,
+        ['bytesBase64Encoded'],
+        t.t_bytes(api_client, getv(from_object, ['video_bytes'])),
+    )
+
+  if getv(from_object, ['mime_type']) is not None:
+    setv(to_object, ['mimeType'], getv(from_object, ['mime_type']))
+
+  return to_object
+
+
 def _GenerateVideosConfig_to_vertex(
     api_client: BaseApiClient,
     from_object: Union[dict[str, Any], object],
@@ -3306,6 +3367,15 @@ def _GenerateVideosConfig_to_vertex(
         getv(from_object, ['generate_audio']),
     )
 
+  if getv(from_object, ['last_frame']) is not None:
+    setv(
+        parent_object,
+        ['instances[0]', 'lastFrame'],
+        _Image_to_vertex(
+            api_client, getv(from_object, ['last_frame']), to_object
+        ),
+    )
+
   return to_object
 
 
@@ -3332,6 +3402,13 @@ def _GenerateVideosParameters_to_vertex(
         _Image_to_vertex(api_client, getv(from_object, ['image']), to_object),
     )
 
+  if getv(from_object, ['video']) is not None:
+    setv(
+        to_object,
+        ['instances[0]', 'video'],
+        _Video_to_vertex(api_client, getv(from_object, ['video']), to_object),
+    )
+
   if getv(from_object, ['config']) is not None:
     setv(
         to_object,
@@ -4294,6 +4371,15 @@ def _Candidate_from_vertex(
   if getv(from_object, ['finishReason']) is not None:
     setv(to_object, ['finish_reason'], getv(from_object, ['finishReason']))
 
+  if getv(from_object, ['urlContextMetadata']) is not None:
+    setv(
+        to_object,
+        ['url_context_metadata'],
+        _UrlContextMetadata_from_vertex(
+            api_client, getv(from_object, ['urlContextMetadata']), to_object
+        ),
+    )
+
   if getv(from_object, ['avgLogprobs']) is not None:
     setv(to_object, ['avg_logprobs'], getv(from_object, ['avgLogprobs']))
 
@@ -5778,13 +5864,26 @@ class Models(_api_module.BaseModule):
       model: str,
       prompt: Optional[str] = None,
       image: Optional[types.ImageOrDict] = None,
+      video: Optional[types.VideoOrDict] = None,
       config: Optional[types.GenerateVideosConfigOrDict] = None,
   ) -> types.GenerateVideosOperation:
-    """Generates videos based on a text description and configuration.
+    """Generates videos based on an input (text, image, or video) and configuration.
+
+    The following use cases are supported:
+    1. Text to video generation.
+    2a. Image to video generation (additional text prompt is optional).
+    2b. Image to video generation with frame interpolation (specify last_frame
+      in config).
+    3. Video extension (additional text prompt is optional)
 
     Args:
       model: The model to use.
-      instances: A list of prompts, images and videos to generate videos from.
+      prompt: The text prompt for generating the videos. Optional for image to
+        video use cases.
+      image: The input image for generating the videos. Optional if prompt is
+        provided.
+      video: The input video for video extension use cases. Optional if prompt
+        or image is provided.
       config: Configuration for generation.
 
     Usage:
@@ -5806,6 +5905,7 @@ class Models(_api_module.BaseModule):
         model=model,
         prompt=prompt,
         image=image,
+        video=video,
        config=config,
     )
 
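The new video argument feeds the video-extension path above; on Vertex AI it is serialized by _Video_to_vertex into instances[0].video. A minimal sketch of video extension (the model name, bucket URI, and polling interval are assumptions; the polling loop follows the SDK's own usage example):

```python
import time

from google import genai
from google.genai import types

client = genai.Client(vertexai=True, project="my-project", location="us-central1")

operation = client.models.generate_videos(
    model="veo-2.0-generate-001",  # model name is an assumption
    prompt="Continue the scene as the camera pans up to the skyline",
    video=types.Video(uri="gs://my-bucket/input.mp4", mime_type="video/mp4"),
    config=types.GenerateVideosConfig(number_of_videos=1),
)
while not operation.done:
  time.sleep(10)
  operation = client.operations.get(operation)
print(operation.result.generated_videos[0].video.uri)
```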
@@ -7308,13 +7408,26 @@ class AsyncModels(_api_module.BaseModule):
       model: str,
       prompt: Optional[str] = None,
       image: Optional[types.ImageOrDict] = None,
+      video: Optional[types.VideoOrDict] = None,
       config: Optional[types.GenerateVideosConfigOrDict] = None,
   ) -> types.GenerateVideosOperation:
-    """Generates videos based on a text description and configuration.
+    """Generates videos based on an input (text, image, or video) and configuration.
+
+    The following use cases are supported:
+    1. Text to video generation.
+    2a. Image to video generation (additional text prompt is optional).
+    2b. Image to video generation with frame interpolation (specify last_frame
+      in config).
+    3. Video extension (additional text prompt is optional)
 
     Args:
      model: The model to use.
-      instances: A list of prompts, images and videos to generate videos from.
+      prompt: The text prompt for generating the videos. Optional for image to
+        video use cases.
+      image: The input image for generating the videos. Optional if prompt is
+        provided.
+      video: The input video for video extension use cases. Optional if prompt
+        or image is provided.
       config: Configuration for generation.
 
     Usage:
@@ -7336,6 +7449,7 @@ class AsyncModels(_api_module.BaseModule):
         model=model,
         prompt=prompt,
         image=image,
+        video=video,
         config=config,
     )
 
@@ -7644,8 +7758,10 @@ class AsyncModels(_api_module.BaseModule):
             or not chunk.candidates[0].content.parts
         ):
           break
-        func_response_parts = _extra_utils.get_function_response_parts(
-            chunk, function_map
+        func_response_parts = (
+            await _extra_utils.get_function_response_parts_async(
+                chunk, function_map
+            )
         )
         if not function_map:
           break
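The streaming automatic-function-calling loop in AsyncModels now awaits get_function_response_parts_async, so coroutine functions supplied as tools are executed properly during streamed generation. A minimal sketch, assuming automatic function calling is left enabled and an API key is set; the tool itself is hypothetical:

```python
import asyncio

from google import genai
from google.genai import types


async def get_weather(city: str) -> str:
  """Hypothetical async tool; coroutine tools can now be awaited mid-stream."""
  await asyncio.sleep(0)  # stand-in for a real async lookup
  return f"Sunny in {city}"


async def main() -> None:
  client = genai.Client()  # assumes GOOGLE_API_KEY is set in the environment
  async for chunk in await client.aio.models.generate_content_stream(
      model="gemini-2.5-flash",  # model name is an assumption
      contents="What is the weather in Paris?",
      config=types.GenerateContentConfig(tools=[get_weather]),
  ):
    if chunk.text:
      print(chunk.text, end="")


asyncio.run(main())
```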
@@ -18,6 +18,7 @@
 import logging
 from typing import Any, Optional, Union
 from urllib.parse import urlencode
+
 from . import _api_module
 from . import _common
 from . import _transformers as t
google/genai/tokens.py CHANGED
@@ -143,6 +143,7 @@ class Tokens(_api_module.BaseModule):
     Usage:
 
     .. code-block:: python
+
       # Case 1: If LiveEphemeralParameters is unset, unlock LiveConnectConfig
       # when using the token in Live API sessions. Each session connection can
       # use a different configuration.
@@ -154,6 +155,7 @@ class Tokens(_api_module.BaseModule):
       auth_token = client.tokens.create(config=config)
 
     .. code-block:: python
+
       # Case 2: If LiveEphemeralParameters is set, lock all fields in
       # LiveConnectConfig when using the token in Live API sessions. For
       # example, changing `output_audio_transcription` in the Live API
@@ -170,7 +172,9 @@ class Tokens(_api_module.BaseModule):
             ),
         )
     )
-    .. code-block:: python
+
+    .. code-block:: python
+
       # Case 3: If LiveEphemeralParameters is set and lockAdditionalFields is
       # empty, lock LiveConnectConfig with set fields (e.g.
       # system_instruction in this example) when using the token in Live API
@@ -187,7 +191,8 @@ class Tokens(_api_module.BaseModule):
         )
     )
 
-    .. code-block:: python
+    .. code-block:: python
+
       # Case 4: If LiveEphemeralParameters is set and lockAdditionalFields is
       # set, lock LiveConnectConfig with set and additional fields (e.g.
       # system_instruction, temperature in this example) when using the token
google/genai/tunings.py CHANGED
@@ -18,6 +18,7 @@
 import logging
 from typing import Any, Optional, Union
 from urllib.parse import urlencode
+
 from . import _api_module
 from . import _common
 from . import _transformers as t