google-genai 1.23.0__tar.gz → 1.25.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {google_genai-1.23.0/google_genai.egg-info → google_genai-1.25.0}/PKG-INFO +96 -7
- {google_genai-1.23.0 → google_genai-1.25.0}/README.md +95 -6
- {google_genai-1.23.0 → google_genai-1.25.0}/google/genai/_api_client.py +43 -31
- {google_genai-1.23.0 → google_genai-1.25.0}/google/genai/_common.py +6 -4
- {google_genai-1.23.0 → google_genai-1.25.0}/google/genai/_live_converters.py +14 -6
- {google_genai-1.23.0 → google_genai-1.25.0}/google/genai/_replay_api_client.py +15 -8
- {google_genai-1.23.0 → google_genai-1.25.0}/google/genai/_tokens_converters.py +6 -0
- {google_genai-1.23.0 → google_genai-1.25.0}/google/genai/_transformers.py +12 -6
- {google_genai-1.23.0 → google_genai-1.25.0}/google/genai/batches.py +84 -12
- {google_genai-1.23.0 → google_genai-1.25.0}/google/genai/caches.py +6 -0
- {google_genai-1.23.0 → google_genai-1.25.0}/google/genai/errors.py +2 -2
- {google_genai-1.23.0 → google_genai-1.25.0}/google/genai/live.py +14 -5
- {google_genai-1.23.0 → google_genai-1.25.0}/google/genai/models.py +6 -0
- {google_genai-1.23.0 → google_genai-1.25.0}/google/genai/tokens.py +8 -4
- {google_genai-1.23.0 → google_genai-1.25.0}/google/genai/tunings.py +12 -0
- {google_genai-1.23.0 → google_genai-1.25.0}/google/genai/types.py +312 -34
- {google_genai-1.23.0 → google_genai-1.25.0}/google/genai/version.py +1 -1
- {google_genai-1.23.0 → google_genai-1.25.0/google_genai.egg-info}/PKG-INFO +96 -7
- {google_genai-1.23.0 → google_genai-1.25.0}/pyproject.toml +1 -1
- {google_genai-1.23.0 → google_genai-1.25.0}/LICENSE +0 -0
- {google_genai-1.23.0 → google_genai-1.25.0}/MANIFEST.in +0 -0
- {google_genai-1.23.0 → google_genai-1.25.0}/google/genai/__init__.py +0 -0
- {google_genai-1.23.0 → google_genai-1.25.0}/google/genai/_adapters.py +0 -0
- {google_genai-1.23.0 → google_genai-1.25.0}/google/genai/_api_module.py +0 -0
- {google_genai-1.23.0 → google_genai-1.25.0}/google/genai/_automatic_function_calling_util.py +0 -0
- {google_genai-1.23.0 → google_genai-1.25.0}/google/genai/_base_url.py +0 -0
- {google_genai-1.23.0 → google_genai-1.25.0}/google/genai/_extra_utils.py +0 -0
- {google_genai-1.23.0 → google_genai-1.25.0}/google/genai/_mcp_utils.py +0 -0
- {google_genai-1.23.0 → google_genai-1.25.0}/google/genai/_test_api_client.py +0 -0
- {google_genai-1.23.0 → google_genai-1.25.0}/google/genai/chats.py +0 -0
- {google_genai-1.23.0 → google_genai-1.25.0}/google/genai/client.py +0 -0
- {google_genai-1.23.0 → google_genai-1.25.0}/google/genai/files.py +0 -0
- {google_genai-1.23.0 → google_genai-1.25.0}/google/genai/live_music.py +0 -0
- {google_genai-1.23.0 → google_genai-1.25.0}/google/genai/operations.py +0 -0
- {google_genai-1.23.0 → google_genai-1.25.0}/google/genai/pagers.py +0 -0
- {google_genai-1.23.0 → google_genai-1.25.0}/google/genai/py.typed +0 -0
- {google_genai-1.23.0 → google_genai-1.25.0}/google_genai.egg-info/SOURCES.txt +0 -0
- {google_genai-1.23.0 → google_genai-1.25.0}/google_genai.egg-info/dependency_links.txt +0 -0
- {google_genai-1.23.0 → google_genai-1.25.0}/google_genai.egg-info/requires.txt +0 -0
- {google_genai-1.23.0 → google_genai-1.25.0}/google_genai.egg-info/top_level.txt +0 -0
- {google_genai-1.23.0 → google_genai-1.25.0}/setup.cfg +0 -0
````diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: google-genai
-Version: 1.23.0
+Version: 1.25.0
 Summary: GenAI Python SDK
 Author-email: Google LLC <googleapis-packages@google.com>
 License: Apache-2.0
````
````diff
@@ -89,10 +89,13 @@ You can create a client by configuring the necessary environment variables.
 Configuration setup instructions depends on whether you're using the Gemini
 Developer API or the Gemini API in Vertex AI.
 
-**Gemini Developer API:** Set `
+**Gemini Developer API:** Set the `GEMINI_API_KEY` or `GOOGLE_API_KEY`.
+It will automatically be picked up by the client. It's recommended that you
+set only one of those variables, but if both are set, `GOOGLE_API_KEY` takes
+precedence.
 
 ```bash
-export
+export GEMINI_API_KEY='your-api-key'
 ```
 
 **Gemini API on Vertex AI:** Set `GOOGLE_GENAI_USE_VERTEXAI`,
````
````diff
@@ -1132,9 +1135,9 @@ response3.generated_images[0].image.show()
 
 ### Veo
 
-
+Support for generating videos is considered public preview
 
-
+#### Generate Videos (Text to Video)
 
 ```python
 from google.genai import types
@@ -1145,7 +1148,6 @@ operation = client.models.generate_videos(
     prompt='A neon hologram of a cat driving at top speed',
     config=types.GenerateVideosConfig(
         number_of_videos=1,
-        fps=24,
         duration_seconds=5,
         enhance_prompt=True,
     ),
@@ -1156,7 +1158,73 @@ while not operation.done:
     time.sleep(20)
     operation = client.operations.get(operation)
 
-video = operation.
+video = operation.response.generated_videos[0].video
+video.show()
+```
+
+#### Generate Videos (Image to Video)
+
+```python
+from google.genai import types
+
+# Read local image (uses mimetypes.guess_type to infer mime type)
+image = types.Image.from_file("local/path/file.png")
+
+# Create operation
+operation = client.models.generate_videos(
+    model='veo-2.0-generate-001',
+    # Prompt is optional if image is provided
+    prompt='Night sky',
+    image=image,
+    config=types.GenerateVideosConfig(
+        number_of_videos=1,
+        duration_seconds=5,
+        enhance_prompt=True,
+        # Can also pass an Image into last_frame for frame interpolation
+    ),
+)
+
+# Poll operation
+while not operation.done:
+    time.sleep(20)
+    operation = client.operations.get(operation)
+
+video = operation.response.generated_videos[0].video
+video.show()
+```
+
+#### Generate Videos (Video to Video)
+
+Currently, only Vertex supports Video to Video generation (Video extension).
+
+```python
+from google.genai import types
+
+# Read local video (uses mimetypes.guess_type to infer mime type)
+video = types.Video.from_file("local/path/video.mp4")
+
+# Create operation
+operation = client.models.generate_videos(
+    model='veo-2.0-generate-001',
+    # Prompt is optional if Video is provided
+    prompt='Night sky',
+    # Input video must be in GCS
+    video=types.Video(
+        uri="gs://bucket-name/inputs/videos/cat_driving.mp4",
+    ),
+    config=types.GenerateVideosConfig(
+        number_of_videos=1,
+        duration_seconds=5,
+        enhance_prompt=True,
+    ),
+)
+
+# Poll operation
+while not operation.done:
+    time.sleep(20)
+    operation = client.operations.get(operation)
+
+video = operation.response.generated_videos[0].video
 video.show()
 ```
 
````
````diff
@@ -1568,3 +1636,24 @@ except errors.APIError as e:
     print(e.code) # 404
     print(e.message)
 ```
+
+## Extra Request Body
+
+The `extra_body` field in `HttpOptions` accepts a dictionary of additional JSON
+properties to include in the request body. This can be used to access new or
+experimental backend features that are not yet formally supported in the SDK.
+The structure of the dictionary must match the backend API's request structure.
+
+- VertexAI backend API docs: https://cloud.google.com/vertex-ai/docs/reference/rest
+- GeminiAPI backend API docs: https://ai.google.dev/api/rest
+
+```python
+response = client.models.generate_content(
+    model="gemini-2.5-pro",
+    contents="What is the weather in Boston? and how about Sunnyvale?",
+    config=types.GenerateContentConfig(
+        tools=[get_current_weather],
+        http_options=types.HttpOptions(extra_body={'tool_config': {'function_calling_config': {'mode': 'COMPOSITIONAL'}}}),
+    ),
+)
+```
````
````diff
@@ -55,10 +55,13 @@ You can create a client by configuring the necessary environment variables.
 Configuration setup instructions depends on whether you're using the Gemini
 Developer API or the Gemini API in Vertex AI.
 
-**Gemini Developer API:** Set `
+**Gemini Developer API:** Set the `GEMINI_API_KEY` or `GOOGLE_API_KEY`.
+It will automatically be picked up by the client. It's recommended that you
+set only one of those variables, but if both are set, `GOOGLE_API_KEY` takes
+precedence.
 
 ```bash
-export
+export GEMINI_API_KEY='your-api-key'
 ```
 
 **Gemini API on Vertex AI:** Set `GOOGLE_GENAI_USE_VERTEXAI`,
````
````diff
@@ -1098,9 +1101,9 @@ response3.generated_images[0].image.show()
 
 ### Veo
 
-
+Support for generating videos is considered public preview
 
-
+#### Generate Videos (Text to Video)
 
 ```python
 from google.genai import types
@@ -1111,7 +1114,6 @@ operation = client.models.generate_videos(
     prompt='A neon hologram of a cat driving at top speed',
     config=types.GenerateVideosConfig(
         number_of_videos=1,
-        fps=24,
         duration_seconds=5,
         enhance_prompt=True,
     ),
@@ -1122,7 +1124,73 @@ while not operation.done:
     time.sleep(20)
     operation = client.operations.get(operation)
 
-video = operation.
+video = operation.response.generated_videos[0].video
+video.show()
+```
+
+#### Generate Videos (Image to Video)
+
+```python
+from google.genai import types
+
+# Read local image (uses mimetypes.guess_type to infer mime type)
+image = types.Image.from_file("local/path/file.png")
+
+# Create operation
+operation = client.models.generate_videos(
+    model='veo-2.0-generate-001',
+    # Prompt is optional if image is provided
+    prompt='Night sky',
+    image=image,
+    config=types.GenerateVideosConfig(
+        number_of_videos=1,
+        duration_seconds=5,
+        enhance_prompt=True,
+        # Can also pass an Image into last_frame for frame interpolation
+    ),
+)
+
+# Poll operation
+while not operation.done:
+    time.sleep(20)
+    operation = client.operations.get(operation)
+
+video = operation.response.generated_videos[0].video
+video.show()
+```
+
+#### Generate Videos (Video to Video)
+
+Currently, only Vertex supports Video to Video generation (Video extension).
+
+```python
+from google.genai import types
+
+# Read local video (uses mimetypes.guess_type to infer mime type)
+video = types.Video.from_file("local/path/video.mp4")
+
+# Create operation
+operation = client.models.generate_videos(
+    model='veo-2.0-generate-001',
+    # Prompt is optional if Video is provided
+    prompt='Night sky',
+    # Input video must be in GCS
+    video=types.Video(
+        uri="gs://bucket-name/inputs/videos/cat_driving.mp4",
+    ),
+    config=types.GenerateVideosConfig(
+        number_of_videos=1,
+        duration_seconds=5,
+        enhance_prompt=True,
+    ),
+)
+
+# Poll operation
+while not operation.done:
+    time.sleep(20)
+    operation = client.operations.get(operation)
+
+video = operation.response.generated_videos[0].video
 video.show()
 ```
 
````
````diff
@@ -1534,3 +1602,24 @@ except errors.APIError as e:
     print(e.code) # 404
     print(e.message)
 ```
+
+## Extra Request Body
+
+The `extra_body` field in `HttpOptions` accepts a dictionary of additional JSON
+properties to include in the request body. This can be used to access new or
+experimental backend features that are not yet formally supported in the SDK.
+The structure of the dictionary must match the backend API's request structure.
+
+- VertexAI backend API docs: https://cloud.google.com/vertex-ai/docs/reference/rest
+- GeminiAPI backend API docs: https://ai.google.dev/api/rest
+
+```python
+response = client.models.generate_content(
+    model="gemini-2.5-pro",
+    contents="What is the weather in Boston? and how about Sunnyvale?",
+    config=types.GenerateContentConfig(
+        tools=[get_current_weather],
+        http_options=types.HttpOptions(extra_body={'tool_config': {'function_calling_config': {'mode': 'COMPOSITIONAL'}}}),
+    ),
+)
+```
````
````diff
@@ -74,8 +74,6 @@ try:
 except ImportError:
   pass
 
-# internal comment
-
 
 if TYPE_CHECKING:
   from multidict import CIMultiDictProxy
````
````diff
@@ -234,8 +232,18 @@ class HttpResponse:
       byte_stream: Union[Any, bytes] = None,
       session: Optional['aiohttp.ClientSession'] = None,
   ):
+    if isinstance(headers, dict):
+      self.headers = headers
+    elif isinstance(headers, httpx.Headers):
+      self.headers = {
+          key: ', '.join(headers.get_list(key))
+          for key in headers.keys()}
+    elif type(headers).__name__ == 'CIMultiDictProxy':
+      self.headers = {
+          key: ', '.join(headers.getall(key))
+          for key in headers.keys()}
+
     self.status_code: int = 200
-    self.headers = headers
     self.response_stream = response_stream
     self.byte_stream = byte_stream
     self._session = session
````
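The constructor change above normalizes whatever header container the transport returns (a plain `dict`, `httpx.Headers`, or aiohttp's `CIMultiDictProxy`) into a `dict[str, str]`, joining repeated headers with `', '`. A minimal standalone sketch of that joining behavior, shown here only for the `httpx.Headers` branch:

```python
import httpx

# Repeated headers are flattened into one comma-joined string, mirroring what
# the HttpResponse constructor above now does for httpx and aiohttp headers.
headers = httpx.Headers(
    [('set-cookie', 'a=1'), ('set-cookie', 'b=2'), ('content-type', 'application/json')]
)
normalized = {key: ', '.join(headers.get_list(key)) for key in headers.keys()}
print(normalized)  # {'set-cookie': 'a=1, b=2', 'content-type': 'application/json'}
```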
````diff
@@ -338,9 +346,11 @@ class HttpResponse:
 
 # Default retry options.
 # The config is based on https://cloud.google.com/storage/docs/retry-strategy.
-
+# By default, the client will retry 4 times with approximately 1.0, 2.0, 4.0,
+# 8.0 seconds between each attempt.
+_RETRY_ATTEMPTS = 5 # including the initial call.
 _RETRY_INITIAL_DELAY = 1.0 # seconds
-_RETRY_MAX_DELAY =
+_RETRY_MAX_DELAY = 60.0 # seconds
 _RETRY_EXP_BASE = 2
 _RETRY_JITTER = 1
 _RETRY_HTTP_STATUS_CODES = (
````
````diff
@@ -364,14 +374,13 @@ def _retry_args(options: Optional[HttpRetryOptions]) -> dict[str, Any]:
     The arguments passed to the tenacity.(Async)Retrying constructor.
   """
   if options is None:
-    return {'stop': tenacity.stop_after_attempt(1)}
+    return {'stop': tenacity.stop_after_attempt(1), 'reraise': True}
 
   stop = tenacity.stop_after_attempt(options.attempts or _RETRY_ATTEMPTS)
   retriable_codes = options.http_status_codes or _RETRY_HTTP_STATUS_CODES
-  retry = tenacity.
-      lambda
+  retry = tenacity.retry_if_exception(
+      lambda e: isinstance(e, errors.APIError) and e.code in retriable_codes,
   )
-  retry_error_callback = lambda retry_state: retry_state.outcome.result()
   wait = tenacity.wait_exponential_jitter(
       initial=options.initial_delay or _RETRY_INITIAL_DELAY,
       max=options.max_delay or _RETRY_MAX_DELAY,
@@ -381,7 +390,7 @@ def _retry_args(options: Optional[HttpRetryOptions]) -> dict[str, Any]:
   return {
       'stop': stop,
       'retry': retry,
-      '
+      'reraise': True,
       'wait': wait,
   }
 
````
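Two behavioral points fall out of the retry hunks above: retries are matched on `errors.APIError` status codes, and `'reraise': True` means an exhausted retry loop re-raises the original `APIError` instead of swallowing it. A rough sketch of opting in through `HttpOptions.retry_options`; the field names mirror the ones read in `_retry_args`, while the values below are only illustrative:

```python
from google import genai
from google.genai import types

# Illustrative values only: attempts counts the initial call, delays are in
# seconds, matching the defaults introduced in this release.
retry_options = types.HttpRetryOptions(
    attempts=5,
    initial_delay=1.0,
    max_delay=60.0,
)
client = genai.Client(http_options=types.HttpOptions(retry_options=retry_options))
```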
````diff
@@ -569,18 +578,16 @@ class BaseApiClient:
     )
     self._httpx_client = SyncHttpxClient(**client_args)
     self._async_httpx_client = AsyncHttpxClient(**async_client_args)
-    if
+    if self._use_aiohttp():
       # Do it once at the genai.Client level. Share among all requests.
       self._async_client_session_request_args = self._ensure_aiohttp_ssl_ctx(
           self._http_options
-      )
-      self._websocket_ssl_ctx = self._ensure_websocket_ssl_ctx(
-          self._http_options
-      )
+      )
+    self._websocket_ssl_ctx = self._ensure_websocket_ssl_ctx(self._http_options)
 
     retry_kwargs = _retry_args(self._http_options.retry_options)
-    self._retry = tenacity.Retrying(**retry_kwargs
-    self._async_retry = tenacity.AsyncRetrying(**retry_kwargs
+    self._retry = tenacity.Retrying(**retry_kwargs)
+    self._async_retry = tenacity.AsyncRetrying(**retry_kwargs)
 
   @staticmethod
   def _ensure_httpx_ssl_ctx(
````
````diff
@@ -706,7 +713,6 @@ class BaseApiClient:
 
     return _maybe_set(async_args, ctx)
 
-
   @staticmethod
   def _ensure_websocket_ssl_ctx(options: HttpOptions) -> dict[str, Any]:
     """Ensures the SSL context is present in the async client args.
````
````diff
@@ -762,6 +768,14 @@ class BaseApiClient:
 
     return _maybe_set(async_args, ctx)
 
+  def _use_aiohttp(self) -> bool:
+    # If the instantiator has passed a custom transport, they want httpx not
+    # aiohttp.
+    return (
+        has_aiohttp
+        and (self._http_options.async_client_args or {}).get('transport')
+        is None
+    )
 
   def _websocket_base_url(self) -> str:
     url_parts = urlparse(self._http_options.base_url)
````
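`_use_aiohttp()` gates the async transport: aiohttp is used only when it is importable and no custom transport was supplied through `HttpOptions.async_client_args`. A hedged sketch of forcing the httpx code path by passing a transport (the specific transport below is just an illustration):

```python
import httpx
from google import genai
from google.genai import types

# Supplying any 'transport' in async_client_args makes _use_aiohttp() return
# False, so async requests go through httpx even if aiohttp is installed.
client = genai.Client(
    http_options=types.HttpOptions(
        async_client_args={'transport': httpx.AsyncHTTPTransport(retries=1)},
    )
)
```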
````diff
@@ -975,7 +989,7 @@ class BaseApiClient:
       data = http_request.data
 
     if stream:
-      if
+      if self._use_aiohttp():
         session = aiohttp.ClientSession(
             headers=http_request.headers,
             trust_env=True,
@@ -1007,7 +1021,7 @@ class BaseApiClient:
         await errors.APIError.raise_for_async_response(client_response)
         return HttpResponse(client_response.headers, client_response)
     else:
-      if
+      if self._use_aiohttp():
         async with aiohttp.ClientSession(
             headers=http_request.headers,
             trust_env=True,
````
````diff
@@ -1061,11 +1075,10 @@ class BaseApiClient:
         http_method, path, request_dict, http_options
     )
     response = self._request(http_request, stream=False)
-    response_body =
-
-        headers=response.headers, body=response_body
+    response_body = (
+        response.response_stream[0] if response.response_stream else ''
     )
-
+    return SdkHttpResponse(headers=response.headers, body=response_body)
 
   def request_streamed(
       self,
@@ -1080,7 +1093,9 @@ class BaseApiClient:
 
     session_response = self._request(http_request, stream=True)
     for chunk in session_response.segments():
-      yield SdkHttpResponse(
+      yield SdkHttpResponse(
+          headers=session_response.headers, body=json.dumps(chunk)
+      )
 
   async def async_request(
       self,
````
````diff
@@ -1095,10 +1110,7 @@ class BaseApiClient:
 
     result = await self._async_request(http_request=http_request, stream=False)
     response_body = result.response_stream[0] if result.response_stream else ''
-    return SdkHttpResponse(
-        headers=result.headers, body=response_body
-    )
-
+    return SdkHttpResponse(headers=result.headers, body=response_body)
 
   async def async_request_streamed(
       self,
````
````diff
@@ -1324,7 +1336,7 @@ class BaseApiClient:
     """
     offset = 0
     # Upload the file in chunks
-    if
+    if self._use_aiohttp(): # pylint: disable=g-import-not-at-top
       async with aiohttp.ClientSession(
           headers=self._http_options.headers,
           trust_env=True,
@@ -1507,7 +1519,7 @@ class BaseApiClient:
       else:
         data = http_request.data
 
-      if
+      if self._use_aiohttp():
         async with aiohttp.ClientSession(
             headers=http_request.headers,
             trust_env=True,
````
````diff
@@ -29,11 +29,13 @@ import warnings
 import pydantic
 from pydantic import alias_generators
 
-from . import _api_client
-from . import errors
-
 logger = logging.getLogger('google_genai._common')
 
+
+class ExperimentalWarning(Warning):
+  """Warning for experimental features."""
+
+
 def set_value_by_path(data: Optional[dict[Any, Any]], keys: list[str], value: Any) -> None:
   """Examples:
 
@@ -540,7 +542,7 @@ def experimental_warning(message: str) -> Callable[[Callable[..., Any]], Callabl
         warning_done = True
         warnings.warn(
             message=message,
-            category=
+            category=ExperimentalWarning,
             stacklevel=2,
         )
       return func(*args, **kwargs)
````
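With `ExperimentalWarning` now defined in `google.genai._common`, the `experimental_warning` decorator emits a dedicated category that callers can filter. A small sketch; note that `_common` is an internal module, so this import path is not a stable public API:

```python
import warnings

from google.genai import _common

# Silence the SDK's experimental-feature warnings, e.g. in a test suite that
# intentionally exercises experimental APIs.
warnings.filterwarnings('ignore', category=_common.ExperimentalWarning)
```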
````diff
@@ -877,6 +877,9 @@ def _Tool_to_mldev(
   if getv(from_object, ['code_execution']) is not None:
     setv(to_object, ['codeExecution'], getv(from_object, ['code_execution']))
 
+  if getv(from_object, ['computer_use']) is not None:
+    setv(to_object, ['computerUse'], getv(from_object, ['computer_use']))
+
   return to_object
 
 
@@ -942,6 +945,9 @@ def _Tool_to_vertex(
   if getv(from_object, ['code_execution']) is not None:
     setv(to_object, ['codeExecution'], getv(from_object, ['code_execution']))
 
+  if getv(from_object, ['computer_use']) is not None:
+    setv(to_object, ['computerUse'], getv(from_object, ['computer_use']))
+
   return to_object
 
 
````
````diff
@@ -1649,16 +1655,16 @@ def _LiveSendRealtimeInputParameters_to_vertex(
     setv(to_object, ['mediaChunks'], t.t_blobs(getv(from_object, ['media'])))
 
   if getv(from_object, ['audio']) is not None:
-
+    setv(to_object, ['audio'], t.t_audio_blob(getv(from_object, ['audio'])))
 
   if getv(from_object, ['audio_stream_end']) is not None:
     setv(to_object, ['audioStreamEnd'], getv(from_object, ['audio_stream_end']))
 
   if getv(from_object, ['video']) is not None:
-
+    setv(to_object, ['video'], t.t_image_blob(getv(from_object, ['video'])))
 
   if getv(from_object, ['text']) is not None:
-
+    setv(to_object, ['text'], getv(from_object, ['text']))
 
   if getv(from_object, ['activity_start']) is not None:
     setv(
@@ -1935,7 +1941,7 @@ def _LiveClientRealtimeInput_to_vertex(
     setv(to_object, ['mediaChunks'], getv(from_object, ['media_chunks']))
 
   if getv(from_object, ['audio']) is not None:
-
+    setv(to_object, ['audio'], getv(from_object, ['audio']))
 
   if getv(from_object, ['audio_stream_end']) is not None:
     raise ValueError(
@@ -1943,10 +1949,10 @@ def _LiveClientRealtimeInput_to_vertex(
     )
 
   if getv(from_object, ['video']) is not None:
-
+    setv(to_object, ['video'], getv(from_object, ['video']))
 
   if getv(from_object, ['text']) is not None:
-
+    setv(to_object, ['text'], getv(from_object, ['text']))
 
   if getv(from_object, ['activity_start']) is not None:
     setv(
@@ -2467,6 +2473,8 @@ def _LiveServerSetupComplete_from_vertex(
     parent_object: Optional[dict[str, Any]] = None,
 ) -> dict[str, Any]:
   to_object: dict[str, Any] = {}
+  if getv(from_object, ['sessionId']) is not None:
+    setv(to_object, ['session_id'], getv(from_object, ['sessionId']))
 
   return to_object
 
````
````diff
@@ -200,6 +200,12 @@ def _debug_print(message: str) -> None:
   )
 
 
+def pop_undeterministic_headers(headers: dict[str, str]) -> None:
+  """Remove headers that are not deterministic."""
+  headers.pop('Date', None) # pytype: disable=attribute-error
+  headers.pop('Server-Timing', None) # pytype: disable=attribute-error
+
+
 class ReplayRequest(BaseModel):
   """Represents a single request in a replay."""
 
@@ -219,10 +225,7 @@ class ReplayResponse(BaseModel):
   sdk_response_segments: list[dict[str, object]]
 
   def model_post_init(self, __context: Any) -> None:
-
-    # every time they are recorded.
-    self.headers.pop('Date', None)
-    self.headers.pop('Server-Timing', None)
+    pop_undeterministic_headers(self.headers)
 
 
 class ReplayInteraction(BaseModel):
@@ -447,11 +450,15 @@ class ReplayApiClient(BaseApiClient):
     if self._should_update_replay():
       if isinstance(response_model, list):
         response_model = response_model[0]
-
-
-
+      sdk_response_response = getattr(response_model, 'sdk_http_response', None)
+      if response_model and (
+          sdk_response_response is not None
       ):
-
+        headers = getattr(
+            sdk_response_response, 'headers', None
+        )
+        if headers:
+          pop_undeterministic_headers(headers)
       interaction.response.sdk_response_segments.append(
           response_model.model_dump(exclude_none=True)
       )
````
````diff
@@ -877,6 +877,9 @@ def _Tool_to_mldev(
   if getv(from_object, ['code_execution']) is not None:
     setv(to_object, ['codeExecution'], getv(from_object, ['code_execution']))
 
+  if getv(from_object, ['computer_use']) is not None:
+    setv(to_object, ['computerUse'], getv(from_object, ['computer_use']))
+
   return to_object
 
 
@@ -942,6 +945,9 @@ def _Tool_to_vertex(
   if getv(from_object, ['code_execution']) is not None:
     setv(to_object, ['codeExecution'], getv(from_object, ['code_execution']))
 
+  if getv(from_object, ['computer_use']) is not None:
+    setv(to_object, ['computerUse'], getv(from_object, ['computer_use']))
+
   return to_object
 
 
````
````diff
@@ -625,17 +625,22 @@ def _raise_for_unsupported_schema_type(origin: Any) -> None:
 
 
 def _raise_for_unsupported_mldev_properties(
-    schema: Any, client: _api_client.BaseApiClient
+    schema: Any, client: Optional[_api_client.BaseApiClient]
 ) -> None:
-  if
-
+  if (
+      client
+      and not client.vertexai
+      and (
+          schema.get('additionalProperties')
+          or schema.get('additional_properties')
+      )
   ):
     raise ValueError('additionalProperties is not supported in the Gemini API.')
 
 
 def process_schema(
     schema: dict[str, Any],
-    client: _api_client.BaseApiClient,
+    client: Optional[_api_client.BaseApiClient],
     defs: Optional[dict[str, Any]] = None,
     *,
     order_properties: bool = True,
@@ -785,7 +790,7 @@ def process_schema(
 
 
 def _process_enum(
-    enum: EnumMeta, client: _api_client.BaseApiClient
+    enum: EnumMeta, client: Optional[_api_client.BaseApiClient]
 ) -> types.Schema:
   is_integer_enum = False
 
@@ -823,7 +828,8 @@ def _is_type_dict_str_any(
 
 
 def t_schema(
-    client: _api_client.BaseApiClient,
+    client: Optional[_api_client.BaseApiClient],
+    origin: Union[types.SchemaUnionDict, Any],
 ) -> Optional[types.Schema]:
   if not origin:
     return None
````