google-genai 1.31.0__py3-none-any.whl → 1.33.0__py3-none-any.whl
This diff covers publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
- google/genai/_api_client.py +32 -9
- google/genai/_base_transformers.py +26 -0
- google/genai/_local_tokenizer_loader.py +223 -0
- google/genai/_operations_converters.py +307 -0
- google/genai/_replay_api_client.py +15 -0
- google/genai/_transformers.py +0 -10
- google/genai/caches.py +14 -2
- google/genai/files.py +12 -2
- google/genai/local_tokenizer.py +362 -0
- google/genai/models.py +171 -196
- google/genai/tunings.py +134 -0
- google/genai/types.py +402 -304
- google/genai/version.py +1 -1
- {google_genai-1.31.0.dist-info → google_genai-1.33.0.dist-info}/METADATA +6 -6
- {google_genai-1.31.0.dist-info → google_genai-1.33.0.dist-info}/RECORD +18 -14
- {google_genai-1.31.0.dist-info → google_genai-1.33.0.dist-info}/WHEEL +0 -0
- {google_genai-1.31.0.dist-info → google_genai-1.33.0.dist-info}/licenses/LICENSE +0 -0
- {google_genai-1.31.0.dist-info → google_genai-1.33.0.dist-info}/top_level.txt +0 -0
google/genai/models.py (CHANGED)
@@ -21,6 +21,7 @@ from typing import Any, AsyncIterator, Awaitable, Iterator, Optional, Union
 from urllib.parse import urlencode
 
 from . import _api_module
+from . import _base_transformers as base_t
 from . import _common
 from . import _extra_utils
 from . import _mcp_utils
@@ -1260,7 +1261,7 @@ def _Image_to_mldev(
     setv(
         to_object,
         ['bytesBase64Encoded'],
-
+        base_t.t_bytes(getv(from_object, ['image_bytes'])),
     )
 
   if getv(from_object, ['mime_type']) is not None:
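The `base_t.t_bytes(...)` substitutions in this hunk and the analogous ones below replace byte coercion that previously lived in `_transformers.py` (−10 lines) with a helper from the new `_base_transformers.py` (+26 lines). A minimal sketch of what a `t_bytes` of this shape plausibly does — base64-coerce raw bytes for the JSON payload and pass strings through; this is an assumption, not the packaged implementation:

```python
import base64
from typing import Union


def t_bytes(data: Union[bytes, str]) -> str:
  """Sketch: base64-encode raw bytes, pass already-encoded strings through."""
  if isinstance(data, bytes):
    return base64.b64encode(data).decode('ascii')
  return data  # assumed pass-through for str input
```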
@@ -1269,6 +1270,29 @@ def _Image_to_mldev(
   return to_object
 
 
+def _GenerateVideosSource_to_mldev(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['prompt']) is not None:
+    setv(
+        parent_object, ['instances[0]', 'prompt'], getv(from_object, ['prompt'])
+    )
+
+  if getv(from_object, ['image']) is not None:
+    setv(
+        parent_object,
+        ['instances[0]', 'image'],
+        _Image_to_mldev(getv(from_object, ['image']), to_object),
+    )
+
+  if getv(from_object, ['video']) is not None:
+    raise ValueError('video parameter is not supported in Gemini API.')
+
+  return to_object
+
+
 def _GenerateVideosConfig_to_mldev(
     from_object: Union[dict[str, Any], object],
     parent_object: Optional[dict[str, Any]] = None,
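`getv`/`setv` here are the path helpers that models.py aliases from `_common` (`get_value_by_path`/`set_value_by_path`); a segment such as `'instances[0]'` indexes into a list that is created on demand, which is how the new source converter assembles the `instances` payload. A simplified sketch of the setter's semantics (the real helper supports more path forms):

```python
import re
from typing import Any


def setv(data: dict[str, Any], path: list[str], value: Any) -> None:
  """Simplified sketch of _common.set_value_by_path semantics."""
  for i, segment in enumerate(path):
    is_last = i == len(path) - 1
    match = re.fullmatch(r'(\w+)\[(\d+)\]', segment)
    if match:  # e.g. 'instances[0]': index into a list, creating it on demand
      key, idx = match.group(1), int(match.group(2))
      array = data.setdefault(key, [])
      while len(array) <= idx:
        array.append({})
      if is_last:
        array[idx] = value
      else:
        data = array[idx]
    elif is_last:
      data[segment] = value
    else:
      data = data.setdefault(segment, {})


body: dict[str, Any] = {}
setv(body, ['instances[0]', 'prompt'], 'A neon hologram of a cat')
setv(body, ['parameters', 'resolution'], '1080p')
assert body == {
    'instances': [{'prompt': 'A neon hologram of a cat'}],
    'parameters': {'resolution': '1080p'},
}
```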
@@ -1306,7 +1330,11 @@ def _GenerateVideosConfig_to_mldev(
     )
 
   if getv(from_object, ['resolution']) is not None:
-
+    setv(
+        parent_object,
+        ['parameters', 'resolution'],
+        getv(from_object, ['resolution']),
+    )
 
   if getv(from_object, ['person_generation']) is not None:
     setv(
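The Gemini API (mldev) request path now forwards `resolution` as `parameters.resolution`. A hedged usage sketch — the model name is illustrative and resolution support is model-dependent:

```python
from google import genai
from google.genai import types

client = genai.Client()  # Gemini Developer API; picks up GOOGLE_API_KEY from the environment
operation = client.models.generate_videos(
    model='veo-3.0-generate-001',  # illustrative model name
    source=types.GenerateVideosSource(prompt='A drone shot over a fjord at dawn'),
    config=types.GenerateVideosConfig(resolution='1080p'),
)
```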
@@ -1377,6 +1405,15 @@ def _GenerateVideosParameters_to_mldev(
   if getv(from_object, ['video']) is not None:
     raise ValueError('video parameter is not supported in Gemini API.')
 
+  if getv(from_object, ['source']) is not None:
+    setv(
+        to_object,
+        ['config'],
+        _GenerateVideosSource_to_mldev(
+            getv(from_object, ['source']), to_object
+        ),
+    )
+
   if getv(from_object, ['config']) is not None:
     setv(
         to_object,
@@ -2502,7 +2539,7 @@ def _Image_to_vertex(
     setv(
         to_object,
         ['bytesBase64Encoded'],
-
+        base_t.t_bytes(getv(from_object, ['image_bytes'])),
     )
 
   if getv(from_object, ['mime_type']) is not None:
@@ -2797,6 +2834,13 @@ def _UpscaleImageAPIConfig_to_vertex(
 ) -> dict[str, Any]:
   to_object: dict[str, Any] = {}
 
+  if getv(from_object, ['output_gcs_uri']) is not None:
+    setv(
+        parent_object,
+        ['parameters', 'storageUri'],
+        getv(from_object, ['output_gcs_uri']),
+    )
+
   if getv(from_object, ['include_rai_reason']) is not None:
     setv(
         parent_object,
@@ -2973,6 +3017,13 @@ def _RecontextImageConfig_to_vertex(
         getv(from_object, ['person_generation']),
     )
 
+  if getv(from_object, ['add_watermark']) is not None:
+    setv(
+        parent_object,
+        ['parameters', 'addWatermark'],
+        getv(from_object, ['add_watermark']),
+    )
+
   if getv(from_object, ['output_mime_type']) is not None:
     setv(
         parent_object,
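A sketch of the user-facing call this enables, assuming `add_watermark` is surfaced on `types.RecontextImageConfig` in this release (`recontext_image` is a Vertex AI surface; the model name and source fields shown are illustrative):

```python
from google import genai
from google.genai import types

client = genai.Client(vertexai=True, project='my-project', location='us-central1')
response = client.models.recontext_image(
    model='imagen-product-recontext-preview-06-30',  # illustrative model name
    source=types.RecontextImageSource(
        prompt='on a marble countertop in soft morning light',
        product_images=[
            types.ProductImage(
                product_image=types.Image.from_file(location='product.png')
            )
        ],
    ),
    config=types.RecontextImageConfig(add_watermark=False),
)
```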
@@ -3387,7 +3438,7 @@ def _Video_to_vertex(
     setv(
         to_object,
         ['bytesBase64Encoded'],
-
+        base_t.t_bytes(getv(from_object, ['video_bytes'])),
     )
 
   if getv(from_object, ['mime_type']) is not None:
@@ -3396,6 +3447,33 @@ def _Video_to_vertex(
   return to_object
 
 
+def _GenerateVideosSource_to_vertex(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['prompt']) is not None:
+    setv(
+        parent_object, ['instances[0]', 'prompt'], getv(from_object, ['prompt'])
+    )
+
+  if getv(from_object, ['image']) is not None:
+    setv(
+        parent_object,
+        ['instances[0]', 'image'],
+        _Image_to_vertex(getv(from_object, ['image']), to_object),
+    )
+
+  if getv(from_object, ['video']) is not None:
+    setv(
+        parent_object,
+        ['instances[0]', 'video'],
+        _Video_to_vertex(getv(from_object, ['video']), to_object),
+    )
+
+  return to_object
+
+
 def _VideoGenerationReferenceImage_to_vertex(
     from_object: Union[dict[str, Any], object],
     parent_object: Optional[dict[str, Any]] = None,
@@ -3553,6 +3631,15 @@ def _GenerateVideosParameters_to_vertex(
         _Video_to_vertex(getv(from_object, ['video']), to_object),
     )
 
+  if getv(from_object, ['source']) is not None:
+    setv(
+        to_object,
+        ['config'],
+        _GenerateVideosSource_to_vertex(
+            getv(from_object, ['source']), to_object
+        ),
+    )
+
   if getv(from_object, ['config']) is not None:
     setv(
         to_object,
@@ -3914,7 +4001,7 @@ def _Image_from_mldev(
     setv(
         to_object,
         ['image_bytes'],
-
+        base_t.t_bytes(getv(from_object, ['bytesBase64Encoded'])),
     )
 
   if getv(from_object, ['mimeType']) is not None:
@@ -4101,6 +4188,10 @@ def _DeleteModelResponse_from_mldev(
     parent_object: Optional[dict[str, Any]] = None,
 ) -> dict[str, Any]:
   to_object: dict[str, Any] = {}
+  if getv(from_object, ['sdkHttpResponse']) is not None:
+    setv(
+        to_object, ['sdk_http_response'], getv(from_object, ['sdkHttpResponse'])
+    )
 
   return to_object
 
@@ -4140,7 +4231,7 @@ def _Video_from_mldev(
     setv(
         to_object,
         ['video_bytes'],
-
+        base_t.t_bytes(getv(from_object, ['video', 'encodedVideo'])),
     )
 
   if getv(from_object, ['encoding']) is not None:
@@ -4606,7 +4697,7 @@ def _Image_from_vertex(
     setv(
         to_object,
         ['image_bytes'],
-
+        base_t.t_bytes(getv(from_object, ['bytesBase64Encoded'])),
     )
 
   if getv(from_object, ['mimeType']) is not None:
@@ -4968,6 +5059,10 @@ def _DeleteModelResponse_from_vertex(
     parent_object: Optional[dict[str, Any]] = None,
 ) -> dict[str, Any]:
   to_object: dict[str, Any] = {}
+  if getv(from_object, ['sdkHttpResponse']) is not None:
+    setv(
+        to_object, ['sdk_http_response'], getv(from_object, ['sdkHttpResponse'])
+    )
 
   return to_object
 
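Both the mldev and vertex response converters now copy `sdkHttpResponse` into the snake_case `sdk_http_response` field, so HTTP metadata survives into the returned `DeleteModelResponse`. A short sketch of reading it (the tuned-model name is illustrative):

```python
from google import genai

client = genai.Client()
response = client.models.delete(model='tunedModels/my-tuned-model')  # illustrative name
if response.sdk_http_response is not None:
  print(response.sdk_http_response.headers)
```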
@@ -5016,7 +5111,7 @@ def _Video_from_vertex(
     setv(
         to_object,
         ['video_bytes'],
-
+        base_t.t_bytes(getv(from_object, ['bytesBase64Encoded'])),
     )
 
   if getv(from_object, ['mimeType']) is not None:
@@ -5165,6 +5260,13 @@ class Models(_api_module.BaseModule):
         'post', path, request_dict, http_options
     )
 
+    if config is not None and getattr(
+        config, 'should_return_http_response', None
+    ):
+      return_value = types.GenerateContentResponse(sdk_http_response=response)
+      self._api_client._verify_response(return_value)
+      return return_value
+
     response_dict = '' if not response.body else json.loads(response.body)
 
     if self._api_client.vertexai:
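The early return short-circuits parsing: with a truthy `should_return_http_response` on the config, `generate_content` hands back the raw `HttpResponse` wrapped in an otherwise-empty `GenerateContentResponse`. A sketch under the assumption that the flag is exposed on `GenerateContentConfig` by the updated `types.py` (this diff only shows the `getattr` probe):

```python
from google import genai
from google.genai import types

client = genai.Client()
response = client.models.generate_content(
    model='gemini-2.0-flash',
    contents='Why is the sky blue?',
    config=types.GenerateContentConfig(
        should_return_http_response=True,  # assumption: exposed on the config in 1.33.0
    ),
)
# In this mode only the raw HTTP payload is populated; candidates are not parsed.
print(response.sdk_http_response)
```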
@@ -5364,13 +5466,7 @@ class Models(_api_module.BaseModule):
       prompt: str,
       config: Optional[types.GenerateImagesConfigOrDict] = None,
   ) -> types.GenerateImagesResponse:
-    """
-
-    Args:
-      model (str): The model to use.
-      prompt (str): A text description of the images to generate.
-      config (GenerateImagesConfig): Configuration for generation.
-    """
+    """Private method for generating images."""
 
     parameter_model = types._GenerateImagesParameters(
         model=model,
@@ -5443,47 +5539,7 @@ class Models(_api_module.BaseModule):
       reference_images: list[types._ReferenceImageAPIOrDict],
       config: Optional[types.EditImageConfigOrDict] = None,
   ) -> types.EditImageResponse:
-    """
-
-    Args:
-      model (str): The model to use.
-      prompt (str): A text description of the edit to apply to the image.
-      reference_images (list[Union[RawReferenceImage, MaskReferenceImage,
-        ControlReferenceImage, StyleReferenceImage, SubjectReferenceImage]): The
-        reference images for editing.
-      config (EditImageConfig): Configuration for editing.
-
-    Usage:
-
-    .. code-block:: python
-
-      from google.genai.types import RawReferenceImage, MaskReferenceImage
-
-      raw_ref_image = RawReferenceImage(
-          reference_id=1,
-          reference_image=types.Image.from_file(IMAGE_FILE_PATH),
-      )
-
-      mask_ref_image = MaskReferenceImage(
-          reference_id=2,
-          config=types.MaskReferenceConfig(
-              mask_mode='MASK_MODE_FOREGROUND',
-              mask_dilation=0.06,
-          ),
-      )
-      response = client.models.edit_image(
-          model='imagen-3.0-capability-001',
-          prompt='man with dog',
-          reference_images=[raw_ref_image, mask_ref_image],
-          config=types.EditImageConfig(
-              edit_mode= "EDIT_MODE_INPAINT_INSERTION",
-              number_of_images= 1,
-              include_rai_reason= True,
-          )
-      )
-      response.generated_images[0].image.show()
-      # Shows a man with a dog instead of a cat.
-    """
+    """Private method for editing an image."""
 
     parameter_model = types._EditImageParameters(
         model=model,
@@ -5547,14 +5603,7 @@ class Models(_api_module.BaseModule):
       upscale_factor: str,
       config: Optional[types._UpscaleImageAPIConfigOrDict] = None,
   ) -> types.UpscaleImageResponse:
-    """
-
-    Args:
-      model (str): The model to use.
-      image (Image): The input image for upscaling.
-      upscale_factor (str): The factor to upscale the image (x2 or x4).
-      config (_UpscaleImageAPIConfig): Configuration for upscaling.
-    """
+    """Private method for upscaling an image."""
 
     parameter_model = types._UpscaleImageAPIParameters(
         model=model,
@@ -6050,7 +6099,9 @@ class Models(_api_module.BaseModule):
     return_value = types.DeleteModelResponse._from_response(
         response=response_dict, kwargs=parameter_model.model_dump()
     )
-
+    return_value.sdk_http_response = types.HttpResponse(
+        headers=response.headers
+    )
     self._api_client._verify_response(return_value)
     return return_value
 
@@ -6235,47 +6286,17 @@ class Models(_api_module.BaseModule):
       prompt: Optional[str] = None,
       image: Optional[types.ImageOrDict] = None,
       video: Optional[types.VideoOrDict] = None,
+      source: Optional[types.GenerateVideosSourceOrDict] = None,
       config: Optional[types.GenerateVideosConfigOrDict] = None,
   ) -> types.GenerateVideosOperation:
-    """
-
-    The following use cases are supported:
-    1. Text to video generation.
-    2a. Image to video generation (additional text prompt is optional).
-    2b. Image to video generation with frame interpolation (specify last_frame
-    in config).
-    3. Video extension (additional text prompt is optional)
-
-    Args:
-      model: The model to use.
-      prompt: The text prompt for generating the videos. Optional for image to
-        video and video extension use cases.
-      image: The input image for generating the videos. Optional if prompt is
-        provided.
-      video: The input video for video extension use cases. Optional if prompt
-        or image is provided.
-      config: Configuration for generation.
-
-    Usage:
-
-    ```
-    operation = client.models.generate_videos(
-        model="veo-2.0-generate-001",
-        prompt="A neon hologram of a cat driving at top speed",
-    )
-    while not operation.done:
-      time.sleep(10)
-      operation = client.operations.get(operation)
-
-    operation.result.generated_videos[0].video.uri
-    ```
-    """
+    """Private method for generating videos."""
 
     parameter_model = types._GenerateVideosParameters(
         model=model,
         prompt=prompt,
         image=image,
         video=video,
+        source=source,
         config=config,
     )
 
@@ -6794,6 +6815,7 @@ class Models(_api_module.BaseModule):
       config_dct = dict(config)
       api_config = types._UpscaleImageAPIConfigDict(
          http_options=config_dct.get('http_options', None),
+          output_gcs_uri=config_dct.get('output_gcs_uri', None),
           include_rai_reason=config_dct.get('include_rai_reason', None),
           output_mime_type=config_dct.get('output_mime_type', None),
           output_compression_quality=config_dct.get(
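`output_gcs_uri` is now copied from the public config into `_UpscaleImageAPIConfigDict`, and the converter above maps it to the Vertex `parameters.storageUri` field. A hedged sketch, assuming the field is exposed on `types.UpscaleImageConfig` (bucket and model names illustrative):

```python
from google import genai
from google.genai import types

client = genai.Client(vertexai=True, project='my-project', location='us-central1')
response = client.models.upscale_image(
    model='imagen-3.0-generate-002',  # illustrative model name
    image=types.Image.from_file(location='photo.png'),
    upscale_factor='x4',
    config=types.UpscaleImageConfig(
        output_gcs_uri='gs://my-bucket/upscaled/',  # written to parameters.storageUri
    ),
)
```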
@@ -6823,6 +6845,7 @@ class Models(_api_module.BaseModule):
       prompt: Optional[str] = None,
       image: Optional[types.ImageOrDict] = None,
       video: Optional[types.VideoOrDict] = None,
+      source: Optional[types.GenerateVideosSourceOrDict] = None,
       config: Optional[types.GenerateVideosConfigOrDict] = None,
   ) -> types.GenerateVideosOperation:
     """Generates videos based on an input (text, image, or video) and configuration.
@@ -6837,11 +6860,15 @@ class Models(_api_module.BaseModule):
     Args:
       model: The model to use.
       prompt: The text prompt for generating the videos. Optional for image to
-        video and video extension use cases.
+        video and video extension use cases. This argument is deprecated, please
+        use source instead.
       image: The input image for generating the videos. Optional if prompt is
-        provided.
+        provided. This argument is deprecated, please use source instead.
       video: The input video for video extension use cases. Optional if prompt
-        or image is provided.
+        or image is provided. This argument is deprecated, please use source
+        instead.
+      source: The input source for generating the videos (prompt, image, and/or
+        video)
       config: Configuration for generation.
 
     Usage:
@@ -6849,7 +6876,9 @@ class Models(_api_module.BaseModule):
     ```
     operation = client.models.generate_videos(
         model="veo-2.0-generate-001",
-        prompt="A neon hologram of a cat driving at top speed",
+        source=types.GenerateVideosSource(
+            prompt="A neon hologram of a cat driving at top speed",
+        ),
     )
     while not operation.done:
       time.sleep(10)
@@ -6858,11 +6887,17 @@ class Models(_api_module.BaseModule):
     operation.result.generated_videos[0].video.uri
     ```
     """
+    if (prompt or image or video) and source:
+      raise ValueError(
+          'Source and prompt/image/video are mutually exclusive.'
+          + ' Please only use source.'
+      )
     return self._generate_videos(
         model=model,
         prompt=prompt,
         image=image,
         video=video,
+        source=source,
         config=config,
     )
 
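Net effect for callers: `prompt`, `image`, and `video` still work on their own but are documented as deprecated, and combining any of them with `source` now raises. A migration sketch:

```python
from google import genai
from google.genai import types

client = genai.Client()

# Deprecated spelling (still accepted):
operation = client.models.generate_videos(
    model='veo-2.0-generate-001',
    prompt='A neon hologram of a cat driving at top speed',
)

# Preferred spelling as of this release:
operation = client.models.generate_videos(
    model='veo-2.0-generate-001',
    source=types.GenerateVideosSource(
        prompt='A neon hologram of a cat driving at top speed',
    ),
)

# Mixing the two raises:
# ValueError: Source and prompt/image/video are mutually exclusive. Please only use source.
```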
@@ -6972,6 +7007,13 @@ class AsyncModels(_api_module.BaseModule):
         'post', path, request_dict, http_options
     )
 
+    if config is not None and getattr(
+        config, 'should_return_http_response', None
+    ):
+      return_value = types.GenerateContentResponse(sdk_http_response=response)
+      self._api_client._verify_response(return_value)
+      return return_value
+
     response_dict = '' if not response.body else json.loads(response.body)
 
     if self._api_client.vertexai:
@@ -7176,13 +7218,7 @@ class AsyncModels(_api_module.BaseModule):
       prompt: str,
       config: Optional[types.GenerateImagesConfigOrDict] = None,
   ) -> types.GenerateImagesResponse:
-    """
-
-    Args:
-      model (str): The model to use.
-      prompt (str): A text description of the images to generate.
-      config (GenerateImagesConfig): Configuration for generation.
-    """
+    """Private method for generating images asynchronously."""
 
     parameter_model = types._GenerateImagesParameters(
         model=model,
@@ -7255,47 +7291,7 @@ class AsyncModels(_api_module.BaseModule):
       reference_images: list[types._ReferenceImageAPIOrDict],
       config: Optional[types.EditImageConfigOrDict] = None,
   ) -> types.EditImageResponse:
-    """
-
-    Args:
-      model (str): The model to use.
-      prompt (str): A text description of the edit to apply to the image.
-      reference_images (list[Union[RawReferenceImage, MaskReferenceImage,
-        ControlReferenceImage, StyleReferenceImage, SubjectReferenceImage]): The
-        reference images for editing.
-      config (EditImageConfig): Configuration for editing.
-
-    Usage:
-
-    .. code-block:: python
-
-      from google.genai.types import RawReferenceImage, MaskReferenceImage
-
-      raw_ref_image = RawReferenceImage(
-          reference_id=1,
-          reference_image=types.Image.from_file(IMAGE_FILE_PATH),
-      )
-
-      mask_ref_image = MaskReferenceImage(
-          reference_id=2,
-          config=types.MaskReferenceConfig(
-              mask_mode='MASK_MODE_FOREGROUND',
-              mask_dilation=0.06,
-          ),
-      )
-      response = await client.aio.models.edit_image(
-          model='imagen-3.0-capability-001',
-          prompt='man with dog',
-          reference_images=[raw_ref_image, mask_ref_image],
-          config=types.EditImageConfig(
-              edit_mode= "EDIT_MODE_INPAINT_INSERTION",
-              number_of_images= 1,
-              include_rai_reason= True,
-          )
-      )
-      response.generated_images[0].image.show()
-      # Shows a man with a dog instead of a cat.
-    """
+    """Private method for editing an image asynchronously."""
 
     parameter_model = types._EditImageParameters(
         model=model,
@@ -7359,14 +7355,7 @@ class AsyncModels(_api_module.BaseModule):
       upscale_factor: str,
       config: Optional[types._UpscaleImageAPIConfigOrDict] = None,
   ) -> types.UpscaleImageResponse:
-    """
-
-    Args:
-      model (str): The model to use.
-      image (Image): The input image for upscaling.
-      upscale_factor (str): The factor to upscale the image (x2 or x4).
-      config (_UpscaleImageAPIConfig): Configuration for upscaling.
-    """
+    """Private method for upscaling an image asynchronously."""
 
     parameter_model = types._UpscaleImageAPIParameters(
         model=model,
@@ -7869,7 +7858,9 @@ class AsyncModels(_api_module.BaseModule):
     return_value = types.DeleteModelResponse._from_response(
         response=response_dict, kwargs=parameter_model.model_dump()
     )
-
+    return_value.sdk_http_response = types.HttpResponse(
+        headers=response.headers
+    )
     self._api_client._verify_response(return_value)
     return return_value
 
@@ -8053,47 +8044,17 @@ class AsyncModels(_api_module.BaseModule):
       prompt: Optional[str] = None,
       image: Optional[types.ImageOrDict] = None,
       video: Optional[types.VideoOrDict] = None,
+      source: Optional[types.GenerateVideosSourceOrDict] = None,
       config: Optional[types.GenerateVideosConfigOrDict] = None,
   ) -> types.GenerateVideosOperation:
-    """
-
-    The following use cases are supported:
-    1. Text to video generation.
-    2a. Image to video generation (additional text prompt is optional).
-    2b. Image to video generation with frame interpolation (specify last_frame
-    in config).
-    3. Video extension (additional text prompt is optional)
-
-    Args:
-      model: The model to use.
-      prompt: The text prompt for generating the videos. Optional for image to
-        video and video extension use cases.
-      image: The input image for generating the videos. Optional if prompt is
-        provided.
-      video: The input video for video extension use cases. Optional if prompt
-        or image is provided.
-      config: Configuration for generation.
-
-    Usage:
-
-    ```
-    operation = client.models.generate_videos(
-        model="veo-2.0-generate-001",
-        prompt="A neon hologram of a cat driving at top speed",
-    )
-    while not operation.done:
-      time.sleep(10)
-      operation = client.operations.get(operation)
-
-    operation.result.generated_videos[0].video.uri
-    ```
-    """
+    """Private method for generating videos asynchronously."""
 
     parameter_model = types._GenerateVideosParameters(
         model=model,
         prompt=prompt,
         image=image,
         video=video,
+        source=source,
         config=config,
     )
 
@@ -8645,6 +8606,7 @@ class AsyncModels(_api_module.BaseModule):
       config_dct = dict(config)
       api_config = types._UpscaleImageAPIConfigDict(
           http_options=config_dct.get('http_options', None),
+          output_gcs_uri=config_dct.get('output_gcs_uri', None),
           include_rai_reason=config_dct.get('include_rai_reason', None),
           output_mime_type=config_dct.get('output_mime_type', None),
           output_compression_quality=config_dct.get(
@@ -8674,6 +8636,7 @@ class AsyncModels(_api_module.BaseModule):
       prompt: Optional[str] = None,
       image: Optional[types.ImageOrDict] = None,
       video: Optional[types.VideoOrDict] = None,
+      source: Optional[types.GenerateVideosSourceOrDict] = None,
       config: Optional[types.GenerateVideosConfigOrDict] = None,
   ) -> types.GenerateVideosOperation:
     """Generates videos based on an input (text, image, or video) and configuration.
@@ -8688,11 +8651,15 @@ class AsyncModels(_api_module.BaseModule):
     Args:
       model: The model to use.
       prompt: The text prompt for generating the videos. Optional for image to
-        video and video extension use cases.
+        video and video extension use cases. This argument is deprecated, please
+        use source instead.
       image: The input image for generating the videos. Optional if prompt is
-        provided.
+        provided. This argument is deprecated, please use source instead.
       video: The input video for video extension use cases. Optional if prompt
-        or image is provided.
+        or image is provided. This argument is deprecated, please use source
+        instead.
+      source: The input source for generating the videos (prompt, image, and/or
+        video)
       config: Configuration for generation.
 
     Usage:
@@ -8700,7 +8667,9 @@ class AsyncModels(_api_module.BaseModule):
     ```
     operation = client.models.generate_videos(
         model="veo-2.0-generate-001",
-        prompt="A neon hologram of a cat driving at top speed",
+        source=types.GenerateVideosSource(
+            prompt="A neon hologram of a cat driving at top speed",
+        ),
     )
     while not operation.done:
       time.sleep(10)
@@ -8709,10 +8678,16 @@ class AsyncModels(_api_module.BaseModule):
     operation.result.generated_videos[0].video.uri
     ```
     """
+    if (prompt or image or video) and source:
+      raise ValueError(
+          'Source and prompt/image/video are mutually exclusive.'
+          + ' Please only use source.'
+      )
     return await self._generate_videos(
         model=model,
         prompt=prompt,
         image=image,
         video=video,
+        source=source,
         config=config,
     )
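The async surface receives the same validation and `source` plumbing; a sketch of the end-to-end flow (polling interval arbitrary):

```python
import asyncio

from google import genai
from google.genai import types


async def main() -> None:
  client = genai.Client()
  operation = await client.aio.models.generate_videos(
      model='veo-2.0-generate-001',
      source=types.GenerateVideosSource(
          prompt='A neon hologram of a cat driving at top speed',
      ),
  )
  while not operation.done:
    await asyncio.sleep(10)
    operation = await client.aio.operations.get(operation)
  print(operation.result.generated_videos[0].video.uri)


asyncio.run(main())
```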