google-genai 1.28.0__py3-none-any.whl → 1.29.0__py3-none-any.whl
This diff compares two publicly released versions of the package as published to their public registry. It is provided for informational purposes only.
- google/genai/_api_client.py +139 -54
- google/genai/_automatic_function_calling_util.py +35 -7
- google/genai/_live_converters.py +14 -0
- google/genai/batches.py +19 -2
- google/genai/errors.py +5 -2
- google/genai/models.py +393 -12
- google/genai/types.py +336 -39
- google/genai/version.py +1 -1
- {google_genai-1.28.0.dist-info → google_genai-1.29.0.dist-info}/METADATA +2 -2
- {google_genai-1.28.0.dist-info → google_genai-1.29.0.dist-info}/RECORD +13 -13
- {google_genai-1.28.0.dist-info → google_genai-1.29.0.dist-info}/WHEEL +0 -0
- {google_genai-1.28.0.dist-info → google_genai-1.29.0.dist-info}/licenses/LICENSE +0 -0
- {google_genai-1.28.0.dist-info → google_genai-1.29.0.dist-info}/top_level.txt +0 -0
google/genai/models.py
CHANGED
@@ -997,7 +997,11 @@ def _GenerateImagesConfig_to_mldev(
     raise ValueError('add_watermark parameter is not supported in Gemini API.')
 
   if getv(from_object, ['image_size']) is not None:
-
+    setv(
+        parent_object,
+        ['parameters', 'sampleImageSize'],
+        getv(from_object, ['image_size']),
+    )
 
   if getv(from_object, ['enhance_prompt']) is not None:
     raise ValueError('enhance_prompt parameter is not supported in Gemini API.')
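With this change, `image_size` is forwarded to the Gemini Developer API as `parameters.sampleImageSize` instead of being rejected like the neighboring unsupported parameters. A minimal sketch of a call that now reaches the new path (the model name and the `'2K'` value are assumptions, not taken from this diff):

```python
from google import genai
from google.genai import types

client = genai.Client()  # Gemini Developer API client, not Vertex AI

response = client.models.generate_images(
    model='imagen-4.0-generate-preview-06-06',  # assumed model name
    prompt='A watercolor painting of a lighthouse at dawn',
    config=types.GenerateImagesConfig(
        number_of_images=1,
        image_size='2K',  # now mapped to parameters.sampleImageSize
    ),
)
response.generated_images[0].image.save('lighthouse.png')
```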
@@ -2839,6 +2843,153 @@ def _UpscaleImageAPIParameters_to_vertex(
   return to_object
 
 
+def _ProductImage_to_vertex(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['product_image']) is not None:
+    setv(
+        to_object,
+        ['image'],
+        _Image_to_vertex(getv(from_object, ['product_image']), to_object),
+    )
+
+  return to_object
+
+
+def _RecontextImageSource_to_vertex(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['prompt']) is not None:
+    setv(
+        parent_object, ['instances[0]', 'prompt'], getv(from_object, ['prompt'])
+    )
+
+  if getv(from_object, ['person_image']) is not None:
+    setv(
+        parent_object,
+        ['instances[0]', 'personImage', 'image'],
+        _Image_to_vertex(getv(from_object, ['person_image']), to_object),
+    )
+
+  if getv(from_object, ['product_images']) is not None:
+    setv(
+        parent_object,
+        ['instances[0]', 'productImages'],
+        [
+            _ProductImage_to_vertex(item, to_object)
+            for item in getv(from_object, ['product_images'])
+        ],
+    )
+
+  return to_object
+
+
+def _RecontextImageConfig_to_vertex(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+
+  if getv(from_object, ['number_of_images']) is not None:
+    setv(
+        parent_object,
+        ['parameters', 'sampleCount'],
+        getv(from_object, ['number_of_images']),
+    )
+
+  if getv(from_object, ['base_steps']) is not None:
+    setv(
+        parent_object,
+        ['parameters', 'editConfig', 'baseSteps'],
+        getv(from_object, ['base_steps']),
+    )
+
+  if getv(from_object, ['output_gcs_uri']) is not None:
+    setv(
+        parent_object,
+        ['parameters', 'storageUri'],
+        getv(from_object, ['output_gcs_uri']),
+    )
+
+  if getv(from_object, ['seed']) is not None:
+    setv(parent_object, ['parameters', 'seed'], getv(from_object, ['seed']))
+
+  if getv(from_object, ['safety_filter_level']) is not None:
+    setv(
+        parent_object,
+        ['parameters', 'safetySetting'],
+        getv(from_object, ['safety_filter_level']),
+    )
+
+  if getv(from_object, ['person_generation']) is not None:
+    setv(
+        parent_object,
+        ['parameters', 'personGeneration'],
+        getv(from_object, ['person_generation']),
+    )
+
+  if getv(from_object, ['output_mime_type']) is not None:
+    setv(
+        parent_object,
+        ['parameters', 'outputOptions', 'mimeType'],
+        getv(from_object, ['output_mime_type']),
+    )
+
+  if getv(from_object, ['output_compression_quality']) is not None:
+    setv(
+        parent_object,
+        ['parameters', 'outputOptions', 'compressionQuality'],
+        getv(from_object, ['output_compression_quality']),
+    )
+
+  if getv(from_object, ['enhance_prompt']) is not None:
+    setv(
+        parent_object,
+        ['parameters', 'enhancePrompt'],
+        getv(from_object, ['enhance_prompt']),
+    )
+
+  return to_object
+
+
+def _RecontextImageParameters_to_vertex(
+    api_client: BaseApiClient,
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['model']) is not None:
+    setv(
+        to_object,
+        ['_url', 'model'],
+        t.t_model(api_client, getv(from_object, ['model'])),
+    )
+
+  if getv(from_object, ['source']) is not None:
+    setv(
+        to_object,
+        ['config'],
+        _RecontextImageSource_to_vertex(
+            getv(from_object, ['source']), to_object
+        ),
+    )
+
+  if getv(from_object, ['config']) is not None:
+    setv(
+        to_object,
+        ['config'],
+        _RecontextImageConfig_to_vertex(
+            getv(from_object, ['config']), to_object
+        ),
+    )
+
+  return to_object
+
+
 def _GetModelParameters_to_vertex(
     api_client: BaseApiClient,
     from_object: Union[dict[str, Any], object],
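Together these converters assemble the body of a Vertex `:predict` request, writing through `parent_object` into `instances[0]` and `parameters`. A rough sketch of the shape produced for a virtual try-on call; the keys follow the `setv()` paths above, but the image encoding comes from `_Image_to_vertex` (outside this hunk), so treat the exact leaf fields and values as illustrative:

```python
# Illustrative request body built by the converters above; payloads are
# placeholders, and 'bytesBase64Encoded' assumes the usual Vertex encoding.
request_body = {
    'instances': [{
        'personImage': {'image': {'bytesBase64Encoded': '<person-image-b64>'}},
        'productImages': [
            {'image': {'bytesBase64Encoded': '<product-image-b64>'}},
        ],
    }],
    'parameters': {
        'sampleCount': 1,  # from RecontextImageConfig.number_of_images
    },
}
```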
@@ -3518,6 +3669,9 @@ def _GenerateContentResponse_from_mldev(
   if getv(from_object, ['promptFeedback']) is not None:
     setv(to_object, ['prompt_feedback'], getv(from_object, ['promptFeedback']))
 
+  if getv(from_object, ['responseId']) is not None:
+    setv(to_object, ['response_id'], getv(from_object, ['responseId']))
+
   if getv(from_object, ['usageMetadata']) is not None:
     setv(to_object, ['usage_metadata'], getv(from_object, ['usageMetadata']))
 
@@ -4170,15 +4324,15 @@ def _GenerateContentResponse_from_vertex(
   if getv(from_object, ['createTime']) is not None:
     setv(to_object, ['create_time'], getv(from_object, ['createTime']))
 
-  if getv(from_object, ['responseId']) is not None:
-    setv(to_object, ['response_id'], getv(from_object, ['responseId']))
-
   if getv(from_object, ['modelVersion']) is not None:
     setv(to_object, ['model_version'], getv(from_object, ['modelVersion']))
 
   if getv(from_object, ['promptFeedback']) is not None:
     setv(to_object, ['prompt_feedback'], getv(from_object, ['promptFeedback']))
 
+  if getv(from_object, ['responseId']) is not None:
+    setv(to_object, ['response_id'], getv(from_object, ['responseId']))
+
   if getv(from_object, ['usageMetadata']) is not None:
     setv(to_object, ['usage_metadata'], getv(from_object, ['usageMetadata']))
 
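Net effect of these two hunks: `responseId` is now mapped on both backends (it was previously handled only in the Vertex converter, where the assignment has simply moved next to `promptFeedback`). For example:

```python
# response_id is now populated on Gemini Developer API responses as well.
response = client.models.generate_content(
    model='gemini-2.0-flash',
    contents='Hello',
)
print(response.response_id)
```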
@@ -4420,6 +4574,24 @@ def _UpscaleImageResponse_from_vertex(
   return to_object
 
 
+def _RecontextImageResponse_from_vertex(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['predictions']) is not None:
+    setv(
+        to_object,
+        ['generated_images'],
+        [
+            _GeneratedImage_from_vertex(item, to_object)
+            for item in getv(from_object, ['predictions'])
+        ],
+    )
+
+  return to_object
+
+
 def _Endpoint_from_vertex(
     from_object: Union[dict[str, Any], object],
     parent_object: Optional[dict[str, Any]] = None,
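This is the response-side counterpart: each entry of the Vertex `predictions` array becomes a `GeneratedImage` under `generated_images`. A sketch of the JSON it consumes, assuming the usual Imagen prediction shape (field names and values are placeholders, not taken from this diff):

```python
# Illustrative :predict response consumed by the converter above.
response_body = {
    'predictions': [
        {'bytesBase64Encoded': '<image-b64>', 'mimeType': 'image/png'},
    ],
}
```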
@@ -5207,6 +5379,112 @@ class Models(_api_module.BaseModule):
     self._api_client._verify_response(return_value)
     return return_value
 
+  def recontext_image(
+      self,
+      *,
+      model: str,
+      source: types.RecontextImageSourceOrDict,
+      config: Optional[types.RecontextImageConfigOrDict] = None,
+  ) -> types.RecontextImageResponse:
+    """Recontextualizes an image.
+
+    There are two types of recontextualization currently supported:
+    1) Imagen Product Recontext - Generate images of products in new scenes
+       and contexts.
+    2) Virtual Try-On: Generate images of persons modeling fashion products.
+
+    Args:
+      model (str): The model to use.
+      source (RecontextImageSource): An object containing the source inputs
+        (prompt, person_image, product_images) for image recontext. prompt is
+        optional for product recontext and disallowed for virtual try-on.
+        person_image is required for virtual try-on, disallowed for product
+        recontext. product_images is required for both product recontext and
+        virtual try-on. Only one product image is supported for virtual try-on,
+        and up to 3 product images (different angles of the same product) are
+        supported for product recontext.
+      config (RecontextImageConfig): Configuration for recontextualization.
+
+    Usage:
+
+    ```
+    product_recontext_response = client.models.recontext_image(
+        model="imagen-product-recontext-preview-06-30",
+        source=types.RecontextImageSource(
+            prompt="In a modern kitchen setting.",
+            product_images=[types.ProductImage.from_file(IMAGE_FILE_PATH)],
+        ),
+        config=types.RecontextImageConfig(
+            number_of_images=1,
+        ),
+    )
+    image = product_recontext_response.generated_images[0].image
+
+    virtual_try_on_response = client.models.recontext_image(
+        model="virtual-try-on-preview-08-04",
+        source=types.RecontextImageSource(
+            person_image=types.Image.from_file(IMAGE1_FILE_PATH),
+            product_images=[types.ProductImage.from_file(IMAGE2_FILE_PATH)],
+        ),
+        config=types.RecontextImageConfig(
+            number_of_images=1,
+        ),
+    )
+    image = virtual_try_on_response.generated_images[0].image
+    ```
+    """
+
+    parameter_model = types._RecontextImageParameters(
+        model=model,
+        source=source,
+        config=config,
+    )
+
+    request_url_dict: Optional[dict[str, str]]
+    if not self._api_client.vertexai:
+      raise ValueError('This method is only supported in the Vertex AI client.')
+    else:
+      request_dict = _RecontextImageParameters_to_vertex(
+          self._api_client, parameter_model
+      )
+      request_url_dict = request_dict.get('_url')
+      if request_url_dict:
+        path = '{model}:predict'.format_map(request_url_dict)
+      else:
+        path = '{model}:predict'
+
+    query_params = request_dict.get('_query')
+    if query_params:
+      path = f'{path}?{urlencode(query_params)}'
+    # TODO: remove the hack that pops config.
+    request_dict.pop('config', None)
+
+    http_options: Optional[types.HttpOptions] = None
+    if (
+        parameter_model.config is not None
+        and parameter_model.config.http_options is not None
+    ):
+      http_options = parameter_model.config.http_options
+
+    request_dict = _common.convert_to_dict(request_dict)
+    request_dict = _common.encode_unserializable_types(request_dict)
+
+    response = self._api_client.request(
+        'post', path, request_dict, http_options
+    )
+
+    response_dict = '' if not response.body else json.loads(response.body)
+
+    if self._api_client.vertexai:
+      response_dict = _RecontextImageResponse_from_vertex(response_dict)
+
+    return_value = types.RecontextImageResponse._from_response(
+        response=response_dict, kwargs=parameter_model.model_dump()
+    )
+
+    self._api_client._verify_response(return_value)
+    return return_value
+
   def get(
       self, *, model: str, config: Optional[types.GetModelConfigOrDict] = None
   ) -> types.Model:
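Note the guard at the top of the request path: `recontext_image` raises `ValueError` unless the client targets Vertex AI. A minimal setup sketch (project and location are placeholders):

```python
from google import genai

# A plain Gemini Developer API client raises
# ValueError('This method is only supported in the Vertex AI client.')
client = genai.Client(
    vertexai=True, project='my-project', location='us-central1'
)
```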
@@ -5746,7 +6024,7 @@ class Models(_api_module.BaseModule):
       self,
       *,
       model: str,
-      contents:
+      contents: types.ContentListUnionDict,
       config: Optional[types.GenerateContentConfigOrDict] = None,
   ) -> types.GenerateContentResponse:
     """Makes an API request to generate content using a model.
@@ -5757,9 +6035,8 @@ class Models(_api_module.BaseModule):
         'projects/my-project-id/locations/us-central1/publishers/google/models/gemini-2.0-flash'
       - The partial resource name with 'publishers/', for example:
         'publishers/google/models/gemini-2.0-flash' or
-        'publishers/meta/models/llama-3.1-405b-instruct-maas'
       - `/` separated publisher and model name, for example:
-        'google/gemini-2.0-flash'
+        'google/gemini-2.0-flash'
 
     For the `model` parameter, supported formats for Gemini API include:
       - The Gemini model ID, for example: 'gemini-2.0-flash'
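As captured here, only the type annotation on `contents` changed (plus the docstring dropping the llama-maas example); a typical call is unaffected at runtime:

```python
# Unchanged usage: contents may still be a plain string, a Content, or a
# list of either.
response = client.models.generate_content(
    model='gemini-2.0-flash',
    contents='Why is the sky blue?',
)
print(response.text)
```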
@@ -5882,7 +6159,7 @@ class Models(_api_module.BaseModule):
       self,
       *,
       model: str,
-      contents:
+      contents: types.ContentListUnionDict,
       config: Optional[types.GenerateContentConfigOrDict] = None,
   ) -> Iterator[types.GenerateContentResponse]:
     """Makes an API request to generate content using a model and yields the model's response in chunks.
@@ -5893,9 +6170,8 @@ class Models(_api_module.BaseModule):
         'projects/my-project-id/locations/us-central1/publishers/google/models/gemini-2.0-flash'
       - The partial resource name with 'publishers/', for example:
         'publishers/google/models/gemini-2.0-flash' or
-        'publishers/meta/models/llama-3.1-405b-instruct-maas'
       - `/` separated publisher and model name, for example:
-        'google/gemini-2.0-flash'
+        'google/gemini-2.0-flash'
 
     For the `model` parameter, supported formats for Gemini API include:
       - The Gemini model ID, for example: 'gemini-2.0-flash'
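The streaming variant gets the same annotation and docstring fixes; usage is unchanged:

```python
# Chunks arrive as GenerateContentResponse objects with incremental text.
for chunk in client.models.generate_content_stream(
    model='gemini-2.0-flash',
    contents='Tell me a short story.',
):
    print(chunk.text, end='')
```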
@@ -6832,6 +7108,112 @@ class AsyncModels(_api_module.BaseModule):
     self._api_client._verify_response(return_value)
     return return_value
 
+  async def recontext_image(
+      self,
+      *,
+      model: str,
+      source: types.RecontextImageSourceOrDict,
+      config: Optional[types.RecontextImageConfigOrDict] = None,
+  ) -> types.RecontextImageResponse:
+    """Recontextualizes an image.
+
+    There are two types of recontextualization currently supported:
+    1) Imagen Product Recontext - Generate images of products in new scenes
+       and contexts.
+    2) Virtual Try-On: Generate images of persons modeling fashion products.
+
+    Args:
+      model (str): The model to use.
+      source (RecontextImageSource): An object containing the source inputs
+        (prompt, person_image, product_images) for image recontext. prompt is
+        optional for product recontext and disallowed for virtual try-on.
+        person_image is required for virtual try-on, disallowed for product
+        recontext. product_images is required for both product recontext and
+        virtual try-on. Only one product image is supported for virtual try-on,
+        and up to 3 product images (different angles of the same product) are
+        supported for product recontext.
+      config (RecontextImageConfig): Configuration for recontextualization.
+
+    Usage:
+
+    ```
+    product_recontext_response = client.models.recontext_image(
+        model="imagen-product-recontext-preview-06-30",
+        source=types.RecontextImageSource(
+            prompt="In a modern kitchen setting.",
+            product_images=[types.ProductImage.from_file(IMAGE_FILE_PATH)],
+        ),
+        config=types.RecontextImageConfig(
+            number_of_images=1,
+        ),
+    )
+    image = product_recontext_response.generated_images[0].image
+
+    virtual_try_on_response = client.models.recontext_image(
+        model="virtual-try-on-preview-08-04",
+        source=types.RecontextImageSource(
+            person_image=types.Image.from_file(IMAGE1_FILE_PATH),
+            product_images=[types.ProductImage.from_file(IMAGE2_FILE_PATH)],
+        ),
+        config=types.RecontextImageConfig(
+            number_of_images=1,
+        ),
+    )
+    image = virtual_try_on_response.generated_images[0].image
+    ```
+    """
+
+    parameter_model = types._RecontextImageParameters(
+        model=model,
+        source=source,
+        config=config,
+    )
+
+    request_url_dict: Optional[dict[str, str]]
+    if not self._api_client.vertexai:
+      raise ValueError('This method is only supported in the Vertex AI client.')
+    else:
+      request_dict = _RecontextImageParameters_to_vertex(
+          self._api_client, parameter_model
+      )
+      request_url_dict = request_dict.get('_url')
+      if request_url_dict:
+        path = '{model}:predict'.format_map(request_url_dict)
+      else:
+        path = '{model}:predict'
+
+    query_params = request_dict.get('_query')
+    if query_params:
+      path = f'{path}?{urlencode(query_params)}'
+    # TODO: remove the hack that pops config.
+    request_dict.pop('config', None)
+
+    http_options: Optional[types.HttpOptions] = None
+    if (
+        parameter_model.config is not None
+        and parameter_model.config.http_options is not None
+    ):
+      http_options = parameter_model.config.http_options
+
+    request_dict = _common.convert_to_dict(request_dict)
+    request_dict = _common.encode_unserializable_types(request_dict)
+
+    response = await self._api_client.async_request(
+        'post', path, request_dict, http_options
+    )
+
+    response_dict = '' if not response.body else json.loads(response.body)
+
+    if self._api_client.vertexai:
+      response_dict = _RecontextImageResponse_from_vertex(response_dict)
+
+    return_value = types.RecontextImageResponse._from_response(
+        response=response_dict, kwargs=parameter_model.model_dump()
+    )
+
+    self._api_client._verify_response(return_value)
+    return return_value
+
   async def get(
       self, *, model: str, config: Optional[types.GetModelConfigOrDict] = None
   ) -> types.Model:
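The async variant mirrors the sync method line for line (`async_request` instead of `request`) and is reached through `client.aio`. A sketch, with file paths as placeholders and the keyword spelling of `from_file` assumed:

```python
import asyncio

from google import genai
from google.genai import types


async def main() -> None:
    client = genai.Client(
        vertexai=True, project='my-project', location='us-central1'
    )
    response = await client.aio.models.recontext_image(
        model='virtual-try-on-preview-08-04',
        source=types.RecontextImageSource(
            person_image=types.Image.from_file(location='person.png'),
            product_images=[types.ProductImage.from_file(location='shirt.png')],
        ),
        config=types.RecontextImageConfig(number_of_images=1),
    )
    response.generated_images[0].image.save('result.png')


asyncio.run(main())
```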
@@ -7490,9 +7872,8 @@ class AsyncModels(_api_module.BaseModule):
         'projects/my-project-id/locations/us-central1/publishers/google/models/gemini-2.0-flash'
       - The partial resource name with 'publishers/', for example:
         'publishers/google/models/gemini-2.0-flash' or
-        'publishers/meta/models/llama-3.1-405b-instruct-maas'
       - `/` separated publisher and model name, for example:
-        'google/gemini-2.0-flash'
+        'google/gemini-2.0-flash'
 
     For the `model` parameter, supported formats for Gemini API include:
       - The Gemini model ID, for example: 'gemini-2.0-flash'