google-genai 1.27.0__py3-none-any.whl → 1.29.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- google/genai/__init__.py +1 -0
- google/genai/_api_client.py +281 -92
- google/genai/_automatic_function_calling_util.py +35 -7
- google/genai/_common.py +9 -6
- google/genai/_extra_utils.py +8 -8
- google/genai/_live_converters.py +14 -0
- google/genai/_mcp_utils.py +4 -1
- google/genai/_replay_api_client.py +6 -2
- google/genai/_transformers.py +13 -12
- google/genai/batches.py +19 -2
- google/genai/errors.py +6 -3
- google/genai/live.py +2 -3
- google/genai/models.py +472 -22
- google/genai/pagers.py +5 -5
- google/genai/tokens.py +3 -3
- google/genai/tunings.py +33 -6
- google/genai/types.py +395 -39
- google/genai/version.py +1 -1
- {google_genai-1.27.0.dist-info → google_genai-1.29.0.dist-info}/METADATA +2 -2
- google_genai-1.29.0.dist-info/RECORD +35 -0
- google_genai-1.27.0.dist-info/RECORD +0 -35
- {google_genai-1.27.0.dist-info → google_genai-1.29.0.dist-info}/WHEEL +0 -0
- {google_genai-1.27.0.dist-info → google_genai-1.29.0.dist-info}/licenses/LICENSE +0 -0
- {google_genai-1.27.0.dist-info → google_genai-1.29.0.dist-info}/top_level.txt +0 -0
google/genai/models.py
CHANGED
@@ -997,7 +997,11 @@ def _GenerateImagesConfig_to_mldev(
     raise ValueError('add_watermark parameter is not supported in Gemini API.')
 
   if getv(from_object, ['image_size']) is not None:
-
+    setv(
+        parent_object,
+        ['parameters', 'sampleImageSize'],
+        getv(from_object, ['image_size']),
+    )
 
   if getv(from_object, ['enhance_prompt']) is not None:
     raise ValueError('enhance_prompt parameter is not supported in Gemini API.')
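The practical effect of this hunk is that `GenerateImagesConfig.image_size` is now forwarded to the Gemini API as `parameters.sampleImageSize` rather than being rejected. A minimal usage sketch (the model name and the '2K' value are illustrative placeholders, not taken from this diff):

```python
# Minimal sketch, assuming a configured Gemini Developer API key.
from google import genai
from google.genai import types

client = genai.Client()
response = client.models.generate_images(
    model='imagen-4.0-generate-001',  # hypothetical model id
    prompt='A watercolor hummingbird',
    config=types.GenerateImagesConfig(
        number_of_images=1,
        image_size='2K',  # now serialized as parameters.sampleImageSize
    ),
)
```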
@@ -2839,6 +2843,153 @@ def _UpscaleImageAPIParameters_to_vertex(
   return to_object
 
 
+def _ProductImage_to_vertex(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['product_image']) is not None:
+    setv(
+        to_object,
+        ['image'],
+        _Image_to_vertex(getv(from_object, ['product_image']), to_object),
+    )
+
+  return to_object
+
+
+def _RecontextImageSource_to_vertex(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['prompt']) is not None:
+    setv(
+        parent_object, ['instances[0]', 'prompt'], getv(from_object, ['prompt'])
+    )
+
+  if getv(from_object, ['person_image']) is not None:
+    setv(
+        parent_object,
+        ['instances[0]', 'personImage', 'image'],
+        _Image_to_vertex(getv(from_object, ['person_image']), to_object),
+    )
+
+  if getv(from_object, ['product_images']) is not None:
+    setv(
+        parent_object,
+        ['instances[0]', 'productImages'],
+        [
+            _ProductImage_to_vertex(item, to_object)
+            for item in getv(from_object, ['product_images'])
+        ],
+    )
+
+  return to_object
+
+
+def _RecontextImageConfig_to_vertex(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+
+  if getv(from_object, ['number_of_images']) is not None:
+    setv(
+        parent_object,
+        ['parameters', 'sampleCount'],
+        getv(from_object, ['number_of_images']),
+    )
+
+  if getv(from_object, ['base_steps']) is not None:
+    setv(
+        parent_object,
+        ['parameters', 'editConfig', 'baseSteps'],
+        getv(from_object, ['base_steps']),
+    )
+
+  if getv(from_object, ['output_gcs_uri']) is not None:
+    setv(
+        parent_object,
+        ['parameters', 'storageUri'],
+        getv(from_object, ['output_gcs_uri']),
+    )
+
+  if getv(from_object, ['seed']) is not None:
+    setv(parent_object, ['parameters', 'seed'], getv(from_object, ['seed']))
+
+  if getv(from_object, ['safety_filter_level']) is not None:
+    setv(
+        parent_object,
+        ['parameters', 'safetySetting'],
+        getv(from_object, ['safety_filter_level']),
+    )
+
+  if getv(from_object, ['person_generation']) is not None:
+    setv(
+        parent_object,
+        ['parameters', 'personGeneration'],
+        getv(from_object, ['person_generation']),
+    )
+
+  if getv(from_object, ['output_mime_type']) is not None:
+    setv(
+        parent_object,
+        ['parameters', 'outputOptions', 'mimeType'],
+        getv(from_object, ['output_mime_type']),
+    )
+
+  if getv(from_object, ['output_compression_quality']) is not None:
+    setv(
+        parent_object,
+        ['parameters', 'outputOptions', 'compressionQuality'],
+        getv(from_object, ['output_compression_quality']),
+    )
+
+  if getv(from_object, ['enhance_prompt']) is not None:
+    setv(
+        parent_object,
+        ['parameters', 'enhancePrompt'],
+        getv(from_object, ['enhance_prompt']),
+    )
+
+  return to_object
+
+
+def _RecontextImageParameters_to_vertex(
+    api_client: BaseApiClient,
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['model']) is not None:
+    setv(
+        to_object,
+        ['_url', 'model'],
+        t.t_model(api_client, getv(from_object, ['model'])),
+    )
+
+  if getv(from_object, ['source']) is not None:
+    setv(
+        to_object,
+        ['config'],
+        _RecontextImageSource_to_vertex(
+            getv(from_object, ['source']), to_object
+        ),
+    )
+
+  if getv(from_object, ['config']) is not None:
+    setv(
+        to_object,
+        ['config'],
+        _RecontextImageConfig_to_vertex(
+            getv(from_object, ['config']), to_object
+        ),
+    )
+
+  return to_object
+
+
 def _GetModelParameters_to_vertex(
     api_client: BaseApiClient,
     from_object: Union[dict[str, Any], object],
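The new converters write into the shared request dict via `setv` paths such as `['instances[0]', 'personImage', 'image']` and `['parameters', 'sampleCount']`. A rough sketch of the `:predict` request body they appear to assemble, inferred only from those paths (field values are placeholders, and the nested image payload shape is an assumption not shown in this diff):

```python
# Inferred shape only; values are placeholders and the 'gcsUri' image payload
# is an assumption, not something this diff confirms.
request_body = {
    'instances': [{
        'prompt': 'In a modern kitchen setting.',        # RecontextImageSource.prompt
        'personImage': {                                  # RecontextImageSource.person_image
            'image': {'gcsUri': 'gs://bucket/person.png'},
        },
        'productImages': [                                # RecontextImageSource.product_images
            {'image': {'gcsUri': 'gs://bucket/product.png'}},
        ],
    }],
    'parameters': {
        'sampleCount': 1,                                 # number_of_images
        'editConfig': {'baseSteps': 32},                  # base_steps
        'storageUri': 'gs://bucket/output/',              # output_gcs_uri
        'outputOptions': {'mimeType': 'image/png'},       # output_mime_type
        'enhancePrompt': True,                            # enhance_prompt
    },
}
```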
@@ -3518,6 +3669,9 @@ def _GenerateContentResponse_from_mldev(
   if getv(from_object, ['promptFeedback']) is not None:
     setv(to_object, ['prompt_feedback'], getv(from_object, ['promptFeedback']))
 
+  if getv(from_object, ['responseId']) is not None:
+    setv(to_object, ['response_id'], getv(from_object, ['responseId']))
+
   if getv(from_object, ['usageMetadata']) is not None:
     setv(to_object, ['usage_metadata'], getv(from_object, ['usageMetadata']))
 
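With `responseId` now mapped on the Gemini API path (and reordered after `promptFeedback` on the Vertex path in a later hunk), the identifier surfaces as `response_id` on the parsed response. A small sketch (model name is illustrative):

```python
# Minimal sketch, assuming a configured client.
from google import genai

client = genai.Client()
response = client.models.generate_content(
    model='gemini-2.0-flash', contents='Say hello.'
)
print(response.response_id)  # populated from the raw responseId field
```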
@@ -3549,6 +3703,11 @@ def _EmbedContentResponse_from_mldev(
     parent_object: Optional[dict[str, Any]] = None,
 ) -> dict[str, Any]:
   to_object: dict[str, Any] = {}
+  if getv(from_object, ['sdkHttpResponse']) is not None:
+    setv(
+        to_object, ['sdk_http_response'], getv(from_object, ['sdkHttpResponse'])
+    )
+
   if getv(from_object, ['embeddings']) is not None:
     setv(
         to_object,
@@ -3647,6 +3806,11 @@ def _GenerateImagesResponse_from_mldev(
     parent_object: Optional[dict[str, Any]] = None,
 ) -> dict[str, Any]:
   to_object: dict[str, Any] = {}
+  if getv(from_object, ['sdkHttpResponse']) is not None:
+    setv(
+        to_object, ['sdk_http_response'], getv(from_object, ['sdkHttpResponse'])
+    )
+
   if getv(from_object, ['predictions']) is not None:
     setv(
         to_object,
@@ -3772,6 +3936,11 @@ def _CountTokensResponse_from_mldev(
     parent_object: Optional[dict[str, Any]] = None,
 ) -> dict[str, Any]:
   to_object: dict[str, Any] = {}
+  if getv(from_object, ['sdkHttpResponse']) is not None:
+    setv(
+        to_object, ['sdk_http_response'], getv(from_object, ['sdkHttpResponse'])
+    )
+
   if getv(from_object, ['totalTokens']) is not None:
     setv(to_object, ['total_tokens'], getv(from_object, ['totalTokens']))
 
@@ -4155,15 +4324,15 @@ def _GenerateContentResponse_from_vertex(
   if getv(from_object, ['createTime']) is not None:
     setv(to_object, ['create_time'], getv(from_object, ['createTime']))
 
-  if getv(from_object, ['responseId']) is not None:
-    setv(to_object, ['response_id'], getv(from_object, ['responseId']))
-
   if getv(from_object, ['modelVersion']) is not None:
     setv(to_object, ['model_version'], getv(from_object, ['modelVersion']))
 
   if getv(from_object, ['promptFeedback']) is not None:
     setv(to_object, ['prompt_feedback'], getv(from_object, ['promptFeedback']))
 
+  if getv(from_object, ['responseId']) is not None:
+    setv(to_object, ['response_id'], getv(from_object, ['responseId']))
+
   if getv(from_object, ['usageMetadata']) is not None:
     setv(to_object, ['usage_metadata'], getv(from_object, ['usageMetadata']))
 
@@ -4224,6 +4393,11 @@ def _EmbedContentResponse_from_vertex(
     parent_object: Optional[dict[str, Any]] = None,
 ) -> dict[str, Any]:
   to_object: dict[str, Any] = {}
+  if getv(from_object, ['sdkHttpResponse']) is not None:
+    setv(
+        to_object, ['sdk_http_response'], getv(from_object, ['sdkHttpResponse'])
+    )
+
   if getv(from_object, ['predictions[]', 'embeddings']) is not None:
     setv(
         to_object,
@@ -4327,6 +4501,11 @@ def _GenerateImagesResponse_from_vertex(
     parent_object: Optional[dict[str, Any]] = None,
 ) -> dict[str, Any]:
   to_object: dict[str, Any] = {}
+  if getv(from_object, ['sdkHttpResponse']) is not None:
+    setv(
+        to_object, ['sdk_http_response'], getv(from_object, ['sdkHttpResponse'])
+    )
+
   if getv(from_object, ['predictions']) is not None:
     setv(
         to_object,
@@ -4354,6 +4533,11 @@ def _EditImageResponse_from_vertex(
     parent_object: Optional[dict[str, Any]] = None,
 ) -> dict[str, Any]:
   to_object: dict[str, Any] = {}
+  if getv(from_object, ['sdkHttpResponse']) is not None:
+    setv(
+        to_object, ['sdk_http_response'], getv(from_object, ['sdkHttpResponse'])
+    )
+
   if getv(from_object, ['predictions']) is not None:
     setv(
         to_object,
@@ -4370,6 +4554,29 @@ def _EditImageResponse_from_vertex(
 def _UpscaleImageResponse_from_vertex(
     from_object: Union[dict[str, Any], object],
     parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['sdkHttpResponse']) is not None:
+    setv(
+        to_object, ['sdk_http_response'], getv(from_object, ['sdkHttpResponse'])
+    )
+
+  if getv(from_object, ['predictions']) is not None:
+    setv(
+        to_object,
+        ['generated_images'],
+        [
+            _GeneratedImage_from_vertex(item, to_object)
+            for item in getv(from_object, ['predictions'])
+        ],
+    )
+
+  return to_object
+
+
+def _RecontextImageResponse_from_vertex(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
 ) -> dict[str, Any]:
   to_object: dict[str, Any] = {}
   if getv(from_object, ['predictions']) is not None:
@@ -4539,6 +4746,11 @@ def _CountTokensResponse_from_vertex(
     parent_object: Optional[dict[str, Any]] = None,
 ) -> dict[str, Any]:
   to_object: dict[str, Any] = {}
+  if getv(from_object, ['sdkHttpResponse']) is not None:
+    setv(
+        to_object, ['sdk_http_response'], getv(from_object, ['sdkHttpResponse'])
+    )
+
   if getv(from_object, ['totalTokens']) is not None:
     setv(to_object, ['total_tokens'], getv(from_object, ['totalTokens']))
 
@@ -4550,6 +4762,11 @@ def _ComputeTokensResponse_from_vertex(
     parent_object: Optional[dict[str, Any]] = None,
 ) -> dict[str, Any]:
   to_object: dict[str, Any] = {}
+  if getv(from_object, ['sdkHttpResponse']) is not None:
+    setv(
+        to_object, ['sdk_http_response'], getv(from_object, ['sdkHttpResponse'])
+    )
+
   if getv(from_object, ['tokensInfo']) is not None:
     setv(to_object, ['tokens_info'], getv(from_object, ['tokensInfo']))
 
@@ -4903,7 +5120,9 @@ class Models(_api_module.BaseModule):
     return_value = types.EmbedContentResponse._from_response(
         response=response_dict, kwargs=parameter_model.model_dump()
     )
-
+    return_value.sdk_http_response = types.HttpResponse(
+        headers=response.headers
+    )
     self._api_client._verify_response(return_value)
     return return_value
 
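Each of these wrapper methods now attaches the raw HTTP headers to the returned object as `sdk_http_response`, a `types.HttpResponse` built from `response.headers`. A hedged sketch of reading them back (model name is illustrative):

```python
# Minimal sketch, assuming a configured client.
from google import genai

client = genai.Client()
result = client.models.embed_content(
    model='gemini-embedding-001',  # hypothetical model id
    contents='hello world',
)
if result.sdk_http_response is not None:
    print(result.sdk_http_response.headers)
```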
@@ -4979,7 +5198,9 @@ class Models(_api_module.BaseModule):
     return_value = types.GenerateImagesResponse._from_response(
         response=response_dict, kwargs=parameter_model.model_dump()
     )
-
+    return_value.sdk_http_response = types.HttpResponse(
+        headers=response.headers
+    )
     self._api_client._verify_response(return_value)
     return return_value
 
@@ -5081,7 +5302,9 @@ class Models(_api_module.BaseModule):
     return_value = types.EditImageResponse._from_response(
         response=response_dict, kwargs=parameter_model.model_dump()
     )
-
+    return_value.sdk_http_response = types.HttpResponse(
+        headers=response.headers
+    )
     self._api_client._verify_response(return_value)
     return return_value
 
@@ -5150,6 +5373,114 @@ class Models(_api_module.BaseModule):
     return_value = types.UpscaleImageResponse._from_response(
         response=response_dict, kwargs=parameter_model.model_dump()
     )
+    return_value.sdk_http_response = types.HttpResponse(
+        headers=response.headers
+    )
+    self._api_client._verify_response(return_value)
+    return return_value
+
+  def recontext_image(
+      self,
+      *,
+      model: str,
+      source: types.RecontextImageSourceOrDict,
+      config: Optional[types.RecontextImageConfigOrDict] = None,
+  ) -> types.RecontextImageResponse:
+    """Recontextualizes an image.
+
+    There are two types of recontextualization currently supported:
+    1) Imagen Product Recontext - Generate images of products in new scenes
+       and contexts.
+    2) Virtual Try-On: Generate images of persons modeling fashion products.
+
+    Args:
+      model (str): The model to use.
+      source (RecontextImageSource): An object containing the source inputs
+        (prompt, person_image, product_images) for image recontext. prompt is
+        optional for product recontext and disallowed for virtual try-on.
+        person_image is required for virtual try-on, disallowed for product
+        recontext. product_images is required for both product recontext and
+        virtual try-on. Only one product image is supported for virtual try-on,
+        and up to 3 product images (different angles of the same product) are
+        supported for product recontext.
+      config (RecontextImageConfig): Configuration for recontextualization.
+
+    Usage:
+
+    ```
+    product_recontext_response = client.models.recontext_image(
+        model="imagen-product-recontext-preview-06-30",
+        source=types.RecontextImageSource(
+            prompt="In a modern kitchen setting.",
+            product_images=[types.ProductImage.from_file(IMAGE_FILE_PATH)],
+        ),
+        config=types.RecontextImageConfig(
+            number_of_images=1,
+        ),
+    )
+    image = product_recontext_response.generated_images[0].image
+
+    virtual_try_on_response = client.models.recontext_image(
+        model="virtual-try-on-preview-08-04",
+        source=types.RecontextImageSource(
+            person_image=types.Image.from_file(IMAGE1_FILE_PATH),
+            product_images=[types.ProductImage.from_file(IMAGE2_FILE_PATH)],
+        ),
+        config=types.RecontextImageConfig(
+            number_of_images=1,
+        ),
+    )
+    image = virtual_try_on_response.generated_images[0].image
+    ```
+    """
+
+    parameter_model = types._RecontextImageParameters(
+        model=model,
+        source=source,
+        config=config,
+    )
+
+    request_url_dict: Optional[dict[str, str]]
+    if not self._api_client.vertexai:
+      raise ValueError('This method is only supported in the Vertex AI client.')
+    else:
+      request_dict = _RecontextImageParameters_to_vertex(
+          self._api_client, parameter_model
+      )
+      request_url_dict = request_dict.get('_url')
+      if request_url_dict:
+        path = '{model}:predict'.format_map(request_url_dict)
+      else:
+        path = '{model}:predict'
+
+    query_params = request_dict.get('_query')
+    if query_params:
+      path = f'{path}?{urlencode(query_params)}'
+    # TODO: remove the hack that pops config.
+    request_dict.pop('config', None)
+
+    http_options: Optional[types.HttpOptions] = None
+    if (
+        parameter_model.config is not None
+        and parameter_model.config.http_options is not None
+    ):
+      http_options = parameter_model.config.http_options
+
+    request_dict = _common.convert_to_dict(request_dict)
+    request_dict = _common.encode_unserializable_types(request_dict)
+
+    response = self._api_client.request(
+        'post', path, request_dict, http_options
+    )
+
+    response_dict = '' if not response.body else json.loads(response.body)
+
+    if self._api_client.vertexai:
+      response_dict = _RecontextImageResponse_from_vertex(response_dict)
+
+    return_value = types.RecontextImageResponse._from_response(
+        response=response_dict, kwargs=parameter_model.model_dump()
+    )
 
     self._api_client._verify_response(return_value)
     return return_value
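As the body above shows, `recontext_image` raises `ValueError` unless the client is in Vertex AI mode, so callers need a Vertex client. A sketch that follows the docstring's own usage (project, location, and file path are placeholders):

```python
# Minimal sketch, assuming Vertex AI credentials; mirrors the docstring above.
from google import genai
from google.genai import types

IMAGE_FILE_PATH = 'product.png'  # placeholder

client = genai.Client(
    vertexai=True, project='my-project', location='us-central1'
)
response = client.models.recontext_image(
    model='imagen-product-recontext-preview-06-30',
    source=types.RecontextImageSource(
        prompt='On a marble countertop.',
        product_images=[types.ProductImage.from_file(IMAGE_FILE_PATH)],
    ),
    config=types.RecontextImageConfig(number_of_images=1),
)
image = response.generated_images[0].image
```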
@@ -5494,7 +5825,9 @@ class Models(_api_module.BaseModule):
     return_value = types.CountTokensResponse._from_response(
         response=response_dict, kwargs=parameter_model.model_dump()
     )
-
+    return_value.sdk_http_response = types.HttpResponse(
+        headers=response.headers
+    )
     self._api_client._verify_response(return_value)
     return return_value
 
@@ -5575,7 +5908,9 @@ class Models(_api_module.BaseModule):
     return_value = types.ComputeTokensResponse._from_response(
         response=response_dict, kwargs=parameter_model.model_dump()
     )
-
+    return_value.sdk_http_response = types.HttpResponse(
+        headers=response.headers
+    )
     self._api_client._verify_response(return_value)
     return return_value
 
@@ -5689,7 +6024,7 @@ class Models(_api_module.BaseModule):
       self,
       *,
       model: str,
-      contents:
+      contents: types.ContentListUnionDict,
       config: Optional[types.GenerateContentConfigOrDict] = None,
   ) -> types.GenerateContentResponse:
     """Makes an API request to generate content using a model.
@@ -5700,9 +6035,8 @@ class Models(_api_module.BaseModule):
         'projects/my-project-id/locations/us-central1/publishers/google/models/gemini-2.0-flash'
       - The partial resource name with 'publishers/', for example:
         'publishers/google/models/gemini-2.0-flash' or
-        'publishers/meta/models/llama-3.1-405b-instruct-maas'
       - `/` separated publisher and model name, for example:
-        'google/gemini-2.0-flash'
+        'google/gemini-2.0-flash'
 
     For the `model` parameter, supported formats for Gemini API include:
       - The Gemini model ID, for example: 'gemini-2.0-flash'
@@ -5825,7 +6159,7 @@ class Models(_api_module.BaseModule):
       self,
       *,
       model: str,
-      contents:
+      contents: types.ContentListUnionDict,
      config: Optional[types.GenerateContentConfigOrDict] = None,
   ) -> Iterator[types.GenerateContentResponse]:
     """Makes an API request to generate content using a model and yields the model's response in chunks.
@@ -5836,9 +6170,8 @@ class Models(_api_module.BaseModule):
         'projects/my-project-id/locations/us-central1/publishers/google/models/gemini-2.0-flash'
       - The partial resource name with 'publishers/', for example:
         'publishers/google/models/gemini-2.0-flash' or
-        'publishers/meta/models/llama-3.1-405b-instruct-maas'
       - `/` separated publisher and model name, for example:
-        'google/gemini-2.0-flash'
+        'google/gemini-2.0-flash'
 
     For the `model` parameter, supported formats for Gemini API include:
       - The Gemini model ID, for example: 'gemini-2.0-flash'
@@ -6516,7 +6849,9 @@ class AsyncModels(_api_module.BaseModule):
     return_value = types.EmbedContentResponse._from_response(
         response=response_dict, kwargs=parameter_model.model_dump()
     )
-
+    return_value.sdk_http_response = types.HttpResponse(
+        headers=response.headers
+    )
     self._api_client._verify_response(return_value)
     return return_value
 
@@ -6592,7 +6927,9 @@ class AsyncModels(_api_module.BaseModule):
     return_value = types.GenerateImagesResponse._from_response(
         response=response_dict, kwargs=parameter_model.model_dump()
     )
-
+    return_value.sdk_http_response = types.HttpResponse(
+        headers=response.headers
+    )
     self._api_client._verify_response(return_value)
     return return_value
 
@@ -6694,7 +7031,9 @@ class AsyncModels(_api_module.BaseModule):
     return_value = types.EditImageResponse._from_response(
         response=response_dict, kwargs=parameter_model.model_dump()
     )
-
+    return_value.sdk_http_response = types.HttpResponse(
+        headers=response.headers
+    )
     self._api_client._verify_response(return_value)
     return return_value
 
@@ -6763,6 +7102,114 @@
     return_value = types.UpscaleImageResponse._from_response(
         response=response_dict, kwargs=parameter_model.model_dump()
     )
+    return_value.sdk_http_response = types.HttpResponse(
+        headers=response.headers
+    )
+    self._api_client._verify_response(return_value)
+    return return_value
+
+  async def recontext_image(
+      self,
+      *,
+      model: str,
+      source: types.RecontextImageSourceOrDict,
+      config: Optional[types.RecontextImageConfigOrDict] = None,
+  ) -> types.RecontextImageResponse:
+    """Recontextualizes an image.
+
+    There are two types of recontextualization currently supported:
+    1) Imagen Product Recontext - Generate images of products in new scenes
+       and contexts.
+    2) Virtual Try-On: Generate images of persons modeling fashion products.
+
+    Args:
+      model (str): The model to use.
+      source (RecontextImageSource): An object containing the source inputs
+        (prompt, person_image, product_images) for image recontext. prompt is
+        optional for product recontext and disallowed for virtual try-on.
+        person_image is required for virtual try-on, disallowed for product
+        recontext. product_images is required for both product recontext and
+        virtual try-on. Only one product image is supported for virtual try-on,
+        and up to 3 product images (different angles of the same product) are
+        supported for product recontext.
+      config (RecontextImageConfig): Configuration for recontextualization.
+
+    Usage:
+
+    ```
+    product_recontext_response = client.models.recontext_image(
+        model="imagen-product-recontext-preview-06-30",
+        source=types.RecontextImageSource(
+            prompt="In a modern kitchen setting.",
+            product_images=[types.ProductImage.from_file(IMAGE_FILE_PATH)],
+        ),
+        config=types.RecontextImageConfig(
+            number_of_images=1,
+        ),
+    )
+    image = product_recontext_response.generated_images[0].image
+
+    virtual_try_on_response = client.models.recontext_image(
+        model="virtual-try-on-preview-08-04",
+        source=types.RecontextImageSource(
+            person_image=types.Image.from_file(IMAGE1_FILE_PATH),
+            product_images=[types.ProductImage.from_file(IMAGE2_FILE_PATH)],
+        ),
+        config=types.RecontextImageConfig(
+            number_of_images=1,
+        ),
+    )
+    image = virtual_try_on_response.generated_images[0].image
+    ```
+    """
+
+    parameter_model = types._RecontextImageParameters(
+        model=model,
+        source=source,
+        config=config,
+    )
+
+    request_url_dict: Optional[dict[str, str]]
+    if not self._api_client.vertexai:
+      raise ValueError('This method is only supported in the Vertex AI client.')
+    else:
+      request_dict = _RecontextImageParameters_to_vertex(
+          self._api_client, parameter_model
+      )
+      request_url_dict = request_dict.get('_url')
+      if request_url_dict:
+        path = '{model}:predict'.format_map(request_url_dict)
+      else:
+        path = '{model}:predict'
+
+    query_params = request_dict.get('_query')
+    if query_params:
+      path = f'{path}?{urlencode(query_params)}'
+    # TODO: remove the hack that pops config.
+    request_dict.pop('config', None)
+
+    http_options: Optional[types.HttpOptions] = None
+    if (
+        parameter_model.config is not None
+        and parameter_model.config.http_options is not None
+    ):
+      http_options = parameter_model.config.http_options
+
+    request_dict = _common.convert_to_dict(request_dict)
+    request_dict = _common.encode_unserializable_types(request_dict)
+
+    response = await self._api_client.async_request(
+        'post', path, request_dict, http_options
+    )
+
+    response_dict = '' if not response.body else json.loads(response.body)
+
+    if self._api_client.vertexai:
+      response_dict = _RecontextImageResponse_from_vertex(response_dict)
+
+    return_value = types.RecontextImageResponse._from_response(
+        response=response_dict, kwargs=parameter_model.model_dump()
+    )
 
     self._api_client._verify_response(return_value)
     return return_value
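The async mirror issues the same request through `async_request` and is reachable via `client.aio`. A sketch of the async call (project, location, and file paths are placeholders; usage follows the docstring above):

```python
# Minimal sketch, assuming Vertex AI credentials.
import asyncio

from google import genai
from google.genai import types

IMAGE1_FILE_PATH = 'person.png'   # placeholder
IMAGE2_FILE_PATH = 'garment.png'  # placeholder


async def main() -> None:
  client = genai.Client(
      vertexai=True, project='my-project', location='us-central1'
  )
  response = await client.aio.models.recontext_image(
      model='virtual-try-on-preview-08-04',
      source=types.RecontextImageSource(
          person_image=types.Image.from_file(IMAGE1_FILE_PATH),
          product_images=[types.ProductImage.from_file(IMAGE2_FILE_PATH)],
      ),
      config=types.RecontextImageConfig(number_of_images=1),
  )
  print(len(response.generated_images or []))


asyncio.run(main())
```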
@@ -7111,7 +7558,9 @@ class AsyncModels(_api_module.BaseModule):
     return_value = types.CountTokensResponse._from_response(
         response=response_dict, kwargs=parameter_model.model_dump()
     )
-
+    return_value.sdk_http_response = types.HttpResponse(
+        headers=response.headers
+    )
     self._api_client._verify_response(return_value)
     return return_value
 
@@ -7191,7 +7640,9 @@ class AsyncModels(_api_module.BaseModule):
     return_value = types.ComputeTokensResponse._from_response(
         response=response_dict, kwargs=parameter_model.model_dump()
     )
-
+    return_value.sdk_http_response = types.HttpResponse(
+        headers=response.headers
+    )
     self._api_client._verify_response(return_value)
     return return_value
 
@@ -7421,9 +7872,8 @@ class AsyncModels(_api_module.BaseModule):
         'projects/my-project-id/locations/us-central1/publishers/google/models/gemini-2.0-flash'
       - The partial resource name with 'publishers/', for example:
         'publishers/google/models/gemini-2.0-flash' or
-        'publishers/meta/models/llama-3.1-405b-instruct-maas'
       - `/` separated publisher and model name, for example:
-        'google/gemini-2.0-flash'
+        'google/gemini-2.0-flash'
 
     For the `model` parameter, supported formats for Gemini API include:
       - The Gemini model ID, for example: 'gemini-2.0-flash'