google-genai 1.4.0__py3-none-any.whl → 1.6.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- google/genai/_api_client.py +207 -111
- google/genai/_automatic_function_calling_util.py +6 -16
- google/genai/_common.py +5 -2
- google/genai/_extra_utils.py +62 -47
- google/genai/_replay_api_client.py +70 -2
- google/genai/_transformers.py +98 -57
- google/genai/batches.py +14 -10
- google/genai/caches.py +30 -36
- google/genai/client.py +3 -2
- google/genai/errors.py +11 -19
- google/genai/files.py +28 -15
- google/genai/live.py +276 -93
- google/genai/models.py +201 -112
- google/genai/operations.py +40 -12
- google/genai/pagers.py +17 -10
- google/genai/tunings.py +40 -30
- google/genai/types.py +146 -58
- google/genai/version.py +1 -1
- {google_genai-1.4.0.dist-info → google_genai-1.6.0.dist-info}/METADATA +194 -24
- google_genai-1.6.0.dist-info/RECORD +27 -0
- {google_genai-1.4.0.dist-info → google_genai-1.6.0.dist-info}/WHEEL +1 -1
- google_genai-1.4.0.dist-info/RECORD +0 -27
- {google_genai-1.4.0.dist-info → google_genai-1.6.0.dist-info}/LICENSE +0 -0
- {google_genai-1.4.0.dist-info → google_genai-1.6.0.dist-info}/top_level.txt +0 -0
google/genai/models.py
CHANGED
@@ -175,15 +175,9 @@ def _Schema_to_mldev(
|
|
175
175
|
if getv(from_object, ['pattern']) is not None:
|
176
176
|
raise ValueError('pattern parameter is not supported in Gemini API.')
|
177
177
|
|
178
|
-
if getv(from_object, ['minimum']) is not None:
|
179
|
-
raise ValueError('minimum parameter is not supported in Gemini API.')
|
180
|
-
|
181
178
|
if getv(from_object, ['default']) is not None:
|
182
179
|
raise ValueError('default parameter is not supported in Gemini API.')
|
183
180
|
|
184
|
-
if getv(from_object, ['any_of']) is not None:
|
185
|
-
raise ValueError('any_of parameter is not supported in Gemini API.')
|
186
|
-
|
187
181
|
if getv(from_object, ['max_length']) is not None:
|
188
182
|
raise ValueError('max_length parameter is not supported in Gemini API.')
|
189
183
|
|
@@ -196,12 +190,12 @@ def _Schema_to_mldev(
|
|
196
190
|
if getv(from_object, ['min_properties']) is not None:
|
197
191
|
raise ValueError('min_properties parameter is not supported in Gemini API.')
|
198
192
|
|
199
|
-
if getv(from_object, ['maximum']) is not None:
|
200
|
-
raise ValueError('maximum parameter is not supported in Gemini API.')
|
201
|
-
|
202
193
|
if getv(from_object, ['max_properties']) is not None:
|
203
194
|
raise ValueError('max_properties parameter is not supported in Gemini API.')
|
204
195
|
|
196
|
+
if getv(from_object, ['any_of']) is not None:
|
197
|
+
setv(to_object, ['anyOf'], getv(from_object, ['any_of']))
|
198
|
+
|
205
199
|
if getv(from_object, ['description']) is not None:
|
206
200
|
setv(to_object, ['description'], getv(from_object, ['description']))
|
207
201
|
|
@@ -217,9 +211,15 @@ def _Schema_to_mldev(
|
|
217
211
|
if getv(from_object, ['max_items']) is not None:
|
218
212
|
setv(to_object, ['maxItems'], getv(from_object, ['max_items']))
|
219
213
|
|
214
|
+
if getv(from_object, ['maximum']) is not None:
|
215
|
+
setv(to_object, ['maximum'], getv(from_object, ['maximum']))
|
216
|
+
|
220
217
|
if getv(from_object, ['min_items']) is not None:
|
221
218
|
setv(to_object, ['minItems'], getv(from_object, ['min_items']))
|
222
219
|
|
220
|
+
if getv(from_object, ['minimum']) is not None:
|
221
|
+
setv(to_object, ['minimum'], getv(from_object, ['minimum']))
|
222
|
+
|
223
223
|
if getv(from_object, ['nullable']) is not None:
|
224
224
|
setv(to_object, ['nullable'], getv(from_object, ['nullable']))
|
225
225
|
|
@@ -254,15 +254,9 @@ def _Schema_to_vertex(
|
|
254
254
|
if getv(from_object, ['pattern']) is not None:
|
255
255
|
setv(to_object, ['pattern'], getv(from_object, ['pattern']))
|
256
256
|
|
257
|
-
if getv(from_object, ['minimum']) is not None:
|
258
|
-
setv(to_object, ['minimum'], getv(from_object, ['minimum']))
|
259
|
-
|
260
257
|
if getv(from_object, ['default']) is not None:
|
261
258
|
setv(to_object, ['default'], getv(from_object, ['default']))
|
262
259
|
|
263
|
-
if getv(from_object, ['any_of']) is not None:
|
264
|
-
setv(to_object, ['anyOf'], getv(from_object, ['any_of']))
|
265
|
-
|
266
260
|
if getv(from_object, ['max_length']) is not None:
|
267
261
|
setv(to_object, ['maxLength'], getv(from_object, ['max_length']))
|
268
262
|
|
@@ -275,12 +269,12 @@ def _Schema_to_vertex(
|
|
275
269
|
if getv(from_object, ['min_properties']) is not None:
|
276
270
|
setv(to_object, ['minProperties'], getv(from_object, ['min_properties']))
|
277
271
|
|
278
|
-
if getv(from_object, ['maximum']) is not None:
|
279
|
-
setv(to_object, ['maximum'], getv(from_object, ['maximum']))
|
280
|
-
|
281
272
|
if getv(from_object, ['max_properties']) is not None:
|
282
273
|
setv(to_object, ['maxProperties'], getv(from_object, ['max_properties']))
|
283
274
|
|
275
|
+
if getv(from_object, ['any_of']) is not None:
|
276
|
+
setv(to_object, ['anyOf'], getv(from_object, ['any_of']))
|
277
|
+
|
284
278
|
if getv(from_object, ['description']) is not None:
|
285
279
|
setv(to_object, ['description'], getv(from_object, ['description']))
|
286
280
|
|
@@ -296,9 +290,15 @@ def _Schema_to_vertex(
|
|
296
290
|
if getv(from_object, ['max_items']) is not None:
|
297
291
|
setv(to_object, ['maxItems'], getv(from_object, ['max_items']))
|
298
292
|
|
293
|
+
if getv(from_object, ['maximum']) is not None:
|
294
|
+
setv(to_object, ['maximum'], getv(from_object, ['maximum']))
|
295
|
+
|
299
296
|
if getv(from_object, ['min_items']) is not None:
|
300
297
|
setv(to_object, ['minItems'], getv(from_object, ['min_items']))
|
301
298
|
|
299
|
+
if getv(from_object, ['minimum']) is not None:
|
300
|
+
setv(to_object, ['minimum'], getv(from_object, ['minimum']))
|
301
|
+
|
302
302
|
if getv(from_object, ['nullable']) is not None:
|
303
303
|
setv(to_object, ['nullable'], getv(from_object, ['nullable']))
|
304
304
|
|
@@ -1351,10 +1351,8 @@ def _GenerateImagesConfig_to_mldev(
|
|
1351
1351
|
raise ValueError('output_gcs_uri parameter is not supported in Gemini API.')
|
1352
1352
|
|
1353
1353
|
if getv(from_object, ['negative_prompt']) is not None:
|
1354
|
-
|
1355
|
-
|
1356
|
-
['parameters', 'negativePrompt'],
|
1357
|
-
getv(from_object, ['negative_prompt']),
|
1354
|
+
raise ValueError(
|
1355
|
+
'negative_prompt parameter is not supported in Gemini API.'
|
1358
1356
|
)
|
1359
1357
|
|
1360
1358
|
if getv(from_object, ['number_of_images']) is not None:
|
@@ -1910,10 +1908,8 @@ def _EditImageConfig_to_mldev(
|
|
1910
1908
|
raise ValueError('output_gcs_uri parameter is not supported in Gemini API.')
|
1911
1909
|
|
1912
1910
|
if getv(from_object, ['negative_prompt']) is not None:
|
1913
|
-
|
1914
|
-
|
1915
|
-
['parameters', 'negativePrompt'],
|
1916
|
-
getv(from_object, ['negative_prompt']),
|
1911
|
+
raise ValueError(
|
1912
|
+
'negative_prompt parameter is not supported in Gemini API.'
|
1917
1913
|
)
|
1918
1914
|
|
1919
1915
|
if getv(from_object, ['number_of_images']) is not None:
|
@@ -2003,6 +1999,9 @@ def _EditImageConfig_to_mldev(
|
|
2003
1999
|
getv(from_object, ['edit_mode']),
|
2004
2000
|
)
|
2005
2001
|
|
2002
|
+
if getv(from_object, ['base_steps']) is not None:
|
2003
|
+
raise ValueError('base_steps parameter is not supported in Gemini API.')
|
2004
|
+
|
2006
2005
|
return to_object
|
2007
2006
|
|
2008
2007
|
|
@@ -2107,6 +2106,13 @@ def _EditImageConfig_to_vertex(
|
|
2107
2106
|
getv(from_object, ['edit_mode']),
|
2108
2107
|
)
|
2109
2108
|
|
2109
|
+
if getv(from_object, ['base_steps']) is not None:
|
2110
|
+
setv(
|
2111
|
+
parent_object,
|
2112
|
+
['parameters', 'editConfig', 'baseSteps'],
|
2113
|
+
getv(from_object, ['base_steps']),
|
2114
|
+
)
|
2115
|
+
|
2110
2116
|
return to_object
|
2111
2117
|
|
2112
2118
|
|
@@ -2813,8 +2819,10 @@ def _GenerateVideosConfig_to_mldev(
|
|
2813
2819
|
raise ValueError('fps parameter is not supported in Gemini API.')
|
2814
2820
|
|
2815
2821
|
if getv(from_object, ['duration_seconds']) is not None:
|
2816
|
-
|
2817
|
-
|
2822
|
+
setv(
|
2823
|
+
parent_object,
|
2824
|
+
['parameters', 'durationSeconds'],
|
2825
|
+
getv(from_object, ['duration_seconds']),
|
2818
2826
|
)
|
2819
2827
|
|
2820
2828
|
if getv(from_object, ['seed']) is not None:
|
@@ -2948,6 +2956,13 @@ def _GenerateVideosParameters_to_mldev(
|
|
2948
2956
|
if getv(from_object, ['prompt']) is not None:
|
2949
2957
|
setv(to_object, ['instances[0]', 'prompt'], getv(from_object, ['prompt']))
|
2950
2958
|
|
2959
|
+
if getv(from_object, ['image']) is not None:
|
2960
|
+
setv(
|
2961
|
+
to_object,
|
2962
|
+
['instances[0]', 'image'],
|
2963
|
+
_Image_to_mldev(api_client, getv(from_object, ['image']), to_object),
|
2964
|
+
)
|
2965
|
+
|
2951
2966
|
if getv(from_object, ['config']) is not None:
|
2952
2967
|
setv(
|
2953
2968
|
to_object,
|
@@ -2976,6 +2991,13 @@ def _GenerateVideosParameters_to_vertex(
|
|
2976
2991
|
if getv(from_object, ['prompt']) is not None:
|
2977
2992
|
setv(to_object, ['instances[0]', 'prompt'], getv(from_object, ['prompt']))
|
2978
2993
|
|
2994
|
+
if getv(from_object, ['image']) is not None:
|
2995
|
+
setv(
|
2996
|
+
to_object,
|
2997
|
+
['instances[0]', 'image'],
|
2998
|
+
_Image_to_vertex(api_client, getv(from_object, ['image']), to_object),
|
2999
|
+
)
|
3000
|
+
|
2979
3001
|
if getv(from_object, ['config']) is not None:
|
2980
3002
|
setv(
|
2981
3003
|
to_object,
|
@@ -3545,6 +3567,48 @@ def _Image_from_vertex(
|
|
3545
3567
|
return to_object
|
3546
3568
|
|
3547
3569
|
|
3570
|
+
def _SafetyAttributes_from_mldev(
|
3571
|
+
api_client: BaseApiClient,
|
3572
|
+
from_object: Union[dict, object],
|
3573
|
+
parent_object: Optional[dict] = None,
|
3574
|
+
) -> dict:
|
3575
|
+
to_object: dict[str, Any] = {}
|
3576
|
+
if getv(from_object, ['safetyAttributes', 'categories']) is not None:
|
3577
|
+
setv(
|
3578
|
+
to_object,
|
3579
|
+
['categories'],
|
3580
|
+
getv(from_object, ['safetyAttributes', 'categories']),
|
3581
|
+
)
|
3582
|
+
|
3583
|
+
if getv(from_object, ['safetyAttributes', 'scores']) is not None:
|
3584
|
+
setv(
|
3585
|
+
to_object, ['scores'], getv(from_object, ['safetyAttributes', 'scores'])
|
3586
|
+
)
|
3587
|
+
|
3588
|
+
return to_object
|
3589
|
+
|
3590
|
+
|
3591
|
+
def _SafetyAttributes_from_vertex(
|
3592
|
+
api_client: BaseApiClient,
|
3593
|
+
from_object: Union[dict, object],
|
3594
|
+
parent_object: Optional[dict] = None,
|
3595
|
+
) -> dict:
|
3596
|
+
to_object: dict[str, Any] = {}
|
3597
|
+
if getv(from_object, ['safetyAttributes', 'categories']) is not None:
|
3598
|
+
setv(
|
3599
|
+
to_object,
|
3600
|
+
['categories'],
|
3601
|
+
getv(from_object, ['safetyAttributes', 'categories']),
|
3602
|
+
)
|
3603
|
+
|
3604
|
+
if getv(from_object, ['safetyAttributes', 'scores']) is not None:
|
3605
|
+
setv(
|
3606
|
+
to_object, ['scores'], getv(from_object, ['safetyAttributes', 'scores'])
|
3607
|
+
)
|
3608
|
+
|
3609
|
+
return to_object
|
3610
|
+
|
3611
|
+
|
3548
3612
|
def _GeneratedImage_from_mldev(
|
3549
3613
|
api_client: BaseApiClient,
|
3550
3614
|
from_object: Union[dict, object],
|
@@ -3565,6 +3629,15 @@ def _GeneratedImage_from_mldev(
|
|
3565
3629
|
getv(from_object, ['raiFilteredReason']),
|
3566
3630
|
)
|
3567
3631
|
|
3632
|
+
if getv(from_object, ['_self']) is not None:
|
3633
|
+
setv(
|
3634
|
+
to_object,
|
3635
|
+
['safety_attributes'],
|
3636
|
+
_SafetyAttributes_from_mldev(
|
3637
|
+
api_client, getv(from_object, ['_self']), to_object
|
3638
|
+
),
|
3639
|
+
)
|
3640
|
+
|
3568
3641
|
return to_object
|
3569
3642
|
|
3570
3643
|
|
@@ -3588,6 +3661,15 @@ def _GeneratedImage_from_vertex(
|
|
3588
3661
|
getv(from_object, ['raiFilteredReason']),
|
3589
3662
|
)
|
3590
3663
|
|
3664
|
+
if getv(from_object, ['_self']) is not None:
|
3665
|
+
setv(
|
3666
|
+
to_object,
|
3667
|
+
['safety_attributes'],
|
3668
|
+
_SafetyAttributes_from_vertex(
|
3669
|
+
api_client, getv(from_object, ['_self']), to_object
|
3670
|
+
),
|
3671
|
+
)
|
3672
|
+
|
3591
3673
|
if getv(from_object, ['prompt']) is not None:
|
3592
3674
|
setv(to_object, ['enhanced_prompt'], getv(from_object, ['prompt']))
|
3593
3675
|
|
@@ -3999,14 +4081,14 @@ def _Video_from_mldev(
|
|
3999
4081
|
parent_object: Optional[dict] = None,
|
4000
4082
|
) -> dict:
|
4001
4083
|
to_object: dict[str, Any] = {}
|
4002
|
-
if getv(from_object, ['uri']) is not None:
|
4003
|
-
setv(to_object, ['uri'], getv(from_object, ['uri']))
|
4084
|
+
if getv(from_object, ['video', 'uri']) is not None:
|
4085
|
+
setv(to_object, ['uri'], getv(from_object, ['video', 'uri']))
|
4004
4086
|
|
4005
|
-
if getv(from_object, ['encodedVideo']) is not None:
|
4087
|
+
if getv(from_object, ['video', 'encodedVideo']) is not None:
|
4006
4088
|
setv(
|
4007
4089
|
to_object,
|
4008
4090
|
['video_bytes'],
|
4009
|
-
t.t_bytes(api_client, getv(from_object, ['encodedVideo'])),
|
4091
|
+
t.t_bytes(api_client, getv(from_object, ['video', 'encodedVideo'])),
|
4010
4092
|
)
|
4011
4093
|
|
4012
4094
|
if getv(from_object, ['encoding']) is not None:
|
@@ -4075,13 +4157,13 @@ def _GenerateVideosResponse_from_mldev(
|
|
4075
4157
|
parent_object: Optional[dict] = None,
|
4076
4158
|
) -> dict:
|
4077
4159
|
to_object: dict[str, Any] = {}
|
4078
|
-
if getv(from_object, ['
|
4160
|
+
if getv(from_object, ['generatedSamples']) is not None:
|
4079
4161
|
setv(
|
4080
4162
|
to_object,
|
4081
4163
|
['generated_videos'],
|
4082
4164
|
[
|
4083
4165
|
_GeneratedVideo_from_mldev(api_client, item, to_object)
|
4084
|
-
for item in getv(from_object, ['
|
4166
|
+
for item in getv(from_object, ['generatedSamples'])
|
4085
4167
|
],
|
4086
4168
|
)
|
4087
4169
|
|
@@ -4247,7 +4329,7 @@ class Models(_api_module.BaseModule):
|
|
4247
4329
|
http_options: Optional[types.HttpOptionsOrDict] = None
|
4248
4330
|
if isinstance(config, dict):
|
4249
4331
|
http_options = config.get('http_options', None)
|
4250
|
-
elif hasattr(config, 'http_options'):
|
4332
|
+
elif hasattr(config, 'http_options') and config is not None:
|
4251
4333
|
http_options = config.http_options
|
4252
4334
|
|
4253
4335
|
request_dict = _common.convert_to_dict(request_dict)
|
@@ -4318,7 +4400,7 @@ class Models(_api_module.BaseModule):
|
|
4318
4400
|
http_options: Optional[types.HttpOptionsOrDict] = None
|
4319
4401
|
if isinstance(config, dict):
|
4320
4402
|
http_options = config.get('http_options', None)
|
4321
|
-
elif hasattr(config, 'http_options'):
|
4403
|
+
elif hasattr(config, 'http_options') and config is not None:
|
4322
4404
|
http_options = config.http_options
|
4323
4405
|
|
4324
4406
|
request_dict = _common.convert_to_dict(request_dict)
|
@@ -4408,7 +4490,7 @@ class Models(_api_module.BaseModule):
|
|
4408
4490
|
http_options: Optional[types.HttpOptionsOrDict] = None
|
4409
4491
|
if isinstance(config, dict):
|
4410
4492
|
http_options = config.get('http_options', None)
|
4411
|
-
elif hasattr(config, 'http_options'):
|
4493
|
+
elif hasattr(config, 'http_options') and config is not None:
|
4412
4494
|
http_options = config.http_options
|
4413
4495
|
|
4414
4496
|
request_dict = _common.convert_to_dict(request_dict)
|
@@ -4498,7 +4580,7 @@ class Models(_api_module.BaseModule):
|
|
4498
4580
|
http_options: Optional[types.HttpOptionsOrDict] = None
|
4499
4581
|
if isinstance(config, dict):
|
4500
4582
|
http_options = config.get('http_options', None)
|
4501
|
-
elif hasattr(config, 'http_options'):
|
4583
|
+
elif hasattr(config, 'http_options') and config is not None:
|
4502
4584
|
http_options = config.http_options
|
4503
4585
|
|
4504
4586
|
request_dict = _common.convert_to_dict(request_dict)
|
@@ -4602,7 +4684,7 @@ class Models(_api_module.BaseModule):
|
|
4602
4684
|
http_options: Optional[types.HttpOptionsOrDict] = None
|
4603
4685
|
if isinstance(config, dict):
|
4604
4686
|
http_options = config.get('http_options', None)
|
4605
|
-
elif hasattr(config, 'http_options'):
|
4687
|
+
elif hasattr(config, 'http_options') and config is not None:
|
4606
4688
|
http_options = config.http_options
|
4607
4689
|
|
4608
4690
|
request_dict = _common.convert_to_dict(request_dict)
|
@@ -4673,7 +4755,7 @@ class Models(_api_module.BaseModule):
|
|
4673
4755
|
http_options: Optional[types.HttpOptionsOrDict] = None
|
4674
4756
|
if isinstance(config, dict):
|
4675
4757
|
http_options = config.get('http_options', None)
|
4676
|
-
elif hasattr(config, 'http_options'):
|
4758
|
+
elif hasattr(config, 'http_options') and config is not None:
|
4677
4759
|
http_options = config.http_options
|
4678
4760
|
|
4679
4761
|
request_dict = _common.convert_to_dict(request_dict)
|
@@ -4735,7 +4817,7 @@ class Models(_api_module.BaseModule):
|
|
4735
4817
|
http_options: Optional[types.HttpOptionsOrDict] = None
|
4736
4818
|
if isinstance(config, dict):
|
4737
4819
|
http_options = config.get('http_options', None)
|
4738
|
-
elif hasattr(config, 'http_options'):
|
4820
|
+
elif hasattr(config, 'http_options') and config is not None:
|
4739
4821
|
http_options = config.http_options
|
4740
4822
|
|
4741
4823
|
request_dict = _common.convert_to_dict(request_dict)
|
@@ -4792,7 +4874,7 @@ class Models(_api_module.BaseModule):
|
|
4792
4874
|
http_options: Optional[types.HttpOptionsOrDict] = None
|
4793
4875
|
if isinstance(config, dict):
|
4794
4876
|
http_options = config.get('http_options', None)
|
4795
|
-
elif hasattr(config, 'http_options'):
|
4877
|
+
elif hasattr(config, 'http_options') and config is not None:
|
4796
4878
|
http_options = config.http_options
|
4797
4879
|
|
4798
4880
|
request_dict = _common.convert_to_dict(request_dict)
|
@@ -4857,7 +4939,7 @@ class Models(_api_module.BaseModule):
|
|
4857
4939
|
http_options: Optional[types.HttpOptionsOrDict] = None
|
4858
4940
|
if isinstance(config, dict):
|
4859
4941
|
http_options = config.get('http_options', None)
|
4860
|
-
elif hasattr(config, 'http_options'):
|
4942
|
+
elif hasattr(config, 'http_options') and config is not None:
|
4861
4943
|
http_options = config.http_options
|
4862
4944
|
|
4863
4945
|
request_dict = _common.convert_to_dict(request_dict)
|
@@ -4918,7 +5000,7 @@ class Models(_api_module.BaseModule):
|
|
4918
5000
|
http_options: Optional[types.HttpOptionsOrDict] = None
|
4919
5001
|
if isinstance(config, dict):
|
4920
5002
|
http_options = config.get('http_options', None)
|
4921
|
-
elif hasattr(config, 'http_options'):
|
5003
|
+
elif hasattr(config, 'http_options') and config is not None:
|
4922
5004
|
http_options = config.http_options
|
4923
5005
|
|
4924
5006
|
request_dict = _common.convert_to_dict(request_dict)
|
@@ -4964,7 +5046,7 @@ class Models(_api_module.BaseModule):
|
|
4964
5046
|
.. code-block:: python
|
4965
5047
|
|
4966
5048
|
response = client.models.count_tokens(
|
4967
|
-
model='gemini-
|
5049
|
+
model='gemini-2.0-flash',
|
4968
5050
|
contents='What is your name?',
|
4969
5051
|
)
|
4970
5052
|
print(response)
|
@@ -5006,7 +5088,7 @@ class Models(_api_module.BaseModule):
|
|
5006
5088
|
http_options: Optional[types.HttpOptionsOrDict] = None
|
5007
5089
|
if isinstance(config, dict):
|
5008
5090
|
http_options = config.get('http_options', None)
|
5009
|
-
elif hasattr(config, 'http_options'):
|
5091
|
+
elif hasattr(config, 'http_options') and config is not None:
|
5010
5092
|
http_options = config.http_options
|
5011
5093
|
|
5012
5094
|
request_dict = _common.convert_to_dict(request_dict)
|
@@ -5038,9 +5120,9 @@ class Models(_api_module.BaseModule):
|
|
5038
5120
|
contents: Union[types.ContentListUnion, types.ContentListUnionDict],
|
5039
5121
|
config: Optional[types.ComputeTokensConfigOrDict] = None,
|
5040
5122
|
) -> types.ComputeTokensResponse:
|
5041
|
-
"""
|
5123
|
+
"""Given a list of contents, returns a corresponding TokensInfo containing the
|
5042
5124
|
|
5043
|
-
|
5125
|
+
list of tokens and list of token ids.
|
5044
5126
|
|
5045
5127
|
This method is not supported by the Gemini Developer API.
|
5046
5128
|
|
@@ -5053,7 +5135,7 @@ class Models(_api_module.BaseModule):
|
|
5053
5135
|
.. code-block:: python
|
5054
5136
|
|
5055
5137
|
response = client.models.compute_tokens(
|
5056
|
-
model='gemini-
|
5138
|
+
model='gemini-2.0-flash',
|
5057
5139
|
contents='What is your name?',
|
5058
5140
|
)
|
5059
5141
|
print(response)
|
@@ -5089,7 +5171,7 @@ class Models(_api_module.BaseModule):
|
|
5089
5171
|
http_options: Optional[types.HttpOptionsOrDict] = None
|
5090
5172
|
if isinstance(config, dict):
|
5091
5173
|
http_options = config.get('http_options', None)
|
5092
|
-
elif hasattr(config, 'http_options'):
|
5174
|
+
elif hasattr(config, 'http_options') and config is not None:
|
5093
5175
|
http_options = config.http_options
|
5094
5176
|
|
5095
5177
|
request_dict = _common.convert_to_dict(request_dict)
|
@@ -5122,6 +5204,7 @@ class Models(_api_module.BaseModule):
|
|
5122
5204
|
*,
|
5123
5205
|
model: str,
|
5124
5206
|
prompt: Optional[str] = None,
|
5207
|
+
image: Optional[types.ImageOrDict] = None,
|
5125
5208
|
config: Optional[types.GenerateVideosConfigOrDict] = None,
|
5126
5209
|
) -> types.GenerateVideosOperation:
|
5127
5210
|
"""Generates videos based on a text description and configuration.
|
@@ -5149,6 +5232,7 @@ class Models(_api_module.BaseModule):
|
|
5149
5232
|
parameter_model = types._GenerateVideosParameters(
|
5150
5233
|
model=model,
|
5151
5234
|
prompt=prompt,
|
5235
|
+
image=image,
|
5152
5236
|
config=config,
|
5153
5237
|
)
|
5154
5238
|
|
@@ -5181,7 +5265,7 @@ class Models(_api_module.BaseModule):
|
|
5181
5265
|
http_options: Optional[types.HttpOptionsOrDict] = None
|
5182
5266
|
if isinstance(config, dict):
|
5183
5267
|
http_options = config.get('http_options', None)
|
5184
|
-
elif hasattr(config, 'http_options'):
|
5268
|
+
elif hasattr(config, 'http_options') and config is not None:
|
5185
5269
|
http_options = config.http_options
|
5186
5270
|
|
5187
5271
|
request_dict = _common.convert_to_dict(request_dict)
|
@@ -5216,21 +5300,21 @@ class Models(_api_module.BaseModule):
|
|
5216
5300
|
"""Makes an API request to generate content using a model.
|
5217
5301
|
|
5218
5302
|
For the `model` parameter, supported formats for Vertex AI API include:
|
5219
|
-
- The Gemini model ID, for example: 'gemini-
|
5303
|
+
- The Gemini model ID, for example: 'gemini-2.0-flash'
|
5220
5304
|
- The full resource name starts with 'projects/', for example:
|
5221
|
-
'projects/my-project-id/locations/us-central1/publishers/google/models/gemini-
|
5305
|
+
'projects/my-project-id/locations/us-central1/publishers/google/models/gemini-2.0-flash'
|
5222
5306
|
- The partial resource name with 'publishers/', for example:
|
5223
|
-
'publishers/google/models/gemini-
|
5307
|
+
'publishers/google/models/gemini-2.0-flash' or
|
5224
5308
|
'publishers/meta/models/llama-3.1-405b-instruct-maas'
|
5225
5309
|
- `/` separated publisher and model name, for example:
|
5226
|
-
'google/gemini-
|
5310
|
+
'google/gemini-2.0-flash' or 'meta/llama-3.1-405b-instruct-maas'
|
5227
5311
|
|
5228
5312
|
For the `model` parameter, supported formats for Gemini API include:
|
5229
|
-
- The Gemini model ID, for example: 'gemini-
|
5313
|
+
- The Gemini model ID, for example: 'gemini-2.0-flash'
|
5230
5314
|
- The model name starts with 'models/', for example:
|
5231
|
-
'models/gemini-
|
5232
|
-
-
|
5233
|
-
|
5315
|
+
'models/gemini-2.0-flash'
|
5316
|
+
- For tuned models, the model name starts with 'tunedModels/',
|
5317
|
+
for example:
|
5234
5318
|
'tunedModels/1234567890123456789'
|
5235
5319
|
|
5236
5320
|
Some models support multimodal input and output.
|
@@ -5247,7 +5331,7 @@ class Models(_api_module.BaseModule):
|
|
5247
5331
|
)
|
5248
5332
|
|
5249
5333
|
response = client.models.generate_content(
|
5250
|
-
model='gemini-
|
5334
|
+
model='gemini-2.0-flash',
|
5251
5335
|
contents='''What is a good name for a flower shop that specializes in
|
5252
5336
|
selling bouquets of dried flowers?'''
|
5253
5337
|
)
|
@@ -5258,7 +5342,7 @@ class Models(_api_module.BaseModule):
|
|
5258
5342
|
# * Timeless Petals
|
5259
5343
|
|
5260
5344
|
response = client.models.generate_content(
|
5261
|
-
model='gemini-
|
5345
|
+
model='gemini-2.0-flash',
|
5262
5346
|
contents=[
|
5263
5347
|
types.Part.from_text('What is shown in this image?'),
|
5264
5348
|
types.Part.from_uri('gs://generativeai-downloads/images/scones.jpg',
|
@@ -5279,7 +5363,7 @@ class Models(_api_module.BaseModule):
|
|
5279
5363
|
f'AFC is enabled with max remote calls: {remaining_remote_calls_afc}.'
|
5280
5364
|
)
|
5281
5365
|
automatic_function_calling_history: list[types.Content] = []
|
5282
|
-
response =
|
5366
|
+
response = types.GenerateContentResponse()
|
5283
5367
|
i = 0
|
5284
5368
|
while remaining_remote_calls_afc > 0:
|
5285
5369
|
i += 1
|
@@ -5315,11 +5399,12 @@ class Models(_api_module.BaseModule):
|
|
5315
5399
|
contents = t.t_contents(self._api_client, contents)
|
5316
5400
|
if not automatic_function_calling_history:
|
5317
5401
|
automatic_function_calling_history.extend(contents)
|
5318
|
-
contents
|
5319
|
-
|
5402
|
+
if isinstance(contents, list):
|
5403
|
+
contents.append(func_call_content)
|
5404
|
+
contents.append(func_response_content)
|
5320
5405
|
automatic_function_calling_history.append(func_call_content)
|
5321
5406
|
automatic_function_calling_history.append(func_response_content)
|
5322
|
-
if _extra_utils.should_append_afc_history(config):
|
5407
|
+
if _extra_utils.should_append_afc_history(config) and response is not None:
|
5323
5408
|
response.automatic_function_calling_history = (
|
5324
5409
|
automatic_function_calling_history
|
5325
5410
|
)
|
@@ -5335,21 +5420,21 @@ class Models(_api_module.BaseModule):
|
|
5335
5420
|
"""Makes an API request to generate content using a model and yields the model's response in chunks.
|
5336
5421
|
|
5337
5422
|
For the `model` parameter, supported formats for Vertex AI API include:
|
5338
|
-
- The Gemini model ID, for example: 'gemini-
|
5423
|
+
- The Gemini model ID, for example: 'gemini-2.0-flash'
|
5339
5424
|
- The full resource name starts with 'projects/', for example:
|
5340
|
-
'projects/my-project-id/locations/us-central1/publishers/google/models/gemini-
|
5425
|
+
'projects/my-project-id/locations/us-central1/publishers/google/models/gemini-2.0-flash'
|
5341
5426
|
- The partial resource name with 'publishers/', for example:
|
5342
|
-
'publishers/google/models/gemini-
|
5427
|
+
'publishers/google/models/gemini-2.0-flash' or
|
5343
5428
|
'publishers/meta/models/llama-3.1-405b-instruct-maas'
|
5344
5429
|
- `/` separated publisher and model name, for example:
|
5345
|
-
'google/gemini-
|
5430
|
+
'google/gemini-2.0-flash' or 'meta/llama-3.1-405b-instruct-maas'
|
5346
5431
|
|
5347
5432
|
For the `model` parameter, supported formats for Gemini API include:
|
5348
|
-
- The Gemini model ID, for example: 'gemini-
|
5433
|
+
- The Gemini model ID, for example: 'gemini-2.0-flash'
|
5349
5434
|
- The model name starts with 'models/', for example:
|
5350
|
-
'models/gemini-
|
5351
|
-
-
|
5352
|
-
|
5435
|
+
'models/gemini-2.0-flash'
|
5436
|
+
- For tuned models, the model name starts with 'tunedModels/',
|
5437
|
+
for example:
|
5353
5438
|
'tunedModels/1234567890123456789'
|
5354
5439
|
|
5355
5440
|
Some models support multimodal input and output.
|
@@ -5366,7 +5451,7 @@ class Models(_api_module.BaseModule):
|
|
5366
5451
|
)
|
5367
5452
|
|
5368
5453
|
for chunk in client.models.generate_content_stream(
|
5369
|
-
model='gemini-
|
5454
|
+
model='gemini-2.0-flash',
|
5370
5455
|
contents='''What is a good name for a flower shop that specializes in
|
5371
5456
|
selling bouquets of dried flowers?'''
|
5372
5457
|
):
|
@@ -5377,7 +5462,7 @@ class Models(_api_module.BaseModule):
|
|
5377
5462
|
# * Timeless Petals
|
5378
5463
|
|
5379
5464
|
for chunk in client.models.generate_content_stream(
|
5380
|
-
model='gemini-
|
5465
|
+
model='gemini-2.0-flash',
|
5381
5466
|
contents=[
|
5382
5467
|
types.Part.from_text('What is shown in this image?'),
|
5383
5468
|
types.Part.from_uri('gs://generativeai-downloads/images/scones.jpg',
|
@@ -5467,8 +5552,9 @@ class Models(_api_module.BaseModule):
|
|
5467
5552
|
contents = t.t_contents(self._api_client, contents)
|
5468
5553
|
if not automatic_function_calling_history:
|
5469
5554
|
automatic_function_calling_history.extend(contents)
|
5470
|
-
contents
|
5471
|
-
|
5555
|
+
if isinstance(contents, list):
|
5556
|
+
contents.append(func_call_content)
|
5557
|
+
contents.append(func_response_content)
|
5472
5558
|
automatic_function_calling_history.append(func_call_content)
|
5473
5559
|
automatic_function_calling_history.append(func_response_content)
|
5474
5560
|
|
@@ -5623,7 +5709,7 @@ class AsyncModels(_api_module.BaseModule):
|
|
5623
5709
|
http_options: Optional[types.HttpOptionsOrDict] = None
|
5624
5710
|
if isinstance(config, dict):
|
5625
5711
|
http_options = config.get('http_options', None)
|
5626
|
-
elif hasattr(config, 'http_options'):
|
5712
|
+
elif hasattr(config, 'http_options') and config is not None:
|
5627
5713
|
http_options = config.http_options
|
5628
5714
|
|
5629
5715
|
request_dict = _common.convert_to_dict(request_dict)
|
@@ -5694,7 +5780,7 @@ class AsyncModels(_api_module.BaseModule):
|
|
5694
5780
|
http_options: Optional[types.HttpOptionsOrDict] = None
|
5695
5781
|
if isinstance(config, dict):
|
5696
5782
|
http_options = config.get('http_options', None)
|
5697
|
-
elif hasattr(config, 'http_options'):
|
5783
|
+
elif hasattr(config, 'http_options') and config is not None:
|
5698
5784
|
http_options = config.http_options
|
5699
5785
|
|
5700
5786
|
request_dict = _common.convert_to_dict(request_dict)
|
@@ -5789,7 +5875,7 @@ class AsyncModels(_api_module.BaseModule):
|
|
5789
5875
|
http_options: Optional[types.HttpOptionsOrDict] = None
|
5790
5876
|
if isinstance(config, dict):
|
5791
5877
|
http_options = config.get('http_options', None)
|
5792
|
-
elif hasattr(config, 'http_options'):
|
5878
|
+
elif hasattr(config, 'http_options') and config is not None:
|
5793
5879
|
http_options = config.http_options
|
5794
5880
|
|
5795
5881
|
request_dict = _common.convert_to_dict(request_dict)
|
@@ -5879,7 +5965,7 @@ class AsyncModels(_api_module.BaseModule):
|
|
5879
5965
|
http_options: Optional[types.HttpOptionsOrDict] = None
|
5880
5966
|
if isinstance(config, dict):
|
5881
5967
|
http_options = config.get('http_options', None)
|
5882
|
-
elif hasattr(config, 'http_options'):
|
5968
|
+
elif hasattr(config, 'http_options') and config is not None:
|
5883
5969
|
http_options = config.http_options
|
5884
5970
|
|
5885
5971
|
request_dict = _common.convert_to_dict(request_dict)
|
@@ -5983,7 +6069,7 @@ class AsyncModels(_api_module.BaseModule):
|
|
5983
6069
|
http_options: Optional[types.HttpOptionsOrDict] = None
|
5984
6070
|
if isinstance(config, dict):
|
5985
6071
|
http_options = config.get('http_options', None)
|
5986
|
-
elif hasattr(config, 'http_options'):
|
6072
|
+
elif hasattr(config, 'http_options') and config is not None:
|
5987
6073
|
http_options = config.http_options
|
5988
6074
|
|
5989
6075
|
request_dict = _common.convert_to_dict(request_dict)
|
@@ -6054,7 +6140,7 @@ class AsyncModels(_api_module.BaseModule):
|
|
6054
6140
|
http_options: Optional[types.HttpOptionsOrDict] = None
|
6055
6141
|
if isinstance(config, dict):
|
6056
6142
|
http_options = config.get('http_options', None)
|
6057
|
-
elif hasattr(config, 'http_options'):
|
6143
|
+
elif hasattr(config, 'http_options') and config is not None:
|
6058
6144
|
http_options = config.http_options
|
6059
6145
|
|
6060
6146
|
request_dict = _common.convert_to_dict(request_dict)
|
@@ -6116,7 +6202,7 @@ class AsyncModels(_api_module.BaseModule):
|
|
6116
6202
|
http_options: Optional[types.HttpOptionsOrDict] = None
|
6117
6203
|
if isinstance(config, dict):
|
6118
6204
|
http_options = config.get('http_options', None)
|
6119
|
-
elif hasattr(config, 'http_options'):
|
6205
|
+
elif hasattr(config, 'http_options') and config is not None:
|
6120
6206
|
http_options = config.http_options
|
6121
6207
|
|
6122
6208
|
request_dict = _common.convert_to_dict(request_dict)
|
@@ -6173,7 +6259,7 @@ class AsyncModels(_api_module.BaseModule):
|
|
6173
6259
|
http_options: Optional[types.HttpOptionsOrDict] = None
|
6174
6260
|
if isinstance(config, dict):
|
6175
6261
|
http_options = config.get('http_options', None)
|
6176
|
-
elif hasattr(config, 'http_options'):
|
6262
|
+
elif hasattr(config, 'http_options') and config is not None:
|
6177
6263
|
http_options = config.http_options
|
6178
6264
|
|
6179
6265
|
request_dict = _common.convert_to_dict(request_dict)
|
@@ -6238,7 +6324,7 @@ class AsyncModels(_api_module.BaseModule):
|
|
6238
6324
|
http_options: Optional[types.HttpOptionsOrDict] = None
|
6239
6325
|
if isinstance(config, dict):
|
6240
6326
|
http_options = config.get('http_options', None)
|
6241
|
-
elif hasattr(config, 'http_options'):
|
6327
|
+
elif hasattr(config, 'http_options') and config is not None:
|
6242
6328
|
http_options = config.http_options
|
6243
6329
|
|
6244
6330
|
request_dict = _common.convert_to_dict(request_dict)
|
@@ -6299,7 +6385,7 @@ class AsyncModels(_api_module.BaseModule):
|
|
6299
6385
|
http_options: Optional[types.HttpOptionsOrDict] = None
|
6300
6386
|
if isinstance(config, dict):
|
6301
6387
|
http_options = config.get('http_options', None)
|
6302
|
-
elif hasattr(config, 'http_options'):
|
6388
|
+
elif hasattr(config, 'http_options') and config is not None:
|
6303
6389
|
http_options = config.http_options
|
6304
6390
|
|
6305
6391
|
request_dict = _common.convert_to_dict(request_dict)
|
@@ -6345,7 +6431,7 @@ class AsyncModels(_api_module.BaseModule):
|
|
6345
6431
|
.. code-block:: python
|
6346
6432
|
|
6347
6433
|
response = await client.aio.models.count_tokens(
|
6348
|
-
model='gemini-
|
6434
|
+
model='gemini-2.0-flash',
|
6349
6435
|
contents='What is your name?',
|
6350
6436
|
)
|
6351
6437
|
print(response)
|
@@ -6387,7 +6473,7 @@ class AsyncModels(_api_module.BaseModule):
|
|
6387
6473
|
http_options: Optional[types.HttpOptionsOrDict] = None
|
6388
6474
|
if isinstance(config, dict):
|
6389
6475
|
http_options = config.get('http_options', None)
|
6390
|
-
elif hasattr(config, 'http_options'):
|
6476
|
+
elif hasattr(config, 'http_options') and config is not None:
|
6391
6477
|
http_options = config.http_options
|
6392
6478
|
|
6393
6479
|
request_dict = _common.convert_to_dict(request_dict)
|
@@ -6419,11 +6505,10 @@ class AsyncModels(_api_module.BaseModule):
|
|
6419
6505
|
contents: Union[types.ContentListUnion, types.ContentListUnionDict],
|
6420
6506
|
config: Optional[types.ComputeTokensConfigOrDict] = None,
|
6421
6507
|
) -> types.ComputeTokensResponse:
|
6422
|
-
"""
|
6508
|
+
"""Given a list of contents, returns a corresponding TokensInfo containing the
|
6423
6509
|
|
6424
|
-
|
6510
|
+
list of tokens and list of token ids.
|
6425
6511
|
|
6426
|
-
This method is not supported by the Gemini Developer API.
|
6427
6512
|
|
6428
6513
|
Args:
|
6429
6514
|
model (str): The model to use.
|
@@ -6434,7 +6519,7 @@ class AsyncModels(_api_module.BaseModule):
|
|
6434
6519
|
.. code-block:: python
|
6435
6520
|
|
6436
6521
|
response = await client.aio.models.compute_tokens(
|
6437
|
-
model='gemini-
|
6522
|
+
model='gemini-2.0-flash',
|
6438
6523
|
contents='What is your name?',
|
6439
6524
|
)
|
6440
6525
|
print(response)
|
@@ -6470,7 +6555,7 @@ class AsyncModels(_api_module.BaseModule):
|
|
6470
6555
|
http_options: Optional[types.HttpOptionsOrDict] = None
|
6471
6556
|
if isinstance(config, dict):
|
6472
6557
|
http_options = config.get('http_options', None)
|
6473
|
-
elif hasattr(config, 'http_options'):
|
6558
|
+
elif hasattr(config, 'http_options') and config is not None:
|
6474
6559
|
http_options = config.http_options
|
6475
6560
|
|
6476
6561
|
request_dict = _common.convert_to_dict(request_dict)
|
@@ -6503,6 +6588,7 @@ class AsyncModels(_api_module.BaseModule):
|
|
6503
6588
|
*,
|
6504
6589
|
model: str,
|
6505
6590
|
prompt: Optional[str] = None,
|
6591
|
+
image: Optional[types.ImageOrDict] = None,
|
6506
6592
|
config: Optional[types.GenerateVideosConfigOrDict] = None,
|
6507
6593
|
) -> types.GenerateVideosOperation:
|
6508
6594
|
"""Generates videos based on a text description and configuration.
|
@@ -6530,6 +6616,7 @@ class AsyncModels(_api_module.BaseModule):
|
|
6530
6616
|
parameter_model = types._GenerateVideosParameters(
|
6531
6617
|
model=model,
|
6532
6618
|
prompt=prompt,
|
6619
|
+
image=image,
|
6533
6620
|
config=config,
|
6534
6621
|
)
|
6535
6622
|
|
@@ -6562,7 +6649,7 @@ class AsyncModels(_api_module.BaseModule):
|
|
6562
6649
|
http_options: Optional[types.HttpOptionsOrDict] = None
|
6563
6650
|
if isinstance(config, dict):
|
6564
6651
|
http_options = config.get('http_options', None)
|
6565
|
-
elif hasattr(config, 'http_options'):
|
6652
|
+
elif hasattr(config, 'http_options') and config is not None:
|
6566
6653
|
http_options = config.http_options
|
6567
6654
|
|
6568
6655
|
request_dict = _common.convert_to_dict(request_dict)
|
@@ -6610,7 +6697,7 @@ class AsyncModels(_api_module.BaseModule):
|
|
6610
6697
|
)
|
6611
6698
|
|
6612
6699
|
response = await client.aio.models.generate_content(
|
6613
|
-
model='gemini-
|
6700
|
+
model='gemini-2.0-flash',
|
6614
6701
|
contents='User input: I like bagels. Answer:',
|
6615
6702
|
config=types.GenerateContentConfig(
|
6616
6703
|
system_instruction=
|
@@ -6632,7 +6719,7 @@ class AsyncModels(_api_module.BaseModule):
|
|
6632
6719
|
f'AFC is enabled with max remote calls: {remaining_remote_calls_afc}.'
|
6633
6720
|
)
|
6634
6721
|
automatic_function_calling_history: list[types.Content] = []
|
6635
|
-
response =
|
6722
|
+
response = types.GenerateContentResponse()
|
6636
6723
|
while remaining_remote_calls_afc > 0:
|
6637
6724
|
response = await self._generate_content(
|
6638
6725
|
model=model, contents=contents, config=config
|
@@ -6665,12 +6752,13 @@ class AsyncModels(_api_module.BaseModule):
|
|
6665
6752
|
contents = t.t_contents(self._api_client, contents)
|
6666
6753
|
if not automatic_function_calling_history:
|
6667
6754
|
automatic_function_calling_history.extend(contents)
|
6668
|
-
contents
|
6669
|
-
|
6755
|
+
if isinstance(contents, list):
|
6756
|
+
contents.append(func_call_content)
|
6757
|
+
contents.append(func_response_content)
|
6670
6758
|
automatic_function_calling_history.append(func_call_content)
|
6671
6759
|
automatic_function_calling_history.append(func_response_content)
|
6672
6760
|
|
6673
|
-
if _extra_utils.should_append_afc_history(config):
|
6761
|
+
if _extra_utils.should_append_afc_history(config) and response is not None:
|
6674
6762
|
response.automatic_function_calling_history = (
|
6675
6763
|
automatic_function_calling_history
|
6676
6764
|
)
|
@@ -6686,21 +6774,21 @@ class AsyncModels(_api_module.BaseModule):
|
|
6686
6774
|
"""Makes an API request to generate content using a model and yields the model's response in chunks.
|
6687
6775
|
|
6688
6776
|
For the `model` parameter, supported formats for Vertex AI API include:
|
6689
|
-
- The Gemini model ID, for example: 'gemini-
|
6777
|
+
- The Gemini model ID, for example: 'gemini-2.0-flash'
|
6690
6778
|
- The full resource name starts with 'projects/', for example:
|
6691
|
-
'projects/my-project-id/locations/us-central1/publishers/google/models/gemini-
|
6779
|
+
'projects/my-project-id/locations/us-central1/publishers/google/models/gemini-2.0-flash'
|
6692
6780
|
- The partial resource name with 'publishers/', for example:
|
6693
|
-
'publishers/google/models/gemini-
|
6781
|
+
'publishers/google/models/gemini-2.0-flash' or
|
6694
6782
|
'publishers/meta/models/llama-3.1-405b-instruct-maas'
|
6695
6783
|
- `/` separated publisher and model name, for example:
|
6696
|
-
'google/gemini-
|
6784
|
+
'google/gemini-2.0-flash' or 'meta/llama-3.1-405b-instruct-maas'
|
6697
6785
|
|
6698
6786
|
For the `model` parameter, supported formats for Gemini API include:
|
6699
|
-
- The Gemini model ID, for example: 'gemini-
|
6787
|
+
- The Gemini model ID, for example: 'gemini-2.0-flash'
|
6700
6788
|
- The model name starts with 'models/', for example:
|
6701
|
-
'models/gemini-
|
6702
|
-
-
|
6703
|
-
|
6789
|
+
'models/gemini-2.0-flash'
|
6790
|
+
- For tuned models, the model name starts with 'tunedModels/',
|
6791
|
+
for example:
|
6704
6792
|
'tunedModels/1234567890123456789'
|
6705
6793
|
|
6706
6794
|
Some models support multimodal input and output.
|
@@ -6717,7 +6805,7 @@ class AsyncModels(_api_module.BaseModule):
|
|
6717
6805
|
)
|
6718
6806
|
|
6719
6807
|
async for chunk in await client.aio.models.generate_content_stream(
|
6720
|
-
model='gemini-
|
6808
|
+
model='gemini-2.0-flash',
|
6721
6809
|
contents='''What is a good name for a flower shop that specializes in
|
6722
6810
|
selling bouquets of dried flowers?'''
|
6723
6811
|
):
|
@@ -6728,7 +6816,7 @@ class AsyncModels(_api_module.BaseModule):
|
|
6728
6816
|
# * Timeless Petals
|
6729
6817
|
|
6730
6818
|
async for chunk in await client.aio.models.generate_content_stream(
|
6731
|
-
model='gemini-
|
6819
|
+
model='gemini-2.0-flash',
|
6732
6820
|
contents=[
|
6733
6821
|
types.Part.from_text('What is shown in this image?'),
|
6734
6822
|
types.Part.from_uri('gs://generativeai-downloads/images/scones.jpg',
|
@@ -6827,8 +6915,9 @@ class AsyncModels(_api_module.BaseModule):
|
|
6827
6915
|
contents = t.t_contents(self._api_client, contents)
|
6828
6916
|
if not automatic_function_calling_history:
|
6829
6917
|
automatic_function_calling_history.extend(contents)
|
6830
|
-
contents
|
6831
|
-
|
6918
|
+
if isinstance(contents, list):
|
6919
|
+
contents.append(func_call_content)
|
6920
|
+
contents.append(func_response_content)
|
6832
6921
|
automatic_function_calling_history.append(func_call_content)
|
6833
6922
|
automatic_function_calling_history.append(func_response_content)
|
6834
6923
|
|