google-genai 1.5.0__py3-none-any.whl → 1.7.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
google/genai/models.py CHANGED
@@ -175,15 +175,9 @@ def _Schema_to_mldev(
175
175
  if getv(from_object, ['pattern']) is not None:
176
176
  raise ValueError('pattern parameter is not supported in Gemini API.')
177
177
 
178
- if getv(from_object, ['minimum']) is not None:
179
- raise ValueError('minimum parameter is not supported in Gemini API.')
180
-
181
178
  if getv(from_object, ['default']) is not None:
182
179
  raise ValueError('default parameter is not supported in Gemini API.')
183
180
 
184
- if getv(from_object, ['any_of']) is not None:
185
- raise ValueError('any_of parameter is not supported in Gemini API.')
186
-
187
181
  if getv(from_object, ['max_length']) is not None:
188
182
  raise ValueError('max_length parameter is not supported in Gemini API.')
189
183
 
@@ -196,12 +190,12 @@ def _Schema_to_mldev(
196
190
  if getv(from_object, ['min_properties']) is not None:
197
191
  raise ValueError('min_properties parameter is not supported in Gemini API.')
198
192
 
199
- if getv(from_object, ['maximum']) is not None:
200
- raise ValueError('maximum parameter is not supported in Gemini API.')
201
-
202
193
  if getv(from_object, ['max_properties']) is not None:
203
194
  raise ValueError('max_properties parameter is not supported in Gemini API.')
204
195
 
196
+ if getv(from_object, ['any_of']) is not None:
197
+ setv(to_object, ['anyOf'], getv(from_object, ['any_of']))
198
+
205
199
  if getv(from_object, ['description']) is not None:
206
200
  setv(to_object, ['description'], getv(from_object, ['description']))
207
201
 
@@ -217,9 +211,15 @@ def _Schema_to_mldev(
217
211
  if getv(from_object, ['max_items']) is not None:
218
212
  setv(to_object, ['maxItems'], getv(from_object, ['max_items']))
219
213
 
214
+ if getv(from_object, ['maximum']) is not None:
215
+ setv(to_object, ['maximum'], getv(from_object, ['maximum']))
216
+
220
217
  if getv(from_object, ['min_items']) is not None:
221
218
  setv(to_object, ['minItems'], getv(from_object, ['min_items']))
222
219
 
220
+ if getv(from_object, ['minimum']) is not None:
221
+ setv(to_object, ['minimum'], getv(from_object, ['minimum']))
222
+
223
223
  if getv(from_object, ['nullable']) is not None:
224
224
  setv(to_object, ['nullable'], getv(from_object, ['nullable']))
225
225
 
@@ -254,15 +254,9 @@ def _Schema_to_vertex(
254
254
  if getv(from_object, ['pattern']) is not None:
255
255
  setv(to_object, ['pattern'], getv(from_object, ['pattern']))
256
256
 
257
- if getv(from_object, ['minimum']) is not None:
258
- setv(to_object, ['minimum'], getv(from_object, ['minimum']))
259
-
260
257
  if getv(from_object, ['default']) is not None:
261
258
  setv(to_object, ['default'], getv(from_object, ['default']))
262
259
 
263
- if getv(from_object, ['any_of']) is not None:
264
- setv(to_object, ['anyOf'], getv(from_object, ['any_of']))
265
-
266
260
  if getv(from_object, ['max_length']) is not None:
267
261
  setv(to_object, ['maxLength'], getv(from_object, ['max_length']))
268
262
 
@@ -275,12 +269,12 @@ def _Schema_to_vertex(
275
269
  if getv(from_object, ['min_properties']) is not None:
276
270
  setv(to_object, ['minProperties'], getv(from_object, ['min_properties']))
277
271
 
278
- if getv(from_object, ['maximum']) is not None:
279
- setv(to_object, ['maximum'], getv(from_object, ['maximum']))
280
-
281
272
  if getv(from_object, ['max_properties']) is not None:
282
273
  setv(to_object, ['maxProperties'], getv(from_object, ['max_properties']))
283
274
 
275
+ if getv(from_object, ['any_of']) is not None:
276
+ setv(to_object, ['anyOf'], getv(from_object, ['any_of']))
277
+
284
278
  if getv(from_object, ['description']) is not None:
285
279
  setv(to_object, ['description'], getv(from_object, ['description']))
286
280
 
@@ -296,9 +290,15 @@ def _Schema_to_vertex(
296
290
  if getv(from_object, ['max_items']) is not None:
297
291
  setv(to_object, ['maxItems'], getv(from_object, ['max_items']))
298
292
 
293
+ if getv(from_object, ['maximum']) is not None:
294
+ setv(to_object, ['maximum'], getv(from_object, ['maximum']))
295
+
299
296
  if getv(from_object, ['min_items']) is not None:
300
297
  setv(to_object, ['minItems'], getv(from_object, ['min_items']))
301
298
 
299
+ if getv(from_object, ['minimum']) is not None:
300
+ setv(to_object, ['minimum'], getv(from_object, ['minimum']))
301
+
302
302
  if getv(from_object, ['nullable']) is not None:
303
303
  setv(to_object, ['nullable'], getv(from_object, ['nullable']))
304
304
 
@@ -1351,10 +1351,8 @@ def _GenerateImagesConfig_to_mldev(
1351
1351
  raise ValueError('output_gcs_uri parameter is not supported in Gemini API.')
1352
1352
 
1353
1353
  if getv(from_object, ['negative_prompt']) is not None:
1354
- setv(
1355
- parent_object,
1356
- ['parameters', 'negativePrompt'],
1357
- getv(from_object, ['negative_prompt']),
1354
+ raise ValueError(
1355
+ 'negative_prompt parameter is not supported in Gemini API.'
1358
1356
  )
1359
1357
 
1360
1358
  if getv(from_object, ['number_of_images']) is not None:
@@ -1910,10 +1908,8 @@ def _EditImageConfig_to_mldev(
1910
1908
  raise ValueError('output_gcs_uri parameter is not supported in Gemini API.')
1911
1909
 
1912
1910
  if getv(from_object, ['negative_prompt']) is not None:
1913
- setv(
1914
- parent_object,
1915
- ['parameters', 'negativePrompt'],
1916
- getv(from_object, ['negative_prompt']),
1911
+ raise ValueError(
1912
+ 'negative_prompt parameter is not supported in Gemini API.'
1917
1913
  )
1918
1914
 
1919
1915
  if getv(from_object, ['number_of_images']) is not None:
@@ -2003,6 +1999,9 @@ def _EditImageConfig_to_mldev(
2003
1999
  getv(from_object, ['edit_mode']),
2004
2000
  )
2005
2001
 
2002
+ if getv(from_object, ['base_steps']) is not None:
2003
+ raise ValueError('base_steps parameter is not supported in Gemini API.')
2004
+
2006
2005
  return to_object
2007
2006
 
2008
2007
 
@@ -2107,6 +2106,13 @@ def _EditImageConfig_to_vertex(
2107
2106
  getv(from_object, ['edit_mode']),
2108
2107
  )
2109
2108
 
2109
+ if getv(from_object, ['base_steps']) is not None:
2110
+ setv(
2111
+ parent_object,
2112
+ ['parameters', 'editConfig', 'baseSteps'],
2113
+ getv(from_object, ['base_steps']),
2114
+ )
2115
+
2110
2116
  return to_object
2111
2117
 
2112
2118
 
@@ -3561,6 +3567,48 @@ def _Image_from_vertex(
3561
3567
  return to_object
3562
3568
 
3563
3569
 
3570
+ def _SafetyAttributes_from_mldev(
3571
+ api_client: BaseApiClient,
3572
+ from_object: Union[dict, object],
3573
+ parent_object: Optional[dict] = None,
3574
+ ) -> dict:
3575
+ to_object: dict[str, Any] = {}
3576
+ if getv(from_object, ['safetyAttributes', 'categories']) is not None:
3577
+ setv(
3578
+ to_object,
3579
+ ['categories'],
3580
+ getv(from_object, ['safetyAttributes', 'categories']),
3581
+ )
3582
+
3583
+ if getv(from_object, ['safetyAttributes', 'scores']) is not None:
3584
+ setv(
3585
+ to_object, ['scores'], getv(from_object, ['safetyAttributes', 'scores'])
3586
+ )
3587
+
3588
+ return to_object
3589
+
3590
+
3591
+ def _SafetyAttributes_from_vertex(
3592
+ api_client: BaseApiClient,
3593
+ from_object: Union[dict, object],
3594
+ parent_object: Optional[dict] = None,
3595
+ ) -> dict:
3596
+ to_object: dict[str, Any] = {}
3597
+ if getv(from_object, ['safetyAttributes', 'categories']) is not None:
3598
+ setv(
3599
+ to_object,
3600
+ ['categories'],
3601
+ getv(from_object, ['safetyAttributes', 'categories']),
3602
+ )
3603
+
3604
+ if getv(from_object, ['safetyAttributes', 'scores']) is not None:
3605
+ setv(
3606
+ to_object, ['scores'], getv(from_object, ['safetyAttributes', 'scores'])
3607
+ )
3608
+
3609
+ return to_object
3610
+
3611
+
3564
3612
  def _GeneratedImage_from_mldev(
3565
3613
  api_client: BaseApiClient,
3566
3614
  from_object: Union[dict, object],
@@ -3581,6 +3629,15 @@ def _GeneratedImage_from_mldev(
3581
3629
  getv(from_object, ['raiFilteredReason']),
3582
3630
  )
3583
3631
 
3632
+ if getv(from_object, ['_self']) is not None:
3633
+ setv(
3634
+ to_object,
3635
+ ['safety_attributes'],
3636
+ _SafetyAttributes_from_mldev(
3637
+ api_client, getv(from_object, ['_self']), to_object
3638
+ ),
3639
+ )
3640
+
3584
3641
  return to_object
3585
3642
 
3586
3643
 
@@ -3604,6 +3661,15 @@ def _GeneratedImage_from_vertex(
3604
3661
  getv(from_object, ['raiFilteredReason']),
3605
3662
  )
3606
3663
 
3664
+ if getv(from_object, ['_self']) is not None:
3665
+ setv(
3666
+ to_object,
3667
+ ['safety_attributes'],
3668
+ _SafetyAttributes_from_vertex(
3669
+ api_client, getv(from_object, ['_self']), to_object
3670
+ ),
3671
+ )
3672
+
3607
3673
  if getv(from_object, ['prompt']) is not None:
3608
3674
  setv(to_object, ['enhanced_prompt'], getv(from_object, ['prompt']))
3609
3675
 
@@ -4539,7 +4605,7 @@ class Models(_api_module.BaseModule):
4539
4605
  self._api_client._verify_response(return_value)
4540
4606
  return return_value
4541
4607
 
4542
- def edit_image(
4608
+ def _edit_image(
4543
4609
  self,
4544
4610
  *,
4545
4611
  model: str,
@@ -4980,7 +5046,7 @@ class Models(_api_module.BaseModule):
4980
5046
  .. code-block:: python
4981
5047
 
4982
5048
  response = client.models.count_tokens(
4983
- model='gemini-1.5-flash',
5049
+ model='gemini-2.0-flash',
4984
5050
  contents='What is your name?',
4985
5051
  )
4986
5052
  print(response)
@@ -5054,9 +5120,9 @@ class Models(_api_module.BaseModule):
5054
5120
  contents: Union[types.ContentListUnion, types.ContentListUnionDict],
5055
5121
  config: Optional[types.ComputeTokensConfigOrDict] = None,
5056
5122
  ) -> types.ComputeTokensResponse:
5057
- """Return a list of tokens based on the input contents.
5123
+ """Given a list of contents, returns a corresponding TokensInfo containing the
5058
5124
 
5059
- Only text is supported.
5125
+ list of tokens and list of token ids.
5060
5126
 
5061
5127
  This method is not supported by the Gemini Developer API.
5062
5128
 
@@ -5069,7 +5135,7 @@ class Models(_api_module.BaseModule):
5069
5135
  .. code-block:: python
5070
5136
 
5071
5137
  response = client.models.compute_tokens(
5072
- model='gemini-1.5-flash',
5138
+ model='gemini-2.0-flash',
5073
5139
  contents='What is your name?',
5074
5140
  )
5075
5141
  print(response)
@@ -5234,21 +5300,21 @@ class Models(_api_module.BaseModule):
5234
5300
  """Makes an API request to generate content using a model.
5235
5301
 
5236
5302
  For the `model` parameter, supported formats for Vertex AI API include:
5237
- - The Gemini model ID, for example: 'gemini-1.5-flash-002'
5303
+ - The Gemini model ID, for example: 'gemini-2.0-flash'
5238
5304
  - The full resource name starts with 'projects/', for example:
5239
- 'projects/my-project-id/locations/us-central1/publishers/google/models/gemini-1.5-flash-002'
5305
+ 'projects/my-project-id/locations/us-central1/publishers/google/models/gemini-2.0-flash'
5240
5306
  - The partial resource name with 'publishers/', for example:
5241
- 'publishers/google/models/gemini-1.5-flash-002' or
5307
+ 'publishers/google/models/gemini-2.0-flash' or
5242
5308
  'publishers/meta/models/llama-3.1-405b-instruct-maas'
5243
5309
  - `/` separated publisher and model name, for example:
5244
- 'google/gemini-1.5-flash-002' or 'meta/llama-3.1-405b-instruct-maas'
5310
+ 'google/gemini-2.0-flash' or 'meta/llama-3.1-405b-instruct-maas'
5245
5311
 
5246
5312
  For the `model` parameter, supported formats for Gemini API include:
5247
- - The Gemini model ID, for example: 'gemini-1.5-flash-002'
5313
+ - The Gemini model ID, for example: 'gemini-2.0-flash'
5248
5314
  - The model name starts with 'models/', for example:
5249
- 'models/gemini-1.5-flash-002'
5250
- - if you would like to use a tuned model, the model name starts with
5251
- 'tunedModels/', for example:
5315
+ 'models/gemini-2.0-flash'
5316
+ - For tuned models, the model name starts with 'tunedModels/',
5317
+ for example:
5252
5318
  'tunedModels/1234567890123456789'
5253
5319
 
5254
5320
  Some models support multimodal input and output.
@@ -5265,7 +5331,7 @@ class Models(_api_module.BaseModule):
5265
5331
  )
5266
5332
 
5267
5333
  response = client.models.generate_content(
5268
- model='gemini-1.5-flash-002',
5334
+ model='gemini-2.0-flash',
5269
5335
  contents='''What is a good name for a flower shop that specializes in
5270
5336
  selling bouquets of dried flowers?'''
5271
5337
  )
@@ -5276,7 +5342,7 @@ class Models(_api_module.BaseModule):
5276
5342
  # * Timeless Petals
5277
5343
 
5278
5344
  response = client.models.generate_content(
5279
- model='gemini-1.5-flash-002',
5345
+ model='gemini-2.0-flash',
5280
5346
  contents=[
5281
5347
  types.Part.from_text('What is shown in this image?'),
5282
5348
  types.Part.from_uri('gs://generativeai-downloads/images/scones.jpg',
@@ -5354,21 +5420,21 @@ class Models(_api_module.BaseModule):
5354
5420
  """Makes an API request to generate content using a model and yields the model's response in chunks.
5355
5421
 
5356
5422
  For the `model` parameter, supported formats for Vertex AI API include:
5357
- - The Gemini model ID, for example: 'gemini-1.5-flash-002'
5423
+ - The Gemini model ID, for example: 'gemini-2.0-flash'
5358
5424
  - The full resource name starts with 'projects/', for example:
5359
- 'projects/my-project-id/locations/us-central1/publishers/google/models/gemini-1.5-flash-002'
5425
+ 'projects/my-project-id/locations/us-central1/publishers/google/models/gemini-2.0-flash'
5360
5426
  - The partial resource name with 'publishers/', for example:
5361
- 'publishers/google/models/gemini-1.5-flash-002' or
5427
+ 'publishers/google/models/gemini-2.0-flash' or
5362
5428
  'publishers/meta/models/llama-3.1-405b-instruct-maas'
5363
5429
  - `/` separated publisher and model name, for example:
5364
- 'google/gemini-1.5-flash-002' or 'meta/llama-3.1-405b-instruct-maas'
5430
+ 'google/gemini-2.0-flash' or 'meta/llama-3.1-405b-instruct-maas'
5365
5431
 
5366
5432
  For the `model` parameter, supported formats for Gemini API include:
5367
- - The Gemini model ID, for example: 'gemini-1.5-flash-002'
5433
+ - The Gemini model ID, for example: 'gemini-2.0-flash'
5368
5434
  - The model name starts with 'models/', for example:
5369
- 'models/gemini-1.5-flash-002'
5370
- - If you would like to use a tuned model, the model name starts with
5371
- 'tunedModels/', for example:
5435
+ 'models/gemini-2.0-flash'
5436
+ - For tuned models, the model name starts with 'tunedModels/',
5437
+ for example:
5372
5438
  'tunedModels/1234567890123456789'
5373
5439
 
5374
5440
  Some models support multimodal input and output.
@@ -5385,7 +5451,7 @@ class Models(_api_module.BaseModule):
5385
5451
  )
5386
5452
 
5387
5453
  for chunk in client.models.generate_content_stream(
5388
- model='gemini-1.5-flash-002',
5454
+ model='gemini-2.0-flash',
5389
5455
  contents='''What is a good name for a flower shop that specializes in
5390
5456
  selling bouquets of dried flowers?'''
5391
5457
  ):
@@ -5396,7 +5462,7 @@ class Models(_api_module.BaseModule):
5396
5462
  # * Timeless Petals
5397
5463
 
5398
5464
  for chunk in client.models.generate_content_stream(
5399
- model='gemini-1.5-flash-002',
5465
+ model='gemini-2.0-flash',
5400
5466
  contents=[
5401
5467
  types.Part.from_text('What is shown in this image?'),
5402
5468
  types.Part.from_uri('gs://generativeai-downloads/images/scones.jpg',
@@ -5492,6 +5558,62 @@ class Models(_api_module.BaseModule):
5492
5558
  automatic_function_calling_history.append(func_call_content)
5493
5559
  automatic_function_calling_history.append(func_response_content)
5494
5560
 
5561
+ def edit_image(
5562
+ self,
5563
+ *,
5564
+ model: str,
5565
+ prompt: str,
5566
+ reference_images: list[types._ReferenceImageAPIOrDict],
5567
+ config: Optional[types.EditImageConfigOrDict] = None,
5568
+ ) -> types.EditImageResponse:
5569
+ """Edits an image based on a text description and configuration.
5570
+
5571
+ Args:
5572
+ model (str): The model to use.
5573
+ prompt (str): A text description of the edit to apply to the image.
5574
+ reference_images (list[Union[RawReferenceImage, MaskReferenceImage,
5575
+ ControlReferenceImage, StyleReferenceImage, SubjectReferenceImage]): The
5576
+ reference images for editing.
5577
+ config (EditImageConfig): Configuration for editing.
5578
+
5579
+ Usage:
5580
+
5581
+ .. code-block:: python
5582
+
5583
+ from google.genai.types import RawReferenceImage, MaskReferenceImage
5584
+
5585
+ raw_ref_image = RawReferenceImage(
5586
+ reference_id=1,
5587
+ reference_image=types.Image.from_file(IMAGE_FILE_PATH),
5588
+ )
5589
+
5590
+ mask_ref_image = MaskReferenceImage(
5591
+ reference_id=2,
5592
+ config=types.MaskReferenceConfig(
5593
+ mask_mode='MASK_MODE_FOREGROUND',
5594
+ mask_dilation=0.06,
5595
+ ),
5596
+ )
5597
+ response = client.models.edit_image(
5598
+ model='imagen-3.0-capability-001',
5599
+ prompt='man with dog',
5600
+ reference_images=[raw_ref_image, mask_ref_image],
5601
+ config=types.EditImageConfig(
5602
+ edit_mode= "EDIT_MODE_INPAINT_INSERTION",
5603
+ number_of_images= 1,
5604
+ include_rai_reason= True,
5605
+ )
5606
+ )
5607
+ response.generated_images[0].image.show()
5608
+ # Shows a man with a dog instead of a cat.
5609
+ """
5610
+ return self._edit_image(
5611
+ model=model,
5612
+ prompt=prompt,
5613
+ reference_images=reference_images,
5614
+ config=config,
5615
+ )
5616
+
5495
5617
  def upscale_image(
5496
5618
  self,
5497
5619
  *,
@@ -5924,7 +6046,7 @@ class AsyncModels(_api_module.BaseModule):
5924
6046
  self._api_client._verify_response(return_value)
5925
6047
  return return_value
5926
6048
 
5927
- async def edit_image(
6049
+ async def _edit_image(
5928
6050
  self,
5929
6051
  *,
5930
6052
  model: str,
@@ -6365,7 +6487,7 @@ class AsyncModels(_api_module.BaseModule):
6365
6487
  .. code-block:: python
6366
6488
 
6367
6489
  response = await client.aio.models.count_tokens(
6368
- model='gemini-1.5-flash',
6490
+ model='gemini-2.0-flash',
6369
6491
  contents='What is your name?',
6370
6492
  )
6371
6493
  print(response)
@@ -6439,11 +6561,10 @@ class AsyncModels(_api_module.BaseModule):
6439
6561
  contents: Union[types.ContentListUnion, types.ContentListUnionDict],
6440
6562
  config: Optional[types.ComputeTokensConfigOrDict] = None,
6441
6563
  ) -> types.ComputeTokensResponse:
6442
- """Return a list of tokens based on the input contents.
6564
+ """Given a list of contents, returns a corresponding TokensInfo containing the
6443
6565
 
6444
- Only text is supported.
6566
+ list of tokens and list of token ids.
6445
6567
 
6446
- This method is not supported by the Gemini Developer API.
6447
6568
 
6448
6569
  Args:
6449
6570
  model (str): The model to use.
@@ -6454,7 +6575,7 @@ class AsyncModels(_api_module.BaseModule):
6454
6575
  .. code-block:: python
6455
6576
 
6456
6577
  response = await client.aio.models.compute_tokens(
6457
- model='gemini-1.5-flash',
6578
+ model='gemini-2.0-flash',
6458
6579
  contents='What is your name?',
6459
6580
  )
6460
6581
  print(response)
@@ -6632,7 +6753,7 @@ class AsyncModels(_api_module.BaseModule):
6632
6753
  )
6633
6754
 
6634
6755
  response = await client.aio.models.generate_content(
6635
- model='gemini-1.5-flash-002',
6756
+ model='gemini-2.0-flash',
6636
6757
  contents='User input: I like bagels. Answer:',
6637
6758
  config=types.GenerateContentConfig(
6638
6759
  system_instruction=
@@ -6709,21 +6830,21 @@ class AsyncModels(_api_module.BaseModule):
6709
6830
  """Makes an API request to generate content using a model and yields the model's response in chunks.
6710
6831
 
6711
6832
  For the `model` parameter, supported formats for Vertex AI API include:
6712
- - The Gemini model ID, for example: 'gemini-1.5-flash-002'
6833
+ - The Gemini model ID, for example: 'gemini-2.0-flash'
6713
6834
  - The full resource name starts with 'projects/', for example:
6714
- 'projects/my-project-id/locations/us-central1/publishers/google/models/gemini-1.5-flash-002'
6835
+ 'projects/my-project-id/locations/us-central1/publishers/google/models/gemini-2.0-flash'
6715
6836
  - The partial resource name with 'publishers/', for example:
6716
- 'publishers/google/models/gemini-1.5-flash-002' or
6837
+ 'publishers/google/models/gemini-2.0-flash' or
6717
6838
  'publishers/meta/models/llama-3.1-405b-instruct-maas'
6718
6839
  - `/` separated publisher and model name, for example:
6719
- 'google/gemini-1.5-flash-002' or 'meta/llama-3.1-405b-instruct-maas'
6840
+ 'google/gemini-2.0-flash' or 'meta/llama-3.1-405b-instruct-maas'
6720
6841
 
6721
6842
  For the `model` parameter, supported formats for Gemini API include:
6722
- - The Gemini model ID, for example: 'gemini-1.5-flash-002'
6843
+ - The Gemini model ID, for example: 'gemini-2.0-flash'
6723
6844
  - The model name starts with 'models/', for example:
6724
- 'models/gemini-1.5-flash-002'
6725
- - If you would like to use a tuned model, the model name starts with
6726
- 'tunedModels/', for example:
6845
+ 'models/gemini-2.0-flash'
6846
+ - For tuned models, the model name starts with 'tunedModels/',
6847
+ for example:
6727
6848
  'tunedModels/1234567890123456789'
6728
6849
 
6729
6850
  Some models support multimodal input and output.
@@ -6740,7 +6861,7 @@ class AsyncModels(_api_module.BaseModule):
6740
6861
  )
6741
6862
 
6742
6863
  async for chunk in await client.aio.models.generate_content_stream(
6743
- model='gemini-1.5-flash-002',
6864
+ model='gemini-2.0-flash',
6744
6865
  contents='''What is a good name for a flower shop that specializes in
6745
6866
  selling bouquets of dried flowers?'''
6746
6867
  ):
@@ -6751,7 +6872,7 @@ class AsyncModels(_api_module.BaseModule):
6751
6872
  # * Timeless Petals
6752
6873
 
6753
6874
  async for chunk in await client.aio.models.generate_content_stream(
6754
- model='gemini-1.5-flash-002',
6875
+ model='gemini-2.0-flash',
6755
6876
  contents=[
6756
6877
  types.Part.from_text('What is shown in this image?'),
6757
6878
  types.Part.from_uri('gs://generativeai-downloads/images/scones.jpg',
@@ -6858,6 +6979,62 @@ class AsyncModels(_api_module.BaseModule):
6858
6979
 
6859
6980
  return async_generator(model, contents, config)
6860
6981
 
6982
+ async def edit_image(
6983
+ self,
6984
+ *,
6985
+ model: str,
6986
+ prompt: str,
6987
+ reference_images: list[types._ReferenceImageAPIOrDict],
6988
+ config: Optional[types.EditImageConfigOrDict] = None,
6989
+ ) -> types.EditImageResponse:
6990
+ """Edits an image based on a text description and configuration.
6991
+
6992
+ Args:
6993
+ model (str): The model to use.
6994
+ prompt (str): A text description of the edit to apply to the image.
6995
+ reference_images (list[Union[RawReferenceImage, MaskReferenceImage,
6996
+ ControlReferenceImage, StyleReferenceImage, SubjectReferenceImage]): The
6997
+ reference images for editing.
6998
+ config (EditImageConfig): Configuration for editing.
6999
+
7000
+ Usage:
7001
+
7002
+ .. code-block:: python
7003
+
7004
+ from google.genai.types import RawReferenceImage, MaskReferenceImage
7005
+
7006
+ raw_ref_image = RawReferenceImage(
7007
+ reference_id=1,
7008
+ reference_image=types.Image.from_file(IMAGE_FILE_PATH),
7009
+ )
7010
+
7011
+ mask_ref_image = MaskReferenceImage(
7012
+ reference_id=2,
7013
+ config=types.MaskReferenceConfig(
7014
+ mask_mode='MASK_MODE_FOREGROUND',
7015
+ mask_dilation=0.06,
7016
+ ),
7017
+ )
7018
+ response = await client.aio.models.edit_image(
7019
+ model='imagen-3.0-capability-001',
7020
+ prompt='man with dog',
7021
+ reference_images=[raw_ref_image, mask_ref_image],
7022
+ config=types.EditImageConfig(
7023
+ edit_mode= "EDIT_MODE_INPAINT_INSERTION",
7024
+ number_of_images= 1,
7025
+ include_rai_reason= True,
7026
+ )
7027
+ )
7028
+ response.generated_images[0].image.show()
7029
+ # Shows a man with a dog instead of a cat.
7030
+ """
7031
+ return await self._edit_image(
7032
+ model=model,
7033
+ prompt=prompt,
7034
+ reference_images=reference_images,
7035
+ config=config,
7036
+ )
7037
+
6861
7038
  async def list(
6862
7039
  self,
6863
7040
  *,
@@ -465,10 +465,24 @@ class Operations(_api_module.BaseModule):
465
465
  # TODO(b/398233524): Cast operation types
466
466
  if self._api_client.vertexai:
467
467
  resource_name = operation_name.rpartition('/operations/')[0]
468
+ http_options = types.HttpOptions()
469
+ if isinstance(config, dict):
470
+ dict_options = config.get('http_options', None)
471
+ if dict_options is not None:
472
+ http_options = types.HttpOptions(**dict(dict_options))
473
+ elif isinstance(config, types.GetOperationConfig) and config is not None:
474
+ http_options = (
475
+ config.http_options
476
+ if config.http_options is not None
477
+ else types.HttpOptions()
478
+ )
479
+ fetch_operation_config = types.FetchPredictOperationConfig(
480
+ http_options=http_options
481
+ )
468
482
  return self._fetch_predict_operation(
469
483
  operation_name=operation_name,
470
484
  resource_name=resource_name,
471
- config=config,
485
+ config=fetch_operation_config,
472
486
  )
473
487
  else:
474
488
  return self._get_operation(
@@ -623,10 +637,24 @@ class AsyncOperations(_api_module.BaseModule):
623
637
 
624
638
  if self._api_client.vertexai:
625
639
  resource_name = operation_name.rpartition('/operations/')[0]
640
+ http_options = types.HttpOptions()
641
+ if isinstance(config, dict):
642
+ dict_options = config.get('http_options', None)
643
+ if dict_options is not None:
644
+ http_options = types.HttpOptions(**dict(dict_options))
645
+ elif isinstance(config, types.GetOperationConfig) and config is not None:
646
+ http_options = (
647
+ config.http_options
648
+ if config.http_options is not None
649
+ else types.HttpOptions()
650
+ )
651
+ fetch_operation_config = types.FetchPredictOperationConfig(
652
+ http_options=http_options
653
+ )
626
654
  return await self._fetch_predict_operation(
627
655
  operation_name=operation_name,
628
656
  resource_name=resource_name,
629
- config=config,
657
+ config=fetch_operation_config,
630
658
  )
631
659
  else:
632
660
  return await self._get_operation(
google/genai/pagers.py CHANGED
@@ -65,9 +65,9 @@ class _BasePager(Generic[T]):
65
65
 
66
66
  @property
67
67
  def page(self) -> list[T]:
68
- """Returns the current page, which is a list of items.
68
+ """Returns a subset of the entire list of items.
69
69
 
70
- The returned list of items is a subset of the entire list.
70
+ For the number of items returned, see `pageSize()`.
71
71
 
72
72
  Usage:
73
73
 
@@ -97,9 +97,7 @@ class _BasePager(Generic[T]):
97
97
 
98
98
  @property
99
99
  def page_size(self) -> int:
100
- """Returns the length of the page fetched each time by this pager.
101
-
102
- The number of items in the page is less than or equal to the page length.
100
+ """Returns the maximum number of items fetched by the pager at one time.
103
101
 
104
102
  Usage:
105
103