google-genai 1.32.0__py3-none-any.whl → 1.34.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
google/genai/caches.py CHANGED
@@ -84,6 +84,23 @@ def _FileData_to_mldev(
   return to_object
 
 
+def _FunctionCall_to_mldev(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['id']) is not None:
+    setv(to_object, ['id'], getv(from_object, ['id']))
+
+  if getv(from_object, ['args']) is not None:
+    setv(to_object, ['args'], getv(from_object, ['args']))
+
+  if getv(from_object, ['name']) is not None:
+    setv(to_object, ['name'], getv(from_object, ['name']))
+
+  return to_object
+
+
 def _Part_to_mldev(
     from_object: Union[dict[str, Any], object],
     parent_object: Optional[dict[str, Any]] = None,
@@ -122,6 +139,13 @@ def _Part_to_mldev(
         getv(from_object, ['thought_signature']),
     )
 
+  if getv(from_object, ['function_call']) is not None:
+    setv(
+        to_object,
+        ['functionCall'],
+        _FunctionCall_to_mldev(getv(from_object, ['function_call']), to_object),
+    )
+
   if getv(from_object, ['code_execution_result']) is not None:
     setv(
         to_object,
@@ -132,9 +156,6 @@ def _Part_to_mldev(
   if getv(from_object, ['executable_code']) is not None:
     setv(to_object, ['executableCode'], getv(from_object, ['executable_code']))
 
-  if getv(from_object, ['function_call']) is not None:
-    setv(to_object, ['functionCall'], getv(from_object, ['function_call']))
-
   if getv(from_object, ['function_response']) is not None:
     setv(
         to_object,
@@ -684,6 +705,23 @@ def _FileData_to_vertex(
   return to_object
 
 
+def _FunctionCall_to_vertex(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['id']) is not None:
+    raise ValueError('id parameter is not supported in Vertex AI.')
+
+  if getv(from_object, ['args']) is not None:
+    setv(to_object, ['args'], getv(from_object, ['args']))
+
+  if getv(from_object, ['name']) is not None:
+    setv(to_object, ['name'], getv(from_object, ['name']))
+
+  return to_object
+
+
 def _Part_to_vertex(
     from_object: Union[dict[str, Any], object],
     parent_object: Optional[dict[str, Any]] = None,
@@ -722,6 +760,15 @@ def _Part_to_vertex(
         getv(from_object, ['thought_signature']),
     )
 
+  if getv(from_object, ['function_call']) is not None:
+    setv(
+        to_object,
+        ['functionCall'],
+        _FunctionCall_to_vertex(
+            getv(from_object, ['function_call']), to_object
+        ),
+    )
+
   if getv(from_object, ['code_execution_result']) is not None:
     setv(
         to_object,
@@ -732,9 +779,6 @@ def _Part_to_vertex(
   if getv(from_object, ['executable_code']) is not None:
     setv(to_object, ['executableCode'], getv(from_object, ['executable_code']))
 
-  if getv(from_object, ['function_call']) is not None:
-    setv(to_object, ['functionCall'], getv(from_object, ['function_call']))
-
   if getv(from_object, ['function_response']) is not None:
     setv(
         to_object,
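Taken together, the caches.py hunks above replace the verbatim pass-through of `function_call` with dedicated converters: the Gemini API (`mldev`) converter serializes `id`, `args`, and `name`, while the Vertex AI converter rejects `id` outright. A minimal sketch of what this means at the public API surface, assuming a cached function-call Part; the model name and cache config below are illustrative, not taken from the diff:

```python
from google import genai
from google.genai import types

client = genai.Client()  # Gemini API; a Vertex AI client would reject the `id` below

part = types.Part(
    function_call=types.FunctionCall(
        id='call-001',             # serialized by _FunctionCall_to_mldev; ValueError on Vertex AI
        name='get_weather',        # serialized on both surfaces
        args={'location': 'Paris'},
    )
)

cache = client.caches.create(
    model='gemini-2.0-flash-001',  # illustrative model name
    config=types.CreateCachedContentConfig(
        contents=[types.Content(role='model', parts=[part])],
    ),
)
```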
google/genai/files.py CHANGED
@@ -434,6 +434,7 @@ class Files(_api_module.BaseModule):
         config, 'should_return_http_response', None
     ):
       return_value = types.CreateFileResponse(sdk_http_response=response)
+      self._api_client._verify_response(return_value)
       return return_value
 
     response_dict = '' if not response.body else json.loads(response.body)
@@ -916,6 +917,7 @@ class AsyncFiles(_api_module.BaseModule):
         config, 'should_return_http_response', None
     ):
       return_value = types.CreateFileResponse(sdk_http_response=response)
+      self._api_client._verify_response(return_value)
       return return_value
 
     response_dict = '' if not response.body else json.loads(response.body)
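In both the sync and async file paths, the raw-HTTP early return now runs `_verify_response` on the wrapped response before handing it back, matching the validation done on the parsed path. A hedged consumer-side sketch; the diff only shows `should_return_http_response` being read via `getattr`, so treating it as a settable config field (and the dict spelling below) is an assumption:

```python
from google import genai

client = genai.Client()
resp = client.files.upload(
    file='data.txt',                                # illustrative path
    config={'should_return_http_response': True},   # assumed public spelling
)
# On this path only the raw envelope is populated; as of 1.34.0 the wrapper is
# also passed through _verify_response before being returned.
if resp.sdk_http_response is not None:
    print(resp.sdk_http_response.headers)
```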
google/genai/local_tokenizer.py CHANGED
@@ -303,9 +303,20 @@ class LocalTokenizer:
 
     Args:
       contents: The contents to tokenize.
+      config: The configuration for counting tokens.
 
     Returns:
       A `CountTokensResult` containing the total number of tokens.
+
+    Usage:
+
+    .. code-block:: python
+
+      from google import genai
+      tokenizer = genai.LocalTokenizer(model_name='gemini-2.0-flash-001')
+      result = tokenizer.count_tokens("What is your name?")
+      print(result)
+      # total_tokens=5
     """
     processed_contents = t.t_contents(contents)
     text_accumulator = _TextsAccumulator()
@@ -330,7 +341,24 @@ class LocalTokenizer:
       self,
       contents: Union[types.ContentListUnion, types.ContentListUnionDict],
   ) -> types.ComputeTokensResult:
-    """Computes the tokens ids and string pieces in the input."""
+    """Computes the tokens ids and string pieces in the input.
+
+    Args:
+      contents: The contents to tokenize.
+
+    Returns:
+      A `ComputeTokensResult` containing the token information.
+
+    Usage:
+
+    .. code-block:: python
+
+      from google import genai
+      tokenizer = genai.LocalTokenizer(model_name='gemini-2.0-flash-001')
+      result = tokenizer.compute_tokens("What is your name?")
+      print(result)
+      # tokens_info=[TokensInfo(token_ids=[279, 329, 1313, 2508, 13], tokens=[b' What', b' is', b' your', b' name', b'?'], role='user')]
+    """
     processed_contents = t.t_contents(contents)
     text_accumulator = _TextsAccumulator()
     for content in processed_contents:
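The two docstrings added above already carry usage examples; combined, they look like this (model name taken from those docstrings; exact token ids and counts depend on the tokenizer version shipped for the model):

```python
from google import genai

tokenizer = genai.LocalTokenizer(model_name='gemini-2.0-flash-001')

count = tokenizer.count_tokens('What is your name?')
print(count.total_tokens)  # e.g. 5

detail = tokenizer.compute_tokens('What is your name?')
for info in detail.tokens_info:
    # Token ids, byte-level token pieces, and the originating role, per the
    # docstring's sample output.
    print(info.role, info.token_ids, info.tokens)
```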
google/genai/models.py CHANGED
@@ -88,6 +88,23 @@ def _FileData_to_mldev(
   return to_object
 
 
+def _FunctionCall_to_mldev(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['id']) is not None:
+    setv(to_object, ['id'], getv(from_object, ['id']))
+
+  if getv(from_object, ['args']) is not None:
+    setv(to_object, ['args'], getv(from_object, ['args']))
+
+  if getv(from_object, ['name']) is not None:
+    setv(to_object, ['name'], getv(from_object, ['name']))
+
+  return to_object
+
+
 def _Part_to_mldev(
     from_object: Union[dict[str, Any], object],
     parent_object: Optional[dict[str, Any]] = None,
@@ -126,6 +143,13 @@ def _Part_to_mldev(
         getv(from_object, ['thought_signature']),
     )
 
+  if getv(from_object, ['function_call']) is not None:
+    setv(
+        to_object,
+        ['functionCall'],
+        _FunctionCall_to_mldev(getv(from_object, ['function_call']), to_object),
+    )
+
   if getv(from_object, ['code_execution_result']) is not None:
     setv(
         to_object,
@@ -136,9 +160,6 @@ def _Part_to_mldev(
   if getv(from_object, ['executable_code']) is not None:
     setv(to_object, ['executableCode'], getv(from_object, ['executable_code']))
 
-  if getv(from_object, ['function_call']) is not None:
-    setv(to_object, ['functionCall'], getv(from_object, ['function_call']))
-
   if getv(from_object, ['function_response']) is not None:
     setv(
         to_object,
@@ -1330,7 +1351,11 @@ def _GenerateVideosConfig_to_mldev(
   )
 
   if getv(from_object, ['resolution']) is not None:
-    raise ValueError('resolution parameter is not supported in Gemini API.')
+    setv(
+        parent_object,
+        ['parameters', 'resolution'],
+        getv(from_object, ['resolution']),
+    )
 
   if getv(from_object, ['person_generation']) is not None:
     setv(
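The resolution hunk is a behavioral change: 1.34.0 stops raising ValueError and instead forwards `resolution` to the Gemini API as `parameters.resolution`. A hedged sketch of the call this unlocks; the model name and resolution value are illustrative, and accepted values depend on the model:

```python
import time

from google import genai
from google.genai import types

client = genai.Client()
operation = client.models.generate_videos(
    model='veo-3.0-generate-preview',  # illustrative model name
    prompt='A neon hologram of a cat driving at top speed',
    config=types.GenerateVideosConfig(
        resolution='1080p',  # now serialized for the Gemini API instead of rejected
    ),
)
while not operation.done:
    time.sleep(10)
    operation = client.operations.get(operation)
```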
@@ -1473,6 +1498,23 @@ def _FileData_to_vertex(
   return to_object
 
 
+def _FunctionCall_to_vertex(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['id']) is not None:
+    raise ValueError('id parameter is not supported in Vertex AI.')
+
+  if getv(from_object, ['args']) is not None:
+    setv(to_object, ['args'], getv(from_object, ['args']))
+
+  if getv(from_object, ['name']) is not None:
+    setv(to_object, ['name'], getv(from_object, ['name']))
+
+  return to_object
+
+
 def _Part_to_vertex(
     from_object: Union[dict[str, Any], object],
     parent_object: Optional[dict[str, Any]] = None,
@@ -1511,6 +1553,15 @@ def _Part_to_vertex(
         getv(from_object, ['thought_signature']),
     )
 
+  if getv(from_object, ['function_call']) is not None:
+    setv(
+        to_object,
+        ['functionCall'],
+        _FunctionCall_to_vertex(
+            getv(from_object, ['function_call']), to_object
+        ),
+    )
+
   if getv(from_object, ['code_execution_result']) is not None:
     setv(
         to_object,
@@ -1521,9 +1572,6 @@ def _Part_to_vertex(
   if getv(from_object, ['executable_code']) is not None:
     setv(to_object, ['executableCode'], getv(from_object, ['executable_code']))
 
-  if getv(from_object, ['function_call']) is not None:
-    setv(to_object, ['functionCall'], getv(from_object, ['function_call']))
-
   if getv(from_object, ['function_response']) is not None:
     setv(
         to_object,
@@ -3710,6 +3758,23 @@ def _FileData_from_mldev(
   return to_object
 
 
+def _FunctionCall_from_mldev(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['id']) is not None:
+    setv(to_object, ['id'], getv(from_object, ['id']))
+
+  if getv(from_object, ['args']) is not None:
+    setv(to_object, ['args'], getv(from_object, ['args']))
+
+  if getv(from_object, ['name']) is not None:
+    setv(to_object, ['name'], getv(from_object, ['name']))
+
+  return to_object
+
+
 def _Part_from_mldev(
     from_object: Union[dict[str, Any], object],
     parent_object: Optional[dict[str, Any]] = None,
@@ -3748,6 +3813,15 @@ def _Part_from_mldev(
         getv(from_object, ['thoughtSignature']),
     )
 
+  if getv(from_object, ['functionCall']) is not None:
+    setv(
+        to_object,
+        ['function_call'],
+        _FunctionCall_from_mldev(
+            getv(from_object, ['functionCall']), to_object
+        ),
+    )
+
   if getv(from_object, ['codeExecutionResult']) is not None:
     setv(
         to_object,
@@ -3758,9 +3832,6 @@ def _Part_from_mldev(
   if getv(from_object, ['executableCode']) is not None:
     setv(to_object, ['executable_code'], getv(from_object, ['executableCode']))
 
-  if getv(from_object, ['functionCall']) is not None:
-    setv(to_object, ['function_call'], getv(from_object, ['functionCall']))
-
   if getv(from_object, ['functionResponse']) is not None:
     setv(
         to_object,
@@ -4372,6 +4443,21 @@ def _FileData_from_vertex(
   return to_object
 
 
+def _FunctionCall_from_vertex(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+
+  if getv(from_object, ['args']) is not None:
+    setv(to_object, ['args'], getv(from_object, ['args']))
+
+  if getv(from_object, ['name']) is not None:
+    setv(to_object, ['name'], getv(from_object, ['name']))
+
+  return to_object
+
+
 def _Part_from_vertex(
     from_object: Union[dict[str, Any], object],
     parent_object: Optional[dict[str, Any]] = None,
@@ -4410,6 +4496,15 @@ def _Part_from_vertex(
         getv(from_object, ['thoughtSignature']),
     )
 
+  if getv(from_object, ['functionCall']) is not None:
+    setv(
+        to_object,
+        ['function_call'],
+        _FunctionCall_from_vertex(
+            getv(from_object, ['functionCall']), to_object
+        ),
+    )
+
   if getv(from_object, ['codeExecutionResult']) is not None:
     setv(
         to_object,
@@ -4420,9 +4515,6 @@ def _Part_from_vertex(
   if getv(from_object, ['executableCode']) is not None:
     setv(to_object, ['executable_code'], getv(from_object, ['executableCode']))
 
-  if getv(from_object, ['functionCall']) is not None:
-    setv(to_object, ['function_call'], getv(from_object, ['functionCall']))
-
   if getv(from_object, ['functionResponse']) is not None:
     setv(
         to_object,
@@ -5256,6 +5348,13 @@ class Models(_api_module.BaseModule):
         'post', path, request_dict, http_options
     )
 
+    if config is not None and getattr(
+        config, 'should_return_http_response', None
+    ):
+      return_value = types.GenerateContentResponse(sdk_http_response=response)
+      self._api_client._verify_response(return_value)
+      return return_value
+
     response_dict = '' if not response.body else json.loads(response.body)
 
     if self._api_client.vertexai:
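This mirrors the files.py change: when a config opts into the raw HTTP response, `generate_content` (and the async variant further down) now returns a `GenerateContentResponse` carrying only `sdk_http_response`, verified before return, and skips body parsing entirely. A hedged sketch; as in files.py, the public spelling of the flag is an assumption, since the diff only reads it via `getattr`:

```python
raw = client.models.generate_content(
    model='gemini-2.0-flash-001',                   # illustrative model name
    contents='Why is the sky blue?',
    config={'should_return_http_response': True},   # assumed public spelling
)
# Only the raw envelope is populated on this path; parsed candidates are not.
print(raw.sdk_http_response.headers)
```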
@@ -5455,13 +5554,7 @@ class Models(_api_module.BaseModule):
       prompt: str,
       config: Optional[types.GenerateImagesConfigOrDict] = None,
   ) -> types.GenerateImagesResponse:
-    """Generates images based on a text description and configuration.
-
-    Args:
-      model (str): The model to use.
-      prompt (str): A text description of the images to generate.
-      config (GenerateImagesConfig): Configuration for generation.
-    """
+    """Private method for generating images."""
 
     parameter_model = types._GenerateImagesParameters(
         model=model,
@@ -5534,47 +5627,7 @@ class Models(_api_module.BaseModule):
       reference_images: list[types._ReferenceImageAPIOrDict],
       config: Optional[types.EditImageConfigOrDict] = None,
   ) -> types.EditImageResponse:
-    """Edits an image based on a text description and configuration.
-
-    Args:
-      model (str): The model to use.
-      prompt (str): A text description of the edit to apply to the image.
-      reference_images (list[Union[RawReferenceImage, MaskReferenceImage,
-        ControlReferenceImage, StyleReferenceImage, SubjectReferenceImage]): The
-        reference images for editing.
-      config (EditImageConfig): Configuration for editing.
-
-    Usage:
-
-    .. code-block:: python
-
-      from google.genai.types import RawReferenceImage, MaskReferenceImage
-
-      raw_ref_image = RawReferenceImage(
-          reference_id=1,
-          reference_image=types.Image.from_file(IMAGE_FILE_PATH),
-      )
-
-      mask_ref_image = MaskReferenceImage(
-          reference_id=2,
-          config=types.MaskReferenceConfig(
-              mask_mode='MASK_MODE_FOREGROUND',
-              mask_dilation=0.06,
-          ),
-      )
-      response = client.models.edit_image(
-          model='imagen-3.0-capability-001',
-          prompt='man with dog',
-          reference_images=[raw_ref_image, mask_ref_image],
-          config=types.EditImageConfig(
-              edit_mode= "EDIT_MODE_INPAINT_INSERTION",
-              number_of_images= 1,
-              include_rai_reason= True,
-          )
-      )
-      response.generated_images[0].image.show()
-      # Shows a man with a dog instead of a cat.
-    """
+    """Private method for editing an image."""
 
     parameter_model = types._EditImageParameters(
         model=model,
@@ -5638,14 +5691,7 @@ class Models(_api_module.BaseModule):
       upscale_factor: str,
       config: Optional[types._UpscaleImageAPIConfigOrDict] = None,
   ) -> types.UpscaleImageResponse:
-    """Upscales an image.
-
-    Args:
-      model (str): The model to use.
-      image (Image): The input image for upscaling.
-      upscale_factor (str): The factor to upscale the image (x2 or x4).
-      config (_UpscaleImageAPIConfig): Configuration for upscaling.
-    """
+    """Private method for upscaling an image."""
 
     parameter_model = types._UpscaleImageAPIParameters(
         model=model,
@@ -6331,39 +6377,7 @@ class Models(_api_module.BaseModule):
       source: Optional[types.GenerateVideosSourceOrDict] = None,
       config: Optional[types.GenerateVideosConfigOrDict] = None,
   ) -> types.GenerateVideosOperation:
-    """Generates videos based on an input (text, image, or video) and configuration.
-
-    The following use cases are supported:
-    1. Text to video generation.
-    2a. Image to video generation (additional text prompt is optional).
-    2b. Image to video generation with frame interpolation (specify last_frame
-    in config).
-    3. Video extension (additional text prompt is optional)
-
-    Args:
-      model: The model to use.
-      prompt: The text prompt for generating the videos. Optional for image to
-        video and video extension use cases.
-      image: The input image for generating the videos. Optional if prompt is
-        provided.
-      video: The input video for video extension use cases. Optional if prompt
-        or image is provided.
-      config: Configuration for generation.
-
-    Usage:
-
-    ```
-    operation = client.models.generate_videos(
-        model="veo-2.0-generate-001",
-        prompt="A neon hologram of a cat driving at top speed",
-    )
-    while not operation.done:
-      time.sleep(10)
-      operation = client.operations.get(operation)
-
-    operation.result.generated_videos[0].video.uri
-    ```
-    """
+    """Private method for generating videos."""
 
     parameter_model = types._GenerateVideosParameters(
         model=model,
@@ -7081,6 +7095,13 @@ class AsyncModels(_api_module.BaseModule):
         'post', path, request_dict, http_options
     )
 
+    if config is not None and getattr(
+        config, 'should_return_http_response', None
+    ):
+      return_value = types.GenerateContentResponse(sdk_http_response=response)
+      self._api_client._verify_response(return_value)
+      return return_value
+
     response_dict = '' if not response.body else json.loads(response.body)
 
     if self._api_client.vertexai:
@@ -7285,13 +7306,7 @@ class AsyncModels(_api_module.BaseModule):
       prompt: str,
       config: Optional[types.GenerateImagesConfigOrDict] = None,
   ) -> types.GenerateImagesResponse:
-    """Generates images based on a text description and configuration.
-
-    Args:
-      model (str): The model to use.
-      prompt (str): A text description of the images to generate.
-      config (GenerateImagesConfig): Configuration for generation.
-    """
+    """Private method for generating images asynchronously."""
 
     parameter_model = types._GenerateImagesParameters(
         model=model,
@@ -7364,47 +7379,7 @@ class AsyncModels(_api_module.BaseModule):
       reference_images: list[types._ReferenceImageAPIOrDict],
       config: Optional[types.EditImageConfigOrDict] = None,
   ) -> types.EditImageResponse:
-    """Edits an image based on a text description and configuration.
-
-    Args:
-      model (str): The model to use.
-      prompt (str): A text description of the edit to apply to the image.
-      reference_images (list[Union[RawReferenceImage, MaskReferenceImage,
-        ControlReferenceImage, StyleReferenceImage, SubjectReferenceImage]): The
-        reference images for editing.
-      config (EditImageConfig): Configuration for editing.
-
-    Usage:
-
-    .. code-block:: python
-
-      from google.genai.types import RawReferenceImage, MaskReferenceImage
-
-      raw_ref_image = RawReferenceImage(
-          reference_id=1,
-          reference_image=types.Image.from_file(IMAGE_FILE_PATH),
-      )
-
-      mask_ref_image = MaskReferenceImage(
-          reference_id=2,
-          config=types.MaskReferenceConfig(
-              mask_mode='MASK_MODE_FOREGROUND',
-              mask_dilation=0.06,
-          ),
-      )
-      response = await client.aio.models.edit_image(
-          model='imagen-3.0-capability-001',
-          prompt='man with dog',
-          reference_images=[raw_ref_image, mask_ref_image],
-          config=types.EditImageConfig(
-              edit_mode= "EDIT_MODE_INPAINT_INSERTION",
-              number_of_images= 1,
-              include_rai_reason= True,
-          )
-      )
-      response.generated_images[0].image.show()
-      # Shows a man with a dog instead of a cat.
-    """
+    """Private method for editing an image asynchronously."""
 
     parameter_model = types._EditImageParameters(
         model=model,
@@ -7468,14 +7443,7 @@ class AsyncModels(_api_module.BaseModule):
       upscale_factor: str,
       config: Optional[types._UpscaleImageAPIConfigOrDict] = None,
   ) -> types.UpscaleImageResponse:
-    """Upscales an image.
-
-    Args:
-      model (str): The model to use.
-      image (Image): The input image for upscaling.
-      upscale_factor (str): The factor to upscale the image (x2 or x4).
-      config (_UpscaleImageAPIConfig): Configuration for upscaling.
-    """
+    """Private method for upscaling an image asynchronously."""
 
     parameter_model = types._UpscaleImageAPIParameters(
         model=model,
@@ -8167,39 +8135,7 @@ class AsyncModels(_api_module.BaseModule):
       source: Optional[types.GenerateVideosSourceOrDict] = None,
      config: Optional[types.GenerateVideosConfigOrDict] = None,
   ) -> types.GenerateVideosOperation:
-    """Generates videos based on an input (text, image, or video) and configuration.
-
-    The following use cases are supported:
-    1. Text to video generation.
-    2a. Image to video generation (additional text prompt is optional).
-    2b. Image to video generation with frame interpolation (specify last_frame
-    in config).
-    3. Video extension (additional text prompt is optional)
-
-    Args:
-      model: The model to use.
-      prompt: The text prompt for generating the videos. Optional for image to
-        video and video extension use cases.
-      image: The input image for generating the videos. Optional if prompt is
-        provided.
-      video: The input video for video extension use cases. Optional if prompt
-        or image is provided.
-      config: Configuration for generation.
-
-    Usage:
-
-    ```
-    operation = client.models.generate_videos(
-        model="veo-2.0-generate-001",
-        prompt="A neon hologram of a cat driving at top speed",
-    )
-    while not operation.done:
-      time.sleep(10)
-      operation = client.operations.get(operation)
-
-    operation.result.generated_videos[0].video.uri
-    ```
-    """
+    """Private method for generating videos asynchronously."""
 
     parameter_model = types._GenerateVideosParameters(
         model=model,