google-genai 0.6.0__py3-none-any.whl → 0.7.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
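The most visible user-facing change in this release is the rename of the image-generation surface: `generate_image` becomes `generate_images`, and `GenerateImageConfig`/`GenerateImageResponse` become `GenerateImagesConfig`/`GenerateImagesResponse` (see the hunks below). A minimal usage sketch under the 0.7.0 names, adapted from the docstring shown in the diff; client construction and the `generated_images` accessor are not part of this diff and should be treated as assumptions:

    from google import genai
    from google.genai import types

    # Assumes an API key (or Vertex AI settings) is configured in the environment.
    client = genai.Client()

    response = client.models.generate_images(
        model='imagen-3.0-generate-001',
        prompt='Man with a dog',
        config=types.GenerateImagesConfig(
            number_of_images=1,
            include_rai_reason=True,
        ),
    )
    response.generated_images[0].image.show()  # Shows a man with a dog.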
google/genai/models.py CHANGED
@@ -16,13 +16,14 @@
  # Code generated by the Google Gen AI SDK generator DO NOT EDIT.

  import logging
- from typing import AsyncIterator, Iterator, Optional, Union
+ from typing import AsyncIterator, Awaitable, Iterator, Optional, Union
  from urllib.parse import urlencode
+ from . import _api_module
  from . import _common
  from . import _extra_utils
  from . import _transformers as t
  from . import types
- from ._api_client import ApiClient, HttpOptionsDict
+ from ._api_client import ApiClient
  from ._common import get_value_by_path as getv
  from ._common import set_value_by_path as setv
  from .pagers import AsyncPager, Pager
@@ -35,7 +36,7 @@ def _Part_to_mldev(
  ) -> dict:
  to_object = {}
  if getv(from_object, ['video_metadata']) is not None:
- raise ValueError('video_metadata parameter is not supported in Google AI.')
+ raise ValueError('video_metadata parameter is not supported in Gemini API.')

  if getv(from_object, ['thought']) is not None:
  setv(to_object, ['thought'], getv(from_object, ['thought']))
@@ -167,51 +168,51 @@ def _Schema_to_mldev(
  ) -> dict:
  to_object = {}
  if getv(from_object, ['min_items']) is not None:
- raise ValueError('min_items parameter is not supported in Google AI.')
+ raise ValueError('min_items parameter is not supported in Gemini API.')

  if getv(from_object, ['example']) is not None:
- raise ValueError('example parameter is not supported in Google AI.')
+ raise ValueError('example parameter is not supported in Gemini API.')

  if getv(from_object, ['property_ordering']) is not None:
  raise ValueError(
- 'property_ordering parameter is not supported in Google AI.'
+ 'property_ordering parameter is not supported in Gemini API.'
  )

  if getv(from_object, ['pattern']) is not None:
- raise ValueError('pattern parameter is not supported in Google AI.')
+ raise ValueError('pattern parameter is not supported in Gemini API.')

  if getv(from_object, ['minimum']) is not None:
- raise ValueError('minimum parameter is not supported in Google AI.')
+ raise ValueError('minimum parameter is not supported in Gemini API.')

  if getv(from_object, ['default']) is not None:
- raise ValueError('default parameter is not supported in Google AI.')
+ raise ValueError('default parameter is not supported in Gemini API.')

  if getv(from_object, ['any_of']) is not None:
- raise ValueError('any_of parameter is not supported in Google AI.')
+ raise ValueError('any_of parameter is not supported in Gemini API.')

  if getv(from_object, ['max_length']) is not None:
- raise ValueError('max_length parameter is not supported in Google AI.')
+ raise ValueError('max_length parameter is not supported in Gemini API.')

  if getv(from_object, ['title']) is not None:
- raise ValueError('title parameter is not supported in Google AI.')
+ raise ValueError('title parameter is not supported in Gemini API.')

  if getv(from_object, ['min_length']) is not None:
- raise ValueError('min_length parameter is not supported in Google AI.')
+ raise ValueError('min_length parameter is not supported in Gemini API.')

  if getv(from_object, ['min_properties']) is not None:
- raise ValueError('min_properties parameter is not supported in Google AI.')
+ raise ValueError('min_properties parameter is not supported in Gemini API.')

  if getv(from_object, ['max_items']) is not None:
- raise ValueError('max_items parameter is not supported in Google AI.')
+ raise ValueError('max_items parameter is not supported in Gemini API.')

  if getv(from_object, ['maximum']) is not None:
- raise ValueError('maximum parameter is not supported in Google AI.')
+ raise ValueError('maximum parameter is not supported in Gemini API.')

  if getv(from_object, ['nullable']) is not None:
- raise ValueError('nullable parameter is not supported in Google AI.')
+ raise ValueError('nullable parameter is not supported in Gemini API.')

  if getv(from_object, ['max_properties']) is not None:
- raise ValueError('max_properties parameter is not supported in Google AI.')
+ raise ValueError('max_properties parameter is not supported in Gemini API.')

  if getv(from_object, ['type']) is not None:
  setv(to_object, ['type'], getv(from_object, ['type']))
@@ -323,7 +324,7 @@ def _SafetySetting_to_mldev(
  ) -> dict:
  to_object = {}
  if getv(from_object, ['method']) is not None:
- raise ValueError('method parameter is not supported in Google AI.')
+ raise ValueError('method parameter is not supported in Gemini API.')

  if getv(from_object, ['category']) is not None:
  setv(to_object, ['category'], getv(from_object, ['category']))
@@ -359,7 +360,7 @@ def _FunctionDeclaration_to_mldev(
  ) -> dict:
  to_object = {}
  if getv(from_object, ['response']) is not None:
- raise ValueError('response parameter is not supported in Google AI.')
+ raise ValueError('response parameter is not supported in Gemini API.')

  if getv(from_object, ['description']) is not None:
  setv(to_object, ['description'], getv(from_object, ['description']))
@@ -515,7 +516,7 @@ def _Tool_to_mldev(
  )

  if getv(from_object, ['retrieval']) is not None:
- raise ValueError('retrieval parameter is not supported in Google AI.')
+ raise ValueError('retrieval parameter is not supported in Gemini API.')

  if getv(from_object, ['google_search']) is not None:
  setv(
@@ -796,6 +797,7 @@ def _GenerateContentConfig_to_mldev(
  parent_object: dict = None,
  ) -> dict:
  to_object = {}
+
  if getv(from_object, ['system_instruction']) is not None:
  setv(
  parent_object,
@@ -871,7 +873,7 @@ def _GenerateContentConfig_to_mldev(
  )

  if getv(from_object, ['routing_config']) is not None:
- raise ValueError('routing_config parameter is not supported in Google AI.')
+ raise ValueError('routing_config parameter is not supported in Gemini API.')

  if getv(from_object, ['safety_settings']) is not None:
  setv(
@@ -920,7 +922,7 @@ def _GenerateContentConfig_to_mldev(

  if getv(from_object, ['media_resolution']) is not None:
  raise ValueError(
- 'media_resolution parameter is not supported in Google AI.'
+ 'media_resolution parameter is not supported in Gemini API.'
  )

  if getv(from_object, ['speech_config']) is not None:
@@ -935,7 +937,9 @@ def _GenerateContentConfig_to_mldev(
  )

  if getv(from_object, ['audio_timestamp']) is not None:
- raise ValueError('audio_timestamp parameter is not supported in Google AI.')
+ raise ValueError(
+ 'audio_timestamp parameter is not supported in Gemini API.'
+ )

  if getv(from_object, ['thinking_config']) is not None:
  setv(
@@ -955,6 +959,7 @@ def _GenerateContentConfig_to_vertex(
  parent_object: dict = None,
  ) -> dict:
  to_object = {}
+
  if getv(from_object, ['system_instruction']) is not None:
  setv(
  parent_object,
@@ -1188,8 +1193,6 @@ def _EmbedContentConfig_to_mldev(
  parent_object: dict = None,
  ) -> dict:
  to_object = {}
- if getv(from_object, ['http_options']) is not None:
- setv(to_object, ['httpOptions'], getv(from_object, ['http_options']))

  if getv(from_object, ['task_type']) is not None:
  setv(
@@ -1209,10 +1212,10 @@ def _EmbedContentConfig_to_mldev(
  )

  if getv(from_object, ['mime_type']) is not None:
- raise ValueError('mime_type parameter is not supported in Google AI.')
+ raise ValueError('mime_type parameter is not supported in Gemini API.')

  if getv(from_object, ['auto_truncate']) is not None:
- raise ValueError('auto_truncate parameter is not supported in Google AI.')
+ raise ValueError('auto_truncate parameter is not supported in Gemini API.')

  return to_object

@@ -1223,8 +1226,6 @@ def _EmbedContentConfig_to_vertex(
  parent_object: dict = None,
  ) -> dict:
  to_object = {}
- if getv(from_object, ['http_options']) is not None:
- setv(to_object, ['httpOptions'], getv(from_object, ['http_options']))

  if getv(from_object, ['task_type']) is not None:
  setv(
@@ -1329,17 +1330,15 @@ def _EmbedContentParameters_to_vertex(
  return to_object


- def _GenerateImageConfig_to_mldev(
+ def _GenerateImagesConfig_to_mldev(
  api_client: ApiClient,
  from_object: Union[dict, object],
  parent_object: dict = None,
  ) -> dict:
  to_object = {}
- if getv(from_object, ['http_options']) is not None:
- setv(to_object, ['httpOptions'], getv(from_object, ['http_options']))

  if getv(from_object, ['output_gcs_uri']) is not None:
- raise ValueError('output_gcs_uri parameter is not supported in Google AI.')
+ raise ValueError('output_gcs_uri parameter is not supported in Gemini API.')

  if getv(from_object, ['negative_prompt']) is not None:
  setv(
@@ -1363,9 +1362,12 @@ def _GenerateImageConfig_to_mldev(
  )

  if getv(from_object, ['seed']) is not None:
- raise ValueError('seed parameter is not supported in Google AI.')
+ raise ValueError('seed parameter is not supported in Gemini API.')

  if getv(from_object, ['safety_filter_level']) is not None:
+ _SafetyFilterLevel_to_mldev_enum_validate(
+ getv(from_object, ['safety_filter_level'])
+ )
  setv(
  parent_object,
  ['parameters', 'safetySetting'],
@@ -1373,6 +1375,9 @@ def _GenerateImageConfig_to_mldev(
  )

  if getv(from_object, ['person_generation']) is not None:
+ _PersonGeneration_to_mldev_enum_validate(
+ getv(from_object, ['person_generation'])
+ )
  setv(
  parent_object,
  ['parameters', 'personGeneration'],
@@ -1415,7 +1420,7 @@ def _GenerateImageConfig_to_mldev(
  )

  if getv(from_object, ['add_watermark']) is not None:
- raise ValueError('add_watermark parameter is not supported in Google AI.')
+ raise ValueError('add_watermark parameter is not supported in Gemini API.')

  if getv(from_object, ['aspect_ratio']) is not None:
  setv(
@@ -1424,17 +1429,22 @@ def _GenerateImageConfig_to_mldev(
  getv(from_object, ['aspect_ratio']),
  )

+ if getv(from_object, ['enhance_prompt']) is not None:
+ setv(
+ parent_object,
+ ['parameters', 'enhancePrompt'],
+ getv(from_object, ['enhance_prompt']),
+ )
+
  return to_object


- def _GenerateImageConfig_to_vertex(
+ def _GenerateImagesConfig_to_vertex(
  api_client: ApiClient,
  from_object: Union[dict, object],
  parent_object: dict = None,
  ) -> dict:
  to_object = {}
- if getv(from_object, ['http_options']) is not None:
- setv(to_object, ['httpOptions'], getv(from_object, ['http_options']))

  if getv(from_object, ['output_gcs_uri']) is not None:
  setv(
@@ -1530,10 +1540,17 @@ def _GenerateImageConfig_to_vertex(
  getv(from_object, ['aspect_ratio']),
  )

+ if getv(from_object, ['enhance_prompt']) is not None:
+ setv(
+ parent_object,
+ ['parameters', 'enhancePrompt'],
+ getv(from_object, ['enhance_prompt']),
+ )
+
  return to_object


- def _GenerateImageParameters_to_mldev(
+ def _GenerateImagesParameters_to_mldev(
  api_client: ApiClient,
  from_object: Union[dict, object],
  parent_object: dict = None,
@@ -1553,7 +1570,7 @@ def _GenerateImageParameters_to_mldev(
  setv(
  to_object,
  ['config'],
- _GenerateImageConfig_to_mldev(
+ _GenerateImagesConfig_to_mldev(
  api_client, getv(from_object, ['config']), to_object
  ),
  )
@@ -1561,7 +1578,7 @@ def _GenerateImageParameters_to_mldev(
  return to_object


- def _GenerateImageParameters_to_vertex(
+ def _GenerateImagesParameters_to_vertex(
  api_client: ApiClient,
  from_object: Union[dict, object],
  parent_object: dict = None,
@@ -1581,7 +1598,7 @@ def _GenerateImageParameters_to_vertex(
  setv(
  to_object,
  ['config'],
- _GenerateImageConfig_to_vertex(
+ _GenerateImagesConfig_to_vertex(
  api_client, getv(from_object, ['config']), to_object
  ),
  )
@@ -1596,7 +1613,7 @@ def _Image_to_mldev(
  ) -> dict:
  to_object = {}
  if getv(from_object, ['gcs_uri']) is not None:
- raise ValueError('gcs_uri parameter is not supported in Google AI.')
+ raise ValueError('gcs_uri parameter is not supported in Gemini API.')

  if getv(from_object, ['image_bytes']) is not None:
  setv(
@@ -1605,6 +1622,9 @@ def _Image_to_mldev(
  t.t_bytes(api_client, getv(from_object, ['image_bytes'])),
  )

+ if getv(from_object, ['mime_type']) is not None:
+ setv(to_object, ['mimeType'], getv(from_object, ['mime_type']))
+
  return to_object


@@ -1624,6 +1644,9 @@ def _Image_to_vertex(
  t.t_bytes(api_client, getv(from_object, ['image_bytes'])),
  )

+ if getv(from_object, ['mime_type']) is not None:
+ setv(to_object, ['mimeType'], getv(from_object, ['mime_type']))
+
  return to_object


@@ -1634,15 +1657,15 @@ def _MaskReferenceConfig_to_mldev(
  ) -> dict:
  to_object = {}
  if getv(from_object, ['mask_mode']) is not None:
- raise ValueError('mask_mode parameter is not supported in Google AI.')
+ raise ValueError('mask_mode parameter is not supported in Gemini API.')

  if getv(from_object, ['segmentation_classes']) is not None:
  raise ValueError(
- 'segmentation_classes parameter is not supported in Google AI.'
+ 'segmentation_classes parameter is not supported in Gemini API.'
  )

  if getv(from_object, ['mask_dilation']) is not None:
- raise ValueError('mask_dilation parameter is not supported in Google AI.')
+ raise ValueError('mask_dilation parameter is not supported in Gemini API.')

  return to_object

@@ -1674,12 +1697,12 @@ def _ControlReferenceConfig_to_mldev(
  ) -> dict:
  to_object = {}
  if getv(from_object, ['control_type']) is not None:
- raise ValueError('control_type parameter is not supported in Google AI.')
+ raise ValueError('control_type parameter is not supported in Gemini API.')

  if getv(from_object, ['enable_control_image_computation']) is not None:
  raise ValueError(
- 'enable_control_image_computation parameter is not supported in'
- ' Google AI.'
+ 'enable_control_image_computation parameter is not supported in Gemini'
+ ' API.'
  )

  return to_object
@@ -1712,7 +1735,7 @@ def _StyleReferenceConfig_to_mldev(
  to_object = {}
  if getv(from_object, ['style_description']) is not None:
  raise ValueError(
- 'style_description parameter is not supported in Google AI.'
+ 'style_description parameter is not supported in Gemini API.'
  )

  return to_object
@@ -1741,11 +1764,11 @@ def _SubjectReferenceConfig_to_mldev(
  ) -> dict:
  to_object = {}
  if getv(from_object, ['subject_type']) is not None:
- raise ValueError('subject_type parameter is not supported in Google AI.')
+ raise ValueError('subject_type parameter is not supported in Gemini API.')

  if getv(from_object, ['subject_description']) is not None:
  raise ValueError(
- 'subject_description parameter is not supported in Google AI.'
+ 'subject_description parameter is not supported in Gemini API.'
  )

  return to_object
@@ -1777,32 +1800,34 @@ def _ReferenceImageAPI_to_mldev(
  ) -> dict:
  to_object = {}
  if getv(from_object, ['reference_image']) is not None:
- raise ValueError('reference_image parameter is not supported in Google AI.')
+ raise ValueError(
+ 'reference_image parameter is not supported in Gemini API.'
+ )

  if getv(from_object, ['reference_id']) is not None:
- raise ValueError('reference_id parameter is not supported in Google AI.')
+ raise ValueError('reference_id parameter is not supported in Gemini API.')

  if getv(from_object, ['reference_type']) is not None:
- raise ValueError('reference_type parameter is not supported in Google AI.')
+ raise ValueError('reference_type parameter is not supported in Gemini API.')

  if getv(from_object, ['mask_image_config']) is not None:
  raise ValueError(
- 'mask_image_config parameter is not supported in Google AI.'
+ 'mask_image_config parameter is not supported in Gemini API.'
  )

  if getv(from_object, ['control_image_config']) is not None:
  raise ValueError(
- 'control_image_config parameter is not supported in Google AI.'
+ 'control_image_config parameter is not supported in Gemini API.'
  )

  if getv(from_object, ['style_image_config']) is not None:
  raise ValueError(
- 'style_image_config parameter is not supported in Google AI.'
+ 'style_image_config parameter is not supported in Gemini API.'
  )

  if getv(from_object, ['subject_image_config']) is not None:
  raise ValueError(
- 'subject_image_config parameter is not supported in Google AI.'
+ 'subject_image_config parameter is not supported in Gemini API.'
  )

  return to_object
@@ -1874,11 +1899,9 @@ def _EditImageConfig_to_mldev(
  parent_object: dict = None,
  ) -> dict:
  to_object = {}
- if getv(from_object, ['http_options']) is not None:
- setv(to_object, ['httpOptions'], getv(from_object, ['http_options']))

  if getv(from_object, ['output_gcs_uri']) is not None:
- raise ValueError('output_gcs_uri parameter is not supported in Google AI.')
+ raise ValueError('output_gcs_uri parameter is not supported in Gemini API.')

  if getv(from_object, ['negative_prompt']) is not None:
  setv(
@@ -1902,9 +1925,12 @@ def _EditImageConfig_to_mldev(
  )

  if getv(from_object, ['seed']) is not None:
- raise ValueError('seed parameter is not supported in Google AI.')
+ raise ValueError('seed parameter is not supported in Gemini API.')

  if getv(from_object, ['safety_filter_level']) is not None:
+ _SafetyFilterLevel_to_mldev_enum_validate(
+ getv(from_object, ['safety_filter_level'])
+ )
  setv(
  parent_object,
  ['parameters', 'safetySetting'],
@@ -1912,6 +1938,9 @@ def _EditImageConfig_to_mldev(
  )

  if getv(from_object, ['person_generation']) is not None:
+ _PersonGeneration_to_mldev_enum_validate(
+ getv(from_object, ['person_generation'])
+ )
  setv(
  parent_object,
  ['parameters', 'personGeneration'],
@@ -1954,6 +1983,7 @@ def _EditImageConfig_to_mldev(
  )

  if getv(from_object, ['edit_mode']) is not None:
+ _EditMode_to_mldev_enum_validate(getv(from_object, ['edit_mode']))
  setv(
  parent_object,
  ['parameters', 'editMode'],
@@ -1969,8 +1999,6 @@ def _EditImageConfig_to_vertex(
  parent_object: dict = None,
  ) -> dict:
  to_object = {}
- if getv(from_object, ['http_options']) is not None:
- setv(to_object, ['httpOptions'], getv(from_object, ['http_options']))

  if getv(from_object, ['output_gcs_uri']) is not None:
  setv(
@@ -2144,8 +2172,6 @@ def _UpscaleImageAPIConfig_to_mldev(
  parent_object: dict = None,
  ) -> dict:
  to_object = {}
- if getv(from_object, ['http_options']) is not None:
- setv(to_object, ['httpOptions'], getv(from_object, ['http_options']))

  if getv(from_object, ['include_rai_reason']) is not None:
  setv(
@@ -2187,8 +2213,6 @@ def _UpscaleImageAPIConfig_to_vertex(
  parent_object: dict = None,
  ) -> dict:
  to_object = {}
- if getv(from_object, ['http_options']) is not None:
- setv(to_object, ['httpOptions'], getv(from_object, ['http_options']))

  if getv(from_object, ['include_rai_reason']) is not None:
  setv(
@@ -2315,6 +2339,9 @@ def _GetModelParameters_to_mldev(
  t.t_model(api_client, getv(from_object, ['model'])),
  )

+ if getv(from_object, ['config']) is not None:
+ setv(to_object, ['config'], getv(from_object, ['config']))
+
  return to_object


@@ -2331,6 +2358,9 @@ def _GetModelParameters_to_vertex(
  t.t_model(api_client, getv(from_object, ['model'])),
  )

+ if getv(from_object, ['config']) is not None:
+ setv(to_object, ['config'], getv(from_object, ['config']))
+
  return to_object


@@ -2340,8 +2370,6 @@ def _ListModelsConfig_to_mldev(
  parent_object: dict = None,
  ) -> dict:
  to_object = {}
- if getv(from_object, ['http_options']) is not None:
- setv(to_object, ['httpOptions'], getv(from_object, ['http_options']))

  if getv(from_object, ['page_size']) is not None:
  setv(
@@ -2374,8 +2402,6 @@ def _ListModelsConfig_to_vertex(
  parent_object: dict = None,
  ) -> dict:
  to_object = {}
- if getv(from_object, ['http_options']) is not None:
- setv(to_object, ['httpOptions'], getv(from_object, ['http_options']))

  if getv(from_object, ['page_size']) is not None:
  setv(
@@ -2444,6 +2470,7 @@ def _UpdateModelConfig_to_mldev(
  parent_object: dict = None,
  ) -> dict:
  to_object = {}
+
  if getv(from_object, ['display_name']) is not None:
  setv(parent_object, ['displayName'], getv(from_object, ['display_name']))

@@ -2459,6 +2486,7 @@ def _UpdateModelConfig_to_vertex(
  parent_object: dict = None,
  ) -> dict:
  to_object = {}
+
  if getv(from_object, ['display_name']) is not None:
  setv(parent_object, ['displayName'], getv(from_object, ['display_name']))

@@ -2531,6 +2559,9 @@ def _DeleteModelParameters_to_mldev(
  t.t_model(api_client, getv(from_object, ['model'])),
  )

+ if getv(from_object, ['config']) is not None:
+ setv(to_object, ['config'], getv(from_object, ['config']))
+
  return to_object


@@ -2547,6 +2578,9 @@ def _DeleteModelParameters_to_vertex(
  t.t_model(api_client, getv(from_object, ['model'])),
  )

+ if getv(from_object, ['config']) is not None:
+ setv(to_object, ['config'], getv(from_object, ['config']))
+
  return to_object

@@ -2556,8 +2590,6 @@ def _CountTokensConfig_to_mldev(
  parent_object: dict = None,
  ) -> dict:
  to_object = {}
- if getv(from_object, ['http_options']) is not None:
- setv(to_object, ['httpOptions'], getv(from_object, ['http_options']))

  if getv(from_object, ['system_instruction']) is not None:
  setv(
@@ -2582,7 +2614,7 @@ def _CountTokensConfig_to_mldev(

  if getv(from_object, ['generation_config']) is not None:
  raise ValueError(
- 'generation_config parameter is not supported in Google AI.'
+ 'generation_config parameter is not supported in Gemini API.'
  )

  return to_object
@@ -2594,8 +2626,6 @@ def _CountTokensConfig_to_vertex(
  parent_object: dict = None,
  ) -> dict:
  to_object = {}
- if getv(from_object, ['http_options']) is not None:
- setv(to_object, ['httpOptions'], getv(from_object, ['http_options']))

  if getv(from_object, ['system_instruction']) is not None:
  setv(
@@ -2702,30 +2732,6 @@ def _CountTokensParameters_to_vertex(
  return to_object


- def _ComputeTokensConfig_to_mldev(
- api_client: ApiClient,
- from_object: Union[dict, object],
- parent_object: dict = None,
- ) -> dict:
- to_object = {}
- if getv(from_object, ['http_options']) is not None:
- setv(to_object, ['httpOptions'], getv(from_object, ['http_options']))
-
- return to_object
-
-
- def _ComputeTokensConfig_to_vertex(
- api_client: ApiClient,
- from_object: Union[dict, object],
- parent_object: dict = None,
- ) -> dict:
- to_object = {}
- if getv(from_object, ['http_options']) is not None:
- setv(to_object, ['httpOptions'], getv(from_object, ['http_options']))
-
- return to_object
-
-
  def _ComputeTokensParameters_to_mldev(
  api_client: ApiClient,
  from_object: Union[dict, object],
@@ -2740,16 +2746,10 @@ def _ComputeTokensParameters_to_mldev(
  )

  if getv(from_object, ['contents']) is not None:
- raise ValueError('contents parameter is not supported in Google AI.')
+ raise ValueError('contents parameter is not supported in Gemini API.')

  if getv(from_object, ['config']) is not None:
- setv(
- to_object,
- ['config'],
- _ComputeTokensConfig_to_mldev(
- api_client, getv(from_object, ['config']), to_object
- ),
- )
+ setv(to_object, ['config'], getv(from_object, ['config']))

  return to_object

@@ -2780,17 +2780,76 @@ def _ComputeTokensParameters_to_vertex(
  )

  if getv(from_object, ['config']) is not None:
- setv(
- to_object,
- ['config'],
- _ComputeTokensConfig_to_vertex(
- api_client, getv(from_object, ['config']), to_object
- ),
- )
+ setv(to_object, ['config'], getv(from_object, ['config']))

  return to_object


+ def _MediaResolution_to_mldev_enum_validate(enum_value: any):
+ if enum_value in set([
+ 'MEDIA_RESOLUTION_UNSPECIFIED',
+ 'MEDIA_RESOLUTION_LOW',
+ 'MEDIA_RESOLUTION_MEDIUM',
+ 'MEDIA_RESOLUTION_HIGH',
+ ]):
+ raise ValueError(f'{enum_value} enum value is not supported in Gemini API.')
+
+
+ def _SafetyFilterLevel_to_mldev_enum_validate(enum_value: any):
+ if enum_value in set(['BLOCK_NONE']):
+ raise ValueError(f'{enum_value} enum value is not supported in Gemini API.')
+
+
+ def _PersonGeneration_to_mldev_enum_validate(enum_value: any):
+ if enum_value in set(['ALLOW_ALL']):
+ raise ValueError(f'{enum_value} enum value is not supported in Gemini API.')
+
+
+ def _MaskReferenceMode_to_mldev_enum_validate(enum_value: any):
+ if enum_value in set([
+ 'MASK_MODE_DEFAULT',
+ 'MASK_MODE_USER_PROVIDED',
+ 'MASK_MODE_BACKGROUND',
+ 'MASK_MODE_FOREGROUND',
+ 'MASK_MODE_SEMANTIC',
+ ]):
+ raise ValueError(f'{enum_value} enum value is not supported in Gemini API.')
+
+
+ def _ControlReferenceType_to_mldev_enum_validate(enum_value: any):
+ if enum_value in set([
+ 'CONTROL_TYPE_DEFAULT',
+ 'CONTROL_TYPE_CANNY',
+ 'CONTROL_TYPE_SCRIBBLE',
+ 'CONTROL_TYPE_FACE_MESH',
+ ]):
+ raise ValueError(f'{enum_value} enum value is not supported in Gemini API.')
+
+
+ def _SubjectReferenceType_to_mldev_enum_validate(enum_value: any):
+ if enum_value in set([
+ 'SUBJECT_TYPE_DEFAULT',
+ 'SUBJECT_TYPE_PERSON',
+ 'SUBJECT_TYPE_ANIMAL',
+ 'SUBJECT_TYPE_PRODUCT',
+ ]):
+ raise ValueError(f'{enum_value} enum value is not supported in Gemini API.')
+
+
+ def _EditMode_to_mldev_enum_validate(enum_value: any):
+ if enum_value in set([
+ 'EDIT_MODE_DEFAULT',
+ 'EDIT_MODE_INPAINT_REMOVAL',
+ 'EDIT_MODE_INPAINT_INSERTION',
+ 'EDIT_MODE_OUTPAINT',
+ 'EDIT_MODE_CONTROLLED_EDITING',
+ 'EDIT_MODE_STYLE',
+ 'EDIT_MODE_BGSWAP',
+ 'EDIT_MODE_PRODUCT_IMAGE',
+ ]):
+ raise ValueError(f'{enum_value} enum value is not supported in Gemini API.')
+
+
  def _Part_from_mldev(
  api_client: ApiClient,
  from_object: Union[dict, object],
@@ -3259,6 +3318,9 @@ def _Image_from_mldev(
  t.t_bytes(api_client, getv(from_object, ['bytesBase64Encoded'])),
  )

+ if getv(from_object, ['mimeType']) is not None:
+ setv(to_object, ['mime_type'], getv(from_object, ['mimeType']))
+
  return to_object


@@ -3278,6 +3340,9 @@ def _Image_from_vertex(
  t.t_bytes(api_client, getv(from_object, ['bytesBase64Encoded'])),
  )

+ if getv(from_object, ['mimeType']) is not None:
+ setv(to_object, ['mime_type'], getv(from_object, ['mimeType']))
+
  return to_object


@@ -3327,7 +3392,7 @@ def _GeneratedImage_from_vertex(
  return to_object


- def _GenerateImageResponse_from_mldev(
+ def _GenerateImagesResponse_from_mldev(
  api_client: ApiClient,
  from_object: Union[dict, object],
  parent_object: dict = None,
@@ -3346,7 +3411,7 @@ def _GenerateImageResponse_from_mldev(
  return to_object


- def _GenerateImageResponse_from_vertex(
+ def _GenerateImagesResponse_from_vertex(
  api_client: ApiClient,
  from_object: Union[dict, object],
  parent_object: dict = None,
@@ -3726,7 +3791,7 @@ def _ComputeTokensResponse_from_vertex(
  return to_object


- class Models(_common.BaseModule):
+ class Models(_api_module.BaseModule):

  def _generate_content(
  self,
@@ -3755,8 +3820,14 @@ class Models(_common.BaseModule):
  if query_params:
  path = f'{path}?{urlencode(query_params)}'
  # TODO: remove the hack that pops config.
- config = request_dict.pop('config', None)
- http_options = config.pop('httpOptions', None) if config else None
+ request_dict.pop('config', None)
+
+ http_options = None
+ if isinstance(config, dict):
+ http_options = config.get('http_options', None)
+ elif hasattr(config, 'http_options'):
+ http_options = config.http_options
+
  request_dict = _common.convert_to_dict(request_dict)
  request_dict = _common.encode_unserializable_types(request_dict)

@@ -3774,7 +3845,7 @@ class Models(_common.BaseModule):
  )

  return_value = types.GenerateContentResponse._from_response(
- response_dict, parameter_model
+ response=response_dict, kwargs=parameter_model
  )
  self._api_client._verify_response(return_value)
  return return_value
@@ -3810,8 +3881,14 @@ class Models(_common.BaseModule):
  if query_params:
  path = f'{path}?{urlencode(query_params)}'
  # TODO: remove the hack that pops config.
- config = request_dict.pop('config', None)
- http_options = config.pop('httpOptions', None) if config else None
+ request_dict.pop('config', None)
+
+ http_options = None
+ if isinstance(config, dict):
+ http_options = config.get('http_options', None)
+ elif hasattr(config, 'http_options'):
+ http_options = config.http_options
+
  request_dict = _common.convert_to_dict(request_dict)
  request_dict = _common.encode_unserializable_types(request_dict)

@@ -3829,7 +3906,7 @@ class Models(_common.BaseModule):
  )

  return_value = types.GenerateContentResponse._from_response(
- response_dict, parameter_model
+ response=response_dict, kwargs=parameter_model
  )
  self._api_client._verify_response(return_value)
  yield return_value
@@ -3884,8 +3961,14 @@ class Models(_common.BaseModule):
  if query_params:
  path = f'{path}?{urlencode(query_params)}'
  # TODO: remove the hack that pops config.
- config = request_dict.pop('config', None)
- http_options = config.pop('httpOptions', None) if config else None
+ request_dict.pop('config', None)
+
+ http_options = None
+ if isinstance(config, dict):
+ http_options = config.get('http_options', None)
+ elif hasattr(config, 'http_options'):
+ http_options = config.http_options
+
  request_dict = _common.convert_to_dict(request_dict)
  request_dict = _common.encode_unserializable_types(request_dict)

@@ -3903,33 +3986,33 @@ class Models(_common.BaseModule):
  )

  return_value = types.EmbedContentResponse._from_response(
- response_dict, parameter_model
+ response=response_dict, kwargs=parameter_model
  )
  self._api_client._verify_response(return_value)
  return return_value

- def generate_image(
+ def generate_images(
  self,
  *,
  model: str,
  prompt: str,
- config: Optional[types.GenerateImageConfigOrDict] = None,
- ) -> types.GenerateImageResponse:
- """Generates an image based on a text description and configuration.
+ config: Optional[types.GenerateImagesConfigOrDict] = None,
+ ) -> types.GenerateImagesResponse:
+ """Generates images based on a text description and configuration.

  Args:
  model (str): The model to use.
- prompt (str): A text description of the image to generate.
- config (GenerateImageConfig): Configuration for generation.
+ prompt (str): A text description of the images to generate.
+ config (GenerateImagesConfig): Configuration for generation.

  Usage:

  .. code-block:: python

- response = client.models.generate_image(
+ response = client.models.generate_images(
  model='imagen-3.0-generate-001',
  prompt='Man with a dog',
- config=types.GenerateImageConfig(
+ config=types.GenerateImagesConfig(
  number_of_images= 1,
  include_rai_reason= True,
  )
@@ -3938,19 +4021,19 @@ class Models(_common.BaseModule):
  # Shows a man with a dog.
  """

- parameter_model = types._GenerateImageParameters(
+ parameter_model = types._GenerateImagesParameters(
  model=model,
  prompt=prompt,
  config=config,
  )

  if self._api_client.vertexai:
- request_dict = _GenerateImageParameters_to_vertex(
+ request_dict = _GenerateImagesParameters_to_vertex(
  self._api_client, parameter_model
  )
  path = '{model}:predict'.format_map(request_dict.get('_url'))
  else:
- request_dict = _GenerateImageParameters_to_mldev(
+ request_dict = _GenerateImagesParameters_to_mldev(
  self._api_client, parameter_model
  )
  path = '{model}:predict'.format_map(request_dict.get('_url'))
@@ -3958,8 +4041,14 @@ class Models(_common.BaseModule):
  if query_params:
  path = f'{path}?{urlencode(query_params)}'
  # TODO: remove the hack that pops config.
- config = request_dict.pop('config', None)
- http_options = config.pop('httpOptions', None) if config else None
+ request_dict.pop('config', None)
+
+ http_options = None
+ if isinstance(config, dict):
+ http_options = config.get('http_options', None)
+ elif hasattr(config, 'http_options'):
+ http_options = config.http_options
+
  request_dict = _common.convert_to_dict(request_dict)
  request_dict = _common.encode_unserializable_types(request_dict)

@@ -3968,16 +4057,16 @@ class Models(_common.BaseModule):
  )

  if self._api_client.vertexai:
- response_dict = _GenerateImageResponse_from_vertex(
+ response_dict = _GenerateImagesResponse_from_vertex(
  self._api_client, response_dict
  )
  else:
- response_dict = _GenerateImageResponse_from_mldev(
+ response_dict = _GenerateImagesResponse_from_mldev(
  self._api_client, response_dict
  )

- return_value = types.GenerateImageResponse._from_response(
- response_dict, parameter_model
+ return_value = types.GenerateImagesResponse._from_response(
+ response=response_dict, kwargs=parameter_model
  )
  self._api_client._verify_response(return_value)
  return return_value
@@ -4051,8 +4140,14 @@ class Models(_common.BaseModule):
  if query_params:
  path = f'{path}?{urlencode(query_params)}'
  # TODO: remove the hack that pops config.
- config = request_dict.pop('config', None)
- http_options = config.pop('httpOptions', None) if config else None
+ request_dict.pop('config', None)
+
+ http_options = None
+ if isinstance(config, dict):
+ http_options = config.get('http_options', None)
+ elif hasattr(config, 'http_options'):
+ http_options = config.http_options
+
  request_dict = _common.convert_to_dict(request_dict)
  request_dict = _common.encode_unserializable_types(request_dict)

@@ -4070,7 +4165,7 @@ class Models(_common.BaseModule):
  )

  return_value = types.EditImageResponse._from_response(
- response_dict, parameter_model
+ response=response_dict, kwargs=parameter_model
  )
  self._api_client._verify_response(return_value)
  return return_value
@@ -4111,8 +4206,14 @@ class Models(_common.BaseModule):
  if query_params:
  path = f'{path}?{urlencode(query_params)}'
  # TODO: remove the hack that pops config.
- config = request_dict.pop('config', None)
- http_options = config.pop('httpOptions', None) if config else None
+ request_dict.pop('config', None)
+
+ http_options = None
+ if isinstance(config, dict):
+ http_options = config.get('http_options', None)
+ elif hasattr(config, 'http_options'):
+ http_options = config.http_options
+
  request_dict = _common.convert_to_dict(request_dict)
  request_dict = _common.encode_unserializable_types(request_dict)

@@ -4130,14 +4231,17 @@ class Models(_common.BaseModule):
  )

  return_value = types.UpscaleImageResponse._from_response(
- response_dict, parameter_model
+ response=response_dict, kwargs=parameter_model
  )
  self._api_client._verify_response(return_value)
  return return_value

- def get(self, *, model: str) -> types.Model:
+ def get(
+ self, *, model: str, config: Optional[types.GetModelConfigOrDict] = None
+ ) -> types.Model:
  parameter_model = types._GetModelParameters(
  model=model,
+ config=config,
  )

  if self._api_client.vertexai:
@@ -4154,8 +4258,14 @@ class Models(_common.BaseModule):
  if query_params:
  path = f'{path}?{urlencode(query_params)}'
  # TODO: remove the hack that pops config.
- config = request_dict.pop('config', None)
- http_options = config.pop('httpOptions', None) if config else None
+ request_dict.pop('config', None)
+
+ http_options = None
+ if isinstance(config, dict):
+ http_options = config.get('http_options', None)
+ elif hasattr(config, 'http_options'):
+ http_options = config.http_options
+
  request_dict = _common.convert_to_dict(request_dict)
  request_dict = _common.encode_unserializable_types(request_dict)

@@ -4168,7 +4278,9 @@ class Models(_common.BaseModule):
  else:
  response_dict = _Model_from_mldev(self._api_client, response_dict)

- return_value = types.Model._from_response(response_dict, parameter_model)
+ return_value = types.Model._from_response(
+ response=response_dict, kwargs=parameter_model
+ )
  self._api_client._verify_response(return_value)
  return return_value

@@ -4193,8 +4305,14 @@ class Models(_common.BaseModule):
  if query_params:
  path = f'{path}?{urlencode(query_params)}'
  # TODO: remove the hack that pops config.
- config = request_dict.pop('config', None)
- http_options = config.pop('httpOptions', None) if config else None
+ request_dict.pop('config', None)
+
+ http_options = None
+ if isinstance(config, dict):
+ http_options = config.get('http_options', None)
+ elif hasattr(config, 'http_options'):
+ http_options = config.http_options
+
  request_dict = _common.convert_to_dict(request_dict)
  request_dict = _common.encode_unserializable_types(request_dict)

@@ -4212,7 +4330,7 @@ class Models(_common.BaseModule):
  )

  return_value = types.ListModelsResponse._from_response(
- response_dict, parameter_model
+ response=response_dict, kwargs=parameter_model
  )
  self._api_client._verify_response(return_value)
  return return_value
@@ -4242,8 +4360,14 @@ class Models(_common.BaseModule):
  if query_params:
  path = f'{path}?{urlencode(query_params)}'
  # TODO: remove the hack that pops config.
- config = request_dict.pop('config', None)
- http_options = config.pop('httpOptions', None) if config else None
+ request_dict.pop('config', None)
+
+ http_options = None
+ if isinstance(config, dict):
+ http_options = config.get('http_options', None)
+ elif hasattr(config, 'http_options'):
+ http_options = config.http_options
+
  request_dict = _common.convert_to_dict(request_dict)
  request_dict = _common.encode_unserializable_types(request_dict)

@@ -4256,13 +4380,21 @@ class Models(_common.BaseModule):
  else:
  response_dict = _Model_from_mldev(self._api_client, response_dict)

- return_value = types.Model._from_response(response_dict, parameter_model)
+ return_value = types.Model._from_response(
+ response=response_dict, kwargs=parameter_model
+ )
  self._api_client._verify_response(return_value)
  return return_value

- def delete(self, *, model: str) -> types.DeleteModelResponse:
+ def delete(
+ self,
+ *,
+ model: str,
+ config: Optional[types.DeleteModelConfigOrDict] = None,
+ ) -> types.DeleteModelResponse:
  parameter_model = types._DeleteModelParameters(
  model=model,
+ config=config,
  )

  if self._api_client.vertexai:
@@ -4279,8 +4411,14 @@ class Models(_common.BaseModule):
  if query_params:
  path = f'{path}?{urlencode(query_params)}'
  # TODO: remove the hack that pops config.
- config = request_dict.pop('config', None)
- http_options = config.pop('httpOptions', None) if config else None
+ request_dict.pop('config', None)
+
+ http_options = None
+ if isinstance(config, dict):
+ http_options = config.get('http_options', None)
+ elif hasattr(config, 'http_options'):
+ http_options = config.http_options
+
  request_dict = _common.convert_to_dict(request_dict)
  request_dict = _common.encode_unserializable_types(request_dict)

@@ -4298,7 +4436,7 @@ class Models(_common.BaseModule):
  )

  return_value = types.DeleteModelResponse._from_response(
- response_dict, parameter_model
+ response=response_dict, kwargs=parameter_model
  )
  self._api_client._verify_response(return_value)
  return return_value
@@ -4350,8 +4488,14 @@ class Models(_common.BaseModule):
  if query_params:
  path = f'{path}?{urlencode(query_params)}'
  # TODO: remove the hack that pops config.
- config = request_dict.pop('config', None)
- http_options = config.pop('httpOptions', None) if config else None
+ request_dict.pop('config', None)
+
+ http_options = None
+ if isinstance(config, dict):
+ http_options = config.get('http_options', None)
+ elif hasattr(config, 'http_options'):
+ http_options = config.http_options
+
  request_dict = _common.convert_to_dict(request_dict)
  request_dict = _common.encode_unserializable_types(request_dict)

@@ -4369,7 +4513,7 @@ class Models(_common.BaseModule):
  )

  return_value = types.CountTokensResponse._from_response(
- response_dict, parameter_model
+ response=response_dict, kwargs=parameter_model
  )
  self._api_client._verify_response(return_value)
  return return_value
@@ -4421,8 +4565,14 @@ class Models(_common.BaseModule):
  if query_params:
  path = f'{path}?{urlencode(query_params)}'
  # TODO: remove the hack that pops config.
- config = request_dict.pop('config', None)
- http_options = config.pop('httpOptions', None) if config else None
+ request_dict.pop('config', None)
+
+ http_options = None
+ if isinstance(config, dict):
+ http_options = config.get('http_options', None)
+ elif hasattr(config, 'http_options'):
+ http_options = config.http_options
+
  request_dict = _common.convert_to_dict(request_dict)
  request_dict = _common.encode_unserializable_types(request_dict)

@@ -4440,7 +4590,7 @@ class Models(_common.BaseModule):
  )

  return_value = types.ComputeTokensResponse._from_response(
- response_dict, parameter_model
+ response=response_dict, kwargs=parameter_model
  )
  self._api_client._verify_response(return_value)
  return return_value
@@ -4454,6 +4604,24 @@ class Models(_common.BaseModule):
  ) -> types.GenerateContentResponse:
  """Makes an API request to generate content using a model.

+ For the `model` parameter, supported format for Vertex AI API includes:
+ - the Gemini model ID, for example: 'gemini-1.5-flash-002'
+ - the full resource name starts with 'projects/', for example:
+ 'projects/my-project-id/locations/us-central1/publishers/google/models/gemini-1.5-flash-002'
+ - the partial resource name with 'publishers/', for example:
+ 'publishers/google/models/gemini-1.5-flash-002' or
+ 'publishers/meta/models/llama-3.1-405b-instruct-maas'
+ - `/` separated publisher and model name, for example:
+ 'google/gemini-1.5-flash-002' or 'meta/llama-3.1-405b-instruct-maas'
+
+ For the `model` parameter, supported format for Gemini API includes:
+ - the Gemini model ID, for example: 'gemini-1.5-flash-002'
+ - the model name starts with 'models/', for example:
+ 'models/gemini-1.5-flash-002'
+ - if you would like to use a tuned model, the model name starts with
+ 'tunedModels/', for example:
+ 'tunedModels/1234567890123456789'
+
  Some models support multimodal input and output.

  Usage:
@@ -4626,13 +4794,7 @@ class Models(_common.BaseModule):
  )
  if self._api_client.vertexai:
  config = config.copy()
- if config.query_base:
- http_options = (
- config.http_options if config.http_options else HttpOptionsDict()
- )
- http_options['skip_project_and_location_in_path'] = True
- config.http_options = http_options
- else:
+ if not config.query_base:
  # Filter for tuning jobs artifacts by labels.
  filter_value = config.filter
  config.filter = (
@@ -4650,7 +4812,7 @@ class Models(_common.BaseModule):
  )


- class AsyncModels(_common.BaseModule):
+ class AsyncModels(_api_module.BaseModule):

  async def _generate_content(
  self,
@@ -4679,8 +4841,14 @@ class AsyncModels(_common.BaseModule):
  if query_params:
  path = f'{path}?{urlencode(query_params)}'
  # TODO: remove the hack that pops config.
- config = request_dict.pop('config', None)
- http_options = config.pop('httpOptions', None) if config else None
+ request_dict.pop('config', None)
+
+ http_options = None
+ if isinstance(config, dict):
+ http_options = config.get('http_options', None)
+ elif hasattr(config, 'http_options'):
+ http_options = config.http_options
+
  request_dict = _common.convert_to_dict(request_dict)
  request_dict = _common.encode_unserializable_types(request_dict)

@@ -4698,7 +4866,7 @@ class AsyncModels(_common.BaseModule):
  )

  return_value = types.GenerateContentResponse._from_response(
- response_dict, parameter_model
+ response=response_dict, kwargs=parameter_model
  )
  self._api_client._verify_response(return_value)
  return return_value
@@ -4709,7 +4877,7 @@ class AsyncModels(_common.BaseModule):
  model: str,
  contents: Union[types.ContentListUnion, types.ContentListUnionDict],
  config: Optional[types.GenerateContentConfigOrDict] = None,
- ) -> AsyncIterator[types.GenerateContentResponse]:
+ ) -> Awaitable[AsyncIterator[types.GenerateContentResponse]]:
  parameter_model = types._GenerateContentParameters(
  model=model,
  contents=contents,
@@ -4734,29 +4902,40 @@ class AsyncModels(_common.BaseModule):
  if query_params:
  path = f'{path}?{urlencode(query_params)}'
  # TODO: remove the hack that pops config.
- config = request_dict.pop('config', None)
- http_options = config.pop('httpOptions', None) if config else None
+ request_dict.pop('config', None)
+
+ http_options = None
+ if isinstance(config, dict):
+ http_options = config.get('http_options', None)
+ elif hasattr(config, 'http_options'):
+ http_options = config.http_options
+
  request_dict = _common.convert_to_dict(request_dict)
  request_dict = _common.encode_unserializable_types(request_dict)

- async for response_dict in self._api_client.async_request_streamed(
+ response_stream = await self._api_client.async_request_streamed(
  'post', path, request_dict, http_options
- ):
+ )

- if self._api_client.vertexai:
- response_dict = _GenerateContentResponse_from_vertex(
- self._api_client, response_dict
- )
- else:
- response_dict = _GenerateContentResponse_from_mldev(
- self._api_client, response_dict
+ async def async_generator():
+ async for response_dict in response_stream:
+
+ if self._api_client.vertexai:
+ response_dict = _GenerateContentResponse_from_vertex(
+ self._api_client, response_dict
+ )
+ else:
+ response_dict = _GenerateContentResponse_from_mldev(
+ self._api_client, response_dict
+ )
+
+ return_value = types.GenerateContentResponse._from_response(
+ response=response_dict, kwargs=parameter_model
  )
+ self._api_client._verify_response(return_value)
+ yield return_value

- return_value = types.GenerateContentResponse._from_response(
- response_dict, parameter_model
- )
- self._api_client._verify_response(return_value)
- yield return_value
+ return async_generator()

  async def embed_content(
  self,
@@ -4808,8 +4987,14 @@ class AsyncModels(_common.BaseModule):
  if query_params:
  path = f'{path}?{urlencode(query_params)}'
  # TODO: remove the hack that pops config.
- config = request_dict.pop('config', None)
- http_options = config.pop('httpOptions', None) if config else None
+ request_dict.pop('config', None)
+
+ http_options = None
+ if isinstance(config, dict):
+ http_options = config.get('http_options', None)
+ elif hasattr(config, 'http_options'):
+ http_options = config.http_options
+
  request_dict = _common.convert_to_dict(request_dict)
  request_dict = _common.encode_unserializable_types(request_dict)

@@ -4827,33 +5012,33 @@ class AsyncModels(_common.BaseModule):
     )
 
     return_value = types.EmbedContentResponse._from_response(
-        response_dict, parameter_model
+        response=response_dict, kwargs=parameter_model
     )
     self._api_client._verify_response(return_value)
     return return_value
 
-  async def generate_image(
+  async def generate_images(
       self,
       *,
       model: str,
       prompt: str,
-      config: Optional[types.GenerateImageConfigOrDict] = None,
-  ) -> types.GenerateImageResponse:
-    """Generates an image based on a text description and configuration.
+      config: Optional[types.GenerateImagesConfigOrDict] = None,
+  ) -> types.GenerateImagesResponse:
+    """Generates images based on a text description and configuration.
 
     Args:
       model (str): The model to use.
-      prompt (str): A text description of the image to generate.
-      config (GenerateImageConfig): Configuration for generation.
+      prompt (str): A text description of the images to generate.
+      config (GenerateImagesConfig): Configuration for generation.
 
     Usage:
 
     .. code-block:: python
 
-      response = client.models.generate_image(
+      response = client.models.generate_images(
          model='imagen-3.0-generate-001',
          prompt='Man with a dog',
-         config=types.GenerateImageConfig(
+         config=types.GenerateImagesConfig(
              number_of_images= 1,
              include_rai_reason= True,
          )
@@ -4862,19 +5047,19 @@ class AsyncModels(_common.BaseModule):
       # Shows a man with a dog.
     """
 
-    parameter_model = types._GenerateImageParameters(
+    parameter_model = types._GenerateImagesParameters(
         model=model,
         prompt=prompt,
         config=config,
     )
 
     if self._api_client.vertexai:
-      request_dict = _GenerateImageParameters_to_vertex(
+      request_dict = _GenerateImagesParameters_to_vertex(
           self._api_client, parameter_model
       )
       path = '{model}:predict'.format_map(request_dict.get('_url'))
     else:
-      request_dict = _GenerateImageParameters_to_mldev(
+      request_dict = _GenerateImagesParameters_to_mldev(
           self._api_client, parameter_model
       )
       path = '{model}:predict'.format_map(request_dict.get('_url'))
@@ -4882,8 +5067,14 @@ class AsyncModels(_common.BaseModule):
     if query_params:
       path = f'{path}?{urlencode(query_params)}'
     # TODO: remove the hack that pops config.
-    config = request_dict.pop('config', None)
-    http_options = config.pop('httpOptions', None) if config else None
+    request_dict.pop('config', None)
+
+    http_options = None
+    if isinstance(config, dict):
+      http_options = config.get('http_options', None)
+    elif hasattr(config, 'http_options'):
+      http_options = config.http_options
+
     request_dict = _common.convert_to_dict(request_dict)
     request_dict = _common.encode_unserializable_types(request_dict)
 
@@ -4892,16 +5083,16 @@ class AsyncModels(_common.BaseModule):
     )
 
     if self._api_client.vertexai:
-      response_dict = _GenerateImageResponse_from_vertex(
+      response_dict = _GenerateImagesResponse_from_vertex(
          self._api_client, response_dict
      )
    else:
-      response_dict = _GenerateImageResponse_from_mldev(
+      response_dict = _GenerateImagesResponse_from_mldev(
          self._api_client, response_dict
      )
 
-    return_value = types.GenerateImageResponse._from_response(
-        response_dict, parameter_model
+    return_value = types.GenerateImagesResponse._from_response(
+        response=response_dict, kwargs=parameter_model
     )
     self._api_client._verify_response(return_value)
     return return_value
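
Note on the three hunks above: the async image generation surface moves from singular to plural naming. generate_image becomes generate_images, and GenerateImageConfig / GenerateImageResponse (plus the internal _GenerateImageParameters converters) become the GenerateImages* equivalents. A minimal migration sketch; it assumes an existing async genai.Client named client inside an async function, and the response-shape access on the last line is an assumption rather than something shown in this diff.

.. code-block:: python

    from google.genai import types

    # 0.6.0: client.models.generate_image(..., config=types.GenerateImageConfig(...))
    # 0.7.0, inside an async function with an existing client:
    response = await client.aio.models.generate_images(
        model='imagen-3.0-generate-001',
        prompt='Man with a dog',
        config=types.GenerateImagesConfig(
            number_of_images=1,
            include_rai_reason=True,
        ),
    )
    image = response.generated_images[0].image  # assumed response shape
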
@@ -4975,8 +5166,14 @@ class AsyncModels(_common.BaseModule):
     if query_params:
       path = f'{path}?{urlencode(query_params)}'
     # TODO: remove the hack that pops config.
-    config = request_dict.pop('config', None)
-    http_options = config.pop('httpOptions', None) if config else None
+    request_dict.pop('config', None)
+
+    http_options = None
+    if isinstance(config, dict):
+      http_options = config.get('http_options', None)
+    elif hasattr(config, 'http_options'):
+      http_options = config.http_options
+
     request_dict = _common.convert_to_dict(request_dict)
     request_dict = _common.encode_unserializable_types(request_dict)
 
@@ -4994,7 +5191,7 @@ class AsyncModels(_common.BaseModule):
     )
 
     return_value = types.EditImageResponse._from_response(
-        response_dict, parameter_model
+        response=response_dict, kwargs=parameter_model
     )
     self._api_client._verify_response(return_value)
     return return_value
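
Note on a pattern that recurs throughout this file: the private _from_response constructor is now called with keyword arguments (response=, kwargs=) instead of positionally. This is internal to the generated module; the schematic call shape, copied from the hunk above, is:

.. code-block:: python

    # internal helper; shown only to illustrate the new keyword-argument form
    return_value = types.EditImageResponse._from_response(
        response=response_dict, kwargs=parameter_model
    )
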
@@ -5035,8 +5232,14 @@ class AsyncModels(_common.BaseModule):
     if query_params:
       path = f'{path}?{urlencode(query_params)}'
     # TODO: remove the hack that pops config.
-    config = request_dict.pop('config', None)
-    http_options = config.pop('httpOptions', None) if config else None
+    request_dict.pop('config', None)
+
+    http_options = None
+    if isinstance(config, dict):
+      http_options = config.get('http_options', None)
+    elif hasattr(config, 'http_options'):
+      http_options = config.http_options
+
     request_dict = _common.convert_to_dict(request_dict)
     request_dict = _common.encode_unserializable_types(request_dict)
 
@@ -5054,14 +5257,17 @@ class AsyncModels(_common.BaseModule):
     )
 
     return_value = types.UpscaleImageResponse._from_response(
-        response_dict, parameter_model
+        response=response_dict, kwargs=parameter_model
     )
     self._api_client._verify_response(return_value)
     return return_value
 
-  async def get(self, *, model: str) -> types.Model:
+  async def get(
+      self, *, model: str, config: Optional[types.GetModelConfigOrDict] = None
+  ) -> types.Model:
     parameter_model = types._GetModelParameters(
         model=model,
+        config=config,
     )
 
     if self._api_client.vertexai:
@@ -5078,8 +5284,14 @@ class AsyncModels(_common.BaseModule):
     if query_params:
       path = f'{path}?{urlencode(query_params)}'
     # TODO: remove the hack that pops config.
-    config = request_dict.pop('config', None)
-    http_options = config.pop('httpOptions', None) if config else None
+    request_dict.pop('config', None)
+
+    http_options = None
+    if isinstance(config, dict):
+      http_options = config.get('http_options', None)
+    elif hasattr(config, 'http_options'):
+      http_options = config.http_options
+
     request_dict = _common.convert_to_dict(request_dict)
     request_dict = _common.encode_unserializable_types(request_dict)
 
@@ -5092,7 +5304,9 @@ class AsyncModels(_common.BaseModule):
     else:
       response_dict = _Model_from_mldev(self._api_client, response_dict)
 
-    return_value = types.Model._from_response(response_dict, parameter_model)
+    return_value = types.Model._from_response(
+        response=response_dict, kwargs=parameter_model
+    )
     self._api_client._verify_response(return_value)
     return return_value
 
@@ -5117,8 +5331,14 @@ class AsyncModels(_common.BaseModule):
     if query_params:
       path = f'{path}?{urlencode(query_params)}'
     # TODO: remove the hack that pops config.
-    config = request_dict.pop('config', None)
-    http_options = config.pop('httpOptions', None) if config else None
+    request_dict.pop('config', None)
+
+    http_options = None
+    if isinstance(config, dict):
+      http_options = config.get('http_options', None)
+    elif hasattr(config, 'http_options'):
+      http_options = config.http_options
+
     request_dict = _common.convert_to_dict(request_dict)
     request_dict = _common.encode_unserializable_types(request_dict)
 
@@ -5136,7 +5356,7 @@ class AsyncModels(_common.BaseModule):
     )
 
     return_value = types.ListModelsResponse._from_response(
-        response_dict, parameter_model
+        response=response_dict, kwargs=parameter_model
     )
     self._api_client._verify_response(return_value)
     return return_value
@@ -5166,8 +5386,14 @@ class AsyncModels(_common.BaseModule):
     if query_params:
       path = f'{path}?{urlencode(query_params)}'
     # TODO: remove the hack that pops config.
-    config = request_dict.pop('config', None)
-    http_options = config.pop('httpOptions', None) if config else None
+    request_dict.pop('config', None)
+
+    http_options = None
+    if isinstance(config, dict):
+      http_options = config.get('http_options', None)
+    elif hasattr(config, 'http_options'):
+      http_options = config.http_options
+
     request_dict = _common.convert_to_dict(request_dict)
     request_dict = _common.encode_unserializable_types(request_dict)
 
@@ -5180,13 +5406,21 @@ class AsyncModels(_common.BaseModule):
     else:
       response_dict = _Model_from_mldev(self._api_client, response_dict)
 
-    return_value = types.Model._from_response(response_dict, parameter_model)
+    return_value = types.Model._from_response(
+        response=response_dict, kwargs=parameter_model
+    )
     self._api_client._verify_response(return_value)
     return return_value
 
-  async def delete(self, *, model: str) -> types.DeleteModelResponse:
+  async def delete(
+      self,
+      *,
+      model: str,
+      config: Optional[types.DeleteModelConfigOrDict] = None,
+  ) -> types.DeleteModelResponse:
     parameter_model = types._DeleteModelParameters(
         model=model,
+        config=config,
    )
 
    if self._api_client.vertexai:
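
Note on the hunks above: the async get and delete methods gain an optional config parameter (GetModelConfigOrDict / DeleteModelConfigOrDict), which is forwarded into the parameter model and used for the per-request http_options lookup shown earlier. A minimal sketch; it assumes an existing async genai.Client named client inside an async function, and the model names and config keys are illustrative.

.. code-block:: python

    # inside an async function, with an existing genai.Client named client
    model_info = await client.aio.models.get(
        model='gemini-1.5-flash',  # illustrative
        config={'http_options': {'timeout': 10000}},  # assumed option key
    )
    await client.aio.models.delete(model='tunedModels/my-tuned-model')  # illustrative name
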
@@ -5203,8 +5437,14 @@ class AsyncModels(_common.BaseModule):
     if query_params:
       path = f'{path}?{urlencode(query_params)}'
     # TODO: remove the hack that pops config.
-    config = request_dict.pop('config', None)
-    http_options = config.pop('httpOptions', None) if config else None
+    request_dict.pop('config', None)
+
+    http_options = None
+    if isinstance(config, dict):
+      http_options = config.get('http_options', None)
+    elif hasattr(config, 'http_options'):
+      http_options = config.http_options
+
     request_dict = _common.convert_to_dict(request_dict)
     request_dict = _common.encode_unserializable_types(request_dict)
 
@@ -5222,7 +5462,7 @@ class AsyncModels(_common.BaseModule):
     )
 
     return_value = types.DeleteModelResponse._from_response(
-        response_dict, parameter_model
+        response=response_dict, kwargs=parameter_model
     )
     self._api_client._verify_response(return_value)
     return return_value
@@ -5274,8 +5514,14 @@ class AsyncModels(_common.BaseModule):
     if query_params:
       path = f'{path}?{urlencode(query_params)}'
     # TODO: remove the hack that pops config.
-    config = request_dict.pop('config', None)
-    http_options = config.pop('httpOptions', None) if config else None
+    request_dict.pop('config', None)
+
+    http_options = None
+    if isinstance(config, dict):
+      http_options = config.get('http_options', None)
+    elif hasattr(config, 'http_options'):
+      http_options = config.http_options
+
     request_dict = _common.convert_to_dict(request_dict)
     request_dict = _common.encode_unserializable_types(request_dict)
 
@@ -5293,7 +5539,7 @@ class AsyncModels(_common.BaseModule):
     )
 
     return_value = types.CountTokensResponse._from_response(
-        response_dict, parameter_model
+        response=response_dict, kwargs=parameter_model
     )
     self._api_client._verify_response(return_value)
     return return_value
@@ -5345,8 +5591,14 @@ class AsyncModels(_common.BaseModule):
     if query_params:
       path = f'{path}?{urlencode(query_params)}'
     # TODO: remove the hack that pops config.
-    config = request_dict.pop('config', None)
-    http_options = config.pop('httpOptions', None) if config else None
+    request_dict.pop('config', None)
+
+    http_options = None
+    if isinstance(config, dict):
+      http_options = config.get('http_options', None)
+    elif hasattr(config, 'http_options'):
+      http_options = config.http_options
+
     request_dict = _common.convert_to_dict(request_dict)
     request_dict = _common.encode_unserializable_types(request_dict)
 
@@ -5364,7 +5616,7 @@ class AsyncModels(_common.BaseModule):
     )
 
     return_value = types.ComputeTokensResponse._from_response(
-        response_dict, parameter_model
+        response=response_dict, kwargs=parameter_model
     )
     self._api_client._verify_response(return_value)
     return return_value
@@ -5487,13 +5739,7 @@ class AsyncModels(_common.BaseModule):
     )
     if self._api_client.vertexai:
       config = config.copy()
-      if config.query_base:
-        http_options = (
-            config.http_options if config.http_options else HttpOptionsDict()
-        )
-        http_options['skip_project_and_location_in_path'] = True
-        config.http_options = http_options
-      else:
+      if not config.query_base:
         # Filter for tuning jobs artifacts by labels.
         filter_value = config.filter
         config.filter = (
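
Note on the final hunk above: in the Vertex AI branch of model listing, setting query_base no longer injects a skip_project_and_location_in_path HTTP option; instead, the tuned-model label filter is simply skipped when query_base is set. A minimal sketch of the two listing modes; it assumes the method is exposed as client.aio.models.list and returns an async pager, neither of which is shown in this diff.

.. code-block:: python

    # inside an async function, with an existing genai.Client named client
    # default: tuned models, with the label filter applied as in the hunk above
    async for model in await client.aio.models.list():
      print(model.name)

    # base models: query_base skips the tuned-model filter
    async for model in await client.aio.models.list(config={'query_base': True}):
      print(model.name)
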