google-genai 0.4.0__py3-none-any.whl → 0.5.0__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registries. It is provided for informational purposes only.
google/genai/caches.py CHANGED
@@ -13,6 +13,8 @@
  # limitations under the License.
  #

+ # Code generated by the Google Gen AI SDK generator DO NOT EDIT.
+
  from typing import Optional, Union
  from urllib.parse import urlencode
  from . import _common
@@ -1272,14 +1274,14 @@ class Caches(_common.BaseModule):
  config=config,
  )

- if self.api_client.vertexai:
+ if self._api_client.vertexai:
  request_dict = _CreateCachedContentParameters_to_vertex(
- self.api_client, parameter_model
+ self._api_client, parameter_model
  )
  path = 'cachedContents'.format_map(request_dict.get('_url'))
  else:
  request_dict = _CreateCachedContentParameters_to_mldev(
- self.api_client, parameter_model
+ self._api_client, parameter_model
  )
  path = 'cachedContents'.format_map(request_dict.get('_url'))
  query_params = request_dict.get('_query')
@@ -1291,19 +1293,21 @@ class Caches(_common.BaseModule):
  request_dict = _common.convert_to_dict(request_dict)
  request_dict = _common.apply_base64_encoding(request_dict)

- response_dict = self.api_client.request(
+ response_dict = self._api_client.request(
  'post', path, request_dict, http_options
  )

- if self.api_client.vertexai:
- response_dict = _CachedContent_from_vertex(self.api_client, response_dict)
+ if self._api_client.vertexai:
+ response_dict = _CachedContent_from_vertex(
+ self._api_client, response_dict
+ )
  else:
- response_dict = _CachedContent_from_mldev(self.api_client, response_dict)
+ response_dict = _CachedContent_from_mldev(self._api_client, response_dict)

  return_value = types.CachedContent._from_response(
  response_dict, parameter_model
  )
- self.api_client._verify_response(return_value)
+ self._api_client._verify_response(return_value)
  return return_value

  def get(
@@ -1325,14 +1329,14 @@ class Caches(_common.BaseModule):
  config=config,
  )

- if self.api_client.vertexai:
+ if self._api_client.vertexai:
  request_dict = _GetCachedContentParameters_to_vertex(
- self.api_client, parameter_model
+ self._api_client, parameter_model
  )
  path = '{name}'.format_map(request_dict.get('_url'))
  else:
  request_dict = _GetCachedContentParameters_to_mldev(
- self.api_client, parameter_model
+ self._api_client, parameter_model
  )
  path = '{name}'.format_map(request_dict.get('_url'))
  query_params = request_dict.get('_query')
@@ -1344,19 +1348,21 @@ class Caches(_common.BaseModule):
  request_dict = _common.convert_to_dict(request_dict)
  request_dict = _common.apply_base64_encoding(request_dict)

- response_dict = self.api_client.request(
+ response_dict = self._api_client.request(
  'get', path, request_dict, http_options
  )

- if self.api_client.vertexai:
- response_dict = _CachedContent_from_vertex(self.api_client, response_dict)
+ if self._api_client.vertexai:
+ response_dict = _CachedContent_from_vertex(
+ self._api_client, response_dict
+ )
  else:
- response_dict = _CachedContent_from_mldev(self.api_client, response_dict)
+ response_dict = _CachedContent_from_mldev(self._api_client, response_dict)

  return_value = types.CachedContent._from_response(
  response_dict, parameter_model
  )
- self.api_client._verify_response(return_value)
+ self._api_client._verify_response(return_value)
  return return_value

  def delete(
@@ -1380,14 +1386,14 @@ class Caches(_common.BaseModule):
  config=config,
  )

- if self.api_client.vertexai:
+ if self._api_client.vertexai:
  request_dict = _DeleteCachedContentParameters_to_vertex(
- self.api_client, parameter_model
+ self._api_client, parameter_model
  )
  path = '{name}'.format_map(request_dict.get('_url'))
  else:
  request_dict = _DeleteCachedContentParameters_to_mldev(
- self.api_client, parameter_model
+ self._api_client, parameter_model
  )
  path = '{name}'.format_map(request_dict.get('_url'))
  query_params = request_dict.get('_query')
@@ -1399,23 +1405,23 @@ class Caches(_common.BaseModule):
  request_dict = _common.convert_to_dict(request_dict)
  request_dict = _common.apply_base64_encoding(request_dict)

- response_dict = self.api_client.request(
+ response_dict = self._api_client.request(
  'delete', path, request_dict, http_options
  )

- if self.api_client.vertexai:
+ if self._api_client.vertexai:
  response_dict = _DeleteCachedContentResponse_from_vertex(
- self.api_client, response_dict
+ self._api_client, response_dict
  )
  else:
  response_dict = _DeleteCachedContentResponse_from_mldev(
- self.api_client, response_dict
+ self._api_client, response_dict
  )

  return_value = types.DeleteCachedContentResponse._from_response(
  response_dict, parameter_model
  )
- self.api_client._verify_response(return_value)
+ self._api_client._verify_response(return_value)
  return return_value

  def update(
@@ -1441,14 +1447,14 @@ class Caches(_common.BaseModule):
  config=config,
  )

- if self.api_client.vertexai:
+ if self._api_client.vertexai:
  request_dict = _UpdateCachedContentParameters_to_vertex(
- self.api_client, parameter_model
+ self._api_client, parameter_model
  )
  path = '{name}'.format_map(request_dict.get('_url'))
  else:
  request_dict = _UpdateCachedContentParameters_to_mldev(
- self.api_client, parameter_model
+ self._api_client, parameter_model
  )
  path = '{name}'.format_map(request_dict.get('_url'))
  query_params = request_dict.get('_query')
@@ -1460,19 +1466,21 @@ class Caches(_common.BaseModule):
  request_dict = _common.convert_to_dict(request_dict)
  request_dict = _common.apply_base64_encoding(request_dict)

- response_dict = self.api_client.request(
+ response_dict = self._api_client.request(
  'patch', path, request_dict, http_options
  )

- if self.api_client.vertexai:
- response_dict = _CachedContent_from_vertex(self.api_client, response_dict)
+ if self._api_client.vertexai:
+ response_dict = _CachedContent_from_vertex(
+ self._api_client, response_dict
+ )
  else:
- response_dict = _CachedContent_from_mldev(self.api_client, response_dict)
+ response_dict = _CachedContent_from_mldev(self._api_client, response_dict)

  return_value = types.CachedContent._from_response(
  response_dict, parameter_model
  )
- self.api_client._verify_response(return_value)
+ self._api_client._verify_response(return_value)
  return return_value

  def _list(
@@ -1491,14 +1499,14 @@ class Caches(_common.BaseModule):
  config=config,
  )

- if self.api_client.vertexai:
+ if self._api_client.vertexai:
  request_dict = _ListCachedContentsParameters_to_vertex(
- self.api_client, parameter_model
+ self._api_client, parameter_model
  )
  path = 'cachedContents'.format_map(request_dict.get('_url'))
  else:
  request_dict = _ListCachedContentsParameters_to_mldev(
- self.api_client, parameter_model
+ self._api_client, parameter_model
  )
  path = 'cachedContents'.format_map(request_dict.get('_url'))
  query_params = request_dict.get('_query')
@@ -1510,23 +1518,23 @@ class Caches(_common.BaseModule):
  request_dict = _common.convert_to_dict(request_dict)
  request_dict = _common.apply_base64_encoding(request_dict)

- response_dict = self.api_client.request(
+ response_dict = self._api_client.request(
  'get', path, request_dict, http_options
  )

- if self.api_client.vertexai:
+ if self._api_client.vertexai:
  response_dict = _ListCachedContentsResponse_from_vertex(
- self.api_client, response_dict
+ self._api_client, response_dict
  )
  else:
  response_dict = _ListCachedContentsResponse_from_mldev(
- self.api_client, response_dict
+ self._api_client, response_dict
  )

  return_value = types.ListCachedContentsResponse._from_response(
  response_dict, parameter_model
  )
- self.api_client._verify_response(return_value)
+ self._api_client._verify_response(return_value)
  return return_value

  def list(
@@ -1574,14 +1582,14 @@ class AsyncCaches(_common.BaseModule):
  config=config,
  )

- if self.api_client.vertexai:
+ if self._api_client.vertexai:
  request_dict = _CreateCachedContentParameters_to_vertex(
- self.api_client, parameter_model
+ self._api_client, parameter_model
  )
  path = 'cachedContents'.format_map(request_dict.get('_url'))
  else:
  request_dict = _CreateCachedContentParameters_to_mldev(
- self.api_client, parameter_model
+ self._api_client, parameter_model
  )
  path = 'cachedContents'.format_map(request_dict.get('_url'))
  query_params = request_dict.get('_query')
@@ -1593,19 +1601,21 @@ class AsyncCaches(_common.BaseModule):
  request_dict = _common.convert_to_dict(request_dict)
  request_dict = _common.apply_base64_encoding(request_dict)

- response_dict = await self.api_client.async_request(
+ response_dict = await self._api_client.async_request(
  'post', path, request_dict, http_options
  )

- if self.api_client.vertexai:
- response_dict = _CachedContent_from_vertex(self.api_client, response_dict)
+ if self._api_client.vertexai:
+ response_dict = _CachedContent_from_vertex(
+ self._api_client, response_dict
+ )
  else:
- response_dict = _CachedContent_from_mldev(self.api_client, response_dict)
+ response_dict = _CachedContent_from_mldev(self._api_client, response_dict)

  return_value = types.CachedContent._from_response(
  response_dict, parameter_model
  )
- self.api_client._verify_response(return_value)
+ self._api_client._verify_response(return_value)
  return return_value

  async def get(
@@ -1627,14 +1637,14 @@ class AsyncCaches(_common.BaseModule):
  config=config,
  )

- if self.api_client.vertexai:
+ if self._api_client.vertexai:
  request_dict = _GetCachedContentParameters_to_vertex(
- self.api_client, parameter_model
+ self._api_client, parameter_model
  )
  path = '{name}'.format_map(request_dict.get('_url'))
  else:
  request_dict = _GetCachedContentParameters_to_mldev(
- self.api_client, parameter_model
+ self._api_client, parameter_model
  )
  path = '{name}'.format_map(request_dict.get('_url'))
  query_params = request_dict.get('_query')
@@ -1646,19 +1656,21 @@ class AsyncCaches(_common.BaseModule):
  request_dict = _common.convert_to_dict(request_dict)
  request_dict = _common.apply_base64_encoding(request_dict)

- response_dict = await self.api_client.async_request(
+ response_dict = await self._api_client.async_request(
  'get', path, request_dict, http_options
  )

- if self.api_client.vertexai:
- response_dict = _CachedContent_from_vertex(self.api_client, response_dict)
+ if self._api_client.vertexai:
+ response_dict = _CachedContent_from_vertex(
+ self._api_client, response_dict
+ )
  else:
- response_dict = _CachedContent_from_mldev(self.api_client, response_dict)
+ response_dict = _CachedContent_from_mldev(self._api_client, response_dict)

  return_value = types.CachedContent._from_response(
  response_dict, parameter_model
  )
- self.api_client._verify_response(return_value)
+ self._api_client._verify_response(return_value)
  return return_value

  async def delete(
@@ -1682,14 +1694,14 @@ class AsyncCaches(_common.BaseModule):
  config=config,
  )

- if self.api_client.vertexai:
+ if self._api_client.vertexai:
  request_dict = _DeleteCachedContentParameters_to_vertex(
- self.api_client, parameter_model
+ self._api_client, parameter_model
  )
  path = '{name}'.format_map(request_dict.get('_url'))
  else:
  request_dict = _DeleteCachedContentParameters_to_mldev(
- self.api_client, parameter_model
+ self._api_client, parameter_model
  )
  path = '{name}'.format_map(request_dict.get('_url'))
  query_params = request_dict.get('_query')
@@ -1701,23 +1713,23 @@ class AsyncCaches(_common.BaseModule):
  request_dict = _common.convert_to_dict(request_dict)
  request_dict = _common.apply_base64_encoding(request_dict)

- response_dict = await self.api_client.async_request(
+ response_dict = await self._api_client.async_request(
  'delete', path, request_dict, http_options
  )

- if self.api_client.vertexai:
+ if self._api_client.vertexai:
  response_dict = _DeleteCachedContentResponse_from_vertex(
- self.api_client, response_dict
+ self._api_client, response_dict
  )
  else:
  response_dict = _DeleteCachedContentResponse_from_mldev(
- self.api_client, response_dict
+ self._api_client, response_dict
  )

  return_value = types.DeleteCachedContentResponse._from_response(
  response_dict, parameter_model
  )
- self.api_client._verify_response(return_value)
+ self._api_client._verify_response(return_value)
  return return_value

  async def update(
@@ -1743,14 +1755,14 @@ class AsyncCaches(_common.BaseModule):
  config=config,
  )

- if self.api_client.vertexai:
+ if self._api_client.vertexai:
  request_dict = _UpdateCachedContentParameters_to_vertex(
- self.api_client, parameter_model
+ self._api_client, parameter_model
  )
  path = '{name}'.format_map(request_dict.get('_url'))
  else:
  request_dict = _UpdateCachedContentParameters_to_mldev(
- self.api_client, parameter_model
+ self._api_client, parameter_model
  )
  path = '{name}'.format_map(request_dict.get('_url'))
  query_params = request_dict.get('_query')
@@ -1762,19 +1774,21 @@ class AsyncCaches(_common.BaseModule):
  request_dict = _common.convert_to_dict(request_dict)
  request_dict = _common.apply_base64_encoding(request_dict)

- response_dict = await self.api_client.async_request(
+ response_dict = await self._api_client.async_request(
  'patch', path, request_dict, http_options
  )

- if self.api_client.vertexai:
- response_dict = _CachedContent_from_vertex(self.api_client, response_dict)
+ if self._api_client.vertexai:
+ response_dict = _CachedContent_from_vertex(
+ self._api_client, response_dict
+ )
  else:
- response_dict = _CachedContent_from_mldev(self.api_client, response_dict)
+ response_dict = _CachedContent_from_mldev(self._api_client, response_dict)

  return_value = types.CachedContent._from_response(
  response_dict, parameter_model
  )
- self.api_client._verify_response(return_value)
+ self._api_client._verify_response(return_value)
  return return_value

  async def _list(
@@ -1793,14 +1807,14 @@ class AsyncCaches(_common.BaseModule):
  config=config,
  )

- if self.api_client.vertexai:
+ if self._api_client.vertexai:
  request_dict = _ListCachedContentsParameters_to_vertex(
- self.api_client, parameter_model
+ self._api_client, parameter_model
  )
  path = 'cachedContents'.format_map(request_dict.get('_url'))
  else:
  request_dict = _ListCachedContentsParameters_to_mldev(
- self.api_client, parameter_model
+ self._api_client, parameter_model
  )
  path = 'cachedContents'.format_map(request_dict.get('_url'))
  query_params = request_dict.get('_query')
@@ -1812,23 +1826,23 @@ class AsyncCaches(_common.BaseModule):
  request_dict = _common.convert_to_dict(request_dict)
  request_dict = _common.apply_base64_encoding(request_dict)

- response_dict = await self.api_client.async_request(
+ response_dict = await self._api_client.async_request(
  'get', path, request_dict, http_options
  )

- if self.api_client.vertexai:
+ if self._api_client.vertexai:
  response_dict = _ListCachedContentsResponse_from_vertex(
- self.api_client, response_dict
+ self._api_client, response_dict
  )
  else:
  response_dict = _ListCachedContentsResponse_from_mldev(
- self.api_client, response_dict
+ self._api_client, response_dict
  )

  return_value = types.ListCachedContentsResponse._from_response(
  response_dict, parameter_model
  )
- self.api_client._verify_response(return_value)
+ self._api_client._verify_response(return_value)
  return return_value

  async def list(
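Across caches.py the substantive change is the rename of the module's client handle from self.api_client to self._api_client (marking it internal by Python convention), plus the generated-code notice at the top of the file; the Caches/AsyncCaches methods themselves (create, get, delete, update, list) keep their behavior. A minimal usage sketch of that surface, assuming a genai.Client entry point; the keyword names (config, ttl, name) and types.CreateCachedContentConfig are assumptions inferred from the names visible in this diff, not confirmed signatures:

```python
# Hypothetical sketch of the Caches surface exercised by the methods above.
# Keyword names (api_key, config, ttl, name) are assumptions; check the
# 0.5.0 API reference for the exact signatures.
from google import genai
from google.genai import types

client = genai.Client(api_key='YOUR_API_KEY')  # or vertexai=True, project=..., location=...

cache = client.caches.create(
    model='gemini-1.5-flash-001',
    config=types.CreateCachedContentConfig(
        contents=['...long shared context...'],
        ttl='300s',
    ),
)
fetched = client.caches.get(name=cache.name)   # resolves the '{name}' path above
client.caches.delete(name=cache.name)          # likewise for the delete path
```

Both backends go through the same methods; the client's vertexai flag decides whether the _to_vertex or _to_mldev converters shown in the diff are used.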
google/genai/chats.py CHANGED
@@ -21,9 +21,7 @@ from .models import AsyncModels, Models
  from .types import Content, ContentDict, GenerateContentConfigOrDict, GenerateContentResponse, Part, PartUnionDict


- def _validate_response(
- response: GenerateContentResponse
- ) -> bool:
+ def _validate_response(response: GenerateContentResponse) -> bool:
  if not response.candidates:
  return False
  if not response.candidates[0].content:
@@ -77,7 +75,7 @@ class Chat(_BaseChat):
  response = chat.send_message('tell me a story')
  """

- input_content = t.t_content(self._modules.api_client, message)
+ input_content = t.t_content(self._modules._api_client, message)
  response = self._modules.generate_content(
  model=self._model,
  contents=self._curated_history + [input_content],
@@ -113,7 +111,7 @@ class Chat(_BaseChat):
  print(chunk.text)
  """

- input_content = t.t_content(self._modules.api_client, message)
+ input_content = t.t_content(self._modules._api_client, message)
  output_contents = []
  finish_reason = None
  for chunk in self._modules.generate_content_stream(
@@ -184,7 +182,7 @@ class AsyncChat(_BaseChat):
  response = await chat.send_message('tell me a story')
  """

- input_content = t.t_content(self._modules.api_client, message)
+ input_content = t.t_content(self._modules._api_client, message)
  response = await self._modules.generate_content(
  model=self._model,
  contents=self._curated_history + [input_content],
@@ -219,7 +217,7 @@ class AsyncChat(_BaseChat):
  print(chunk.text)
  """

- input_content = t.t_content(self._modules.api_client, message)
+ input_content = t.t_content(self._modules._api_client, message)
  output_contents = []
  finish_reason = None
  async for chunk in self._modules.generate_content_stream(
@@ -240,7 +238,6 @@ class AsyncChat(_BaseChat):
  class AsyncChats:
  """A util class to create async chat sessions."""

-
  def __init__(self, modules: AsyncModels):
  self._modules = modules
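In chats.py the same api_client to _api_client rename flows through the _modules handle that backs Chat and AsyncChat, and _validate_response gets a one-line signature; the send paths quoted in the docstrings are unchanged. A short sketch of the sync paths, assuming client.chats.create(model=...) as the entry point and send_message_stream as the streaming method name; both names are assumptions, only the send_message('tell me a story') and print(chunk.text) lines come from the docstrings above:

```python
# Hypothetical chat usage sketch; the entry-point and streaming method names
# are assumptions, the send_message call and chunk printing mirror the
# docstrings shown in the diff.
from google import genai

client = genai.Client(api_key='YOUR_API_KEY')
chat = client.chats.create(model='gemini-1.5-flash')

response = chat.send_message('tell me a story')
print(response.text)

for chunk in chat.send_message_stream('tell me another one'):
    print(chunk.text)
```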