google-genai 1.41.0__py3-none-any.whl → 1.42.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: google-genai
3
- Version: 1.41.0
3
+ Version: 1.42.0
4
4
  Summary: GenAI Python SDK
5
5
  Author-email: Google LLC <googleapis-packages@google.com>
6
6
  License: Apache-2.0
@@ -296,7 +296,7 @@ See the 'Create a client' section above to initialize a client.
296
296
 
297
297
  ```python
298
298
  response = client.models.generate_content(
299
- model='gemini-2.0-flash-001', contents='Why is the sky blue?'
299
+ model='gemini-2.5-flash', contents='Why is the sky blue?'
300
300
  )
301
301
  print(response.text)
302
302
  ```
@@ -313,7 +313,7 @@ python code.
313
313
  ```python
314
314
  file = client.files.upload(file='a11.txt')
315
315
  response = client.models.generate_content(
316
- model='gemini-2.0-flash-001',
316
+ model='gemini-2.5-flash',
317
317
  contents=['Could you summarize this file?', file]
318
318
  )
319
319
  print(response.text)
@@ -617,7 +617,7 @@ print(async_pager[0])
617
617
  from google.genai import types
618
618
 
619
619
  response = client.models.generate_content(
620
- model='gemini-2.0-flash-001',
620
+ model='gemini-2.5-flash',
621
621
  contents='Say something bad.',
622
622
  config=types.GenerateContentConfig(
623
623
  safety_settings=[
@@ -651,7 +651,7 @@ def get_current_weather(location: str) -> str:
651
651
 
652
652
 
653
653
  response = client.models.generate_content(
654
- model='gemini-2.0-flash-001',
654
+ model='gemini-2.5-flash',
655
655
  contents='What is the weather like in Boston?',
656
656
  config=types.GenerateContentConfig(tools=[get_current_weather]),
657
657
  )
@@ -667,7 +667,7 @@ as follows:
667
667
  from google.genai import types
668
668
 
669
669
  response = client.models.generate_content(
670
- model='gemini-2.0-flash-001',
670
+ model='gemini-2.5-flash',
671
671
  contents='What is the weather like in Boston?',
672
672
  config=types.GenerateContentConfig(
673
673
  tools=[get_current_weather],
@@ -714,7 +714,7 @@ function = types.FunctionDeclaration(
714
714
  tool = types.Tool(function_declarations=[function])
715
715
 
716
716
  response = client.models.generate_content(
717
- model='gemini-2.0-flash-001',
717
+ model='gemini-2.5-flash',
718
718
  contents='What is the weather like in Boston?',
719
719
  config=types.GenerateContentConfig(tools=[tool]),
720
720
  )
@@ -758,7 +758,7 @@ function_response_content = types.Content(
758
758
  )
759
759
 
760
760
  response = client.models.generate_content(
761
- model='gemini-2.0-flash-001',
761
+ model='gemini-2.5-flash',
762
762
  contents=[
763
763
  user_prompt_content,
764
764
  function_call_content,
@@ -793,7 +793,7 @@ def get_current_weather(location: str) -> str:
793
793
  return "sunny"
794
794
 
795
795
  response = client.models.generate_content(
796
- model="gemini-2.0-flash-001",
796
+ model="gemini-2.5-flash",
797
797
  contents="What is the weather like in Boston?",
798
798
  config=types.GenerateContentConfig(
799
799
  tools=[get_current_weather],
@@ -823,7 +823,7 @@ def get_current_weather(location: str) -> str:
823
823
  return "sunny"
824
824
 
825
825
  response = client.models.generate_content(
826
- model="gemini-2.0-flash-001",
826
+ model="gemini-2.5-flash",
827
827
  contents="What is the weather like in Boston?",
828
828
  config=types.GenerateContentConfig(
829
829
  tools=[get_current_weather],
@@ -913,7 +913,7 @@ user_profile = {
913
913
  }
914
914
 
915
915
  response = client.models.generate_content(
916
- model='gemini-2.0-flash',
916
+ model='gemini-2.5-flash',
917
917
  contents='Give me a random user profile.',
918
918
  config={
919
919
  'response_mime_type': 'application/json',
@@ -943,7 +943,7 @@ class CountryInfo(BaseModel):
943
943
 
944
944
 
945
945
  response = client.models.generate_content(
946
- model='gemini-2.0-flash-001',
946
+ model='gemini-2.5-flash',
947
947
  contents='Give me information for the United States.',
948
948
  config=types.GenerateContentConfig(
949
949
  response_mime_type='application/json',
@@ -957,7 +957,7 @@ print(response.text)
957
957
  from google.genai import types
958
958
 
959
959
  response = client.models.generate_content(
960
- model='gemini-2.0-flash-001',
960
+ model='gemini-2.5-flash',
961
961
  contents='Give me information for the United States.',
962
962
  config=types.GenerateContentConfig(
963
963
  response_mime_type='application/json',
@@ -995,6 +995,8 @@ You can set response_mime_type to 'text/x.enum' to return one of those enum
995
995
  values as the response.
996
996
 
997
997
  ```python
998
+ from enum import Enum
999
+
998
1000
  class InstrumentEnum(Enum):
999
1001
  PERCUSSION = 'Percussion'
1000
1002
  STRING = 'String'
@@ -1003,7 +1005,7 @@ class InstrumentEnum(Enum):
1003
1005
  KEYBOARD = 'Keyboard'
1004
1006
 
1005
1007
  response = client.models.generate_content(
1006
- model='gemini-2.0-flash-001',
1008
+ model='gemini-2.5-flash',
1007
1009
  contents='What instrument plays multiple notes at once?',
1008
1010
  config={
1009
1011
  'response_mime_type': 'text/x.enum',
@@ -1029,7 +1031,7 @@ class InstrumentEnum(Enum):
1029
1031
  KEYBOARD = 'Keyboard'
1030
1032
 
1031
1033
  response = client.models.generate_content(
1032
- model='gemini-2.0-flash-001',
1034
+ model='gemini-2.5-flash',
1033
1035
  contents='What instrument plays multiple notes at once?',
1034
1036
  config={
1035
1037
  'response_mime_type': 'application/json',
@@ -1048,7 +1050,7 @@ to you, rather than being returned as one chunk.
1048
1050
 
1049
1051
  ```python
1050
1052
  for chunk in client.models.generate_content_stream(
1051
- model='gemini-2.0-flash-001', contents='Tell me a story in 300 words.'
1053
+ model='gemini-2.5-flash', contents='Tell me a story in 300 words.'
1052
1054
  ):
1053
1055
  print(chunk.text, end='')
1054
1056
  ```
@@ -1062,7 +1064,7 @@ you can use the `from_uri` class method to create a `Part` object.
1062
1064
  from google.genai import types
1063
1065
 
1064
1066
  for chunk in client.models.generate_content_stream(
1065
- model='gemini-2.0-flash-001',
1067
+ model='gemini-2.5-flash',
1066
1068
  contents=[
1067
1069
  'What is this image about?',
1068
1070
  types.Part.from_uri(
@@ -1086,7 +1088,7 @@ with open(YOUR_IMAGE_PATH, 'rb') as f:
1086
1088
  image_bytes = f.read()
1087
1089
 
1088
1090
  for chunk in client.models.generate_content_stream(
1089
- model='gemini-2.0-flash-001',
1091
+ model='gemini-2.5-flash',
1090
1092
  contents=[
1091
1093
  'What is this image about?',
1092
1094
  types.Part.from_bytes(data=image_bytes, mime_type=YOUR_IMAGE_MIME_TYPE),
@@ -1105,7 +1107,7 @@ of `client.models.generate_content`
1105
1107
 
1106
1108
  ```python
1107
1109
  response = await client.aio.models.generate_content(
1108
- model='gemini-2.0-flash-001', contents='Tell me a story in 300 words.'
1110
+ model='gemini-2.5-flash', contents='Tell me a story in 300 words.'
1109
1111
  )
1110
1112
 
1111
1113
  print(response.text)
@@ -1116,7 +1118,7 @@ print(response.text)
1116
1118
 
1117
1119
  ```python
1118
1120
  async for chunk in await client.aio.models.generate_content_stream(
1119
- model='gemini-2.0-flash-001', contents='Tell me a story in 300 words.'
1121
+ model='gemini-2.5-flash', contents='Tell me a story in 300 words.'
1120
1122
  ):
1121
1123
  print(chunk.text, end='')
1122
1124
  ```
@@ -1125,7 +1127,7 @@ async for chunk in await client.aio.models.generate_content_stream(
1125
1127
 
1126
1128
  ```python
1127
1129
  response = client.models.count_tokens(
1128
- model='gemini-2.0-flash-001',
1130
+ model='gemini-2.5-flash',
1129
1131
  contents='why is the sky blue?',
1130
1132
  )
1131
1133
  print(response)
@@ -1137,7 +1139,7 @@ Compute tokens is only supported in Vertex AI.
1137
1139
 
1138
1140
  ```python
1139
1141
  response = client.models.compute_tokens(
1140
- model='gemini-2.0-flash-001',
1142
+ model='gemini-2.5-flash',
1141
1143
  contents='why is the sky blue?',
1142
1144
  )
1143
1145
  print(response)
@@ -1147,7 +1149,7 @@ print(response)
1147
1149
 
1148
1150
  ```python
1149
1151
  response = await client.aio.models.count_tokens(
1150
- model='gemini-2.0-flash-001',
1152
+ model='gemini-2.5-flash',
1151
1153
  contents='why is the sky blue?',
1152
1154
  )
1153
1155
  print(response)
@@ -1156,14 +1158,14 @@ print(response)
1156
1158
  #### Local Count Tokens
1157
1159
 
1158
1160
  ```python
1159
- tokenizer = genai.LocalTokenizer(model_name='gemini-2.0-flash-001')
1161
+ tokenizer = genai.LocalTokenizer(model_name='gemini-2.5-flash')
1160
1162
  result = tokenizer.count_tokens("What is your name?")
1161
1163
  ```
1162
1164
 
1163
1165
  #### Local Compute Tokens
1164
1166
 
1165
1167
  ```python
1166
- tokenizer = genai.LocalTokenizer(model_name='gemini-2.0-flash-001')
1168
+ tokenizer = genai.LocalTokenizer(model_name='gemini-2.5-flash')
1167
1169
  result = tokenizer.compute_tokens("What is your name?")
1168
1170
  ```
1169
1171
 
@@ -1376,7 +1378,7 @@ that it can reflect on its previous responses (i.e., engage in an ongoing
1376
1378
  ### Send Message (Synchronous Non-Streaming)
1377
1379
 
1378
1380
  ```python
1379
- chat = client.chats.create(model='gemini-2.0-flash-001')
1381
+ chat = client.chats.create(model='gemini-2.5-flash')
1380
1382
  response = chat.send_message('tell me a story')
1381
1383
  print(response.text)
1382
1384
  response = chat.send_message('summarize the story you told me in 1 sentence')
@@ -1386,7 +1388,7 @@ print(response.text)
1386
1388
  ### Send Message (Synchronous Streaming)
1387
1389
 
1388
1390
  ```python
1389
- chat = client.chats.create(model='gemini-2.0-flash-001')
1391
+ chat = client.chats.create(model='gemini-2.5-flash')
1390
1392
  for chunk in chat.send_message_stream('tell me a story'):
1391
1393
  print(chunk.text)
1392
1394
  ```
@@ -1394,7 +1396,7 @@ for chunk in chat.send_message_stream('tell me a story'):
1394
1396
  ### Send Message (Asynchronous Non-Streaming)
1395
1397
 
1396
1398
  ```python
1397
- chat = client.aio.chats.create(model='gemini-2.0-flash-001')
1399
+ chat = client.aio.chats.create(model='gemini-2.5-flash')
1398
1400
  response = await chat.send_message('tell me a story')
1399
1401
  print(response.text)
1400
1402
  ```
@@ -1402,7 +1404,7 @@ print(response.text)
1402
1404
  ### Send Message (Asynchronous Streaming)
1403
1405
 
1404
1406
  ```python
1405
- chat = client.aio.chats.create(model='gemini-2.0-flash-001')
1407
+ chat = client.aio.chats.create(model='gemini-2.5-flash')
1406
1408
  async for chunk in await chat.send_message_stream('tell me a story'):
1407
1409
  print(chunk.text)
1408
1410
  ```
@@ -1461,7 +1463,7 @@ else:
1461
1463
  file_uris = [file1.uri, file2.uri]
1462
1464
 
1463
1465
  cached_content = client.caches.create(
1464
- model='gemini-2.0-flash-001',
1466
+ model='gemini-2.5-flash',
1465
1467
  config=types.CreateCachedContentConfig(
1466
1468
  contents=[
1467
1469
  types.Content(
@@ -1496,7 +1498,7 @@ cached_content = client.caches.get(name=cached_content.name)
1496
1498
  from google.genai import types
1497
1499
 
1498
1500
  response = client.models.generate_content(
1499
- model='gemini-2.0-flash-001',
1501
+ model='gemini-2.5-flash',
1500
1502
  contents='Summarize the pdfs',
1501
1503
  config=types.GenerateContentConfig(
1502
1504
  cached_content=cached_content.name,
@@ -1518,7 +1520,7 @@ section above to initialize a client.
1518
1520
  ```python
1519
1521
  from google.genai import types
1520
1522
 
1521
- model = 'gemini-2.0-flash-001'
1523
+ model = 'gemini-2.5-flash'
1522
1524
  training_dataset = types.TuningDataset(
1523
1525
  # or gcs_uri=my_vertex_multimodal_dataset
1524
1526
  gcs_uri='gs://cloud-samples-data/ai-platform/generative_ai/gemini-1_5/text/sft_train_data.jsonl',
@@ -1672,7 +1674,7 @@ Vertex AI:
1672
1674
  ```python
1673
1675
  # Specify model and source file only, destination and job display name will be auto-populated
1674
1676
  job = client.batches.create(
1675
- model='gemini-2.0-flash-001',
1677
+ model='gemini-2.5-flash',
1676
1678
  src='bq://my-project.my-dataset.my-table', # or "gs://path/to/input/data"
1677
1679
  )
1678
1680
 
@@ -1684,7 +1686,7 @@ Gemini Developer API:
1684
1686
  ```python
1685
1687
  # Create a batch job with inlined requests
1686
1688
  batch_job = client.batches.create(
1687
- model="gemini-2.0-flash",
1689
+ model="gemini-2.5-flash",
1688
1690
  src=[{
1689
1691
  "contents": [{
1690
1692
  "parts": [{
@@ -1699,7 +1701,7 @@ batch_job = client.batches.create(
1699
1701
  job
1700
1702
  ```
1701
1703
 
1702
- In order to create a batch job with file name. Need to upload a jsonl file.
1704
+ In order to create a batch job with a file name, you need to upload a JSON file.
1703
1705
  For example, myrequests.json:
1704
1706
 
1705
1707
  ```
@@ -1712,14 +1714,14 @@ Then upload the file.
1712
1714
  ```python
1713
1715
  # Upload the file
1714
1716
  file = client.files.upload(
1715
- file='myrequest.json',
1716
- config=types.UploadFileConfig(display_name='test_json')
1717
+ file='myrequests.json',
1718
+ config=types.UploadFileConfig(display_name='test-json')
1717
1719
  )
1718
1720
 
1719
1721
  # Create a batch job with file name
1720
1722
  batch_job = client.batches.create(
1721
1723
  model="gemini-2.0-flash",
1722
- src="files/file_name",
1724
+ src="files/test-json",
1723
1725
  )
1724
1726
  ```
1725
1727
 
@@ -0,0 +1,39 @@
1
+ google/genai/__init__.py,sha256=SKz_9WQKA3R4OpJIDJlgssVfizLNDG2tuWtOD9pxrPE,729
2
+ google/genai/_adapters.py,sha256=Kok38miNYJff2n--l0zEK_hbq0y2rWOH7k75J7SMYbQ,1744
3
+ google/genai/_api_client.py,sha256=uLgUFk9HC2xIJglFoxPXLUazN6uVmwqDHIJA_PbVIr0,62127
4
+ google/genai/_api_module.py,sha256=lj8eUWx8_LBGBz-49qz6_ywWm3GYp3d8Bg5JoOHbtbI,902
5
+ google/genai/_automatic_function_calling_util.py,sha256=xXNkJR-pzSMkeSXMz3Jw-kMHFbTJEiRJ3wocuwtWW4I,11627
6
+ google/genai/_base_transformers.py,sha256=wljA6m4tLl4XLGlBC2DNOls5N9-X9tffBq0M7i8jgpw,1034
7
+ google/genai/_base_url.py,sha256=E5H4dew14Y16qfnB3XRnjSCi19cJVlkaMNoM_8ip-PM,1597
8
+ google/genai/_common.py,sha256=SWlfo_Yg6KK-gnSYUMU4EgWFvGmAbi99xPrp-ZR7atA,24150
9
+ google/genai/_extra_utils.py,sha256=YLw64xzAKD_fQJp327-GGZM3kQ0sVdhNXMeDaaNkVFE,23011
10
+ google/genai/_live_converters.py,sha256=J2-mFI182xTlKzHd6rFSHp743Ju8Fg05QUTbgEMkM8g,41592
11
+ google/genai/_local_tokenizer_loader.py,sha256=cGN1F0f7hNjRIGCGTLeox7IGAZf_YcvZjSp2rCyhUak,7465
12
+ google/genai/_mcp_utils.py,sha256=HuWJ8FUjquv40Mf_QjcL5r5yXWrS-JjINsjlOSbbyAc,3870
13
+ google/genai/_operations_converters.py,sha256=hPmrlU_yJWT4di2arA0VKaoQIB1MbCPglmAZ4D8M-Ds,8744
14
+ google/genai/_replay_api_client.py,sha256=E_tbw61TfOgxXhlKfoycN9X_5XaX5yBQHMnPj34w6nk,22731
15
+ google/genai/_test_api_client.py,sha256=4ruFIy5_1qcbKqqIBu3HSQbpSOBrxiecBtDZaTGFR1s,4797
16
+ google/genai/_tokens_converters.py,sha256=0a7GH3-tnM_JT3v7eSWYf7lGfRZ7m_WAZ_26OoL7Qa8,13614
17
+ google/genai/_transformers.py,sha256=lW5gri3gEWR7n-wtujyvd4LTsS6-ff6hdVqOBX22VUg,42051
18
+ google/genai/batches.py,sha256=YVq7BsIlW7mQgrPkZ_qSCVB2Kt_1G2l9jX38NFn7Eak,74345
19
+ google/genai/caches.py,sha256=lG9SxUoWjGB-J0pwtmU2RakIvGDyOZnaSqe-eueV4eQ,44265
20
+ google/genai/chats.py,sha256=pIBw8d13llupLn4a7vP6vnpbzDcvCCrZZ-Q2r8Cvo7g,16652
21
+ google/genai/client.py,sha256=bwKV5gHKpxzmfFTtoudQ_hEz5QfUzKYMJHYT-AnQfNU,13066
22
+ google/genai/errors.py,sha256=dLH0Bo8-Y0K7zKASU5O0y_0FSKpSFJn8JPcnwIUvtIM,6089
23
+ google/genai/files.py,sha256=2TkcZo7iviHA48OEjc9YnyirZ-umBUN7Z4Gdr4nHyJI,31551
24
+ google/genai/live.py,sha256=e3oKgUV3hda-FP9Prx8cG0d8nPDKNRRJn5gn9oMlrXk,40413
25
+ google/genai/live_music.py,sha256=Y7I7jh5SAKgyjBIMLboH0oTnZJ18uOT2SpRDKURvp94,6783
26
+ google/genai/local_tokenizer.py,sha256=EKZ72cV2Zfutlo_efMOPnLRNZN4WQe57rD3G80cF340,14109
27
+ google/genai/models.py,sha256=sxiZyGuhci-dTpqb9a-zKCYdJBIQm6RiMk34xNq4RX0,224324
28
+ google/genai/operations.py,sha256=KgM5vsagUnAMGk9wKxuQYBUh_6bwrPQ9BzZvydiumQA,16208
29
+ google/genai/pagers.py,sha256=m0SfWWn1EJs2k1On3DZx371qb8g2BRm_188ExsicIRc,7098
30
+ google/genai/py.typed,sha256=RsMFoLwBkAvY05t6izop4UHZtqOPLiKp3GkIEizzmQY,40
31
+ google/genai/tokens.py,sha256=4BPW0gGWFeFVk3INkuY2tfREnsrvzQDhouvRI6_F9Q8,12235
32
+ google/genai/tunings.py,sha256=VmCBW_RR16QbzVpimi7pTEv6XTVeyDGpwqmJqetUj0o,58175
33
+ google/genai/types.py,sha256=n9PvKsdTgydZug-6JbjIjnEk6WvXb5nut_zF6tHjWLk,558477
34
+ google/genai/version.py,sha256=IywLlohBT1FvKiX8GIR6rqqIZVkzZF3HmTK_9TuJN5o,627
35
+ google_genai-1.42.0.dist-info/licenses/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
36
+ google_genai-1.42.0.dist-info/METADATA,sha256=eDIdLeToHF_DET2M-_wgH2xjjyb_3-zJPgp3Ya6Z5Vc,45280
37
+ google_genai-1.42.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
38
+ google_genai-1.42.0.dist-info/top_level.txt,sha256=_1QvSJIhFAGfxb79D6DhB7SUw2X6T4rwnz_LLrbcD3c,7
39
+ google_genai-1.42.0.dist-info/RECORD,,
@@ -1,39 +0,0 @@
1
- google/genai/__init__.py,sha256=SKz_9WQKA3R4OpJIDJlgssVfizLNDG2tuWtOD9pxrPE,729
2
- google/genai/_adapters.py,sha256=Kok38miNYJff2n--l0zEK_hbq0y2rWOH7k75J7SMYbQ,1744
3
- google/genai/_api_client.py,sha256=MjXU31z7dtbix5hSVM0dkM_TFvpUfonyvRhEdQ-NX-k,62056
4
- google/genai/_api_module.py,sha256=lj8eUWx8_LBGBz-49qz6_ywWm3GYp3d8Bg5JoOHbtbI,902
5
- google/genai/_automatic_function_calling_util.py,sha256=xXNkJR-pzSMkeSXMz3Jw-kMHFbTJEiRJ3wocuwtWW4I,11627
6
- google/genai/_base_transformers.py,sha256=wljA6m4tLl4XLGlBC2DNOls5N9-X9tffBq0M7i8jgpw,1034
7
- google/genai/_base_url.py,sha256=E5H4dew14Y16qfnB3XRnjSCi19cJVlkaMNoM_8ip-PM,1597
8
- google/genai/_common.py,sha256=YU5842SoZOW0usJRnz58Wi8hqJ1f-Dd01flGTsqI8-U,20653
9
- google/genai/_extra_utils.py,sha256=YLw64xzAKD_fQJp327-GGZM3kQ0sVdhNXMeDaaNkVFE,23011
10
- google/genai/_live_converters.py,sha256=OZV6-i-YR_HSKuBzu5EAjbqOv78efGo_1OVfRqoExxw,109492
11
- google/genai/_local_tokenizer_loader.py,sha256=cGN1F0f7hNjRIGCGTLeox7IGAZf_YcvZjSp2rCyhUak,7465
12
- google/genai/_mcp_utils.py,sha256=HuWJ8FUjquv40Mf_QjcL5r5yXWrS-JjINsjlOSbbyAc,3870
13
- google/genai/_operations_converters.py,sha256=hPmrlU_yJWT4di2arA0VKaoQIB1MbCPglmAZ4D8M-Ds,8744
14
- google/genai/_replay_api_client.py,sha256=MmpzqE5AxVeyvCahEnmukYGIZqN8lxS-suSgUszvLSw,22555
15
- google/genai/_test_api_client.py,sha256=4ruFIy5_1qcbKqqIBu3HSQbpSOBrxiecBtDZaTGFR1s,4797
16
- google/genai/_tokens_converters.py,sha256=7b1fe9fzigJfuwR17GF6YUHU8ef8H57xruTNQkCYfDE,25902
17
- google/genai/_transformers.py,sha256=-1GIrDNS4fH6Qx2jNm-VhcmlTSjWxanna7N7Tp_ctQ8,40861
18
- google/genai/batches.py,sha256=Xs4pZ6t8sjKGZ2N3ATOPFbobDgWxduSqc9-Zpo2Sdj8,102539
19
- google/genai/caches.py,sha256=xq3kZBpUoc7Tv6In15gDOrKBnzfeIG3lCBY4zM8Y2sQ,67127
20
- google/genai/chats.py,sha256=pIBw8d13llupLn4a7vP6vnpbzDcvCCrZZ-Q2r8Cvo7g,16652
21
- google/genai/client.py,sha256=bwKV5gHKpxzmfFTtoudQ_hEz5QfUzKYMJHYT-AnQfNU,13066
22
- google/genai/errors.py,sha256=zaPEs_GrtZuypvSPnOe32CTHO6nEVtshvc3Av2ug2Ac,5822
23
- google/genai/files.py,sha256=1H6Kdxo4wAOW___7o3j9FJg_B5Dw2O0SJk5fEhC8wvk,36896
24
- google/genai/live.py,sha256=PM5fXqhURdrdyM7AVCH_ogLe8_Zzhixzz2rXWm0FHxA,40590
25
- google/genai/live_music.py,sha256=3GG9nsto8Vhkohcs-4CPMS4DFp1ZtMuLYzHfvEPYAeg,6971
26
- google/genai/local_tokenizer.py,sha256=EKZ72cV2Zfutlo_efMOPnLRNZN4WQe57rD3G80cF340,14109
27
- google/genai/models.py,sha256=2Rnjqhh2AsZZiqN6c8LK4Z_O0XhP7gpSb7MU9DypHqU,267634
28
- google/genai/operations.py,sha256=mgRuVCoqUf7z0RfB84W9x7S839VCttmIQZkXovHAMrE,17061
29
- google/genai/pagers.py,sha256=m0SfWWn1EJs2k1On3DZx371qb8g2BRm_188ExsicIRc,7098
30
- google/genai/py.typed,sha256=RsMFoLwBkAvY05t6izop4UHZtqOPLiKp3GkIEizzmQY,40
31
- google/genai/tokens.py,sha256=8RbZ0kgvyKT3SwbgIUOHr6TTZL24v4fqYarhlA8r1ac,12503
32
- google/genai/tunings.py,sha256=958t2gwEu4XqLgYv7gK3pwKlqM5IrwYBDpQDujFPp50,63155
33
- google/genai/types.py,sha256=tfpKdSkJd7wcDqbmbIZLFUkBhu4tTNecRODj_WHBzqc,555885
34
- google/genai/version.py,sha256=f0C75ye5LDF8-vIJ_lGyMfMPwqvU5cpOMMxPDtXJ_hw,627
35
- google_genai-1.41.0.dist-info/licenses/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
36
- google_genai-1.41.0.dist-info/METADATA,sha256=DMCrhJAI1tN-JAhdwzOhD5ZQpO7pCgmXV7xZRuwqEHY,45381
37
- google_genai-1.41.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
38
- google_genai-1.41.0.dist-info/top_level.txt,sha256=_1QvSJIhFAGfxb79D6DhB7SUw2X6T4rwnz_LLrbcD3c,7
39
- google_genai-1.41.0.dist-info/RECORD,,