google-genai 1.46.0__py3-none-any.whl → 1.48.0__py3-none-any.whl

This diff compares the contents of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their respective public registries.
google/genai/version.py CHANGED
@@ -13,4 +13,4 @@
  # limitations under the License.
  #
 
- __version__ = '1.46.0' # x-release-please-version
+ __version__ = '1.48.0' # x-release-please-version
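The only change in `version.py` is the bumped version string. As a quick sanity check after upgrading, that string can be read back at runtime; a minimal sketch (illustrative only, not part of the packaged diff):

```python
# Minimal sketch (illustrative, not part of the diff): confirm the installed release.
from google.genai import version

print(version.__version__)  # expected: '1.48.0'
```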
google_genai-1.46.0.dist-info/METADATA → google_genai-1.48.0.dist-info/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: google-genai
- Version: 1.46.0
+ Version: 1.48.0
  Summary: GenAI Python SDK
  Author-email: Google LLC <googleapis-packages@google.com>
  License-Expression: Apache-2.0
@@ -9,7 +9,6 @@ Classifier: Intended Audience :: Developers
  Classifier: Operating System :: OS Independent
  Classifier: Programming Language :: Python
  Classifier: Programming Language :: Python :: 3
- Classifier: Programming Language :: Python :: 3.9
  Classifier: Programming Language :: Python :: 3.10
  Classifier: Programming Language :: Python :: 3.11
  Classifier: Programming Language :: Python :: 3.12
@@ -17,13 +16,13 @@ Classifier: Programming Language :: Python :: 3.13
  Classifier: Programming Language :: Python :: 3.14
  Classifier: Topic :: Internet
  Classifier: Topic :: Software Development :: Libraries :: Python Modules
- Requires-Python: >=3.9
+ Requires-Python: >=3.10
  Description-Content-Type: text/markdown
  License-File: LICENSE
  Requires-Dist: anyio<5.0.0,>=4.8.0
  Requires-Dist: google-auth<3.0.0,>=2.14.1
  Requires-Dist: httpx<1.0.0,>=0.28.1
- Requires-Dist: pydantic<3.0.0,>=2.0.0
+ Requires-Dist: pydantic<3.0.0,>=2.9.0
  Requires-Dist: requests<3.0.0,>=2.28.1
  Requires-Dist: tenacity<9.2.0,>=8.2.3
  Requires-Dist: websockets<15.1.0,>=13.0.0
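Taken together, the metadata changes above drop the Python 3.9 classifier, raise `Requires-Python` to `>=3.10`, and raise the pydantic floor from 2.0.0 to 2.9.0. A minimal environment check is sketched below (illustrative only, not part of the diff); it assumes nothing beyond the standard library and pydantic's public `VERSION` string:

```python
# Illustrative sketch: verify an environment against the 1.48.0 metadata
# (Requires-Python >=3.10, pydantic >=2.9.0,<3.0.0). Not part of the diff.
import sys

import pydantic

assert sys.version_info >= (3, 10), 'google-genai 1.48.0 requires Python >= 3.10'

major, minor = (int(x) for x in pydantic.VERSION.split('.')[:2])
assert (major, minor) >= (2, 9) and major < 3, 'requires pydantic >=2.9.0,<3.0.0'
```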
@@ -128,7 +127,6 @@ Explicitly close the sync client to ensure that resources, such as the
  underlying HTTP connections, are properly cleaned up and closed.
 
  ```python
-
  from google.genai import Client
 
  client = Client()
@@ -147,7 +145,6 @@ client.close()
  To explicitly close the async client:
 
  ```python
-
  from google.genai import Client
 
  aclient = Client(
@@ -174,15 +171,14 @@ By using the sync client context manager, it will close the underlying
  from google.genai import Client
 
  with Client() as client:
- response_1 = client.models.generate_content(
- model=MODEL_ID,
- contents='Hello',
- )
- response_2 = client.models.generate_content(
- model=MODEL_ID,
- contents='Ask a question',
- )
-
+ response_1 = client.models.generate_content(
+ model=MODEL_ID,
+ contents='Hello',
+ )
+ response_2 = client.models.generate_content(
+ model=MODEL_ID,
+ contents='Ask a question',
+ )
  ```
 
  By using the async client context manager, it will close the underlying
@@ -192,15 +188,14 @@ By using the async client context manager, it will close the underlying
  from google.genai import Client
 
  async with Client().aio as aclient:
- response_1 = await aclient.models.generate_content(
- model=MODEL_ID,
- contents='Hello',
- )
- response_2 = await aclient.models.generate_content(
- model=MODEL_ID,
- contents='Ask a question',
- )
-
+ response_1 = await aclient.models.generate_content(
+ model=MODEL_ID,
+ contents='Hello',
+ )
+ response_2 = await aclient.models.generate_content(
+ model=MODEL_ID,
+ contents='Ask a question',
+ )
  ```
 
  ### API Selection
@@ -245,7 +240,6 @@ Additional args of `aiohttp.ClientSession.request()` ([see _RequestOptions args]
  through the following way:
 
  ```python
-
  http_options = types.HttpOptions(
  async_client_args={'cookies': ..., 'ssl': ...},
  )
@@ -259,7 +253,6 @@ Both httpx and aiohttp libraries use `urllib.request.getproxies` from
  environment variables. Before client initialization, you may set proxy (and
  optional SSL_CERT_FILE) by setting the environment variables:
 
-
  ```bash
  export HTTPS_PROXY='http://username:password@proxy_uri:port'
  export SSL_CERT_FILE='client.pem'
@@ -270,7 +263,6 @@ args to `httpx.Client()`. You may install `httpx[socks]` to use it.
  Then, you can pass it through the following way:
 
  ```python
-
  http_options = types.HttpOptions(
  client_args={'proxy': 'socks5://user:pass@host:port'},
  async_client_args={'proxy': 'socks5://user:pass@host:port'},
@@ -286,16 +278,14 @@ In some cases you might need a custom base url (for example, API gateway proxy
  You may pass the custom base url like this:
 
  ```python
-
  base_url = 'https://test-api-gateway-proxy.com'
  client = Client(
- vertexai=True,
- http_options={
- 'base_url': base_url,
- 'headers': {'Authorization': 'Bearer test_token'},
- },
+ vertexai=True, # Currently only vertexai=True is supported
+ http_options={
+ 'base_url': base_url,
+ 'headers': {'Authorization': 'Bearer test_token'},
+ },
  )
-
  ```
 
  ## Types
@@ -329,17 +319,17 @@ response = client.models.generate_content(
  model='gemini-2.5-flash-image',
  contents='A cartoon infographic for flying sneakers',
  config=types.GenerateContentConfig(
- response_modalities=["IMAGE"],
- image_config=types.ImageConfig(
- aspect_ratio="9:16",
- ),
+ response_modalities=["IMAGE"],
+ image_config=types.ImageConfig(
+ aspect_ratio="9:16",
+ ),
  ),
  )
 
  for part in response.parts:
- if part.inline_data:
- generated_image = part.as_image()
- generated_image.show()
+ if part.inline_data:
+ generated_image = part.as_image()
+ generated_image.show()
  ```
 
  #### with uploaded file (Gemini Developer API only)
@@ -374,8 +364,8 @@ This is the canonical way to provide contents, SDK will not do any conversion.
  from google.genai import types
 
  contents = types.Content(
- role='user',
- parts=[types.Part.from_text(text='Why is the sky blue?')]
+ role='user',
+ parts=[types.Part.from_text(text='Why is the sky blue?')]
  )
  ```
 
@@ -383,10 +373,10 @@ SDK converts this to
 
  ```python
  [
- types.Content(
- role='user',
- parts=[types.Part.from_text(text='Why is the sky blue?')]
- )
+ types.Content(
+ role='user',
+ parts=[types.Part.from_text(text='Why is the sky blue?')]
+ )
  ]
  ```
 
@@ -400,11 +390,11 @@ The SDK will assume this is a text part, and it converts this into the following
 
  ```python
  [
- types.UserContent(
- parts=[
- types.Part.from_text(text='Why is the sky blue?')
- ]
- )
+ types.UserContent(
+ parts=[
+ types.Part.from_text(text='Why is the sky blue?')
+ ]
+ )
  ]
  ```
 
@@ -422,12 +412,12 @@ like the following:
 
  ```python
  [
- types.UserContent(
- parts=[
- types.Part.from_text(text='Why is the sky blue?'),
- types.Part.from_text(text='Why is the cloud white?'),
- ]
- )
+ types.UserContent(
+ parts=[
+ types.Part.from_text(text='Why is the sky blue?'),
+ types.Part.from_text(text='Why is the cloud white?'),
+ ]
+ )
  ]
  ```
 
@@ -440,8 +430,8 @@ Where a `types.UserContent` is a subclass of `types.Content`, the
  from google.genai import types
 
  contents = types.Part.from_function_call(
- name='get_weather_by_location',
- args={'location': 'Boston'}
+ name='get_weather_by_location',
+ args={'location': 'Boston'}
  )
  ```
 
@@ -449,14 +439,14 @@ The SDK converts a function call part to a content with a `model` role:
 
  ```python
  [
- types.ModelContent(
- parts=[
- types.Part.from_function_call(
- name='get_weather_by_location',
- args={'location': 'Boston'}
- )
- ]
- )
+ types.ModelContent(
+ parts=[
+ types.Part.from_function_call(
+ name='get_weather_by_location',
+ args={'location': 'Boston'}
+ )
+ ]
+ )
  ]
  ```
 
@@ -469,14 +459,14 @@ Where a `types.ModelContent` is a subclass of `types.Content`, the
  from google.genai import types
 
  contents = [
- types.Part.from_function_call(
- name='get_weather_by_location',
- args={'location': 'Boston'}
- ),
- types.Part.from_function_call(
- name='get_weather_by_location',
- args={'location': 'New York'}
- ),
+ types.Part.from_function_call(
+ name='get_weather_by_location',
+ args={'location': 'Boston'}
+ ),
+ types.Part.from_function_call(
+ name='get_weather_by_location',
+ args={'location': 'New York'}
+ ),
  ]
  ```
 
@@ -484,18 +474,18 @@ The SDK converts a list of function call parts to the a content with a `model` r
 
  ```python
  [
- types.ModelContent(
- parts=[
- types.Part.from_function_call(
- name='get_weather_by_location',
- args={'location': 'Boston'}
- ),
- types.Part.from_function_call(
- name='get_weather_by_location',
- args={'location': 'New York'}
- )
- ]
- )
+ types.ModelContent(
+ parts=[
+ types.Part.from_function_call(
+ name='get_weather_by_location',
+ args={'location': 'Boston'}
+ ),
+ types.Part.from_function_call(
+ name='get_weather_by_location',
+ args={'location': 'New York'}
+ )
+ ]
+ )
  ]
  ```
 
@@ -508,8 +498,8 @@ Where a `types.ModelContent` is a subclass of `types.Content`, the
  from google.genai import types
 
  contents = types.Part.from_uri(
- file_uri: 'gs://generativeai-downloads/images/scones.jpg',
- mime_type: 'image/jpeg',
+ file_uri: 'gs://generativeai-downloads/images/scones.jpg',
+ mime_type: 'image/jpeg',
  )
  ```
 
@@ -517,12 +507,12 @@ The SDK converts all non function call parts into a content with a `user` role.
 
  ```python
  [
- types.UserContent(parts=[
- types.Part.from_uri(
- file_uri: 'gs://generativeai-downloads/images/scones.jpg',
- mime_type: 'image/jpeg',
- )
- ])
+ types.UserContent(parts=[
+ types.Part.from_uri(
+ file_uri: 'gs://generativeai-downloads/images/scones.jpg',
+ mime_type: 'image/jpeg',
+ )
+ ])
  ]
  ```
 
@@ -532,11 +522,11 @@ The SDK converts all non function call parts into a content with a `user` role.
  from google.genai import types
 
  contents = [
- types.Part.from_text('What is this image about?'),
- types.Part.from_uri(
- file_uri: 'gs://generativeai-downloads/images/scones.jpg',
- mime_type: 'image/jpeg',
- )
+ types.Part.from_text('What is this image about?'),
+ types.Part.from_uri(
+ file_uri: 'gs://generativeai-downloads/images/scones.jpg',
+ mime_type: 'image/jpeg',
+ )
  ]
  ```
 
@@ -544,15 +534,15 @@ The SDK will convert the list of parts into a content with a `user` role
 
  ```python
  [
- types.UserContent(
- parts=[
- types.Part.from_text('What is this image about?'),
- types.Part.from_uri(
- file_uri: 'gs://generativeai-downloads/images/scones.jpg',
- mime_type: 'image/jpeg',
- )
- ]
- )
+ types.UserContent(
+ parts=[
+ types.Part.from_text('What is this image about?'),
+ types.Part.from_uri(
+ file_uri: 'gs://generativeai-downloads/images/scones.jpg',
+ mime_type: 'image/jpeg',
+ )
+ ]
+ )
  ]
  ```
 
@@ -686,7 +676,7 @@ def get_current_weather(location: str) -> str:
  """Returns the current weather.
 
  Args:
- location: The city and state, e.g. San Francisco, CA
+ location: The city and state, e.g. San Francisco, CA
  """
  return 'sunny'
 
@@ -708,14 +698,14 @@ as follows:
  from google.genai import types
 
  response = client.models.generate_content(
- model='gemini-2.5-flash',
- contents='What is the weather like in Boston?',
- config=types.GenerateContentConfig(
- tools=[get_current_weather],
- automatic_function_calling=types.AutomaticFunctionCallingConfig(
- disable=True
+ model='gemini-2.5-flash',
+ contents='What is the weather like in Boston?',
+ config=types.GenerateContentConfig(
+ tools=[get_current_weather],
+ automatic_function_calling=types.AutomaticFunctionCallingConfig(
+ disable=True
+ ),
  ),
- ),
  )
  ```
 
@@ -829,7 +819,7 @@ def get_current_weather(location: str) -> str:
  """Returns the current weather.
 
  Args:
- location: The city and state, e.g. San Francisco, CA
+ location: The city and state, e.g. San Francisco, CA
  """
  return "sunny"
 
@@ -859,7 +849,7 @@ def get_current_weather(location: str) -> str:
  """Returns the current weather.
 
  Args:
- location: The city and state, e.g. San Francisco, CA
+ location: The city and state, e.g. San Francisco, CA
  """
  return "sunny"
 
@@ -1039,20 +1029,20 @@ values as the response.
  from enum import Enum
 
  class InstrumentEnum(Enum):
- PERCUSSION = 'Percussion'
- STRING = 'String'
- WOODWIND = 'Woodwind'
- BRASS = 'Brass'
- KEYBOARD = 'Keyboard'
+ PERCUSSION = 'Percussion'
+ STRING = 'String'
+ WOODWIND = 'Woodwind'
+ BRASS = 'Brass'
+ KEYBOARD = 'Keyboard'
 
  response = client.models.generate_content(
- model='gemini-2.5-flash',
- contents='What instrument plays multiple notes at once?',
- config={
- 'response_mime_type': 'text/x.enum',
- 'response_schema': InstrumentEnum,
- },
- )
+ model='gemini-2.5-flash',
+ contents='What instrument plays multiple notes at once?',
+ config={
+ 'response_mime_type': 'text/x.enum',
+ 'response_schema': InstrumentEnum,
+ },
+ )
  print(response.text)
  ```
 
@@ -1065,20 +1055,20 @@ identical but in quotes.
  from enum import Enum
 
  class InstrumentEnum(Enum):
- PERCUSSION = 'Percussion'
- STRING = 'String'
- WOODWIND = 'Woodwind'
- BRASS = 'Brass'
- KEYBOARD = 'Keyboard'
+ PERCUSSION = 'Percussion'
+ STRING = 'String'
+ WOODWIND = 'Woodwind'
+ BRASS = 'Brass'
+ KEYBOARD = 'Keyboard'
 
  response = client.models.generate_content(
- model='gemini-2.5-flash',
- contents='What instrument plays multiple notes at once?',
- config={
- 'response_mime_type': 'application/json',
- 'response_schema': InstrumentEnum,
- },
- )
+ model='gemini-2.5-flash',
+ contents='What instrument plays multiple notes at once?',
+ config={
+ 'response_mime_type': 'application/json',
+ 'response_schema': InstrumentEnum,
+ },
+ )
  print(response.text)
  ```
 
@@ -1156,7 +1146,6 @@ print(response.text)
 
  ### Generate Content (Asynchronous Streaming)
 
-
  ```python
  async for chunk in await client.aio.models.generate_content_stream(
  model='gemini-2.5-flash', contents='Tell me a story in 300 words.'
@@ -1214,7 +1203,7 @@ result = tokenizer.compute_tokens("What is your name?")
 
  ```python
  response = client.models.embed_content(
- model='text-embedding-004',
+ model='gemini-embedding-001',
  contents='why is the sky blue?',
  )
  print(response)
@@ -1225,7 +1214,7 @@ from google.genai import types
 
  # multiple contents with config
  response = client.models.embed_content(
- model='text-embedding-004',
+ model='gemini-embedding-001',
  contents=['why is the sky blue?', 'What is your age?'],
  config=types.EmbedContentConfig(output_dimensionality=10),
  )
@@ -1455,7 +1444,7 @@ async for chunk in await chat.send_message_stream('tell me a story'):
  Files are only supported in Gemini Developer API. See the 'Create a client'
  section above to initialize a client.
 
- ```cmd
+ ```sh
  !gsutil cp gs://cloud-samples-data/generative-ai/pdf/2312.11805v3.pdf .
  !gsutil cp gs://cloud-samples-data/generative-ai/pdf/2403.05530.pdf .
  ```
@@ -1556,14 +1545,14 @@ section above to initialize a client.
 
  ### Tune
 
- - Vertex AI supports tuning from GCS source or from a Vertex Multimodal Dataset
+ - Vertex AI supports tuning from GCS source or from a [Vertex AI Multimodal Dataset](https://docs.cloud.google.com/vertex-ai/generative-ai/docs/multimodal/datasets)
 
  ```python
  from google.genai import types
 
  model = 'gemini-2.5-flash'
  training_dataset = types.TuningDataset(
- # or gcs_uri=my_vertex_multimodal_dataset
+ # or gcs_uri=my_vertex_multimodal_dataset
  gcs_uri='gs://cloud-samples-data/ai-platform/generative_ai/gemini-1_5/text/sft_train_data.jsonl',
  )
  ```
@@ -1719,7 +1708,7 @@ job = client.batches.create(
  src='bq://my-project.my-dataset.my-table', # or "gs://path/to/input/data"
  )
 
- job
+ print(job)
  ```
 
  Gemini Developer API:
@@ -1729,13 +1718,13 @@ Gemini Developer API:
  batch_job = client.batches.create(
  model="gemini-2.5-flash",
  src=[{
- "contents": [{
- "parts": [{
- "text": "Hello!",
+ "contents": [{
+ "parts": [{
+ "text": "Hello!",
+ }],
+ "role": "user",
  }],
- "role": "user",
- }],
- "config": {"response_modalities": ["text"]},
+ "config": {"response_modalities": ["text"]},
  }],
  )
 
@@ -1843,13 +1832,13 @@ To handle errors raised by the model service, the SDK provides this [APIError](h
  from google.genai import errors
 
  try:
- client.models.generate_content(
- model="invalid-model-name",
- contents="What is your name?",
- )
+ client.models.generate_content(
+ model="invalid-model-name",
+ contents="What is your name?",
+ )
  except errors.APIError as e:
- print(e.code) # 404
- print(e.message)
+ print(e.code) # 404
+ print(e.message)
  ```
 
  ## Extra Request Body
google_genai-1.46.0.dist-info/RECORD → google_genai-1.48.0.dist-info/RECORD
@@ -1,39 +1,39 @@
  google/genai/__init__.py,sha256=SKz_9WQKA3R4OpJIDJlgssVfizLNDG2tuWtOD9pxrPE,729
  google/genai/_adapters.py,sha256=Kok38miNYJff2n--l0zEK_hbq0y2rWOH7k75J7SMYbQ,1744
- google/genai/_api_client.py,sha256=_y6XAs65RXUqCi-G4vKiTkGM7x14eSYaNbJLbbdcagc,62975
+ google/genai/_api_client.py,sha256=magDCIaa674RDE_oyqxjHxlbZ7Gl7qSq9GHKEOd4p0w,63909
  google/genai/_api_module.py,sha256=lj8eUWx8_LBGBz-49qz6_ywWm3GYp3d8Bg5JoOHbtbI,902
  google/genai/_automatic_function_calling_util.py,sha256=xXNkJR-pzSMkeSXMz3Jw-kMHFbTJEiRJ3wocuwtWW4I,11627
  google/genai/_base_transformers.py,sha256=wljA6m4tLl4XLGlBC2DNOls5N9-X9tffBq0M7i8jgpw,1034
  google/genai/_base_url.py,sha256=E5H4dew14Y16qfnB3XRnjSCi19cJVlkaMNoM_8ip-PM,1597
  google/genai/_common.py,sha256=6_psdFl0iBRwgyIKOuGtugpTCHPGB2zZzsJCVcI_2oI,24114
- google/genai/_extra_utils.py,sha256=YLw64xzAKD_fQJp327-GGZM3kQ0sVdhNXMeDaaNkVFE,23011
- google/genai/_live_converters.py,sha256=b4TZW_BobO4fPkoIiOKVDcwKmQl5FvXaucENY55_YWo,42296
+ google/genai/_extra_utils.py,sha256=dqSB1XHO1TLlGqtZz9sShtFnJc2UZgDlG89-dNAAgD0,24263
+ google/genai/_live_converters.py,sha256=zJ8ZbFWA2vhO0_fbOtWQo7Zp82nViEisYfFupMyZokc,42458
  google/genai/_local_tokenizer_loader.py,sha256=cGN1F0f7hNjRIGCGTLeox7IGAZf_YcvZjSp2rCyhUak,7465
  google/genai/_mcp_utils.py,sha256=HuWJ8FUjquv40Mf_QjcL5r5yXWrS-JjINsjlOSbbyAc,3870
  google/genai/_operations_converters.py,sha256=8w4WSeA_KSyc56JcL1MTknZHIds0gF3E8YdriluUJfY,8708
  google/genai/_replay_api_client.py,sha256=oCPZULWpmjahOn5pvY7KkCB_cksNwm7pc4nuTnqqqV8,22956
  google/genai/_test_api_client.py,sha256=4ruFIy5_1qcbKqqIBu3HSQbpSOBrxiecBtDZaTGFR1s,4797
- google/genai/_tokens_converters.py,sha256=xQY6yWtt7iJtfygfmd29d9mjjGKOpy0xG3yTdlr7APk,14137
- google/genai/_transformers.py,sha256=tx6FecRkfQbEmmgXZrb8ndIRacAfluKIFlyQilslWG0,42782
- google/genai/batches.py,sha256=wWkpsY7-a_jxkpkAu6PlbJ2VKRS7j8vy_a-BRPI-BZY,74986
- google/genai/caches.py,sha256=uQLFO0JuzBGm1Gv92v2Dqp_QQ4qoPTX1vqI1grHyfKU,44788
+ google/genai/_tokens_converters.py,sha256=oDnzbQRA1abHBV9JLFV_5G7Pzm1cRPDUe_MVNTxI6LQ,14299
+ google/genai/_transformers.py,sha256=h2VV2AnKdvHQtEwiiLV3vJFezYBYSfacvPmF5kcocEE,43165
+ google/genai/batches.py,sha256=FCfGgcueOI3_yTRUXp2XccpnWeNlH1ex5OKbbNESL3Y,75148
+ google/genai/caches.py,sha256=CAJyiBHcSuaDimxTfd5y9-iVRHGvmOfcuNnWEfktXzQ,44950
  google/genai/chats.py,sha256=pIBw8d13llupLn4a7vP6vnpbzDcvCCrZZ-Q2r8Cvo7g,16652
- google/genai/client.py,sha256=bwKV5gHKpxzmfFTtoudQ_hEz5QfUzKYMJHYT-AnQfNU,13066
+ google/genai/client.py,sha256=_2B9w4cyah1kepMYwaaCpEDtbL3JWSt0Qx7k1IJXCFU,13110
  google/genai/errors.py,sha256=dLH0Bo8-Y0K7zKASU5O0y_0FSKpSFJn8JPcnwIUvtIM,6089
  google/genai/files.py,sha256=2TkcZo7iviHA48OEjc9YnyirZ-umBUN7Z4Gdr4nHyJI,31551
- google/genai/live.py,sha256=1YfDR2VTqeHp2YJkgX2j1KHDaLcGCLN4Y6O9T4cM-4U,40996
+ google/genai/live.py,sha256=IzBIHjjasfHivDxhi4HvMru0G8LlkFsM_e6QCgSL1cQ,41278
  google/genai/live_music.py,sha256=Y7I7jh5SAKgyjBIMLboH0oTnZJ18uOT2SpRDKURvp94,6783
  google/genai/local_tokenizer.py,sha256=EKZ72cV2Zfutlo_efMOPnLRNZN4WQe57rD3G80cF340,14109
- google/genai/models.py,sha256=Zhl9ns-JJfUhYt_XTycx6DzMB7OP8eA6rpPwbq6q3vQ,227545
+ google/genai/models.py,sha256=7PR9QXjDqQ9j6xHoLFqvwx9_E9E88gPuebk0BafZmqc,232918
  google/genai/operations.py,sha256=KgM5vsagUnAMGk9wKxuQYBUh_6bwrPQ9BzZvydiumQA,16208
  google/genai/pagers.py,sha256=m0SfWWn1EJs2k1On3DZx371qb8g2BRm_188ExsicIRc,7098
  google/genai/py.typed,sha256=RsMFoLwBkAvY05t6izop4UHZtqOPLiKp3GkIEizzmQY,40
  google/genai/tokens.py,sha256=4BPW0gGWFeFVk3INkuY2tfREnsrvzQDhouvRI6_F9Q8,12235
- google/genai/tunings.py,sha256=QO7n8hRXJhiw7B_Jr2dBxxnhvnKGhtkPH6721Jt2k2w,57071
- google/genai/types.py,sha256=T5I04cmt668wxI28OX9F-8xahHnmfqotSczwpc1xMgg,567055
- google/genai/version.py,sha256=meVKVLiGBg5qMEJ0AIQYDf7Icw5Mxy1_bXQeMsePcig,627
- google_genai-1.46.0.dist-info/licenses/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
- google_genai-1.46.0.dist-info/METADATA,sha256=XH1pYkjPp84lsrs-fteMEsQGrmHS2OENsfTSnKcbMPQ,46242
- google_genai-1.46.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- google_genai-1.46.0.dist-info/top_level.txt,sha256=_1QvSJIhFAGfxb79D6DhB7SUw2X6T4rwnz_LLrbcD3c,7
- google_genai-1.46.0.dist-info/RECORD,,
+ google/genai/tunings.py,sha256=qCkvzZsqlfJL-KPUtZtjTp4rQeLYTc5LCYJfJgKyJhM,63586
+ google/genai/types.py,sha256=QVdMdjrgtZjkN4EinSnAgOc9tKeJ7xWjpztVq8tTrHQ,584659
+ google/genai/version.py,sha256=uVriLvLljZ_EUwzUyeuTH0mIiXjfBiPSFoXMwP29l6E,627
+ google_genai-1.48.0.dist-info/licenses/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
+ google_genai-1.48.0.dist-info/METADATA,sha256=lnyEk6qDliPNImLJjxz99V---_l4F3PK7P_v_d79Xjk,46732
+ google_genai-1.48.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ google_genai-1.48.0.dist-info/top_level.txt,sha256=_1QvSJIhFAGfxb79D6DhB7SUw2X6T4rwnz_LLrbcD3c,7
+ google_genai-1.48.0.dist-info/RECORD,,