google-genai 0.0.1__py3-none-any.whl → 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- google/genai/__init__.py +2 -0
- google/genai/_api_client.py +14 -6
- google/genai/_automatic_function_calling_util.py +0 -44
- google/genai/_extra_utils.py +15 -0
- google/genai/_transformers.py +3 -2
- google/genai/batches.py +254 -4
- google/genai/caches.py +10 -0
- google/genai/chats.py +14 -2
- google/genai/files.py +6 -0
- google/genai/live.py +74 -42
- google/genai/models.py +110 -11
- google/genai/tunings.py +317 -4
- google/genai/types.py +482 -85
- {google_genai-0.0.1.dist-info → google_genai-0.2.0.dist-info}/METADATA +75 -58
- google_genai-0.2.0.dist-info/RECORD +24 -0
- google_genai-0.0.1.dist-info/RECORD +0 -24
- {google_genai-0.0.1.dist-info → google_genai-0.2.0.dist-info}/LICENSE +0 -0
- {google_genai-0.0.1.dist-info → google_genai-0.2.0.dist-info}/WHEEL +0 -0
- {google_genai-0.0.1.dist-info → google_genai-0.2.0.dist-info}/top_level.txt +0 -0
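The bulk of the diff below is the package METADATA, whose embedded README switches every example to the `gemini-2.0-flash-exp` model string. As a quick orientation, here is a minimal sketch of the basic call those examples revolve around; the client construction and the placeholder API key are assumptions and are not part of this diff:

```python
from google import genai

# Sketch only: client construction is an assumption, not shown in the diff below.
# The key is a placeholder.
client = genai.Client(api_key='YOUR_API_KEY')

response = client.models.generate_content(
    model='gemini-2.0-flash-exp',   # model string used throughout the 0.2.0 README
    contents='What is your name?',
)
print(response.text)
```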
{google_genai-0.0.1.dist-info → google_genai-0.2.0.dist-info}/METADATA

@@ -1,10 +1,10 @@
 Metadata-Version: 2.1
 Name: google-genai
-Version: 0.0.1
+Version: 0.2.0
 Summary: GenAI Python SDK
 Author-email: Google LLC <googleapis-packages@google.com>
 License: Apache-2.0
-Project-URL: Homepage, https://github.com/
+Project-URL: Homepage, https://github.com/googleapis/python-genai
 Classifier: Intended Audience :: Developers
 Classifier: License :: OSI Approved :: Apache Software License
 Classifier: Operating System :: OS Independent
@@ -14,6 +14,7 @@ Classifier: Programming Language :: Python :: 3.9
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
 Classifier: Topic :: Internet
 Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Requires-Python: >=3.9
@@ -27,6 +28,13 @@ Requires-Dist: websockets<15.0dev,>=13.0
 
 # Google Gen AI SDK
 
+[](https://pypi.org/project/google-genai/)
+
+--------
+**Documentation:** https://googleapis.github.io/python-genai/
+
+-----
+
 ## Imports
 
 ``` python
@@ -66,7 +74,7 @@ The `client.models` modules exposes model inferencing and model getters.
 
 ``` python
 response = client.models.generate_content(
-model='gemini-
+model='gemini-2.0-flash-exp', contents='What is your name?'
 )
 print(response.text)
 ```
@@ -75,7 +83,7 @@ print(response.text)
 
 ``` python
 response = client.models.generate_content(
-model='gemini-
+model='gemini-2.0-flash-exp',
 contents='high',
 config=types.GenerateContentConfig(
 system_instruction='I say high, you say low',
@@ -92,7 +100,7 @@ dictionaries. You can get the type from `google.genai.types`.
 
 ``` python
 response = client.models.generate_content(
-model='gemini-
+model='gemini-2.0-flash-exp',
 contents=types.Part.from_text('Why is sky blue?'),
 config=types.GenerateContentConfig(
 temperature=0,
@@ -114,7 +122,7 @@ response
 
 ``` python
 response = client.models.generate_content(
-model='gemini-
+model='gemini-2.0-flash-exp',
 contents='Say something bad.',
 config=types.GenerateContentConfig(
 safety_settings= [types.SafetySetting(
@@ -143,7 +151,7 @@ def get_current_weather(location: str,) -> int:
 return 'sunny'
 
 response = client.models.generate_content(
-model='gemini-
+model='gemini-2.0-flash-exp',
 contents="What is the weather like in Boston?",
 config=types.GenerateContentConfig(tools=[get_current_weather],)
 )
@@ -171,7 +179,7 @@ tool = types.Tool(function_declarations=[function])
 
 
 response = client.models.generate_content(
-model='gemini-
+model='gemini-2.0-flash-exp',
 contents="What is the weather like in Boston?",
 config=types.GenerateContentConfig(tools=[tool],)
 )
@@ -191,7 +199,7 @@ function_response_part = types.Part.from_function_response(
 )
 
 response = client.models.generate_content(
-model='gemini-
+model='gemini-2.0-flash-exp',
 contents=[
 types.Part.from_text("What is the weather like in Boston?"),
 function_call_part,
@@ -221,7 +229,7 @@ class CountryInfo(BaseModel):
 
 
 response = client.models.generate_content(
-model='gemini-
+model='gemini-2.0-flash-exp',
 contents='Give me information of the United States.',
 config=types.GenerateContentConfig(
 response_mime_type= 'application/json',
@@ -233,7 +241,7 @@ print(response.text)
 
 ``` python
 response = client.models.generate_content(
-model='gemini-
+model='gemini-2.0-flash-exp',
 contents='Give me information of the United States.',
 config={
 'response_mime_type': 'application/json',
@@ -267,7 +275,7 @@ print(response.text)
 
 ``` python
 for chunk in client.models.generate_content_stream(
-model='gemini-
+model='gemini-2.0-flash-exp', contents='Tell me a story in 300 words.'
 ):
 print(chunk.text)
 ```
@@ -282,7 +290,7 @@ of `client.models.generate_content`
 
 ``` python
 request = await client.aio.models.generate_content(
-model='gemini-
+model='gemini-2.0-flash-exp', contents='Tell me a story in 300 words.'
 )
 
 print(response.text)
@@ -292,7 +300,7 @@ print(response.text)
 
 ``` python
 async for response in client.aio.models.generate_content_stream(
-model='gemini-
+model='gemini-2.0-flash-exp', contents='Tell me a story in 300 words.'
 ):
 print(response.text)
 ```
@@ -301,7 +309,7 @@ async for response in client.aio.models.generate_content_stream(
 
 ``` python
 response = client.models.count_tokens(
-model='gemini-
+model='gemini-2.0-flash-exp',
 contents='What is your name?',
 )
 print(response)
@@ -313,7 +321,7 @@ Compute tokens is not supported by Google AI.
 
 ``` python
 response = client.models.compute_tokens(
-model='gemini-
+model='gemini-2.0-flash-exp',
 contents='What is your name?',
 )
 print(response)
@@ -323,7 +331,7 @@ print(response)
 
 ``` python
 response = await client.aio.models.count_tokens(
-model='gemini-
+model='gemini-2.0-flash-exp',
 contents='What is your name?',
 )
 print(response)
@@ -360,14 +368,12 @@ Support for generate image in Google AI is behind an allowlist
 # Generate Image
 response1 = client.models.generate_image(
 model='imagen-3.0-generate-001',
-prompt='
+prompt='An umbrella in the foreground, and a rainy night sky in the background',
 config=types.GenerateImageConfig(
+negative_prompt= "human",
 number_of_images= 1,
-person_generation= "ALLOW_ADULT",
-safety_filter_level= "BLOCK_LOW_AND_ABOVE",
 include_rai_reason= True,
-
-aspect_ratio= "4:3"
+output_mime_type= "image/jpeg"
 )
 )
 response1.generated_images[0].image.show()
@@ -375,9 +381,10 @@ response1.generated_images[0].image.show()
 
 #### Upscale Image
 
+Upscale image is not supported in Google AI.
+
 ``` python
-# Upscale the generated image from
-from google.genai.types import Image
+# Upscale the generated image from above
 response2 = client.models.upscale_image(
 model='imagen-3.0-generate-001',
 image=response1.generated_images[0].image,
@@ -388,21 +395,36 @@ response2.generated_images[0].image.show()
 
 #### Edit Image
 
+Edit image is not supported in Google AI.
+
 ``` python
-# Edit the generated image from
-from google.genai.types import
+# Edit the generated image from above
+from google.genai.types import RawReferenceImage, MaskReferenceImage
+raw_ref_image = RawReferenceImage(
+reference_id=1,
+reference_image=response1.generated_images[0].image,
+)
+
+# Model computes a mask of the background
+mask_ref_image = MaskReferenceImage(
+reference_id=2,
+config=types.MaskReferenceConfig(
+mask_mode='MASK_MODE_BACKGROUND',
+mask_dilation=0,
+),
+)
+
 response3 = client.models.edit_image(
-model='
-prompt='
-
+model='imagen-3.0-capability-preview-0930',
+prompt='Sunlight and clear sky',
+reference_images=[raw_ref_image, mask_ref_image],
 config=types.EditImageConfig(
-edit_mode=
-mask_type="semantic",
-segmentation_classes=[156],
+edit_mode= 'EDIT_MODE_INPAINT_INSERTION',
 number_of_images= 1,
+negative_prompt= 'human',
 include_rai_reason= True,
-
-)
+output_mime_type= 'image/jpeg',
+),
 )
 response3.generated_images[0].image.show()
 ```
|
@@ -439,7 +461,7 @@ client.files.delete(name=file3.name)
|
|
439
461
|
### Create
|
440
462
|
|
441
463
|
``` python
|
442
|
-
if client.
|
464
|
+
if client.vertexai:
|
443
465
|
file_uris = [
|
444
466
|
'gs://cloud-samples-data/generative-ai/pdf/2312.11805v3.pdf',
|
445
467
|
'gs://cloud-samples-data/generative-ai/pdf/2403.05530.pdf'
|
@@ -489,7 +511,7 @@ client.models.generate_content(
|
|
489
511
|
## Tunings
|
490
512
|
|
491
513
|
`client.tunings` contains tuning job APIs and supports supervised fine
|
492
|
-
tuning through `tune` and
|
514
|
+
tuning through `tune` and distillation through `distill`
|
493
515
|
|
494
516
|
### Tune
|
495
517
|
|
@@ -497,7 +519,7 @@ tuning through `tune` and distiallation through `distill`
|
|
497
519
|
- Google AI supports tuning from inline examples
|
498
520
|
|
499
521
|
``` python
|
500
|
-
if client.
|
522
|
+
if client.vertexai:
|
501
523
|
model = 'gemini-1.5-pro-002'
|
502
524
|
training_dataset=types.TuningDataset(
|
503
525
|
gcs_uri='gs://cloud-samples-data/ai-platform/generative_ai/gemini-1_5/text/sft_train_data.jsonl',
|
@@ -576,9 +598,9 @@ for model in client.models.list(config={'page_size': 10}):
 ``` python
 pager = client.models.list(config={'page_size': 10})
 print(pager.page_size)
-print(pager
+print(pager[0])
 pager.next_page()
-print(pager
+print(pager[0])
 ```
 
 #### Async
@@ -591,15 +613,15 @@ async for job in await client.aio.models.list(config={'page_size': 10}):
 ``` python
 async_pager = await client.aio.models.list(config={'page_size': 10})
 print(async_pager.page_size)
-print(async_pager
+print(async_pager[0])
 await async_pager.next_page()
-print(async_pager
+print(async_pager[0])
 ```
 
 ### Update Tuned Model
 
 ``` python
-model = pager
+model = pager[0]
 
 model = client.models.update(
 model=model.name,
@@ -624,7 +646,7 @@ distillation_job = client.tunings.distill(
 config=genai.types.CreateDistillationJobConfig(
 epoch_count=1,
 pipeline_root_directory=(
-"gs://
+"gs://my-bucket"
 ),
 ),
 )
@@ -659,9 +681,9 @@ for job in client.tunings.list(config={'page_size': 10}):
 ``` python
 pager = client.tunings.list(config={'page_size': 10})
 print(pager.page_size)
-print(pager
+print(pager[0])
 pager.next_page()
-print(pager
+print(pager[0])
 ```
 
 #### Async
@@ -674,9 +696,9 @@ async for job in await client.aio.tunings.list(config={'page_size': 10}):
 ``` python
 async_pager = await client.aio.tunings.list(config={'page_size': 10})
 print(async_pager.page_size)
-print(async_pager
+print(async_pager[0])
 await async_pager.next_page()
-print(async_pager
+print(async_pager[0])
 ```
 
 ## Batch Prediction
@@ -689,12 +711,7 @@ Only supported in Vertex AI.
 # Specify model and source file only, destination and job display name will be auto-populated
 job = client.batches.create(
 model='gemini-1.5-flash-002',
-src='bq://
-# # optionally specify destination and display_name by yourself
-# config = {
-# 'dest': 'bq://vertex-sdk-dev.unified_genai_tests_batches.generate_content_responses',
-# 'display_name': 'create_batch_job_demo'
-# }
+src='bq://my-project.my-dataset.my-table',
 )
 
 job
@@ -733,9 +750,9 @@ for job in client.batches.list(config={'page_size': 10}):
 ``` python
 pager = client.batches.list(config={'page_size': 10})
 print(pager.page_size)
-print(pager
+print(pager[0])
 pager.next_page()
-print(pager
+print(pager[0])
 ```
 
 #### Async
@@ -746,11 +763,11 @@ async for job in await client.aio.batches.list(config={'page_size': 10}):
 ```
 
 ``` python
-async_pager = await client.aio.
+async_pager = await client.aio.batches.list(config={'page_size': 10})
 print(async_pager.page_size)
-print(async_pager
+print(async_pager[0])
 await async_pager.next_page()
-print(async_pager
+print(async_pager[0])
 ```
 
 ### Delete
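The METADATA hunks above also bump the distribution version from 0.0.1 to 0.2.0 and add the Python 3.13 classifier. A minimal, standard-library way to confirm which version is actually installed (this snippet is illustrative and not part of the package):

```python
from importlib.metadata import version

# Reads the installed distribution's METADATA, i.e. the file diffed above.
print(version('google-genai'))  # expected to print 0.2.0 after upgrading
```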
google_genai-0.2.0.dist-info/RECORD

@@ -0,0 +1,24 @@
+google/genai/__init__.py,sha256=bO4TBLSOack_93tDti_USNTkE8cvkLUn45TH-WS-HOE,674
+google/genai/_api_client.py,sha256=74qm-UTxDQ3-KCb61eqD3SAM379MrsQw8ly6GBLoDB8,15963
+google/genai/_automatic_function_calling_util.py,sha256=E25_66RH3DbDIucq7x-93XWPPBwB9FnzwD1NCGyPrjM,10242
+google/genai/_common.py,sha256=Yj5cBkq5QRNFSBqvpB949Rjo7cbIhdtKp5dJxMW_I6I,7971
+google/genai/_extra_utils.py,sha256=GQZnraFCrMffqrBEpurdcBmgrltRsnYgMizt-Ok6xX8,11098
+google/genai/_replay_api_client.py,sha256=QPNg4SBpOLS58bx-kuJQngxy1tbjMpCpJzmImCwYePA,16226
+google/genai/_test_api_client.py,sha256=p771T27icmzENxKtyNDwPG1sTI7jaoJNFPwlwq9GK6o,4759
+google/genai/_transformers.py,sha256=_zWNr7zFTrUFniECYaZUn0n4TdioLpj783l3-z1XvIE,13443
+google/genai/batches.py,sha256=Wi4Kptampp2WepAqv_AawwNCR6MKVhLKmzJdYXDQ_aE,37148
+google/genai/caches.py,sha256=LJm2raykec7_iCHsVbEtX4v942mR-OSQvxTVKcBN2RA,53434
+google/genai/chats.py,sha256=x-vCXrsxZ8kdEZ_0ZDfrBQnQ9urCr42x3urP0OXHyTo,5688
+google/genai/client.py,sha256=HH_lYnjPOwW-4Vgynyw4K8cwurT2g578Dc51H_uk7GY,9244
+google/genai/errors.py,sha256=TrlUk1jz7r1aN1lrL3FZZ30LU4iMfSonm1ZwEAk07k4,3048
+google/genai/files.py,sha256=dn3q8P9aTN9OG3PtA4AYDs9hF6Uk-jkMjgAW7dSlt_4,35573
+google/genai/live.py,sha256=T-pOtq7k43wE2VjQzqLrx-kqhotS66I2PY_NHBdv9G8,22056
+google/genai/models.py,sha256=t5XgwlgkNrQKb6eww0oBGzjMiMQaj-BQedc8lVdJHz4,154834
+google/genai/pagers.py,sha256=hSHd-gLvEzYWwK85i8EcFNWUMKtszUs7Nw2r3L7d6_U,6686
+google/genai/tunings.py,sha256=tFTSEaECKZ6xeYcxUTIKUmXqPoDymYP3eyTcEKjnPa4,49010
+google/genai/types.py,sha256=mIjtCSXbp6CRL5iEhtdxczoMTtyQ1EKYpBlzLvGIedY,263841
+google_genai-0.2.0.dist-info/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
+google_genai-0.2.0.dist-info/METADATA,sha256=qhc4AtoMxFa_-BIEzNTw36s-cE1imA3zOoILYIpL7as,17371
+google_genai-0.2.0.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
+google_genai-0.2.0.dist-info/top_level.txt,sha256=_1QvSJIhFAGfxb79D6DhB7SUw2X6T4rwnz_LLrbcD3c,7
+google_genai-0.2.0.dist-info/RECORD,,
google_genai-0.0.1.dist-info/RECORD

@@ -1,24 +0,0 @@
-google/genai/__init__.py,sha256=49gQv3uugzoY9BDHbhMKfHzYfn5CcttAm47Ecp6gvQs,651
-google/genai/_api_client.py,sha256=gzgQ7-Zu48trXpcr0WR6OqBk-BzujsFkNKBdifjS3U8,15637
-google/genai/_automatic_function_calling_util.py,sha256=Q7_wty4RNBoYBT2aHWf2EdJSJkqG965nri8F5NCLx20,11578
-google/genai/_common.py,sha256=Yj5cBkq5QRNFSBqvpB949Rjo7cbIhdtKp5dJxMW_I6I,7971
-google/genai/_extra_utils.py,sha256=TT3YieI_SxSWYyLJwhe7i33262vf2W0YTatBSF0dPHg,10684
-google/genai/_replay_api_client.py,sha256=QPNg4SBpOLS58bx-kuJQngxy1tbjMpCpJzmImCwYePA,16226
-google/genai/_test_api_client.py,sha256=p771T27icmzENxKtyNDwPG1sTI7jaoJNFPwlwq9GK6o,4759
-google/genai/_transformers.py,sha256=CYyQibFoW3ZZAOXrIb_BPZulLcmJvuBNJcCp3m3sGeQ,13475
-google/genai/batches.py,sha256=wpdyI5IFXDBLPzVkkEAcElPZEt76lWBkf7fNpqG9bb8,30406
-google/genai/caches.py,sha256=h4MX19ZUC7CAd7DY91FWPjUbTQHG-3-Oi4eDU-5nn40,53424
-google/genai/chats.py,sha256=80GWA431KehYTfAggMh6W3jHdDizbjk0RfO8MXilwDM,5342
-google/genai/client.py,sha256=HH_lYnjPOwW-4Vgynyw4K8cwurT2g578Dc51H_uk7GY,9244
-google/genai/errors.py,sha256=TrlUk1jz7r1aN1lrL3FZZ30LU4iMfSonm1ZwEAk07k4,3048
-google/genai/files.py,sha256=9B1Xsb2R7jBAU3Ot-1Z4YJjC9gQEywzAfLT24yHN4EI,35567
-google/genai/live.py,sha256=htpzNj8L8nionvbzQJLNZbwD4_1UQHTBGkgQVHXRkQo,20835
-google/genai/models.py,sha256=sQwTPqjxiv4jhP-Rn6nBlAyGBP_gK6lAUcVL56DwcwE,151681
-google/genai/pagers.py,sha256=hSHd-gLvEzYWwK85i8EcFNWUMKtszUs7Nw2r3L7d6_U,6686
-google/genai/tunings.py,sha256=srGxr4KlRxZ01lPT5ohtrNxqlA9I6RCbCLcxcfATY4s,39452
-google/genai/types.py,sha256=Fm-Y8CTYF6BY3ZE0EgzOCJ6C1SuygqT6Kfu4_Obw5Ko,250959
-google_genai-0.0.1.dist-info/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
-google_genai-0.0.1.dist-info/METADATA,sha256=TgTn66RX2xEykoOUkwVsSncghI8TGug0GhHe5QfHDhE,17151
-google_genai-0.0.1.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
-google_genai-0.0.1.dist-info/top_level.txt,sha256=_1QvSJIhFAGfxb79D6DhB7SUw2X6T4rwnz_LLrbcD3c,7
-google_genai-0.0.1.dist-info/RECORD,,
{google_genai-0.0.1.dist-info → google_genai-0.2.0.dist-info}/LICENSE: file without changes
{google_genai-0.0.1.dist-info → google_genai-0.2.0.dist-info}/WHEEL: file without changes
{google_genai-0.0.1.dist-info → google_genai-0.2.0.dist-info}/top_level.txt: file without changes
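Each RECORD line above is a `path,digest,size` triple; per the wheel RECORD format, the digest is the file's SHA-256 hash encoded as unpadded urlsafe base64. A small sketch for re-checking an entry locally (the helper name and the example path are illustrative):

```python
import base64
import hashlib

def record_digest(path: str) -> str:
    """Return a RECORD-style digest (sha256=<unpadded urlsafe base64>) for a file."""
    with open(path, 'rb') as f:
        raw = hashlib.sha256(f.read()).digest()
    return 'sha256=' + base64.urlsafe_b64encode(raw).rstrip(b'=').decode('ascii')

# Example: compare against the google/genai/types.py entry in the 0.2.0 RECORD above.
print(record_digest('google/genai/types.py'))
```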