google-genai 0.6.0__py3-none-any.whl → 0.7.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- google/genai/_api_client.py +72 -78
- google/genai/_api_module.py +24 -0
- google/genai/_automatic_function_calling_util.py +43 -22
- google/genai/_common.py +0 -6
- google/genai/_extra_utils.py +22 -16
- google/genai/_replay_api_client.py +2 -2
- google/genai/_test_api_client.py +1 -1
- google/genai/_transformers.py +218 -97
- google/genai/batches.py +194 -155
- google/genai/caches.py +117 -134
- google/genai/chats.py +22 -18
- google/genai/client.py +31 -37
- google/genai/files.py +94 -125
- google/genai/live.py +11 -5
- google/genai/models.py +500 -254
- google/genai/tunings.py +85 -422
- google/genai/types.py +495 -458
- google/genai/version.py +1 -1
- {google_genai-0.6.0.dist-info → google_genai-0.7.0.dist-info}/METADATA +116 -68
- google_genai-0.7.0.dist-info/RECORD +26 -0
- google_genai-0.6.0.dist-info/RECORD +0 -25
- {google_genai-0.6.0.dist-info → google_genai-0.7.0.dist-info}/LICENSE +0 -0
- {google_genai-0.6.0.dist-info → google_genai-0.7.0.dist-info}/WHEEL +0 -0
- {google_genai-0.6.0.dist-info → google_genai-0.7.0.dist-info}/top_level.txt +0 -0
google/genai/version.py
CHANGED
@@ -1,6 +1,6 @@
|
|
1
1
|
Metadata-Version: 2.2
|
2
2
|
Name: google-genai
|
3
|
-
Version: 0.6.0
|
3
|
+
Version: 0.7.0
|
4
4
|
Summary: GenAI Python SDK
|
5
5
|
Author-email: Google LLC <googleapis-packages@google.com>
|
6
6
|
License: Apache-2.0
|
@@ -21,7 +21,6 @@ Requires-Python: >=3.9
|
|
21
21
|
Description-Content-Type: text/markdown
|
22
22
|
License-File: LICENSE
|
23
23
|
Requires-Dist: google-auth<3.0.0dev,>=2.14.1
|
24
|
-
Requires-Dist: pillow<12.0.0,>=10.0.0
|
25
24
|
Requires-Dist: pydantic<3.0.0dev,>=2.0.0
|
26
25
|
Requires-Dist: requests<3.0.0dev,>=2.28.1
|
27
26
|
Requires-Dist: websockets<15.0dev,>=13.0
|
@@ -83,12 +82,12 @@ The `client.models` modules exposes model inferencing and model getters.
|
|
83
82
|
|
84
83
|
```python
|
85
84
|
response = client.models.generate_content(
|
86
|
-
model="gemini-2.0-flash-exp", contents="
|
85
|
+
model="gemini-2.0-flash-exp", contents="why is the sky blue?"
|
87
86
|
)
|
88
87
|
print(response.text)
|
89
88
|
```
|
90
89
|
|
91
|
-
#### with uploaded file (
|
90
|
+
#### with uploaded file (Gemini API only)
|
92
91
|
download the file in console.
|
93
92
|
|
94
93
|
```cmd
|
@@ -127,7 +126,7 @@ dictionaries. You can get the type from `google.genai.types`.
|
|
127
126
|
```python
|
128
127
|
response = client.models.generate_content(
|
129
128
|
model="gemini-2.0-flash-exp",
|
130
|
-
contents=types.Part.from_text("Why is the sky blue?"),
|
129
|
+
contents=types.Part.from_text(text="Why is the sky blue?"),
|
131
130
|
config=types.GenerateContentConfig(
|
132
131
|
temperature=0,
|
133
132
|
top_p=0.95,
|
@@ -141,7 +140,43 @@ response = client.models.generate_content(
|
|
141
140
|
),
|
142
141
|
)
|
143
142
|
|
144
|
-
response
|
143
|
+
print(response.text)
|
144
|
+
```
|
145
|
+
|
146
|
+
### Thinking
|
147
|
+
|
148
|
+
The Gemini 2.0 Flash Thinking model is an experimental model that could return
|
149
|
+
"thoughts" as part of its response.
|
150
|
+
|
151
|
+
#### Gemini Developer API
|
152
|
+
|
153
|
+
Thinking config is only available in v1alpha for Gemini AI API.
|
154
|
+
|
155
|
+
```python
|
156
|
+
response = client.models.generate_content(
|
157
|
+
model='gemini-2.0-flash-thinking-exp',
|
158
|
+
contents='What is the sum of natural numbers from 1 to 100?',
|
159
|
+
config=types.GenerateContentConfig(
|
160
|
+
thinking_config=types.ThinkingConfig(include_thoughts=True),
|
161
|
+
http_options=types.HttpOptions(api_version='v1alpha'),
|
162
|
+
)
|
163
|
+
)
|
164
|
+
for part in response.candidates[0].content.parts:
|
165
|
+
print(part)
|
166
|
+
```
|
167
|
+
|
168
|
+
#### Vertex AI API
|
169
|
+
|
170
|
+
```python
|
171
|
+
response = client.models.generate_content(
|
172
|
+
model='gemini-2.0-flash-thinking-exp-01-21',
|
173
|
+
contents='What is the sum of natural numbers from 1 to 100?',
|
174
|
+
config=types.GenerateContentConfig(
|
175
|
+
thinking_config=types.ThinkingConfig(include_thoughts=True),
|
176
|
+
)
|
177
|
+
)
|
178
|
+
for part in response.candidates[0].content.parts:
|
179
|
+
print(part)
|
145
180
|
```
|
146
181
|
|
147
182
|
### List Base Models
|
@@ -232,10 +267,10 @@ Then you will receive a function call part in the response.
|
|
232
267
|
function = types.FunctionDeclaration(
|
233
268
|
name="get_current_weather",
|
234
269
|
description="Get the current weather in a given location",
|
235
|
-
parameters=types.
|
270
|
+
parameters=types.Schema(
|
236
271
|
type="OBJECT",
|
237
272
|
properties={
|
238
|
-
"location": types.
|
273
|
+
"location": types.Schema(
|
239
274
|
type="STRING",
|
240
275
|
description="The city and state, e.g. San Francisco, CA",
|
241
276
|
),
|
@@ -263,10 +298,9 @@ The following example shows how to do it for a simple function invocation.
|
|
263
298
|
```python
|
264
299
|
user_prompt_content = types.Content(
|
265
300
|
role="user",
|
266
|
-
parts=[types.Part.from_text("What is the weather like in Boston?")],
|
301
|
+
parts=[types.Part.from_text(text="What is the weather like in Boston?")],
|
267
302
|
)
|
268
|
-
|
269
|
-
function_call_part = function_call_content.parts[0]
|
303
|
+
function_call_part = response.function_calls[0]
|
270
304
|
|
271
305
|
|
272
306
|
try:
|
@@ -366,6 +400,57 @@ response = client.models.generate_content(
|
|
366
400
|
print(response.text)
|
367
401
|
```
|
368
402
|
|
403
|
+
### Enum Response Schema
|
404
|
+
|
405
|
+
#### Text Response
|
406
|
+
|
407
|
+
You can set response_mime_type to 'text/x.enum' to return one of those enum
|
408
|
+
values as the response.
|
409
|
+
|
410
|
+
```python
|
411
|
+
class InstrumentEnum(Enum):
|
412
|
+
PERCUSSION = 'Percussion'
|
413
|
+
STRING = 'String'
|
414
|
+
WOODWIND = 'Woodwind'
|
415
|
+
BRASS = 'Brass'
|
416
|
+
KEYBOARD = 'Keyboard'
|
417
|
+
|
418
|
+
response = client.models.generate_content(
|
419
|
+
model='gemini-2.0-flash-exp',
|
420
|
+
contents='What instrument plays multiple notes at once?',
|
421
|
+
config={
|
422
|
+
'response_mime_type': 'text/x.enum',
|
423
|
+
'response_schema': InstrumentEnum,
|
424
|
+
},
|
425
|
+
)
|
426
|
+
print(response.text)
|
427
|
+
```
|
428
|
+
|
429
|
+
#### JSON Response
|
430
|
+
|
431
|
+
You can also set response_mime_type to 'application/json', the response will be identical but in quotes.
|
432
|
+
|
433
|
+
```python
|
434
|
+
from enum import Enum
|
435
|
+
|
436
|
+
class InstrumentEnum(Enum):
|
437
|
+
PERCUSSION = 'Percussion'
|
438
|
+
STRING = 'String'
|
439
|
+
WOODWIND = 'Woodwind'
|
440
|
+
BRASS = 'Brass'
|
441
|
+
KEYBOARD = 'Keyboard'
|
442
|
+
|
443
|
+
response = client.models.generate_content(
|
444
|
+
model='gemini-2.0-flash-exp',
|
445
|
+
contents='What instrument plays multiple notes at once?',
|
446
|
+
config={
|
447
|
+
'response_mime_type': 'application/json',
|
448
|
+
'response_schema': InstrumentEnum,
|
449
|
+
},
|
450
|
+
)
|
451
|
+
print(response.text)
|
452
|
+
```
|
453
|
+
|
369
454
|
### Streaming
|
370
455
|
|
371
456
|
#### Streaming for text content
|
@@ -434,10 +519,10 @@ print(response.text)
|
|
434
519
|
### Streaming
|
435
520
|
|
436
521
|
```python
|
437
|
-
async for
|
522
|
+
async for chunk in await client.aio.models.generate_content_stream(
|
438
523
|
model="gemini-2.0-flash-exp", contents="Tell me a story in 300 words."
|
439
524
|
):
|
440
|
-
print(
|
525
|
+
print(chunk.text, end="")
|
441
526
|
```
|
442
527
|
|
443
528
|
### Count Tokens and Compute Tokens
|
@@ -445,7 +530,7 @@ async for response in client.aio.models.generate_content_stream(
|
|
445
530
|
```python
|
446
531
|
response = client.models.count_tokens(
|
447
532
|
model="gemini-2.0-flash-exp",
|
448
|
-
contents="
|
533
|
+
contents="why is the sky blue?",
|
449
534
|
)
|
450
535
|
print(response)
|
451
536
|
```
|
@@ -457,7 +542,7 @@ Compute tokens is only supported in Vertex AI.
|
|
457
542
|
```python
|
458
543
|
response = client.models.compute_tokens(
|
459
544
|
model="gemini-2.0-flash-exp",
|
460
|
-
contents="
|
545
|
+
contents="why is the sky blue?",
|
461
546
|
)
|
462
547
|
print(response)
|
463
548
|
```
|
@@ -467,7 +552,7 @@ print(response)
|
|
467
552
|
```python
|
468
553
|
response = await client.aio.models.count_tokens(
|
469
554
|
model="gemini-2.0-flash-exp",
|
470
|
-
contents="
|
555
|
+
contents="why is the sky blue?",
|
471
556
|
)
|
472
557
|
print(response)
|
473
558
|
```
|
@@ -477,7 +562,7 @@ print(response)
|
|
477
562
|
```python
|
478
563
|
response = client.models.embed_content(
|
479
564
|
model="text-embedding-004",
|
480
|
-
contents="
|
565
|
+
contents="why is the sky blue?",
|
481
566
|
)
|
482
567
|
print(response)
|
483
568
|
```
|
@@ -486,7 +571,7 @@ print(response)
|
|
486
571
|
# multiple contents with config
|
487
572
|
response = client.models.embed_content(
|
488
573
|
model="text-embedding-004",
|
489
|
-
contents=["
|
574
|
+
contents=["why is the sky blue?", "What is your age?"],
|
490
575
|
config=types.EmbedContentConfig(output_dimensionality=10),
|
491
576
|
)
|
492
577
|
|
@@ -495,16 +580,16 @@ print(response)
|
|
495
580
|
|
496
581
|
### Imagen
|
497
582
|
|
498
|
-
#### Generate
|
583
|
+
#### Generate Images
|
499
584
|
|
500
|
-
Support for generate
|
585
|
+
Support for generate images in Gemini Developer API is behind an allowlist
|
501
586
|
|
502
587
|
```python
|
503
588
|
# Generate Image
|
504
|
-
response1 = client.models.
|
505
|
-
model="imagen-3.0-generate-
|
589
|
+
response1 = client.models.generate_images(
|
590
|
+
model="imagen-3.0-generate-002",
|
506
591
|
prompt="An umbrella in the foreground, and a rainy night sky in the background",
|
507
|
-
config=types.
|
592
|
+
config=types.GenerateImagesConfig(
|
508
593
|
negative_prompt="human",
|
509
594
|
number_of_images=1,
|
510
595
|
include_rai_reason=True,
|
@@ -603,7 +688,7 @@ print(response.text)
|
|
603
688
|
|
604
689
|
```python
|
605
690
|
chat = client.aio.chats.create(model="gemini-2.0-flash-exp")
|
606
|
-
async for chunk in chat.send_message_stream("tell me a story"):
|
691
|
+
async for chunk in await chat.send_message_stream("tell me a story"):
|
607
692
|
print(chunk.text)
|
608
693
|
```
|
609
694
|
|
@@ -679,7 +764,7 @@ cached_content = client.caches.create(
|
|
679
764
|
cached_content = client.caches.get(name=cached_content.name)
|
680
765
|
```
|
681
766
|
|
682
|
-
### Generate Content
|
767
|
+
### Generate Content with Caches
|
683
768
|
|
684
769
|
```python
|
685
770
|
response = client.models.generate_content(
|
@@ -695,7 +780,7 @@ print(response.text)
|
|
695
780
|
## Tunings
|
696
781
|
|
697
782
|
`client.tunings` contains tuning job APIs and supports supervised fine
|
698
|
-
tuning through `tune
|
783
|
+
tuning through `tune`.
|
699
784
|
|
700
785
|
### Tune
|
701
786
|
|
@@ -760,7 +845,7 @@ while tuning_job.state in running_states:
|
|
760
845
|
```python
|
761
846
|
response = client.models.generate_content(
|
762
847
|
model=tuning_job.tuned_model.endpoint,
|
763
|
-
contents="
|
848
|
+
contents="why is the sky blue?",
|
764
849
|
)
|
765
850
|
|
766
851
|
print(response.text)
|
@@ -820,43 +905,6 @@ model = client.models.update(
|
|
820
905
|
print(model)
|
821
906
|
```
|
822
907
|
|
823
|
-
### Distillation
|
824
|
-
|
825
|
-
Only supported in Vertex AI. Requires allowlist.
|
826
|
-
|
827
|
-
```python
|
828
|
-
distillation_job = client.tunings.distill(
|
829
|
-
student_model="gemma-2b-1.1-it",
|
830
|
-
teacher_model="gemini-1.5-pro-002",
|
831
|
-
training_dataset=genai.types.DistillationDataset(
|
832
|
-
gcs_uri="gs://cloud-samples-data/ai-platform/generative_ai/gemini-1_5/text/sft_train_data.jsonl",
|
833
|
-
),
|
834
|
-
config=genai.types.CreateDistillationJobConfig(
|
835
|
-
epoch_count=1,
|
836
|
-
pipeline_root_directory=("gs://my-bucket"),
|
837
|
-
),
|
838
|
-
)
|
839
|
-
print(distillation_job)
|
840
|
-
```
|
841
|
-
|
842
|
-
```python
|
843
|
-
completed_states = set(
|
844
|
-
[
|
845
|
-
"JOB_STATE_SUCCEEDED",
|
846
|
-
"JOB_STATE_FAILED",
|
847
|
-
"JOB_STATE_CANCELLED",
|
848
|
-
"JOB_STATE_PAUSED",
|
849
|
-
]
|
850
|
-
)
|
851
|
-
|
852
|
-
while distillation_job.state not in completed_states:
|
853
|
-
print(distillation_job.state)
|
854
|
-
distillation_job = client.tunings.get(name=distillation_job.name)
|
855
|
-
time.sleep(10)
|
856
|
-
|
857
|
-
print(distillation_job)
|
858
|
-
```
|
859
|
-
|
860
908
|
|
861
909
|
### List Tuning Jobs
|
862
910
|
|
@@ -932,12 +980,12 @@ job
|
|
932
980
|
### List
|
933
981
|
|
934
982
|
```python
|
935
|
-
for job in client.batches.list(config=types.
|
983
|
+
for job in client.batches.list(config=types.ListBatchJobsConfig(page_size=10)):
|
936
984
|
print(job)
|
937
985
|
```
|
938
986
|
|
939
987
|
```python
|
940
|
-
pager = client.batches.list(config=types.
|
988
|
+
pager = client.batches.list(config=types.ListBatchJobsConfig(page_size=10))
|
941
989
|
print(pager.page_size)
|
942
990
|
print(pager[0])
|
943
991
|
pager.next_page()
|
@@ -948,14 +996,14 @@ print(pager[0])
|
|
948
996
|
|
949
997
|
```python
|
950
998
|
async for job in await client.aio.batches.list(
|
951
|
-
config=types.
|
999
|
+
config=types.ListBatchJobsConfig(page_size=10)
|
952
1000
|
):
|
953
1001
|
print(job)
|
954
1002
|
```
|
955
1003
|
|
956
1004
|
```python
|
957
1005
|
async_pager = await client.aio.batches.list(
|
958
|
-
config=types.
|
1006
|
+
config=types.ListBatchJobsConfig(page_size=10)
|
959
1007
|
)
|
960
1008
|
print(async_pager.page_size)
|
961
1009
|
print(async_pager[0])
|
@@ -0,0 +1,26 @@
|
|
1
|
+
google/genai/__init__.py,sha256=IYw-PcsdgjSpS1mU_ZcYkTfPocsJ4aVmrDxP7vX7c6Y,709
|
2
|
+
google/genai/_api_client.py,sha256=JPCynlSCc_yPEV4-rdV2-BPv-OHoBvotxXbI0VLA3Q4,22882
|
3
|
+
google/genai/_api_module.py,sha256=9bxmtcSTpT8Ht6VwJyw7fQqiR0jJXz7350dWGl-bC5E,780
|
4
|
+
google/genai/_automatic_function_calling_util.py,sha256=sEaDAeHjv-H71o1L3_P8sqOslK4TK0Rybn4WPymeEBk,10665
|
5
|
+
google/genai/_common.py,sha256=aAtTQeGYn8WeiQD2lbjYiHZXWzBdj59wKiqAyQeaByc,8610
|
6
|
+
google/genai/_extra_utils.py,sha256=y-6Jr2GN2BKZV67I6fTgDtwfsOTQs7QlLDBQdmW_jKk,11258
|
7
|
+
google/genai/_replay_api_client.py,sha256=8KMjwjah52Mgf0rURK-CO10MqtULh8vkRSOT4-AqPpU,14722
|
8
|
+
google/genai/_test_api_client.py,sha256=2PvDcW3h01U4UOSoj7TUo6TwdBHSEN_lO2tXjBoh5Fw,4765
|
9
|
+
google/genai/_transformers.py,sha256=AiSVoQML3MK6AP5xTStIiJUOlrZO4m_qBULOjgdZHC0,21963
|
10
|
+
google/genai/batches.py,sha256=jv8pW_g_cZee6ol5ER5bQRUkXsj4IUcZC5cMo-YAnt0,38033
|
11
|
+
google/genai/caches.py,sha256=jsiclHO71kIa2CNrds3O8PL2fCNr_dlhUSPjhiRsjNE,53152
|
12
|
+
google/genai/chats.py,sha256=GyufXQPtyP_v4L3943xaKXMpo1Us9sBTdMSTUV4P6s8,7827
|
13
|
+
google/genai/client.py,sha256=MTZ3DOXk1_xgljaHlvF16jr_SKVPRfU8lZ1eH_dfDeQ,9334
|
14
|
+
google/genai/errors.py,sha256=DtpDZT5UDqumk2cTRUlg3k4ypmO_0tkMNzJgA3qzCmc,3666
|
15
|
+
google/genai/files.py,sha256=FQLGv-at7T_AZiVkhTsreIaLK_U1acufx-zt8coOjkc,41706
|
16
|
+
google/genai/live.py,sha256=wxz8ebqcPR6JJs39OOVz8zPzfAf31Zol7sGE7byQEyI,23302
|
17
|
+
google/genai/models.py,sha256=mitGmy1myd6-zmmV8BEDkdc88cbp8ZF9SyZTNeWnBfk,166628
|
18
|
+
google/genai/pagers.py,sha256=hSHd-gLvEzYWwK85i8EcFNWUMKtszUs7Nw2r3L7d6_U,6686
|
19
|
+
google/genai/tunings.py,sha256=iJJYn1O_wjFKTIL8VS2zpIRqpfCNRrO2REP2ztgFW6M,39144
|
20
|
+
google/genai/types.py,sha256=vOP1JkluVGFhmNFBJk6Z1BoOv4zzwysjIUiU7GFPXwg,273187
|
21
|
+
google/genai/version.py,sha256=nKmMytxBOALBA2ILryXEQSQpZZJxtfZwKkJcznog9Lw,626
|
22
|
+
google_genai-0.7.0.dist-info/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
|
23
|
+
google_genai-0.7.0.dist-info/METADATA,sha256=eUcx3JIC0OOIh3_hT-pfkYvkyP2ZWrUXU5JoCZqTDwg,23948
|
24
|
+
google_genai-0.7.0.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
|
25
|
+
google_genai-0.7.0.dist-info/top_level.txt,sha256=_1QvSJIhFAGfxb79D6DhB7SUw2X6T4rwnz_LLrbcD3c,7
|
26
|
+
google_genai-0.7.0.dist-info/RECORD,,
|
@@ -1,25 +0,0 @@
|
|
1
|
-
google/genai/__init__.py,sha256=IYw-PcsdgjSpS1mU_ZcYkTfPocsJ4aVmrDxP7vX7c6Y,709
|
2
|
-
google/genai/_api_client.py,sha256=_Ta8Sjkg5hyO9UspTboevtk19-h7x1DBD940reMVM5c,23341
|
3
|
-
google/genai/_automatic_function_calling_util.py,sha256=qbMCO8x6THe1O7Bn-L97rlbDSYJoX_gUfztvKHh-u6E,10078
|
4
|
-
google/genai/_common.py,sha256=xJadfXcUP2ry348wz6Pd7v6abTi_5KlCieJre2nZUi8,8724
|
5
|
-
google/genai/_extra_utils.py,sha256=QhmZV-vfKfNQ2KMUVdAD-Le37qzLD4dSLl953zyOvCk,11101
|
6
|
-
google/genai/_replay_api_client.py,sha256=xD6NAxy-8vJQS8FtcWf4dpVMjW90V5xaDO9-ndzY8VU,14722
|
7
|
-
google/genai/_test_api_client.py,sha256=p771T27icmzENxKtyNDwPG1sTI7jaoJNFPwlwq9GK6o,4759
|
8
|
-
google/genai/_transformers.py,sha256=pMMq6IJTLh_QKHi63XTYKNbWOPJ2mpXLB2Q6v78eQsk,19268
|
9
|
-
google/genai/batches.py,sha256=1TYMFgpe6-jz1WG-PzP2aj0p_Pvb0y81HFfnrJ8Vj7g,37504
|
10
|
-
google/genai/caches.py,sha256=yXk1apId5SWzfZ5yi6ejonfT7AWiYgzb3FsRxAXFN5s,53998
|
11
|
-
google/genai/chats.py,sha256=GobIFlez3eTRWWDtUycnubrMz0hB3v3gvDVSdMFJTNc,7642
|
12
|
-
google/genai/client.py,sha256=AM7yAthSW5Ajnig2BfwCNPExjTTtrZR0b0pgyThnDBo,9497
|
13
|
-
google/genai/errors.py,sha256=DtpDZT5UDqumk2cTRUlg3k4ypmO_0tkMNzJgA3qzCmc,3666
|
14
|
-
google/genai/files.py,sha256=ySacctcUekVTHMHKWepEfge-KgVMvJlek6UIxn7qHSw,42537
|
15
|
-
google/genai/live.py,sha256=vWAzuyAbK63n1Vr-iRNMuDq3McjgmSXY5ToO2bzdp28,23106
|
16
|
-
google/genai/models.py,sha256=2Aj98r1B5cxryAxBRm1iUhpZ3px0c26ZhyH_0h9NQp4,160762
|
17
|
-
google/genai/pagers.py,sha256=hSHd-gLvEzYWwK85i8EcFNWUMKtszUs7Nw2r3L7d6_U,6686
|
18
|
-
google/genai/tunings.py,sha256=IddtUlYqPZmaicPoO4yhCPefop_FhJiKl9l4dyPEeVE,49396
|
19
|
-
google/genai/types.py,sha256=wFhtYoto-ZfcqqRyu9ZGOQss9wBwPYTdcLuCjnil_kY,274625
|
20
|
-
google/genai/version.py,sha256=GkH2NQgVNItrcxGOfKBijKTmc_gqY3cPzldxCE11Jm4,626
|
21
|
-
google_genai-0.6.0.dist-info/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
|
22
|
-
google_genai-0.6.0.dist-info/METADATA,sha256=AiUA_1JhtT5M6vqc6mzs0i6qI1RJFOccuUJH-ZlX_z8,22743
|
23
|
-
google_genai-0.6.0.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
|
24
|
-
google_genai-0.6.0.dist-info/top_level.txt,sha256=_1QvSJIhFAGfxb79D6DhB7SUw2X6T4rwnz_LLrbcD3c,7
|
25
|
-
google_genai-0.6.0.dist-info/RECORD,,
|
File without changes
|
File without changes
|
File without changes
|