google-genai 0.6.0__tar.gz → 0.8.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {google_genai-0.6.0/google_genai.egg-info → google_genai-0.8.0}/PKG-INFO +119 -70
- {google_genai-0.6.0 → google_genai-0.8.0}/README.md +118 -68
- {google_genai-0.6.0 → google_genai-0.8.0}/google/genai/_api_client.py +74 -82
- google_genai-0.8.0/google/genai/_api_module.py +24 -0
- {google_genai-0.6.0 → google_genai-0.8.0}/google/genai/_automatic_function_calling_util.py +43 -22
- {google_genai-0.6.0 → google_genai-0.8.0}/google/genai/_common.py +11 -8
- {google_genai-0.6.0 → google_genai-0.8.0}/google/genai/_extra_utils.py +22 -16
- google_genai-0.8.0/google/genai/_operations.py +365 -0
- {google_genai-0.6.0 → google_genai-0.8.0}/google/genai/_replay_api_client.py +7 -2
- {google_genai-0.6.0 → google_genai-0.8.0}/google/genai/_test_api_client.py +1 -1
- {google_genai-0.6.0 → google_genai-0.8.0}/google/genai/_transformers.py +218 -97
- {google_genai-0.6.0 → google_genai-0.8.0}/google/genai/batches.py +194 -155
- {google_genai-0.6.0 → google_genai-0.8.0}/google/genai/caches.py +117 -134
- {google_genai-0.6.0 → google_genai-0.8.0}/google/genai/chats.py +22 -18
- {google_genai-0.6.0 → google_genai-0.8.0}/google/genai/client.py +31 -37
- {google_genai-0.6.0 → google_genai-0.8.0}/google/genai/files.py +154 -183
- {google_genai-0.6.0 → google_genai-0.8.0}/google/genai/live.py +11 -5
- {google_genai-0.6.0 → google_genai-0.8.0}/google/genai/models.py +506 -254
- {google_genai-0.6.0 → google_genai-0.8.0}/google/genai/tunings.py +85 -422
- {google_genai-0.6.0 → google_genai-0.8.0}/google/genai/types.py +647 -458
- {google_genai-0.6.0 → google_genai-0.8.0}/google/genai/version.py +1 -1
- {google_genai-0.6.0 → google_genai-0.8.0/google_genai.egg-info}/PKG-INFO +119 -70
- {google_genai-0.6.0 → google_genai-0.8.0}/google_genai.egg-info/SOURCES.txt +2 -0
- {google_genai-0.6.0 → google_genai-0.8.0}/google_genai.egg-info/requires.txt +0 -1
- {google_genai-0.6.0 → google_genai-0.8.0}/pyproject.toml +1 -2
- {google_genai-0.6.0 → google_genai-0.8.0}/LICENSE +0 -0
- {google_genai-0.6.0 → google_genai-0.8.0}/google/genai/__init__.py +0 -0
- {google_genai-0.6.0 → google_genai-0.8.0}/google/genai/errors.py +0 -0
- {google_genai-0.6.0 → google_genai-0.8.0}/google/genai/pagers.py +0 -0
- {google_genai-0.6.0 → google_genai-0.8.0}/google_genai.egg-info/dependency_links.txt +0 -0
- {google_genai-0.6.0 → google_genai-0.8.0}/google_genai.egg-info/top_level.txt +0 -0
- {google_genai-0.6.0 → google_genai-0.8.0}/setup.cfg +0 -0
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: google-genai
-Version: 0.6.0
+Version: 0.8.0
 Summary: GenAI Python SDK
 Author-email: Google LLC <googleapis-packages@google.com>
 License: Apache-2.0
@@ -21,7 +21,6 @@ Requires-Python: >=3.9
 Description-Content-Type: text/markdown
 License-File: LICENSE
 Requires-Dist: google-auth<3.0.0dev,>=2.14.1
-Requires-Dist: pillow<12.0.0,>=10.0.0
 Requires-Dist: pydantic<3.0.0dev,>=2.0.0
 Requires-Dist: requests<3.0.0dev,>=2.28.1
 Requires-Dist: websockets<15.0dev,>=13.0
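
Note: 0.8.0 drops `pillow` from the required dependencies, so image helpers now expect it to be installed separately. The README snippets quoted in the hunks below all assume an existing `client`; a minimal construction sketch (the API key, project, and location values are placeholders):

```python
from google import genai

# Gemini Developer API (the key value is a placeholder)
client = genai.Client(api_key="YOUR_GEMINI_API_KEY")

# ...or Vertex AI (project and location are placeholders)
client = genai.Client(
    vertexai=True, project="your-project-id", location="us-central1"
)
```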
@@ -83,12 +82,12 @@ The `client.models` modules exposes model inferencing and model getters.
 
 ```python
 response = client.models.generate_content(
-    model="gemini-2.0-flash-exp", contents="
+    model="gemini-2.0-flash-exp", contents="why is the sky blue?"
 )
 print(response.text)
 ```
 
-#### with uploaded file (
+#### with uploaded file (Gemini API only)
 download the file in console.
 
 ```cmd
@@ -98,9 +97,10 @@ download the file in console.
 python code.
 
 ```python
-file = client.files.upload(path="a11.
+file = client.files.upload(path="a11.txt")
 response = client.models.generate_content(
-    model="gemini-2.0-flash-exp",
+    model="gemini-2.0-flash-exp",
+    contents=["Could you summarize this file?", file]
 )
 print(response.text)
 ```
@@ -127,7 +127,7 @@ dictionaries. You can get the type from `google.genai.types`.
 ```python
 response = client.models.generate_content(
     model="gemini-2.0-flash-exp",
-    contents=types.Part.from_text("Why is the sky blue?"),
+    contents=types.Part.from_text(text="Why is the sky blue?"),
     config=types.GenerateContentConfig(
         temperature=0,
         top_p=0.95,
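
Note: the hunk above captures a breaking change in 0.8.0: `Part.from_text` now takes its argument by keyword. A minimal migration sketch:

```python
from google.genai import types

# 0.6.0 accepted a positional argument:
#   types.Part.from_text("Why is the sky blue?")
# 0.8.0 requires the keyword form:
part = types.Part.from_text(text="Why is the sky blue?")
```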
@@ -141,7 +141,43 @@ response = client.models.generate_content(
     ),
 )
 
-response
+print(response.text)
+```
+
+### Thinking
+
+The Gemini 2.0 Flash Thinking model is an experimental model that could return
+"thoughts" as part of its response.
+
+#### Gemini Developer API
+
+Thinking config is only available in v1alpha for Gemini AI API.
+
+```python
+response = client.models.generate_content(
+    model='gemini-2.0-flash-thinking-exp',
+    contents='What is the sum of natural numbers from 1 to 100?',
+    config=types.GenerateContentConfig(
+        thinking_config=types.ThinkingConfig(include_thoughts=True),
+        http_options=types.HttpOptions(api_version='v1alpha'),
+    )
+)
+for part in response.candidates[0].content.parts:
+    print(part)
+```
+
+#### Vertex AI API
+
+```python
+response = client.models.generate_content(
+    model='gemini-2.0-flash-thinking-exp-01-21',
+    contents='What is the sum of natural numbers from 1 to 100?',
+    config=types.GenerateContentConfig(
+        thinking_config=types.ThinkingConfig(include_thoughts=True),
+    )
+)
+for part in response.candidates[0].content.parts:
+    print(part)
 ```
 
 ### List Base Models
@@ -232,10 +268,10 @@ Then you will receive a function call part in the response.
 function = types.FunctionDeclaration(
     name="get_current_weather",
     description="Get the current weather in a given location",
-    parameters=types.
+    parameters=types.Schema(
         type="OBJECT",
         properties={
-            "location": types.
+            "location": types.Schema(
                 type="STRING",
                 description="The city and state, e.g. San Francisco, CA",
             ),
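
Note: the hunk above shows function parameters now being declared with `types.Schema`. A sketch of the complete declaration after this change, assembled from the surrounding README context; the closing lines and the `Tool` wrapper are assumptions:

```python
from google.genai import types

function = types.FunctionDeclaration(
    name="get_current_weather",
    description="Get the current weather in a given location",
    parameters=types.Schema(
        type="OBJECT",
        properties={
            "location": types.Schema(
                type="STRING",
                description="The city and state, e.g. San Francisco, CA",
            ),
        },
    ),
)

# Assumed continuation: wrap the declaration as a tool for generate_content.
tool = types.Tool(function_declarations=[function])
```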
@@ -263,10 +299,9 @@ The following example shows how to do it for a simple function invocation.
 ```python
 user_prompt_content = types.Content(
     role="user",
-    parts=[types.Part.from_text("What is the weather like in Boston?")],
+    parts=[types.Part.from_text(text="What is the weather like in Boston?")],
 )
-
-function_call_part = function_call_content.parts[0]
+function_call_part = response.function_calls[0]
 
 
 try:
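
Note: `response.function_calls[0]` replaces digging the call out of the candidate's `content.parts`. A hedged sketch of the round trip this section builds toward; `get_current_weather` is the hypothetical local handler matching the declaration above:

```python
from google.genai import types

function_call_part = response.function_calls[0]

try:
    # Invoke the hypothetical handler with the model-provided arguments.
    result = get_current_weather(**function_call_part.args)
    function_response = {"result": result}
except Exception as e:
    function_response = {"error": str(e)}

# Package the result so it can be sent back to the model.
function_response_part = types.Part.from_function_response(
    name=function_call_part.name,
    response=function_response,
)
```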
@@ -366,6 +401,57 @@ response = client.models.generate_content(
 print(response.text)
 ```
 
+### Enum Response Schema
+
+#### Text Response
+
+You can set response_mime_type to 'text/x.enum' to return one of those enum
+values as the response.
+
+```python
+class InstrumentEnum(Enum):
+    PERCUSSION = 'Percussion'
+    STRING = 'String'
+    WOODWIND = 'Woodwind'
+    BRASS = 'Brass'
+    KEYBOARD = 'Keyboard'
+
+response = client.models.generate_content(
+    model='gemini-2.0-flash-exp',
+    contents='What instrument plays multiple notes at once?',
+    config={
+        'response_mime_type': 'text/x.enum',
+        'response_schema': InstrumentEnum,
+    },
+)
+print(response.text)
+```
+
+#### JSON Response
+
+You can also set response_mime_type to 'application/json', the response will be identical but in quotes.
+
+```python
+from enum import Enum
+
+class InstrumentEnum(Enum):
+    PERCUSSION = 'Percussion'
+    STRING = 'String'
+    WOODWIND = 'Woodwind'
+    BRASS = 'Brass'
+    KEYBOARD = 'Keyboard'
+
+response = client.models.generate_content(
+    model='gemini-2.0-flash-exp',
+    contents='What instrument plays multiple notes at once?',
+    config={
+        'response_mime_type': 'application/json',
+        'response_schema': InstrumentEnum,
+    },
+)
+print(response.text)
+```
+
 ### Streaming
 
 #### Streaming for text content
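
Note: to make "identical but in quotes" concrete, a hedged illustration (the enum value the model actually picks may differ):

```python
# response_mime_type='text/x.enum'      -> response.text == 'Keyboard'
# response_mime_type='application/json' -> response.text == '"Keyboard"'
```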
@@ -434,10 +520,10 @@ print(response.text)
 ### Streaming
 
 ```python
-async for response in client.aio.models.generate_content_stream(
+async for chunk in await client.aio.models.generate_content_stream(
     model="gemini-2.0-flash-exp", contents="Tell me a story in 300 words."
 ):
-    print(
+    print(chunk.text, end="")
 ```
 
 ### Count Tokens and Compute Tokens
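
Note: the pair above reflects that in 0.8.0 the async stream call returns an awaitable resolving to an async iterator, so it is awaited before iteration. A minimal sketch, assuming a `client` constructed as above:

```python
import asyncio

async def stream_story() -> None:
    # Await the call first, then iterate the resolved async stream.
    async for chunk in await client.aio.models.generate_content_stream(
        model="gemini-2.0-flash-exp", contents="Tell me a story in 300 words."
    ):
        print(chunk.text, end="")

asyncio.run(stream_story())
```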
@@ -445,7 +531,7 @@ async for response in client.aio.models.generate_content_stream(
 ```python
 response = client.models.count_tokens(
     model="gemini-2.0-flash-exp",
-    contents="
+    contents="why is the sky blue?",
 )
 print(response)
 ```
@@ -457,7 +543,7 @@ Compute tokens is only supported in Vertex AI.
 ```python
 response = client.models.compute_tokens(
     model="gemini-2.0-flash-exp",
-    contents="
+    contents="why is the sky blue?",
 )
 print(response)
 ```
@@ -467,7 +553,7 @@ print(response)
 ```python
 response = await client.aio.models.count_tokens(
     model="gemini-2.0-flash-exp",
-    contents="
+    contents="why is the sky blue?",
 )
 print(response)
 ```
@@ -477,7 +563,7 @@ print(response)
 ```python
 response = client.models.embed_content(
     model="text-embedding-004",
-    contents="
+    contents="why is the sky blue?",
 )
 print(response)
 ```
@@ -486,7 +572,7 @@ print(response)
 # multiple contents with config
 response = client.models.embed_content(
     model="text-embedding-004",
-    contents=["
+    contents=["why is the sky blue?", "What is your age?"],
     config=types.EmbedContentConfig(output_dimensionality=10),
 )
 
@@ -495,16 +581,16 @@ print(response)
 
 ### Imagen
 
-#### Generate
+#### Generate Images
 
-Support for generate
+Support for generate images in Gemini Developer API is behind an allowlist
 
 ```python
 # Generate Image
-response1 = client.models.
-    model="imagen-3.0-generate-
+response1 = client.models.generate_images(
+    model="imagen-3.0-generate-002",
     prompt="An umbrella in the foreground, and a rainy night sky in the background",
-    config=types.
+    config=types.GenerateImagesConfig(
         negative_prompt="human",
         number_of_images=1,
         include_rai_reason=True,
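
Note: displaying the generated image still relies on `pillow`, which 0.8.0 no longer installs by default (see the dependency hunk above). A hedged continuation of the snippet:

```python
# Assumes pillow was installed separately (pip install pillow).
response1.generated_images[0].image.show()
```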
@@ -603,7 +689,7 @@ print(response.text)
 
 ```python
 chat = client.aio.chats.create(model="gemini-2.0-flash-exp")
-async for chunk in chat.send_message_stream("tell me a story"):
+async for chunk in await chat.send_message_stream("tell me a story"):
     print(chunk.text)
 ```
 
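
Note: only the async chat stream gains the `await`; a sketch of the sync counterpart for comparison, which this diff leaves untouched:

```python
chat = client.chats.create(model="gemini-2.0-flash-exp")
for chunk in chat.send_message_stream("tell me a story"):
    print(chunk.text)
```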
@@ -679,7 +765,7 @@ cached_content = client.caches.create(
 cached_content = client.caches.get(name=cached_content.name)
 ```
 
-### Generate Content
+### Generate Content with Caches
 
 ```python
 response = client.models.generate_content(
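
Note: the hunk's context cuts off at the `generate_content` call; a hedged completion showing how the cache is referenced, with the model name and prompt as placeholders:

```python
from google.genai import types

response = client.models.generate_content(
    model="gemini-1.5-pro-002",  # placeholder model
    contents="Summarize the cached documents.",  # placeholder prompt
    config=types.GenerateContentConfig(
        cached_content=cached_content.name,
    ),
)
print(response.text)
```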
@@ -695,7 +781,7 @@ print(response.text)
 ## Tunings
 
 `client.tunings` contains tuning job APIs and supports supervised fine
-tuning through `tune
+tuning through `tune`.
 
 ### Tune
 
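
Note: with distillation removed (see the hunk further down), `tune` is the remaining entry point. A minimal sketch under assumed values; the base model, dataset URI, and display name are placeholders:

```python
from google.genai import types

tuning_job = client.tunings.tune(
    base_model="gemini-1.5-pro-002",  # placeholder base model
    training_dataset=types.TuningDataset(
        gcs_uri="gs://my-bucket/training-data.jsonl",  # placeholder URI
    ),
    config=types.CreateTuningJobConfig(
        epoch_count=1,
        tuned_model_display_name="my tuned model",  # placeholder
    ),
)
print(tuning_job)
```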
@@ -760,7 +846,7 @@ while tuning_job.state in running_states:
 ```python
 response = client.models.generate_content(
     model=tuning_job.tuned_model.endpoint,
-    contents="
+    contents="why is the sky blue?",
 )
 
 print(response.text)
@@ -820,43 +906,6 @@ model = client.models.update(
 print(model)
 ```
 
-### Distillation
-
-Only supported in Vertex AI. Requires allowlist.
-
-```python
-distillation_job = client.tunings.distill(
-    student_model="gemma-2b-1.1-it",
-    teacher_model="gemini-1.5-pro-002",
-    training_dataset=genai.types.DistillationDataset(
-        gcs_uri="gs://cloud-samples-data/ai-platform/generative_ai/gemini-1_5/text/sft_train_data.jsonl",
-    ),
-    config=genai.types.CreateDistillationJobConfig(
-        epoch_count=1,
-        pipeline_root_directory=("gs://my-bucket"),
-    ),
-)
-print(distillation_job)
-```
-
-```python
-completed_states = set(
-    [
-        "JOB_STATE_SUCCEEDED",
-        "JOB_STATE_FAILED",
-        "JOB_STATE_CANCELLED",
-        "JOB_STATE_PAUSED",
-    ]
-)
-
-while distillation_job.state not in completed_states:
-    print(distillation_job.state)
-    distillation_job = client.tunings.get(name=distillation_job.name)
-    time.sleep(10)
-
-print(distillation_job)
-```
-
 
 ### List Tuning Jobs
 
@@ -932,12 +981,12 @@ job
 ### List
 
 ```python
-for job in client.batches.list(config=types.
+for job in client.batches.list(config=types.ListBatchJobsConfig(page_size=10)):
     print(job)
 ```
 
 ```python
-pager = client.batches.list(config=types.
+pager = client.batches.list(config=types.ListBatchJobsConfig(page_size=10))
 print(pager.page_size)
 print(pager[0])
 pager.next_page()
@@ -948,14 +997,14 @@ print(pager[0])
 
 ```python
 async for job in await client.aio.batches.list(
-    config=types.
+    config=types.ListBatchJobsConfig(page_size=10)
 ):
     print(job)
 ```
 
 ```python
 async_pager = await client.aio.batches.list(
-    config=types.
+    config=types.ListBatchJobsConfig(page_size=10)
 )
 print(async_pager.page_size)
 print(async_pager[0])
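
Note: a hedged continuation of the async pager above, mirroring the sync `pager.next_page()` in the previous hunk; advancing the async pager is awaited:

```python
await async_pager.next_page()
print(async_pager[0])
```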