google-genai 0.6.0__tar.gz → 0.7.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (31)
  1. {google_genai-0.6.0/google_genai.egg-info → google_genai-0.7.0}/PKG-INFO +116 -68
  2. {google_genai-0.6.0 → google_genai-0.7.0}/README.md +115 -66
  3. {google_genai-0.6.0 → google_genai-0.7.0}/google/genai/_api_client.py +72 -78
  4. google_genai-0.7.0/google/genai/_api_module.py +24 -0
  5. {google_genai-0.6.0 → google_genai-0.7.0}/google/genai/_automatic_function_calling_util.py +43 -22
  6. {google_genai-0.6.0 → google_genai-0.7.0}/google/genai/_common.py +0 -6
  7. {google_genai-0.6.0 → google_genai-0.7.0}/google/genai/_extra_utils.py +22 -16
  8. {google_genai-0.6.0 → google_genai-0.7.0}/google/genai/_replay_api_client.py +2 -2
  9. {google_genai-0.6.0 → google_genai-0.7.0}/google/genai/_test_api_client.py +1 -1
  10. {google_genai-0.6.0 → google_genai-0.7.0}/google/genai/_transformers.py +218 -97
  11. {google_genai-0.6.0 → google_genai-0.7.0}/google/genai/batches.py +194 -155
  12. {google_genai-0.6.0 → google_genai-0.7.0}/google/genai/caches.py +117 -134
  13. {google_genai-0.6.0 → google_genai-0.7.0}/google/genai/chats.py +22 -18
  14. {google_genai-0.6.0 → google_genai-0.7.0}/google/genai/client.py +31 -37
  15. {google_genai-0.6.0 → google_genai-0.7.0}/google/genai/files.py +94 -125
  16. {google_genai-0.6.0 → google_genai-0.7.0}/google/genai/live.py +11 -5
  17. {google_genai-0.6.0 → google_genai-0.7.0}/google/genai/models.py +500 -254
  18. {google_genai-0.6.0 → google_genai-0.7.0}/google/genai/tunings.py +85 -422
  19. {google_genai-0.6.0 → google_genai-0.7.0}/google/genai/types.py +495 -458
  20. {google_genai-0.6.0 → google_genai-0.7.0}/google/genai/version.py +1 -1
  21. {google_genai-0.6.0 → google_genai-0.7.0/google_genai.egg-info}/PKG-INFO +116 -68
  22. {google_genai-0.6.0 → google_genai-0.7.0}/google_genai.egg-info/SOURCES.txt +1 -0
  23. {google_genai-0.6.0 → google_genai-0.7.0}/google_genai.egg-info/requires.txt +0 -1
  24. {google_genai-0.6.0 → google_genai-0.7.0}/pyproject.toml +1 -2
  25. {google_genai-0.6.0 → google_genai-0.7.0}/LICENSE +0 -0
  26. {google_genai-0.6.0 → google_genai-0.7.0}/google/genai/__init__.py +0 -0
  27. {google_genai-0.6.0 → google_genai-0.7.0}/google/genai/errors.py +0 -0
  28. {google_genai-0.6.0 → google_genai-0.7.0}/google/genai/pagers.py +0 -0
  29. {google_genai-0.6.0 → google_genai-0.7.0}/google_genai.egg-info/dependency_links.txt +0 -0
  30. {google_genai-0.6.0 → google_genai-0.7.0}/google_genai.egg-info/top_level.txt +0 -0
  31. {google_genai-0.6.0 → google_genai-0.7.0}/setup.cfg +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: google-genai
- Version: 0.6.0
+ Version: 0.7.0
  Summary: GenAI Python SDK
  Author-email: Google LLC <googleapis-packages@google.com>
  License: Apache-2.0
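The only substantive change in this hunk is the version bump. A quick way to confirm the installed version after upgrading, as a minimal sketch assuming `google/genai/version.py` (changed +1/-1 below) exports `__version__`:

```python
# pip install -U google-genai
from google.genai import version

# Assumption: version.py exposes __version__; expect "0.7.0" after upgrading.
print(version.__version__)
```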
@@ -21,7 +21,6 @@ Requires-Python: >=3.9
  Description-Content-Type: text/markdown
  License-File: LICENSE
  Requires-Dist: google-auth<3.0.0dev,>=2.14.1
- Requires-Dist: pillow<12.0.0,>=10.0.0
  Requires-Dist: pydantic<3.0.0dev,>=2.0.0
  Requires-Dist: requests<3.0.0dev,>=2.28.1
  Requires-Dist: websockets<15.0dev,>=13.0
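With `pillow` dropped from the hard requirements, image inputs no longer have to go through PIL. A minimal sketch of passing raw image bytes instead, assuming `types.Part.from_bytes` takes keyword-only `data` and `mime_type` (keyword-only arguments match the `from_text(text=...)` change elsewhere in this diff):

```python
from google import genai
from google.genai import types

client = genai.Client()

with open("image.jpg", "rb") as f:
    image_bytes = f.read()

# Inline bytes part: no Pillow import needed anywhere on this path.
response = client.models.generate_content(
    model="gemini-2.0-flash-exp",
    contents=[
        types.Part.from_bytes(data=image_bytes, mime_type="image/jpeg"),
        types.Part.from_text(text="Describe this image."),
    ],
)
print(response.text)
```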
@@ -83,12 +82,12 @@ The `client.models` modules exposes model inferencing and model getters.
 
  ```python
  response = client.models.generate_content(
-     model="gemini-2.0-flash-exp", contents="What is your name?"
+     model="gemini-2.0-flash-exp", contents="why is the sky blue?"
  )
  print(response.text)
  ```
 
- #### with uploaded file (Google AI only)
+ #### with uploaded file (Gemini API only)
  download the file in console.
 
  ```cmd
@@ -127,7 +126,7 @@ dictionaries. You can get the type from `google.genai.types`.
  ```python
  response = client.models.generate_content(
      model="gemini-2.0-flash-exp",
-     contents=types.Part.from_text("Why is the sky blue?"),
+     contents=types.Part.from_text(text="Why is the sky blue?"),
      config=types.GenerateContentConfig(
          temperature=0,
          top_p=0.95,
@@ -141,7 +140,43 @@ response = client.models.generate_content(
      ),
  )
 
- response
+ print(response.text)
+ ```
+ 
+ ### Thinking
+ 
+ The Gemini 2.0 Flash Thinking model is an experimental model that can return
+ "thoughts" as part of its response.
+ 
+ #### Gemini Developer API
+ 
+ Thinking config is only available in the v1alpha API version of the Gemini Developer API.
+ 
+ ```python
+ response = client.models.generate_content(
+     model='gemini-2.0-flash-thinking-exp',
+     contents='What is the sum of natural numbers from 1 to 100?',
+     config=types.GenerateContentConfig(
+         thinking_config=types.ThinkingConfig(include_thoughts=True),
+         http_options=types.HttpOptions(api_version='v1alpha'),
+     )
+ )
+ for part in response.candidates[0].content.parts:
+     print(part)
+ ```
+ 
+ #### Vertex AI API
+ 
+ ```python
+ response = client.models.generate_content(
+     model='gemini-2.0-flash-thinking-exp-01-21',
+     contents='What is the sum of natural numbers from 1 to 100?',
+     config=types.GenerateContentConfig(
+         thinking_config=types.ThinkingConfig(include_thoughts=True),
+     )
+ )
+ for part in response.candidates[0].content.parts:
+     print(part)
  ```
 
  ### List Base Models
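Since `include_thoughts=True` mixes thought parts and answer parts in the same list, it can help to separate them. A sketch, assuming thought parts are flagged with an optional boolean `thought` attribute:

```python
# Split "thoughts" from the final answer; `thought` is assumed to be
# an optional bool on each part (unset on ordinary answer parts).
for part in response.candidates[0].content.parts:
    if getattr(part, "thought", None):
        print("[thought]", part.text)
    else:
        print("[answer]", part.text)
```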
@@ -232,10 +267,10 @@ Then you will receive a function call part in the response.
  function = types.FunctionDeclaration(
      name="get_current_weather",
      description="Get the current weather in a given location",
-     parameters=types.FunctionParameters(
+     parameters=types.Schema(
          type="OBJECT",
          properties={
-             "location": types.ParameterType(
+             "location": types.Schema(
                  type="STRING",
                  description="The city and state, e.g. San Francisco, CA",
              ),
@@ -263,10 +298,9 @@ The following example shows how to do it for a simple function invocation.
  ```python
  user_prompt_content = types.Content(
      role="user",
-     parts=[types.Part.from_text("What is the weather like in Boston?")],
+     parts=[types.Part.from_text(text="What is the weather like in Boston?")],
  )
- function_call_content = response.candidates[0].content
- function_call_part = function_call_content.parts[0]
+ function_call_part = response.function_calls[0]
 
 
  try:
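After this hunk, the result of running the function locally still has to be handed back to the model. A sketch of that step, assuming `response.function_calls` yields objects with `.name` and `.args`, that `types.Part.from_function_response` is available, and reusing the `get_current_weather` implementation from earlier in the README:

```python
# Run the requested function locally, then wrap the result so it can be
# appended to the conversation; .name/.args access is an assumption here.
result = get_current_weather(**function_call_part.args)
function_response_part = types.Part.from_function_response(
    name=function_call_part.name,
    response={"result": result},
)
function_response_content = types.Content(role="tool", parts=[function_response_part])
```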
@@ -366,6 +400,57 @@ response = client.models.generate_content(
  print(response.text)
  ```
 
+ ### Enum Response Schema
+ 
+ #### Text Response
+ 
+ You can set response_mime_type to 'text/x.enum' to have the model return one
+ of the enum values as the plain-text response.
+ 
+ ```python
+ from enum import Enum
+ 
+ class InstrumentEnum(Enum):
+     PERCUSSION = 'Percussion'
+     STRING = 'String'
+     WOODWIND = 'Woodwind'
+     BRASS = 'Brass'
+     KEYBOARD = 'Keyboard'
+ 
+ response = client.models.generate_content(
+     model='gemini-2.0-flash-exp',
+     contents='What instrument plays multiple notes at once?',
+     config={
+         'response_mime_type': 'text/x.enum',
+         'response_schema': InstrumentEnum,
+     },
+ )
+ print(response.text)
+ ```
+ 
+ #### JSON Response
+ 
+ You can also set response_mime_type to 'application/json'; the response will
+ contain the same enum value, but as a quoted JSON string.
+ 
+ ```python
+ from enum import Enum
+ 
+ class InstrumentEnum(Enum):
+     PERCUSSION = 'Percussion'
+     STRING = 'String'
+     WOODWIND = 'Woodwind'
+     BRASS = 'Brass'
+     KEYBOARD = 'Keyboard'
+ 
+ response = client.models.generate_content(
+     model='gemini-2.0-flash-exp',
+     contents='What instrument plays multiple notes at once?',
+     config={
+         'response_mime_type': 'application/json',
+         'response_schema': InstrumentEnum,
+     },
+ )
+ print(response.text)
+ ```
+ 
  ### Streaming
 
  #### Streaming for text content
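Because the JSON variant returns the enum value as a quoted string, it round-trips cleanly back into the Python Enum; a small sketch under that assumption:

```python
import json

# 'application/json' yields e.g. '"Keyboard"'; parse and look up the member.
instrument = InstrumentEnum(json.loads(response.text))
print(instrument)  # e.g. InstrumentEnum.KEYBOARD
```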
@@ -434,10 +519,10 @@ print(response.text)
  ### Streaming
 
  ```python
- async for response in client.aio.models.generate_content_stream(
+ async for chunk in await client.aio.models.generate_content_stream(
      model="gemini-2.0-flash-exp", contents="Tell me a story in 300 words."
  ):
-     print(response.text, end="")
+     print(chunk.text, end="")
  ```
 
  ### Count Tokens and Compute Tokens
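The change above means the stream call itself must now be awaited before iteration. A self-contained sketch of the 0.7.0 pattern, runnable as a script:

```python
import asyncio

from google import genai


async def main() -> None:
    client = genai.Client()
    # In 0.7.0 the call is awaited first; the awaited result is an
    # async iterator of response chunks.
    async for chunk in await client.aio.models.generate_content_stream(
        model="gemini-2.0-flash-exp", contents="Tell me a story in 300 words."
    ):
        print(chunk.text, end="")


asyncio.run(main())
```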
@@ -445,7 +530,7 @@ async for response in client.aio.models.generate_content_stream(
  ```python
  response = client.models.count_tokens(
      model="gemini-2.0-flash-exp",
-     contents="What is your name?",
+     contents="why is the sky blue?",
  )
  print(response)
  ```
@@ -457,7 +542,7 @@ Compute tokens is only supported in Vertex AI.
  ```python
  response = client.models.compute_tokens(
      model="gemini-2.0-flash-exp",
-     contents="What is your name?",
+     contents="why is the sky blue?",
  )
  print(response)
  ```
@@ -467,7 +552,7 @@ print(response)
  ```python
  response = await client.aio.models.count_tokens(
      model="gemini-2.0-flash-exp",
-     contents="What is your name?",
+     contents="why is the sky blue?",
  )
  print(response)
  ```
@@ -477,7 +562,7 @@ print(response)
  ```python
  response = client.models.embed_content(
      model="text-embedding-004",
-     contents="What is your name?",
+     contents="why is the sky blue?",
  )
  print(response)
  ```
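To work with the vectors rather than the printed response object, a sketch assuming the response carries a list of embeddings, each with a `values` list of floats:

```python
# Inspect the returned vector(s); `embeddings[i].values` is an assumption
# about the response shape, not confirmed by this diff.
for embedding in response.embeddings:
    print(len(embedding.values), embedding.values[:3])
```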
@@ -486,7 +571,7 @@ print(response)
  # multiple contents with config
  response = client.models.embed_content(
      model="text-embedding-004",
-     contents=["What is your name?", "What is your age?"],
+     contents=["why is the sky blue?", "What is your age?"],
      config=types.EmbedContentConfig(output_dimensionality=10),
  )
 
@@ -495,16 +580,16 @@ print(response)
 
  ### Imagen
 
- #### Generate Image
+ #### Generate Images
 
- Support for generate image in Gemini Developer API is behind an allowlist
+ Support for generating images in the Gemini Developer API is behind an allowlist.
 
  ```python
  # Generate Image
- response1 = client.models.generate_image(
-     model="imagen-3.0-generate-001",
+ response1 = client.models.generate_images(
+     model="imagen-3.0-generate-002",
      prompt="An umbrella in the foreground, and a rainy night sky in the background",
-     config=types.GenerateImageConfig(
+     config=types.GenerateImagesConfig(
          negative_prompt="human",
          number_of_images=1,
          include_rai_reason=True,
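To keep the output, each generated image can be written to disk; a sketch assuming `generated_images[i].image` is a `types.Image` with a `save()` helper (displaying it interactively would pull in Pillow, which this release makes optional):

```python
# Persist each generated image; the .image.save() helper is assumed.
for idx, generated_image in enumerate(response1.generated_images):
    generated_image.image.save(f"umbrella_{idx}.png")
```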
@@ -603,7 +688,7 @@ print(response.text)
 
  ```python
  chat = client.aio.chats.create(model="gemini-2.0-flash-exp")
- async for chunk in chat.send_message_stream("tell me a story"):
+ async for chunk in await chat.send_message_stream("tell me a story"):
      print(chunk.text)
  ```
 
@@ -679,7 +764,7 @@ cached_content = client.caches.create(
  cached_content = client.caches.get(name=cached_content.name)
  ```
 
- ### Generate Content
+ ### Generate Content with Caches
 
  ```python
  response = client.models.generate_content(
@@ -695,7 +780,7 @@ print(response.text)
  ## Tunings
 
  `client.tunings` contains tuning job APIs and supports supervised fine
- tuning through `tune` and distillation through `distill`
+ tuning through `tune`.
 
  ### Tune
 
@@ -760,7 +845,7 @@ while tuning_job.state in running_states:
  ```python
  response = client.models.generate_content(
      model=tuning_job.tuned_model.endpoint,
-     contents="What is your name?",
+     contents="why is the sky blue?",
  )
 
  print(response.text)
@@ -820,43 +905,6 @@ model = client.models.update(
  print(model)
  ```
 
- ### Distillation
- 
- Only supported in Vertex AI. Requires allowlist.
- 
- ```python
- distillation_job = client.tunings.distill(
-     student_model="gemma-2b-1.1-it",
-     teacher_model="gemini-1.5-pro-002",
-     training_dataset=genai.types.DistillationDataset(
-         gcs_uri="gs://cloud-samples-data/ai-platform/generative_ai/gemini-1_5/text/sft_train_data.jsonl",
-     ),
-     config=genai.types.CreateDistillationJobConfig(
-         epoch_count=1,
-         pipeline_root_directory=("gs://my-bucket"),
-     ),
- )
- print(distillation_job)
- ```
- 
- ```python
- completed_states = set(
-     [
-         "JOB_STATE_SUCCEEDED",
-         "JOB_STATE_FAILED",
-         "JOB_STATE_CANCELLED",
-         "JOB_STATE_PAUSED",
-     ]
- )
- 
- while distillation_job.state not in completed_states:
-     print(distillation_job.state)
-     distillation_job = client.tunings.get(name=distillation_job.name)
-     time.sleep(10)
- 
- print(distillation_job)
- ```
- 
 
  ### List Tuning Jobs
 
@@ -932,12 +980,12 @@ job
  ### List
 
  ```python
- for job in client.batches.list(config=types.ListBatchJobConfig(page_size=10)):
+ for job in client.batches.list(config=types.ListBatchJobsConfig(page_size=10)):
      print(job)
  ```
 
  ```python
- pager = client.batches.list(config=types.ListBatchJobConfig(page_size=10))
+ pager = client.batches.list(config=types.ListBatchJobsConfig(page_size=10))
  print(pager.page_size)
  print(pager[0])
  pager.next_page()
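Passing the config as a plain dict sidesteps the `ListBatchJobConfig` -> `ListBatchJobsConfig` rename entirely, since the README notes that typed configs and dicts are interchangeable; a sketch under that assumption:

```python
# Dict configs avoid naming churn across SDK versions.
for job in client.batches.list(config={"page_size": 10}):
    print(job)
```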
@@ -948,14 +996,14 @@ print(pager[0])
 
  ```python
  async for job in await client.aio.batches.list(
-     config=types.ListBatchJobConfig(page_size=10)
+     config=types.ListBatchJobsConfig(page_size=10)
  ):
      print(job)
  ```
 
  ```python
  async_pager = await client.aio.batches.list(
-     config=types.ListBatchJobConfig(page_size=10)
+     config=types.ListBatchJobsConfig(page_size=10)
  )
  print(async_pager.page_size)
  print(async_pager[0])
@@ -55,12 +55,12 @@ The `client.models` modules exposes model inferencing and model getters.
 
  ```python
  response = client.models.generate_content(
-     model="gemini-2.0-flash-exp", contents="What is your name?"
+     model="gemini-2.0-flash-exp", contents="why is the sky blue?"
  )
  print(response.text)
  ```
 
- #### with uploaded file (Google AI only)
+ #### with uploaded file (Gemini API only)
  download the file in console.
 
  ```cmd
@@ -99,7 +99,7 @@ dictionaries. You can get the type from `google.genai.types`.
  ```python
  response = client.models.generate_content(
      model="gemini-2.0-flash-exp",
-     contents=types.Part.from_text("Why is the sky blue?"),
+     contents=types.Part.from_text(text="Why is the sky blue?"),
      config=types.GenerateContentConfig(
          temperature=0,
          top_p=0.95,
@@ -113,7 +113,43 @@ response = client.models.generate_content(
      ),
  )
 
- response
+ print(response.text)
+ ```
+ 
+ ### Thinking
+ 
+ The Gemini 2.0 Flash Thinking model is an experimental model that can return
+ "thoughts" as part of its response.
+ 
+ #### Gemini Developer API
+ 
+ Thinking config is only available in the v1alpha API version of the Gemini Developer API.
+ 
+ ```python
+ response = client.models.generate_content(
+     model='gemini-2.0-flash-thinking-exp',
+     contents='What is the sum of natural numbers from 1 to 100?',
+     config=types.GenerateContentConfig(
+         thinking_config=types.ThinkingConfig(include_thoughts=True),
+         http_options=types.HttpOptions(api_version='v1alpha'),
+     )
+ )
+ for part in response.candidates[0].content.parts:
+     print(part)
+ ```
+ 
+ #### Vertex AI API
+ 
+ ```python
+ response = client.models.generate_content(
+     model='gemini-2.0-flash-thinking-exp-01-21',
+     contents='What is the sum of natural numbers from 1 to 100?',
+     config=types.GenerateContentConfig(
+         thinking_config=types.ThinkingConfig(include_thoughts=True),
+     )
+ )
+ for part in response.candidates[0].content.parts:
+     print(part)
  ```
 
  ### List Base Models
@@ -204,10 +240,10 @@ Then you will receive a function call part in the response.
  function = types.FunctionDeclaration(
      name="get_current_weather",
      description="Get the current weather in a given location",
-     parameters=types.FunctionParameters(
+     parameters=types.Schema(
          type="OBJECT",
          properties={
-             "location": types.ParameterType(
+             "location": types.Schema(
                  type="STRING",
                  description="The city and state, e.g. San Francisco, CA",
              ),
@@ -235,10 +271,9 @@ The following example shows how to do it for a simple function invocation.
  ```python
  user_prompt_content = types.Content(
      role="user",
-     parts=[types.Part.from_text("What is the weather like in Boston?")],
+     parts=[types.Part.from_text(text="What is the weather like in Boston?")],
  )
- function_call_content = response.candidates[0].content
- function_call_part = function_call_content.parts[0]
+ function_call_part = response.function_calls[0]
 
 
  try:
@@ -338,6 +373,57 @@ response = client.models.generate_content(
  print(response.text)
  ```
 
+ ### Enum Response Schema
+ 
+ #### Text Response
+ 
+ You can set response_mime_type to 'text/x.enum' to have the model return one
+ of the enum values as the plain-text response.
+ 
+ ```python
+ from enum import Enum
+ 
+ class InstrumentEnum(Enum):
+     PERCUSSION = 'Percussion'
+     STRING = 'String'
+     WOODWIND = 'Woodwind'
+     BRASS = 'Brass'
+     KEYBOARD = 'Keyboard'
+ 
+ response = client.models.generate_content(
+     model='gemini-2.0-flash-exp',
+     contents='What instrument plays multiple notes at once?',
+     config={
+         'response_mime_type': 'text/x.enum',
+         'response_schema': InstrumentEnum,
+     },
+ )
+ print(response.text)
+ ```
+ 
+ #### JSON Response
+ 
+ You can also set response_mime_type to 'application/json'; the response will
+ contain the same enum value, but as a quoted JSON string.
+ 
+ ```python
+ from enum import Enum
+ 
+ class InstrumentEnum(Enum):
+     PERCUSSION = 'Percussion'
+     STRING = 'String'
+     WOODWIND = 'Woodwind'
+     BRASS = 'Brass'
+     KEYBOARD = 'Keyboard'
+ 
+ response = client.models.generate_content(
+     model='gemini-2.0-flash-exp',
+     contents='What instrument plays multiple notes at once?',
+     config={
+         'response_mime_type': 'application/json',
+         'response_schema': InstrumentEnum,
+     },
+ )
+ print(response.text)
+ ```
+ 
  ### Streaming
 
  #### Streaming for text content
@@ -406,10 +492,10 @@ print(response.text)
  ### Streaming
 
  ```python
- async for response in client.aio.models.generate_content_stream(
+ async for chunk in await client.aio.models.generate_content_stream(
      model="gemini-2.0-flash-exp", contents="Tell me a story in 300 words."
  ):
-     print(response.text, end="")
+     print(chunk.text, end="")
  ```
 
  ### Count Tokens and Compute Tokens
@@ -417,7 +503,7 @@ async for response in client.aio.models.generate_content_stream(
  ```python
  response = client.models.count_tokens(
      model="gemini-2.0-flash-exp",
-     contents="What is your name?",
+     contents="why is the sky blue?",
  )
  print(response)
  ```
@@ -429,7 +515,7 @@ Compute tokens is only supported in Vertex AI.
  ```python
  response = client.models.compute_tokens(
      model="gemini-2.0-flash-exp",
-     contents="What is your name?",
+     contents="why is the sky blue?",
  )
  print(response)
  ```
@@ -439,7 +525,7 @@ print(response)
  ```python
  response = await client.aio.models.count_tokens(
      model="gemini-2.0-flash-exp",
-     contents="What is your name?",
+     contents="why is the sky blue?",
  )
  print(response)
  ```
@@ -449,7 +535,7 @@ print(response)
  ```python
  response = client.models.embed_content(
      model="text-embedding-004",
-     contents="What is your name?",
+     contents="why is the sky blue?",
  )
  print(response)
  ```
@@ -458,7 +544,7 @@ print(response)
  # multiple contents with config
  response = client.models.embed_content(
      model="text-embedding-004",
-     contents=["What is your name?", "What is your age?"],
+     contents=["why is the sky blue?", "What is your age?"],
      config=types.EmbedContentConfig(output_dimensionality=10),
  )
 
@@ -467,16 +553,16 @@ print(response)
 
  ### Imagen
 
- #### Generate Image
+ #### Generate Images
 
- Support for generate image in Gemini Developer API is behind an allowlist
+ Support for generating images in the Gemini Developer API is behind an allowlist.
 
  ```python
  # Generate Image
- response1 = client.models.generate_image(
-     model="imagen-3.0-generate-001",
+ response1 = client.models.generate_images(
+     model="imagen-3.0-generate-002",
      prompt="An umbrella in the foreground, and a rainy night sky in the background",
-     config=types.GenerateImageConfig(
+     config=types.GenerateImagesConfig(
          negative_prompt="human",
          number_of_images=1,
          include_rai_reason=True,
@@ -575,7 +661,7 @@ print(response.text)
 
  ```python
  chat = client.aio.chats.create(model="gemini-2.0-flash-exp")
- async for chunk in chat.send_message_stream("tell me a story"):
+ async for chunk in await chat.send_message_stream("tell me a story"):
      print(chunk.text)
  ```
 
@@ -651,7 +737,7 @@ cached_content = client.caches.create(
  cached_content = client.caches.get(name=cached_content.name)
  ```
 
- ### Generate Content
+ ### Generate Content with Caches
 
  ```python
  response = client.models.generate_content(
@@ -667,7 +753,7 @@ print(response.text)
  ## Tunings
 
  `client.tunings` contains tuning job APIs and supports supervised fine
- tuning through `tune` and distillation through `distill`
+ tuning through `tune`.
 
  ### Tune
 
@@ -732,7 +818,7 @@ while tuning_job.state in running_states:
  ```python
  response = client.models.generate_content(
      model=tuning_job.tuned_model.endpoint,
-     contents="What is your name?",
+     contents="why is the sky blue?",
  )
 
  print(response.text)
@@ -792,43 +878,6 @@ model = client.models.update(
  print(model)
  ```
 
- ### Distillation
- 
- Only supported in Vertex AI. Requires allowlist.
- 
- ```python
- distillation_job = client.tunings.distill(
-     student_model="gemma-2b-1.1-it",
-     teacher_model="gemini-1.5-pro-002",
-     training_dataset=genai.types.DistillationDataset(
-         gcs_uri="gs://cloud-samples-data/ai-platform/generative_ai/gemini-1_5/text/sft_train_data.jsonl",
-     ),
-     config=genai.types.CreateDistillationJobConfig(
-         epoch_count=1,
-         pipeline_root_directory=("gs://my-bucket"),
-     ),
- )
- print(distillation_job)
- ```
- 
- ```python
- completed_states = set(
-     [
-         "JOB_STATE_SUCCEEDED",
-         "JOB_STATE_FAILED",
-         "JOB_STATE_CANCELLED",
-         "JOB_STATE_PAUSED",
-     ]
- )
- 
- while distillation_job.state not in completed_states:
-     print(distillation_job.state)
-     distillation_job = client.tunings.get(name=distillation_job.name)
-     time.sleep(10)
- 
- print(distillation_job)
- ```
- 
 
  ### List Tuning Jobs
 
@@ -904,12 +953,12 @@ job
  ### List
 
  ```python
- for job in client.batches.list(config=types.ListBatchJobConfig(page_size=10)):
+ for job in client.batches.list(config=types.ListBatchJobsConfig(page_size=10)):
      print(job)
  ```
 
  ```python
- pager = client.batches.list(config=types.ListBatchJobConfig(page_size=10))
+ pager = client.batches.list(config=types.ListBatchJobsConfig(page_size=10))
  print(pager.page_size)
  print(pager[0])
  pager.next_page()
@@ -920,14 +969,14 @@ print(pager[0])
 
  ```python
  async for job in await client.aio.batches.list(
-     config=types.ListBatchJobConfig(page_size=10)
+     config=types.ListBatchJobsConfig(page_size=10)
  ):
      print(job)
  ```
 
  ```python
  async_pager = await client.aio.batches.list(
-     config=types.ListBatchJobConfig(page_size=10)
+     config=types.ListBatchJobsConfig(page_size=10)
  )
  print(async_pager.page_size)
  print(async_pager[0])