google-genai 1.0.0__py3-none-any.whl → 1.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: google-genai
-Version: 1.0.0
+Version: 1.1.0
 Summary: GenAI Python SDK
 Author-email: Google LLC <googleapis-packages@google.com>
 License: Apache-2.0
@@ -56,13 +56,13 @@ different services ([Gemini Developer API](https://ai.google.dev/gemini-api/docs

 ```python
 # Only run this block for Gemini Developer API
-client = genai.Client(api_key="GEMINI_API_KEY")
+client = genai.Client(api_key='GEMINI_API_KEY')
 ```

 ```python
 # Only run this block for Vertex AI API
 client = genai.Client(
-    vertexai=True, project="your-project-id", location="us-central1"
+    vertexai=True, project='your-project-id', location='us-central1'
 )
 ```
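
The two client constructions above assume the SDK import has already happened. A minimal, self-contained sketch of that setup; reading the key from an environment variable named `GOOGLE_API_KEY` is an assumption for illustration, not something this hunk mandates:

```python
import os

from google import genai

# Build a Gemini Developer API client from an environment variable instead of
# hard-coding the key string in source.
client = genai.Client(api_key=os.environ['GOOGLE_API_KEY'])
```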

@@ -99,7 +99,7 @@ The `client.models` modules exposes model inferencing and model getters.

 ```python
 response = client.models.generate_content(
-    model="gemini-2.0-flash-exp", contents="why is the sky blue?"
+    model='gemini-2.0-flash-001', contents='why is the sky blue?'
 )
 print(response.text)
 ```
@@ -114,22 +114,53 @@ download the file in console.
 python code.

 ```python
-file = client.files.upload(path="a11.txt")
+file = client.files.upload(path='a11.txt')
 response = client.models.generate_content(
-    model="gemini-2.0-flash-exp",
-    contents=["Could you summarize this file?", file]
+    model='gemini-2.0-flash-001',
+    contents=['Could you summarize this file?', file]
 )
 print(response.text)
 ```

+#### How to structure `contents`
+There are several ways to structure the `contents` in your request.
+
+Provide a single string as shown in the text example above:
+
+```python
+contents='Can you recommend some things to do in Boston and New York in the winter?'
+```
+
+Provide a single `Content` instance with multiple `Part` instances:
+
+```python
+contents=types.Content(parts=[
+    types.Part.from_text(text='Can you recommend some things to do in Boston in the winter?'),
+    types.Part.from_text(text='Can you recommend some things to do in New York in the winter?')
+], role='user')
+```
+
+When sending more than one input type, provide a list with multiple `Content`
+instances:
+
+```python
+contents=[
+    'What is this a picture of?',
+    types.Part.from_uri(
+        file_uri='gs://generativeai-downloads/images/scones.jpg',
+        mime_type='image/jpeg',
+    ),
+],
+```
+
 ### System Instructions and Other Configs

 ```python
 response = client.models.generate_content(
-    model="gemini-2.0-flash-exp",
-    contents="high",
+    model='gemini-2.0-flash-001',
+    contents='high',
     config=types.GenerateContentConfig(
-        system_instruction="I say high, you say low",
+        system_instruction='I say high, you say low',
         temperature=0.3,
     ),
 )
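
The "How to structure `contents`" section added in the hunk above shows the accepted shapes only as bare fragments. For reference, a runnable sketch that drops the multi-`Part` form into a complete request; it assumes a `client` configured as in the setup snippets and the SDK's `types` module:

```python
from google.genai import types

# One user turn carrying two text parts, passed as a single Content instance.
response = client.models.generate_content(
    model='gemini-2.0-flash-001',
    contents=types.Content(
        role='user',
        parts=[
            types.Part.from_text(text='Can you recommend some things to do in Boston in the winter?'),
            types.Part.from_text(text='Can you recommend some things to do in New York in the winter?'),
        ],
    ),
)
print(response.text)
```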
@@ -143,8 +174,8 @@ dictionaries. You can get the type from `google.genai.types`.

 ```python
 response = client.models.generate_content(
-    model="gemini-2.0-flash-exp",
-    contents=types.Part.from_text(text="Why is the sky blue?"),
+    model='gemini-2.0-flash-001',
+    contents=types.Part.from_text(text='Why is the sky blue?'),
     config=types.GenerateContentConfig(
         temperature=0,
         top_p=0.95,
@@ -152,7 +183,7 @@ response = client.models.generate_content(
         candidate_count=1,
         seed=5,
         max_output_tokens=100,
-        stop_sequences=["STOP!"],
+        stop_sequences=['STOP!'],
         presence_penalty=0.0,
         frequency_penalty=0.0,
     ),
@@ -171,7 +202,7 @@ for model in client.models.list():
 ```

 ```python
-pager = client.models.list(config={"page_size": 10})
+pager = client.models.list(config={'page_size': 10})
 print(pager.page_size)
 print(pager[0])
 pager.next_page()
@@ -186,7 +217,7 @@ async for job in await client.aio.models.list():
 ```

 ```python
-async_pager = await client.aio.models.list(config={"page_size": 10})
+async_pager = await client.aio.models.list(config={'page_size': 10})
 print(async_pager.page_size)
 print(async_pager[0])
 await async_pager.next_page()
@@ -197,13 +228,13 @@ print(async_pager[0])

 ```python
 response = client.models.generate_content(
-    model="gemini-2.0-flash-exp",
-    contents="Say something bad.",
+    model='gemini-2.0-flash-001',
+    contents='Say something bad.',
     config=types.GenerateContentConfig(
         safety_settings=[
             types.SafetySetting(
-                category="HARM_CATEGORY_HATE_SPEECH",
-                threshold="BLOCK_ONLY_HIGH",
+                category='HARM_CATEGORY_HATE_SPEECH',
+                threshold='BLOCK_ONLY_HIGH',
             )
         ]
     ),
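
As a usage note for the hunk above: the effect of these safety settings can be inspected on the response itself. A hedged sketch, assuming the Gemini API's per-candidate `safety_ratings` shape:

```python
# Each candidate carries the ratings that the configured safety thresholds
# were evaluated against.
for rating in response.candidates[0].safety_ratings:
    print(rating.category, rating.probability)
```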
@@ -225,12 +256,12 @@ def get_current_weather(location: str) -> str:
     Args:
         location: The city and state, e.g. San Francisco, CA
     """
-    return "sunny"
+    return 'sunny'


 response = client.models.generate_content(
-    model="gemini-2.0-flash-exp",
-    contents="What is the weather like in Boston?",
+    model='gemini-2.0-flash-001',
+    contents='What is the weather like in Boston?',
     config=types.GenerateContentConfig(tools=[get_current_weather]),
 )

@@ -247,25 +278,25 @@ Then you will receive a function call part in the response.

 ```python
 function = types.FunctionDeclaration(
-    name="get_current_weather",
-    description="Get the current weather in a given location",
+    name='get_current_weather',
+    description='Get the current weather in a given location',
     parameters=types.Schema(
-        type="OBJECT",
+        type='OBJECT',
         properties={
-            "location": types.Schema(
-                type="STRING",
-                description="The city and state, e.g. San Francisco, CA",
+            'location': types.Schema(
+                type='STRING',
+                description='The city and state, e.g. San Francisco, CA',
            ),
        },
-        required=["location"],
+        required=['location'],
    ),
 )

 tool = types.Tool(function_declarations=[function])

 response = client.models.generate_content(
-    model="gemini-2.0-flash-exp",
-    contents="What is the weather like in Boston?",
+    model='gemini-2.0-flash-001',
+    contents='What is the weather like in Boston?',
     config=types.GenerateContentConfig(tools=[tool]),
 )

@@ -279,33 +310,34 @@ The following example shows how to do it for a simple function invocation.

 ```python
 user_prompt_content = types.Content(
-    role="user",
-    parts=[types.Part.from_text(text="What is the weather like in Boston?")],
+    role='user',
+    parts=[types.Part.from_text(text='What is the weather like in Boston?')],
 )
 function_call_part = response.function_calls[0]
+function_call_content = response.candidates[0].content


 try:
     function_result = get_current_weather(
         **function_call_part.function_call.args
     )
-    function_response = {"result": function_result}
+    function_response = {'result': function_result}
 except (
     Exception
 ) as e:  # instead of raising the exception, you can let the model handle it
-    function_response = {"error": str(e)}
+    function_response = {'error': str(e)}


 function_response_part = types.Part.from_function_response(
-    name=function_call_part.function_call.name,
+    name=function_call_part.name,
     response=function_response,
 )
 function_response_content = types.Content(
-    role="tool", parts=[function_response_part]
+    role='tool', parts=[function_response_part]
 )

 response = client.models.generate_content(
-    model="gemini-2.0-flash-exp",
+    model='gemini-2.0-flash-001',
     contents=[
         user_prompt_content,
         function_call_content,
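
Two behavioral changes are visible in the hunk above: 1.1.0 captures the model's turn as `function_call_content = response.candidates[0].content` so it can be replayed in the follow-up request, and the function name is now read directly off the item returned by `response.function_calls`. A hedged sketch of that accessor change, assuming those items are `types.FunctionCall` objects:

```python
# In 1.1.0 the function call part exposes its fields directly.
function_call_part = response.function_calls[0]
print(function_call_part.name)  # e.g. 'get_current_weather'
print(function_call_part.args)  # dict of arguments proposed by the model
```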
@@ -338,7 +370,7 @@ def get_current_weather(location: str) -> str:
     return "sunny"

 response = client.models.generate_content(
-    model="gemini-2.0-flash-exp",
+    model="gemini-2.0-flash-001",
     contents="What is the weather like in Boston?",
     config=types.GenerateContentConfig(
         tools=[get_current_weather],
@@ -366,7 +398,7 @@ def get_current_weather(location: str) -> str:
     return "sunny"

 response = client.models.generate_content(
-    model="gemini-2.0-flash-exp",
+    model="gemini-2.0-flash-001",
    contents="What is the weather like in Boston?",
    config=types.GenerateContentConfig(
        tools=[get_current_weather],
@@ -400,10 +432,10 @@ class CountryInfo(BaseModel):


 response = client.models.generate_content(
-    model="gemini-2.0-flash-exp",
-    contents="Give me information for the United States.",
+    model='gemini-2.0-flash-001',
+    contents='Give me information for the United States.',
     config=types.GenerateContentConfig(
-        response_mime_type="application/json",
+        response_mime_type='application/json',
         response_schema=CountryInfo,
     ),
 )
@@ -412,30 +444,30 @@ print(response.text)

 ```python
 response = client.models.generate_content(
-    model="gemini-2.0-flash-exp",
-    contents="Give me information for the United States.",
+    model='gemini-2.0-flash-001',
+    contents='Give me information for the United States.',
     config=types.GenerateContentConfig(
-        response_mime_type="application/json",
+        response_mime_type='application/json',
         response_schema={
-            "required": [
-                "name",
-                "population",
-                "capital",
-                "continent",
-                "gdp",
-                "official_language",
-                "total_area_sq_mi",
+            'required': [
+                'name',
+                'population',
+                'capital',
+                'continent',
+                'gdp',
+                'official_language',
+                'total_area_sq_mi',
             ],
-            "properties": {
-                "name": {"type": "STRING"},
-                "population": {"type": "INTEGER"},
-                "capital": {"type": "STRING"},
-                "continent": {"type": "STRING"},
-                "gdp": {"type": "INTEGER"},
-                "official_language": {"type": "STRING"},
-                "total_area_sq_mi": {"type": "INTEGER"},
+            'properties': {
+                'name': {'type': 'STRING'},
+                'population': {'type': 'INTEGER'},
+                'capital': {'type': 'STRING'},
+                'continent': {'type': 'STRING'},
+                'gdp': {'type': 'INTEGER'},
+                'official_language': {'type': 'STRING'},
+                'total_area_sq_mi': {'type': 'INTEGER'},
             },
-            "type": "OBJECT",
+            'type': 'OBJECT',
         },
     ),
 )
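
Since both schema variants above force `response_mime_type='application/json'`, the reply text is a JSON document. A small sketch of consuming it with the standard library; the field names follow the schema in the hunk:

```python
import json

# Parse the JSON reply into a plain dict and read a couple of schema fields.
country = json.loads(response.text)
print(country['name'], country['capital'])
```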
@@ -458,7 +490,7 @@ class InstrumentEnum(Enum):
     KEYBOARD = 'Keyboard'

 response = client.models.generate_content(
-    model='gemini-2.0-flash-exp',
+    model='gemini-2.0-flash-001',
     contents='What instrument plays multiple notes at once?',
     config={
         'response_mime_type': 'text/x.enum',
@@ -483,7 +515,7 @@ class InstrumentEnum(Enum):
     KEYBOARD = 'Keyboard'

 response = client.models.generate_content(
-    model='gemini-2.0-flash-exp',
+    model='gemini-2.0-flash-001',
     contents='What instrument plays multiple notes at once?',
     config={
         'response_mime_type': 'application/json',
@@ -499,9 +531,9 @@ print(response.text)

 ```python
 for chunk in client.models.generate_content_stream(
-    model="gemini-2.0-flash-exp", contents="Tell me a story in 300 words."
+    model='gemini-2.0-flash-001', contents='Tell me a story in 300 words.'
 ):
-    print(chunk.text, end="")
+    print(chunk.text, end='')
 ```

 #### Streaming for image content
@@ -511,35 +543,35 @@ you can use the `from_uri` class method to create a `Part` object.

 ```python
 for chunk in client.models.generate_content_stream(
-    model="gemini-2.0-flash-exp",
+    model='gemini-2.0-flash-001',
     contents=[
-        "What is this image about?",
+        'What is this image about?',
         types.Part.from_uri(
-            file_uri="gs://generativeai-downloads/images/scones.jpg",
-            mime_type="image/jpeg",
+            file_uri='gs://generativeai-downloads/images/scones.jpg',
+            mime_type='image/jpeg',
         ),
     ],
 ):
-    print(chunk.text, end="")
+    print(chunk.text, end='')
 ```

 If your image is stored in your local file system, you can read it in as bytes
 data and use the `from_bytes` class method to create a `Part` object.

 ```python
-YOUR_IMAGE_PATH = "your_image_path"
-YOUR_IMAGE_MIME_TYPE = "your_image_mime_type"
-with open(YOUR_IMAGE_PATH, "rb") as f:
+YOUR_IMAGE_PATH = 'your_image_path'
+YOUR_IMAGE_MIME_TYPE = 'your_image_mime_type'
+with open(YOUR_IMAGE_PATH, 'rb') as f:
     image_bytes = f.read()

 for chunk in client.models.generate_content_stream(
-    model="gemini-2.0-flash-exp",
+    model='gemini-2.0-flash-001',
     contents=[
-        "What is this image about?",
+        'What is this image about?',
         types.Part.from_bytes(data=image_bytes, mime_type=YOUR_IMAGE_MIME_TYPE),
     ],
 ):
-    print(chunk.text, end="")
+    print(chunk.text, end='')
 ```

 ### Async
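
A side note on the `YOUR_IMAGE_MIME_TYPE` placeholder in the hunk above: the standard-library `mimetypes` module can derive it from the file path. This is a convenience sketch, not part of the SDK:

```python
import mimetypes

# Guess the mime type from the extension, e.g. 'image/jpeg' for a .jpg file.
mime_type, _ = mimetypes.guess_type(YOUR_IMAGE_PATH)
print(mime_type)
```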
@@ -552,7 +584,7 @@ of `client.models.generate_content`

 ```python
 response = await client.aio.models.generate_content(
-    model="gemini-2.0-flash-exp", contents="Tell me a story in 300 words."
+    model='gemini-2.0-flash-001', contents='Tell me a story in 300 words.'
 )

 print(response.text)
@@ -562,17 +594,17 @@ print(response.text)

 ```python
 async for chunk in await client.aio.models.generate_content_stream(
-    model="gemini-2.0-flash-exp", contents="Tell me a story in 300 words."
+    model='gemini-2.0-flash-001', contents='Tell me a story in 300 words.'
 ):
-    print(chunk.text, end="")
+    print(chunk.text, end='')
 ```

 ### Count Tokens and Compute Tokens

 ```python
 response = client.models.count_tokens(
-    model="gemini-2.0-flash-exp",
-    contents="why is the sky blue?",
+    model='gemini-2.0-flash-001',
+    contents='why is the sky blue?',
 )
 print(response)
 ```
@@ -583,8 +615,8 @@ Compute tokens is only supported in Vertex AI.

 ```python
 response = client.models.compute_tokens(
-    model="gemini-2.0-flash-exp",
-    contents="why is the sky blue?",
+    model='gemini-2.0-flash-001',
+    contents='why is the sky blue?',
 )
 print(response)
 ```
@@ -593,8 +625,8 @@ print(response)

 ```python
 response = await client.aio.models.count_tokens(
-    model="gemini-2.0-flash-exp",
-    contents="why is the sky blue?",
+    model='gemini-2.0-flash-001',
+    contents='why is the sky blue?',
 )
 print(response)
 ```
@@ -603,8 +635,8 @@ print(response)

 ```python
 response = client.models.embed_content(
-    model="text-embedding-004",
-    contents="why is the sky blue?",
+    model='text-embedding-004',
+    contents='why is the sky blue?',
 )
 print(response)
 ```
@@ -612,8 +644,8 @@ print(response)
 ```python
 # multiple contents with config
 response = client.models.embed_content(
-    model="text-embedding-004",
-    contents=["why is the sky blue?", "What is your age?"],
+    model='text-embedding-004',
+    contents=['why is the sky blue?', 'What is your age?'],
     config=types.EmbedContentConfig(output_dimensionality=10),
 )
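
For the embedding hunks above, a hedged sketch of reading the vectors back out, assuming the response carries one embedding per input with a `values` list (the Gemini embedding response shape):

```python
# With output_dimensionality=10, each returned vector should hold 10 floats.
for embedding in response.embeddings:
    print(len(embedding.values))
```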

@@ -629,13 +661,13 @@ Support for generate images in Gemini Developer API is behind an allowlist
 ```python
 # Generate Image
 response1 = client.models.generate_images(
-    model="imagen-3.0-generate-002",
-    prompt="An umbrella in the foreground, and a rainy night sky in the background",
+    model='imagen-3.0-generate-002',
+    prompt='An umbrella in the foreground, and a rainy night sky in the background',
     config=types.GenerateImagesConfig(
-        negative_prompt="human",
+        negative_prompt='human',
         number_of_images=1,
         include_rai_reason=True,
-        output_mime_type="image/jpeg",
+        output_mime_type='image/jpeg',
     ),
 )
 response1.generated_images[0].image.show()
@@ -648,12 +680,12 @@ Upscale image is only supported in Vertex AI.
 ```python
 # Upscale the generated image from above
 response2 = client.models.upscale_image(
-    model="imagen-3.0-generate-001",
+    model='imagen-3.0-generate-001',
     image=response1.generated_images[0].image,
-    upscale_factor="x2",
+    upscale_factor='x2',
     config=types.UpscaleImageConfig(
         include_rai_reason=True,
-        output_mime_type="image/jpeg",
+        output_mime_type='image/jpeg',
     ),
 )
 response2.generated_images[0].image.show()
@@ -678,21 +710,21 @@ raw_ref_image = RawReferenceImage(
 mask_ref_image = MaskReferenceImage(
     reference_id=2,
     config=types.MaskReferenceConfig(
-        mask_mode="MASK_MODE_BACKGROUND",
+        mask_mode='MASK_MODE_BACKGROUND',
         mask_dilation=0,
     ),
 )

 response3 = client.models.edit_image(
-    model="imagen-3.0-capability-001",
-    prompt="Sunlight and clear sky",
+    model='imagen-3.0-capability-001',
+    prompt='Sunlight and clear sky',
     reference_images=[raw_ref_image, mask_ref_image],
     config=types.EditImageConfig(
-        edit_mode="EDIT_MODE_INPAINT_INSERTION",
+        edit_mode='EDIT_MODE_INPAINT_INSERTION',
         number_of_images=1,
-        negative_prompt="human",
+        negative_prompt='human',
         include_rai_reason=True,
-        output_mime_type="image/jpeg",
+        output_mime_type='image/jpeg',
     ),
 )
 response3.generated_images[0].image.show()
@@ -705,32 +737,32 @@ Create a chat session to start a multi-turn conversations with the model.
 ### Send Message

 ```python
-chat = client.chats.create(model="gemini-2.0-flash-exp")
-response = chat.send_message("tell me a story")
+chat = client.chats.create(model='gemini-2.0-flash-001')
+response = chat.send_message('tell me a story')
 print(response.text)
 ```

 ### Streaming

 ```python
-chat = client.chats.create(model="gemini-2.0-flash-exp")
-for chunk in chat.send_message_stream("tell me a story"):
+chat = client.chats.create(model='gemini-2.0-flash-001')
+for chunk in chat.send_message_stream('tell me a story'):
     print(chunk.text)
 ```

 ### Async

 ```python
-chat = client.aio.chats.create(model="gemini-2.0-flash-exp")
-response = await chat.send_message("tell me a story")
+chat = client.aio.chats.create(model='gemini-2.0-flash-001')
+response = await chat.send_message('tell me a story')
 print(response.text)
 ```

 ### Async Streaming

 ```python
-chat = client.aio.chats.create(model="gemini-2.0-flash-exp")
-async for chunk in await chat.send_message_stream("tell me a story"):
+chat = client.aio.chats.create(model='gemini-2.0-flash-001')
+async for chunk in await chat.send_message_stream('tell me a story'):
     print(chunk.text)
 ```
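
Because the chat object in these hunks holds the conversation state, a follow-up message can refer back to an earlier turn. A small sketch, assuming a `client` configured as earlier:

```python
chat = client.chats.create(model='gemini-2.0-flash-001')
chat.send_message('tell me a story')
# The second turn relies on the history kept inside `chat`.
response = chat.send_message('now summarize that story in one sentence')
print(response.text)
```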

@@ -746,8 +778,8 @@ Files are only supported in Gemini Developer API.
 ### Upload

 ```python
-file1 = client.files.upload(path="2312.11805v3.pdf")
-file2 = client.files.upload(path="2403.05530.pdf")
+file1 = client.files.upload(path='2312.11805v3.pdf')
+file2 = client.files.upload(path='2403.05530.pdf')

 print(file1)
 print(file2)
@@ -756,7 +788,7 @@ print(file2)
 ### Delete

 ```python
-file3 = client.files.upload(path="2312.11805v3.pdf")
+file3 = client.files.upload(path='2312.11805v3.pdf')

 client.files.delete(name=file3.name)
 ```
@@ -770,32 +802,32 @@ client.files.delete(name=file3.name)
 ```python
 if client.vertexai:
     file_uris = [
-        "gs://cloud-samples-data/generative-ai/pdf/2312.11805v3.pdf",
-        "gs://cloud-samples-data/generative-ai/pdf/2403.05530.pdf",
+        'gs://cloud-samples-data/generative-ai/pdf/2312.11805v3.pdf',
+        'gs://cloud-samples-data/generative-ai/pdf/2403.05530.pdf',
     ]
 else:
     file_uris = [file1.uri, file2.uri]

 cached_content = client.caches.create(
-    model="gemini-1.5-pro-002",
+    model='gemini-1.5-pro-002',
     config=types.CreateCachedContentConfig(
         contents=[
             types.Content(
-                role="user",
+                role='user',
                 parts=[
                     types.Part.from_uri(
-                        file_uri=file_uris[0], mime_type="application/pdf"
+                        file_uri=file_uris[0], mime_type='application/pdf'
                     ),
                     types.Part.from_uri(
                         file_uri=file_uris[1],
-                        mime_type="application/pdf",
+                        mime_type='application/pdf',
                     ),
                 ],
             )
         ],
-        system_instruction="What is the sum of the two pdfs?",
-        display_name="test cache",
-        ttl="3600s",
+        system_instruction='What is the sum of the two pdfs?',
+        display_name='test cache',
+        ttl='3600s',
     ),
 )
 ```

@@ -810,8 +842,8 @@ cached_content = client.caches.get(name=cached_content.name)

 ```python
 response = client.models.generate_content(
-    model="gemini-1.5-pro-002",
-    contents="Summarize the pdfs",
+    model='gemini-1.5-pro-002',
+    contents='Summarize the pdfs',
     config=types.GenerateContentConfig(
         cached_content=cached_content.name,
     ),
@@ -831,17 +863,17 @@ tuning through `tune`.

 ```python
 if client.vertexai:
-    model = "gemini-1.5-pro-002"
+    model = 'gemini-1.5-pro-002'
     training_dataset = types.TuningDataset(
-        gcs_uri="gs://cloud-samples-data/ai-platform/generative_ai/gemini-1_5/text/sft_train_data.jsonl",
+        gcs_uri='gs://cloud-samples-data/ai-platform/generative_ai/gemini-1_5/text/sft_train_data.jsonl',
     )
 else:
-    model = "models/gemini-1.0-pro-001"
+    model = 'models/gemini-1.0-pro-001'
     training_dataset = types.TuningDataset(
         examples=[
             types.TuningExample(
-                text_input=f"Input text {i}",
-                output=f"Output text {i}",
+                text_input=f'Input text {i}',
+                output=f'Output text {i}',
             )
             for i in range(5)
         ],
@@ -853,7 +885,7 @@ tuning_job = client.tunings.tune(
     base_model=model,
     training_dataset=training_dataset,
     config=types.CreateTuningJobConfig(
-        epoch_count=1, tuned_model_display_name="test_dataset_examples model"
+        epoch_count=1, tuned_model_display_name='test_dataset_examples model'
     ),
 )
 print(tuning_job)
@@ -871,8 +903,8 @@ import time

 running_states = set(
     [
-        "JOB_STATE_PENDING",
-        "JOB_STATE_RUNNING",
+        'JOB_STATE_PENDING',
+        'JOB_STATE_RUNNING',
     ]
 )
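
For orientation, the next hunk's context line shows the `while` header this set feeds. A hedged sketch of the full polling loop, assuming `client.tunings.get` is the job getter and that `time` was imported as the hunk header above indicates:

```python
# Refresh the tuning job until it leaves a running state.
while tuning_job.state in running_states:
    print(tuning_job.state)
    tuning_job = client.tunings.get(name=tuning_job.name)
    time.sleep(10)
```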

@@ -887,7 +919,7 @@ while tuning_job.state in running_states:
 ```python
 response = client.models.generate_content(
     model=tuning_job.tuned_model.endpoint,
-    contents="why is the sky blue?",
+    contents='why is the sky blue?',
 )

 print(response.text)
@@ -905,12 +937,12 @@ print(tuned_model)
 To retrieve base models, see [list base models](#list-base-models).

 ```python
-for model in client.models.list(config={"page_size": 10, "query_base": False}):
+for model in client.models.list(config={'page_size': 10, 'query_base': False}):
     print(model)
 ```

 ```python
-pager = client.models.list(config={"page_size": 10, "query_base": False})
+pager = client.models.list(config={'page_size': 10, 'query_base': False})
 print(pager.page_size)
 print(pager[0])
 pager.next_page()
@@ -920,12 +952,12 @@ print(pager[0])
 #### Async

 ```python
-async for job in await client.aio.models.list(config={"page_size": 10, "query_base": False}):
+async for job in await client.aio.models.list(config={'page_size': 10, 'query_base': False}):
     print(job)
 ```

 ```python
-async_pager = await client.aio.models.list(config={"page_size": 10, "query_base": False})
+async_pager = await client.aio.models.list(config={'page_size': 10, 'query_base': False})
 print(async_pager.page_size)
 print(async_pager[0])
 await async_pager.next_page()
@@ -940,7 +972,7 @@ model = pager[0]
 model = client.models.update(
     model=model.name,
     config=types.UpdateModelConfig(
-        display_name="my tuned model", description="my tuned model description"
+        display_name='my tuned model', description='my tuned model description'
     ),
 )

@@ -951,12 +983,12 @@ print(model)
 ### List Tuning Jobs

 ```python
-for job in client.tunings.list(config={"page_size": 10}):
+for job in client.tunings.list(config={'page_size': 10}):
     print(job)
 ```

 ```python
-pager = client.tunings.list(config={"page_size": 10})
+pager = client.tunings.list(config={'page_size': 10})
 print(pager.page_size)
 print(pager[0])
 pager.next_page()
@@ -966,12 +998,12 @@ print(pager[0])
 #### Async

 ```python
-async for job in await client.aio.tunings.list(config={"page_size": 10}):
+async for job in await client.aio.tunings.list(config={'page_size': 10}):
     print(job)
 ```

 ```python
-async_pager = await client.aio.tunings.list(config={"page_size": 10})
+async_pager = await client.aio.tunings.list(config={'page_size': 10})
 print(async_pager.page_size)
 print(async_pager[0])
 await async_pager.next_page()
@@ -987,8 +1019,8 @@ Only supported in Vertex AI.
 ```python
 # Specify model and source file only, destination and job display name will be auto-populated
 job = client.batches.create(
-    model="gemini-1.5-flash-002",
-    src="bq://my-project.my-dataset.my-table",
+    model='gemini-1.5-flash-002',
+    src='bq://my-project.my-dataset.my-table',
 )

 job
@@ -1004,10 +1036,10 @@ job.state
 ```python
 completed_states = set(
     [
-        "JOB_STATE_SUCCEEDED",
-        "JOB_STATE_FAILED",
-        "JOB_STATE_CANCELLED",
-        "JOB_STATE_PAUSED",
+        'JOB_STATE_SUCCEEDED',
+        'JOB_STATE_FAILED',
+        'JOB_STATE_CANCELLED',
+        'JOB_STATE_PAUSED',
     ]
 )
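
To close the loop on this hunk, a hedged sketch of polling the batch job with the states defined above; `client.batches.get(name=...)` is assumed to be the job getter, mirroring the tunings pattern earlier in the README:

```python
import time

# Refresh the batch job until it reaches a terminal state.
while job.state not in completed_states:
    time.sleep(30)
    job = client.batches.get(name=job.name)

print(job.state)
```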