google-genai 0.5.0__tar.gz → 0.7.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. google_genai-0.7.0/PKG-INFO +1021 -0
  2. google_genai-0.7.0/README.md +994 -0
  3. {google_genai-0.5.0 → google_genai-0.7.0}/google/genai/_api_client.py +234 -131
  4. google_genai-0.7.0/google/genai/_api_module.py +24 -0
  5. {google_genai-0.5.0 → google_genai-0.7.0}/google/genai/_automatic_function_calling_util.py +43 -22
  6. {google_genai-0.5.0 → google_genai-0.7.0}/google/genai/_common.py +37 -12
  7. {google_genai-0.5.0 → google_genai-0.7.0}/google/genai/_extra_utils.py +25 -19
  8. {google_genai-0.5.0 → google_genai-0.7.0}/google/genai/_replay_api_client.py +47 -35
  9. {google_genai-0.5.0 → google_genai-0.7.0}/google/genai/_test_api_client.py +1 -1
  10. {google_genai-0.5.0 → google_genai-0.7.0}/google/genai/_transformers.py +301 -51
  11. {google_genai-0.5.0 → google_genai-0.7.0}/google/genai/batches.py +204 -165
  12. {google_genai-0.5.0 → google_genai-0.7.0}/google/genai/caches.py +127 -144
  13. {google_genai-0.5.0 → google_genai-0.7.0}/google/genai/chats.py +22 -18
  14. {google_genai-0.5.0 → google_genai-0.7.0}/google/genai/client.py +32 -37
  15. {google_genai-0.5.0 → google_genai-0.7.0}/google/genai/errors.py +1 -1
  16. {google_genai-0.5.0 → google_genai-0.7.0}/google/genai/files.py +333 -165
  17. {google_genai-0.5.0 → google_genai-0.7.0}/google/genai/live.py +16 -6
  18. {google_genai-0.5.0 → google_genai-0.7.0}/google/genai/models.py +601 -283
  19. {google_genai-0.5.0 → google_genai-0.7.0}/google/genai/tunings.py +91 -428
  20. {google_genai-0.5.0 → google_genai-0.7.0}/google/genai/types.py +1190 -955
  21. {google_genai-0.5.0 → google_genai-0.7.0}/google/genai/version.py +1 -1
  22. google_genai-0.7.0/google_genai.egg-info/PKG-INFO +1021 -0
  23. {google_genai-0.5.0 → google_genai-0.7.0}/google_genai.egg-info/SOURCES.txt +1 -0
  24. {google_genai-0.5.0 → google_genai-0.7.0}/google_genai.egg-info/requires.txt +0 -1
  25. {google_genai-0.5.0 → google_genai-0.7.0}/pyproject.toml +1 -2
  26. google_genai-0.5.0/PKG-INFO +0 -888
  27. google_genai-0.5.0/README.md +0 -860
  28. google_genai-0.5.0/google_genai.egg-info/PKG-INFO +0 -888
  29. {google_genai-0.5.0 → google_genai-0.7.0}/LICENSE +0 -0
  30. {google_genai-0.5.0 → google_genai-0.7.0}/google/genai/__init__.py +0 -0
  31. {google_genai-0.5.0 → google_genai-0.7.0}/google/genai/pagers.py +0 -0
  32. {google_genai-0.5.0 → google_genai-0.7.0}/google_genai.egg-info/dependency_links.txt +0 -0
  33. {google_genai-0.5.0 → google_genai-0.7.0}/google_genai.egg-info/top_level.txt +0 -0
  34. {google_genai-0.5.0 → google_genai-0.7.0}/setup.cfg +0 -0
@@ -0,0 +1,1021 @@
+ Metadata-Version: 2.2
+ Name: google-genai
+ Version: 0.7.0
+ Summary: GenAI Python SDK
+ Author-email: Google LLC <googleapis-packages@google.com>
+ License: Apache-2.0
+ Project-URL: Homepage, https://github.com/googleapis/python-genai
+ Classifier: Intended Audience :: Developers
+ Classifier: License :: OSI Approved :: Apache Software License
+ Classifier: Operating System :: OS Independent
+ Classifier: Programming Language :: Python
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.9
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Classifier: Programming Language :: Python :: 3.13
+ Classifier: Topic :: Internet
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
+ Requires-Python: >=3.9
+ Description-Content-Type: text/markdown
+ License-File: LICENSE
+ Requires-Dist: google-auth<3.0.0dev,>=2.14.1
+ Requires-Dist: pydantic<3.0.0dev,>=2.0.0
+ Requires-Dist: requests<3.0.0dev,>=2.28.1
+ Requires-Dist: websockets<15.0dev,>=13.0
+
+ # Google Gen AI SDK
+
+ [![PyPI version](https://img.shields.io/pypi/v/google-genai.svg)](https://pypi.org/project/google-genai/)
+
+ --------
+ **Documentation:** https://googleapis.github.io/python-genai/
+
+ -----
+
+ The Google Gen AI Python SDK provides an interface for developers to integrate Google's generative models into their Python applications. It supports the [Gemini Developer API](https://ai.google.dev/gemini-api/docs) and [Vertex AI](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/overview) APIs. This is an early release; the API is subject to change. Please do not use this SDK in production environments at this stage.
+
+ ## Installation
+
+ ```cmd
+ pip install google-genai
+ ```
+
+ ## Imports
+
+ ```python
+ from google import genai
+ from google.genai import types
+ ```
+
+ ## Create a client
+
+ Please run one of the following code blocks to create a client for
+ different services ([Gemini Developer API](https://ai.google.dev/gemini-api/docs) or [Vertex AI](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/overview)).
+
+ ```python
+ # Only run this block for Gemini Developer API
+ client = genai.Client(api_key="GEMINI_API_KEY")
+ ```
+
+ ```python
+ # Only run this block for Vertex AI API
+ client = genai.Client(
+     vertexai=True, project="your-project-id", location="us-central1"
+ )
+ ```
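+
+ The client can also pick up the API key from the environment. A minimal sketch, assuming your SDK version reads the `GOOGLE_API_KEY` environment variable (check the documentation for your release):
+
+ ```python
+ # Assumption: the client falls back to the GOOGLE_API_KEY environment
+ # variable when no explicit api_key argument is given.
+ import os
+
+ os.environ["GOOGLE_API_KEY"] = "GEMINI_API_KEY"  # usually exported in the shell instead
+ client = genai.Client()
+ ```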
+
+ ## Types
+
+ Parameter types can be specified as either dictionaries (`TypedDict`) or
+ [Pydantic Models](https://pydantic.readthedocs.io/en/stable/model.html).
+ Pydantic model types are available in the `types` module.
+
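+ For example, the same generation config can be passed either way; a minimal sketch (both calls are intended to be equivalent):
+
+ ```python
+ # Dictionary (TypedDict-style) form.
+ response = client.models.generate_content(
+     model="gemini-2.0-flash-exp",
+     contents="why is the sky blue?",
+     config={"temperature": 0.3, "max_output_tokens": 100},
+ )
+
+ # Equivalent Pydantic form, using the type from the types module.
+ response = client.models.generate_content(
+     model="gemini-2.0-flash-exp",
+     contents="why is the sky blue?",
+     config=types.GenerateContentConfig(temperature=0.3, max_output_tokens=100),
+ )
+ ```
+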
+ ## Models
+
+ The `client.models` module exposes model inference and model getters.
+
+ ### Generate Content
+
+ #### with text content
+
+ ```python
+ response = client.models.generate_content(
+     model="gemini-2.0-flash-exp", contents="why is the sky blue?"
+ )
+ print(response.text)
+ ```
+
+ #### with uploaded file (Gemini Developer API only)
+
+ Download the file in the console:
+
+ ```cmd
+ !wget -q https://storage.googleapis.com/generativeai-downloads/data/a11.txt
+ ```
+
+ Then upload and use it in Python:
+
+ ```python
+ file = client.files.upload(path="a11.txt")
+ response = client.models.generate_content(
+     model="gemini-2.0-flash-exp", contents=["Summarize this file", file]
+ )
+ print(response.text)
+ ```
+
+ ### System Instructions and Other Configs
+
+ ```python
+ response = client.models.generate_content(
+     model="gemini-2.0-flash-exp",
+     contents="high",
+     config=types.GenerateContentConfig(
+         system_instruction="I say high, you say low",
+         temperature=0.3,
+     ),
+ )
+ print(response.text)
+ ```
+
+ ### Typed Config
+
+ All API methods support Pydantic types for parameters as well as
+ dictionaries. You can get the type from `google.genai.types`.
+
+ ```python
+ response = client.models.generate_content(
+     model="gemini-2.0-flash-exp",
+     contents=types.Part.from_text(text="Why is the sky blue?"),
+     config=types.GenerateContentConfig(
+         temperature=0,
+         top_p=0.95,
+         top_k=20,
+         candidate_count=1,
+         seed=5,
+         max_output_tokens=100,
+         stop_sequences=["STOP!"],
+         presence_penalty=0.0,
+         frequency_penalty=0.0,
+     ),
+ )
+
+ print(response.text)
+ ```
+
+ ### Thinking
+
+ The Gemini 2.0 Flash Thinking model is an experimental model that can return
+ "thoughts" as part of its response.
+
+ #### Gemini Developer API
+
+ The thinking config is only available in the v1alpha API version of the Gemini Developer API.
+
+ ```python
+ response = client.models.generate_content(
+     model='gemini-2.0-flash-thinking-exp',
+     contents='What is the sum of natural numbers from 1 to 100?',
+     config=types.GenerateContentConfig(
+         thinking_config=types.ThinkingConfig(include_thoughts=True),
+         http_options=types.HttpOptions(api_version='v1alpha'),
+     )
+ )
+ for part in response.candidates[0].content.parts:
+     print(part)
+ ```
+
+ #### Vertex AI API
+
+ ```python
+ response = client.models.generate_content(
+     model='gemini-2.0-flash-thinking-exp-01-21',
+     contents='What is the sum of natural numbers from 1 to 100?',
+     config=types.GenerateContentConfig(
+         thinking_config=types.ThinkingConfig(include_thoughts=True),
+     )
+ )
+ for part in response.candidates[0].content.parts:
+     print(part)
+ ```
+
+ ### List Base Models
+
+ To retrieve tuned models, see [list tuned models](#list-tuned-models).
+
+ ```python
+ for model in client.models.list(config={"query_base": True}):
+     print(model)
+ ```
+
+ ```python
+ pager = client.models.list(config={"page_size": 10, "query_base": True})
+ print(pager.page_size)
+ print(pager[0])
+ pager.next_page()
+ print(pager[0])
+ ```
+
+ #### Async
+
+ ```python
+ async for model in await client.aio.models.list(config={"query_base": True}):
+     print(model)
+ ```
+
+ ```python
+ async_pager = await client.aio.models.list(config={"page_size": 10, "query_base": True})
+ print(async_pager.page_size)
+ print(async_pager[0])
+ await async_pager.next_page()
+ print(async_pager[0])
+ ```
+
+ ### Safety Settings
+
+ ```python
+ response = client.models.generate_content(
+     model="gemini-2.0-flash-exp",
+     contents="Say something bad.",
+     config=types.GenerateContentConfig(
+         safety_settings=[
+             types.SafetySetting(
+                 category="HARM_CATEGORY_HATE_SPEECH",
+                 threshold="BLOCK_ONLY_HIGH",
+             )
+         ]
+     ),
+ )
+ print(response.text)
+ ```
+
+ ### Function Calling
+
+ #### Automatic Python Function Support
+
+ You can pass a Python function directly as a tool; it will be automatically
+ called, and its response will be passed back to the model.
+
+ ```python
+ def get_current_weather(location: str) -> str:
+     """Returns the current weather.
+
+     Args:
+       location: The city and state, e.g. San Francisco, CA
+     """
+     return "sunny"
+
+
+ response = client.models.generate_content(
+     model="gemini-2.0-flash-exp",
+     contents="What is the weather like in Boston?",
+     config=types.GenerateContentConfig(tools=[get_current_weather]),
+ )
+
+ print(response.text)
+ ```
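+
+ If you would rather inspect the function call yourself, some SDK releases let you turn automatic calling off. A hedged sketch; the `AutomaticFunctionCallingConfig(disable=True)` field is an assumption and may not be available in every version:
+
+ ```python
+ # Assumption: with automatic calling disabled, the raw function call part
+ # is returned instead of being executed.
+ response = client.models.generate_content(
+     model="gemini-2.0-flash-exp",
+     contents="What is the weather like in Boston?",
+     config=types.GenerateContentConfig(
+         tools=[get_current_weather],
+         automatic_function_calling=types.AutomaticFunctionCallingConfig(
+             disable=True
+         ),
+     ),
+ )
+ print(response.function_calls)
+ ```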
+
+ #### Manually declare and invoke a function for function calling
+
+ If you don't want to use the automatic function support, you can manually
+ declare the function and invoke it.
+
+ The following example shows how to declare a function and pass it as a tool.
+ Then you will receive a function call part in the response.
+
+ ```python
+ function = types.FunctionDeclaration(
+     name="get_current_weather",
+     description="Get the current weather in a given location",
+     parameters=types.Schema(
+         type="OBJECT",
+         properties={
+             "location": types.Schema(
+                 type="STRING",
+                 description="The city and state, e.g. San Francisco, CA",
+             ),
+         },
+         required=["location"],
+     ),
+ )
+
+ tool = types.Tool(function_declarations=[function])
+
+ response = client.models.generate_content(
+     model="gemini-2.0-flash-exp",
+     contents="What is the weather like in Boston?",
+     config=types.GenerateContentConfig(tools=[tool]),
+ )
+
+ print(response.function_calls[0])
+ ```
+
+ After you receive a function call part from the model, you can invoke the function
+ to get the function response, and then pass the function response back to
+ the model.
+ The following example shows how to do it for a simple function invocation.
+
+ ```python
+ user_prompt_content = types.Content(
+     role="user",
+     parts=[types.Part.from_text(text="What is the weather like in Boston?")],
+ )
+ function_call_part = response.function_calls[0]
+ # The model content containing the function call, echoed back in the next turn.
+ function_call_content = response.candidates[0].content
+
+
+ try:
+     function_result = get_current_weather(**function_call_part.args)
+     function_response = {"result": function_result}
+ except (
+     Exception
+ ) as e:  # instead of raising the exception, you can let the model handle it
+     function_response = {"error": str(e)}
+
+
+ function_response_part = types.Part.from_function_response(
+     name=function_call_part.name,
+     response=function_response,
+ )
+ function_response_content = types.Content(
+     role="tool", parts=[function_response_part]
+ )
+
+ response = client.models.generate_content(
+     model="gemini-2.0-flash-exp",
+     contents=[
+         user_prompt_content,
+         function_call_content,
+         function_response_content,
+     ],
+     config=types.GenerateContentConfig(
+         tools=[tool],
+     ),
+ )
+
+ print(response.text)
+ ```
+
+ ### JSON Response Schema
+
+ #### Pydantic Model Schema support
+
+ Schemas can be provided as Pydantic Models.
+
+ ```python
+ from pydantic import BaseModel
+
+
+ class CountryInfo(BaseModel):
+     name: str
+     population: int
+     capital: str
+     continent: str
+     gdp: int
+     official_language: str
+     total_area_sq_mi: int
+
+
+ response = client.models.generate_content(
+     model="gemini-2.0-flash-exp",
+     contents="Give me information for the United States.",
+     config=types.GenerateContentConfig(
+         response_mime_type="application/json",
+         response_schema=CountryInfo,
+     ),
+ )
+ print(response.text)
+ ```
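+
+ Because the schema is a Pydantic model, the JSON text can be validated back into a typed object using Pydantic's own API; a small sketch:
+
+ ```python
+ # Parse the model's JSON output into a CountryInfo instance.
+ country = CountryInfo.model_validate_json(response.text)
+ print(country.capital)
+ ```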
+
+ Schemas can also be provided as plain dictionaries:
+
+ ```python
+ response = client.models.generate_content(
+     model="gemini-2.0-flash-exp",
+     contents="Give me information for the United States.",
+     config=types.GenerateContentConfig(
+         response_mime_type="application/json",
+         response_schema={
+             "required": [
+                 "name",
+                 "population",
+                 "capital",
+                 "continent",
+                 "gdp",
+                 "official_language",
+                 "total_area_sq_mi",
+             ],
+             "properties": {
+                 "name": {"type": "STRING"},
+                 "population": {"type": "INTEGER"},
+                 "capital": {"type": "STRING"},
+                 "continent": {"type": "STRING"},
+                 "gdp": {"type": "INTEGER"},
+                 "official_language": {"type": "STRING"},
+                 "total_area_sq_mi": {"type": "INTEGER"},
+             },
+             "type": "OBJECT",
+         },
+     ),
+ )
+ print(response.text)
+ ```
+
+ ### Enum Response Schema
+
+ #### Text Response
+
+ You can set `response_mime_type` to `'text/x.enum'` to have the model return one of the
+ enum values as the response.
+
+ ```python
+ from enum import Enum
+
+ class InstrumentEnum(Enum):
+     PERCUSSION = 'Percussion'
+     STRING = 'String'
+     WOODWIND = 'Woodwind'
+     BRASS = 'Brass'
+     KEYBOARD = 'Keyboard'
+
+ response = client.models.generate_content(
+     model='gemini-2.0-flash-exp',
+     contents='What instrument plays multiple notes at once?',
+     config={
+         'response_mime_type': 'text/x.enum',
+         'response_schema': InstrumentEnum,
+     },
+ )
+ print(response.text)
+ ```
+
+ #### JSON Response
+
+ You can also set `response_mime_type` to `'application/json'`; the response will be the
+ same enum value, but serialized as a JSON string (that is, in quotes).
+
+ ```python
+ from enum import Enum
+
+ class InstrumentEnum(Enum):
+     PERCUSSION = 'Percussion'
+     STRING = 'String'
+     WOODWIND = 'Woodwind'
+     BRASS = 'Brass'
+     KEYBOARD = 'Keyboard'
+
+ response = client.models.generate_content(
+     model='gemini-2.0-flash-exp',
+     contents='What instrument plays multiple notes at once?',
+     config={
+         'response_mime_type': 'application/json',
+         'response_schema': InstrumentEnum,
+     },
+ )
+ print(response.text)
+ ```
+
+ ### Streaming
+
+ #### Streaming for text content
+
+ ```python
+ for chunk in client.models.generate_content_stream(
+     model="gemini-2.0-flash-exp", contents="Tell me a story in 300 words."
+ ):
+     print(chunk.text, end="")
+ ```
+
+ #### Streaming for image content
+
+ If your image is stored in [Google Cloud Storage](https://cloud.google.com/storage),
+ you can use the `from_uri` class method to create a `Part` object.
+
+ ```python
+ for chunk in client.models.generate_content_stream(
+     model="gemini-2.0-flash-exp",
+     contents=[
+         "What is this image about?",
+         types.Part.from_uri(
+             file_uri="gs://generativeai-downloads/images/scones.jpg",
+             mime_type="image/jpeg",
+         ),
+     ],
+ ):
+     print(chunk.text, end="")
+ ```
+
+ If your image is stored in your local file system, you can read it in as bytes
+ data and use the `from_bytes` class method to create a `Part` object.
+
+ ```python
+ YOUR_IMAGE_PATH = "your_image_path"
+ YOUR_IMAGE_MIME_TYPE = "your_image_mime_type"
+ with open(YOUR_IMAGE_PATH, "rb") as f:
+     image_bytes = f.read()
+
+ for chunk in client.models.generate_content_stream(
+     model="gemini-2.0-flash-exp",
+     contents=[
+         "What is this image about?",
+         types.Part.from_bytes(data=image_bytes, mime_type=YOUR_IMAGE_MIME_TYPE),
+     ],
+ ):
+     print(chunk.text, end="")
+ ```
+ ### Async
+
+ `client.aio` exposes all the analogous [`async` methods](https://docs.python.org/3/library/asyncio.html)
+ that are available on `client`.
+
+ For example, `client.aio.models.generate_content` is the `async` version
+ of `client.models.generate_content`.
+
+ ```python
+ response = await client.aio.models.generate_content(
+     model="gemini-2.0-flash-exp", contents="Tell me a story in 300 words."
+ )
+
+ print(response.text)
+ ```
+
+ ### Streaming
+
+ ```python
+ async for chunk in await client.aio.models.generate_content_stream(
+     model="gemini-2.0-flash-exp", contents="Tell me a story in 300 words."
+ ):
+     print(chunk.text, end="")
+ ```
+
+ ### Count Tokens and Compute Tokens
+
+ ```python
+ response = client.models.count_tokens(
+     model="gemini-2.0-flash-exp",
+     contents="why is the sky blue?",
+ )
+ print(response)
+ ```
+
+ #### Compute Tokens
+
+ Compute tokens is only supported in Vertex AI.
+
+ ```python
+ response = client.models.compute_tokens(
+     model="gemini-2.0-flash-exp",
+     contents="why is the sky blue?",
+ )
+ print(response)
+ ```
+
+ #### Async
+
+ ```python
+ response = await client.aio.models.count_tokens(
+     model="gemini-2.0-flash-exp",
+     contents="why is the sky blue?",
+ )
+ print(response)
+ ```
+
+ ### Embed Content
+
+ ```python
+ response = client.models.embed_content(
+     model="text-embedding-004",
+     contents="why is the sky blue?",
+ )
+ print(response)
+ ```
+
+ ```python
+ # multiple contents with config
+ response = client.models.embed_content(
+     model="text-embedding-004",
+     contents=["why is the sky blue?", "What is your age?"],
+     config=types.EmbedContentConfig(output_dimensionality=10),
+ )
+
+ print(response)
+ ```
+
+ ### Imagen
+
+ #### Generate Images
+
+ Support for generating images in the Gemini Developer API is behind an allowlist.
+
+ ```python
+ # Generate Image
+ response1 = client.models.generate_images(
+     model="imagen-3.0-generate-002",
+     prompt="An umbrella in the foreground, and a rainy night sky in the background",
+     config=types.GenerateImagesConfig(
+         negative_prompt="human",
+         number_of_images=1,
+         include_rai_reason=True,
+         output_mime_type="image/jpeg",
+     ),
+ )
+ response1.generated_images[0].image.show()
+ ```
+
+ #### Upscale Image
+
+ Upscale image is only supported in Vertex AI.
+
+ ```python
+ # Upscale the generated image from above
+ response2 = client.models.upscale_image(
+     model="imagen-3.0-generate-001",
+     image=response1.generated_images[0].image,
+     upscale_factor="x2",
+     config=types.UpscaleImageConfig(
+         include_rai_reason=True,
+         output_mime_type="image/jpeg",
+     ),
+ )
+ response2.generated_images[0].image.show()
+ ```
+
+ #### Edit Image
+
+ Edit image uses a separate model from generate and upscale.
+
+ Edit image is only supported in Vertex AI.
+
+ ```python
+ # Edit the generated image from above
+ from google.genai.types import RawReferenceImage, MaskReferenceImage
+
+ raw_ref_image = RawReferenceImage(
+     reference_id=1,
+     reference_image=response1.generated_images[0].image,
+ )
+
+ # Model computes a mask of the background
+ mask_ref_image = MaskReferenceImage(
+     reference_id=2,
+     config=types.MaskReferenceConfig(
+         mask_mode="MASK_MODE_BACKGROUND",
+         mask_dilation=0,
+     ),
+ )
+
+ response3 = client.models.edit_image(
+     model="imagen-3.0-capability-001",
+     prompt="Sunlight and clear sky",
+     reference_images=[raw_ref_image, mask_ref_image],
+     config=types.EditImageConfig(
+         edit_mode="EDIT_MODE_INPAINT_INSERTION",
+         number_of_images=1,
+         negative_prompt="human",
+         include_rai_reason=True,
+         output_mime_type="image/jpeg",
+     ),
+ )
+ response3.generated_images[0].image.show()
+ ```
+
+ ## Chats
+
+ Create a chat session to start a multi-turn conversation with the model.
+
+ ### Send Message
+
+ ```python
+ chat = client.chats.create(model="gemini-2.0-flash-exp")
+ response = chat.send_message("tell me a story")
+ print(response.text)
+ ```
+
+ ### Streaming
+
+ ```python
+ chat = client.chats.create(model="gemini-2.0-flash-exp")
+ for chunk in chat.send_message_stream("tell me a story"):
+     print(chunk.text)
+ ```
+
+ ### Async
+
+ ```python
+ chat = client.aio.chats.create(model="gemini-2.0-flash-exp")
+ response = await chat.send_message("tell me a story")
+ print(response.text)
+ ```
+
+ ### Async Streaming
+
+ ```python
+ chat = client.aio.chats.create(model="gemini-2.0-flash-exp")
+ async for chunk in await chat.send_message_stream("tell me a story"):
+     print(chunk.text)
+ ```
+
+ ## Files
+
+ Files are only supported in the Gemini Developer API.
+
+ Download these example PDFs in the console:
+
+ ```cmd
+ !gsutil cp gs://cloud-samples-data/generative-ai/pdf/2312.11805v3.pdf .
+ !gsutil cp gs://cloud-samples-data/generative-ai/pdf/2403.05530.pdf .
+ ```
+
+ ### Upload
+
+ ```python
+ file1 = client.files.upload(path="2312.11805v3.pdf")
+ file2 = client.files.upload(path="2403.05530.pdf")
+
+ print(file1)
+ print(file2)
+ ```
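+
+ To look up a previously uploaded file, the files module also exposes a getter; a brief sketch, assuming `client.files.get` accepts the resource name returned by `upload`:
+
+ ```python
+ # Assumption: files.get retrieves file metadata by resource name.
+ file_info = client.files.get(name=file1.name)
+ print(file_info)
+ ```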
+
+ ### Delete
+
+ ```python
+ file3 = client.files.upload(path="2312.11805v3.pdf")
+
+ client.files.delete(name=file3.name)
+ ```
+
+ ## Caches
+
+ `client.caches` contains the control plane APIs for cached content.
+
+ ### Create
+
+ ```python
+ if client.vertexai:
+     file_uris = [
+         "gs://cloud-samples-data/generative-ai/pdf/2312.11805v3.pdf",
+         "gs://cloud-samples-data/generative-ai/pdf/2403.05530.pdf",
+     ]
+ else:
+     file_uris = [file1.uri, file2.uri]
+
+ cached_content = client.caches.create(
+     model="gemini-1.5-pro-002",
+     config=types.CreateCachedContentConfig(
+         contents=[
+             types.Content(
+                 role="user",
+                 parts=[
+                     types.Part.from_uri(
+                         file_uri=file_uris[0], mime_type="application/pdf"
+                     ),
+                     types.Part.from_uri(
+                         file_uri=file_uris[1],
+                         mime_type="application/pdf",
+                     ),
+                 ],
+             )
+         ],
+         system_instruction="What is the sum of the two pdfs?",
+         display_name="test cache",
+         ttl="3600s",
+     ),
+ )
+ ```
+
+ ### Get
+
+ ```python
+ cached_content = client.caches.get(name=cached_content.name)
+ ```
+
+ ### Generate Content with Caches
+
+ ```python
+ response = client.models.generate_content(
+     model="gemini-1.5-pro-002",
+     contents="Summarize the pdfs",
+     config=types.GenerateContentConfig(
+         cached_content=cached_content.name,
+     ),
+ )
+ print(response.text)
+ ```
+
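+ A cache can be removed once it is no longer needed; a short sketch, assuming a `delete` method symmetric with `get`:
+
+ ```python
+ # Assumption: caches.delete removes the cached content by resource name.
+ client.caches.delete(name=cached_content.name)
+ ```
+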
+ ## Tunings
+
+ `client.tunings` contains tuning job APIs and supports supervised fine-tuning
+ through `tune`.
+
+ ### Tune
+
+ - Vertex AI supports tuning from a GCS source
+ - The Gemini Developer API supports tuning from inline examples
+
+ ```python
+ if client.vertexai:
+     model = "gemini-1.5-pro-002"
+     training_dataset = types.TuningDataset(
+         gcs_uri="gs://cloud-samples-data/ai-platform/generative_ai/gemini-1_5/text/sft_train_data.jsonl",
+     )
+ else:
+     model = "models/gemini-1.0-pro-001"
+     training_dataset = types.TuningDataset(
+         examples=[
+             types.TuningExample(
+                 text_input=f"Input text {i}",
+                 output=f"Output text {i}",
+             )
+             for i in range(5)
+         ],
+     )
+ ```
+
+ ```python
+ tuning_job = client.tunings.tune(
+     base_model=model,
+     training_dataset=training_dataset,
+     config=types.CreateTuningJobConfig(
+         epoch_count=1, tuned_model_display_name="test_dataset_examples model"
+     ),
+ )
+ print(tuning_job)
+ ```
+
+ ### Get Tuning Job
+
+ ```python
+ tuning_job = client.tunings.get(name=tuning_job.name)
+ print(tuning_job)
+ ```
+
+ ```python
+ import time
+
+ running_states = set(
+     [
+         "JOB_STATE_PENDING",
+         "JOB_STATE_RUNNING",
+     ]
+ )
+
+ while tuning_job.state in running_states:
+     print(tuning_job.state)
+     tuning_job = client.tunings.get(name=tuning_job.name)
+     time.sleep(10)
+ ```
+
+ #### Use Tuned Model
+
+ ```python
+ response = client.models.generate_content(
+     model=tuning_job.tuned_model.endpoint,
+     contents="why is the sky blue?",
+ )
+
+ print(response.text)
+ ```
+
+ ### Get Tuned Model
+
+ ```python
+ tuned_model = client.models.get(model=tuning_job.tuned_model.model)
+ print(tuned_model)
+ ```
+
+ ### List Tuned Models
+
+ To retrieve base models, see [list base models](#list-base-models).
+
+ ```python
+ for model in client.models.list(config={"page_size": 10}):
+     print(model)
+ ```
+
+ ```python
+ pager = client.models.list(config={"page_size": 10})
+ print(pager.page_size)
+ print(pager[0])
+ pager.next_page()
+ print(pager[0])
+ ```
+
+ #### Async
+
+ ```python
+ async for model in await client.aio.models.list(config={"page_size": 10}):
+     print(model)
+ ```
+
+ ```python
+ async_pager = await client.aio.models.list(config={"page_size": 10})
+ print(async_pager.page_size)
+ print(async_pager[0])
+ await async_pager.next_page()
+ print(async_pager[0])
+ ```
+
+ ### Update Tuned Model
+
+ ```python
+ model = pager[0]
+
+ model = client.models.update(
+     model=model.name,
+     config=types.UpdateModelConfig(
+         display_name="my tuned model", description="my tuned model description"
+     ),
+ )
+
+ print(model)
+ ```
+
+ ### List Tuning Jobs
+
+ ```python
+ for job in client.tunings.list(config={"page_size": 10}):
+     print(job)
+ ```
+
+ ```python
+ pager = client.tunings.list(config={"page_size": 10})
+ print(pager.page_size)
+ print(pager[0])
+ pager.next_page()
+ print(pager[0])
+ ```
+
+ #### Async
+
+ ```python
+ async for job in await client.aio.tunings.list(config={"page_size": 10}):
+     print(job)
+ ```
+
+ ```python
+ async_pager = await client.aio.tunings.list(config={"page_size": 10})
+ print(async_pager.page_size)
+ print(async_pager[0])
+ await async_pager.next_page()
+ print(async_pager[0])
+ ```
+
+ ## Batch Prediction
+
+ Batch prediction is only supported in Vertex AI.
+
+ ### Create
+
+ ```python
+ # Specify model and source file only; destination and job display name will be auto-populated
+ job = client.batches.create(
+     model="gemini-1.5-flash-002",
+     src="bq://my-project.my-dataset.my-table",
+ )
+
+ job
+ ```
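+
+ To control where results land instead of relying on the auto-populated destination, a hedged sketch; the `dest` field on `CreateBatchJobConfig` is an assumption, so check your SDK version:
+
+ ```python
+ # Assumption: CreateBatchJobConfig accepts an explicit BigQuery destination.
+ job = client.batches.create(
+     model="gemini-1.5-flash-002",
+     src="bq://my-project.my-dataset.my-table",
+     config=types.CreateBatchJobConfig(
+         dest="bq://my-project.my-dataset.my-output-table"
+     ),
+ )
+ ```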
+
+ ```python
+ # Get a job by name
+ job = client.batches.get(name=job.name)
+
+ job.state
+ ```
+
+ ```python
+ import time
+
+ completed_states = set(
+     [
+         "JOB_STATE_SUCCEEDED",
+         "JOB_STATE_FAILED",
+         "JOB_STATE_CANCELLED",
+         "JOB_STATE_PAUSED",
+     ]
+ )
+
+ while job.state not in completed_states:
+     print(job.state)
+     job = client.batches.get(name=job.name)
+     time.sleep(30)
+
+ job
+ ```
+
+ ### List
+
+ ```python
+ for job in client.batches.list(config=types.ListBatchJobsConfig(page_size=10)):
+     print(job)
+ ```
+
+ ```python
+ pager = client.batches.list(config=types.ListBatchJobsConfig(page_size=10))
+ print(pager.page_size)
+ print(pager[0])
+ pager.next_page()
+ print(pager[0])
+ ```
+
+ #### Async
+
+ ```python
+ async for job in await client.aio.batches.list(
+     config=types.ListBatchJobsConfig(page_size=10)
+ ):
+     print(job)
+ ```
+
+ ```python
+ async_pager = await client.aio.batches.list(
+     config=types.ListBatchJobsConfig(page_size=10)
+ )
+ print(async_pager.page_size)
+ print(async_pager[0])
+ await async_pager.next_page()
+ print(async_pager[0])
+ ```
+
+ ### Delete
+
+ ```python
+ # Delete the job resource
+ delete_job = client.batches.delete(name=job.name)
+
+ delete_job
+ ```