google-genai 1.0.0__tar.gz → 1.3.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32)
  1. {google_genai-1.0.0 → google_genai-1.3.0}/PKG-INFO +300 -157
  2. {google_genai-1.0.0 → google_genai-1.3.0}/README.md +297 -156
  3. {google_genai-1.0.0 → google_genai-1.3.0}/google/genai/_api_client.py +152 -48
  4. {google_genai-1.0.0 → google_genai-1.3.0}/google/genai/_api_module.py +5 -0
  5. {google_genai-1.0.0 → google_genai-1.3.0}/google/genai/_automatic_function_calling_util.py +15 -15
  6. {google_genai-1.0.0 → google_genai-1.3.0}/google/genai/_common.py +14 -1
  7. {google_genai-1.0.0 → google_genai-1.3.0}/google/genai/_extra_utils.py +9 -4
  8. {google_genai-1.0.0 → google_genai-1.3.0}/google/genai/_replay_api_client.py +32 -1
  9. {google_genai-1.0.0 → google_genai-1.3.0}/google/genai/_transformers.py +71 -13
  10. {google_genai-1.0.0 → google_genai-1.3.0}/google/genai/batches.py +6 -3
  11. {google_genai-1.0.0 → google_genai-1.3.0}/google/genai/caches.py +13 -10
  12. {google_genai-1.0.0 → google_genai-1.3.0}/google/genai/chats.py +24 -8
  13. {google_genai-1.0.0 → google_genai-1.3.0}/google/genai/client.py +22 -11
  14. {google_genai-1.0.0 → google_genai-1.3.0}/google/genai/errors.py +18 -3
  15. {google_genai-1.0.0 → google_genai-1.3.0}/google/genai/files.py +8 -5
  16. {google_genai-1.0.0 → google_genai-1.3.0}/google/genai/live.py +64 -41
  17. {google_genai-1.0.0 → google_genai-1.3.0}/google/genai/models.py +679 -95
  18. google_genai-1.0.0/google/genai/_operations.py → google_genai-1.3.0/google/genai/operations.py +260 -20
  19. {google_genai-1.0.0 → google_genai-1.3.0}/google/genai/tunings.py +18 -78
  20. {google_genai-1.0.0 → google_genai-1.3.0}/google/genai/types.py +494 -15
  21. {google_genai-1.0.0 → google_genai-1.3.0}/google/genai/version.py +1 -1
  22. {google_genai-1.0.0 → google_genai-1.3.0}/google_genai.egg-info/PKG-INFO +300 -157
  23. {google_genai-1.0.0 → google_genai-1.3.0}/google_genai.egg-info/SOURCES.txt +1 -1
  24. {google_genai-1.0.0 → google_genai-1.3.0}/google_genai.egg-info/requires.txt +2 -0
  25. {google_genai-1.0.0 → google_genai-1.3.0}/pyproject.toml +3 -1
  26. {google_genai-1.0.0 → google_genai-1.3.0}/LICENSE +0 -0
  27. {google_genai-1.0.0 → google_genai-1.3.0}/google/genai/__init__.py +0 -0
  28. {google_genai-1.0.0 → google_genai-1.3.0}/google/genai/_test_api_client.py +0 -0
  29. {google_genai-1.0.0 → google_genai-1.3.0}/google/genai/pagers.py +0 -0
  30. {google_genai-1.0.0 → google_genai-1.3.0}/google_genai.egg-info/dependency_links.txt +0 -0
  31. {google_genai-1.0.0 → google_genai-1.3.0}/google_genai.egg-info/top_level.txt +0 -0
  32. {google_genai-1.0.0 → google_genai-1.3.0}/setup.cfg +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: google-genai
- Version: 1.0.0
+ Version: 1.3.0
  Summary: GenAI Python SDK
  Author-email: Google LLC <googleapis-packages@google.com>
  License: Apache-2.0
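As a quick post-upgrade sanity check, here is a minimal sketch using only the standard library's `importlib.metadata`; the two new dependencies it checks are introduced in the dependency hunk below.

```python
# Minimal sketch: confirm the installed versions after moving to 1.3.0.
from importlib.metadata import version

print(version('google-genai'))       # expect '1.3.0'
print(version('httpx'))              # new runtime dependency in 1.3.0
print(version('typing-extensions'))  # new runtime dependency in 1.3.0
```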
@@ -21,9 +21,11 @@ Requires-Python: >=3.9
  Description-Content-Type: text/markdown
  License-File: LICENSE
  Requires-Dist: google-auth<3.0.0dev,>=2.14.1
+ Requires-Dist: httpx<1.0.0dev,>=0.28.1
  Requires-Dist: pydantic<3.0.0dev,>=2.0.0
  Requires-Dist: requests<3.0.0dev,>=2.28.1
  Requires-Dist: websockets<15.0dev,>=13.0
+ Requires-Dist: typing-extensions<5.0.0dev,>=4.11.0

  # Google Gen AI SDK

@@ -56,31 +58,66 @@ different services ([Gemini Developer API](https://ai.google.dev/gemini-api/docs

  ```python
  # Only run this block for Gemini Developer API
- client = genai.Client(api_key="GEMINI_API_KEY")
+ client = genai.Client(api_key='GEMINI_API_KEY')
  ```

  ```python
  # Only run this block for Vertex AI API
  client = genai.Client(
-     vertexai=True, project="your-project-id", location="us-central1"
+     vertexai=True, project='your-project-id', location='us-central1'
  )
  ```

+ **(Optional) Using environment variables:**
+
+ You can create a client by configuring the necessary environment variables.
+ Configuration setup instructions depend on whether you're using the Gemini API
+ on Vertex AI or the ML Dev Gemini API.
+
+ **ML Dev Gemini API:** Set `GOOGLE_API_KEY` as shown below:
+
+ ```bash
+ export GOOGLE_API_KEY='your-api-key'
+ ```
+
+ **Vertex AI API:** Set `GOOGLE_GENAI_USE_VERTEXAI`, `GOOGLE_CLOUD_PROJECT`
+ and `GOOGLE_CLOUD_LOCATION`, as shown below:
+
+ ```bash
+ export GOOGLE_GENAI_USE_VERTEXAI=true
+ export GOOGLE_CLOUD_PROJECT='your-project-id'
+ export GOOGLE_CLOUD_LOCATION='us-central1'
+ ```
+
+ ```python
+ client = genai.Client()
+ ```
+
+ ### API Selection
+
+ By default, the SDK uses the beta API endpoints provided by Google to support
+ preview features in the APIs. The stable API endpoints can be selected by
+ setting the API version to `v1`.
+
  To set the API version use `http_options`. For example, to set the API version
  to `v1` for Vertex AI:

  ```python
  client = genai.Client(
-     vertexai=True, project='your-project-id', location='us-central1',
-     http_options={'api_version': 'v1'}
+     vertexai=True,
+     project='your-project-id',
+     location='us-central1',
+     http_options=types.HttpOptions(api_version='v1')
  )
  ```

- To set the API version to `v1alpha` for the Gemini API:
+ To set the API version to `v1alpha` for the Gemini Developer API:

  ```python
- client = genai.Client(api_key='GEMINI_API_KEY',
-     http_options={'api_version': 'v1alpha'})
+ client = genai.Client(
+     api_key='GEMINI_API_KEY',
+     http_options=types.HttpOptions(api_version='v1alpha')
+ )
  ```

  ## Types
@@ -99,7 +136,7 @@ The `client.models` module exposes model inferencing and model getters.

  ```python
  response = client.models.generate_content(
-     model="gemini-2.0-flash-exp", contents="why is the sky blue?"
+     model='gemini-2.0-flash-001', contents='Why is the sky blue?'
  )
  print(response.text)
  ```
@@ -107,29 +144,60 @@ print(response.text)
  #### with uploaded file (Gemini API only)
  Download the file in the console:

- ```cmd
+ ```sh
  !wget -q https://storage.googleapis.com/generativeai-downloads/data/a11.txt
  ```

  Python code:

  ```python
- file = client.files.upload(path="a11.txt")
+ file = client.files.upload(file='a11.txt')
  response = client.models.generate_content(
-     model="gemini-2.0-flash-exp",
-     contents=["Could you summarize this file?", file]
+     model='gemini-2.0-flash-001',
+     contents=['Could you summarize this file?', file]
  )
  print(response.text)
  ```

+ #### How to structure `contents`
+ There are several ways to structure the `contents` in your request.
+
+ Provide a single string as shown in the text example above:
+
+ ```python
+ contents='Can you recommend some things to do in Boston and New York in the winter?'
+ ```
+
+ Provide a single `Content` instance with multiple `Part` instances:
+
+ ```python
+ contents=types.Content(parts=[
+     types.Part.from_text(text='Can you recommend some things to do in Boston in the winter?'),
+     types.Part.from_text(text='Can you recommend some things to do in New York in the winter?')
+ ], role='user')
+ ```
+
+ When sending more than one input type, provide a list with multiple `Content`
+ instances:
+
+ ```python
+ contents=[
+     'What is this a picture of?',
+     types.Part.from_uri(
+         file_uri='gs://generativeai-downloads/images/scones.jpg',
+         mime_type='image/jpeg',
+     ),
+ ],
+ ```
+
  ### System Instructions and Other Configs

  ```python
  response = client.models.generate_content(
-     model="gemini-2.0-flash-exp",
-     contents="high",
+     model='gemini-2.0-flash-001',
+     contents='high',
      config=types.GenerateContentConfig(
-         system_instruction="I say high, you say low",
+         system_instruction='I say high, you say low',
          temperature=0.3,
      ),
  )
@@ -143,8 +211,8 @@ dictionaries. You can get the type from `google.genai.types`.

  ```python
  response = client.models.generate_content(
-     model="gemini-2.0-flash-exp",
-     contents=types.Part.from_text(text="Why is the sky blue?"),
+     model='gemini-2.0-flash-001',
+     contents=types.Part.from_text(text='Why is the sky blue?'),
      config=types.GenerateContentConfig(
          temperature=0,
          top_p=0.95,
@@ -152,7 +220,7 @@ response = client.models.generate_content(
          candidate_count=1,
          seed=5,
          max_output_tokens=100,
-         stop_sequences=["STOP!"],
+         stop_sequences=['STOP!'],
          presence_penalty=0.0,
          frequency_penalty=0.0,
      ),
@@ -171,7 +239,7 @@ for model in client.models.list():
  ```

  ```python
- pager = client.models.list(config={"page_size": 10})
+ pager = client.models.list(config={'page_size': 10})
  print(pager.page_size)
  print(pager[0])
  pager.next_page()
@@ -186,7 +254,7 @@ async for job in await client.aio.models.list():
  ```

  ```python
- async_pager = await client.aio.models.list(config={"page_size": 10})
+ async_pager = await client.aio.models.list(config={'page_size': 10})
  print(async_pager.page_size)
  print(async_pager[0])
  await async_pager.next_page()
@@ -197,13 +265,13 @@ print(async_pager[0])

  ```python
  response = client.models.generate_content(
-     model="gemini-2.0-flash-exp",
-     contents="Say something bad.",
+     model='gemini-2.0-flash-001',
+     contents='Say something bad.',
      config=types.GenerateContentConfig(
          safety_settings=[
              types.SafetySetting(
-                 category="HARM_CATEGORY_HATE_SPEECH",
-                 threshold="BLOCK_ONLY_HIGH",
+                 category='HARM_CATEGORY_HATE_SPEECH',
+                 threshold='BLOCK_ONLY_HIGH',
              )
          ]
      ),
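As context for the safety-settings change above, a hedged sketch of reading the ratings back from the response; the `safety_ratings` field on `types.Candidate` and the `category`/`probability` fields on `types.SafetyRating` are assumptions based on the SDK's types module.

```python
# Illustrative only: inspect per-category safety ratings on the first
# candidate. Field names are assumed from google.genai.types.
for rating in response.candidates[0].safety_ratings or []:
    print(rating.category, rating.probability)
```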
@@ -216,7 +284,7 @@ print(response.text)
  #### Automatic Python function Support
  You can pass a Python function directly and it will be automatically
- called and responded.
+ called and responded by default.

  ```python
  def get_current_weather(location: str) -> str:
@@ -225,17 +293,41 @@ def get_current_weather(location: str) -> str:
      Args:
          location: The city and state, e.g. San Francisco, CA
      """
-     return "sunny"
+     return 'sunny'


  response = client.models.generate_content(
-     model="gemini-2.0-flash-exp",
-     contents="What is the weather like in Boston?",
+     model='gemini-2.0-flash-001',
+     contents='What is the weather like in Boston?',
      config=types.GenerateContentConfig(tools=[get_current_weather]),
  )

  print(response.text)
  ```
+ #### Disabling automatic function calling
+ If you pass a Python function as a tool directly and do not want it to be
+ invoked automatically, you can disable automatic function calling as follows:
+
+ ```python
+ response = client.models.generate_content(
+     model='gemini-2.0-flash-001',
+     contents='What is the weather like in Boston?',
+     config=types.GenerateContentConfig(
+         tools=[get_current_weather],
+         automatic_function_calling=types.AutomaticFunctionCallingConfig(
+             disable=True
+         ),
+     ),
+ )
+ ```
+
+ With automatic function calling disabled, you will get a list of function call
+ parts in the response:
+
+ ```python
+ function_calls: Optional[List[types.FunctionCall]] = response.function_calls
+ ```
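A hypothetical sketch of dispatching those function call parts yourself, assuming the `get_current_weather` tool from the example above; `name` and `args` are fields of `types.FunctionCall`.

```python
# Sketch: manual dispatch when automatic function calling is disabled.
# Assumes get_current_weather is the only registered tool.
for function_call in response.function_calls or []:
    if function_call.name == 'get_current_weather':
        result = get_current_weather(**function_call.args)
        print(function_call.name, '->', result)
```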

  #### Manually declare and invoke a function for function calling

@@ -247,25 +339,25 @@ Then you will receive a function call part in the response.

  ```python
  function = types.FunctionDeclaration(
-     name="get_current_weather",
-     description="Get the current weather in a given location",
+     name='get_current_weather',
+     description='Get the current weather in a given location',
      parameters=types.Schema(
-         type="OBJECT",
+         type='OBJECT',
          properties={
-             "location": types.Schema(
-                 type="STRING",
-                 description="The city and state, e.g. San Francisco, CA",
+             'location': types.Schema(
+                 type='STRING',
+                 description='The city and state, e.g. San Francisco, CA',
              ),
          },
-         required=["location"],
+         required=['location'],
      ),
  )

  tool = types.Tool(function_declarations=[function])

  response = client.models.generate_content(
-     model="gemini-2.0-flash-exp",
-     contents="What is the weather like in Boston?",
+     model='gemini-2.0-flash-001',
+     contents='What is the weather like in Boston?',
      config=types.GenerateContentConfig(tools=[tool]),
  )

@@ -279,33 +371,34 @@ The following example shows how to do it for a simple function invocation.

  ```python
  user_prompt_content = types.Content(
-     role="user",
-     parts=[types.Part.from_text(text="What is the weather like in Boston?")],
+     role='user',
+     parts=[types.Part.from_text(text='What is the weather like in Boston?')],
  )
  function_call_part = response.function_calls[0]
+ function_call_content = response.candidates[0].content


  try:
      function_result = get_current_weather(
          **function_call_part.function_call.args
      )
-     function_response = {"result": function_result}
+     function_response = {'result': function_result}
  except (
      Exception
  ) as e:  # instead of raising the exception, you can let the model handle it
-     function_response = {"error": str(e)}
+     function_response = {'error': str(e)}


  function_response_part = types.Part.from_function_response(
-     name=function_call_part.function_call.name,
+     name=function_call_part.name,
      response=function_response,
  )
  function_response_content = types.Content(
-     role="tool", parts=[function_response_part]
+     role='tool', parts=[function_response_part]
  )

  response = client.models.generate_content(
-     model="gemini-2.0-flash-exp",
+     model='gemini-2.0-flash-001',
      contents=[
          user_prompt_content,
          function_call_content,
@@ -338,7 +431,7 @@ def get_current_weather(location: str) -> str:
      return "sunny"

  response = client.models.generate_content(
-     model="gemini-2.0-flash-exp",
+     model="gemini-2.0-flash-001",
      contents="What is the weather like in Boston?",
      config=types.GenerateContentConfig(
          tools=[get_current_weather],
@@ -366,7 +459,7 @@ def get_current_weather(location: str) -> str:
      return "sunny"

  response = client.models.generate_content(
-     model="gemini-2.0-flash-exp",
+     model="gemini-2.0-flash-001",
      contents="What is the weather like in Boston?",
      config=types.GenerateContentConfig(
          tools=[get_current_weather],
@@ -400,10 +493,10 @@ class CountryInfo(BaseModel):


  response = client.models.generate_content(
-     model="gemini-2.0-flash-exp",
-     contents="Give me information for the United States.",
+     model='gemini-2.0-flash-001',
+     contents='Give me information for the United States.',
      config=types.GenerateContentConfig(
-         response_mime_type="application/json",
+         response_mime_type='application/json',
          response_schema=CountryInfo,
      ),
  )
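When `response_schema` is a Pydantic model like `CountryInfo`, the SDK can also hand back a typed object rather than raw JSON text; a short sketch, where the `parsed` attribute and the field names are assumptions based on `types.GenerateContentResponse` and the schema shown below.

```python
# Sketch: typed access to structured output. The `parsed` attribute is
# an assumption based on types.GenerateContentResponse.
country_info = response.parsed  # a CountryInfo instance, or None on failure
if country_info is not None:
    print(country_info.name, country_info.capital)
```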
@@ -412,30 +505,30 @@ print(response.text)

  ```python
  response = client.models.generate_content(
-     model="gemini-2.0-flash-exp",
-     contents="Give me information for the United States.",
+     model='gemini-2.0-flash-001',
+     contents='Give me information for the United States.',
      config=types.GenerateContentConfig(
-         response_mime_type="application/json",
+         response_mime_type='application/json',
          response_schema={
-             "required": [
-                 "name",
-                 "population",
-                 "capital",
-                 "continent",
-                 "gdp",
-                 "official_language",
-                 "total_area_sq_mi",
+             'required': [
+                 'name',
+                 'population',
+                 'capital',
+                 'continent',
+                 'gdp',
+                 'official_language',
+                 'total_area_sq_mi',
              ],
-             "properties": {
-                 "name": {"type": "STRING"},
-                 "population": {"type": "INTEGER"},
-                 "capital": {"type": "STRING"},
-                 "continent": {"type": "STRING"},
-                 "gdp": {"type": "INTEGER"},
-                 "official_language": {"type": "STRING"},
-                 "total_area_sq_mi": {"type": "INTEGER"},
+             'properties': {
+                 'name': {'type': 'STRING'},
+                 'population': {'type': 'INTEGER'},
+                 'capital': {'type': 'STRING'},
+                 'continent': {'type': 'STRING'},
+                 'gdp': {'type': 'INTEGER'},
+                 'official_language': {'type': 'STRING'},
+                 'total_area_sq_mi': {'type': 'INTEGER'},
              },
-             "type": "OBJECT",
+             'type': 'OBJECT',
          },
      ),
  )
@@ -458,7 +551,7 @@ class InstrumentEnum(Enum):
      KEYBOARD = 'Keyboard'

  response = client.models.generate_content(
-     model='gemini-2.0-flash-exp',
+     model='gemini-2.0-flash-001',
      contents='What instrument plays multiple notes at once?',
      config={
          'response_mime_type': 'text/x.enum',
@@ -483,7 +576,7 @@ class InstrumentEnum(Enum):
      KEYBOARD = 'Keyboard'

  response = client.models.generate_content(
-     model='gemini-2.0-flash-exp',
+     model='gemini-2.0-flash-001',
      contents='What instrument plays multiple notes at once?',
      config={
          'response_mime_type': 'application/json',
@@ -499,9 +592,9 @@ print(response.text)

  ```python
  for chunk in client.models.generate_content_stream(
-     model="gemini-2.0-flash-exp", contents="Tell me a story in 300 words."
+     model='gemini-2.0-flash-001', contents='Tell me a story in 300 words.'
  ):
-     print(chunk.text, end="")
+     print(chunk.text, end='')
  ```

  #### Streaming for image content
@@ -511,35 +604,35 @@ you can use the `from_uri` class method to create a `Part` object.

  ```python
  for chunk in client.models.generate_content_stream(
-     model="gemini-2.0-flash-exp",
+     model='gemini-2.0-flash-001',
      contents=[
-         "What is this image about?",
+         'What is this image about?',
          types.Part.from_uri(
-             file_uri="gs://generativeai-downloads/images/scones.jpg",
-             mime_type="image/jpeg",
+             file_uri='gs://generativeai-downloads/images/scones.jpg',
+             mime_type='image/jpeg',
          ),
      ],
  ):
-     print(chunk.text, end="")
+     print(chunk.text, end='')
  ```

  If your image is stored in your local file system, you can read it in as bytes
  data and use the `from_bytes` class method to create a `Part` object.

  ```python
- YOUR_IMAGE_PATH = "your_image_path"
- YOUR_IMAGE_MIME_TYPE = "your_image_mime_type"
- with open(YOUR_IMAGE_PATH, "rb") as f:
+ YOUR_IMAGE_PATH = 'your_image_path'
+ YOUR_IMAGE_MIME_TYPE = 'your_image_mime_type'
+ with open(YOUR_IMAGE_PATH, 'rb') as f:
      image_bytes = f.read()

  for chunk in client.models.generate_content_stream(
-     model="gemini-2.0-flash-exp",
+     model='gemini-2.0-flash-001',
      contents=[
-         "What is this image about?",
+         'What is this image about?',
          types.Part.from_bytes(data=image_bytes, mime_type=YOUR_IMAGE_MIME_TYPE),
      ],
  ):
-     print(chunk.text, end="")
+     print(chunk.text, end='')
  ```

  ### Async
@@ -552,7 +645,7 @@ of `client.models.generate_content`

  ```python
  response = await client.aio.models.generate_content(
-     model="gemini-2.0-flash-exp", contents="Tell me a story in 300 words."
+     model='gemini-2.0-flash-001', contents='Tell me a story in 300 words.'
  )

  print(response.text)
@@ -562,17 +655,17 @@ print(response.text)

  ```python
  async for chunk in await client.aio.models.generate_content_stream(
-     model="gemini-2.0-flash-exp", contents="Tell me a story in 300 words."
+     model='gemini-2.0-flash-001', contents='Tell me a story in 300 words.'
  ):
-     print(chunk.text, end="")
+     print(chunk.text, end='')
  ```

  ### Count Tokens and Compute Tokens

  ```python
  response = client.models.count_tokens(
-     model="gemini-2.0-flash-exp",
-     contents="why is the sky blue?",
+     model='gemini-2.0-flash-001',
+     contents='why is the sky blue?',
  )
  print(response)
  ```
@@ -583,8 +676,8 @@ Compute tokens is only supported in Vertex AI.

  ```python
  response = client.models.compute_tokens(
-     model="gemini-2.0-flash-exp",
-     contents="why is the sky blue?",
+     model='gemini-2.0-flash-001',
+     contents='why is the sky blue?',
  )
  print(response)
  ```
@@ -593,8 +686,8 @@ print(response)

  ```python
  response = await client.aio.models.count_tokens(
-     model="gemini-2.0-flash-exp",
-     contents="why is the sky blue?",
+     model='gemini-2.0-flash-001',
+     contents='why is the sky blue?',
  )
  print(response)
  ```
@@ -603,8 +696,8 @@ print(response)

  ```python
  response = client.models.embed_content(
-     model="text-embedding-004",
-     contents="why is the sky blue?",
+     model='text-embedding-004',
+     contents='why is the sky blue?',
  )
  print(response)
  ```
@@ -612,8 +705,8 @@ print(response)
  ```python
  # multiple contents with config
  response = client.models.embed_content(
-     model="text-embedding-004",
-     contents=["why is the sky blue?", "What is your age?"],
+     model='text-embedding-004',
+     contents=['why is the sky blue?', 'What is your age?'],
      config=types.EmbedContentConfig(output_dimensionality=10),
  )

@@ -629,13 +722,12 @@ Support for generating images in the Gemini Developer API is behind an allowlist
  ```python
  # Generate Image
  response1 = client.models.generate_images(
-     model="imagen-3.0-generate-002",
-     prompt="An umbrella in the foreground, and a rainy night sky in the background",
+     model='imagen-3.0-generate-002',
+     prompt='An umbrella in the foreground, and a rainy night sky in the background',
      config=types.GenerateImagesConfig(
-         negative_prompt="human",
          number_of_images=1,
          include_rai_reason=True,
-         output_mime_type="image/jpeg",
+         output_mime_type='image/jpeg',
      ),
  )
  response1.generated_images[0].image.show()
@@ -648,12 +740,12 @@ Upscale image is only supported in Vertex AI.
  ```python
  # Upscale the generated image from above
  response2 = client.models.upscale_image(
-     model="imagen-3.0-generate-001",
+     model='imagen-3.0-generate-001',
      image=response1.generated_images[0].image,
-     upscale_factor="x2",
+     upscale_factor='x2',
      config=types.UpscaleImageConfig(
          include_rai_reason=True,
-         output_mime_type="image/jpeg",
+         output_mime_type='image/jpeg',
      ),
  )
  response2.generated_images[0].image.show()
@@ -678,26 +770,53 @@ raw_ref_image = RawReferenceImage(
  mask_ref_image = MaskReferenceImage(
      reference_id=2,
      config=types.MaskReferenceConfig(
-         mask_mode="MASK_MODE_BACKGROUND",
+         mask_mode='MASK_MODE_BACKGROUND',
          mask_dilation=0,
      ),
  )

  response3 = client.models.edit_image(
-     model="imagen-3.0-capability-001",
-     prompt="Sunlight and clear sky",
+     model='imagen-3.0-capability-001',
+     prompt='Sunlight and clear sky',
      reference_images=[raw_ref_image, mask_ref_image],
      config=types.EditImageConfig(
-         edit_mode="EDIT_MODE_INPAINT_INSERTION",
+         edit_mode='EDIT_MODE_INPAINT_INSERTION',
          number_of_images=1,
-         negative_prompt="human",
          include_rai_reason=True,
-         output_mime_type="image/jpeg",
+         output_mime_type='image/jpeg',
      ),
  )
  response3.generated_images[0].image.show()
  ```

+ ### Veo
+
+ #### Generate Videos
+
+ Support for generating videos in Vertex AI and the Gemini Developer API is behind an allowlist
+
+ ```python
+ # Create operation
+ operation = client.models.generate_videos(
+     model='veo-2.0-generate-001',
+     prompt='A neon hologram of a cat driving at top speed',
+     config=types.GenerateVideosConfig(
+         number_of_videos=1,
+         fps=24,
+         duration_seconds=5,
+         enhance_prompt=True,
+     ),
+ )
+
+ # Poll operation
+ while not operation.done:
+     time.sleep(20)
+     operation = client.operations.get(operation)
+
+ video = operation.result.generated_videos[0].video
+ video.show()
+ ```
+
  ## Chats

  Create a chat session to start multi-turn conversations with the model.
@@ -705,32 +824,32 @@ Create a chat session to start multi-turn conversations with the model.
  ### Send Message

  ```python
- chat = client.chats.create(model="gemini-2.0-flash-exp")
- response = chat.send_message("tell me a story")
+ chat = client.chats.create(model='gemini-2.0-flash-001')
+ response = chat.send_message('tell me a story')
  print(response.text)
  ```

  ### Streaming

  ```python
- chat = client.chats.create(model="gemini-2.0-flash-exp")
- for chunk in chat.send_message_stream("tell me a story"):
+ chat = client.chats.create(model='gemini-2.0-flash-001')
+ for chunk in chat.send_message_stream('tell me a story'):
      print(chunk.text)
  ```

  ### Async

  ```python
- chat = client.aio.chats.create(model="gemini-2.0-flash-exp")
- response = await chat.send_message("tell me a story")
+ chat = client.aio.chats.create(model='gemini-2.0-flash-001')
+ response = await chat.send_message('tell me a story')
  print(response.text)
  ```

  ### Async Streaming

  ```python
- chat = client.aio.chats.create(model="gemini-2.0-flash-exp")
- async for chunk in await chat.send_message_stream("tell me a story"):
+ chat = client.aio.chats.create(model='gemini-2.0-flash-001')
+ async for chunk in await chat.send_message_stream('tell me a story'):
      print(chunk.text)
  ```

@@ -746,17 +865,24 @@ Files are only supported in Gemini Developer API.
  ### Upload

  ```python
- file1 = client.files.upload(path="2312.11805v3.pdf")
- file2 = client.files.upload(path="2403.05530.pdf")
+ file1 = client.files.upload(file='2312.11805v3.pdf')
+ file2 = client.files.upload(file='2403.05530.pdf')

  print(file1)
  print(file2)
  ```

+ ### Get
+
+ ```python
+ file1 = client.files.upload(file='2312.11805v3.pdf')
+ file_info = client.files.get(name=file1.name)
+ ```
+
  ### Delete

  ```python
- file3 = client.files.upload(path="2312.11805v3.pdf")
+ file3 = client.files.upload(file='2312.11805v3.pdf')

  client.files.delete(name=file3.name)
  ```
@@ -770,32 +896,32 @@ client.files.delete(name=file3.name)
  ```python
  if client.vertexai:
      file_uris = [
-         "gs://cloud-samples-data/generative-ai/pdf/2312.11805v3.pdf",
-         "gs://cloud-samples-data/generative-ai/pdf/2403.05530.pdf",
+         'gs://cloud-samples-data/generative-ai/pdf/2312.11805v3.pdf',
+         'gs://cloud-samples-data/generative-ai/pdf/2403.05530.pdf',
      ]
  else:
      file_uris = [file1.uri, file2.uri]

  cached_content = client.caches.create(
-     model="gemini-1.5-pro-002",
+     model='gemini-1.5-pro-002',
      config=types.CreateCachedContentConfig(
          contents=[
              types.Content(
-                 role="user",
+                 role='user',
                  parts=[
                      types.Part.from_uri(
-                         file_uri=file_uris[0], mime_type="application/pdf"
+                         file_uri=file_uris[0], mime_type='application/pdf'
                      ),
                      types.Part.from_uri(
                          file_uri=file_uris[1],
-                         mime_type="application/pdf",
+                         mime_type='application/pdf',
                      ),
                  ],
              )
          ],
-         system_instruction="What is the sum of the two pdfs?",
-         display_name="test cache",
-         ttl="3600s",
+         system_instruction='What is the sum of the two pdfs?',
+         display_name='test cache',
+         ttl='3600s',
      ),
  )
  ```
@@ -810,8 +936,8 @@ cached_content = client.caches.get(name=cached_content.name)

  ```python
  response = client.models.generate_content(
-     model="gemini-1.5-pro-002",
-     contents="Summarize the pdfs",
+     model='gemini-1.5-pro-002',
+     contents='Summarize the pdfs',
      config=types.GenerateContentConfig(
          cached_content=cached_content.name,
      ),
@@ -831,17 +957,17 @@ tuning through `tune`.

  ```python
  if client.vertexai:
-     model = "gemini-1.5-pro-002"
+     model = 'gemini-1.5-pro-002'
      training_dataset = types.TuningDataset(
-         gcs_uri="gs://cloud-samples-data/ai-platform/generative_ai/gemini-1_5/text/sft_train_data.jsonl",
+         gcs_uri='gs://cloud-samples-data/ai-platform/generative_ai/gemini-1_5/text/sft_train_data.jsonl',
      )
  else:
-     model = "models/gemini-1.0-pro-001"
+     model = 'models/gemini-1.0-pro-001'
      training_dataset = types.TuningDataset(
          examples=[
              types.TuningExample(
-                 text_input=f"Input text {i}",
-                 output=f"Output text {i}",
+                 text_input=f'Input text {i}',
+                 output=f'Output text {i}',
              )
              for i in range(5)
          ],
@@ -853,7 +979,7 @@ tuning_job = client.tunings.tune(
      base_model=model,
      training_dataset=training_dataset,
      config=types.CreateTuningJobConfig(
-         epoch_count=1, tuned_model_display_name="test_dataset_examples model"
+         epoch_count=1, tuned_model_display_name='test_dataset_examples model'
      ),
  )
  print(tuning_job)
@@ -871,8 +997,8 @@ import time

  running_states = set(
      [
-         "JOB_STATE_PENDING",
-         "JOB_STATE_RUNNING",
+         'JOB_STATE_PENDING',
+         'JOB_STATE_RUNNING',
      ]
  )

@@ -887,7 +1013,7 @@ while tuning_job.state in running_states:
  ```python
  response = client.models.generate_content(
      model=tuning_job.tuned_model.endpoint,
-     contents="why is the sky blue?",
+     contents='why is the sky blue?',
  )

  print(response.text)
@@ -905,12 +1031,12 @@ print(tuned_model)

  To retrieve base models, see [list base models](#list-base-models).

  ```python
- for model in client.models.list(config={"page_size": 10, "query_base": False}):
+ for model in client.models.list(config={'page_size': 10, 'query_base': False}):
      print(model)
  ```

  ```python
- pager = client.models.list(config={"page_size": 10, "query_base": False})
+ pager = client.models.list(config={'page_size': 10, 'query_base': False})
  print(pager.page_size)
  print(pager[0])
  pager.next_page()
@@ -920,12 +1046,12 @@ print(pager[0])

  #### Async

  ```python
- async for job in await client.aio.models.list(config={"page_size": 10, "query_base": False}):
+ async for job in await client.aio.models.list(config={'page_size': 10, 'query_base': False}):
      print(job)
  ```

  ```python
- async_pager = await client.aio.models.list(config={"page_size": 10, "query_base": False})
+ async_pager = await client.aio.models.list(config={'page_size': 10, 'query_base': False})
  print(async_pager.page_size)
  print(async_pager[0])
  await async_pager.next_page()
940
1066
  model = client.models.update(
941
1067
  model=model.name,
942
1068
  config=types.UpdateModelConfig(
943
- display_name="my tuned model", description="my tuned model description"
1069
+ display_name='my tuned model', description='my tuned model description'
944
1070
  ),
945
1071
  )
946
1072
 
@@ -951,12 +1077,12 @@ print(model)

  ### List Tuning Jobs

  ```python
- for job in client.tunings.list(config={"page_size": 10}):
+ for job in client.tunings.list(config={'page_size': 10}):
      print(job)
  ```

  ```python
- pager = client.tunings.list(config={"page_size": 10})
+ pager = client.tunings.list(config={'page_size': 10})
  print(pager.page_size)
  print(pager[0])
  pager.next_page()
@@ -966,12 +1092,12 @@ print(pager[0])

  #### Async

  ```python
- async for job in await client.aio.tunings.list(config={"page_size": 10}):
+ async for job in await client.aio.tunings.list(config={'page_size': 10}):
      print(job)
  ```

  ```python
- async_pager = await client.aio.tunings.list(config={"page_size": 10})
+ async_pager = await client.aio.tunings.list(config={'page_size': 10})
  print(async_pager.page_size)
  print(async_pager[0])
  await async_pager.next_page()
@@ -987,8 +1113,8 @@ Only supported in Vertex AI.

  ```python
  # Specify model and source file only, destination and job display name will be auto-populated
  job = client.batches.create(
-     model="gemini-1.5-flash-002",
-     src="bq://my-project.my-dataset.my-table",
+     model='gemini-1.5-flash-002',
+     src='bq://my-project.my-dataset.my-table',
  )

  job
@@ -1004,10 +1130,10 @@ job.state
  ```python
  completed_states = set(
      [
-         "JOB_STATE_SUCCEEDED",
-         "JOB_STATE_FAILED",
-         "JOB_STATE_CANCELLED",
-         "JOB_STATE_PAUSED",
+         'JOB_STATE_SUCCEEDED',
+         'JOB_STATE_FAILED',
+         'JOB_STATE_CANCELLED',
+         'JOB_STATE_PAUSED',
      ]
  )
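The polling loop that consumes `completed_states` falls between the hunks shown here; the following is a hedged reconstruction that mirrors the tuning-job loop earlier in the README, not the package's literal code.

```python
# Sketch of the elided polling loop, mirroring the tuning-job pattern above.
import time

while job.state not in completed_states:
    time.sleep(30)
    job = client.batches.get(name=job.name)

print(job.state)
```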
@@ -1061,3 +1187,20 @@ delete_job = client.batches.delete(name=job.name)

  delete_job
  ```
+
+ ## Error Handling
+
+ To handle errors raised by the model service, the SDK provides the [APIError](https://github.com/googleapis/python-genai/blob/main/google/genai/errors.py) class.
+
+ ```python
+ from google.genai import errors
+
+ try:
+     client.models.generate_content(
+         model="invalid-model-name",
+         contents="What is your name?",
+     )
+ except errors.APIError as e:
+     print(e.code)  # 404
+     print(e.message)
+ ```