google-genai 1.45.0__tar.gz → 1.46.0__tar.gz

This diff compares the contents of two package versions as publicly released to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in that registry.
Files changed (46)
  1. {google_genai-1.45.0/google_genai.egg-info → google_genai-1.46.0}/PKG-INFO +25 -4
  2. {google_genai-1.45.0 → google_genai-1.46.0}/README.md +23 -1
  3. {google_genai-1.45.0 → google_genai-1.46.0}/google/genai/_api_client.py +9 -2
  4. {google_genai-1.45.0 → google_genai-1.46.0}/google/genai/_live_converters.py +5 -0
  5. {google_genai-1.45.0 → google_genai-1.46.0}/google/genai/models.py +40 -3
  6. {google_genai-1.45.0 → google_genai-1.46.0}/google/genai/tunings.py +0 -30
  7. {google_genai-1.45.0 → google_genai-1.46.0}/google/genai/types.py +143 -82
  8. {google_genai-1.45.0 → google_genai-1.46.0}/google/genai/version.py +1 -1
  9. {google_genai-1.45.0 → google_genai-1.46.0/google_genai.egg-info}/PKG-INFO +25 -4
  10. {google_genai-1.45.0 → google_genai-1.46.0}/google_genai.egg-info/SOURCES.txt +1 -0
  11. {google_genai-1.45.0 → google_genai-1.46.0}/pyproject.toml +13 -10
  12. google_genai-1.46.0/setup.cfg +10 -0
  13. google_genai-1.45.0/setup.cfg +0 -4
  14. {google_genai-1.45.0 → google_genai-1.46.0}/LICENSE +0 -0
  15. {google_genai-1.45.0 → google_genai-1.46.0}/MANIFEST.in +0 -0
  16. {google_genai-1.45.0 → google_genai-1.46.0}/google/genai/__init__.py +0 -0
  17. {google_genai-1.45.0 → google_genai-1.46.0}/google/genai/_adapters.py +0 -0
  18. {google_genai-1.45.0 → google_genai-1.46.0}/google/genai/_api_module.py +0 -0
  19. {google_genai-1.45.0 → google_genai-1.46.0}/google/genai/_automatic_function_calling_util.py +0 -0
  20. {google_genai-1.45.0 → google_genai-1.46.0}/google/genai/_base_transformers.py +0 -0
  21. {google_genai-1.45.0 → google_genai-1.46.0}/google/genai/_base_url.py +0 -0
  22. {google_genai-1.45.0 → google_genai-1.46.0}/google/genai/_common.py +0 -0
  23. {google_genai-1.45.0 → google_genai-1.46.0}/google/genai/_extra_utils.py +0 -0
  24. {google_genai-1.45.0 → google_genai-1.46.0}/google/genai/_local_tokenizer_loader.py +0 -0
  25. {google_genai-1.45.0 → google_genai-1.46.0}/google/genai/_mcp_utils.py +0 -0
  26. {google_genai-1.45.0 → google_genai-1.46.0}/google/genai/_operations_converters.py +0 -0
  27. {google_genai-1.45.0 → google_genai-1.46.0}/google/genai/_replay_api_client.py +0 -0
  28. {google_genai-1.45.0 → google_genai-1.46.0}/google/genai/_test_api_client.py +0 -0
  29. {google_genai-1.45.0 → google_genai-1.46.0}/google/genai/_tokens_converters.py +0 -0
  30. {google_genai-1.45.0 → google_genai-1.46.0}/google/genai/_transformers.py +0 -0
  31. {google_genai-1.45.0 → google_genai-1.46.0}/google/genai/batches.py +0 -0
  32. {google_genai-1.45.0 → google_genai-1.46.0}/google/genai/caches.py +0 -0
  33. {google_genai-1.45.0 → google_genai-1.46.0}/google/genai/chats.py +0 -0
  34. {google_genai-1.45.0 → google_genai-1.46.0}/google/genai/client.py +0 -0
  35. {google_genai-1.45.0 → google_genai-1.46.0}/google/genai/errors.py +0 -0
  36. {google_genai-1.45.0 → google_genai-1.46.0}/google/genai/files.py +0 -0
  37. {google_genai-1.45.0 → google_genai-1.46.0}/google/genai/live.py +0 -0
  38. {google_genai-1.45.0 → google_genai-1.46.0}/google/genai/live_music.py +0 -0
  39. {google_genai-1.45.0 → google_genai-1.46.0}/google/genai/local_tokenizer.py +0 -0
  40. {google_genai-1.45.0 → google_genai-1.46.0}/google/genai/operations.py +0 -0
  41. {google_genai-1.45.0 → google_genai-1.46.0}/google/genai/pagers.py +0 -0
  42. {google_genai-1.45.0 → google_genai-1.46.0}/google/genai/py.typed +0 -0
  43. {google_genai-1.45.0 → google_genai-1.46.0}/google/genai/tokens.py +0 -0
  44. {google_genai-1.45.0 → google_genai-1.46.0}/google_genai.egg-info/dependency_links.txt +0 -0
  45. {google_genai-1.45.0 → google_genai-1.46.0}/google_genai.egg-info/requires.txt +0 -0
  46. {google_genai-1.45.0 → google_genai-1.46.0}/google_genai.egg-info/top_level.txt +0 -0
@@ -1,12 +1,11 @@
 Metadata-Version: 2.4
 Name: google-genai
-Version: 1.45.0
+Version: 1.46.0
 Summary: GenAI Python SDK
 Author-email: Google LLC <googleapis-packages@google.com>
-License: Apache-2.0
+License-Expression: Apache-2.0
 Project-URL: Homepage, https://github.com/googleapis/python-genai
 Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: Apache Software License
 Classifier: Operating System :: OS Independent
 Classifier: Programming Language :: Python
 Classifier: Programming Language :: Python :: 3
@@ -312,7 +311,7 @@ See the 'Create a client' section above to initialize a client.
 
 ### Generate Content
 
-#### with text content
+#### with text content input (text output)
 
 ```python
 response = client.models.generate_content(
@@ -321,6 +320,28 @@ response = client.models.generate_content(
 print(response.text)
 ```
 
+#### with text content input (image output)
+
+```python
+from google.genai import types
+
+response = client.models.generate_content(
+    model='gemini-2.5-flash-image',
+    contents='A cartoon infographic for flying sneakers',
+    config=types.GenerateContentConfig(
+        response_modalities=["IMAGE"],
+        image_config=types.ImageConfig(
+            aspect_ratio="9:16",
+        ),
+    ),
+)
+
+for part in response.parts:
+  if part.inline_data:
+    generated_image = part.as_image()
+    generated_image.show()
+```
+
 #### with uploaded file (Gemini Developer API only)
 download the file in console.
 
@@ -274,7 +274,7 @@ See the 'Create a client' section above to initialize a client.
 
 ### Generate Content
 
-#### with text content
+#### with text content input (text output)
 
 ```python
 response = client.models.generate_content(
@@ -283,6 +283,28 @@ response = client.models.generate_content(
 print(response.text)
 ```
 
+#### with text content input (image output)
+
+```python
+from google.genai import types
+
+response = client.models.generate_content(
+    model='gemini-2.5-flash-image',
+    contents='A cartoon infographic for flying sneakers',
+    config=types.GenerateContentConfig(
+        response_modalities=["IMAGE"],
+        image_config=types.ImageConfig(
+            aspect_ratio="9:16",
+        ),
+    ),
+)
+
+for part in response.parts:
+  if part.inline_data:
+    generated_image = part.as_image()
+    generated_image.show()
+```
+
 #### with uploaded file (Gemini Developer API only)
 download the file in console.
 
@@ -693,8 +693,15 @@ class BaseApiClient:
         self._http_options
     )
     self._async_httpx_client_args = async_client_args
-    self._httpx_client = SyncHttpxClient(**client_args)
-    self._async_httpx_client = AsyncHttpxClient(**async_client_args)
+
+    if self._http_options.httpx_client:
+      self._httpx_client = self._http_options.httpx_client
+    else:
+      self._httpx_client = SyncHttpxClient(**client_args)
+    if self._http_options.httpx_async_client:
+      self._async_httpx_client = self._http_options.httpx_async_client
+    else:
+      self._async_httpx_client = AsyncHttpxClient(**async_client_args)
     if self._use_aiohttp():
      # Do it once at the genai.Client level. Share among all requests.
      self._async_client_session_request_args = self._ensure_aiohttp_ssl_ctx(
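The hunk above adds client injection: when `http_options.httpx_client` or `http_options.httpx_async_client` is set (new `HttpOptions` fields, see the types.py hunk further down), `BaseApiClient` uses the supplied client instead of building its own. A minimal usage sketch; the timeout value and placeholder API key are illustrative only:

```python
import httpx

from google import genai
from google.genai import types

# Pre-configured httpx clients; the timeout here is a placeholder, not a default.
sync_client = httpx.Client(timeout=30.0)
async_client = httpx.AsyncClient(timeout=30.0)

client = genai.Client(
    api_key='YOUR_API_KEY',  # placeholder
    http_options=types.HttpOptions(
        httpx_client=sync_client,
        httpx_async_client=async_client,
    ),
)
```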
@@ -226,6 +226,11 @@ def _GenerationConfig_to_vertex(
   if getv(from_object, ['top_p']) is not None:
     setv(to_object, ['topP'], getv(from_object, ['top_p']))
 
+  if getv(from_object, ['enable_enhanced_civic_answers']) is not None:
+    raise ValueError(
+        'enable_enhanced_civic_answers parameter is not supported in Vertex AI.'
+    )
+
   return to_object
 
 
@@ -2398,6 +2398,11 @@ def _GenerationConfig_to_vertex(
   if getv(from_object, ['top_p']) is not None:
     setv(to_object, ['topP'], getv(from_object, ['top_p']))
 
+  if getv(from_object, ['enable_enhanced_civic_answers']) is not None:
+    raise ValueError(
+        'enable_enhanced_civic_answers parameter is not supported in Vertex AI.'
+    )
+
   return to_object
 
 
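Both converters now reject the new `enable_enhanced_civic_answers` flag on the Vertex AI path. A minimal sketch of what this means for callers; the flag itself is declared on `types.GenerationConfig` in the types.py hunks further down:

```python
from google.genai import types

# Setting the flag is plain field assignment...
config = types.GenerationConfig(enable_enhanced_civic_answers=True)

# ...but per the converters patched above, translating this config for
# Vertex AI raises:
#   ValueError: enable_enhanced_civic_answers parameter is not supported in Vertex AI.
# So the flag is only meaningful against the Gemini Developer API.
```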
@@ -6856,7 +6861,7 @@ class AsyncModels(_api_module.BaseModule):
       # * Everlasting Florals
       # * Timeless Petals
 
-      async for chunk in awiat client.aio.models.generate_content_stream(
+      async for chunk in await client.aio.models.generate_content_stream(
          model='gemini-2.0-flash',
          contents=[
              types.Part.from_text('What is shown in this image?'),
@@ -6898,9 +6903,11 @@ class AsyncModels(_api_module.BaseModule):
         response = await self._generate_content_stream(
             model=model, contents=contents, config=config
         )
-        logger.info(f'AFC remote call {i} is done.')
+        # TODO: b/453739108 - make AFC logic more robust like the other 3 methods.
+        if i > 1:
+          logger.info(f'AFC remote call {i} is done.')
         remaining_remote_calls_afc -= 1
-        if remaining_remote_calls_afc == 0:
+        if i > 1 and remaining_remote_calls_afc == 0:
           logger.info(
               'Reached max remote calls for automatic function calling.'
           )
@@ -7270,6 +7277,36 @@
           'Source and prompt/image/video are mutually exclusive.'
           + ' Please only use source.'
       )
+    # Gemini Developer API does not support video bytes.
+    video_dct: dict[str, Any] = {}
+    if not self._api_client.vertexai and video:
+      if isinstance(video, types.Video):
+        video_dct = video.model_dump()
+      else:
+        video_dct = dict(video)
+
+      if video_dct.get('uri') and video_dct.get('video_bytes'):
+        video = types.Video(
+            uri=video_dct.get('uri'), mime_type=video_dct.get('mime_type')
+        )
+    elif not self._api_client.vertexai and source:
+      if isinstance(source, types.GenerateVideosSource):
+        source_dct = source.model_dump()
+        video_dct = source_dct.get('video', {})
+      else:
+        source_dct = dict(source)
+        if isinstance(source_dct.get('video'), types.Video):
+          video_obj: types.Video = source_dct.get('video', types.Video())
+          video_dct = video_obj.model_dump()
+      if video_dct and video_dct.get('uri') and video_dct.get('video_bytes'):
+        source = types.GenerateVideosSource(
+            prompt=source_dct.get('prompt'),
+            image=source_dct.get('image'),
+            video=types.Video(
+                uri=video_dct.get('uri'),
+                mime_type=video_dct.get('mime_type'),
+            ),
+        )
     return await self._generate_videos(
         model=model,
         prompt=prompt,
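The block above normalizes video inputs for the Gemini Developer API, which does not accept raw video bytes: when a `Video` (passed directly or inside a `GenerateVideosSource`) carries both `uri` and `video_bytes`, only `uri` and `mime_type` are forwarded. A hedged sketch of the caller-visible effect; the URI and bytes below are placeholders:

```python
from google.genai import types

video = types.Video(
    uri='https://example.com/clip.mp4',  # kept
    video_bytes=b'<raw bytes>',          # dropped on the Developer API path
    mime_type='video/mp4',               # kept
)
# On a non-Vertex client, generate_videos now forwards only uri + mime_type
# from this object; on Vertex AI it passes through unchanged.
```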
@@ -551,36 +551,6 @@ def _TuningJob_from_mldev(
       _TunedModel_from_mldev(getv(from_object, ['_self']), to_object),
   )
 
-  if getv(from_object, ['customBaseModel']) is not None:
-    setv(
-        to_object, ['custom_base_model'], getv(from_object, ['customBaseModel'])
-    )
-
-  if getv(from_object, ['experiment']) is not None:
-    setv(to_object, ['experiment'], getv(from_object, ['experiment']))
-
-  if getv(from_object, ['labels']) is not None:
-    setv(to_object, ['labels'], getv(from_object, ['labels']))
-
-  if getv(from_object, ['outputUri']) is not None:
-    setv(to_object, ['output_uri'], getv(from_object, ['outputUri']))
-
-  if getv(from_object, ['pipelineJob']) is not None:
-    setv(to_object, ['pipeline_job'], getv(from_object, ['pipelineJob']))
-
-  if getv(from_object, ['serviceAccount']) is not None:
-    setv(to_object, ['service_account'], getv(from_object, ['serviceAccount']))
-
-  if getv(from_object, ['tunedModelDisplayName']) is not None:
-    setv(
-        to_object,
-        ['tuned_model_display_name'],
-        getv(from_object, ['tunedModelDisplayName']),
-    )
-
-  if getv(from_object, ['veoTuningSpec']) is not None:
-    setv(to_object, ['veo_tuning_spec'], getv(from_object, ['veoTuningSpec']))
-
   return to_object
 
 
@@ -89,6 +89,27 @@ else:
 except ImportError:
   yaml = None
 
+_is_httpx_imported = False
+if typing.TYPE_CHECKING:
+  import httpx
+
+  HttpxClient = httpx.Client
+  HttpxAsyncClient = httpx.AsyncClient
+  _is_httpx_imported = True
+else:
+  HttpxClient: typing.Type = Any
+  HttpxAsyncClient: typing.Type = Any
+
+  try:
+    import httpx
+
+    HttpxClient = httpx.Client
+    HttpxAsyncClient = httpx.AsyncClient
+    _is_httpx_imported = True
+  except ImportError:
+    HttpxClient = None
+    HttpxAsyncClient = None
+
 logger = logging.getLogger('google_genai.types')
 
 T = typing.TypeVar('T', bound='GenerateContentResponse')
@@ -118,6 +139,19 @@ class Language(_common.CaseInSensitiveEnum):
   """Python >= 3.10, with numpy and simpy available."""
 
 
+class FunctionResponseScheduling(_common.CaseInSensitiveEnum):
+  """Specifies how the response should be scheduled in the conversation."""
+
+  SCHEDULING_UNSPECIFIED = 'SCHEDULING_UNSPECIFIED'
+  """This value is unused."""
+  SILENT = 'SILENT'
+  """Only add the result to the conversation context, do not interrupt or trigger generation."""
+  WHEN_IDLE = 'WHEN_IDLE'
+  """Add the result to the conversation context, and prompt to generate output without interrupting ongoing generation."""
+  INTERRUPT = 'INTERRUPT'
+  """Add the result to the conversation context, interrupt ongoing generation and prompt to generate output."""
+
+
 class Type(_common.CaseInSensitiveEnum):
   """Optional. The type of the data."""
 
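This hunk, together with the matching removal further down, only moves `FunctionResponseScheduling` earlier in types.py; the enum values are unchanged. A usage sketch, assuming (this is not shown in the diff) that `types.FunctionResponse` still exposes a `scheduling` field for Live API tool responses:

```python
from google.genai import types

# Hypothetical tool result for a live session; the name and payload are
# illustrative, and the 'scheduling' field is assumed, not from this diff.
fn_response = types.FunctionResponse(
    name='get_weather',
    response={'temp_c': 21},
    scheduling=types.FunctionResponseScheduling.WHEN_IDLE,
)
```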
@@ -144,14 +178,14 @@ class HarmCategory(_common.CaseInSensitiveEnum):
 
   HARM_CATEGORY_UNSPECIFIED = 'HARM_CATEGORY_UNSPECIFIED'
   """The harm category is unspecified."""
-  HARM_CATEGORY_HATE_SPEECH = 'HARM_CATEGORY_HATE_SPEECH'
-  """The harm category is hate speech."""
-  HARM_CATEGORY_DANGEROUS_CONTENT = 'HARM_CATEGORY_DANGEROUS_CONTENT'
-  """The harm category is dangerous content."""
   HARM_CATEGORY_HARASSMENT = 'HARM_CATEGORY_HARASSMENT'
   """The harm category is harassment."""
+  HARM_CATEGORY_HATE_SPEECH = 'HARM_CATEGORY_HATE_SPEECH'
+  """The harm category is hate speech."""
   HARM_CATEGORY_SEXUALLY_EXPLICIT = 'HARM_CATEGORY_SEXUALLY_EXPLICIT'
   """The harm category is sexually explicit content."""
+  HARM_CATEGORY_DANGEROUS_CONTENT = 'HARM_CATEGORY_DANGEROUS_CONTENT'
+  """The harm category is dangerous content."""
   HARM_CATEGORY_CIVIC_INTEGRITY = 'HARM_CATEGORY_CIVIC_INTEGRITY'
   """Deprecated: Election filter is not longer supported. The harm category is civic integrity."""
   HARM_CATEGORY_IMAGE_HATE = 'HARM_CATEGORY_IMAGE_HATE'
@@ -166,6 +200,8 @@ class HarmCategory(_common.CaseInSensitiveEnum):
       'HARM_CATEGORY_IMAGE_SEXUALLY_EXPLICIT'
   )
   """The harm category is image sexually explicit content."""
+  HARM_CATEGORY_JAILBREAK = 'HARM_CATEGORY_JAILBREAK'
+  """The harm category is for jailbreak prompts."""
 
 
 class HarmBlockMethod(_common.CaseInSensitiveEnum):
@@ -322,20 +358,24 @@ class HarmSeverity(_common.CaseInSensitiveEnum):
 
 
 class BlockedReason(_common.CaseInSensitiveEnum):
-  """Output only. Blocked reason."""
+  """Output only. The reason why the prompt was blocked."""
 
   BLOCKED_REASON_UNSPECIFIED = 'BLOCKED_REASON_UNSPECIFIED'
-  """Unspecified blocked reason."""
+  """The blocked reason is unspecified."""
   SAFETY = 'SAFETY'
-  """Candidates blocked due to safety."""
+  """The prompt was blocked for safety reasons."""
   OTHER = 'OTHER'
-  """Candidates blocked due to other reason."""
+  """The prompt was blocked for other reasons. For example, it may be due to the prompt's language, or because it contains other harmful content."""
   BLOCKLIST = 'BLOCKLIST'
-  """Candidates blocked due to the terms which are included from the terminology blocklist."""
+  """The prompt was blocked because it contains a term from the terminology blocklist."""
   PROHIBITED_CONTENT = 'PROHIBITED_CONTENT'
-  """Candidates blocked due to prohibited content."""
+  """The prompt was blocked because it contains prohibited content."""
   IMAGE_SAFETY = 'IMAGE_SAFETY'
-  """Candidates blocked due to unsafe image generation content."""
+  """The prompt was blocked because it contains content that is unsafe for image generation."""
+  MODEL_ARMOR = 'MODEL_ARMOR'
+  """The prompt was blocked by Model Armor."""
+  JAILBREAK = 'JAILBREAK'
+  """The prompt was blocked as a jailbreak attempt."""
 
 
 class TrafficType(_common.CaseInSensitiveEnum):
@@ -702,19 +742,6 @@ class MediaModality(_common.CaseInSensitiveEnum):
   """Document, e.g. PDF."""
 
 
-class FunctionResponseScheduling(_common.CaseInSensitiveEnum):
-  """Specifies how the response should be scheduled in the conversation."""
-
-  SCHEDULING_UNSPECIFIED = 'SCHEDULING_UNSPECIFIED'
-  """This value is unused."""
-  SILENT = 'SILENT'
-  """Only add the result to the conversation context, do not interrupt or trigger generation."""
-  WHEN_IDLE = 'WHEN_IDLE'
-  """Add the result to the conversation context, and prompt to generate output without interrupting ongoing generation."""
-  INTERRUPT = 'INTERRUPT'
-  """Add the result to the conversation context, interrupt ongoing generation and prompt to generate output."""
-
-
 class StartSensitivity(_common.CaseInSensitiveEnum):
   """Start of speech sensitivity."""
 
@@ -1530,6 +1557,15 @@ class HttpOptions(_common.BaseModel):
       default=None, description="""HTTP retry options for the request."""
   )
 
+  httpx_client: Optional['HttpxClient'] = Field(
+      default=None,
+      description="""A custom httpx client to be used for the request.""",
+  )
+  httpx_async_client: Optional['HttpxAsyncClient'] = Field(
+      default=None,
+      description="""A custom httpx async client to be used for the request.""",
+  )
+
 
 class HttpOptionsDict(TypedDict, total=False):
   """HTTP options to be used in each of the requests."""
@@ -2672,8 +2708,7 @@ class GoogleSearch(_common.BaseModel):
   )
   exclude_domains: Optional[list[str]] = Field(
       default=None,
-      description="""Optional. List of domains to be excluded from the search results.
-      The default limit is 2000 domains.""",
+      description="""Optional. List of domains to be excluded from the search results. The default limit is 2000 domains. Example: ["amazon.com", "facebook.com"].""",
   )
 
 
@@ -2686,8 +2721,7 @@ class GoogleSearchDict(TypedDict, total=False):
   """
 
   exclude_domains: Optional[list[str]]
-  """Optional. List of domains to be excluded from the search results.
-  The default limit is 2000 domains."""
+  """Optional. List of domains to be excluded from the search results. The default limit is 2000 domains. Example: ["amazon.com", "facebook.com"]."""
 
 
 GoogleSearchOrDict = Union[GoogleSearch, GoogleSearchDict]
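Since the docstring now ships its own example domains, a matching usage sketch may help; the tool wiring below is ordinary `generate_content` configuration, with the domain list taken from the new docstring:

```python
from google.genai import types

# Ground with Google Search while excluding specific domains.
config = types.GenerateContentConfig(
    tools=[
        types.Tool(
            google_search=types.GoogleSearch(
                exclude_domains=['amazon.com', 'facebook.com'],
            )
        )
    ],
)
```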
@@ -5706,31 +5740,41 @@ CandidateOrDict = Union[Candidate, CandidateDict]
 
 
 class GenerateContentResponsePromptFeedback(_common.BaseModel):
-  """Content filter results for a prompt sent in the request."""
+  """Content filter results for a prompt sent in the request.
+
+  Note: This is sent only in the first stream chunk and only if no candidates
+  were generated due to content violations.
+  """
 
   block_reason: Optional[BlockedReason] = Field(
-      default=None, description="""Output only. Blocked reason."""
+      default=None,
+      description="""Output only. The reason why the prompt was blocked.""",
   )
   block_reason_message: Optional[str] = Field(
       default=None,
-      description="""Output only. A readable block reason message.""",
+      description="""Output only. A readable message that explains the reason why the prompt was blocked.""",
   )
   safety_ratings: Optional[list[SafetyRating]] = Field(
-      default=None, description="""Output only. Safety ratings."""
+      default=None,
+      description="""Output only. A list of safety ratings for the prompt. There is one rating per category.""",
   )
 
 
 class GenerateContentResponsePromptFeedbackDict(TypedDict, total=False):
-  """Content filter results for a prompt sent in the request."""
+  """Content filter results for a prompt sent in the request.
+
+  Note: This is sent only in the first stream chunk and only if no candidates
+  were generated due to content violations.
+  """
 
   block_reason: Optional[BlockedReason]
-  """Output only. Blocked reason."""
+  """Output only. The reason why the prompt was blocked."""
 
   block_reason_message: Optional[str]
-  """Output only. A readable block reason message."""
+  """Output only. A readable message that explains the reason why the prompt was blocked."""
 
   safety_ratings: Optional[list[SafetyRatingDict]]
-  """Output only. Safety ratings."""
+  """Output only. A list of safety ratings for the prompt. There is one rating per category."""
 
 
 GenerateContentResponsePromptFeedbackOrDict = Union[
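A short sketch of how the clarified fields surface to callers; this is ordinary response handling (assuming a `client` created as in the README excerpts above), not new API:

```python
response = client.models.generate_content(
    model='gemini-2.0-flash', contents='...'  # placeholder prompt
)

# prompt_feedback is populated only when the prompt itself was blocked.
if response.prompt_feedback and response.prompt_feedback.block_reason:
  print(response.prompt_feedback.block_reason)          # e.g. BlockedReason.SAFETY
  print(response.prompt_feedback.block_reason_message)  # human-readable detail
```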
@@ -8304,34 +8348,6 @@ class DeleteModelResponseDict(TypedDict, total=False):
 DeleteModelResponseOrDict = Union[DeleteModelResponse, DeleteModelResponseDict]
 
 
-class GenerationConfigThinkingConfig(_common.BaseModel):
-  """Config for thinking features."""
-
-  include_thoughts: Optional[bool] = Field(
-      default=None,
-      description="""Optional. Indicates whether to include thoughts in the response. If true, thoughts are returned only when available.""",
-  )
-  thinking_budget: Optional[int] = Field(
-      default=None,
-      description="""Optional. Indicates the thinking budget in tokens.""",
-  )
-
-
-class GenerationConfigThinkingConfigDict(TypedDict, total=False):
-  """Config for thinking features."""
-
-  include_thoughts: Optional[bool]
-  """Optional. Indicates whether to include thoughts in the response. If true, thoughts are returned only when available."""
-
-  thinking_budget: Optional[int]
-  """Optional. Indicates the thinking budget in tokens."""
-
-
-GenerationConfigThinkingConfigOrDict = Union[
-    GenerationConfigThinkingConfig, GenerationConfigThinkingConfigDict
-]
-
-
 class GenerationConfig(_common.BaseModel):
   """Generation config."""
 
@@ -8400,7 +8416,7 @@
       default=None,
       description="""Optional. Controls the randomness of predictions.""",
   )
-  thinking_config: Optional[GenerationConfigThinkingConfig] = Field(
+  thinking_config: Optional[ThinkingConfig] = Field(
       default=None,
       description="""Optional. Config for thinking features. An error will be returned if this field is set for models that don't support thinking.""",
   )
@@ -8412,6 +8428,10 @@
       default=None,
       description="""Optional. If specified, nucleus sampling will be used.""",
   )
+  enable_enhanced_civic_answers: Optional[bool] = Field(
+      default=None,
+      description="""Optional. Enables enhanced civic answers. It may not be available for all models.""",
+  )
 
 
 class GenerationConfigDict(TypedDict, total=False):
@@ -8474,7 +8494,7 @@
   temperature: Optional[float]
   """Optional. Controls the randomness of predictions."""
 
-  thinking_config: Optional[GenerationConfigThinkingConfigDict]
+  thinking_config: Optional[ThinkingConfigDict]
   """Optional. Config for thinking features. An error will be returned if this field is set for models that don't support thinking."""
 
   top_k: Optional[float]
@@ -8483,6 +8503,9 @@
   top_p: Optional[float]
   """Optional. If specified, nucleus sampling will be used."""
 
+  enable_enhanced_civic_answers: Optional[bool]
+  """Optional. Enables enhanced civic answers. It may not be available for all models."""
+
 
 GenerationConfigOrDict = Union[GenerationConfig, GenerationConfigDict]
 
@@ -9335,14 +9358,22 @@ TunedModelCheckpointOrDict = Union[
 
 
 class TunedModel(_common.BaseModel):
+  """TunedModel for the Tuned Model of a Tuning Job."""
 
   model: Optional[str] = Field(
       default=None,
-      description="""Output only. The resource name of the TunedModel. Format: `projects/{project}/locations/{location}/models/{model}@{version_id}` When tuning from a base model, the version_id will be 1. For continuous tuning, the version id will be incremented by 1 from the last version id in the parent model. E.g., `projects/{project}/locations/{location}/models/{model}@{last_version_id + 1}`""",
+      description="""Output only. The resource name of the TunedModel.
+      Format: `projects/{project}/locations/{location}/models/{model}@{version_id}`
+      When tuning from a base model, the version_id will be 1.
+      For continuous tuning, the version id will be incremented by 1 from the
+      last version id in the parent model. E.g., `projects/{project}/locations/{location}/models/{model}@{last_version_id + 1}`
+      """,
   )
   endpoint: Optional[str] = Field(
       default=None,
-      description="""Output only. A resource name of an Endpoint. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}`.""",
+      description="""Output only. A resource name of an Endpoint.
+      Format: `projects/{project}/locations/{location}/endpoints/{endpoint}`.
+      """,
   )
   checkpoints: Optional[list[TunedModelCheckpoint]] = Field(
       default=None,
@@ -9353,12 +9384,20 @@ class TunedModel(_common.BaseModel):
 
 
 class TunedModelDict(TypedDict, total=False):
+  """TunedModel for the Tuned Model of a Tuning Job."""
 
   model: Optional[str]
-  """Output only. The resource name of the TunedModel. Format: `projects/{project}/locations/{location}/models/{model}@{version_id}` When tuning from a base model, the version_id will be 1. For continuous tuning, the version id will be incremented by 1 from the last version id in the parent model. E.g., `projects/{project}/locations/{location}/models/{model}@{last_version_id + 1}`"""
+  """Output only. The resource name of the TunedModel.
+  Format: `projects/{project}/locations/{location}/models/{model}@{version_id}`
+  When tuning from a base model, the version_id will be 1.
+  For continuous tuning, the version id will be incremented by 1 from the
+  last version id in the parent model. E.g., `projects/{project}/locations/{location}/models/{model}@{last_version_id + 1}`
+  """
 
   endpoint: Optional[str]
-  """Output only. A resource name of an Endpoint. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}`."""
+  """Output only. A resource name of an Endpoint.
+  Format: `projects/{project}/locations/{location}/endpoints/{endpoint}`.
+  """
 
   checkpoints: Optional[list[TunedModelCheckpointDict]]
   """The checkpoints associated with this TunedModel.
@@ -10829,22 +10868,24 @@ _CancelTuningJobParametersOrDict = Union[
 
 
 class TuningExample(_common.BaseModel):
+  """A single example for tuning."""
 
-  text_input: Optional[str] = Field(
-      default=None, description="""Text model input."""
-  )
   output: Optional[str] = Field(
-      default=None, description="""The expected model output."""
+      default=None, description="""Required. The expected model output."""
+  )
+  text_input: Optional[str] = Field(
+      default=None, description="""Optional. Text model input."""
   )
 
 
 class TuningExampleDict(TypedDict, total=False):
-
-  text_input: Optional[str]
-  """Text model input."""
+  """A single example for tuning."""
 
   output: Optional[str]
-  """The expected model output."""
+  """Required. The expected model output."""
+
+  text_input: Optional[str]
+  """Optional. Text model input."""
 
 
 TuningExampleOrDict = Union[TuningExample, TuningExampleDict]
@@ -11656,10 +11697,11 @@ class ListFilesResponse(_common.BaseModel):
       default=None, description="""Used to retain the full HTTP response."""
   )
   next_page_token: Optional[str] = Field(
-      default=None, description="""A token to retrieve next page of results."""
+      default=None,
+      description="""A token that can be sent as a `page_token` into a subsequent `ListFiles` call.""",
   )
   files: Optional[list[File]] = Field(
-      default=None, description="""The list of files."""
+      default=None, description="""The list of `File`s."""
   )
 
 
@@ -11670,10 +11712,10 @@ class ListFilesResponseDict(TypedDict, total=False):
   """Used to retain the full HTTP response."""
 
   next_page_token: Optional[str]
-  """A token to retrieve next page of results."""
+  """A token that can be sent as a `page_token` into a subsequent `ListFiles` call."""
 
   files: Optional[list[FileDict]]
-  """The list of files."""
+  """The list of `File`s."""
 
 
 ListFilesResponseOrDict = Union[ListFilesResponse, ListFilesResponseDict]
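The reworded docstrings describe standard list pagination. In everyday use the SDK pager follows `next_page_token` internally, so most callers never touch it; a brief sketch (existing `client.files.list` API, page size illustrative):

```python
# The Pager fetches subsequent pages via next_page_token behind the scenes.
for f in client.files.list(config={'page_size': 10}):
  print(f.name)
```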
@@ -12351,6 +12393,25 @@ class BatchJob(_common.BaseModel):
     return self.state.name in JOB_STATES_ENDED
 
 
+class GenerationConfigThinkingConfig(ThinkingConfig):
+  """Config for thinking feature.
+
+  This class will be deprecated. Please use `ThinkingConfig` instead.
+  """
+
+
+class GenerationConfigThinkingConfigDict(ThinkingConfigDict):
+  """Config for thinking feature.
+
+  This class will be deprecated. Please use `ThinkingConfig` instead.
+  """
+
+
+GenerationConfigThinkingConfigOrDict = Union[
+    GenerationConfigThinkingConfig, GenerationConfigThinkingConfigDict
+]
+
+
 class BatchJobDict(TypedDict, total=False):
   """Config for batches.create return value."""
 
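Net effect of the thinking-config hunks: `GenerationConfigThinkingConfig` survives only as a deprecated alias subclassing `ThinkingConfig`, and `GenerationConfig.thinking_config` is now typed as `ThinkingConfig`. A minimal sketch of the preferred spelling, using the fields the old class carried:

```python
from google.genai import types

thinking = types.ThinkingConfig(
    include_thoughts=True,  # return thoughts when available
    thinking_budget=1024,   # thinking budget in tokens
)
config = types.GenerateContentConfig(thinking_config=thinking)
```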
@@ -13,4 +13,4 @@
 # limitations under the License.
 #
 
-__version__ = '1.45.0' # x-release-please-version
+__version__ = '1.46.0' # x-release-please-version
@@ -1,12 +1,11 @@
 Metadata-Version: 2.4
 Name: google-genai
-Version: 1.45.0
+Version: 1.46.0
 Summary: GenAI Python SDK
 Author-email: Google LLC <googleapis-packages@google.com>
-License: Apache-2.0
+License-Expression: Apache-2.0
 Project-URL: Homepage, https://github.com/googleapis/python-genai
 Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: Apache Software License
 Classifier: Operating System :: OS Independent
 Classifier: Programming Language :: Python
 Classifier: Programming Language :: Python :: 3
@@ -312,7 +311,7 @@ See the 'Create a client' section above to initialize a client.
 
 ### Generate Content
 
-#### with text content
+#### with text content input (text output)
 
 ```python
 response = client.models.generate_content(
@@ -321,6 +320,28 @@ response = client.models.generate_content(
 print(response.text)
 ```
 
+#### with text content input (image output)
+
+```python
+from google.genai import types
+
+response = client.models.generate_content(
+    model='gemini-2.5-flash-image',
+    contents='A cartoon infographic for flying sneakers',
+    config=types.GenerateContentConfig(
+        response_modalities=["IMAGE"],
+        image_config=types.ImageConfig(
+            aspect_ratio="9:16",
+        ),
+    ),
+)
+
+for part in response.parts:
+  if part.inline_data:
+    generated_image = part.as_image()
+    generated_image.show()
+```
+
 #### with uploaded file (Gemini Developer API only)
 download the file in console.
 
@@ -2,6 +2,7 @@ LICENSE
 MANIFEST.in
 README.md
 pyproject.toml
+setup.cfg
 google/genai/__init__.py
 google/genai/_adapters.py
 google/genai/_api_client.py
@@ -3,17 +3,16 @@ requires = ["setuptools", "wheel", "twine>=6.1.0", "packaging>=24.2", "pkginfo>=
 
 [project]
 name = "google-genai"
-version = "1.45.0"
+version = "1.46.0"
 description = "GenAI Python SDK"
 readme = "README.md"
-license = {text = "Apache-2.0"}
+license = "Apache-2.0"
 requires-python = ">=3.9"
 authors = [
   { name = "Google LLC", email = "googleapis-packages@google.com" },
 ]
 classifiers = [
   "Intended Audience :: Developers",
-  "License :: OSI Approved :: Apache Software License",
   "Operating System :: OS Independent",
   "Programming Language :: Python",
   "Programming Language :: Python :: 3",
@@ -44,12 +43,16 @@ local-tokenizer = ["sentencepiece>=0.2.0", "protobuf"]
 [project.urls]
 Homepage = "https://github.com/googleapis/python-genai"
 
-[tool.setuptools]
-packages = [
-  "google",
-  "google.genai",
-]
-include-package-data = true
+# [tool.setuptools] settings are in setup.cfg
 
-[tools.setuptools.package_data]
+[tool.setuptools.package-data]
 "google.genai" = ["py.typed"]
+
+[tool.mypy]
+exclude = ["tests/", "_test_api_client\\.py"]
+plugins = ["pydantic.mypy"]
+# we are ignoring 'unused-ignore' because we run mypy on Python 3.9 - 3.13 and
+# some errors in _automatic_function_calling_util.py only apply in 3.10+
+# 'import-not-found' and 'import-untyped' are environment specific
+disable_error_code = ["import-not-found", "import-untyped", "unused-ignore"]
+strict = true
@@ -0,0 +1,10 @@
+[options]
+packages =
+    google
+    google.genai
+include_package_data = True
+
+[egg_info]
+tag_build =
+tag_date = 0
+
@@ -1,4 +0,0 @@
-[egg_info]
-tag_build =
-tag_date = 0
-