google-genai 1.44.0__py3-none-any.whl → 1.46.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- google/genai/_api_client.py +10 -2
- google/genai/_common.py +1 -3
- google/genai/_live_converters.py +5 -0
- google/genai/_replay_api_client.py +8 -3
- google/genai/models.py +40 -3
- google/genai/tunings.py +0 -30
- google/genai/types.py +274 -98
- google/genai/version.py +1 -1
- {google_genai-1.44.0.dist-info → google_genai-1.46.0.dist-info}/METADATA +45 -4
- {google_genai-1.44.0.dist-info → google_genai-1.46.0.dist-info}/RECORD +13 -13
- {google_genai-1.44.0.dist-info → google_genai-1.46.0.dist-info}/WHEEL +0 -0
- {google_genai-1.44.0.dist-info → google_genai-1.46.0.dist-info}/licenses/LICENSE +0 -0
- {google_genai-1.44.0.dist-info → google_genai-1.46.0.dist-info}/top_level.txt +0 -0
google/genai/_api_client.py
CHANGED
@@ -692,8 +692,16 @@ class BaseApiClient:
     client_args, async_client_args = self._ensure_httpx_ssl_ctx(
         self._http_options
     )
-    self._httpx_client = SyncHttpxClient(**client_args)
-    self._async_httpx_client = AsyncHttpxClient(**async_client_args)
+    self._async_httpx_client_args = async_client_args
+
+    if self._http_options.httpx_client:
+      self._httpx_client = self._http_options.httpx_client
+    else:
+      self._httpx_client = SyncHttpxClient(**client_args)
+    if self._http_options.httpx_async_client:
+      self._async_httpx_client = self._http_options.httpx_async_client
+    else:
+      self._async_httpx_client = AsyncHttpxClient(**async_client_args)
     if self._use_aiohttp():
       # Do it once at the genai.Client level. Share among all requests.
       self._async_client_session_request_args = self._ensure_aiohttp_ssl_ctx(
google/genai/_common.py
CHANGED
@@ -272,9 +272,7 @@ def convert_to_dict(obj: object, convert_keys: bool = False) -> Any:
     return convert_to_dict(obj.model_dump(exclude_none=True), convert_keys)
   elif isinstance(obj, dict):
     return {
-        maybe_snake_to_camel(key, convert_keys): convert_to_dict(
-            value, convert_keys
-        )
+        maybe_snake_to_camel(key, convert_keys): convert_to_dict(value)
         for key, value in obj.items()
     }
   elif isinstance(obj, list):
google/genai/_live_converters.py
CHANGED
@@ -226,6 +226,11 @@ def _GenerationConfig_to_vertex(
   if getv(from_object, ['top_p']) is not None:
     setv(to_object, ['topP'], getv(from_object, ['top_p']))

+  if getv(from_object, ['enable_enhanced_civic_answers']) is not None:
+    raise ValueError(
+        'enable_enhanced_civic_answers parameter is not supported in Vertex AI.'
+    )
+
   return to_object

google/genai/_replay_api_client.py
CHANGED

@@ -56,8 +56,13 @@ def _normalize_json_case(obj: Any) -> Any:
     return [_normalize_json_case(item) for item in obj]
   elif isinstance(obj, enum.Enum):
     return obj.value
-
-
+  elif isinstance(obj, str):
+    # Python >= 3.14 has a new division by zero error message.
+    if 'division by zero' in obj:
+      return obj.replace(
+          'division by zero', 'integer division or modulo by zero'
+      )
+    return obj


 def _equals_ignore_key_case(obj1: Any, obj2: Any) -> bool:

@@ -88,7 +93,7 @@ def _equals_ignore_key_case(obj1: Any, obj2: Any) -> bool:

 def _redact_version_numbers(version_string: str) -> str:
   """Redacts version numbers in the form x.y.z from a string."""
-  return re.sub(r'\d+\.\d+\.\d+', '{VERSION_NUMBER}', version_string)
+  return re.sub(r'\d+\.\d+\.\d+[a-zA-Z0-9]*', '{VERSION_NUMBER}', version_string)


 def _redact_language_label(language_label: str) -> str:
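The broadened pattern now also swallows pre-release or build suffixes when replay files are normalized. A quick, self-contained sketch of the effect (the sample version strings are made up):

```python
import re

def redact_version_numbers(version_string: str) -> str:
    # Mirrors the updated replay-client pattern: x.y.z plus any trailing
    # alphanumeric suffix (e.g. rc1, b3) is collapsed into a placeholder.
    return re.sub(r'\d+\.\d+\.\d+[a-zA-Z0-9]*', '{VERSION_NUMBER}', version_string)

print(redact_version_numbers('google-genai/1.46.0 httpx/0.28.1'))
# google-genai/{VERSION_NUMBER} httpx/{VERSION_NUMBER}
print(redact_version_numbers('google-genai/1.46.0rc1'))
# google-genai/{VERSION_NUMBER}
```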
google/genai/models.py
CHANGED
@@ -2398,6 +2398,11 @@ def _GenerationConfig_to_vertex(
   if getv(from_object, ['top_p']) is not None:
     setv(to_object, ['topP'], getv(from_object, ['top_p']))

+  if getv(from_object, ['enable_enhanced_civic_answers']) is not None:
+    raise ValueError(
+        'enable_enhanced_civic_answers parameter is not supported in Vertex AI.'
+    )
+
   return to_object

@@ -6856,7 +6861,7 @@ class AsyncModels(_api_module.BaseModule):
       # * Everlasting Florals
       # * Timeless Petals

-      async for chunk in client.aio.models.generate_content_stream(
+      async for chunk in await client.aio.models.generate_content_stream(
          model='gemini-2.0-flash',
          contents=[
              types.Part.from_text('What is shown in this image?'),

@@ -6898,9 +6903,11 @@ class AsyncModels(_api_module.BaseModule):
      response = await self._generate_content_stream(
          model=model, contents=contents, config=config
      )
-
+      # TODO: b/453739108 - make AFC logic more robust like the other 3 methods.
+      if i > 1:
+        logger.info(f'AFC remote call {i} is done.')
      remaining_remote_calls_afc -= 1
-      if remaining_remote_calls_afc == 0:
+      if i > 1 and remaining_remote_calls_afc == 0:
        logger.info(
            'Reached max remote calls for automatic function calling.'
        )

@@ -7270,6 +7277,36 @@ class AsyncModels(_api_module.BaseModule):
          'Source and prompt/image/video are mutually exclusive.'
          + ' Please only use source.'
      )
+    # Gemini Developer API does not support video bytes.
+    video_dct: dict[str, Any] = {}
+    if not self._api_client.vertexai and video:
+      if isinstance(video, types.Video):
+        video_dct = video.model_dump()
+      else:
+        video_dct = dict(video)
+
+      if video_dct.get('uri') and video_dct.get('video_bytes'):
+        video = types.Video(
+            uri=video_dct.get('uri'), mime_type=video_dct.get('mime_type')
+        )
+    elif not self._api_client.vertexai and source:
+      if isinstance(source, types.GenerateVideosSource):
+        source_dct = source.model_dump()
+        video_dct = source_dct.get('video', {})
+      else:
+        source_dct = dict(source)
+        if isinstance(source_dct.get('video'), types.Video):
+          video_obj: types.Video = source_dct.get('video', types.Video())
+          video_dct = video_obj.model_dump()
+      if video_dct and video_dct.get('uri') and video_dct.get('video_bytes'):
+        source = types.GenerateVideosSource(
+            prompt=source_dct.get('prompt'),
+            image=source_dct.get('image'),
+            video=types.Video(
+                uri=video_dct.get('uri'),
+                mime_type=video_dct.get('mime_type'),
+            ),
+        )
    return await self._generate_videos(
        model=model,
        prompt=prompt,
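The new `generate_videos` path above strips raw bytes whenever a URI is also present, because the Gemini Developer API does not accept video bytes. A minimal sketch of that normalization outside the SDK, with made-up values:

```python
from google.genai import types

# Hypothetical input: a video carrying both a URI and raw bytes.
video = types.Video(
    uri='gs://my-bucket/clip.mp4',   # made-up URI
    video_bytes=b'\x00\x01\x02',     # placeholder bytes
    mime_type='video/mp4',
)

# When targeting the Gemini Developer API, only the URI (plus mime type)
# is kept, mirroring the branch added in the diff above.
if video.uri and video.video_bytes:
    video = types.Video(uri=video.uri, mime_type=video.mime_type)

print(video.video_bytes)  # None
```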
google/genai/tunings.py
CHANGED
@@ -551,36 +551,6 @@ def _TuningJob_from_mldev(
       _TunedModel_from_mldev(getv(from_object, ['_self']), to_object),
   )

-  if getv(from_object, ['customBaseModel']) is not None:
-    setv(
-        to_object, ['custom_base_model'], getv(from_object, ['customBaseModel'])
-    )
-
-  if getv(from_object, ['experiment']) is not None:
-    setv(to_object, ['experiment'], getv(from_object, ['experiment']))
-
-  if getv(from_object, ['labels']) is not None:
-    setv(to_object, ['labels'], getv(from_object, ['labels']))
-
-  if getv(from_object, ['outputUri']) is not None:
-    setv(to_object, ['output_uri'], getv(from_object, ['outputUri']))
-
-  if getv(from_object, ['pipelineJob']) is not None:
-    setv(to_object, ['pipeline_job'], getv(from_object, ['pipelineJob']))
-
-  if getv(from_object, ['serviceAccount']) is not None:
-    setv(to_object, ['service_account'], getv(from_object, ['serviceAccount']))
-
-  if getv(from_object, ['tunedModelDisplayName']) is not None:
-    setv(
-        to_object,
-        ['tuned_model_display_name'],
-        getv(from_object, ['tunedModelDisplayName']),
-    )
-
-  if getv(from_object, ['veoTuningSpec']) is not None:
-    setv(to_object, ['veo_tuning_spec'], getv(from_object, ['veoTuningSpec']))
-
   return to_object

google/genai/types.py
CHANGED
@@ -89,6 +89,27 @@ else:
   except ImportError:
     yaml = None

+_is_httpx_imported = False
+if typing.TYPE_CHECKING:
+  import httpx
+
+  HttpxClient = httpx.Client
+  HttpxAsyncClient = httpx.AsyncClient
+  _is_httpx_imported = True
+else:
+  HttpxClient: typing.Type = Any
+  HttpxAsyncClient: typing.Type = Any
+
+  try:
+    import httpx
+
+    HttpxClient = httpx.Client
+    HttpxAsyncClient = httpx.AsyncClient
+    _is_httpx_imported = True
+  except ImportError:
+    HttpxClient = None
+    HttpxAsyncClient = None
+
 logger = logging.getLogger('google_genai.types')

 T = typing.TypeVar('T', bound='GenerateContentResponse')

@@ -118,6 +139,19 @@ class Language(_common.CaseInSensitiveEnum):
   """Python >= 3.10, with numpy and simpy available."""


+class FunctionResponseScheduling(_common.CaseInSensitiveEnum):
+  """Specifies how the response should be scheduled in the conversation."""
+
+  SCHEDULING_UNSPECIFIED = 'SCHEDULING_UNSPECIFIED'
+  """This value is unused."""
+  SILENT = 'SILENT'
+  """Only add the result to the conversation context, do not interrupt or trigger generation."""
+  WHEN_IDLE = 'WHEN_IDLE'
+  """Add the result to the conversation context, and prompt to generate output without interrupting ongoing generation."""
+  INTERRUPT = 'INTERRUPT'
+  """Add the result to the conversation context, interrupt ongoing generation and prompt to generate output."""
+
+
 class Type(_common.CaseInSensitiveEnum):
   """Optional. The type of the data."""

@@ -144,14 +178,14 @@ class HarmCategory(_common.CaseInSensitiveEnum):

   HARM_CATEGORY_UNSPECIFIED = 'HARM_CATEGORY_UNSPECIFIED'
   """The harm category is unspecified."""
-  HARM_CATEGORY_HATE_SPEECH = 'HARM_CATEGORY_HATE_SPEECH'
-  """The harm category is hate speech."""
-  HARM_CATEGORY_DANGEROUS_CONTENT = 'HARM_CATEGORY_DANGEROUS_CONTENT'
-  """The harm category is dangerous content."""
   HARM_CATEGORY_HARASSMENT = 'HARM_CATEGORY_HARASSMENT'
   """The harm category is harassment."""
+  HARM_CATEGORY_HATE_SPEECH = 'HARM_CATEGORY_HATE_SPEECH'
+  """The harm category is hate speech."""
   HARM_CATEGORY_SEXUALLY_EXPLICIT = 'HARM_CATEGORY_SEXUALLY_EXPLICIT'
   """The harm category is sexually explicit content."""
+  HARM_CATEGORY_DANGEROUS_CONTENT = 'HARM_CATEGORY_DANGEROUS_CONTENT'
+  """The harm category is dangerous content."""
   HARM_CATEGORY_CIVIC_INTEGRITY = 'HARM_CATEGORY_CIVIC_INTEGRITY'
   """Deprecated: Election filter is not longer supported. The harm category is civic integrity."""
   HARM_CATEGORY_IMAGE_HATE = 'HARM_CATEGORY_IMAGE_HATE'

@@ -166,6 +200,8 @@ class HarmCategory(_common.CaseInSensitiveEnum):
       'HARM_CATEGORY_IMAGE_SEXUALLY_EXPLICIT'
   )
   """The harm category is image sexually explicit content."""
+  HARM_CATEGORY_JAILBREAK = 'HARM_CATEGORY_JAILBREAK'
+  """The harm category is for jailbreak prompts."""


 class HarmBlockMethod(_common.CaseInSensitiveEnum):

@@ -322,20 +358,24 @@ class HarmSeverity(_common.CaseInSensitiveEnum):


 class BlockedReason(_common.CaseInSensitiveEnum):
-  """Output only.
+  """Output only. The reason why the prompt was blocked."""

   BLOCKED_REASON_UNSPECIFIED = 'BLOCKED_REASON_UNSPECIFIED'
-  """
+  """The blocked reason is unspecified."""
   SAFETY = 'SAFETY'
-  """
+  """The prompt was blocked for safety reasons."""
   OTHER = 'OTHER'
-  """
+  """The prompt was blocked for other reasons. For example, it may be due to the prompt's language, or because it contains other harmful content."""
   BLOCKLIST = 'BLOCKLIST'
-  """
+  """The prompt was blocked because it contains a term from the terminology blocklist."""
   PROHIBITED_CONTENT = 'PROHIBITED_CONTENT'
-  """
+  """The prompt was blocked because it contains prohibited content."""
   IMAGE_SAFETY = 'IMAGE_SAFETY'
-  """
+  """The prompt was blocked because it contains content that is unsafe for image generation."""
+  MODEL_ARMOR = 'MODEL_ARMOR'
+  """The prompt was blocked by Model Armor."""
+  JAILBREAK = 'JAILBREAK'
+  """The prompt was blocked as a jailbreak attempt."""


 class TrafficType(_common.CaseInSensitiveEnum):
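Blocked prompts surface these values through the prompt feedback on a response. A minimal sketch of checking for the new reasons, assuming a `client` has already been created and remembering (per the docstring change further down in this diff) that the feedback is only populated when no candidates were returned:

```python
from google.genai import types

response = client.models.generate_content(
    model='gemini-2.0-flash',
    contents='Why is the sky blue?',
)

feedback = response.prompt_feedback
if feedback and feedback.block_reason in (
    types.BlockedReason.JAILBREAK,
    types.BlockedReason.MODEL_ARMOR,
):
    # block_reason_message is the human-readable explanation documented below.
    print(f'Prompt blocked: {feedback.block_reason_message}')
```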
@@ -702,19 +742,6 @@ class MediaModality(_common.CaseInSensitiveEnum):
   """Document, e.g. PDF."""


-class FunctionResponseScheduling(_common.CaseInSensitiveEnum):
-  """Specifies how the response should be scheduled in the conversation."""
-
-  SCHEDULING_UNSPECIFIED = 'SCHEDULING_UNSPECIFIED'
-  """This value is unused."""
-  SILENT = 'SILENT'
-  """Only add the result to the conversation context, do not interrupt or trigger generation."""
-  WHEN_IDLE = 'WHEN_IDLE'
-  """Add the result to the conversation context, and prompt to generate output without interrupting ongoing generation."""
-  INTERRUPT = 'INTERRUPT'
-  """Add the result to the conversation context, interrupt ongoing generation and prompt to generate output."""
-
-
 class StartSensitivity(_common.CaseInSensitiveEnum):
   """Start of speech sensitivity."""

@@ -1530,6 +1557,15 @@ class HttpOptions(_common.BaseModel):
       default=None, description="""HTTP retry options for the request."""
   )

+  httpx_client: Optional['HttpxClient'] = Field(
+      default=None,
+      description="""A custom httpx client to be used for the request.""",
+  )
+  httpx_async_client: Optional['HttpxAsyncClient'] = Field(
+      default=None,
+      description="""A custom httpx async client to be used for the request.""",
+  )
+

 class HttpOptionsDict(TypedDict, total=False):
   """HTTP options to be used in each of the requests."""
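Together with the `_api_client.py` change above, these fields let callers inject pre-configured httpx transports instead of the SDK-built defaults. A minimal sketch under that assumption (the proxy URL and timeout are made up, and credentials are expected to come from the environment):

```python
import httpx
from google import genai
from google.genai import types

# Hypothetical custom transports: a corporate proxy and a longer timeout.
sync_client = httpx.Client(proxy='http://localhost:3128', timeout=60.0)
async_client = httpx.AsyncClient(proxy='http://localhost:3128', timeout=60.0)

client = genai.Client(
    http_options=types.HttpOptions(
        httpx_client=sync_client,
        httpx_async_client=async_client,
    ),
)
```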
@@ -1696,6 +1732,10 @@ class JSONSchema(_common.BaseModel):
           ' matches the instance successfully.'
       ),
   )
+  additional_properties: Optional[Any] = Field(
+      default=None,
+      description="""Can either be a boolean or an object; controls the presence of additional properties.""",
+  )
   any_of: Optional[list['JSONSchema']] = Field(
       default=None,
       description=(

@@ -1704,6 +1744,20 @@ class JSONSchema(_common.BaseModel):
           ' keyword’s value.'
       ),
   )
+  unique_items: Optional[bool] = Field(
+      default=None,
+      description="""Boolean value that indicates whether the items in an array are unique.""",
+  )
+  ref: Optional[str] = Field(
+      default=None,
+      alias='$ref',
+      description="""Allows indirect references between schema nodes.""",
+  )
+  defs: Optional[dict[str, 'JSONSchema']] = Field(
+      default=None,
+      alias='$defs',
+      description="""Schema definitions to be used with $ref.""",
+  )


 class Schema(_common.BaseModel):

@@ -1915,7 +1969,7 @@ class Schema(_common.BaseModel):
     list_schema_field_names: tuple[str, ...] = (
         'any_of',  # 'one_of', 'all_of', 'not' to come
     )
-    dict_schema_field_names: tuple[str, ...] = ('properties',)
+    dict_schema_field_names: tuple[str, ...] = ('properties',)

     related_field_names_by_type: dict[str, tuple[str, ...]] = {
         JSONSchemaType.NUMBER.value: (

@@ -1964,6 +2018,23 @@ class Schema(_common.BaseModel):
     # placeholder for potential gemini api unsupported fields
     gemini_api_unsupported_field_names: tuple[str, ...] = ()

+    def _resolve_ref(
+        ref_path: str, root_schema_dict: dict[str, Any]
+    ) -> dict[str, Any]:
+      """Helper to resolve a $ref path."""
+      current = root_schema_dict
+      for part in ref_path.lstrip('#/').split('/'):
+        if part == '$defs':
+          part = 'defs'
+        current = current[part]
+      current.pop('title', None)
+      if 'properties' in current and current['properties'] is not None:
+        for prop_schema in current['properties'].values():
+          if isinstance(prop_schema, dict):
+            prop_schema.pop('title', None)
+
+      return current
+
     def normalize_json_schema_type(
         json_schema_type: Optional[
             Union[JSONSchemaType, Sequence[JSONSchemaType], str, Sequence[str]]

@@ -1972,11 +2043,16 @@ class Schema(_common.BaseModel):
       """Returns (non_null_types, nullable)"""
       if json_schema_type is None:
         return [], False
-
-
+      type_sequence: Sequence[Union[JSONSchemaType, str]]
+      if isinstance(json_schema_type, str) or not isinstance(
+          json_schema_type, Sequence
+      ):
+        type_sequence = [json_schema_type]
+      else:
+        type_sequence = json_schema_type
       non_null_types = []
       nullable = False
-      for type_value in json_schema_type:
+      for type_value in type_sequence:
         if isinstance(type_value, JSONSchemaType):
           type_value = type_value.value
         if type_value == JSONSchemaType.NULL.value:

@@ -1996,7 +2072,10 @@ class Schema(_common.BaseModel):
       for field_name, field_value in json_schema_dict.items():
         if field_value is None:
           continue
-        if field_name not in google_schema_field_names:
+        if field_name not in google_schema_field_names and field_name not in [
+            'ref',
+            'defs',
+        ]:
           raise ValueError(
               f'JSONSchema field "{field_name}" is not supported by the '
               'Schema object. And the "raise_error_on_unsupported_field" '

@@ -2026,12 +2105,19 @@ class Schema(_common.BaseModel):
       )

     def convert_json_schema(
-        json_schema: JSONSchema,
+        current_json_schema: JSONSchema,
+        root_json_schema_dict: dict[str, Any],
         api_option: Literal['VERTEX_AI', 'GEMINI_API'],
         raise_error_on_unsupported_field: bool,
     ) -> 'Schema':
       schema = Schema()
-      json_schema_dict = json_schema.model_dump()
+      json_schema_dict = current_json_schema.model_dump()
+
+      if json_schema_dict.get('ref'):
+        json_schema_dict = _resolve_ref(
+            json_schema_dict['ref'], root_json_schema_dict
+        )
+
       raise_error_if_cannot_convert(
           json_schema_dict=json_schema_dict,
           api_option=api_option,

@@ -2057,6 +2143,7 @@ class Schema(_common.BaseModel):
       non_null_types, nullable = normalize_json_schema_type(
           json_schema_dict.get('type', None)
       )
+      is_union_like_type = len(non_null_types) > 1
       if len(non_null_types) > 1:
         logger.warning(
             'JSONSchema type is union-like, e.g. ["null", "string", "array"]. '

@@ -2086,11 +2173,14 @@ class Schema(_common.BaseModel):
       # Pass 2: the JSONSchema.type is not union-like,
      # e.g. 'string', ['string'], ['null', 'string'].
      for field_name, field_value in json_schema_dict.items():
-        if field_value is None:
+        if field_value is None or field_name == 'defs':
          continue
        if field_name in schema_field_names:
+          if field_name == 'items' and not field_value:
+            continue
          schema_field_value: 'Schema' = convert_json_schema(
-              json_schema=JSONSchema(**field_value),
+              current_json_schema=JSONSchema(**field_value),
+              root_json_schema_dict=root_json_schema_dict,
              api_option=api_option,
              raise_error_on_unsupported_field=raise_error_on_unsupported_field,
          )

@@ -2098,17 +2188,21 @@ class Schema(_common.BaseModel):
        elif field_name in list_schema_field_names:
          list_schema_field_value: list['Schema'] = [
              convert_json_schema(
-                  json_schema=JSONSchema(**this_field_value),
+                  current_json_schema=JSONSchema(**this_field_value),
+                  root_json_schema_dict=root_json_schema_dict,
                  api_option=api_option,
                  raise_error_on_unsupported_field=raise_error_on_unsupported_field,
              )
              for this_field_value in field_value
          ]
          setattr(schema, field_name, list_schema_field_value)
+          if not schema.type and not is_union_like_type:
+            schema.type = Type('OBJECT')
        elif field_name in dict_schema_field_names:
          dict_schema_field_value: dict[str, 'Schema'] = {
              key: convert_json_schema(
-                  json_schema=JSONSchema(**value),
+                  current_json_schema=JSONSchema(**value),
+                  root_json_schema_dict=root_json_schema_dict,
                  api_option=api_option,
                  raise_error_on_unsupported_field=raise_error_on_unsupported_field,
              )

@@ -2116,20 +2210,52 @@ class Schema(_common.BaseModel):
          }
          setattr(schema, field_name, dict_schema_field_value)
        elif field_name == 'type':
-          # non_null_types can only be empty or have one element.
-          # because already handled union-like case above.
          non_null_types, nullable = normalize_json_schema_type(field_value)
          if nullable:
            schema.nullable = True
          if non_null_types:
            schema.type = Type(non_null_types[0])
        else:
-          setattr(schema, field_name, field_value)
+          if (
+              hasattr(schema, field_name)
+              and field_name != 'additional_properties'
+          ):
+            setattr(schema, field_name, field_value)
+
+      if (
+          schema.type == 'ARRAY'
+          and schema.items
+          and not schema.items.model_dump(exclude_unset=True)
+      ):
+        schema.items = None
+
+      if schema.any_of and len(schema.any_of) == 2:
+        nullable_part = None
+        type_part = None
+        for part in schema.any_of:
+          # A schema representing `None` will either be of type NULL or just be nullable.
+          part_dict = part.model_dump(exclude_unset=True)
+          if part_dict == {'nullable': True} or part_dict == {'type': 'NULL'}:
+            nullable_part = part
+          else:
+            type_part = part
+
+        # If we found both parts, unwrap them into a single schema.
+        if nullable_part and type_part:
+          default_value = schema.default
+          schema = type_part
+          schema.nullable = True
+          # Carry the default value over to the unwrapped schema
+          if default_value is not None:
+            schema.default = default_value

      return schema

+    # This is the initial call to the recursive function.
+    root_schema_dict = json_schema.model_dump()
    return convert_json_schema(
-        json_schema=json_schema,
+        current_json_schema=json_schema,
+        root_json_schema_dict=root_schema_dict,
        api_option=api_option,
        raise_error_on_unsupported_field=raise_error_on_unsupported_field,
    )
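With `$ref`/`$defs` now resolved against the root schema, a JSON schema with internal references (as Pydantic emits for nested models) can be converted in one call. A sketch, assuming `Schema.from_json_schema` keeps the keyword signature used above and that the aliased `ref`/`defs` fields accept population by field name; the `Address`/`city` names are purely illustrative:

```python
from google.genai import types

json_schema = types.JSONSchema(
    type=types.JSONSchemaType.OBJECT,
    properties={
        'address': types.JSONSchema(ref='#/$defs/Address'),
    },
    defs={
        'Address': types.JSONSchema(
            type=types.JSONSchemaType.OBJECT,
            properties={
                'city': types.JSONSchema(type=types.JSONSchemaType.STRING),
            },
        ),
    },
)

schema = types.Schema.from_json_schema(
    json_schema=json_schema,
    api_option='GEMINI_API',
)
# Given the _resolve_ref logic above, the referenced definition should be
# inlined under the 'address' property, i.e. its 'city' field ends up STRING.
print(schema.properties['address'].properties['city'].type)
```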
@@ -2371,7 +2497,32 @@ class FunctionDeclaration(_common.BaseModel):
        json_schema_dict = _automatic_function_calling_util._add_unevaluated_items_to_fixed_len_tuple_schema(
            json_schema_dict
        )
-
+        if 'prefixItems' in json_schema_dict:
+          parameters_json_schema[name] = json_schema_dict
+          continue
+
+        union_args = typing.get_args(param.annotation)
+        has_primitive = any(
+            _automatic_function_calling_util._is_builtin_primitive_or_compound(
+                arg
+            )
+            for arg in union_args
+        )
+        if (
+            '$ref' in json_schema_dict or '$defs' in json_schema_dict
+        ) and has_primitive:
+          # This is a complex schema with a primitive (e.g., str | MyModel)
+          # that is better represented by raw JSON schema.
+          parameters_json_schema[name] = json_schema_dict
+          continue
+
+        schema = Schema.from_json_schema(
+            json_schema=JSONSchema(**json_schema_dict),
+            api_option=api_option,
+        )
+        if param.default is not inspect.Parameter.empty:
+          schema.default = param.default
+        parameters_properties[name] = schema
      except Exception as e:
        _automatic_function_calling_util._raise_for_unsupported_param(
            param, callable.__name__, e
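The `has_primitive` check above hinges on `typing.get_args`: a union parameter that mixes a builtin primitive with a model class is forwarded as raw JSON schema rather than converted. A standard-library sketch of that detection (the function and dataclass here are illustrative, not the SDK's private helpers):

```python
import typing
from dataclasses import dataclass


@dataclass
class Location:
    city: str


def describe(place: typing.Union[str, Location]) -> str:
    return str(place)


annotation = typing.get_type_hints(describe)['place']
union_args = typing.get_args(annotation)  # (str, Location)
has_primitive = any(
    arg in (str, int, float, bool, list, dict) for arg in union_args
)
print(union_args, has_primitive)  # (<class 'str'>, <class '__main__.Location'>) True
```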
@@ -2557,8 +2708,7 @@ class GoogleSearch(_common.BaseModel):
   )
   exclude_domains: Optional[list[str]] = Field(
       default=None,
-      description="""Optional. List of domains to be excluded from the search results.
-      The default limit is 2000 domains.""",
+      description="""Optional. List of domains to be excluded from the search results. The default limit is 2000 domains. Example: ["amazon.com", "facebook.com"].""",
   )

@@ -2571,8 +2721,7 @@ class GoogleSearchDict(TypedDict, total=False):
   """

   exclude_domains: Optional[list[str]]
-  """Optional. List of domains to be excluded from the search results.
-  The default limit is 2000 domains."""
+  """Optional. List of domains to be excluded from the search results. The default limit is 2000 domains. Example: ["amazon.com", "facebook.com"]."""


 GoogleSearchOrDict = Union[GoogleSearch, GoogleSearchDict]

@@ -5591,31 +5740,41 @@ CandidateOrDict = Union[Candidate, CandidateDict]


 class GenerateContentResponsePromptFeedback(_common.BaseModel):
-  """Content filter results for a prompt sent in the request.
+  """Content filter results for a prompt sent in the request.
+
+  Note: This is sent only in the first stream chunk and only if no candidates
+  were generated due to content violations.
+  """

   block_reason: Optional[BlockedReason] = Field(
-      default=None,
+      default=None,
+      description="""Output only. The reason why the prompt was blocked.""",
   )
   block_reason_message: Optional[str] = Field(
       default=None,
-      description="""Output only. A readable
+      description="""Output only. A readable message that explains the reason why the prompt was blocked.""",
   )
   safety_ratings: Optional[list[SafetyRating]] = Field(
-      default=None,
+      default=None,
+      description="""Output only. A list of safety ratings for the prompt. There is one rating per category.""",
   )


 class GenerateContentResponsePromptFeedbackDict(TypedDict, total=False):
-  """Content filter results for a prompt sent in the request.
+  """Content filter results for a prompt sent in the request.
+
+  Note: This is sent only in the first stream chunk and only if no candidates
+  were generated due to content violations.
+  """

   block_reason: Optional[BlockedReason]
-  """Output only.
+  """Output only. The reason why the prompt was blocked."""

   block_reason_message: Optional[str]
-  """Output only. A readable
+  """Output only. A readable message that explains the reason why the prompt was blocked."""

   safety_ratings: Optional[list[SafetyRatingDict]]
-  """Output only.
+  """Output only. A list of safety ratings for the prompt. There is one rating per category."""


 GenerateContentResponsePromptFeedbackOrDict = Union[

@@ -8189,34 +8348,6 @@ class DeleteModelResponseDict(TypedDict, total=False):
 DeleteModelResponseOrDict = Union[DeleteModelResponse, DeleteModelResponseDict]


-class GenerationConfigThinkingConfig(_common.BaseModel):
-  """Config for thinking features."""
-
-  include_thoughts: Optional[bool] = Field(
-      default=None,
-      description="""Optional. Indicates whether to include thoughts in the response. If true, thoughts are returned only when available.""",
-  )
-  thinking_budget: Optional[int] = Field(
-      default=None,
-      description="""Optional. Indicates the thinking budget in tokens.""",
-  )
-
-
-class GenerationConfigThinkingConfigDict(TypedDict, total=False):
-  """Config for thinking features."""
-
-  include_thoughts: Optional[bool]
-  """Optional. Indicates whether to include thoughts in the response. If true, thoughts are returned only when available."""
-
-  thinking_budget: Optional[int]
-  """Optional. Indicates the thinking budget in tokens."""
-
-
-GenerationConfigThinkingConfigOrDict = Union[
-    GenerationConfigThinkingConfig, GenerationConfigThinkingConfigDict
-]
-
-
 class GenerationConfig(_common.BaseModel):
   """Generation config."""

@@ -8285,7 +8416,7 @@ class GenerationConfig(_common.BaseModel):
       default=None,
       description="""Optional. Controls the randomness of predictions.""",
   )
-  thinking_config: Optional[GenerationConfigThinkingConfig] = Field(
+  thinking_config: Optional[ThinkingConfig] = Field(
       default=None,
       description="""Optional. Config for thinking features. An error will be returned if this field is set for models that don't support thinking.""",
   )

@@ -8297,6 +8428,10 @@ class GenerationConfig(_common.BaseModel):
       default=None,
       description="""Optional. If specified, nucleus sampling will be used.""",
   )
+  enable_enhanced_civic_answers: Optional[bool] = Field(
+      default=None,
+      description="""Optional. Enables enhanced civic answers. It may not be available for all models.""",
+  )


 class GenerationConfigDict(TypedDict, total=False):

@@ -8359,7 +8494,7 @@ class GenerationConfigDict(TypedDict, total=False):
   temperature: Optional[float]
   """Optional. Controls the randomness of predictions."""

-  thinking_config: Optional[GenerationConfigThinkingConfigDict]
+  thinking_config: Optional[ThinkingConfigDict]
   """Optional. Config for thinking features. An error will be returned if this field is set for models that don't support thinking."""

   top_k: Optional[float]

@@ -8368,6 +8503,9 @@ class GenerationConfigDict(TypedDict, total=False):
   top_p: Optional[float]
   """Optional. If specified, nucleus sampling will be used."""

+  enable_enhanced_civic_answers: Optional[bool]
+  """Optional. Enables enhanced civic answers. It may not be available for all models."""
+

 GenerationConfigOrDict = Union[GenerationConfig, GenerationConfigDict]

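A small sketch of the new flag; the converter changes earlier in this diff reject it on the Vertex AI path, so it is only expected to be honored by the Gemini Developer API (the temperature value is arbitrary):

```python
from google.genai import types

config = types.GenerationConfig(
    temperature=0.2,
    enable_enhanced_civic_answers=True,
)
print(config.enable_enhanced_civic_answers)  # True

# Per _GenerationConfig_to_vertex above, routing this config to Vertex AI raises:
#   ValueError: enable_enhanced_civic_answers parameter is not supported in Vertex AI.
```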
@@ -9220,14 +9358,22 @@ TunedModelCheckpointOrDict = Union[


 class TunedModel(_common.BaseModel):
+  """TunedModel for the Tuned Model of a Tuning Job."""

   model: Optional[str] = Field(
       default=None,
-      description="""Output only. The resource name of the TunedModel.
+      description="""Output only. The resource name of the TunedModel.
+      Format: `projects/{project}/locations/{location}/models/{model}@{version_id}`
+      When tuning from a base model, the version_id will be 1.
+      For continuous tuning, the version id will be incremented by 1 from the
+      last version id in the parent model. E.g., `projects/{project}/locations/{location}/models/{model}@{last_version_id + 1}`
+      """,
   )
   endpoint: Optional[str] = Field(
       default=None,
-      description="""Output only. A resource name of an Endpoint.
+      description="""Output only. A resource name of an Endpoint.
+      Format: `projects/{project}/locations/{location}/endpoints/{endpoint}`.
+      """,
   )
   checkpoints: Optional[list[TunedModelCheckpoint]] = Field(
       default=None,

@@ -9238,12 +9384,20 @@ class TunedModel(_common.BaseModel):


 class TunedModelDict(TypedDict, total=False):
+  """TunedModel for the Tuned Model of a Tuning Job."""

   model: Optional[str]
-  """Output only. The resource name of the TunedModel.
+  """Output only. The resource name of the TunedModel.
+  Format: `projects/{project}/locations/{location}/models/{model}@{version_id}`
+  When tuning from a base model, the version_id will be 1.
+  For continuous tuning, the version id will be incremented by 1 from the
+  last version id in the parent model. E.g., `projects/{project}/locations/{location}/models/{model}@{last_version_id + 1}`
+  """

   endpoint: Optional[str]
-  """Output only. A resource name of an Endpoint.
+  """Output only. A resource name of an Endpoint.
+  Format: `projects/{project}/locations/{location}/endpoints/{endpoint}`.
+  """

   checkpoints: Optional[list[TunedModelCheckpointDict]]
   """The checkpoints associated with this TunedModel.

@@ -10714,22 +10868,24 @@ _CancelTuningJobParametersOrDict = Union[


 class TuningExample(_common.BaseModel):
+  """A single example for tuning."""

-  text_input: Optional[str] = Field(
-      default=None, description="""Text model input."""
-  )
   output: Optional[str] = Field(
-      default=None, description="""The expected model output."""
+      default=None, description="""Required. The expected model output."""
+  )
+  text_input: Optional[str] = Field(
+      default=None, description="""Optional. Text model input."""
   )


 class TuningExampleDict(TypedDict, total=False):
-
-  text_input: Optional[str]
-  """Text model input."""
+  """A single example for tuning."""

   output: Optional[str]
-  """The expected model output."""
+  """Required. The expected model output."""
+
+  text_input: Optional[str]
+  """Optional. Text model input."""


 TuningExampleOrDict = Union[TuningExample, TuningExampleDict]

@@ -11541,10 +11697,11 @@ class ListFilesResponse(_common.BaseModel):
       default=None, description="""Used to retain the full HTTP response."""
   )
   next_page_token: Optional[str] = Field(
-      default=None,
+      default=None,
+      description="""A token that can be sent as a `page_token` into a subsequent `ListFiles` call.""",
   )
   files: Optional[list[File]] = Field(
-      default=None, description="""The list of
+      default=None, description="""The list of `File`s."""
   )

@@ -11555,10 +11712,10 @@ class ListFilesResponseDict(TypedDict, total=False):
   """Used to retain the full HTTP response."""

   next_page_token: Optional[str]
-  """A token
+  """A token that can be sent as a `page_token` into a subsequent `ListFiles` call."""

   files: Optional[list[FileDict]]
-  """The list of
+  """The list of `File`s."""


 ListFilesResponseOrDict = Union[ListFilesResponse, ListFilesResponseDict]

@@ -12236,6 +12393,25 @@ class BatchJob(_common.BaseModel):
     return self.state.name in JOB_STATES_ENDED


+class GenerationConfigThinkingConfig(ThinkingConfig):
+  """Config for thinking feature.
+
+  This class will be deprecated. Please use `ThinkingConfig` instead.
+  """
+
+
+class GenerationConfigThinkingConfigDict(ThinkingConfigDict):
+  """Config for thinking feature.
+
+  This class will be deprecated. Please use `ThinkingConfig` instead.
+  """
+
+
+GenerationConfigThinkingConfigOrDict = Union[
+    GenerationConfigThinkingConfig, GenerationConfigThinkingConfigDict
+]
+
+
 class BatchJobDict(TypedDict, total=False):
   """Config for batches.create return value."""

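Since `GenerationConfigThinkingConfig` is now just a deprecated subclass, new code can construct `ThinkingConfig` directly; a minimal sketch (the budget value is arbitrary):

```python
from google.genai import types

# Preferred going forward: ThinkingConfig carries the include_thoughts and
# thinking_budget fields the old class exposed.
thinking = types.ThinkingConfig(include_thoughts=True, thinking_budget=1024)

# Still constructible, but slated for deprecation per the new docstring.
legacy = types.GenerationConfigThinkingConfig(include_thoughts=True)
print(thinking.thinking_budget, legacy.include_thoughts)  # 1024 True
```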
google/genai/version.py
CHANGED
-__version__ = '1.44.0'
+__version__ = '1.46.0'

{google_genai-1.44.0.dist-info → google_genai-1.46.0.dist-info}/METADATA
CHANGED

@@ -1,12 +1,11 @@
 Metadata-Version: 2.4
 Name: google-genai
-Version: 1.44.0
+Version: 1.46.0
 Summary: GenAI Python SDK
 Author-email: Google LLC <googleapis-packages@google.com>
-License: Apache-2.0
+License-Expression: Apache-2.0
 Project-URL: Homepage, https://github.com/googleapis/python-genai
 Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: Apache Software License
 Classifier: Operating System :: OS Independent
 Classifier: Programming Language :: Python
 Classifier: Programming Language :: Python :: 3

@@ -15,6 +14,7 @@ Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
 Classifier: Programming Language :: Python :: 3.13
+Classifier: Programming Language :: Python :: 3.14
 Classifier: Topic :: Internet
 Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Requires-Python: >=3.9

@@ -279,6 +279,25 @@ http_options = types.HttpOptions(
 client=Client(..., http_options=http_options)
 ```

+### Custom base url
+
+In some cases you might need a custom base url (for example, API gateway proxy
+server) and bypass some authentication checks for project, location, or API key.
+You may pass the custom base url like this:
+
+```python
+
+base_url = 'https://test-api-gateway-proxy.com'
+client = Client(
+    vertexai=True,
+    http_options={
+        'base_url': base_url,
+        'headers': {'Authorization': 'Bearer test_token'},
+    },
+)
+
+```
+
 ## Types

 Parameter types can be specified as either dictionaries(`TypedDict`) or

@@ -292,7 +311,7 @@ See the 'Create a client' section above to initialize a client.

 ### Generate Content

-#### with text content
+#### with text content input (text output)

 ```python
 response = client.models.generate_content(

@@ -301,6 +320,28 @@ response = client.models.generate_content(
 print(response.text)
 ```

+#### with text content input (image output)
+
+```python
+from google.genai import types
+
+response = client.models.generate_content(
+    model='gemini-2.5-flash-image',
+    contents='A cartoon infographic for flying sneakers',
+    config=types.GenerateContentConfig(
+        response_modalities=["IMAGE"],
+        image_config=types.ImageConfig(
+            aspect_ratio="9:16",
+        ),
+    ),
+)
+
+for part in response.parts:
+  if part.inline_data:
+    generated_image = part.as_image()
+    generated_image.show()
+```
+
 #### with uploaded file (Gemini Developer API only)
 download the file in console.

{google_genai-1.44.0.dist-info → google_genai-1.46.0.dist-info}/RECORD
CHANGED

@@ -1,17 +1,17 @@
 google/genai/__init__.py,sha256=SKz_9WQKA3R4OpJIDJlgssVfizLNDG2tuWtOD9pxrPE,729
 google/genai/_adapters.py,sha256=Kok38miNYJff2n--l0zEK_hbq0y2rWOH7k75J7SMYbQ,1744
-google/genai/_api_client.py,sha256=
+google/genai/_api_client.py,sha256=_y6XAs65RXUqCi-G4vKiTkGM7x14eSYaNbJLbbdcagc,62975
 google/genai/_api_module.py,sha256=lj8eUWx8_LBGBz-49qz6_ywWm3GYp3d8Bg5JoOHbtbI,902
 google/genai/_automatic_function_calling_util.py,sha256=xXNkJR-pzSMkeSXMz3Jw-kMHFbTJEiRJ3wocuwtWW4I,11627
 google/genai/_base_transformers.py,sha256=wljA6m4tLl4XLGlBC2DNOls5N9-X9tffBq0M7i8jgpw,1034
 google/genai/_base_url.py,sha256=E5H4dew14Y16qfnB3XRnjSCi19cJVlkaMNoM_8ip-PM,1597
-google/genai/_common.py,sha256=
+google/genai/_common.py,sha256=6_psdFl0iBRwgyIKOuGtugpTCHPGB2zZzsJCVcI_2oI,24114
 google/genai/_extra_utils.py,sha256=YLw64xzAKD_fQJp327-GGZM3kQ0sVdhNXMeDaaNkVFE,23011
-google/genai/_live_converters.py,sha256=
+google/genai/_live_converters.py,sha256=b4TZW_BobO4fPkoIiOKVDcwKmQl5FvXaucENY55_YWo,42296
 google/genai/_local_tokenizer_loader.py,sha256=cGN1F0f7hNjRIGCGTLeox7IGAZf_YcvZjSp2rCyhUak,7465
 google/genai/_mcp_utils.py,sha256=HuWJ8FUjquv40Mf_QjcL5r5yXWrS-JjINsjlOSbbyAc,3870
 google/genai/_operations_converters.py,sha256=8w4WSeA_KSyc56JcL1MTknZHIds0gF3E8YdriluUJfY,8708
-google/genai/_replay_api_client.py,sha256=
+google/genai/_replay_api_client.py,sha256=oCPZULWpmjahOn5pvY7KkCB_cksNwm7pc4nuTnqqqV8,22956
 google/genai/_test_api_client.py,sha256=4ruFIy5_1qcbKqqIBu3HSQbpSOBrxiecBtDZaTGFR1s,4797
 google/genai/_tokens_converters.py,sha256=xQY6yWtt7iJtfygfmd29d9mjjGKOpy0xG3yTdlr7APk,14137
 google/genai/_transformers.py,sha256=tx6FecRkfQbEmmgXZrb8ndIRacAfluKIFlyQilslWG0,42782

@@ -24,16 +24,16 @@ google/genai/files.py,sha256=2TkcZo7iviHA48OEjc9YnyirZ-umBUN7Z4Gdr4nHyJI,31551
 google/genai/live.py,sha256=1YfDR2VTqeHp2YJkgX2j1KHDaLcGCLN4Y6O9T4cM-4U,40996
 google/genai/live_music.py,sha256=Y7I7jh5SAKgyjBIMLboH0oTnZJ18uOT2SpRDKURvp94,6783
 google/genai/local_tokenizer.py,sha256=EKZ72cV2Zfutlo_efMOPnLRNZN4WQe57rD3G80cF340,14109
-google/genai/models.py,sha256=
+google/genai/models.py,sha256=Zhl9ns-JJfUhYt_XTycx6DzMB7OP8eA6rpPwbq6q3vQ,227545
 google/genai/operations.py,sha256=KgM5vsagUnAMGk9wKxuQYBUh_6bwrPQ9BzZvydiumQA,16208
 google/genai/pagers.py,sha256=m0SfWWn1EJs2k1On3DZx371qb8g2BRm_188ExsicIRc,7098
 google/genai/py.typed,sha256=RsMFoLwBkAvY05t6izop4UHZtqOPLiKp3GkIEizzmQY,40
 google/genai/tokens.py,sha256=4BPW0gGWFeFVk3INkuY2tfREnsrvzQDhouvRI6_F9Q8,12235
-google/genai/tunings.py,sha256=
-google/genai/types.py,sha256=
-google/genai/version.py,sha256=
-google_genai-1.
-google_genai-1.
-google_genai-1.
-google_genai-1.
-google_genai-1.
+google/genai/tunings.py,sha256=QO7n8hRXJhiw7B_Jr2dBxxnhvnKGhtkPH6721Jt2k2w,57071
+google/genai/types.py,sha256=T5I04cmt668wxI28OX9F-8xahHnmfqotSczwpc1xMgg,567055
+google/genai/version.py,sha256=meVKVLiGBg5qMEJ0AIQYDf7Icw5Mxy1_bXQeMsePcig,627
+google_genai-1.46.0.dist-info/licenses/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
+google_genai-1.46.0.dist-info/METADATA,sha256=XH1pYkjPp84lsrs-fteMEsQGrmHS2OENsfTSnKcbMPQ,46242
+google_genai-1.46.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+google_genai-1.46.0.dist-info/top_level.txt,sha256=_1QvSJIhFAGfxb79D6DhB7SUw2X6T4rwnz_LLrbcD3c,7
+google_genai-1.46.0.dist-info/RECORD,,
|
|
File without changes
|
|
File without changes
|