google-genai 1.49.0__tar.gz → 1.50.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {google_genai-1.49.0/google_genai.egg-info → google_genai-1.50.0}/PKG-INFO +1 -1
- {google_genai-1.49.0 → google_genai-1.50.0}/google/genai/_api_client.py +31 -6
- {google_genai-1.49.0 → google_genai-1.50.0}/google/genai/_local_tokenizer_loader.py +0 -9
- {google_genai-1.49.0 → google_genai-1.50.0}/google/genai/_transformers.py +2 -0
- {google_genai-1.49.0 → google_genai-1.50.0}/google/genai/documents.py +5 -2
- {google_genai-1.49.0 → google_genai-1.50.0}/google/genai/errors.py +45 -1
- {google_genai-1.49.0 → google_genai-1.50.0}/google/genai/models.py +28 -3
- {google_genai-1.49.0 → google_genai-1.50.0}/google/genai/operations.py +4 -0
- {google_genai-1.49.0 → google_genai-1.50.0}/google/genai/types.py +78 -9
- {google_genai-1.49.0 → google_genai-1.50.0}/google/genai/version.py +1 -1
- {google_genai-1.49.0 → google_genai-1.50.0/google_genai.egg-info}/PKG-INFO +1 -1
- {google_genai-1.49.0 → google_genai-1.50.0}/pyproject.toml +5 -1
- {google_genai-1.49.0 → google_genai-1.50.0}/LICENSE +0 -0
- {google_genai-1.49.0 → google_genai-1.50.0}/MANIFEST.in +0 -0
- {google_genai-1.49.0 → google_genai-1.50.0}/README.md +0 -0
- {google_genai-1.49.0 → google_genai-1.50.0}/google/genai/__init__.py +0 -0
- {google_genai-1.49.0 → google_genai-1.50.0}/google/genai/_adapters.py +0 -0
- {google_genai-1.49.0 → google_genai-1.50.0}/google/genai/_api_module.py +0 -0
- {google_genai-1.49.0 → google_genai-1.50.0}/google/genai/_automatic_function_calling_util.py +0 -0
- {google_genai-1.49.0 → google_genai-1.50.0}/google/genai/_base_transformers.py +0 -0
- {google_genai-1.49.0 → google_genai-1.50.0}/google/genai/_base_url.py +0 -0
- {google_genai-1.49.0 → google_genai-1.50.0}/google/genai/_common.py +0 -0
- {google_genai-1.49.0 → google_genai-1.50.0}/google/genai/_extra_utils.py +0 -0
- {google_genai-1.49.0 → google_genai-1.50.0}/google/genai/_live_converters.py +0 -0
- {google_genai-1.49.0 → google_genai-1.50.0}/google/genai/_mcp_utils.py +0 -0
- {google_genai-1.49.0 → google_genai-1.50.0}/google/genai/_operations_converters.py +0 -0
- {google_genai-1.49.0 → google_genai-1.50.0}/google/genai/_replay_api_client.py +0 -0
- {google_genai-1.49.0 → google_genai-1.50.0}/google/genai/_test_api_client.py +0 -0
- {google_genai-1.49.0 → google_genai-1.50.0}/google/genai/_tokens_converters.py +0 -0
- {google_genai-1.49.0 → google_genai-1.50.0}/google/genai/batches.py +0 -0
- {google_genai-1.49.0 → google_genai-1.50.0}/google/genai/caches.py +0 -0
- {google_genai-1.49.0 → google_genai-1.50.0}/google/genai/chats.py +0 -0
- {google_genai-1.49.0 → google_genai-1.50.0}/google/genai/client.py +0 -0
- {google_genai-1.49.0 → google_genai-1.50.0}/google/genai/file_search_stores.py +0 -0
- {google_genai-1.49.0 → google_genai-1.50.0}/google/genai/files.py +0 -0
- {google_genai-1.49.0 → google_genai-1.50.0}/google/genai/live.py +0 -0
- {google_genai-1.49.0 → google_genai-1.50.0}/google/genai/live_music.py +0 -0
- {google_genai-1.49.0 → google_genai-1.50.0}/google/genai/local_tokenizer.py +0 -0
- {google_genai-1.49.0 → google_genai-1.50.0}/google/genai/pagers.py +0 -0
- {google_genai-1.49.0 → google_genai-1.50.0}/google/genai/py.typed +0 -0
- {google_genai-1.49.0 → google_genai-1.50.0}/google/genai/tokens.py +0 -0
- {google_genai-1.49.0 → google_genai-1.50.0}/google/genai/tunings.py +0 -0
- {google_genai-1.49.0 → google_genai-1.50.0}/google_genai.egg-info/SOURCES.txt +0 -0
- {google_genai-1.49.0 → google_genai-1.50.0}/google_genai.egg-info/dependency_links.txt +0 -0
- {google_genai-1.49.0 → google_genai-1.50.0}/google_genai.egg-info/requires.txt +0 -0
- {google_genai-1.49.0 → google_genai-1.50.0}/google_genai.egg-info/top_level.txt +0 -0
- {google_genai-1.49.0 → google_genai-1.50.0}/setup.cfg +0 -0
```diff
--- google_genai-1.49.0/google/genai/_api_client.py
+++ google_genai-1.50.0/google/genai/_api_client.py
@@ -37,14 +37,12 @@ import time
 from typing import Any, AsyncIterator, Iterator, Optional, Tuple, TYPE_CHECKING, Union
 from urllib.parse import urlparse
 from urllib.parse import urlunparse
-import warnings
 
 import anyio
 import certifi
 import google.auth
 import google.auth.credentials
 from google.auth.credentials import Credentials
-from google.auth.transport.requests import Request
 import httpx
 from pydantic import BaseModel
 from pydantic import ValidationError
```
```diff
@@ -197,6 +195,7 @@ def load_auth(*, project: Union[str, None]) -> Tuple[Credentials, str]:
 
 
 def refresh_auth(credentials: Credentials) -> Credentials:
+  from google.auth.transport.requests import Request
   credentials.refresh(Request())  # type: ignore[no-untyped-call]
   return credentials
 
```
```diff
@@ -1347,9 +1346,21 @@ class BaseApiClient:
 
     session_response = self._request(http_request, http_options, stream=True)
     for chunk in session_response.segments():
-
-
-
+      chunk_dump = json.dumps(chunk)
+      try:
+        if chunk_dump.startswith('{"error":'):
+          chunk_json = json.loads(chunk_dump)
+          errors.APIError.raise_error(
+              chunk_json.get('error', {}).get('code'),
+              chunk_json,
+              session_response,
+          )
+      except json.decoder.JSONDecodeError:
+        logger.debug(
+            'Failed to decode chunk that contains an error: %s' % chunk_dump
+        )
+        pass
+      yield SdkHttpResponse(headers=session_response.headers, body=chunk_dump)
 
   async def async_request(
       self,
```
```diff
@@ -1383,7 +1394,21 @@ class BaseApiClient:
 
     async def async_generator():  # type: ignore[no-untyped-def]
       async for chunk in response:
-
+        chunk_dump = json.dumps(chunk)
+        try:
+          if chunk_dump.startswith('{"error":'):
+            chunk_json = json.loads(chunk_dump)
+            await errors.APIError.raise_error_async(
+                chunk_json.get('error', {}).get('code'),
+                chunk_json,
+                response,
+            )
+        except json.decoder.JSONDecodeError:
+          logger.debug(
+              'Failed to decode chunk that contains an error: %s' % chunk_dump
+          )
+          pass
+        yield SdkHttpResponse(headers=response.headers, body=chunk_dump)
 
     return async_generator()  # type: ignore[no-untyped-call]
 
```
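These two `_api_client.py` hunks change how error payloads inside a stream are handled: a chunk whose JSON dump starts with `{"error":` is now parsed and re-raised through `errors.APIError.raise_error` / `raise_error_async` instead of being yielded to the caller as ordinary data. A minimal sketch of the caller-visible effect, assuming an API key in the environment (model name and prompt are illustrative):

```python
from google import genai
from google.genai import errors

client = genai.Client()  # reads GOOGLE_API_KEY from the environment

try:
    for chunk in client.models.generate_content_stream(
        model='gemini-2.5-flash', contents='Hello'
    ):
        print(chunk.text)
except errors.APIError as e:
    # As of 1.50.0, an {"error": ...} chunk arriving mid-stream raises a
    # ClientError/ServerError instead of surfacing as a normal chunk body.
    print(e.code, e.message)
```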
```diff
--- google_genai-1.49.0/google/genai/_local_tokenizer_loader.py
+++ google_genai-1.50.0/google/genai/_local_tokenizer_loader.py
@@ -28,9 +28,6 @@ from sentencepiece import sentencepiece_model_pb2
 
 # Source of truth: https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models
 _GEMINI_MODELS_TO_TOKENIZER_NAMES = {
-    "gemini-1.0-pro": "gemma2",
-    "gemini-1.5-pro": "gemma2",
-    "gemini-1.5-flash": "gemma2",
     "gemini-2.5-pro": "gemma3",
     "gemini-2.5-flash": "gemma3",
     "gemini-2.5-flash-lite": "gemma3",
```
```diff
@@ -38,12 +35,6 @@ _GEMINI_MODELS_TO_TOKENIZER_NAMES = {
     "gemini-2.0-flash-lite": "gemma3",
 }
 _GEMINI_STABLE_MODELS_TO_TOKENIZER_NAMES = {
-    "gemini-1.0-pro-001": "gemma2",
-    "gemini-1.0-pro-002": "gemma2",
-    "gemini-1.5-pro-001": "gemma2",
-    "gemini-1.5-flash-001": "gemma2",
-    "gemini-1.5-flash-002": "gemma2",
-    "gemini-1.5-pro-002": "gemma2",
     "gemini-2.5-pro-preview-06-05": "gemma3",
     "gemini-2.5-pro-preview-05-06": "gemma3",
     "gemini-2.5-pro-exp-03-25": "gemma3",
```
```diff
--- google_genai-1.49.0/google/genai/_transformers.py
+++ google_genai-1.50.0/google/genai/_transformers.py
@@ -199,6 +199,8 @@ def _resource_name(
 def t_model(client: _api_client.BaseApiClient, model: str) -> str:
   if not model:
     raise ValueError('model is required.')
+  if '..' in model or '?' in model or '&' in model:
+    raise ValueError('invalid model parameter.')
   if client.vertexai:
     if (
         model.startswith('projects/')
```
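The `t_model` hunk adds a guard before the model name is spliced into a request URL: names containing `..`, `?`, or `&` are rejected up front. A hedged sketch of what now fails (the model string below is deliberately malformed and hypothetical):

```python
from google import genai

client = genai.Client()  # assumes GOOGLE_API_KEY is set

try:
    # Path-traversal / query-injection style model names are rejected
    # before any request is built.
    client.models.get(model='gemini-2.5-flash/../tunedModels/other?alt=json')
except ValueError as e:
    print(e)  # invalid model parameter.
```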
```diff
--- google_genai-1.49.0/google/genai/documents.py
+++ google_genai-1.50.0/google/genai/documents.py
@@ -15,6 +15,7 @@
 
 # Code generated by the Google Gen AI SDK generator DO NOT EDIT.
 
+from functools import partial
 import json
 import logging
 from typing import Any, Optional, Union
```
```diff
@@ -327,9 +328,10 @@ class Documents(_api_module.BaseModule):
       for document in client.documents.list(parent='rag_store_name'):
         print(f"document: {document.name} - {document.display_name}")
     """
+    list_request = partial(self._list, parent=parent)
     return Pager(
         'documents',
-
+        list_request,
         self._list(parent=parent, config=config),
         config,
     )
```
```diff
@@ -541,9 +543,10 @@ class AsyncDocuments(_api_module.BaseModule):
           client.documents.list(parent='rag_store_name'):
         print(f"document: {document.name} - {document.display_name}")
     """
+    list_request = partial(self._list, parent=parent)
     return AsyncPager(
         'documents',
-
+        list_request,
         await self._list(parent=parent, config=config),
         config,
     )
```
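Both `documents.py` hunks hand the `Pager` a request callable with `parent` pre-bound via `functools.partial`; the pager re-invokes that callable with only a paging config, so without the binding the `parent` argument would be lost on subsequent pages. A standalone sketch of the pattern with a stand-in list function (all names hypothetical):

```python
from functools import partial


def fake_list(*, parent: str, config: dict | None = None) -> dict:
    # Stand-in for Documents._list: returns one "page" for `parent`.
    return {'parent': parent, 'page_token': (config or {}).get('page_token')}


# The pager calls list_request(config={'page_token': ...}) for the next page,
# so `parent` must already be baked into the callable.
list_request = partial(fake_list, parent='fileSearchStores/my-store')
print(list_request(config={'page_token': 'next-123'}))
# {'parent': 'fileSearchStores/my-store', 'page_token': 'next-123'}
```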
```diff
--- google_genai-1.49.0/google/genai/errors.py
+++ google_genai-1.50.0/google/genai/errors.py
@@ -103,7 +103,30 @@ class APIError(Exception):
     else:
       response_json = response.body_segments[0].get('error', {})
 
-    status_code = response.status_code
+    cls.raise_error(response.status_code, response_json, response)
+
+  @classmethod
+  def raise_error(
+      cls,
+      status_code: int,
+      response_json: Any,
+      response: Optional[
+          Union['ReplayResponse', httpx.Response, 'aiohttp.ClientResponse']
+      ],
+  ) -> None:
+    """Raises an appropriate APIError subclass based on the status code.
+
+    Args:
+      status_code: The HTTP status code of the response.
+      response_json: The JSON body of the response, or a dict containing error
+        details.
+      response: The original response object.
+
+    Raises:
+      ClientError: If the status code is in the 4xx range.
+      ServerError: If the status code is in the 5xx range.
+      APIError: For other error status codes.
+    """
     if 400 <= status_code < 500:
       raise ClientError(status_code, response_json, response)
     elif 500 <= status_code < 600:
```
```diff
@@ -162,6 +185,27 @@ class APIError(Exception):
     except ImportError:
       raise ValueError(f'Unsupported response type: {type(response)}')
 
+    await cls.raise_error_async(status_code, response_json, response)
+
+  @classmethod
+  async def raise_error_async(
+      cls, status_code: int, response_json: Any, response: Optional[
+          Union['ReplayResponse', httpx.Response, 'aiohttp.ClientResponse']
+      ]
+  ) -> None:
+    """Raises an appropriate APIError subclass based on the status code.
+
+    Args:
+      status_code: The HTTP status code of the response.
+      response_json: The JSON body of the response, or a dict containing error
+        details.
+      response: The original response object.
+
+    Raises:
+      ClientError: If the status code is in the 4xx range.
+      ServerError: If the status code is in the 5xx range.
+      APIError: For other error status codes.
+    """
     if 400 <= status_code < 500:
       raise ClientError(status_code, response_json, response)
     elif 500 <= status_code < 600:
```
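These `errors.py` hunks factor the status-code dispatch into reusable `raise_error` / `raise_error_async` classmethods, which is what lets the streaming code in `_api_client.py` raise directly from a parsed chunk. A small usage sketch with a synthetic payload (passing `None` as the response is permitted by the new `Optional` signature):

```python
from google.genai import errors

payload = {'error': {'code': 429, 'message': 'Resource exhausted'}}

try:
    errors.APIError.raise_error(429, payload, None)
except errors.ClientError as e:
    print(type(e).__name__, e.code)  # ClientError 429
```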
```diff
--- google_genai-1.49.0/google/genai/models.py
+++ google_genai-1.50.0/google/genai/models.py
@@ -2759,6 +2759,21 @@ def _Model_from_mldev(
       getv(from_object, ['supportedGenerationMethods']),
   )
 
+  if getv(from_object, ['temperature']) is not None:
+    setv(to_object, ['temperature'], getv(from_object, ['temperature']))
+
+  if getv(from_object, ['maxTemperature']) is not None:
+    setv(to_object, ['max_temperature'], getv(from_object, ['maxTemperature']))
+
+  if getv(from_object, ['topP']) is not None:
+    setv(to_object, ['top_p'], getv(from_object, ['topP']))
+
+  if getv(from_object, ['topK']) is not None:
+    setv(to_object, ['top_k'], getv(from_object, ['topK']))
+
+  if getv(from_object, ['thinking']) is not None:
+    setv(to_object, ['thinking'], getv(from_object, ['thinking']))
+
   return to_object
 
 
```
```diff
@@ -2905,7 +2920,7 @@ def _RecontextImageConfig_to_vertex(
   if getv(from_object, ['base_steps']) is not None:
     setv(
         parent_object,
-        ['parameters', '
+        ['parameters', 'baseSteps'],
         getv(from_object, ['base_steps']),
     )
 
```
```diff
@@ -4521,7 +4536,12 @@ class Models(_api_module.BaseModule):
     else:
       path = '{models_url}'
     query_params = request_dict.get('_query')
-    if query_params:
+    if query_params and query_params.get('filter'):
+      query_param_filter = query_params.pop('filter')
+      path = f'{path}?filter={query_param_filter}'
+      if query_params:
+        path += f'&{urlencode(query_params)}'
+    elif query_params:
       path = f'{path}?{urlencode(query_params)}'
     # TODO: remove the hack that pops config.
     request_dict.pop('config', None)
```
```diff
@@ -6361,7 +6381,12 @@ class AsyncModels(_api_module.BaseModule):
     else:
       path = '{models_url}'
     query_params = request_dict.get('_query')
-    if query_params:
+    if query_params and query_params.get('filter'):
+      query_param_filter = query_params.pop('filter')
+      path = f'{path}?filter={query_param_filter}'
+      if query_params:
+        path += f'&{urlencode(query_params)}'
+    elif query_params:
       path = f'{path}?{urlencode(query_params)}'
     # TODO: remove the hack that pops config.
     request_dict.pop('config', None)
```
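The two list-models hunks splice a `filter` expression into the URL verbatim instead of running it through `urlencode`, which percent-encodes the `=` inside expressions like `base_model=...`. A self-contained illustration of the difference (stdlib only):

```python
from urllib.parse import urlencode

query_params = {'filter': 'base_model=gemini-2.5-flash', 'page_size': 5}

# 1.50.0 behavior per this diff: the filter value is appended unencoded.
params = dict(query_params)
path = 'models?filter=' + params.pop('filter')
if params:
    path += f'&{urlencode(params)}'
print(path)  # models?filter=base_model=gemini-2.5-flash&page_size=5

# 1.49.0 behavior: urlencode escaped '=' inside the filter expression.
print(f'models?{urlencode(query_params)}')
# models?filter=base_model%3Dgemini-2.5-flash&page_size=5
```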
```diff
--- google_genai-1.49.0/google/genai/operations.py
+++ google_genai-1.50.0/google/genai/operations.py
@@ -282,6 +282,8 @@ class Operations(_api_module.BaseModule):
       response_operation = operation.from_api_response(
           response_dict, is_vertex_ai=True
       )
+
+      self._api_client._verify_response(response_operation)  # type: ignore [arg-type]
       return response_operation  # type: ignore[no-any-return]
     else:
       response_dict = self._get_videos_operation(
```
```diff
@@ -291,6 +293,8 @@ class Operations(_api_module.BaseModule):
       response_operation = operation.from_api_response(
           response_dict, is_vertex_ai=False
       )
+
+      self._api_client._verify_response(response_operation)  # type: ignore [arg-type]
       return response_operation  # type: ignore[no-any-return]
 
 
```
```diff
--- google_genai-1.49.0/google/genai/types.py
+++ google_genai-1.50.0/google/genai/types.py
@@ -114,6 +114,8 @@ else:
   HttpxAsyncClient = None
 
 logger = logging.getLogger('google_genai.types')
+_from_json_schema_warning_logged = False
+_json_schema_warning_logged = False
 
 T = typing.TypeVar('T', bound='GenerateContentResponse')
 
```
```diff
@@ -2026,7 +2028,9 @@ class Schema(_common.BaseModel):
     Schema](https://json-schema.org/)
     """
 
-
+    global _json_schema_warning_logged
+    if not _json_schema_warning_logged:
+      info_message = """
 Note: Conversion of fields that are not included in the JSONSchema class are
 ignored.
 Json Schema is now supported natively by both Vertex AI and Gemini API. Users
```
```diff
@@ -2041,7 +2045,9 @@ are recommended to pass/receive Json Schema directly to/from the API. For exampl
 FunctionDeclaration.response_json_schema, which accepts [JSON
 Schema](https://json-schema.org/)
 """
-
+      logger.info(info_message)
+      _json_schema_warning_logged = True
+
     json_schema_field_names: set[str] = set(JSONSchema.model_fields.keys())
     schema_field_names: tuple[str] = (
         'items',
```
```diff
@@ -2148,7 +2154,9 @@ are recommended to pass/receive Json Schema directly to/from the API. For exampl
       raise_error_on_unsupported_field is set to True. Or if the JSONSchema
       is not compatible with the specified API option.
     """
-
+    global _from_json_schema_warning_logged
+    if not _from_json_schema_warning_logged:
+      info_message = """
 Note: Conversion of fields that are not included in the JSONSchema class are ignored.
 Json Schema is now supported natively by both Vertex AI and Gemini API. Users
 are recommended to pass/receive Json Schema directly to/from the API. For example:
```
```diff
@@ -2162,7 +2170,9 @@ are recommended to pass/receive Json Schema directly to/from the API. For exampl
 FunctionDeclaration.response_json_schema, which accepts [JSON
 Schema](https://json-schema.org/)
 """
-
+      logger.info(info_message)
+      _from_json_schema_warning_logged = True
+
     google_schema_field_names: set[str] = set(cls.model_fields.keys())
     schema_field_names: tuple[str, ...] = (
         'items',
```
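The four `Schema` hunks wrap the long JSON-schema advisory in a log-once guard backed by the module-level flags added above, so the note is emitted at most once per process rather than on every conversion. The pattern in isolation:

```python
import logging

logger = logging.getLogger('example')
_warning_logged = False


def convert(schema: dict) -> dict:
    """Stand-in converter that logs its advisory only on the first call."""
    global _warning_logged
    if not _warning_logged:
        logger.info('Note: fields not in JSONSchema are ignored.')
        _warning_logged = True
    return schema
```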
```diff
@@ -6311,7 +6321,7 @@ class GenerateContentResponse(_common.BaseModel):
       non_text_parts = []
       for part in self.candidates[0].content.parts:
         for field_name, field_value in part.model_dump(
-            exclude={'text', 'thought'}
+            exclude={'text', 'thought', 'thought_signature'}
         ).items():
           if field_value is not None:
             non_text_parts.append(field_name)
```
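This hunk adds `thought_signature` to the fields excluded when `GenerateContentResponse.text` scans for non-text parts, so a text part that merely carries a thought signature no longer counts as "non-text". A quick check of the scan logic with a synthetic part:

```python
from google.genai import types

part = types.Part(text='hello', thought_signature=b'sig')
non_text = [
    name
    for name, value in part.model_dump(
        exclude={'text', 'thought', 'thought_signature'}
    ).items()
    if value is not None
]
print(non_text)  # [] -> no "non-text parts" warning for this part
```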
```diff
@@ -8479,6 +8489,38 @@ class Model(_common.BaseModel):
   checkpoints: Optional[list[Checkpoint]] = Field(
       default=None, description="""The checkpoints of the model."""
   )
+  temperature: Optional[float] = Field(
+      default=None,
+      description="""Temperature value used for sampling set when the dataset was saved.
+      This value is used to tune the degree of randomness.""",
+  )
+  max_temperature: Optional[float] = Field(
+      default=None,
+      description="""The maximum temperature value used for sampling set when the
+      dataset was saved. This value is used to tune the degree of randomness.""",
+  )
+  top_p: Optional[float] = Field(
+      default=None,
+      description="""Optional. Specifies the nucleus sampling threshold. The model
+      considers only the smallest set of tokens whose cumulative probability is
+      at least `top_p`. This helps generate more diverse and less repetitive
+      responses. For example, a `top_p` of 0.9 means the model considers tokens
+      until the cumulative probability of the tokens to select from reaches 0.9.
+      It's recommended to adjust either temperature or `top_p`, but not both.""",
+  )
+  top_k: Optional[int] = Field(
+      default=None,
+      description="""Optional. Specifies the top-k sampling threshold. The model
+      considers only the top k most probable tokens for the next token. This can
+      be useful for generating more coherent and less random text. For example,
+      a `top_k` of 40 means the model will choose the next word from the 40 most
+      likely words.""",
+  )
+  thinking: Optional[bool] = Field(
+      default=None,
+      description="""Whether the model supports thinking features. If true, thoughts are
+      returned only if the model supports thought and thoughts are available.""",
+  )
 
 
 class ModelDict(TypedDict, total=False):
```
```diff
@@ -8525,6 +8567,33 @@ class ModelDict(TypedDict, total=False):
   checkpoints: Optional[list[CheckpointDict]]
   """The checkpoints of the model."""
 
+  temperature: Optional[float]
+  """Temperature value used for sampling set when the dataset was saved.
+  This value is used to tune the degree of randomness."""
+
+  max_temperature: Optional[float]
+  """The maximum temperature value used for sampling set when the
+  dataset was saved. This value is used to tune the degree of randomness."""
+
+  top_p: Optional[float]
+  """Optional. Specifies the nucleus sampling threshold. The model
+  considers only the smallest set of tokens whose cumulative probability is
+  at least `top_p`. This helps generate more diverse and less repetitive
+  responses. For example, a `top_p` of 0.9 means the model considers tokens
+  until the cumulative probability of the tokens to select from reaches 0.9.
+  It's recommended to adjust either temperature or `top_p`, but not both."""
+
+  top_k: Optional[int]
+  """Optional. Specifies the top-k sampling threshold. The model
+  considers only the top k most probable tokens for the next token. This can
+  be useful for generating more coherent and less random text. For example,
+  a `top_k` of 40 means the model will choose the next word from the 40 most
+  likely words."""
+
+  thinking: Optional[bool]
+  """Whether the model supports thinking features. If true, thoughts are
+  returned only if the model supports thought and thoughts are available."""
+
 
 ModelOrDict = Union[Model, ModelDict]
```
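The `Model` / `ModelDict` hunks expose the default sampling settings that `_Model_from_mldev` now copies from the Gemini API response. After the upgrade they are plain optional attributes (sketch assumes an API key in the environment; printed values vary by model):

```python
from google import genai

client = genai.Client()
model = client.models.get(model='gemini-2.5-flash')
print(model.temperature, model.max_temperature)
print(model.top_p, model.top_k, model.thinking)
```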
```diff
@@ -10126,7 +10195,7 @@ class AutoraterConfig(_common.BaseModel):
       endpoint to use.
 
       Publisher model format:
-      `projects/{project}/locations/{location}/publishers
+      `projects/{project}/locations/{location}/publishers/{publisher}/models/{model}`
 
       Tuned model endpoint format:
       `projects/{project}/locations/{location}/endpoints/{endpoint}`""",
```
```diff
@@ -10158,7 +10227,7 @@ class AutoraterConfigDict(TypedDict, total=False):
       endpoint to use.
 
       Publisher model format:
-      `projects/{project}/locations/{location}/publishers
+      `projects/{project}/locations/{location}/publishers/{publisher}/models/{model}`
 
       Tuned model endpoint format:
       `projects/{project}/locations/{location}/endpoints/{endpoint}`"""
```
```diff
@@ -11261,7 +11330,7 @@ class TuningJob(_common.BaseModel):
       description="""Tuning Spec for open sourced and third party Partner models.""",
   )
   evaluation_config: Optional[EvaluationConfig] = Field(
-      default=None, description=""""""
+      default=None, description="""Evaluation config for the tuning job."""
   )
   custom_base_model: Optional[str] = Field(
       default=None,
```
```diff
@@ -11361,7 +11430,7 @@ class TuningJobDict(TypedDict, total=False):
   """Tuning Spec for open sourced and third party Partner models."""
 
   evaluation_config: Optional[EvaluationConfigDict]
-  """"""
+  """Evaluation config for the tuning job."""
 
   custom_base_model: Optional[str]
   """Optional. The user-provided path to custom model weights. Set this field to tune a custom model. The path must be a Cloud Storage directory that contains the model weights in .safetensors format along with associated model metadata files. If this field is set, the base_model field must still be set to indicate which base model the custom model is derived from. This feature is only available for open source models."""
```
```diff
--- google_genai-1.49.0/pyproject.toml
+++ google_genai-1.50.0/pyproject.toml
@@ -3,7 +3,7 @@ requires = ["setuptools", "wheel", "twine>=6.1.0", "packaging>=24.2", "pkginfo>=
 
 [project]
 name = "google-genai"
-version = "1.49.0"
+version = "1.50.0"
 description = "GenAI Python SDK"
 readme = "README.md"
 license = "Apache-2.0"
```
```diff
@@ -55,3 +55,7 @@ plugins = ["pydantic.mypy"]
 # 'import-not-found' and 'import-untyped' are environment specific
 disable_error_code = ["import-not-found", "import-untyped", "unused-ignore"]
 strict = true
+
+[tool.pytest.ini_options]
+asyncio_mode = "auto"
+asyncio_default_fixture_loop_scope = "function"
```
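The new `[tool.pytest.ini_options]` table is a development-only change: with pytest-asyncio's `asyncio_mode = "auto"`, `async def` tests are collected and run without an explicit `@pytest.mark.asyncio` marker, and fixture event loops default to function scope. A trivial test that would now run as-is:

```python
import asyncio


async def test_sleep_yields_control():
    # Collected by pytest-asyncio in auto mode without a marker.
    assert await asyncio.sleep(0) is None
```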