google-genai 1.10.0__py3-none-any.whl → 1.12.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- google/genai/_api_client.py +100 -31
- google/genai/_automatic_function_calling_util.py +4 -24
- google/genai/_common.py +40 -37
- google/genai/_extra_utils.py +72 -12
- google/genai/_live_converters.py +2487 -0
- google/genai/_replay_api_client.py +32 -26
- google/genai/_transformers.py +119 -25
- google/genai/batches.py +45 -45
- google/genai/caches.py +126 -126
- google/genai/chats.py +13 -9
- google/genai/client.py +3 -2
- google/genai/errors.py +6 -6
- google/genai/files.py +38 -38
- google/genai/live.py +138 -1029
- google/genai/models.py +455 -387
- google/genai/operations.py +33 -33
- google/genai/pagers.py +2 -2
- google/genai/py.typed +1 -0
- google/genai/tunings.py +70 -70
- google/genai/types.py +964 -45
- google/genai/version.py +1 -1
- {google_genai-1.10.0.dist-info → google_genai-1.12.0.dist-info}/METADATA +1 -1
- google_genai-1.12.0.dist-info/RECORD +29 -0
- {google_genai-1.10.0.dist-info → google_genai-1.12.0.dist-info}/WHEEL +1 -1
- google_genai-1.10.0.dist-info/RECORD +0 -27
- {google_genai-1.10.0.dist-info → google_genai-1.12.0.dist-info}/licenses/LICENSE +0 -0
- {google_genai-1.10.0.dist-info → google_genai-1.12.0.dist-info}/top_level.txt +0 -0
google/genai/_replay_api_client.py
CHANGED
@@ -30,11 +30,10 @@ from requests.exceptions import HTTPError
 
 from . import errors
 from ._api_client import BaseApiClient
-from ._api_client import HttpOptions
 from ._api_client import HttpRequest
 from ._api_client import HttpResponse
 from ._common import BaseModel
-from .types import HttpOptionsOrDict
+from .types import HttpOptions, HttpOptionsOrDict
 from .types import GenerateVideosOperation
 
 
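The only substantive change in this hunk is that `HttpOptions` now comes from the public `google.genai.types` module instead of the private `_api_client` copy. A minimal usage sketch of the public class (illustrative only; the `api_version` value and placeholder key are assumptions, not something this diff changes):

```python
from google import genai
from google.genai import types

# types.HttpOptions is the public configuration object the replay client now
# imports; passing it to Client is the usual way to override API defaults.
client = genai.Client(
    api_key='YOUR_API_KEY',  # placeholder
    http_options=types.HttpOptions(api_version='v1beta'),
)
```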
@@ -48,7 +47,7 @@ def _redact_language_label(language_label: str) -> str:
   return re.sub(r'gl-python/', '{LANGUAGE_LABEL}/', language_label)
 
 
-def _redact_request_headers(headers):
+def _redact_request_headers(headers: dict[str, str]) -> dict[str, str]:
   """Redacts headers that should not be recorded."""
   redacted_headers = {}
   for header_name, header_value in headers.items():
@@ -111,29 +110,36 @@ def _redact_project_location_path(path: str) -> str:
   return path
 
 
-def _redact_request_body(body: dict[str, object]):
+def _redact_request_body(body: dict[str, object]) -> None:
   """Redacts fields in the request body in place."""
   for key, value in body.items():
     if isinstance(value, str):
       body[key] = _redact_project_location_path(value)
 
 
-def redact_http_request(http_request: HttpRequest):
+def redact_http_request(http_request: HttpRequest) -> None:
   http_request.headers = _redact_request_headers(http_request.headers)
   http_request.url = _redact_request_url(http_request.url)
   if not isinstance(http_request.data, bytes):
     _redact_request_body(http_request.data)
 
 
-def _current_file_path_and_line():
+def _current_file_path_and_line() -> str:
   """Prints the current file path and line number."""
-
-
-
-
+  current_frame = inspect.currentframe()
+  if (
+      current_frame is not None
+      and current_frame.f_back is not None
+      and current_frame.f_back.f_back is not None
+  ):
+    frame = current_frame.f_back.f_back
+    filepath = inspect.getfile(frame)
+    lineno = frame.f_lineno
+    return f'File: {filepath}, Line: {lineno}'
+  return ''
 
 
-def _debug_print(message: str):
+def _debug_print(message: str) -> None:
   print(
       'DEBUG (test',
       os.environ.get('PYTEST_CURRENT_TEST'),
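The rewritten `_current_file_path_and_line` walks two frames up the call stack with `inspect.currentframe()` and returns an empty string when any frame is missing. A standalone sketch of the same pattern (the helper name here is illustrative, not part of the SDK):

```python
import inspect


def caller_location() -> str:
  # Two f_back hops: skip this helper's own frame and its direct caller, so
  # the location reported is the caller-of-the-caller, mirroring the diff.
  current_frame = inspect.currentframe()
  if (
      current_frame is not None
      and current_frame.f_back is not None
      and current_frame.f_back.f_back is not None
  ):
    frame = current_frame.f_back.f_back
    return f'File: {inspect.getfile(frame)}, Line: {frame.f_lineno}'
  return ''
```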
@@ -212,33 +218,33 @@ class ReplayApiClient(BaseApiClient):
         'GOOGLE_GENAI_REPLAYS_DIRECTORY', None
     )
     # Valid replay modes are replay-only or record-and-replay.
-    self.replay_session = None
+    self.replay_session: Union[ReplayFile, None] = None
     self._mode = mode
     self._replay_id = replay_id
 
-  def initialize_replay_session(self, replay_id: str):
+  def initialize_replay_session(self, replay_id: str) -> None:
     self._replay_id = replay_id
     self._initialize_replay_session()
 
-  def _get_replay_file_path(self):
+  def _get_replay_file_path(self) -> str:
     return self._generate_file_path_from_replay_id(
         self.replays_directory, self._replay_id
     )
 
-  def _should_call_api(self):
+  def _should_call_api(self) -> bool:
     return self._mode in ['record', 'api'] or (
         self._mode == 'auto'
         and not os.path.isfile(self._get_replay_file_path())
     )
 
-  def _should_update_replay(self):
+  def _should_update_replay(self) -> bool:
     return self._should_call_api() and self._mode != 'api'
 
-  def _initialize_replay_session_if_not_loaded(self):
+  def _initialize_replay_session_if_not_loaded(self) -> None:
     if not self.replay_session:
       self._initialize_replay_session()
 
-  def _initialize_replay_session(self):
+  def _initialize_replay_session(self) -> None:
     _debug_print('Test is using replay id: ' + self._replay_id)
     self._replay_index = 0
     self._sdk_response_index = 0
@@ -259,7 +265,7 @@ class ReplayApiClient(BaseApiClient):
           replay_id=self._replay_id, interactions=[]
       )
 
-  def _generate_file_path_from_replay_id(self, replay_directory, replay_id):
+  def _generate_file_path_from_replay_id(self, replay_directory: Optional[str], replay_id: str) -> str:
     session_parts = replay_id.split('/')
     if len(session_parts) < 3:
       raise ValueError(
@@ -273,7 +279,7 @@ class ReplayApiClient(BaseApiClient):
     path_parts.extend(session_parts)
     return os.path.join(*path_parts) + '.json'
 
-  def close(self):
+  def close(self) -> None:
     if not self._should_update_replay() or not self.replay_session:
       return
     replay_file_path = self._get_replay_file_path()
@@ -286,7 +292,7 @@ class ReplayApiClient(BaseApiClient):
       self,
       http_request: HttpRequest,
      http_response: Union[HttpResponse, errors.APIError, bytes],
-  ):
+  ) -> None:
     if not self._should_update_replay():
       return
     redact_http_request(http_request)
@@ -334,7 +340,7 @@ class ReplayApiClient(BaseApiClient):
       self,
       http_request: HttpRequest,
       interaction: ReplayInteraction,
-  ):
+  ) -> None:
     assert http_request.url == interaction.request.url
     assert http_request.headers == interaction.request.headers, (
         'Request headers mismatch:\n'
@@ -378,7 +384,7 @@ class ReplayApiClient(BaseApiClient):
         byte_stream=interaction.response.byte_segments,
     )
 
-  def _verify_response(self, response_model: BaseModel):
+  def _verify_response(self, response_model: BaseModel) -> None:
     if self._mode == 'api':
       return
     if not self.replay_session:
@@ -389,7 +395,7 @@ class ReplayApiClient(BaseApiClient):
     if isinstance(response_model, list):
       response_model = response_model[0]
     if response_model and 'http_headers' in response_model.model_fields:
-      response_model.http_headers.pop('Date', None)
+      response_model.http_headers.pop('Date', None)  # type: ignore[attr-defined]
     interaction.response.sdk_response_segments.append(
         response_model.model_dump(exclude_none=True)
     )
@@ -548,7 +554,7 @@ class ReplayApiClient(BaseApiClient):
 
   def download_file(
       self, path: str, *, http_options: Optional[HttpOptionsOrDict] = None
-  ):
+  ) -> Union[HttpResponse, bytes, Any]:
     self._initialize_replay_session_if_not_loaded()
     request = self._build_request(
         'get', path=path, request_dict={}, http_options=http_options
@@ -569,7 +575,7 @@ class ReplayApiClient(BaseApiClient):
 
   async def async_download_file(
       self, path: str, *, http_options: Optional[HttpOptionsOrDict] = None
-  ):
+  ) -> Any:
     self._initialize_replay_session_if_not_loaded()
     request = self._build_request(
         'get', path=path, request_dict={}, http_options=http_options
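The remaining hunks in this file only add return-type annotations, but the two newly annotated predicates document the replay-mode logic. A standalone restatement of the checks shown above (illustrative helper names, not SDK API):

```python
import os


def should_call_api(mode: str, replay_file: str) -> bool:
  # 'record' and 'api' always hit the real API; 'auto' only does so when no
  # replay file exists yet on disk.
  return mode in ['record', 'api'] or (
      mode == 'auto' and not os.path.isfile(replay_file)
  )


def should_update_replay(mode: str, replay_file: str) -> bool:
  # Any call that reached the real API gets recorded, except in pure 'api' mode.
  return should_call_api(mode, replay_file) and mode != 'api'
```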
google/genai/_transformers.py
CHANGED
@@ -53,7 +53,7 @@ def _resource_name(
     *,
     collection_identifier: str,
     collection_hierarchy_depth: int = 2,
-):
+) -> str:
   # pylint: disable=line-too-long
   """Prepends resource name with project, location, collection_identifier if needed.
 
@@ -140,7 +140,7 @@ def _resource_name(
   return resource_name
 
 
-def t_model(client: _api_client.BaseApiClient, model: str):
+def t_model(client: _api_client.BaseApiClient, model: str) -> str:
   if not model:
     raise ValueError('model is required.')
   if client.vertexai:
@@ -211,7 +211,7 @@ def t_extract_models(
     return []
 
 
-def t_caches_model(api_client: _api_client.BaseApiClient, model: str):
+def t_caches_model(api_client: _api_client.BaseApiClient, model: str) -> Optional[str]:
   model = t_model(api_client, model)
   if not model:
     return None
@@ -226,7 +226,7 @@ def t_caches_model(api_client: _api_client.BaseApiClient, model: str):
     return model
 
 
-def pil_to_blob(img) -> types.Blob:
+def pil_to_blob(img: Any) -> types.Blob:
   PngImagePlugin: Optional[builtin_types.ModuleType]
   try:
     import PIL.PngImagePlugin
@@ -280,9 +280,19 @@ def t_function_responses(
     return [t_function_response(function_responses)]
 
 
-
+def t_blobs(
+    api_client: _api_client.BaseApiClient,
+    blobs: Union[types.BlobImageUnionDict, list[types.BlobImageUnionDict]],
+) -> list[types.Blob]:
+  if isinstance(blobs, list):
+    return [t_blob(api_client, blob) for blob in blobs]
+  else:
+    return [t_blob(api_client, blobs)]
+
 
-def t_blob(
+def t_blob(
+    api_client: _api_client.BaseApiClient, blob: types.BlobImageUnionDict
+) -> types.Blob:
   try:
     import PIL.Image
 
@@ -307,6 +317,24 @@ def t_blob(blob: BlobUnion) -> types.Blob:
   )
 
 
+def t_image_blob(
+    api_client: _api_client.BaseApiClient, blob: types.BlobImageUnionDict
+) -> types.Blob:
+  blob = t_blob(api_client, blob)
+  if blob.mime_type and blob.mime_type.startswith('image/'):
+    return blob
+  raise ValueError(f'Unsupported mime type: {blob.mime_type!r}')
+
+
+def t_audio_blob(
+    api_client: _api_client.BaseApiClient, blob: types.BlobOrDict
+) -> types.Blob:
+  blob = t_blob(api_client, blob)
+  if blob.mime_type and blob.mime_type.startswith('audio/'):
+    return blob
+  raise ValueError(f'Unsupported mime type: {blob.mime_type!r}')
+
+
 def t_part(part: Optional[types.PartUnionDict]) -> types.Part:
   try:
     import PIL.Image
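The new `t_image_blob` and `t_audio_blob` helpers wrap `t_blob` with a mime-type check. A small sketch of the guard they apply (the blob payloads below are made up; the helpers themselves are internal to the SDK):

```python
from google.genai import types

png = types.Blob(mime_type='image/png', data=b'\x89PNG...')  # would pass t_image_blob
wav = types.Blob(mime_type='audio/wav', data=b'RIFF...')     # would pass t_audio_blob


def require_prefix(blob: types.Blob, prefix: str) -> types.Blob:
  # Same guard as above: pass the blob through only when its mime_type
  # carries the expected prefix, otherwise fail loudly.
  if blob.mime_type and blob.mime_type.startswith(prefix):
    return blob
  raise ValueError(f'Unsupported mime type: {blob.mime_type!r}')
```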
@@ -470,7 +498,7 @@ def t_contents(
   def _append_accumulated_parts_as_content(
       result: list[types.Content],
       accumulated_parts: list[types.Part],
-  ):
+  ) -> None:
     if not accumulated_parts:
       return
     result.append(
@@ -484,7 +512,7 @@ def t_contents(
       result: list[types.Content],
       accumulated_parts: list[types.Part],
       current_part: types.PartUnionDict,
-  ):
+  ) -> None:
     current_part = t_part(current_part)
     if _is_user_part(current_part) == _are_user_parts(accumulated_parts):
       accumulated_parts.append(current_part)
@@ -523,7 +551,7 @@ def t_contents(
   return result
 
 
-def handle_null_fields(schema: dict[str, Any]):
+def handle_null_fields(schema: dict[str, Any]) -> None:
   """Process null fields in the schema so it is compatible with OpenAPI.
 
   The OpenAPI spec does not support 'type: 'null' in the schema. This function
@@ -588,7 +616,7 @@ def process_schema(
     defs: Optional[dict[str, Any]] = None,
     *,
     order_properties: bool = True,
-):
+) -> None:
   """Updates the schema and each sub-schema inplace to be API-compatible.
 
   - Inlines the $defs.
@@ -649,13 +677,6 @@ def process_schema(
       'type': 'array'
     }
   """
-  if not client.vertexai:
-    if schema.get('default') is not None:
-      raise ValueError(
-          'Default value is not supported in the response schema for the Gemini'
-          ' API.'
-      )
-
   if schema.get('title') == 'PlaceholderLiteralEnum':
     del schema['title']
 
@@ -663,7 +684,9 @@ def process_schema(
   # provided directly to response_schema, it may use `any_of` instead of `anyOf.
   # Otherwise, model_json_schema() uses `anyOf`.
   for from_name, to_name in [
+      ('additional_properties', 'additionalProperties'),
       ('any_of', 'anyOf'),
+      ('prefix_items', 'prefixItems'),
       ('property_ordering', 'propertyOrdering'),
   ]:
     if (value := schema.pop(from_name, None)) is not None:
@@ -723,9 +746,16 @@ def process_schema(
         and 'propertyOrdering' not in schema
     ):
       schema['property_ordering'] = list(properties.keys())
+    if (additional := schema.get('additionalProperties')) is not None:
+      # It is legal to set 'additionalProperties' to a bool:
+      # https://json-schema.org/understanding-json-schema/reference/object#additionalproperties
+      if isinstance(additional, dict):
+        schema['additionalProperties'] = _recurse(additional)
   elif schema_type == 'ARRAY':
     if (items := schema.get('items')) is not None:
       schema['items'] = _recurse(items)
+    if (prefixes := schema.get('prefixItems')) is not None:
+      schema['prefixItems'] = [_recurse(prefix) for prefix in prefixes]
 
 
 def _process_enum(
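Taken together, these schema hunks drop the old Gemini-API rejection of `default` values and teach `process_schema` to recurse into `additionalProperties` and `prefixItems`. A minimal sketch (assuming Pydantic v2) of where those keys come from in a generated JSON schema:

```python
import pydantic


class Weather(pydantic.BaseModel):
  readings: dict[str, float]     # dict field -> `additionalProperties` sub-schema
  location: tuple[float, float]  # fixed-length tuple -> `prefixItems`


schema = Weather.model_json_schema()
# Roughly: {'additionalProperties': {'type': 'number'}, 'type': 'object', ...}
print(schema['properties']['readings'])
# Roughly: {'prefixItems': [{'type': 'number'}, {'type': 'number'}], 'type': 'array', ...}
print(schema['properties']['location'])
```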
@@ -834,7 +864,7 @@ def t_speech_config(
   raise ValueError(f'Unsupported speechConfig type: {type(origin)}')
 
 
-def t_tool(client: _api_client.BaseApiClient, origin) -> Optional[types.Tool]:
+def t_tool(client: _api_client.BaseApiClient, origin: Any) -> Optional[Union[types.Tool, Any]]:
   if not origin:
     return None
   if inspect.isfunction(origin) or inspect.ismethod(origin):
@@ -845,6 +875,8 @@ def t_tool(client: _api_client.BaseApiClient, origin) -> Optional[types.Tool]:
             )
         ]
     )
+  elif isinstance(origin, dict):
+    return types.Tool.model_validate(origin)
   else:
     return origin
 
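With the new `isinstance(origin, dict)` branch, a plain dict passed as a tool is validated into `types.Tool` instead of being forwarded unchanged. A hedged sketch of what that validation accepts (the weather declaration is illustrative only):

```python
from google.genai import types

# Equivalent to what the new branch does with a dict tool definition.
tool = types.Tool.model_validate({
    'function_declarations': [{
        'name': 'get_weather',
        'description': 'Returns current weather for a city.',
    }]
})
assert tool.function_declarations[0].name == 'get_weather'
```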
@@ -875,11 +907,11 @@ def t_tools(
   return tools
 
 
-def t_cached_content_name(client: _api_client.BaseApiClient, name: str):
+def t_cached_content_name(client: _api_client.BaseApiClient, name: str) -> str:
   return _resource_name(client, name, collection_identifier='cachedContents')
 
 
-def t_batch_job_source(client: _api_client.BaseApiClient, src: str):
+def t_batch_job_source(client: _api_client.BaseApiClient, src: str) -> types.BatchJobSource:
   if src.startswith('gs://'):
     return types.BatchJobSource(
         format='jsonl',
@@ -894,7 +926,7 @@ def t_batch_job_source(client: _api_client.BaseApiClient, src: str):
   raise ValueError(f'Unsupported source: {src}')
 
 
-def t_batch_job_destination(client: _api_client.BaseApiClient, dest: str):
+def t_batch_job_destination(client: _api_client.BaseApiClient, dest: str) -> types.BatchJobDestination:
   if dest.startswith('gs://'):
     return types.BatchJobDestination(
         format='jsonl',
@@ -909,7 +941,7 @@ def t_batch_job_destination(client: _api_client.BaseApiClient, dest: str):
   raise ValueError(f'Unsupported destination: {dest}')
 
 
-def t_batch_job_name(client: _api_client.BaseApiClient, name: str):
+def t_batch_job_name(client: _api_client.BaseApiClient, name: str) -> str:
   if not client.vertexai:
     return name
 
@@ -928,7 +960,7 @@ LRO_POLLING_TIMEOUT_SECONDS = 900.0
 LRO_POLLING_MULTIPLIER = 1.5
 
 
-def t_resolve_operation(api_client: _api_client.BaseApiClient, struct: dict):
+def t_resolve_operation(api_client: _api_client.BaseApiClient, struct: dict[str, Any]) -> Any:
   if (name := struct.get('name')) and '/operations/' in name:
     operation: dict[str, Any] = struct
     total_seconds = 0.0
@@ -937,7 +969,7 @@ def t_resolve_operation(api_client: _api_client.BaseApiClient, struct: dict):
       if total_seconds > LRO_POLLING_TIMEOUT_SECONDS:
         raise RuntimeError(f'Operation {name} timed out.\n{operation}')
       # TODO(b/374433890): Replace with LRO module once it's available.
-      operation = api_client.request(
+      operation = api_client.request(  # type: ignore[assignment]
          http_method='GET', path=name, request_dict={}
      )
      time.sleep(delay_seconds)
@@ -959,7 +991,7 @@ def t_resolve_operation(api_client: _api_client.BaseApiClient, struct: dict):
 def t_file_name(
     api_client: _api_client.BaseApiClient,
     name: Optional[Union[str, types.File, types.Video, types.GeneratedVideo]],
-):
+) -> str:
   # Remove the files/ prefix since it's added to the url path.
   if isinstance(name, types.File):
     name = name.name
@@ -1017,3 +1049,65 @@ def t_bytes(api_client: _api_client.BaseApiClient, data: bytes) -> str:
   if not isinstance(data, bytes):
     return data
   return base64.b64encode(data).decode('ascii')
+
+
+def t_content_strict(content: types.ContentOrDict) -> types.Content:
+  if isinstance(content, dict):
+    return types.Content.model_validate(content)
+  elif isinstance(content, types.Content):
+    return content
+  else:
+    raise ValueError(
+        f'Could not convert input (type "{type(content)}") to '
+        '`types.Content`'
+    )
+
+
+def t_contents_strict(
+    contents: Union[Sequence[types.ContentOrDict], types.ContentOrDict],
+) -> list[types.Content]:
+  if isinstance(contents, Sequence):
+    return [t_content_strict(content) for content in contents]
+  else:
+    return [t_content_strict(contents)]
+
+
+def t_client_content(
+    turns: Optional[
+        Union[Sequence[types.ContentOrDict], types.ContentOrDict]
+    ] = None,
+    turn_complete: bool = True,
+) -> types.LiveClientContent:
+  if turns is None:
+    return types.LiveClientContent(turn_complete=turn_complete)
+
+  try:
+    return types.LiveClientContent(
+        turns=t_contents_strict(contents=turns),
+        turn_complete=turn_complete,
+    )
+  except Exception as e:
+    raise ValueError(
+        f'Could not convert input (type "{type(turns)}") to '
+        '`types.LiveClientContent`'
+    ) from e
+
+
+def t_tool_response(
+    input: Union[
+        types.FunctionResponseOrDict,
+        Sequence[types.FunctionResponseOrDict],
+    ],
+) -> types.LiveClientToolResponse:
+  if not input:
+    raise ValueError(f'A tool response is required, got: \n{input}')
+
+  try:
+    return types.LiveClientToolResponse(
+        function_responses=t_function_responses(function_responses=input)
+    )
+  except Exception as e:
+    raise ValueError(
+        f'Could not convert input (type "{type(input)}") to '
+        '`types.LiveClientToolResponse`'
+    ) from e
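The new `t_content_strict`, `t_contents_strict`, `t_client_content`, and `t_tool_response` helpers back the slimmed-down `live.py` (see `_live_converters.py` in the file list). A hedged usage sketch of the two entry points (these are internal transformers, not public API; the dict keys follow the standard `Content`/`Part` shape):

```python
from google.genai import _transformers as t
from google.genai import types

# A single content dict becomes a LiveClientContent with turn_complete=True.
msg = t.t_client_content(turns={'role': 'user', 'parts': [{'text': 'Hello'}]})
assert isinstance(msg, types.LiveClientContent) and msg.turn_complete

# A FunctionResponse (or an equivalent dict) becomes a LiveClientToolResponse.
reply = t.t_tool_response(
    input=types.FunctionResponse(name='get_weather', response={'temp_c': 21})
)
assert isinstance(reply, types.LiveClientToolResponse)
```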
google/genai/batches.py
CHANGED
@@ -33,9 +33,9 @@ logger = logging.getLogger('google_genai.batches')
 
 def _BatchJobSource_to_vertex(
     api_client: BaseApiClient,
-    from_object: Union[dict, object],
-    parent_object: Optional[dict] = None,
-) -> dict:
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
   to_object: dict[str, Any] = {}
   if getv(from_object, ['format']) is not None:
     setv(to_object, ['instancesFormat'], getv(from_object, ['format']))
@@ -55,9 +55,9 @@ def _BatchJobSource_to_vertex(
 
 def _BatchJobDestination_to_vertex(
     api_client: BaseApiClient,
-    from_object: Union[dict, object],
-    parent_object: Optional[dict] = None,
-) -> dict:
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
   to_object: dict[str, Any] = {}
   if getv(from_object, ['format']) is not None:
     setv(to_object, ['predictionsFormat'], getv(from_object, ['format']))
@@ -81,9 +81,9 @@ def _BatchJobDestination_to_vertex(
 
 def _CreateBatchJobConfig_to_vertex(
     api_client: BaseApiClient,
-    from_object: Union[dict, object],
-    parent_object: Optional[dict] = None,
-) -> dict:
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
   to_object: dict[str, Any] = {}
 
   if getv(from_object, ['display_name']) is not None:
@@ -105,9 +105,9 @@ def _CreateBatchJobConfig_to_vertex(
 
 def _CreateBatchJobParameters_to_vertex(
     api_client: BaseApiClient,
-    from_object: Union[dict, object],
-    parent_object: Optional[dict] = None,
-) -> dict:
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
   to_object: dict[str, Any] = {}
   if getv(from_object, ['model']) is not None:
     setv(
@@ -141,9 +141,9 @@ def _CreateBatchJobParameters_to_vertex(
 
 def _GetBatchJobParameters_to_vertex(
     api_client: BaseApiClient,
-    from_object: Union[dict, object],
-    parent_object: Optional[dict] = None,
-) -> dict:
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
   to_object: dict[str, Any] = {}
   if getv(from_object, ['name']) is not None:
     setv(
@@ -160,9 +160,9 @@ def _GetBatchJobParameters_to_vertex(
 
 def _CancelBatchJobParameters_to_vertex(
     api_client: BaseApiClient,
-    from_object: Union[dict, object],
-    parent_object: Optional[dict] = None,
-) -> dict:
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
   to_object: dict[str, Any] = {}
   if getv(from_object, ['name']) is not None:
     setv(
@@ -179,9 +179,9 @@ def _CancelBatchJobParameters_to_vertex(
 
 def _ListBatchJobsConfig_to_vertex(
     api_client: BaseApiClient,
-    from_object: Union[dict, object],
-    parent_object: Optional[dict] = None,
-) -> dict:
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
   to_object: dict[str, Any] = {}
 
   if getv(from_object, ['page_size']) is not None:
@@ -204,9 +204,9 @@ def _ListBatchJobsConfig_to_vertex(
 
 def _ListBatchJobsParameters_to_vertex(
     api_client: BaseApiClient,
-    from_object: Union[dict, object],
-    parent_object: Optional[dict] = None,
-) -> dict:
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
   to_object: dict[str, Any] = {}
   if getv(from_object, ['config']) is not None:
     setv(
@@ -222,9 +222,9 @@ def _ListBatchJobsParameters_to_vertex(
 
 def _DeleteBatchJobParameters_to_vertex(
     api_client: BaseApiClient,
-    from_object: Union[dict, object],
-    parent_object: Optional[dict] = None,
-) -> dict:
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
   to_object: dict[str, Any] = {}
   if getv(from_object, ['name']) is not None:
     setv(
@@ -241,9 +241,9 @@ def _DeleteBatchJobParameters_to_vertex(
 
 def _JobError_from_vertex(
     api_client: BaseApiClient,
-    from_object: Union[dict, object],
-    parent_object: Optional[dict] = None,
-) -> dict:
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
   to_object: dict[str, Any] = {}
   if getv(from_object, ['details']) is not None:
     setv(to_object, ['details'], getv(from_object, ['details']))
@@ -259,9 +259,9 @@ def _JobError_from_vertex(
 
 def _BatchJobSource_from_vertex(
     api_client: BaseApiClient,
-    from_object: Union[dict, object],
-    parent_object: Optional[dict] = None,
-) -> dict:
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
   to_object: dict[str, Any] = {}
   if getv(from_object, ['instancesFormat']) is not None:
     setv(to_object, ['format'], getv(from_object, ['instancesFormat']))
@@ -281,9 +281,9 @@ def _BatchJobSource_from_vertex(
 
 def _BatchJobDestination_from_vertex(
     api_client: BaseApiClient,
-    from_object: Union[dict, object],
-    parent_object: Optional[dict] = None,
-) -> dict:
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
   to_object: dict[str, Any] = {}
   if getv(from_object, ['predictionsFormat']) is not None:
     setv(to_object, ['format'], getv(from_object, ['predictionsFormat']))
@@ -307,9 +307,9 @@ def _BatchJobDestination_from_vertex(
 
 def _BatchJob_from_vertex(
     api_client: BaseApiClient,
-    from_object: Union[dict, object],
-    parent_object: Optional[dict] = None,
-) -> dict:
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
   to_object: dict[str, Any] = {}
   if getv(from_object, ['name']) is not None:
     setv(to_object, ['name'], getv(from_object, ['name']))
@@ -367,9 +367,9 @@ def _BatchJob_from_vertex(
 
 def _ListBatchJobsResponse_from_vertex(
     api_client: BaseApiClient,
-    from_object: Union[dict, object],
-    parent_object: Optional[dict] = None,
-) -> dict:
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
   to_object: dict[str, Any] = {}
   if getv(from_object, ['nextPageToken']) is not None:
     setv(to_object, ['next_page_token'], getv(from_object, ['nextPageToken']))
@@ -389,9 +389,9 @@ def _ListBatchJobsResponse_from_vertex(
 
 def _DeleteResourceJob_from_vertex(
     api_client: BaseApiClient,
-    from_object: Union[dict, object],
-    parent_object: Optional[dict] = None,
-) -> dict:
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
   to_object: dict[str, Any] = {}
   if getv(from_object, ['name']) is not None:
     setv(to_object, ['name'], getv(from_object, ['name']))
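Every hunk in `batches.py` applies the same change: bare `dict` annotations on the generated converter helpers become `dict[str, Any]`. A generic illustration of the pattern (the function below is a hypothetical stand-in, not one of the SDK converters):

```python
from typing import Any, Optional, Union


def _example_converter(
    from_object: Union[dict[str, Any], object],
    parent_object: Optional[dict[str, Any]] = None,
) -> dict[str, Any]:
  # Runtime behavior is identical to the bare-`dict` version; the explicit
  # parameters only satisfy strict type checkers (for example mypy's
  # disallow-any-generics style rules).
  to_object: dict[str, Any] = {}
  return to_object
```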
|