google-genai 1.19.0__py3-none-any.whl → 1.21.0__py3-none-any.whl
This diff shows the contents of publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only and reflects the changes between the two published versions.
- google/genai/_api_client.py +449 -137
- google/genai/_common.py +88 -1
- google/genai/_live_converters.py +174 -414
- google/genai/_replay_api_client.py +10 -9
- google/genai/_tokens_converters.py +81 -176
- google/genai/_transformers.py +19 -40
- google/genai/batches.py +47 -64
- google/genai/caches.py +132 -222
- google/genai/chats.py +9 -14
- google/genai/client.py +1 -1
- google/genai/errors.py +32 -6
- google/genai/files.py +89 -103
- google/genai/live.py +15 -20
- google/genai/live_music.py +4 -5
- google/genai/models.py +412 -558
- google/genai/operations.py +36 -68
- google/genai/tokens.py +11 -6
- google/genai/tunings.py +65 -113
- google/genai/types.py +305 -92
- google/genai/version.py +1 -1
- {google_genai-1.19.0.dist-info → google_genai-1.21.0.dist-info}/METADATA +47 -1
- google_genai-1.21.0.dist-info/RECORD +35 -0
- google_genai-1.19.0.dist-info/RECORD +0 -35
- {google_genai-1.19.0.dist-info → google_genai-1.21.0.dist-info}/WHEEL +0 -0
- {google_genai-1.19.0.dist-info → google_genai-1.21.0.dist-info}/licenses/LICENSE +0 -0
- {google_genai-1.19.0.dist-info → google_genai-1.21.0.dist-info}/top_level.txt +0 -0
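google/genai/version.py changes only the version string. After installing the new wheel, a quick sanity check of the installed release can be done from Python (the __version__ attribute is re-exported at the package root):

from google import genai

# Expected to print 1.21.0 once the new wheel is installed.
print(genai.__version__)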
google/genai/live.py
CHANGED
@@ -79,6 +79,7 @@ _FUNCTION_RESPONSE_REQUIRES_ID = (
     ' response of a ToolCall.FunctionalCalls in Google AI.'
 )
 
+
 class AsyncSession:
   """[Preview] AsyncSession."""
 
@@ -215,11 +216,11 @@ class AsyncSession:
 
     if self._api_client.vertexai:
       client_content_dict = live_converters._LiveClientContent_to_vertex(
-          api_client=self._api_client, from_object=client_content
+          from_object=client_content
       )
     else:
       client_content_dict = live_converters._LiveClientContent_to_mldev(
-          api_client=self._api_client, from_object=client_content
+          from_object=client_content
       )
 
     await self._ws.send(json.dumps({'client_content': client_content_dict}))
@@ -313,13 +314,13 @@ class AsyncSession:
     if self._api_client.vertexai:
       realtime_input_dict = (
           live_converters._LiveSendRealtimeInputParameters_to_vertex(
-              api_client=self._api_client, from_object=realtime_input
+              from_object=realtime_input
           )
       )
     else:
       realtime_input_dict = (
           live_converters._LiveSendRealtimeInputParameters_to_mldev(
-              api_client=self._api_client, from_object=realtime_input
+              from_object=realtime_input
           )
       )
     realtime_input_dict = _common.convert_to_dict(realtime_input_dict)
@@ -399,11 +400,11 @@ class AsyncSession:
     tool_response = t.t_tool_response(function_responses)
     if self._api_client.vertexai:
       tool_response_dict = live_converters._LiveClientToolResponse_to_vertex(
-          api_client=self._api_client, from_object=tool_response
+          from_object=tool_response
       )
     else:
       tool_response_dict = live_converters._LiveClientToolResponse_to_mldev(
-          api_client=self._api_client, from_object=tool_response
+          from_object=tool_response
       )
     for response in tool_response_dict.get('functionResponses', []):
       if response.get('id') is None:
@@ -527,13 +528,9 @@ class AsyncSession:
     response = {}
 
     if self._api_client.vertexai:
-      response_dict = live_converters._LiveServerMessage_from_vertex(
-          self._api_client, response
-      )
+      response_dict = live_converters._LiveServerMessage_from_vertex(response)
     else:
-      response_dict = live_converters._LiveServerMessage_from_mldev(
-          self._api_client, response
-      )
+      response_dict = live_converters._LiveServerMessage_from_mldev(response)
 
     return types.LiveServerMessage._from_response(
         response=response_dict, kwargs=parameter_model.model_dump()
@@ -647,13 +644,13 @@ class AsyncSession:
         content_input_parts.append(item)
     if self._api_client.vertexai:
       contents = [
-          _Content_to_vertex(self._api_client, item, to_object)
-          for item in t.t_contents(self._api_client, content_input_parts)
+          _Content_to_vertex(item, to_object)
+          for item in t.t_contents(content_input_parts)
       ]
     else:
       contents = [
-          _Content_to_mldev(self._api_client, item, to_object)
-          for item in t.t_contents(self._api_client, content_input_parts)
+          _Content_to_mldev(item, to_object)
+          for item in t.t_contents(content_input_parts)
       ]
 
     content_dict_list: list[types.ContentDict] = []
@@ -1063,7 +1060,7 @@ async def _t_live_connect_config(
   elif isinstance(config, dict):
     if getv(config, ['system_instruction']) is not None:
       converted_system_instruction = t.t_content(
-          api_client, getv(config, ['system_instruction'])
+          getv(config, ['system_instruction'])
       )
     else:
       converted_system_instruction = None
@@ -1073,9 +1070,7 @@ async def _t_live_connect_config(
     if config.system_instruction is None:
       system_instruction = None
     else:
-      system_instruction = t.t_content(
-          api_client, getv(config, ['system_instruction'])
-      )
+      system_instruction = t.t_content(getv(config, ['system_instruction']))
     parameter_model = config
     parameter_model.system_instruction = system_instruction
 
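Every live.py hunk above is the same internal refactor: the generated converter helpers (_LiveClientContent_*, _LiveSendRealtimeInputParameters_*, _LiveClientToolResponse_*, _LiveServerMessage_from_*) and the t.t_content/t.t_contents transformers are now called without an api_client argument. The public AsyncSession surface is untouched, so Live API code like the sketch below should behave the same on 1.19.0 and 1.21.0 (the model name, config values, and prompt are illustrative assumptions, not taken from this diff):

import asyncio

from google import genai
from google.genai import types

client = genai.Client()  # assumes API credentials are configured in the environment


async def main() -> None:
    config = types.LiveConnectConfig(
        response_modalities=['TEXT'],
        system_instruction=types.Content(
            parts=[types.Part(text='You are a concise assistant.')]
        ),
    )
    # 'gemini-2.0-flash-live-001' is an illustrative live-capable model name.
    async with client.aio.live.connect(
        model='gemini-2.0-flash-live-001', config=config
    ) as session:
        # send_client_content and the receive path are among the methods whose
        # internals are refactored in the hunks above.
        await session.send_client_content(
            turns=types.Content(role='user', parts=[types.Part(text='Hello!')])
        )
        async for message in session.receive():
            if message.text:
                print(message.text, end='')


asyncio.run(main())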
google/genai/live_music.py
CHANGED
@@ -58,7 +58,7 @@ class AsyncMusicSession:
       raise NotImplementedError('Live music generation is not supported in Vertex AI.')
     else:
       client_content_dict = live_converters._LiveMusicClientContent_to_mldev(
-          api_client=self._api_client, from_object={'weighted_prompts': prompts}
+          from_object={'weighted_prompts': prompts}
       )
       await self._ws.send(json.dumps({'clientContent': client_content_dict}))
 
@@ -70,7 +70,7 @@ class AsyncMusicSession:
       raise NotImplementedError('Live music generation is not supported in Vertex AI.')
     else:
       config_dict = live_converters._LiveMusicGenerationConfig_to_mldev(
-          api_client=self._api_client, from_object=config
+          from_object=config
       )
       await self._ws.send(json.dumps({'musicGenerationConfig': config_dict}))
 
@@ -82,7 +82,7 @@ class AsyncMusicSession:
       raise NotImplementedError('Live music generation is not supported in Vertex AI.')
     else:
       playback_control_dict = live_converters._LiveMusicClientMessage_to_mldev(
-          api_client=self._api_client, from_object={'playback_control': playback_control}
+          from_object={'playback_control': playback_control}
      )
       await self._ws.send(json.dumps(playback_control_dict))
 
@@ -135,7 +135,7 @@ class AsyncMusicSession:
       raise NotImplementedError('Live music generation is not supported in Vertex AI.')
     else:
       response_dict = live_converters._LiveMusicServerMessage_from_mldev(
-          self._api_client, response
+          response
       )
 
     return types.LiveMusicServerMessage._from_response(
@@ -173,7 +173,6 @@ class AsyncLiveMusic(_api_module.BaseModule):
     # Only mldev supported
     request_dict = _common.convert_to_dict(
         live_converters._LiveMusicConnectParameters_to_mldev(
-            api_client=self._api_client,
             from_object=types.LiveMusicConnectParameters(
                 model=transformed_model,
             ).model_dump(exclude_none=True)