google-genai 0.0.1__py3-none-any.whl → 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- google/genai/__init__.py +2 -0
- google/genai/_api_client.py +14 -6
- google/genai/_automatic_function_calling_util.py +0 -44
- google/genai/_extra_utils.py +15 -0
- google/genai/_transformers.py +3 -2
- google/genai/batches.py +254 -4
- google/genai/caches.py +10 -0
- google/genai/chats.py +14 -2
- google/genai/files.py +6 -0
- google/genai/live.py +74 -42
- google/genai/models.py +110 -11
- google/genai/tunings.py +317 -4
- google/genai/types.py +482 -85
- {google_genai-0.0.1.dist-info → google_genai-0.2.0.dist-info}/METADATA +75 -58
- google_genai-0.2.0.dist-info/RECORD +24 -0
- google_genai-0.0.1.dist-info/RECORD +0 -24
- {google_genai-0.0.1.dist-info → google_genai-0.2.0.dist-info}/LICENSE +0 -0
- {google_genai-0.0.1.dist-info → google_genai-0.2.0.dist-info}/WHEEL +0 -0
- {google_genai-0.0.1.dist-info → google_genai-0.2.0.dist-info}/top_level.txt +0 -0
google/genai/live.py
CHANGED
````diff
@@ -53,6 +53,12 @@ except ModuleNotFoundError:
   from websockets.client import connect
 
 
+_FUNCTION_RESPONSE_REQUIRES_ID = (
+    'FunctionResponse request must have an `id` field from the'
+    ' response of a ToolCall.FunctionalCalls in Google AI.'
+)
+
+
 class AsyncSession:
   """AsyncSession."""
 
````
````diff
@@ -81,20 +87,23 @@
     """Receive model responses from the server.
 
     The method will yield the model responses from the server. The returned
-    responses will represent a complete model turn.
-
-
-
-    ```
-    client = genai.Client(api_key=API_KEY)
-
-    async with client.aio.live.connect(model='...') as session:
-      await session.send(input='Hello world!', end_of_turn=True)
-      async for message in session.receive():
-        print(message)
-    ```
+    responses will represent a complete model turn. When the returned message
+    is fuction call, user must call `send` with the function response to
+    continue the turn.
 
     Yields:
-
+      The model responses from the server.
+
+    Example usage:
+
+    .. code-block:: python
+
+      client = genai.Client(api_key=API_KEY)
+
+      async with client.aio.live.connect(model='...') as session:
+        await session.send(input='Hello world!', end_of_turn=True)
+        async for message in session.receive():
+          print(message)
     """
     # TODO(b/365983264) Handle intermittent issues for the user.
     while result := await self._receive():
````
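The rewritten docstring says a turn can now pause on a tool call that the caller must answer via `send`. A minimal sketch of that loop, assuming a hypothetical weather tool and the `tool_call` message shape used elsewhere in this SDK; the `id` round-trip is what the new checks further down enforce on the Google AI backend:

```python
# Sketch only: API_KEY, the model name, and the tool output are placeholders.
from google import genai
from google.genai import types

client = genai.Client(api_key=API_KEY)

async def weather_turn():
  async with client.aio.live.connect(model='...') as session:
    await session.send(input='What is the weather in Boston?', end_of_turn=True)
    async for message in session.receive():
      if message.tool_call:  # the model paused its turn to call a function
        for call in message.tool_call.function_calls:
          await session.send(input=types.FunctionResponse(
              id=call.id,  # echo the server-issued id (Google AI requires it)
              name=call.name,
              response={'result': 'sunny'},  # hypothetical tool output
          ))
      else:
        print(message)
```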
````diff
@@ -113,28 +122,27 @@
     input stream to the model and the other task will be used to receive the
     responses from the model.
 
-    Example usage:
-    ```
-    client = genai.Client(api_key=API_KEY)
-    config = {'response_modalities': ['AUDIO']}
-
-    async def audio_stream():
-      stream = read_audio()
-      for data in stream:
-        yield data
-
-    async with client.aio.live.connect(model='...') as session:
-      for audio in session.start_stream(stream = audio_stream(),
-      mime_type = 'audio/pcm'):
-        play_audio_chunk(audio.data)
-    ```
-
     Args:
-
-
+      stream: An iterator that yields the model response.
+      mime_type: The MIME type of the data in the stream.
 
     Yields:
-
+      The audio bytes received from the model and server response messages.
+
+    Example usage:
+
+    .. code-block:: python
+
+      client = genai.Client(api_key=API_KEY)
+      config = {'response_modalities': ['AUDIO']}
+      async def audio_stream():
+        stream = read_audio()
+        for data in stream:
+          yield data
+      async with client.aio.live.connect(model='...') as session:
+        for audio in session.start_stream(stream = audio_stream(),
+        mime_type = 'audio/pcm'):
+          play_audio_chunk(audio.data)
     """
     stop_event = asyncio.Event()
     # Start the send loop. When stream is complete stop_event is set.
````
````diff
@@ -340,6 +348,8 @@
       input = [input]
     elif isinstance(input, dict) and 'name' in input and 'response' in input:
       # ToolResponse.FunctionResponse
+      if not (self._api_client.vertexai) and 'id' not in input:
+        raise ValueError(_FUNCTION_RESPONSE_REQUIRES_ID)
       input = [input]
 
     if isinstance(input, Sequence) and any(
````
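A dict is recognized as a function response by its `name` and `response` keys; under this change it must also carry the `id` echoed from the tool call whenever the client targets Google AI. A hedged example of the shape that passes the check (values are illustrative):

```python
# Without 'id', this send now raises ValueError(_FUNCTION_RESPONSE_REQUIRES_ID)
# on the Google AI backend; Vertex AI clients are exempt from the check.
await session.send(input={
    'id': call.id,          # taken from the preceding tool_call message
    'name': 'get_weather',  # hypothetical function name
    'response': {'result': 'sunny'},
})
```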
````diff
@@ -395,7 +405,29 @@
       client_message = {'client_content': input.model_dump(exclude_none=True)}
     elif isinstance(input, types.LiveClientToolResponse):
       # ToolResponse.FunctionResponse
+      if not (self._api_client.vertexai) and not (input.function_responses[0].id):
+        raise ValueError(_FUNCTION_RESPONSE_REQUIRES_ID)
       client_message = {'tool_response': input.model_dump(exclude_none=True)}
+    elif isinstance(input, types.FunctionResponse):
+      if not (self._api_client.vertexai) and not (input.id):
+        raise ValueError(_FUNCTION_RESPONSE_REQUIRES_ID)
+      client_message = {
+          'tool_response': {
+              'function_responses': [input.model_dump(exclude_none=True)]
+          }
+      }
+    elif isinstance(input, Sequence) and isinstance(
+        input[0], types.FunctionResponse
+    ):
+      if not (self._api_client.vertexai) and not (input[0].id):
+        raise ValueError(_FUNCTION_RESPONSE_REQUIRES_ID)
+      client_message = {
+          'tool_response': {
+              'function_responses': [
+                  c.model_dump(exclude_none=True) for c in input
+              ]
+          }
+      }
     else:
       raise ValueError(
           f'Unsupported input type "{type(input)}" or input content "{input}"'
````
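These new branches also let `send` accept a single `types.FunctionResponse` or a sequence of them directly, wrapping either into a `tool_response` client message. A sketch of the sequence form, answering every call from one tool-call message (names are illustrative):

```python
responses = [
    types.FunctionResponse(
        id=call.id,  # per-call id, still mandatory on the Google AI backend
        name=call.name,
        response={'ok': True},  # hypothetical tool output
    )
    for call in message.tool_call.function_calls
]
await session.send(input=responses)
```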
````diff
@@ -571,16 +603,16 @@ class AsyncLive(_common.BaseModule):
   ) -> AsyncSession:
     """Connect to the live server.
 
-
-
-
-
-
-
-
-
-
-
+    Usage:
+
+    .. code-block:: python
+
+      client = genai.Client(api_key=API_KEY)
+      config = {}
+      async with client.aio.live.connect(model='gemini-1.0-pro-002', config=config) as session:
+        await session.send(input='Hello world!', end_of_turn=True)
+        async for message in session:
+          print(message)
     """
     base_url = self.api_client._websocket_base_url()
     if self.api_client.api_key:
````
google/genai/models.py
CHANGED
````diff
@@ -880,6 +880,11 @@ def _GenerateContentConfig_to_mldev(
       getv(from_object, ['response_modalities']),
   )
 
+  if getv(from_object, ['media_resolution']):
+    raise ValueError(
+        'media_resolution parameter is not supported in Google AI.'
+    )
+
   if getv(from_object, ['speech_config']) is not None:
     setv(
         to_object,
````
````diff
@@ -1022,6 +1027,11 @@ def _GenerateContentConfig_to_vertex(
       getv(from_object, ['response_modalities']),
   )
 
+  if getv(from_object, ['media_resolution']) is not None:
+    setv(
+        to_object, ['mediaResolution'], getv(from_object, ['media_resolution'])
+    )
+
   if getv(from_object, ['speech_config']) is not None:
     setv(
         to_object,
````
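Read together, these two hunks make `media_resolution` a Vertex-only setting: the Vertex converter forwards it as `mediaResolution`, while the Google AI converter rejects it up front. A hedged sketch, assuming the string enum value and model id:

```python
from google import genai
from google.genai import types

config = types.GenerateContentConfig(
    media_resolution='MEDIA_RESOLUTION_LOW',  # assumed enum value
)

# Accepted when the client targets Vertex AI...
vertex_client = genai.Client(vertexai=True, project=PROJECT, location=LOCATION)
vertex_client.models.generate_content(
    model='gemini-1.5-flash',  # assumed model id
    contents='Describe the clip.',
    config=config,
)

# ...while a Google AI client now fails fast:
# ValueError: media_resolution parameter is not supported in Google AI.
```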
````diff
@@ -1116,8 +1126,15 @@ def _EmbedContentConfig_to_mldev(
     parent_object: dict = None,
 ) -> dict:
   to_object = {}
+  if getv(from_object, ['http_options']) is not None:
+    setv(to_object, ['httpOptions'], getv(from_object, ['http_options']))
+
   if getv(from_object, ['task_type']) is not None:
-    setv(
+    setv(
+        parent_object,
+        ['requests[]', 'taskType'],
+        getv(from_object, ['task_type']),
+    )
 
   if getv(from_object, ['title']) is not None:
     setv(parent_object, ['requests[]', 'title'], getv(from_object, ['title']))
````
````diff
@@ -1144,10 +1161,13 @@ def _EmbedContentConfig_to_vertex(
     parent_object: dict = None,
 ) -> dict:
   to_object = {}
+  if getv(from_object, ['http_options']) is not None:
+    setv(to_object, ['httpOptions'], getv(from_object, ['http_options']))
+
   if getv(from_object, ['task_type']) is not None:
     setv(
         parent_object,
-        ['instances[]', '
+        ['instances[]', 'task_type'],
         getv(from_object, ['task_type']),
     )
 
````
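After this fix, the same `task_type` config reaches both backends under their native field names: `requests[].taskType` on Google AI and `instances[].task_type` on Vertex AI. A usage sketch (the task type value is an assumption):

```python
response = client.models.embed_content(
    model='text-embedding-004',
    contents='What is your name?',
    config={'task_type': 'RETRIEVAL_QUERY'},  # assumed valid task type
)
print(response.embeddings)
```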
````diff
@@ -1253,6 +1273,9 @@ def _GenerateImageConfig_to_mldev(
     parent_object: dict = None,
 ) -> dict:
   to_object = {}
+  if getv(from_object, ['http_options']) is not None:
+    setv(to_object, ['httpOptions'], getv(from_object, ['http_options']))
+
   if getv(from_object, ['output_gcs_uri']):
     raise ValueError('output_gcs_uri parameter is not supported in Google AI.')
 
````
````diff
@@ -1348,6 +1371,9 @@ def _GenerateImageConfig_to_vertex(
     parent_object: dict = None,
 ) -> dict:
   to_object = {}
+  if getv(from_object, ['http_options']) is not None:
+    setv(to_object, ['httpOptions'], getv(from_object, ['http_options']))
+
   if getv(from_object, ['output_gcs_uri']) is not None:
     setv(
         parent_object,
````
````diff
@@ -1778,6 +1804,9 @@ def _EditImageConfig_to_mldev(
     parent_object: dict = None,
 ) -> dict:
   to_object = {}
+  if getv(from_object, ['http_options']) is not None:
+    setv(to_object, ['httpOptions'], getv(from_object, ['http_options']))
+
   if getv(from_object, ['output_gcs_uri']):
     raise ValueError('output_gcs_uri parameter is not supported in Google AI.')
 
````
````diff
@@ -1870,6 +1899,9 @@ def _EditImageConfig_to_vertex(
     parent_object: dict = None,
 ) -> dict:
   to_object = {}
+  if getv(from_object, ['http_options']) is not None:
+    setv(to_object, ['httpOptions'], getv(from_object, ['http_options']))
+
   if getv(from_object, ['output_gcs_uri']) is not None:
     setv(
         parent_object,
````
````diff
@@ -2042,6 +2074,9 @@ def _UpscaleImageAPIConfig_to_mldev(
     parent_object: dict = None,
 ) -> dict:
   to_object = {}
+  if getv(from_object, ['http_options']) is not None:
+    setv(to_object, ['httpOptions'], getv(from_object, ['http_options']))
+
   if getv(from_object, ['upscale_factor']) is not None:
     setv(
         parent_object,
````
````diff
@@ -2089,6 +2124,9 @@ def _UpscaleImageAPIConfig_to_vertex(
     parent_object: dict = None,
 ) -> dict:
   to_object = {}
+  if getv(from_object, ['http_options']) is not None:
+    setv(to_object, ['httpOptions'], getv(from_object, ['http_options']))
+
   if getv(from_object, ['upscale_factor']) is not None:
     setv(
         parent_object,
````
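The six image-config hunks above all add the same passthrough: an `http_options` entry in the request config is copied into the outgoing `httpOptions`. A hedged sketch of what this enables, assuming the 0.2.0 method name `generate_image`, an illustrative model id, and option fields following `types.HttpOptions`:

```python
response = client.models.generate_image(
    model='imagen-3.0-generate-001',  # assumed model id
    prompt='A watercolor of a red bicycle',
    config={
        'number_of_images': 1,
        'http_options': {'timeout': 60_000},  # illustrative per-request override
    },
)
```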
````diff
@@ -3695,6 +3733,29 @@ class Models(_common.BaseModule):
       contents: Union[types.ContentListUnion, types.ContentListUnionDict],
       config: Optional[types.EmbedContentConfigOrDict] = None,
   ) -> types.EmbedContentResponse:
+    """Calculates embeddings for the given contents(only text is supported).
+
+    Args:
+      model (str): The model to use.
+      contents (list[Content]): The contents to embed.
+      config (EmbedContentConfig): Optional configuration for embeddings.
+
+    Usage:
+
+    .. code-block:: python
+
+      embeddings = client.models.embed_content(
+          model= 'text-embedding-004',
+          contents=[
+              'What is your name?',
+              'What is your favorite color?',
+          ],
+          config={
+              'output_dimensionality': 64
+          },
+      )
+    """
+
     parameter_model = types._EmbedContentParameters(
         model=model,
         contents=contents,
````
````diff
@@ -4327,6 +4388,7 @@ class Models(_common.BaseModule):
     logging.info(
         f'AFC is enabled with max remote calls: {remaining_remote_calls_afc}.'
     )
+    automatic_function_calling_history = []
     while remaining_remote_calls_afc > 0:
       response = self._generate_content(
           model=model, contents=contents, config=config
````
````diff
@@ -4351,11 +4413,17 @@
         break
       contents = t.t_contents(self.api_client, contents)
       contents.append(response.candidates[0].content)
-      contents.append(
-
-
-
-
+      contents.append(
+          types.Content(
+              role='user',
+              parts=func_response_parts,
+          )
+      )
+      automatic_function_calling_history.extend(contents)
+    if _extra_utils.should_append_afc_history(config):
+      response.automatic_function_calling_history = (
+          automatic_function_calling_history
+      )
     return response
 
   def upscale_image(
````
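The net effect of the automatic function calling (AFC) changes: intermediate tool-call turns are accumulated and, when `_extra_utils.should_append_afc_history(config)` allows it, attached to the response as `automatic_function_calling_history`. A sketch with a hypothetical callable tool:

```python
def get_current_weather(city: str) -> str:
  """Hypothetical tool; passing a callable as a tool enables AFC."""
  return 'sunny'

response = client.models.generate_content(
    model='gemini-1.5-flash',  # assumed model id
    contents='What is the weather in Boston?',
    config={'tools': [get_current_weather]},
)
# Inspect the model/tool exchange that produced the final answer.
for content in response.automatic_function_calling_history:
  print(content.role, content.parts)
```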
````diff
@@ -4563,6 +4631,29 @@ class AsyncModels(_common.BaseModule):
       contents: Union[types.ContentListUnion, types.ContentListUnionDict],
       config: Optional[types.EmbedContentConfigOrDict] = None,
   ) -> types.EmbedContentResponse:
+    """Calculates embeddings for the given contents(only text is supported).
+
+    Args:
+      model (str): The model to use.
+      contents (list[Content]): The contents to embed.
+      config (EmbedContentConfig): Optional configuration for embeddings.
+
+    Usage:
+
+    .. code-block:: python
+
+      embeddings = client.models.embed_content(
+          model= 'text-embedding-004',
+          contents=[
+              'What is your name?',
+              'What is your favorite color?',
+          ],
+          config={
+              'output_dimensionality': 64
+          },
+      )
+    """
+
     parameter_model = types._EmbedContentParameters(
         model=model,
         contents=contents,
````
````diff
@@ -5185,6 +5276,7 @@
     logging.info(
         f'AFC is enabled with max remote calls: {remaining_remote_calls_afc}.'
     )
+    automatic_function_calling_history = []
     while remaining_remote_calls_afc > 0:
       response = await self._generate_content(
           model=model, contents=contents, config=config
````
````diff
@@ -5209,11 +5301,18 @@
         break
       contents = t.t_contents(self.api_client, contents)
       contents.append(response.candidates[0].content)
-      contents.append(
-
-
-
+      contents.append(
+          types.Content(
+              role='user',
+              parts=func_response_parts,
+          )
+      )
+      automatic_function_calling_history.extend(contents)
 
+    if _extra_utils.should_append_afc_history(config):
+      response.automatic_function_calling_history = (
+          automatic_function_calling_history
+      )
     return response
 
   async def list(
````