google-genai 1.0.0rc0__py3-none-any.whl → 1.1.0__py3-none-any.whl
This diff compares two publicly released versions of the package as published to a supported public registry. It is provided for informational purposes only.
- google/genai/_api_client.py +24 -21
- google/genai/_automatic_function_calling_util.py +21 -18
- google/genai/_common.py +24 -1
- google/genai/_extra_utils.py +14 -8
- google/genai/_replay_api_client.py +2 -0
- google/genai/_transformers.py +31 -3
- google/genai/chats.py +24 -8
- google/genai/errors.py +4 -0
- google/genai/files.py +18 -12
- google/genai/live.py +5 -0
- google/genai/models.py +311 -10
- google/genai/tunings.py +224 -60
- google/genai/types.py +100 -74
- google/genai/version.py +1 -1
- {google_genai-1.0.0rc0.dist-info → google_genai-1.1.0.dist-info}/METADATA +258 -149
- google_genai-1.1.0.dist-info/RECORD +27 -0
- google_genai-1.0.0rc0.dist-info/RECORD +0 -27
- {google_genai-1.0.0rc0.dist-info → google_genai-1.1.0.dist-info}/LICENSE +0 -0
- {google_genai-1.0.0rc0.dist-info → google_genai-1.1.0.dist-info}/WHEEL +0 -0
- {google_genai-1.0.0rc0.dist-info → google_genai-1.1.0.dist-info}/top_level.txt +0 -0
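The headline change in this release is in google/genai/models.py: `generate_content_stream` (sync and async) becomes a public wrapper that runs the automatic function calling (AFC) loop around the renamed private `_generate_content_stream`, and the Gemini API image-generation path drops `enhance_prompt` support. As orientation before the diff below, here is a minimal sketch of the new public streaming call, adapted from the docstring added in this version (project ID and model name are placeholders):

```python
from google import genai

# Placeholder project/location; any supported Gemini model ID works.
client = genai.Client(
    vertexai=True, project='my-project-id', location='us-central1'
)

# New in 1.1.0: the public streaming method runs the AFC loop
# internally before yielding response chunks to the caller.
for chunk in client.models.generate_content_stream(
    model='gemini-1.5-flash-002',
    contents='What is a good name for a flower shop?',
):
    print(chunk.text)
```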
google/genai/models.py (CHANGED)
@@ -1436,11 +1436,7 @@ def _GenerateImagesConfig_to_mldev(
     )

   if getv(from_object, ['enhance_prompt']) is not None:
-    setv(
-        parent_object,
-        ['parameters', 'enhancePrompt'],
-        getv(from_object, ['enhance_prompt']),
-    )
+    raise ValueError('enhance_prompt parameter is not supported in Gemini API.')

   return to_object

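To make the new failure mode concrete: on the Gemini Developer API path, `enhance_prompt` is now rejected at request-conversion time instead of being silently forwarded. A hedged sketch of what a caller would observe (the model name is illustrative; the exception text comes from the hunk above):

```python
from google import genai
from google.genai import types

client = genai.Client(api_key='YOUR_API_KEY')  # Gemini API, not Vertex AI

try:
    client.models.generate_images(
        model='imagen-3.0-generate-002',  # illustrative model name
        prompt='A watercolor painting of a fox',
        config=types.GenerateImagesConfig(enhance_prompt=True),
    )
except ValueError as err:
    # Raised by _GenerateImagesConfig_to_mldev before any network call:
    # 'enhance_prompt parameter is not supported in Gemini API.'
    print(err)
```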
@@ -3372,9 +3368,6 @@ def _GeneratedImage_from_mldev(
         getv(from_object, ['raiFilteredReason']),
     )

-  if getv(from_object, ['prompt']) is not None:
-    setv(to_object, ['enhanced_prompt'], getv(from_object, ['prompt']))
-
   return to_object


@@ -3862,7 +3855,7 @@ class Models(_api_module.BaseModule):
     self._api_client._verify_response(return_value)
     return return_value

-  def generate_content_stream(
+  def _generate_content_stream(
       self,
       *,
       model: str,
@@ -4680,10 +4673,14 @@ class Models(_api_module.BaseModule):
         f'AFC is enabled with max remote calls: {remaining_remote_calls_afc}.'
     )
     automatic_function_calling_history = []
+    response = None
+    i = 0
     while remaining_remote_calls_afc > 0:
+      i += 1
       response = self._generate_content(
           model=model, contents=contents, config=config
       )
+      logging.info(f'AFC remote call {i} is done.')
       remaining_remote_calls_afc -= 1
       if remaining_remote_calls_afc == 0:
         logging.info('Reached max remote calls for automatic function calling.')
@@ -4691,6 +4688,8 @@ class Models(_api_module.BaseModule):
       function_map = _extra_utils.get_function_map(config)
       if not function_map:
         break
+      if not response:
+        break
       if (
           not response.candidates
           or not response.candidates[0].content
@@ -4720,6 +4719,151 @@ class Models(_api_module.BaseModule):
     )
     return response

+  def generate_content_stream(
+      self,
+      *,
+      model: str,
+      contents: Union[types.ContentListUnion, types.ContentListUnionDict],
+      config: Optional[types.GenerateContentConfigOrDict] = None,
+  ) -> Iterator[types.GenerateContentResponse]:
+    """Makes an API request to generate content using a model and yields the model's response in chunks.
+
+    For the `model` parameter, supported format for Vertex AI API includes:
+    - the Gemini model ID, for example: 'gemini-1.5-flash-002'
+    - the full resource name starts with 'projects/', for example:
+      'projects/my-project-id/locations/us-central1/publishers/google/models/gemini-1.5-flash-002'
+    - the partial resource name with 'publishers/', for example:
+      'publishers/google/models/gemini-1.5-flash-002' or
+      'publishers/meta/models/llama-3.1-405b-instruct-maas'
+    - `/` separated publisher and model name, for example:
+      'google/gemini-1.5-flash-002' or 'meta/llama-3.1-405b-instruct-maas'
+
+    For the `model` parameter, supported format for Gemini API includes:
+    - the Gemini model ID, for example: 'gemini-1.5-flash-002'
+    - the model name starts with 'models/', for example:
+      'models/gemini-1.5-flash-002'
+    - if you would like to use a tuned model, the model name starts with
+      'tunedModels/', for example:
+      'tunedModels/1234567890123456789'
+
+    Some models support multimodal input and output.
+
+    Usage:
+
+    .. code-block:: python
+
+      from google.genai import types
+      from google import genai
+
+      client = genai.Client(
+          vertexai=True, project='my-project-id', location='us-central1'
+      )
+
+      for chunk in client.models.generate_content_stream(
+          model='gemini-1.5-flash-002',
+          contents='''What is a good name for a flower shop that specializes in
+          selling bouquets of dried flowers?'''
+      ):
+        print(chunk.text)
+      # **Elegant & Classic:**
+      # * The Dried Bloom
+      # * Everlasting Florals
+      # * Timeless Petals
+
+      for chunk in client.models.generate_content_stream(
+          model='gemini-1.5-flash-002',
+          contents=[
+              types.Part.from_text('What is shown in this image?'),
+              types.Part.from_uri('gs://generativeai-downloads/images/scones.jpg',
+                  'image/jpeg')
+          ]
+      ):
+        print(chunk.text)
+      # The image shows a flat lay arrangement of freshly baked blueberry
+      # scones.
+    """
+
+    if _extra_utils.should_disable_afc(config):
+      yield from self._generate_content_stream(
+          model=model, contents=contents, config=config
+      )
+      return
+
+    remaining_remote_calls_afc = _extra_utils.get_max_remote_calls_afc(config)
+    logging.info(
+        f'AFC is enabled with max remote calls: {remaining_remote_calls_afc}.'
+    )
+    automatic_function_calling_history = []
+    chunk = None
+    func_response_parts = None
+    i = 0
+    while remaining_remote_calls_afc > 0:
+      i += 1
+      response = self._generate_content_stream(
+          model=model, contents=contents, config=config
+      )
+      logging.info(f'AFC remote call {i} is done.')
+      remaining_remote_calls_afc -= 1
+      if remaining_remote_calls_afc == 0:
+        logging.info('Reached max remote calls for automatic function calling.')
+
+      function_map = _extra_utils.get_function_map(config)
+
+      if i == 1:
+        # First request gets a function call.
+        # Then get function response parts.
+        # Yield chunks only if there's no function response parts.
+        for chunk in response:
+          if not function_map:
+            yield chunk
+          else:
+            func_response_parts = _extra_utils.get_function_response_parts(
+                chunk, function_map
+            )
+            if not func_response_parts:
+              yield chunk
+
+      else:
+        # Second request and beyond, yield chunks.
+        for chunk in response:
+          if _extra_utils.should_append_afc_history(config):
+            chunk.automatic_function_calling_history = (
+                automatic_function_calling_history
+            )
+          yield chunk
+          func_response_parts = _extra_utils.get_function_response_parts(
+              chunk, function_map
+          )
+
+      if not chunk:
+        break
+      if (
+          not chunk
+          or not chunk.candidates
+          or not chunk.candidates[0].content
+          or not chunk.candidates[0].content.parts
+      ):
+        break
+
+      if not function_map:
+        break
+      if not func_response_parts:
+        break
+
+      # Append function response parts to contents for the next request.
+      func_call_content = chunk.candidates[0].content
+      func_response_content = types.Content(
+          role='user',
+          parts=func_response_parts,
+      )
+      contents = t.t_contents(self._api_client, contents)
+      if not automatic_function_calling_history:
+        automatic_function_calling_history.extend(contents)
+      contents.append(func_call_content)
+      contents.append(func_response_content)
+      automatic_function_calling_history.append(func_call_content)
+      automatic_function_calling_history.append(func_response_content)
+
   def upscale_image(
       self,
       *,
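The loop above only issues a follow-up request when the latest chunk carries a function call that the locally registered `function_map` can answer; otherwise it yields chunks straight through and stops. A minimal sketch of what this enables for callers, assuming a Python callable is passed as a tool (the function, model name, and prompt are illustrative; AFC is on by default for callable tools):

```python
from google import genai
from google.genai import types

def get_current_weather(city: str) -> str:
    """Illustrative tool: returns a canned weather report for `city`."""
    return f'It is sunny in {city}.'

client = genai.Client(
    vertexai=True, project='my-project-id', location='us-central1'
)

# The function-call round trips happen inside generate_content_stream;
# the caller only sees the post-tool-call chunks.
for chunk in client.models.generate_content_stream(
    model='gemini-1.5-flash-002',
    contents='What is the weather in Boston?',
    config=types.GenerateContentConfig(tools=[get_current_weather]),
):
    print(chunk.text)
```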
@@ -4886,7 +5030,7 @@ class AsyncModels(_api_module.BaseModule):
     self._api_client._verify_response(return_value)
     return return_value

-  async def generate_content_stream(
+  async def _generate_content_stream(
       self,
       *,
       model: str,
@@ -5681,6 +5825,7 @@ class AsyncModels(_api_module.BaseModule):
         f'AFC is enabled with max remote calls: {remaining_remote_calls_afc}.'
     )
     automatic_function_calling_history = []
+    response = None
     while remaining_remote_calls_afc > 0:
       response = await self._generate_content(
           model=model, contents=contents, config=config
@@ -5692,6 +5837,8 @@ class AsyncModels(_api_module.BaseModule):
       function_map = _extra_utils.get_function_map(config)
       if not function_map:
         break
+      if not response:
+        break
       if (
           not response.candidates
           or not response.candidates[0].content
@@ -5722,6 +5869,160 @@ class AsyncModels(_api_module.BaseModule):
     )
     return response

+  async def generate_content_stream(
+      self,
+      *,
+      model: str,
+      contents: Union[types.ContentListUnion, types.ContentListUnionDict],
+      config: Optional[types.GenerateContentConfigOrDict] = None,
+  ) -> Awaitable[AsyncIterator[types.GenerateContentResponse]]:
+    """Makes an API request to generate content using a model and yields the model's response in chunks.
+
+    For the `model` parameter, supported format for Vertex AI API includes:
+    - the Gemini model ID, for example: 'gemini-1.5-flash-002'
+    - the full resource name starts with 'projects/', for example:
+      'projects/my-project-id/locations/us-central1/publishers/google/models/gemini-1.5-flash-002'
+    - the partial resource name with 'publishers/', for example:
+      'publishers/google/models/gemini-1.5-flash-002' or
+      'publishers/meta/models/llama-3.1-405b-instruct-maas'
+    - `/` separated publisher and model name, for example:
+      'google/gemini-1.5-flash-002' or 'meta/llama-3.1-405b-instruct-maas'
+
+    For the `model` parameter, supported format for Gemini API includes:
+    - the Gemini model ID, for example: 'gemini-1.5-flash-002'
+    - the model name starts with 'models/', for example:
+      'models/gemini-1.5-flash-002'
+    - if you would like to use a tuned model, the model name starts with
+      'tunedModels/', for example:
+      'tunedModels/1234567890123456789'
+
+    Some models support multimodal input and output.
+
+    Usage:
+
+    .. code-block:: python
+
+      from google.genai import types
+      from google import genai
+
+      client = genai.Client(
+          vertexai=True, project='my-project-id', location='us-central1'
+      )
+
+      async for chunk in await client.aio.models.generate_content_stream(
+          model='gemini-1.5-flash-002',
+          contents='''What is a good name for a flower shop that specializes in
+          selling bouquets of dried flowers?'''
+      ):
+        print(chunk.text)
+      # **Elegant & Classic:**
+      # * The Dried Bloom
+      # * Everlasting Florals
+      # * Timeless Petals
+
+      async for chunk in await client.aio.models.generate_content_stream(
+          model='gemini-1.5-flash-002',
+          contents=[
+              types.Part.from_text('What is shown in this image?'),
+              types.Part.from_uri('gs://generativeai-downloads/images/scones.jpg',
+                  'image/jpeg')
+          ]
+      ):
+        print(chunk.text)
+      # The image shows a flat lay arrangement of freshly baked blueberry
+      # scones.
+    """
+
+    if _extra_utils.should_disable_afc(config):
+      response = await self._generate_content_stream(
+          model=model, contents=contents, config=config
+      )
+
+      async def base_async_generator(model, contents, config):
+        async for chunk in response:
+          yield chunk
+
+      return base_async_generator(model, contents, config)
+
+    async def async_generator(model, contents, config):
+      remaining_remote_calls_afc = _extra_utils.get_max_remote_calls_afc(config)
+      logging.info(
+          f'AFC is enabled with max remote calls: {remaining_remote_calls_afc}.'
+      )
+      automatic_function_calling_history = []
+      chunk = None
+      i = 0
+      while remaining_remote_calls_afc > 0:
+        i += 1
+        response = await self._generate_content_stream(
+            model=model, contents=contents, config=config
+        )
+        logging.info(f'AFC remote call {i} is done.')
+        remaining_remote_calls_afc -= 1
+        if remaining_remote_calls_afc == 0:
+          logging.info(
+              'Reached max remote calls for automatic function calling.'
+          )
+
+        function_map = _extra_utils.get_function_map(config)
+
+        if i == 1:
+          # First request gets a function call.
+          # Then get function response parts.
+          # Yield chunks only if there's no function response parts.
+          async for chunk in response:
+            if not function_map:
+              yield chunk
+            else:
+              func_response_parts = _extra_utils.get_function_response_parts(
+                  chunk, function_map
+              )
+              if not func_response_parts:
+                yield chunk
+
+        else:
+          # Second request and beyond, yield chunks.
+          async for chunk in response:
+
+            if _extra_utils.should_append_afc_history(config):
+              chunk.automatic_function_calling_history = (
+                  automatic_function_calling_history
+              )
+            yield chunk
+            func_response_parts = _extra_utils.get_function_response_parts(
+                chunk, function_map
+            )
+        if not chunk:
+          break
+        if (
+            not chunk
+            or not chunk.candidates
+            or not chunk.candidates[0].content
+            or not chunk.candidates[0].content.parts
+        ):
+          break
+        if not function_map:
+          break
+
+        if not func_response_parts:
+          break
+
+        # Append function response parts to contents for the next request.
+        func_call_content = chunk.candidates[0].content
+        func_response_content = types.Content(
+            role='user',
+            parts=func_response_parts,
+        )
+        contents = t.t_contents(self._api_client, contents)
+        if not automatic_function_calling_history:
+          automatic_function_calling_history.extend(contents)
+        contents.append(func_call_content)
+        contents.append(func_response_content)
+        automatic_function_calling_history.append(func_call_content)
+        automatic_function_calling_history.append(func_response_content)
+
+    return async_generator(model, contents, config)
+
   async def list(
       self,
       *,