google-genai 1.0.0rc0__py3-none-any.whl → 1.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
google/genai/models.py CHANGED
@@ -174,8 +174,10 @@ def _Schema_to_mldev(
     raise ValueError('example parameter is not supported in Gemini API.')
 
   if getv(from_object, ['property_ordering']) is not None:
-    raise ValueError(
-        'property_ordering parameter is not supported in Gemini API.'
+    setv(
+        to_object,
+        ['propertyOrdering'],
+        getv(from_object, ['property_ordering']),
     )
 
   if getv(from_object, ['pattern']) is not None:
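
Net effect: a `property_ordering` on a response schema is now forwarded to the Gemini API as `propertyOrdering` instead of raising. A minimal sketch of what callers can now pass (the model name and schema fields are illustrative, not taken from this diff):

.. code-block:: python

  from google import genai
  from google.genai import types

  client = genai.Client()  # Gemini API (non-Vertex) client

  # property_ordering previously raised ValueError on this code path; it is
  # now serialized into the request as propertyOrdering.
  response = client.models.generate_content(
      model='gemini-1.5-flash-002',
      contents='Suggest a flower shop name and a tagline as JSON.',
      config=types.GenerateContentConfig(
          response_mime_type='application/json',
          response_schema=types.Schema(
              type='OBJECT',
              properties={
                  'name': types.Schema(type='STRING'),
                  'tagline': types.Schema(type='STRING'),
              },
              property_ordering=['name', 'tagline'],
          ),
      ),
  )
  print(response.text)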
@@ -924,8 +926,8 @@ def _GenerateContentConfig_to_mldev(
     )
 
   if getv(from_object, ['media_resolution']) is not None:
-    raise ValueError(
-        'media_resolution parameter is not supported in Gemini API.'
+    setv(
+        to_object, ['mediaResolution'], getv(from_object, ['media_resolution'])
     )
 
   if getv(from_object, ['speech_config']) is not None:
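
`media_resolution` gets the same treatment: it is now mapped to `mediaResolution` rather than rejected. A hedged sketch (the enum members come from the validator deleted below; the model name is illustrative):

.. code-block:: python

  from google import genai
  from google.genai import types

  client = genai.Client()  # Gemini API client

  response = client.models.generate_content(
      model='gemini-1.5-flash-002',
      contents='Describe the attached clip.',
      config=types.GenerateContentConfig(
          media_resolution=types.MediaResolution.MEDIA_RESOLUTION_LOW,
      ),
  )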
@@ -1436,11 +1438,7 @@ def _GenerateImagesConfig_to_mldev(
     )
 
   if getv(from_object, ['enhance_prompt']) is not None:
-    setv(
-        parent_object,
-        ['parameters', 'enhancePrompt'],
-        getv(from_object, ['enhance_prompt']),
-    )
+    raise ValueError('enhance_prompt parameter is not supported in Gemini API.')
 
   return to_object
 
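`enhance_prompt` moves in the opposite direction: it is now rejected on the Gemini API path, so it should only be set on a Vertex AI client. A sketch of the new failure mode (the Imagen model name is illustrative):

.. code-block:: python

  from google import genai
  from google.genai import types

  client = genai.Client()  # Gemini API client, not Vertex AI

  try:
    client.models.generate_images(
        model='imagen-3.0-generate-002',  # illustrative model name
        prompt='A bouquet of dried flowers',
        config=types.GenerateImagesConfig(enhance_prompt=True),
    )
  except ValueError as e:
    print(e)  # enhance_prompt parameter is not supported in Gemini API.
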
@@ -2791,16 +2789,6 @@ def _ComputeTokensParameters_to_vertex(
   return to_object
 
 
-def _MediaResolution_to_mldev_enum_validate(enum_value: Any):
-  if enum_value in set([
-      'MEDIA_RESOLUTION_UNSPECIFIED',
-      'MEDIA_RESOLUTION_LOW',
-      'MEDIA_RESOLUTION_MEDIUM',
-      'MEDIA_RESOLUTION_HIGH',
-  ]):
-    raise ValueError(f'{enum_value} enum value is not supported in Gemini API.')
-
-
 def _SafetyFilterLevel_to_mldev_enum_validate(enum_value: Any):
   if enum_value in set(['BLOCK_NONE']):
     raise ValueError(f'{enum_value} enum value is not supported in Gemini API.')
@@ -3372,9 +3360,6 @@ def _GeneratedImage_from_mldev(
         getv(from_object, ['raiFilteredReason']),
     )
 
-  if getv(from_object, ['prompt']) is not None:
-    setv(to_object, ['enhanced_prompt'], getv(from_object, ['prompt']))
-
   return to_object
 
 
@@ -3862,7 +3847,7 @@ class Models(_api_module.BaseModule):
     self._api_client._verify_response(return_value)
     return return_value
 
-  def generate_content_stream(
+  def _generate_content_stream(
       self,
       *,
       model: str,
@@ -4680,10 +4665,14 @@ class Models(_api_module.BaseModule):
         f'AFC is enabled with max remote calls: {remaining_remote_calls_afc}.'
     )
     automatic_function_calling_history = []
+    response = None
+    i = 0
     while remaining_remote_calls_afc > 0:
+      i += 1
       response = self._generate_content(
           model=model, contents=contents, config=config
       )
+      logging.info(f'AFC remote call {i} is done.')
       remaining_remote_calls_afc -= 1
       if remaining_remote_calls_afc == 0:
         logging.info('Reached max remote calls for automatic function calling.')
@@ -4691,6 +4680,8 @@ class Models(_api_module.BaseModule):
       function_map = _extra_utils.get_function_map(config)
       if not function_map:
         break
+      if not response:
+        break
       if (
           not response.candidates
           or not response.candidates[0].content
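
These hunks harden the non-streaming AFC loop: `response` is initialized before the loop, each remote call is logged, and the loop bails out if a call returned nothing. The loop budget itself comes from the `automatic_function_calling` config; a minimal sketch of the knobs involved (the tool function is illustrative):

.. code-block:: python

  from google import genai
  from google.genai import types

  def get_weather(city: str) -> str:
    """Illustrative tool; AFC invokes it when the model emits a function call."""
    return f'Sunny in {city}'

  client = genai.Client()
  response = client.models.generate_content(
      model='gemini-1.5-flash-002',
      contents='What is the weather in Boston?',
      config=types.GenerateContentConfig(
          tools=[get_weather],
          # Caps remaining_remote_calls_afc in the loop above.
          automatic_function_calling=types.AutomaticFunctionCallingConfig(
              maximum_remote_calls=3,
          ),
      ),
  )
  print(response.text)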
@@ -4720,6 +4711,151 @@ class Models(_api_module.BaseModule):
       )
     return response
 
+  def generate_content_stream(
+      self,
+      *,
+      model: str,
+      contents: Union[types.ContentListUnion, types.ContentListUnionDict],
+      config: Optional[types.GenerateContentConfigOrDict] = None,
+  ) -> Iterator[types.GenerateContentResponse]:
+    """Makes an API request to generate content using a model and yields the model's response in chunks.
+
+    For the `model` parameter, supported format for Vertex AI API includes:
+    - the Gemini model ID, for example: 'gemini-1.5-flash-002'
+    - the full resource name starts with 'projects/', for example:
+      'projects/my-project-id/locations/us-central1/publishers/google/models/gemini-1.5-flash-002'
+    - the partial resource name with 'publishers/', for example:
+      'publishers/google/models/gemini-1.5-flash-002' or
+      'publishers/meta/models/llama-3.1-405b-instruct-maas'
+    - `/` separated publisher and model name, for example:
+      'google/gemini-1.5-flash-002' or 'meta/llama-3.1-405b-instruct-maas'
+
+    For the `model` parameter, supported format for Gemini API includes:
+    - the Gemini model ID, for example: 'gemini-1.5-flash-002'
+    - the model name starts with 'models/', for example:
+      'models/gemini-1.5-flash-002'
+    - if you would like to use a tuned model, the model name starts with
+      'tunedModels/', for example:
+      'tunedModels/1234567890123456789'
+
+    Some models support multimodal input and output.
+
+    Usage:
+
+    .. code-block:: python
+
+      from google.genai import types
+      from google import genai
+
+      client = genai.Client(
+          vertexai=True, project='my-project-id', location='us-central1'
+      )
+
+      for chunk in client.models.generate_content_stream(
+          model='gemini-1.5-flash-002',
+          contents='''What is a good name for a flower shop that specializes in
+          selling bouquets of dried flowers?'''
+      ):
+        print(chunk.text)
+      # **Elegant & Classic:**
+      # * The Dried Bloom
+      # * Everlasting Florals
+      # * Timeless Petals
+
+      for chunk in client.models.generate_content_stream(
+          model='gemini-1.5-flash-002',
+          contents=[
+              types.Part.from_text('What is shown in this image?'),
+              types.Part.from_uri('gs://generativeai-downloads/images/scones.jpg',
+                                  'image/jpeg')
+          ]
+      ):
+        print(chunk.text)
+      # The image shows a flat lay arrangement of freshly baked blueberry
+      # scones.
+    """
+
+    if _extra_utils.should_disable_afc(config):
+      yield from self._generate_content_stream(
+          model=model, contents=contents, config=config
+      )
+      return
+
+    remaining_remote_calls_afc = _extra_utils.get_max_remote_calls_afc(config)
+    logging.info(
+        f'AFC is enabled with max remote calls: {remaining_remote_calls_afc}.'
+    )
+    automatic_function_calling_history = []
+    chunk = None
+    func_response_parts = None
+    i = 0
+    while remaining_remote_calls_afc > 0:
+      i += 1
+      response = self._generate_content_stream(
+          model=model, contents=contents, config=config
+      )
+      logging.info(f'AFC remote call {i} is done.')
+      remaining_remote_calls_afc -= 1
+      if remaining_remote_calls_afc == 0:
+        logging.info('Reached max remote calls for automatic function calling.')
+
+      function_map = _extra_utils.get_function_map(config)
+
+      if i == 1:
+        # First request gets a function call.
+        # Then get function response parts.
+        # Yield chunks only if there's no function response parts.
+        for chunk in response:
+          if not function_map:
+            yield chunk
+          else:
+            func_response_parts = _extra_utils.get_function_response_parts(
+                chunk, function_map
+            )
+            if not func_response_parts:
+              yield chunk
+
+      else:
+        # Second request and beyond, yield chunks.
+        for chunk in response:
+          if _extra_utils.should_append_afc_history(config):
+            chunk.automatic_function_calling_history = (
+                automatic_function_calling_history
+            )
+          yield chunk
+          func_response_parts = _extra_utils.get_function_response_parts(
+              chunk, function_map
+          )
+
+      if not chunk:
+        break
+      if (
+          not chunk
+          or not chunk.candidates
+          or not chunk.candidates[0].content
+          or not chunk.candidates[0].content.parts
+      ):
+        break
+
+      if not function_map:
+        break
+      if not func_response_parts:
+        break
+
+      # Append function response parts to contents for the next request.
+      func_call_content = chunk.candidates[0].content
+      func_response_content = types.Content(
+          role='user',
+          parts=func_response_parts,
+      )
+      contents = t.t_contents(self._api_client, contents)
+      if not automatic_function_calling_history:
+        automatic_function_calling_history.extend(contents)
+      contents.append(func_call_content)
+      contents.append(func_response_content)
+      automatic_function_calling_history.append(func_call_content)
+      automatic_function_calling_history.append(func_response_content)
+
   def upscale_image(
       self,
       *,
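
With this hunk the public `generate_content_stream` becomes an AFC-aware wrapper over the renamed `_generate_content_stream`: chunks carrying function calls are intercepted and the matching tools executed, and only ordinary content chunks are yielded to the caller. A hedged sketch of streaming with a tool attached (tool body illustrative):

.. code-block:: python

  from google import genai
  from google.genai import types

  def get_weather(city: str) -> str:
    """Illustrative tool function."""
    return f'Sunny in {city}'

  client = genai.Client()
  for chunk in client.models.generate_content_stream(
      model='gemini-1.5-flash-002',
      contents='What is the weather in Boston?',
      config=types.GenerateContentConfig(tools=[get_weather]),
  ):
    print(chunk.text)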
@@ -4886,7 +5022,7 @@ class AsyncModels(_api_module.BaseModule):
     self._api_client._verify_response(return_value)
     return return_value
 
-  async def generate_content_stream(
+  async def _generate_content_stream(
       self,
       *,
       model: str,
@@ -5681,6 +5817,7 @@ class AsyncModels(_api_module.BaseModule):
         f'AFC is enabled with max remote calls: {remaining_remote_calls_afc}.'
     )
     automatic_function_calling_history = []
+    response = None
     while remaining_remote_calls_afc > 0:
       response = await self._generate_content(
           model=model, contents=contents, config=config
@@ -5692,6 +5829,8 @@ class AsyncModels(_api_module.BaseModule):
       function_map = _extra_utils.get_function_map(config)
       if not function_map:
         break
+      if not response:
+        break
       if (
           not response.candidates
           or not response.candidates[0].content
@@ -5722,6 +5861,160 @@ class AsyncModels(_api_module.BaseModule):
       )
     return response
 
+  async def generate_content_stream(
+      self,
+      *,
+      model: str,
+      contents: Union[types.ContentListUnion, types.ContentListUnionDict],
+      config: Optional[types.GenerateContentConfigOrDict] = None,
+  ) -> Awaitable[AsyncIterator[types.GenerateContentResponse]]:
+    """Makes an API request to generate content using a model and yields the model's response in chunks.
+
+    For the `model` parameter, supported format for Vertex AI API includes:
+    - the Gemini model ID, for example: 'gemini-1.5-flash-002'
+    - the full resource name starts with 'projects/', for example:
+      'projects/my-project-id/locations/us-central1/publishers/google/models/gemini-1.5-flash-002'
+    - the partial resource name with 'publishers/', for example:
+      'publishers/google/models/gemini-1.5-flash-002' or
+      'publishers/meta/models/llama-3.1-405b-instruct-maas'
+    - `/` separated publisher and model name, for example:
+      'google/gemini-1.5-flash-002' or 'meta/llama-3.1-405b-instruct-maas'
+
+    For the `model` parameter, supported format for Gemini API includes:
+    - the Gemini model ID, for example: 'gemini-1.5-flash-002'
+    - the model name starts with 'models/', for example:
+      'models/gemini-1.5-flash-002'
+    - if you would like to use a tuned model, the model name starts with
+      'tunedModels/', for example:
+      'tunedModels/1234567890123456789'
+
+    Some models support multimodal input and output.
+
+    Usage:
+
+    .. code-block:: python
+
+      from google.genai import types
+      from google import genai
+
+      client = genai.Client(
+          vertexai=True, project='my-project-id', location='us-central1'
+      )
+
+      async for chunk in await client.aio.models.generate_content_stream(
+          model='gemini-1.5-flash-002',
+          contents='''What is a good name for a flower shop that specializes in
+          selling bouquets of dried flowers?'''
+      ):
+        print(chunk.text)
+      # **Elegant & Classic:**
+      # * The Dried Bloom
+      # * Everlasting Florals
+      # * Timeless Petals
+
+      async for chunk in await client.aio.models.generate_content_stream(
+          model='gemini-1.5-flash-002',
+          contents=[
+              types.Part.from_text('What is shown in this image?'),
+              types.Part.from_uri('gs://generativeai-downloads/images/scones.jpg',
+                                  'image/jpeg')
+          ]
+      ):
+        print(chunk.text)
+      # The image shows a flat lay arrangement of freshly baked blueberry
+      # scones.
+    """
+
+    if _extra_utils.should_disable_afc(config):
+      response = await self._generate_content_stream(
+          model=model, contents=contents, config=config
+      )
+
+      async def base_async_generator(model, contents, config):
+        async for chunk in response:
+          yield chunk
+
+      return base_async_generator(model, contents, config)
+
+    async def async_generator(model, contents, config):
+      remaining_remote_calls_afc = _extra_utils.get_max_remote_calls_afc(config)
+      logging.info(
+          f'AFC is enabled with max remote calls: {remaining_remote_calls_afc}.'
+      )
+      automatic_function_calling_history = []
+      chunk = None
+      i = 0
+      while remaining_remote_calls_afc > 0:
+        i += 1
+        response = await self._generate_content_stream(
+            model=model, contents=contents, config=config
+        )
+        logging.info(f'AFC remote call {i} is done.')
+        remaining_remote_calls_afc -= 1
+        if remaining_remote_calls_afc == 0:
+          logging.info(
+              'Reached max remote calls for automatic function calling.'
+          )
+
+        function_map = _extra_utils.get_function_map(config)
+
+        if i == 1:
+          # First request gets a function call.
+          # Then get function response parts.
+          # Yield chunks only if there's no function response parts.
+          async for chunk in response:
+            if not function_map:
+              yield chunk
+            else:
+              func_response_parts = _extra_utils.get_function_response_parts(
+                  chunk, function_map
+              )
+              if not func_response_parts:
+                yield chunk
+
+        else:
+          # Second request and beyond, yield chunks.
+          async for chunk in response:
+
+            if _extra_utils.should_append_afc_history(config):
+              chunk.automatic_function_calling_history = (
+                  automatic_function_calling_history
+              )
+            yield chunk
+            func_response_parts = _extra_utils.get_function_response_parts(
+                chunk, function_map
+            )
+        if not chunk:
+          break
+        if (
+            not chunk
+            or not chunk.candidates
+            or not chunk.candidates[0].content
+            or not chunk.candidates[0].content.parts
+        ):
+          break
+        if not function_map:
+          break
+
+        if not func_response_parts:
+          break
+
+        # Append function response parts to contents for the next request.
+        func_call_content = chunk.candidates[0].content
+        func_response_content = types.Content(
+            role='user',
+            parts=func_response_parts,
+        )
+        contents = t.t_contents(self._api_client, contents)
+        if not automatic_function_calling_history:
+          automatic_function_calling_history.extend(contents)
+        contents.append(func_call_content)
+        contents.append(func_response_content)
+        automatic_function_calling_history.append(func_call_content)
+        automatic_function_calling_history.append(func_response_content)
+
+    return async_generator(model, contents, config)
+
   async def list(
       self,
       *,
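
Note the return annotation: `Awaitable[AsyncIterator[...]]`. The method builds and returns a generator object rather than being an async generator itself, so the call must be awaited before iteration, exactly as the docstring shows. A minimal sketch:

.. code-block:: python

  import asyncio

  from google import genai

  async def main():
    client = genai.Client()
    # The call returns an awaitable that resolves to the async iterator.
    stream = await client.aio.models.generate_content_stream(
        model='gemini-1.5-flash-002',
        contents='Name three dried flowers.',
    )
    async for chunk in stream:
      print(chunk.text)

  asyncio.run(main())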
@@ -5729,8 +6022,8 @@ class AsyncModels(_api_module.BaseModule):
   ) -> AsyncPager[types.Model]:
     """Makes an API request to list the available models.
 
-    If `query_base` is set to True in the config, the API will return all
-    available base models. If set to False or not set (default), it will return
+    If `query_base` is set to True in the config or not set (default), the
+    API will return all available base models. If set to False, it will return
     all tuned models.
 
     Args:
@@ -5755,6 +6048,8 @@ class AsyncModels(_api_module.BaseModule):
         types._ListModelsParameters(config=config).config
         or types.ListModelsConfig()
     )
+    if config.query_base is None:
+      config.query_base = True
     if self._api_client.vertexai:
       config = config.copy()
       if not config.query_base:
@@ -5765,8 +6060,6 @@ class AsyncModels(_api_module.BaseModule):
             if filter_value
             else 'labels.tune-type:*'
         )
-    if not config.query_base:
-      config.query_base = False
     return AsyncPager(
         'models',
         self._list,
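
Taken together, the three `list` hunks flip the default: an unset `query_base` is now treated as True, so a bare `list()` call returns base models, and tuned models must be requested explicitly. A hedged sketch (the dict form of the config is assumed to be accepted, as elsewhere in the SDK):

.. code-block:: python

  import asyncio

  from google import genai

  async def main():
    client = genai.Client()

    # Default (query_base unset -> True): base models.
    async for model in await client.aio.models.list():
      print(model.name)

    # Explicit opt-in to tuned models.
    async for model in await client.aio.models.list(
        config={'query_base': False}
    ):
      print(model.name)

  asyncio.run(main())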