google-genai 0.7.0__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
google/genai/models.py CHANGED
@@ -16,7 +16,7 @@
  # Code generated by the Google Gen AI SDK generator DO NOT EDIT.

  import logging
- from typing import AsyncIterator, Awaitable, Iterator, Optional, Union
+ from typing import Any, AsyncIterator, Awaitable, Iterator, Optional, Union
  from urllib.parse import urlencode
  from . import _api_module
  from . import _common
@@ -904,6 +904,9 @@ def _GenerateContentConfig_to_mldev(
  ),
  )

+ if getv(from_object, ['labels']) is not None:
+ raise ValueError('labels parameter is not supported in Gemini API.')
+
  if getv(from_object, ['cached_content']) is not None:
  setv(
  parent_object,
@@ -1066,6 +1069,9 @@ def _GenerateContentConfig_to_vertex(
  ),
  )

+ if getv(from_object, ['labels']) is not None:
+ setv(parent_object, ['labels'], getv(from_object, ['labels']))
+
  if getv(from_object, ['cached_content']) is not None:
  setv(
  parent_object,
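
Taken together, the two hunks above split the handling of the new `labels` option: the Vertex AI converter forwards it into the request body, while the Gemini API (mldev) converter rejects it outright. A minimal sketch of the resulting caller-visible behavior, assuming `GenerateContentConfig` exposes the `labels` field these converters read; project ID, API key, and label values are placeholders:

    from google import genai
    from google.genai import types

    config = types.GenerateContentConfig(labels={'team': 'research'})

    # Vertex AI client: labels are forwarded to the request payload.
    vertex_client = genai.Client(
        vertexai=True, project='my-project-id', location='us-central1'
    )
    vertex_client.models.generate_content(
        model='gemini-1.5-flash-002', contents='Hello', config=config
    )

    # Gemini API client: the mldev converter now raises instead of dropping the field.
    gemini_client = genai.Client(api_key='YOUR_API_KEY')
    try:
        gemini_client.models.generate_content(
            model='gemini-1.5-flash-002', contents='Hello', config=config
        )
    except ValueError as e:
        print(e)  # labels parameter is not supported in Gemini API.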
@@ -1430,11 +1436,7 @@ def _GenerateImagesConfig_to_mldev(
  )

  if getv(from_object, ['enhance_prompt']) is not None:
- setv(
- parent_object,
- ['parameters', 'enhancePrompt'],
- getv(from_object, ['enhance_prompt']),
- )
+ raise ValueError('enhance_prompt parameter is not supported in Gemini API.')

  return to_object

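The image-generation config gets the same treatment: `enhance_prompt` is no longer silently mapped onto the Gemini API request and instead fails fast. A hedged sketch, reusing the Gemini API client from the previous example (the Imagen model name is a placeholder):

    from google.genai import types

    try:
        gemini_client.models.generate_images(
            model='imagen-3.0-generate-002',
            prompt='A bouquet of dried flowers',
            config=types.GenerateImagesConfig(enhance_prompt=True),
        )
    except ValueError as e:
        print(e)  # enhance_prompt parameter is not supported in Gemini API.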
@@ -2785,7 +2787,7 @@ def _ComputeTokensParameters_to_vertex(
  return to_object


- def _MediaResolution_to_mldev_enum_validate(enum_value: any):
+ def _MediaResolution_to_mldev_enum_validate(enum_value: Any):
  if enum_value in set([
  'MEDIA_RESOLUTION_UNSPECIFIED',
  'MEDIA_RESOLUTION_LOW',
@@ -2795,17 +2797,17 @@ def _MediaResolution_to_mldev_enum_validate(enum_value: any):
  raise ValueError(f'{enum_value} enum value is not supported in Gemini API.')


- def _SafetyFilterLevel_to_mldev_enum_validate(enum_value: any):
+ def _SafetyFilterLevel_to_mldev_enum_validate(enum_value: Any):
  if enum_value in set(['BLOCK_NONE']):
  raise ValueError(f'{enum_value} enum value is not supported in Gemini API.')


- def _PersonGeneration_to_mldev_enum_validate(enum_value: any):
+ def _PersonGeneration_to_mldev_enum_validate(enum_value: Any):
  if enum_value in set(['ALLOW_ALL']):
  raise ValueError(f'{enum_value} enum value is not supported in Gemini API.')


- def _MaskReferenceMode_to_mldev_enum_validate(enum_value: any):
+ def _MaskReferenceMode_to_mldev_enum_validate(enum_value: Any):
  if enum_value in set([
  'MASK_MODE_DEFAULT',
  'MASK_MODE_USER_PROVIDED',
@@ -2816,7 +2818,7 @@ def _MaskReferenceMode_to_mldev_enum_validate(enum_value: any):
  raise ValueError(f'{enum_value} enum value is not supported in Gemini API.')


- def _ControlReferenceType_to_mldev_enum_validate(enum_value: any):
+ def _ControlReferenceType_to_mldev_enum_validate(enum_value: Any):
  if enum_value in set([
  'CONTROL_TYPE_DEFAULT',
  'CONTROL_TYPE_CANNY',
@@ -2826,7 +2828,7 @@ def _ControlReferenceType_to_mldev_enum_validate(enum_value: any):
  raise ValueError(f'{enum_value} enum value is not supported in Gemini API.')


- def _SubjectReferenceType_to_mldev_enum_validate(enum_value: any):
+ def _SubjectReferenceType_to_mldev_enum_validate(enum_value: Any):
  if enum_value in set([
  'SUBJECT_TYPE_DEFAULT',
  'SUBJECT_TYPE_PERSON',
@@ -2836,7 +2838,7 @@ def _SubjectReferenceType_to_mldev_enum_validate(enum_value: any):
  raise ValueError(f'{enum_value} enum value is not supported in Gemini API.')


- def _EditMode_to_mldev_enum_validate(enum_value: any):
+ def _EditMode_to_mldev_enum_validate(enum_value: Any):
  if enum_value in set([
  'EDIT_MODE_DEFAULT',
  'EDIT_MODE_INPAINT_REMOVAL',
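
The run of `any` → `Any` hunks fixes a genuine typing bug rather than a style nit: lowercase `any` is the builtin function, not a type, so using it as an annotation is rejected by type checkers such as mypy. A one-line illustration:

    from typing import Any

    def validate_bad(enum_value: any): ...  # annotation is builtins.any; mypy rejects it as a type
    def validate_ok(enum_value: Any): ...   # typing.Any, the intended "anything" annotation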
@@ -3389,6 +3391,9 @@ def _GeneratedImage_from_vertex(
  getv(from_object, ['raiFilteredReason']),
  )

+ if getv(from_object, ['prompt']) is not None:
+ setv(to_object, ['enhanced_prompt'], getv(from_object, ['prompt']))
+
  return to_object


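On the response side, a Vertex image result that carries a raw `prompt` field is now surfaced as `enhanced_prompt` on the corresponding `GeneratedImage`. A sketch of reading it back, reusing the Vertex client from the first example and assuming an Imagen model that returns enhanced prompts (model name is a placeholder):

    response = vertex_client.models.generate_images(
        model='imagen-3.0-generate-002',
        prompt='A bouquet of dried flowers',
    )
    for image in response.generated_images:
        # Populated from the raw 'prompt' field by _GeneratedImage_from_vertex.
        print(image.enhanced_prompt)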
@@ -3850,7 +3855,7 @@ class Models(_api_module.BaseModule):
  self._api_client._verify_response(return_value)
  return return_value

- def generate_content_stream(
+ def _generate_content_stream(
  self,
  *,
  model: str,
@@ -4668,10 +4673,14 @@ class Models(_api_module.BaseModule):
  f'AFC is enabled with max remote calls: {remaining_remote_calls_afc}.'
  )
  automatic_function_calling_history = []
+ response = None
+ i = 0
  while remaining_remote_calls_afc > 0:
+ i += 1
  response = self._generate_content(
  model=model, contents=contents, config=config
  )
+ logging.info(f'AFC remote call {i} is done.')
  remaining_remote_calls_afc -= 1
  if remaining_remote_calls_afc == 0:
  logging.info('Reached max remote calls for automatic function calling.')
@@ -4679,6 +4688,8 @@ class Models(_api_module.BaseModule):
  function_map = _extra_utils.get_function_map(config)
  if not function_map:
  break
+ if not response:
+ break
  if (
  not response.candidates
  or not response.candidates[0].content
@@ -4690,21 +4701,165 @@ class Models(_api_module.BaseModule):
  )
  if not func_response_parts:
  break
- contents = t.t_contents(self._api_client, contents)
- contents.append(response.candidates[0].content)
- contents.append(
- types.Content(
- role='user',
- parts=func_response_parts,
- )
+ func_call_content = response.candidates[0].content
+ func_response_content = types.Content(
+ role='user',
+ parts=func_response_parts,
  )
- automatic_function_calling_history.extend(contents)
+ contents = t.t_contents(self._api_client, contents)
+ if not automatic_function_calling_history:
+ automatic_function_calling_history.extend(contents)
+ contents.append(func_call_content)
+ contents.append(func_response_content)
+ automatic_function_calling_history.append(func_call_content)
+ automatic_function_calling_history.append(func_response_content)
  if _extra_utils.should_append_afc_history(config):
  response.automatic_function_calling_history = (
  automatic_function_calling_history
  )
  return response

+ def generate_content_stream(
+ self,
+ *,
+ model: str,
+ contents: Union[types.ContentListUnion, types.ContentListUnionDict],
+ config: Optional[types.GenerateContentConfigOrDict] = None,
+ ) -> Iterator[types.GenerateContentResponse]:
+ """Makes an API request to generate content using a model and yields the model's response in chunks.
+
+ For the `model` parameter, supported format for Vertex AI API includes:
+ - the Gemini model ID, for example: 'gemini-1.5-flash-002'
+ - the full resource name starts with 'projects/', for example:
+ 'projects/my-project-id/locations/us-central1/publishers/google/models/gemini-1.5-flash-002'
+ - the partial resource name with 'publishers/', for example:
+ 'publishers/google/models/gemini-1.5-flash-002' or
+ 'publishers/meta/models/llama-3.1-405b-instruct-maas'
+ - `/` separated publisher and model name, for example:
+ 'google/gemini-1.5-flash-002' or 'meta/llama-3.1-405b-instruct-maas'
+
+ For the `model` parameter, supported format for Gemini API includes:
+ - the Gemini model ID, for example: 'gemini-1.5-flash-002'
+ - the model name starts with 'models/', for example:
+ 'models/gemini-1.5-flash-002'
+ - if you would like to use a tuned model, the model name starts with
+ 'tunedModels/', for example:
+ 'tunedModels/1234567890123456789'
+
+ Some models support multimodal input and output.
+
+ Usage:
+
+ .. code-block:: python
+
+ from google.genai import types
+ from google import genai
+
+ client = genai.Client(
+ vertexai=True, project='my-project-id', location='us-central1'
+ )
+
+ for chunk in client.models.generate_content_stream(
+ model='gemini-1.5-flash-002',
+ contents='''What is a good name for a flower shop that specializes in
+ selling bouquets of dried flowers?'''
+ ):
+ print(chunk.text)
+ # **Elegant & Classic:**
+ # * The Dried Bloom
+ # * Everlasting Florals
+ # * Timeless Petals
+
+ for chunk in client.models.generate_content_stream(
+ model='gemini-1.5-flash-002',
+ contents=[
+ types.Part.from_text('What is shown in this image?'),
+ types.Part.from_uri('gs://generativeai-downloads/images/scones.jpg',
+ 'image/jpeg')
+ ]
+ ):
+ print(chunk.text)
+ # The image shows a flat lay arrangement of freshly baked blueberry
+ # scones.
+ """
+
+ if _extra_utils.should_disable_afc(config):
+ return self._generate_content_stream(
+ model=model, contents=contents, config=config
+ )
+ remaining_remote_calls_afc = _extra_utils.get_max_remote_calls_afc(config)
+ logging.info(
+ f'AFC is enabled with max remote calls: {remaining_remote_calls_afc}.'
+ )
+ automatic_function_calling_history = []
+ chunk = None
+ i = 0
+ while remaining_remote_calls_afc > 0:
+ i += 1
+ response = self._generate_content_stream(
+ model=model, contents=contents, config=config
+ )
+ logging.info(f'AFC remote call {i} is done.')
+ remaining_remote_calls_afc -= 1
+ if remaining_remote_calls_afc == 0:
+ logging.info('Reached max remote calls for automatic function calling.')
+
+ function_map = _extra_utils.get_function_map(config)
+
+ if i == 1:
+ # First request gets a function call.
+ # Then get function response parts.
+ # Yield chunks only if there's no function response parts.
+ for chunk in response:
+ if not function_map:
+ yield chunk
+ else:
+ func_response_parts = _extra_utils.get_function_response_parts(
+ chunk, function_map
+ )
+ if not func_response_parts:
+ yield chunk
+
+ else:
+ # Second request and beyond, yield chunks.
+ for chunk in response:
+ if _extra_utils.should_append_afc_history(config):
+ chunk.automatic_function_calling_history = (
+ automatic_function_calling_history
+ )
+ yield chunk
+ if not chunk:
+ break
+ if (
+ not chunk
+ or not chunk.candidates
+ or not chunk.candidates[0].content
+ or not chunk.candidates[0].content.parts
+ ):
+ break
+
+ if not function_map:
+ break
+ func_response_parts = _extra_utils.get_function_response_parts(
+ chunk, function_map
+ )
+ if not func_response_parts:
+ break
+
+ # Append function response parts to contents for the next request.
+ func_call_content = chunk.candidates[0].content
+ func_response_content = types.Content(
+ role='user',
+ parts=func_response_parts,
+ )
+ contents = t.t_contents(self._api_client, contents)
+ if not automatic_function_calling_history:
+ automatic_function_calling_history.extend(contents)
+ contents.append(func_call_content)
+ contents.append(func_response_content)
+ automatic_function_calling_history.append(func_call_content)
+ automatic_function_calling_history.append(func_response_content)
+
  def upscale_image(
  self,
  *,
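
The earlier rename to `_generate_content_stream` plus this new public wrapper means streaming now participates in automatic function calling: the generator re-issues the private stream whenever a chunk produces function responses, and only yields chunks that carry no pending tool call. An end-to-end sketch; the `get_current_weather` tool is invented for illustration, and it assumes (consistent with `_extra_utils.should_disable_afc` above) that AFC is active when callable tools are passed:

    from google import genai

    client = genai.Client(
        vertexai=True, project='my-project-id', location='us-central1'
    )

    def get_current_weather(city: str) -> str:
        """Hypothetical tool: returns the weather in a city."""
        return 'sunny'

    for chunk in client.models.generate_content_stream(
        model='gemini-1.5-flash-002',
        contents='What is the weather in Boston?',
        config={'tools': [get_current_weather]},
    ):
        # Function-call/response round trips happen inside the generator;
        # only chunks without pending function calls are yielded here.
        print(chunk.text)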
@@ -4768,8 +4923,8 @@ class Models(_api_module.BaseModule):
  ) -> Pager[types.Model]:
  """Makes an API request to list the available models.

- If `query_base` is set to True in the config, the API will return all
- available base models. If set to False or not set (default), it will return
+ If `query_base` is set to True in the config or not set (default), the
+ API will return all available base models. If set to False, it will return
  all tuned models.

  Args:
@@ -4792,6 +4947,8 @@ class Models(_api_module.BaseModule):
  types._ListModelsParameters(config=config).config
  or types.ListModelsConfig()
  )
+ if config.query_base is None:
+ config.query_base = True
  if self._api_client.vertexai:
  config = config.copy()
  if not config.query_base:
@@ -4802,8 +4959,6 @@ class Models(_api_module.BaseModule):
  if filter_value
  else 'labels.tune-type:*'
  )
- if not config.query_base:
- config.query_base = False
  return Pager(
  'models',
  self._list,
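
These three hunks flip the default for `list` and drop the old no-op that reassigned False to an already-falsy field: an unset `query_base` is now coerced to True, so a bare call returns base models, and tuned models have to be requested explicitly (on Vertex that path also injects the `labels.tune-type:*` filter). Sketch:

    # Default now lists base models (query_base is coerced to True).
    for model in client.models.list():
        print(model.name)

    # Tuned models must be requested explicitly.
    for model in client.models.list(config={'query_base': False}):
        print(model.name)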
@@ -4871,7 +5026,7 @@ class AsyncModels(_api_module.BaseModule):
  self._api_client._verify_response(return_value)
  return return_value

- async def generate_content_stream(
+ async def _generate_content_stream(
  self,
  *,
  model: str,
@@ -5666,6 +5821,7 @@ class AsyncModels(_api_module.BaseModule):
  f'AFC is enabled with max remote calls: {remaining_remote_calls_afc}.'
  )
  automatic_function_calling_history = []
+ response = None
  while remaining_remote_calls_afc > 0:
  response = await self._generate_content(
  model=model, contents=contents, config=config
@@ -5677,6 +5833,8 @@ class AsyncModels(_api_module.BaseModule):
  function_map = _extra_utils.get_function_map(config)
  if not function_map:
  break
+ if not response:
+ break
  if (
  not response.candidates
  or not response.candidates[0].content
@@ -5688,15 +5846,18 @@ class AsyncModels(_api_module.BaseModule):
  )
  if not func_response_parts:
  break
- contents = t.t_contents(self._api_client, contents)
- contents.append(response.candidates[0].content)
- contents.append(
- types.Content(
- role='user',
- parts=func_response_parts,
- )
+ func_call_content = response.candidates[0].content
+ func_response_content = types.Content(
+ role='user',
+ parts=func_response_parts,
  )
- automatic_function_calling_history.extend(contents)
+ contents = t.t_contents(self._api_client, contents)
+ if not automatic_function_calling_history:
+ automatic_function_calling_history.extend(contents)
+ contents.append(func_call_content)
+ contents.append(func_response_content)
+ automatic_function_calling_history.append(func_call_content)
+ automatic_function_calling_history.append(func_response_content)

  if _extra_utils.should_append_afc_history(config):
  response.automatic_function_calling_history = (
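
As in the sync path, this rework seeds `automatic_function_calling_history` with the original contents only once, then appends a single function-call/function-response pair per remote call, so earlier turns are no longer duplicated on each loop iteration. A small inspection sketch, inside an async function and reusing the hypothetical `get_current_weather` tool from the earlier streaming example:

    response = await client.aio.models.generate_content(
        model='gemini-1.5-flash-002',
        contents='What is the weather in Boston?',
        config={'tools': [get_current_weather]},
    )
    # Expected shape: the user turn once, then alternating model
    # function-call and user function-response contents.
    for content in response.automatic_function_calling_history:
        print(content.role)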
@@ -5704,6 +5865,154 @@ class AsyncModels(_api_module.BaseModule):
  )
  return response

+ async def generate_content_stream(
+ self,
+ *,
+ model: str,
+ contents: Union[types.ContentListUnion, types.ContentListUnionDict],
+ config: Optional[types.GenerateContentConfigOrDict] = None,
+ ) -> Awaitable[AsyncIterator[types.GenerateContentResponse]]:
+ """Makes an API request to generate content using a model and yields the model's response in chunks.
+
+ For the `model` parameter, supported format for Vertex AI API includes:
+ - the Gemini model ID, for example: 'gemini-1.5-flash-002'
+ - the full resource name starts with 'projects/', for example:
+ 'projects/my-project-id/locations/us-central1/publishers/google/models/gemini-1.5-flash-002'
+ - the partial resource name with 'publishers/', for example:
+ 'publishers/google/models/gemini-1.5-flash-002' or
+ 'publishers/meta/models/llama-3.1-405b-instruct-maas'
+ - `/` separated publisher and model name, for example:
+ 'google/gemini-1.5-flash-002' or 'meta/llama-3.1-405b-instruct-maas'
+
+ For the `model` parameter, supported format for Gemini API includes:
+ - the Gemini model ID, for example: 'gemini-1.5-flash-002'
+ - the model name starts with 'models/', for example:
+ 'models/gemini-1.5-flash-002'
+ - if you would like to use a tuned model, the model name starts with
+ 'tunedModels/', for example:
+ 'tunedModels/1234567890123456789'
+
+ Some models support multimodal input and output.
+
+ Usage:
+
+ .. code-block:: python
+
+ from google.genai import types
+ from google import genai
+
+ client = genai.Client(
+ vertexai=True, project='my-project-id', location='us-central1'
+ )
+
+ async for chunk in await client.aio.models.generate_content_stream(
+ model='gemini-1.5-flash-002',
+ contents='''What is a good name for a flower shop that specializes in
+ selling bouquets of dried flowers?'''
+ ):
+ print(chunk.text)
+ # **Elegant & Classic:**
+ # * The Dried Bloom
+ # * Everlasting Florals
+ # * Timeless Petals
+
+ async for chunk in await client.aio.models.generate_content_stream(
+ model='gemini-1.5-flash-002',
+ contents=[
+ types.Part.from_text('What is shown in this image?'),
+ types.Part.from_uri('gs://generativeai-downloads/images/scones.jpg',
+ 'image/jpeg')
+ ]
+ ):
+ print(chunk.text)
+ # The image shows a flat lay arrangement of freshly baked blueberry
+ # scones.
+ """
+
+ if _extra_utils.should_disable_afc(config):
+ return self._generate_content_stream(
+ model=model, contents=contents, config=config
+ )
+
+ async def async_generator(model, contents, config):
+ remaining_remote_calls_afc = _extra_utils.get_max_remote_calls_afc(config)
+ logging.info(
+ f'AFC is enabled with max remote calls: {remaining_remote_calls_afc}.'
+ )
+ automatic_function_calling_history = []
+ chunk = None
+ i = 0
+ while remaining_remote_calls_afc > 0:
+ i += 1
+ response = await self._generate_content_stream(
+ model=model, contents=contents, config=config
+ )
+ logging.info(f'AFC remote call {i} is done.')
+ remaining_remote_calls_afc -= 1
+ if remaining_remote_calls_afc == 0:
+ logging.info(
+ 'Reached max remote calls for automatic function calling.'
+ )
+
+ function_map = _extra_utils.get_function_map(config)
+
+ if i == 1:
+ # First request gets a function call.
+ # Then get function response parts.
+ # Yield chunks only if there's no function response parts.
+ async for chunk in response:
+ if not function_map:
+ yield chunk
+ else:
+ func_response_parts = _extra_utils.get_function_response_parts(
+ chunk, function_map
+ )
+ if not func_response_parts:
+ yield chunk
+
+ else:
+ # Second request and beyond, yield chunks.
+ async for chunk in response:
+
+ if _extra_utils.should_append_afc_history(config):
+ chunk.automatic_function_calling_history = (
+ automatic_function_calling_history
+ )
+ yield chunk
+
+ if not chunk:
+ break
+ if (
+ not chunk
+ or not chunk.candidates
+ or not chunk.candidates[0].content
+ or not chunk.candidates[0].content.parts
+ ):
+ break
+ if not function_map:
+ break
+ func_response_parts = _extra_utils.get_function_response_parts(
+ chunk, function_map
+ )
+ if not func_response_parts:
+ break
+
+ # Append function response parts to contents for the next request.
+ func_call_content = chunk.candidates[0].content
+ func_response_content = types.Content(
+ role='user',
+ parts=func_response_parts,
+ )
+ contents = t.t_contents(self._api_client, contents)
+ if not automatic_function_calling_history:
+ automatic_function_calling_history.extend(contents)
+ contents.append(func_call_content)
+ contents.append(func_response_content)
+ automatic_function_calling_history.append(func_call_content)
+ automatic_function_calling_history.append(func_response_content)
+
+ return async_generator(model, contents, config)
+
  async def list(
  self,
  *,
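
Note the declared return type of the async variant, `Awaitable[AsyncIterator[...]]`: callers await the method once to obtain the stream and then iterate it, exactly as the docstring shows. A minimal, self-contained sketch with placeholder project settings:

    import asyncio

    from google import genai

    async def main():
        client = genai.Client(
            vertexai=True, project='my-project-id', location='us-central1'
        )
        stream = await client.aio.models.generate_content_stream(
            model='gemini-1.5-flash-002',
            contents='Name three kinds of dried flowers.',
        )
        async for chunk in stream:
            print(chunk.text)

    asyncio.run(main())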