google-genai 1.2.0__py3-none-any.whl → 1.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
google/genai/models.py CHANGED
@@ -28,6 +28,8 @@ from ._common import get_value_by_path as getv
28
28
  from ._common import set_value_by_path as setv
29
29
  from .pagers import AsyncPager, Pager
30
30
 
31
+ logger = logging.getLogger('google_genai.models')
32
+
31
33
 
32
34
  def _Part_to_mldev(
33
35
  api_client: ApiClient,
@@ -2789,6 +2791,202 @@ def _ComputeTokensParameters_to_vertex(
2789
2791
  return to_object
2790
2792
 
2791
2793
 
2794
def _GenerateVideosConfig_to_mldev(
    api_client: ApiClient,
    from_object: Union[dict, object],
    parent_object: Optional[dict] = None,
) -> dict:
  """Converts a GenerateVideosConfig to the Gemini (mldev) API format.

  Supported fields are written into ``parent_object['parameters']``; fields
  the Gemini API does not accept raise ``ValueError``.

  Args:
    api_client: The API client (unused here; kept for converter symmetry).
    from_object: The source config (dict or object), read via ``getv``.
    parent_object: The request dict that receives the converted values.

  Returns:
    An empty dict; all output is written into ``parent_object``.

  Raises:
    ValueError: If a field unsupported by the Gemini API is set.
  """
  to_object = {}

  # (source field, target key) pairs in the original validation order.
  # A ``None`` target marks a field the Gemini API rejects.
  field_map = (
      ('number_of_videos', 'sampleCount'),
      ('output_gcs_uri', None),
      ('fps', None),
      ('duration_seconds', None),
      ('seed', None),
      ('aspect_ratio', 'aspectRatio'),
      ('resolution', None),
      ('person_generation', 'personGeneration'),
      ('pubsub_topic', None),
      ('negative_prompt', 'negativePrompt'),
      ('enhance_prompt', None),
  )
  for field, target in field_map:
    value = getv(from_object, [field])
    if value is None:
      continue
    if target is None:
      raise ValueError(f'{field} parameter is not supported in Gemini API.')
    setv(parent_object, ['parameters', target], value)

  return to_object
2853
+
2854
+
2855
def _GenerateVideosConfig_to_vertex(
    api_client: ApiClient,
    from_object: Union[dict, object],
    parent_object: dict = None,
) -> dict:
  """Converts a GenerateVideosConfig to the Vertex AI API format.

  Every supported field is copied into ``parent_object['parameters']`` under
  its camelCase key; the returned dict itself stays empty.
  """
  to_object = {}

  # snake_case config field -> camelCase Vertex request parameter.
  vertex_keys = (
      ('number_of_videos', 'sampleCount'),
      ('output_gcs_uri', 'storageUri'),
      ('fps', 'fps'),
      ('duration_seconds', 'durationSeconds'),
      ('seed', 'seed'),
      ('aspect_ratio', 'aspectRatio'),
      ('resolution', 'resolution'),
      ('person_generation', 'personGeneration'),
      ('pubsub_topic', 'pubsubTopic'),
      ('negative_prompt', 'negativePrompt'),
      ('enhance_prompt', 'enhancePrompt'),
  )
  for field, key in vertex_keys:
    value = getv(from_object, [field])
    if value is not None:
      setv(parent_object, ['parameters', key], value)

  return to_object
2932
+
2933
+
2934
def _GenerateVideosParameters_to_mldev(
    api_client: ApiClient,
    from_object: Union[dict, object],
    parent_object: dict = None,
) -> dict:
  """Builds the Gemini (mldev) request dict for generate_videos."""
  to_object = {}

  model = getv(from_object, ['model'])
  if model is not None:
    # The resolved model name is kept under '_url' for URL construction,
    # not in the request payload itself.
    setv(to_object, ['_url', 'model'], t.t_model(api_client, model))

  prompt = getv(from_object, ['prompt'])
  if prompt is not None:
    setv(to_object, ['instances[0]', 'prompt'], prompt)

  config = getv(from_object, ['config'])
  if config is not None:
    setv(
        to_object,
        ['config'],
        _GenerateVideosConfig_to_mldev(api_client, config, to_object),
    )

  return to_object
2960
+
2961
+
2962
def _GenerateVideosParameters_to_vertex(
    api_client: ApiClient,
    from_object: Union[dict, object],
    parent_object: dict = None,
) -> dict:
  """Builds the Vertex AI request dict for generate_videos."""
  to_object = {}

  model = getv(from_object, ['model'])
  if model is not None:
    # The resolved model name is kept under '_url' for URL construction,
    # not in the request payload itself.
    setv(to_object, ['_url', 'model'], t.t_model(api_client, model))

  prompt = getv(from_object, ['prompt'])
  if prompt is not None:
    setv(to_object, ['instances[0]', 'prompt'], prompt)

  config = getv(from_object, ['config'])
  if config is not None:
    setv(
        to_object,
        ['config'],
        _GenerateVideosConfig_to_vertex(api_client, config, to_object),
    )

  return to_object
2988
+
2989
+
2792
2990
  def _SafetyFilterLevel_to_mldev_enum_validate(enum_value: Any):
2793
2991
  if enum_value in set(['BLOCK_NONE']):
2794
2992
  raise ValueError(f'{enum_value} enum value is not supported in Gemini API.')
@@ -3788,6 +3986,216 @@ def _ComputeTokensResponse_from_vertex(
3788
3986
  return to_object
3789
3987
 
3790
3988
 
3989
def _Video_from_mldev(
    api_client: ApiClient,
    from_object: Union[dict, object],
    parent_object: dict = None,
) -> dict:
  """Converts a Gemini (mldev) API video payload to SDK field names."""
  to_object = {}

  uri = getv(from_object, ['uri'])
  if uri is not None:
    setv(to_object, ['uri'], uri)

  encoded = getv(from_object, ['encodedVideo'])
  if encoded is not None:
    # Byte content goes through t.t_bytes before being stored.
    setv(to_object, ['video_bytes'], t.t_bytes(api_client, encoded))

  encoding = getv(from_object, ['encoding'])
  if encoding is not None:
    setv(to_object, ['mime_type'], encoding)

  return to_object
4009
+
4010
+
4011
def _Video_from_vertex(
    api_client: ApiClient,
    from_object: Union[dict, object],
    parent_object: dict = None,
) -> dict:
  """Converts a Vertex AI video payload to SDK field names."""
  to_object = {}

  gcs_uri = getv(from_object, ['gcsUri'])
  if gcs_uri is not None:
    setv(to_object, ['uri'], gcs_uri)

  encoded = getv(from_object, ['bytesBase64Encoded'])
  if encoded is not None:
    # Byte content goes through t.t_bytes before being stored.
    setv(to_object, ['video_bytes'], t.t_bytes(api_client, encoded))

  mime_type = getv(from_object, ['mimeType'])
  if mime_type is not None:
    setv(to_object, ['mime_type'], mime_type)

  return to_object
4031
+
4032
+
4033
def _GeneratedVideo_from_mldev(
    api_client: ApiClient,
    from_object: Union[dict, object],
    parent_object: dict = None,
) -> dict:
  """Wraps a converted mldev video payload under the SDK's 'video' key."""
  to_object = {}

  payload = getv(from_object, ['_self'])
  if payload is not None:
    setv(
        to_object,
        ['video'],
        _Video_from_mldev(api_client, payload, to_object),
    )

  return to_object
4047
+
4048
+
4049
def _GeneratedVideo_from_vertex(
    api_client: ApiClient,
    from_object: Union[dict, object],
    parent_object: dict = None,
) -> dict:
  """Wraps a converted Vertex video payload under the SDK's 'video' key."""
  to_object = {}

  payload = getv(from_object, ['_self'])
  if payload is not None:
    setv(
        to_object,
        ['video'],
        _Video_from_vertex(api_client, payload, to_object),
    )

  return to_object
4063
+
4064
+
4065
def _GenerateVideosResponse_from_mldev(
    api_client: ApiClient,
    from_object: Union[dict, object],
    parent_object: dict = None,
) -> dict:
  """Converts a Gemini (mldev) generate-videos response to SDK names."""
  to_object = {}

  videos = getv(from_object, ['videos'])
  if videos is not None:
    converted = [
        _GeneratedVideo_from_mldev(api_client, item, to_object)
        for item in videos
    ]
    setv(to_object, ['generated_videos'], converted)

  filtered_count = getv(from_object, ['raiMediaFilteredCount'])
  if filtered_count is not None:
    setv(to_object, ['rai_media_filtered_count'], filtered_count)

  filtered_reasons = getv(from_object, ['raiMediaFilteredReasons'])
  if filtered_reasons is not None:
    setv(to_object, ['rai_media_filtered_reasons'], filtered_reasons)

  return to_object
4096
+
4097
+
4098
def _GenerateVideosResponse_from_vertex(
    api_client: ApiClient,
    from_object: Union[dict, object],
    parent_object: dict = None,
) -> dict:
  """Converts a Vertex AI generate-videos response to SDK names."""
  to_object = {}

  videos = getv(from_object, ['videos'])
  if videos is not None:
    converted = [
        _GeneratedVideo_from_vertex(api_client, item, to_object)
        for item in videos
    ]
    setv(to_object, ['generated_videos'], converted)

  filtered_count = getv(from_object, ['raiMediaFilteredCount'])
  if filtered_count is not None:
    setv(to_object, ['rai_media_filtered_count'], filtered_count)

  filtered_reasons = getv(from_object, ['raiMediaFilteredReasons'])
  if filtered_reasons is not None:
    setv(to_object, ['rai_media_filtered_reasons'], filtered_reasons)

  return to_object
4129
+
4130
+
4131
def _GenerateVideosOperation_from_mldev(
    api_client: ApiClient,
    from_object: Union[dict, object],
    parent_object: dict = None,
) -> dict:
  """Converts a Gemini (mldev) long-running operation for generate_videos."""
  to_object = {}

  # Operation envelope fields are copied through unchanged, in order.
  for field in ('name', 'metadata', 'done', 'error', 'response'):
    value = getv(from_object, [field])
    if value is not None:
      setv(to_object, [field], value)

  # The mldev payload nests the result under 'generateVideoResponse'.
  inner = getv(from_object, ['response', 'generateVideoResponse'])
  if inner is not None:
    setv(
        to_object,
        ['result'],
        _GenerateVideosResponse_from_mldev(api_client, inner, to_object),
    )

  return to_object
4164
+
4165
+
4166
def _GenerateVideosOperation_from_vertex(
    api_client: ApiClient,
    from_object: Union[dict, object],
    parent_object: dict = None,
) -> dict:
  """Converts a Vertex AI long-running operation for generate_videos."""
  to_object = {}

  # Operation envelope fields are copied through unchanged, in order.
  for field in ('name', 'metadata', 'done', 'error', 'response'):
    value = getv(from_object, [field])
    if value is not None:
      setv(to_object, [field], value)

  # Vertex returns the result payload directly under 'response'.
  response = getv(from_object, ['response'])
  if response is not None:
    setv(
        to_object,
        ['result'],
        _GenerateVideosResponse_from_vertex(api_client, response, to_object),
    )

  return to_object
4197
+
4198
+
3791
4199
  class Models(_api_module.BaseModule):
3792
4200
 
3793
4201
  def _generate_content(
@@ -3915,7 +4323,7 @@ class Models(_api_module.BaseModule):
3915
4323
  contents: Union[types.ContentListUnion, types.ContentListUnionDict],
3916
4324
  config: Optional[types.EmbedContentConfigOrDict] = None,
3917
4325
  ) -> types.EmbedContentResponse:
3918
- """Calculates embeddings for the given contents(only text is supported).
4326
+ """Calculates embeddings for the given contents. Only text is supported.
3919
4327
 
3920
4328
  Args:
3921
4329
  model (str): The model to use.
@@ -4007,7 +4415,7 @@ class Models(_api_module.BaseModule):
4007
4415
  .. code-block:: python
4008
4416
 
4009
4417
  response = client.models.generate_images(
4010
- model='imagen-3.0-generate-001',
4418
+ model='imagen-3.0-generate-002',
4011
4419
  prompt='Man with a dog',
4012
4420
  config=types.GenerateImagesConfig(
4013
4421
  number_of_images= 1,
@@ -4447,10 +4855,11 @@ class Models(_api_module.BaseModule):
4447
4855
  ) -> types.CountTokensResponse:
4448
4856
  """Counts the number of tokens in the given content.
4449
4857
 
4858
+ Multimodal input is supported for Gemini models.
4859
+
4450
4860
  Args:
4451
4861
  model (str): The model to use for counting tokens.
4452
4862
  contents (list[types.Content]): The content to count tokens for.
4453
- Multimodal input is supported for Gemini models.
4454
4863
  config (CountTokensConfig): The configuration for counting tokens.
4455
4864
 
4456
4865
  Usage:
@@ -4522,14 +4931,15 @@ class Models(_api_module.BaseModule):
4522
4931
  contents: Union[types.ContentListUnion, types.ContentListUnionDict],
4523
4932
  config: Optional[types.ComputeTokensConfigOrDict] = None,
4524
4933
  ) -> types.ComputeTokensResponse:
4525
- """Return a list of tokens based on the input text.
4934
+ """Return a list of tokens based on the input contents.
4935
+
4936
+ Only text is supported.
4526
4937
 
4527
4938
  This method is not supported by the Gemini Developer API.
4528
4939
 
4529
4940
  Args:
4530
4941
  model (str): The model to use.
4531
- contents (list[shared.Content]): The content to compute tokens for. Only
4532
- text is supported.
4942
+ contents (list[shared.Content]): The content to compute tokens for.
4533
4943
 
4534
4944
  Usage:
4535
4945
 
@@ -4592,6 +5002,88 @@ class Models(_api_module.BaseModule):
4592
5002
  self._api_client._verify_response(return_value)
4593
5003
  return return_value
4594
5004
 
5005
+ @_common.experimental_warning(
5006
+ 'This method is experimental and may change in future versions.'
5007
+ )
5008
+ def generate_videos(
5009
+ self,
5010
+ *,
5011
+ model: str,
5012
+ prompt: Optional[str] = None,
5013
+ config: Optional[types.GenerateVideosConfigOrDict] = None,
5014
+ ) -> types.GenerateVideosOperation:
5015
+ """Generates videos based on a text description and configuration.
5016
+
5017
+ Args:
5018
+ model: The model to use.
5019
+ instances: A list of prompts, images and videos to generate videos from.
5020
+ config: Configuration for generation.
5021
+
5022
+ Usage:
5023
+
5024
+ ```
5025
+ operation = client.models.generate_videos(
5026
+ model="veo-2.0-generate-001",
5027
+ prompt="A neon hologram of a cat driving at top speed",
5028
+ )
5029
+ while not operation.done:
5030
+ time.sleep(10)
5031
+ operation = client.operations.get(operation)
5032
+
5033
+ operation.result.generated_videos[0].video.uri
5034
+ ```
5035
+ """
5036
+
5037
+ parameter_model = types._GenerateVideosParameters(
5038
+ model=model,
5039
+ prompt=prompt,
5040
+ config=config,
5041
+ )
5042
+
5043
+ if self._api_client.vertexai:
5044
+ request_dict = _GenerateVideosParameters_to_vertex(
5045
+ self._api_client, parameter_model
5046
+ )
5047
+ path = '{model}:predictLongRunning'.format_map(request_dict.get('_url'))
5048
+ else:
5049
+ request_dict = _GenerateVideosParameters_to_mldev(
5050
+ self._api_client, parameter_model
5051
+ )
5052
+ path = '{model}:predictLongRunning'.format_map(request_dict.get('_url'))
5053
+ query_params = request_dict.get('_query')
5054
+ if query_params:
5055
+ path = f'{path}?{urlencode(query_params)}'
5056
+ # TODO: remove the hack that pops config.
5057
+ request_dict.pop('config', None)
5058
+
5059
+ http_options = None
5060
+ if isinstance(config, dict):
5061
+ http_options = config.get('http_options', None)
5062
+ elif hasattr(config, 'http_options'):
5063
+ http_options = config.http_options
5064
+
5065
+ request_dict = _common.convert_to_dict(request_dict)
5066
+ request_dict = _common.encode_unserializable_types(request_dict)
5067
+
5068
+ response_dict = self._api_client.request(
5069
+ 'post', path, request_dict, http_options
5070
+ )
5071
+
5072
+ if self._api_client.vertexai:
5073
+ response_dict = _GenerateVideosOperation_from_vertex(
5074
+ self._api_client, response_dict
5075
+ )
5076
+ else:
5077
+ response_dict = _GenerateVideosOperation_from_mldev(
5078
+ self._api_client, response_dict
5079
+ )
5080
+
5081
+ return_value = types.GenerateVideosOperation._from_response(
5082
+ response=response_dict, kwargs=parameter_model
5083
+ )
5084
+ self._api_client._verify_response(return_value)
5085
+ return return_value
5086
+
4595
5087
  def generate_content(
4596
5088
  self,
4597
5089
  *,
@@ -4601,19 +5093,19 @@ class Models(_api_module.BaseModule):
4601
5093
  ) -> types.GenerateContentResponse:
4602
5094
  """Makes an API request to generate content using a model.
4603
5095
 
4604
- For the `model` parameter, supported format for Vertex AI API includes:
4605
- - the Gemini model ID, for example: 'gemini-1.5-flash-002'
4606
- - the full resource name starts with 'projects/', for example:
5096
+ For the `model` parameter, supported formats for Vertex AI API include:
5097
+ - The Gemini model ID, for example: 'gemini-1.5-flash-002'
5098
+ - The full resource name starts with 'projects/', for example:
4607
5099
  'projects/my-project-id/locations/us-central1/publishers/google/models/gemini-1.5-flash-002'
4608
- - the partial resource name with 'publishers/', for example:
5100
+ - The partial resource name with 'publishers/', for example:
4609
5101
  'publishers/google/models/gemini-1.5-flash-002' or
4610
5102
  'publishers/meta/models/llama-3.1-405b-instruct-maas'
4611
5103
  - `/` separated publisher and model name, for example:
4612
5104
  'google/gemini-1.5-flash-002' or 'meta/llama-3.1-405b-instruct-maas'
4613
5105
 
4614
- For the `model` parameter, supported format for Gemini API includes:
4615
- - the Gemini model ID, for example: 'gemini-1.5-flash-002'
4616
- - the model name starts with 'models/', for example:
5106
+ For the `model` parameter, supported formats for Gemini API include:
5107
+ - The Gemini model ID, for example: 'gemini-1.5-flash-002'
5108
+ - The model name starts with 'models/', for example:
4617
5109
  'models/gemini-1.5-flash-002'
4618
5110
  - if you would like to use a tuned model, the model name starts with
4619
5111
  'tunedModels/', for example:
@@ -4661,7 +5153,7 @@ class Models(_api_module.BaseModule):
4661
5153
  model=model, contents=contents, config=config
4662
5154
  )
4663
5155
  remaining_remote_calls_afc = _extra_utils.get_max_remote_calls_afc(config)
4664
- logging.info(
5156
+ logger.info(
4665
5157
  f'AFC is enabled with max remote calls: {remaining_remote_calls_afc}.'
4666
5158
  )
4667
5159
  automatic_function_calling_history = []
@@ -4672,10 +5164,10 @@ class Models(_api_module.BaseModule):
4672
5164
  response = self._generate_content(
4673
5165
  model=model, contents=contents, config=config
4674
5166
  )
4675
- logging.info(f'AFC remote call {i} is done.')
5167
+ logger.info(f'AFC remote call {i} is done.')
4676
5168
  remaining_remote_calls_afc -= 1
4677
5169
  if remaining_remote_calls_afc == 0:
4678
- logging.info('Reached max remote calls for automatic function calling.')
5170
+ logger.info('Reached max remote calls for automatic function calling.')
4679
5171
 
4680
5172
  function_map = _extra_utils.get_function_map(config)
4681
5173
  if not function_map:
@@ -4720,21 +5212,21 @@ class Models(_api_module.BaseModule):
4720
5212
  ) -> Iterator[types.GenerateContentResponse]:
4721
5213
  """Makes an API request to generate content using a model and yields the model's response in chunks.
4722
5214
 
4723
- For the `model` parameter, supported format for Vertex AI API includes:
4724
- - the Gemini model ID, for example: 'gemini-1.5-flash-002'
4725
- - the full resource name starts with 'projects/', for example:
5215
+ For the `model` parameter, supported formats for Vertex AI API include:
5216
+ - The Gemini model ID, for example: 'gemini-1.5-flash-002'
5217
+ - The full resource name starts with 'projects/', for example:
4726
5218
  'projects/my-project-id/locations/us-central1/publishers/google/models/gemini-1.5-flash-002'
4727
- - the partial resource name with 'publishers/', for example:
5219
+ - The partial resource name with 'publishers/', for example:
4728
5220
  'publishers/google/models/gemini-1.5-flash-002' or
4729
5221
  'publishers/meta/models/llama-3.1-405b-instruct-maas'
4730
5222
  - `/` separated publisher and model name, for example:
4731
5223
  'google/gemini-1.5-flash-002' or 'meta/llama-3.1-405b-instruct-maas'
4732
5224
 
4733
- For the `model` parameter, supported format for Gemini API includes:
4734
- - the Gemini model ID, for example: 'gemini-1.5-flash-002'
4735
- - the model name starts with 'models/', for example:
5225
+ For the `model` parameter, supported formats for Gemini API include:
5226
+ - The Gemini model ID, for example: 'gemini-1.5-flash-002'
5227
+ - The model name starts with 'models/', for example:
4736
5228
  'models/gemini-1.5-flash-002'
4737
- - if you would like to use a tuned model, the model name starts with
5229
+ - If you would like to use a tuned model, the model name starts with
4738
5230
  'tunedModels/', for example:
4739
5231
  'tunedModels/1234567890123456789'
4740
5232
 
@@ -4782,7 +5274,7 @@ class Models(_api_module.BaseModule):
4782
5274
  return
4783
5275
 
4784
5276
  remaining_remote_calls_afc = _extra_utils.get_max_remote_calls_afc(config)
4785
- logging.info(
5277
+ logger.info(
4786
5278
  f'AFC is enabled with max remote calls: {remaining_remote_calls_afc}.'
4787
5279
  )
4788
5280
  automatic_function_calling_history = []
@@ -4794,10 +5286,10 @@ class Models(_api_module.BaseModule):
4794
5286
  response = self._generate_content_stream(
4795
5287
  model=model, contents=contents, config=config
4796
5288
  )
4797
- logging.info(f'AFC remote call {i} is done.')
5289
+ logger.info(f'AFC remote call {i} is done.')
4798
5290
  remaining_remote_calls_afc -= 1
4799
5291
  if remaining_remote_calls_afc == 0:
4800
- logging.info('Reached max remote calls for automatic function calling.')
5292
+ logger.info('Reached max remote calls for automatic function calling.')
4801
5293
 
4802
5294
  function_map = _extra_utils.get_function_map(config)
4803
5295
 
@@ -4809,6 +5301,12 @@ class Models(_api_module.BaseModule):
4809
5301
  if not function_map:
4810
5302
  yield chunk
4811
5303
  else:
5304
+ if (
5305
+ not chunk.candidates
5306
+ or not chunk.candidates[0].content
5307
+ or not chunk.candidates[0].content.parts
5308
+ ):
5309
+ break
4812
5310
  func_response_parts = _extra_utils.get_function_response_parts(
4813
5311
  chunk, function_map
4814
5312
  )
@@ -4823,20 +5321,16 @@ class Models(_api_module.BaseModule):
4823
5321
  automatic_function_calling_history
4824
5322
  )
4825
5323
  yield chunk
5324
+ if (
5325
+ not chunk.candidates
5326
+ or not chunk.candidates[0].content
5327
+ or not chunk.candidates[0].content.parts
5328
+ ):
5329
+ break
4826
5330
  func_response_parts = _extra_utils.get_function_response_parts(
4827
5331
  chunk, function_map
4828
5332
  )
4829
5333
 
4830
- if not chunk:
4831
- break
4832
- if (
4833
- not chunk
4834
- or not chunk.candidates
4835
- or not chunk.candidates[0].content
4836
- or not chunk.candidates[0].content.parts
4837
- ):
4838
- break
4839
-
4840
5334
  if not function_map:
4841
5335
  break
4842
5336
  if not func_response_parts:
@@ -4898,7 +5392,7 @@ class Models(_api_module.BaseModule):
4898
5392
 
4899
5393
  # Convert to API config.
4900
5394
  config = config or {}
4901
- config_dct = config if isinstance(config, dict) else config.dict()
5395
+ config_dct = config if isinstance(config, dict) else config.model_dump()
4902
5396
  api_config = types._UpscaleImageAPIConfigDict(**config_dct) # pylint: disable=protected-access
4903
5397
 
4904
5398
  # Provide default values through API config.
@@ -5095,7 +5589,7 @@ class AsyncModels(_api_module.BaseModule):
5095
5589
  contents: Union[types.ContentListUnion, types.ContentListUnionDict],
5096
5590
  config: Optional[types.EmbedContentConfigOrDict] = None,
5097
5591
  ) -> types.EmbedContentResponse:
5098
- """Calculates embeddings for the given contents(only text is supported).
5592
+ """Calculates embeddings for the given contents. Only text is supported.
5099
5593
 
5100
5594
  Args:
5101
5595
  model (str): The model to use.
@@ -5106,7 +5600,7 @@ class AsyncModels(_api_module.BaseModule):
5106
5600
 
5107
5601
  .. code-block:: python
5108
5602
 
5109
- embeddings = client.models.embed_content(
5603
+ embeddings = await client.aio.models.embed_content(
5110
5604
  model= 'text-embedding-004',
5111
5605
  contents=[
5112
5606
  'What is your name?',
@@ -5186,8 +5680,8 @@ class AsyncModels(_api_module.BaseModule):
5186
5680
 
5187
5681
  .. code-block:: python
5188
5682
 
5189
- response = client.models.generate_images(
5190
- model='imagen-3.0-generate-001',
5683
+ response = await client.aio.models.generate_images(
5684
+ model='imagen-3.0-generate-002',
5191
5685
  prompt='Man with a dog',
5192
5686
  config=types.GenerateImagesConfig(
5193
5687
  number_of_images= 1,
@@ -5284,7 +5778,7 @@ class AsyncModels(_api_module.BaseModule):
5284
5778
  mask_dilation=0.06,
5285
5779
  ),
5286
5780
  )
5287
- response = client.models.edit_image(
5781
+ response = await client.aio.models.edit_image(
5288
5782
  model='imagen-3.0-capability-preview-0930',
5289
5783
  prompt='man with dog',
5290
5784
  reference_images=[raw_ref_image, mask_ref_image],
@@ -5627,17 +6121,18 @@ class AsyncModels(_api_module.BaseModule):
5627
6121
  ) -> types.CountTokensResponse:
5628
6122
  """Counts the number of tokens in the given content.
5629
6123
 
6124
+ Multimodal input is supported for Gemini models.
6125
+
5630
6126
  Args:
5631
6127
  model (str): The model to use for counting tokens.
5632
6128
  contents (list[types.Content]): The content to count tokens for.
5633
- Multimodal input is supported for Gemini models.
5634
6129
  config (CountTokensConfig): The configuration for counting tokens.
5635
6130
 
5636
6131
  Usage:
5637
6132
 
5638
6133
  .. code-block:: python
5639
6134
 
5640
- response = client.models.count_tokens(
6135
+ response = await client.aio.models.count_tokens(
5641
6136
  model='gemini-1.5-flash',
5642
6137
  contents='What is your name?',
5643
6138
  )
@@ -5702,20 +6197,21 @@ class AsyncModels(_api_module.BaseModule):
5702
6197
  contents: Union[types.ContentListUnion, types.ContentListUnionDict],
5703
6198
  config: Optional[types.ComputeTokensConfigOrDict] = None,
5704
6199
  ) -> types.ComputeTokensResponse:
5705
- """Return a list of tokens based on the input text.
6200
+ """Return a list of tokens based on the input contents.
6201
+
6202
+ Only text is supported.
5706
6203
 
5707
6204
  This method is not supported by the Gemini Developer API.
5708
6205
 
5709
6206
  Args:
5710
6207
  model (str): The model to use.
5711
- contents (list[shared.Content]): The content to compute tokens for. Only
5712
- text is supported.
6208
+ contents (list[shared.Content]): The content to compute tokens for.
5713
6209
 
5714
6210
  Usage:
5715
6211
 
5716
6212
  .. code-block:: python
5717
6213
 
5718
- response = client.models.compute_tokens(
6214
+ response = await client.aio.models.compute_tokens(
5719
6215
  model='gemini-1.5-flash',
5720
6216
  contents='What is your name?',
5721
6217
  )
@@ -5772,6 +6268,88 @@ class AsyncModels(_api_module.BaseModule):
5772
6268
  self._api_client._verify_response(return_value)
5773
6269
  return return_value
5774
6270
 
6271
+ @_common.experimental_warning(
6272
+ 'This method is experimental and may change in future versions.'
6273
+ )
6274
+ async def generate_videos(
6275
+ self,
6276
+ *,
6277
+ model: str,
6278
+ prompt: Optional[str] = None,
6279
+ config: Optional[types.GenerateVideosConfigOrDict] = None,
6280
+ ) -> types.GenerateVideosOperation:
6281
+ """Generates videos based on a text description and configuration.
6282
+
6283
+ Args:
6284
+ model: The model to use.
6285
+ instances: A list of prompts, images and videos to generate videos from.
6286
+ config: Configuration for generation.
6287
+
6288
+ Usage:
6289
+
6290
+ ```
6291
+ operation = client.models.generate_videos(
6292
+ model="veo-2.0-generate-001",
6293
+ prompt="A neon hologram of a cat driving at top speed",
6294
+ )
6295
+ while not operation.done:
6296
+ time.sleep(10)
6297
+ operation = client.operations.get(operation)
6298
+
6299
+ operation.result.generated_videos[0].video.uri
6300
+ ```
6301
+ """
6302
+
6303
+ parameter_model = types._GenerateVideosParameters(
6304
+ model=model,
6305
+ prompt=prompt,
6306
+ config=config,
6307
+ )
6308
+
6309
+ if self._api_client.vertexai:
6310
+ request_dict = _GenerateVideosParameters_to_vertex(
6311
+ self._api_client, parameter_model
6312
+ )
6313
+ path = '{model}:predictLongRunning'.format_map(request_dict.get('_url'))
6314
+ else:
6315
+ request_dict = _GenerateVideosParameters_to_mldev(
6316
+ self._api_client, parameter_model
6317
+ )
6318
+ path = '{model}:predictLongRunning'.format_map(request_dict.get('_url'))
6319
+ query_params = request_dict.get('_query')
6320
+ if query_params:
6321
+ path = f'{path}?{urlencode(query_params)}'
6322
+ # TODO: remove the hack that pops config.
6323
+ request_dict.pop('config', None)
6324
+
6325
+ http_options = None
6326
+ if isinstance(config, dict):
6327
+ http_options = config.get('http_options', None)
6328
+ elif hasattr(config, 'http_options'):
6329
+ http_options = config.http_options
6330
+
6331
+ request_dict = _common.convert_to_dict(request_dict)
6332
+ request_dict = _common.encode_unserializable_types(request_dict)
6333
+
6334
+ response_dict = await self._api_client.async_request(
6335
+ 'post', path, request_dict, http_options
6336
+ )
6337
+
6338
+ if self._api_client.vertexai:
6339
+ response_dict = _GenerateVideosOperation_from_vertex(
6340
+ self._api_client, response_dict
6341
+ )
6342
+ else:
6343
+ response_dict = _GenerateVideosOperation_from_mldev(
6344
+ self._api_client, response_dict
6345
+ )
6346
+
6347
+ return_value = types.GenerateVideosOperation._from_response(
6348
+ response=response_dict, kwargs=parameter_model
6349
+ )
6350
+ self._api_client._verify_response(return_value)
6351
+ return return_value
6352
+
5775
6353
  async def generate_content(
5776
6354
  self,
5777
6355
  *,
@@ -5813,7 +6391,7 @@ class AsyncModels(_api_module.BaseModule):
5813
6391
  model=model, contents=contents, config=config
5814
6392
  )
5815
6393
  remaining_remote_calls_afc = _extra_utils.get_max_remote_calls_afc(config)
5816
- logging.info(
6394
+ logger.info(
5817
6395
  f'AFC is enabled with max remote calls: {remaining_remote_calls_afc}.'
5818
6396
  )
5819
6397
  automatic_function_calling_history = []
@@ -5824,7 +6402,7 @@ class AsyncModels(_api_module.BaseModule):
5824
6402
  )
5825
6403
  remaining_remote_calls_afc -= 1
5826
6404
  if remaining_remote_calls_afc == 0:
5827
- logging.info('Reached max remote calls for automatic function calling.')
6405
+ logger.info('Reached max remote calls for automatic function calling.')
5828
6406
 
5829
6407
  function_map = _extra_utils.get_function_map(config)
5830
6408
  if not function_map:
@@ -5870,21 +6448,21 @@ class AsyncModels(_api_module.BaseModule):
5870
6448
  ) -> Awaitable[AsyncIterator[types.GenerateContentResponse]]:
5871
6449
  """Makes an API request to generate content using a model and yields the model's response in chunks.
5872
6450
 
5873
- For the `model` parameter, supported format for Vertex AI API includes:
5874
- - the Gemini model ID, for example: 'gemini-1.5-flash-002'
5875
- - the full resource name starts with 'projects/', for example:
6451
+ For the `model` parameter, supported formats for Vertex AI API include:
6452
+ - The Gemini model ID, for example: 'gemini-1.5-flash-002'
6453
+ - The full resource name starts with 'projects/', for example:
5876
6454
  'projects/my-project-id/locations/us-central1/publishers/google/models/gemini-1.5-flash-002'
5877
- - the partial resource name with 'publishers/', for example:
6455
+ - The partial resource name with 'publishers/', for example:
5878
6456
  'publishers/google/models/gemini-1.5-flash-002' or
5879
6457
  'publishers/meta/models/llama-3.1-405b-instruct-maas'
5880
6458
  - `/` separated publisher and model name, for example:
5881
6459
  'google/gemini-1.5-flash-002' or 'meta/llama-3.1-405b-instruct-maas'
5882
6460
 
5883
- For the `model` parameter, supported format for Gemini API includes:
5884
- - the Gemini model ID, for example: 'gemini-1.5-flash-002'
5885
- - the model name starts with 'models/', for example:
6461
+ For the `model` parameter, supported formats for Gemini API include:
6462
+ - The Gemini model ID, for example: 'gemini-1.5-flash-002'
6463
+ - The model name starts with 'models/', for example:
5886
6464
  'models/gemini-1.5-flash-002'
5887
- - if you would like to use a tuned model, the model name starts with
6465
+ - If you would like to use a tuned model, the model name starts with
5888
6466
  'tunedModels/', for example:
5889
6467
  'tunedModels/1234567890123456789'
5890
6468
 
@@ -5938,10 +6516,11 @@ class AsyncModels(_api_module.BaseModule):
5938
6516
 
5939
6517
  async def async_generator(model, contents, config):
5940
6518
  remaining_remote_calls_afc = _extra_utils.get_max_remote_calls_afc(config)
5941
- logging.info(
6519
+ logger.info(
5942
6520
  f'AFC is enabled with max remote calls: {remaining_remote_calls_afc}.'
5943
6521
  )
5944
6522
  automatic_function_calling_history = []
6523
+ func_response_parts = None
5945
6524
  chunk = None
5946
6525
  i = 0
5947
6526
  while remaining_remote_calls_afc > 0:
@@ -5949,10 +6528,10 @@ class AsyncModels(_api_module.BaseModule):
5949
6528
  response = await self._generate_content_stream(
5950
6529
  model=model, contents=contents, config=config
5951
6530
  )
5952
- logging.info(f'AFC remote call {i} is done.')
6531
+ logger.info(f'AFC remote call {i} is done.')
5953
6532
  remaining_remote_calls_afc -= 1
5954
6533
  if remaining_remote_calls_afc == 0:
5955
- logging.info(
6534
+ logger.info(
5956
6535
  'Reached max remote calls for automatic function calling.'
5957
6536
  )
5958
6537
 
@@ -5966,6 +6545,12 @@ class AsyncModels(_api_module.BaseModule):
5966
6545
  if not function_map:
5967
6546
  yield chunk
5968
6547
  else:
6548
+ if (
6549
+ not chunk.candidates
6550
+ or not chunk.candidates[0].content
6551
+ or not chunk.candidates[0].content.parts
6552
+ ):
6553
+ break
5969
6554
  func_response_parts = _extra_utils.get_function_response_parts(
5970
6555
  chunk, function_map
5971
6556
  )
@@ -5981,18 +6566,15 @@ class AsyncModels(_api_module.BaseModule):
5981
6566
  automatic_function_calling_history
5982
6567
  )
5983
6568
  yield chunk
6569
+ if (
6570
+ not chunk.candidates
6571
+ or not chunk.candidates[0].content
6572
+ or not chunk.candidates[0].content.parts
6573
+ ):
6574
+ break
5984
6575
  func_response_parts = _extra_utils.get_function_response_parts(
5985
6576
  chunk, function_map
5986
6577
  )
5987
- if not chunk:
5988
- break
5989
- if (
5990
- not chunk
5991
- or not chunk.candidates
5992
- or not chunk.candidates[0].content
5993
- or not chunk.candidates[0].content.parts
5994
- ):
5995
- break
5996
6578
  if not function_map:
5997
6579
  break
5998
6580