google-genai 1.40.0__py3-none-any.whl → 1.42.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
google/genai/types.py CHANGED
@@ -287,6 +287,8 @@ class FinishReason(_common.CaseInSensitiveEnum):
   """The tool call generated by the model is invalid."""
   IMAGE_PROHIBITED_CONTENT = 'IMAGE_PROHIBITED_CONTENT'
   """Image generation stopped because the generated images have prohibited content."""
+  NO_IMAGE = 'NO_IMAGE'
+  """The model was expected to generate an image, but none was generated."""


 class HarmProbability(_common.CaseInSensitiveEnum):
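A minimal sketch of checking for the new finish reason; the client setup and the image-capable model name are illustrative assumptions, not part of this diff:

```python
from google import genai
from google.genai import types

client = genai.Client()  # assumes credentials are already configured

# Illustrative call to an image-capable model.
response = client.models.generate_content(
    model='gemini-2.5-flash-image',  # assumed model name
    contents='Generate a picture of a lighthouse at dusk.',
)
candidate = response.candidates[0]
if candidate.finish_reason == types.FinishReason.NO_IMAGE:
    print('The model was expected to generate an image, but none was generated.')
```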
@@ -436,6 +438,17 @@ class AdapterSize(_common.CaseInSensitiveEnum):
   """Adapter size 32."""


+class TuningTask(_common.CaseInSensitiveEnum):
+  """Optional. The tuning task. Either I2V or T2V."""
+
+  TUNING_TASK_UNSPECIFIED = 'TUNING_TASK_UNSPECIFIED'
+  """Default value. This value is unused."""
+  TUNING_TASK_I2V = 'TUNING_TASK_I2V'
+  """Tuning task for image to video."""
+  TUNING_TASK_T2V = 'TUNING_TASK_T2V'
+  """Tuning task for text to video."""
+
+
 class JSONSchemaType(Enum):
   """The type of the data supported by JSON Schema.

@@ -1104,6 +1117,40 @@ class FunctionResponsePart(_common.BaseModel):
       default=None, description="""Optional. URI based data."""
   )

+  @classmethod
+  def from_bytes(cls, *, data: bytes, mime_type: str) -> 'FunctionResponsePart':
+    """Creates a FunctionResponsePart from bytes and mime type.
+
+    Args:
+      data (bytes): The bytes of the data
+      mime_type (str): mime_type: The MIME type of the data.
+    """
+    inline_data = FunctionResponseBlob(
+        data=data,
+        mime_type=mime_type,
+    )
+    return cls(inline_data=inline_data)
+
+  @classmethod
+  def from_uri(
+      cls, *, file_uri: str, mime_type: Optional[str] = None
+  ) -> 'FunctionResponsePart':
+    """Creates a FunctionResponsePart from a file uri.
+
+    Args:
+      file_uri (str): The uri of the file
+      mime_type (str): mime_type: The MIME type of the file. If not provided,
+        the MIME type will be automatically determined.
+    """
+    if mime_type is None:
+      import mimetypes
+
+      mime_type, _ = mimetypes.guess_type(file_uri)
+      if not mime_type:
+        raise ValueError(f'Failed to determine mime type for file: {file_uri}')
+    file_data = FunctionResponseFileData(file_uri=file_uri, mime_type=mime_type)
+    return cls(file_data=file_data)
+

 class FunctionResponsePartDict(TypedDict, total=False):
   """A datatype containing media that is part of a `FunctionResponse` message.
@@ -1261,7 +1308,7 @@ class Part(_common.BaseModel):

     Args:
       file_uri (str): The uri of the file
-      mime_type (str): mime_type: The MIME type of the image. If not provided,
+      mime_type (str): mime_type: The MIME type of the file. If not provided,
         the MIME type will be automatically determined.
     """
     if mime_type is None:
@@ -1292,9 +1339,15 @@ class Part(_common.BaseModel):

   @classmethod
   def from_function_response(
-      cls, *, name: str, response: dict[str, Any]
+      cls,
+      *,
+      name: str,
+      response: dict[str, Any],
+      parts: Optional[list[FunctionResponsePart]] = None,
   ) -> 'Part':
-    function_response = FunctionResponse(name=name, response=response)
+    function_response = FunctionResponse(
+        name=name, response=response, parts=parts
+    )
     return cls(function_response=function_response)

   @classmethod
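Those media parts can now be attached to a function response; a minimal sketch (the tool name and payload are illustrative):

```python
from google.genai import types

part = types.Part.from_function_response(
    name='render_chart',  # illustrative tool name
    response={'result': 'ok'},
    parts=[
        types.FunctionResponsePart.from_bytes(
            data=b'<chart png bytes>', mime_type='image/png'
        )
    ],
)
```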
@@ -4309,8 +4362,10 @@ class GenerateContentConfig(_common.BaseModel):
   @pydantic.field_validator('image_config', mode='before')
   @classmethod
   def _check_image_config_type(cls, value: Any) -> Any:
-    if not isinstance(value, ImageConfig):
-      raise ValueError('image_config must be an instance of ImageConfig.')
+    if isinstance(value, GenerateImagesConfig):
+      raise ValueError(
+          'image_config must be an instance of ImageConfig or compatible dict.'
+      )
     return value


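The relaxed validator now rejects only `GenerateImagesConfig`, so both forms below should pass validation; a sketch with illustrative field values:

```python
from google.genai import types

# An ImageConfig instance still works.
config = types.GenerateContentConfig(
    image_config=types.ImageConfig(aspect_ratio='16:9'),
)

# A plain dict with compatible keys is now accepted as well.
config_from_dict = types.GenerateContentConfig(
    image_config={'aspect_ratio': '16:9'},
)
```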
@@ -6244,6 +6299,10 @@ class GenerateImagesConfig(_common.BaseModel):
       default=None,
       description="""Whether to add a watermark to the generated images.""",
   )
+  labels: Optional[dict[str, str]] = Field(
+      default=None,
+      description="""User specified labels to track billing usage.""",
+  )
   image_size: Optional[str] = Field(
       default=None,
       description="""The size of the largest dimension of the generated image.
@@ -6309,6 +6368,9 @@ class GenerateImagesConfigDict(TypedDict, total=False):
   add_watermark: Optional[bool]
   """Whether to add a watermark to the generated images."""

+  labels: Optional[dict[str, str]]
+  """User specified labels to track billing usage."""
+
   image_size: Optional[str]
   """The size of the largest dimension of the generated image.
   Supported sizes are 1K and 2K (not supported for Imagen 3 models)."""
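The same `labels` field is added to the edit, upscale, recontext, and segment configs in the hunks below; one sketch covers the pattern (client setup, model name, and label keys are illustrative assumptions):

```python
from google import genai
from google.genai import types

client = genai.Client()  # assumes credentials are already configured

response = client.models.generate_images(
    model='imagen-3.0-generate-002',  # illustrative model name
    prompt='A watercolor lighthouse at dusk',
    config=types.GenerateImagesConfig(
        number_of_images=1,
        labels={'team': 'storyboards', 'env': 'dev'},  # illustrative labels
    ),
)
```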
@@ -6886,6 +6948,10 @@ class EditImageConfig(_common.BaseModel):
       default=None,
       description="""Whether to add a watermark to the generated images.""",
   )
+  labels: Optional[dict[str, str]] = Field(
+      default=None,
+      description="""User specified labels to track billing usage.""",
+  )
   edit_mode: Optional[EditMode] = Field(
       default=None,
       description="""Describes the editing mode for the request.""",
@@ -6952,6 +7018,9 @@ class EditImageConfigDict(TypedDict, total=False):
   add_watermark: Optional[bool]
   """Whether to add a watermark to the generated images."""

+  labels: Optional[dict[str, str]]
+  """User specified labels to track billing usage."""
+
   edit_mode: Optional[EditMode]
   """Describes the editing mode for the request."""

@@ -7067,6 +7136,10 @@ class _UpscaleImageAPIConfig(_common.BaseModel):
       output image will have be more different from the input image, but
       with finer details and less noise.""",
   )
+  labels: Optional[dict[str, str]] = Field(
+      default=None,
+      description="""User specified labels to track billing usage.""",
+  )
   number_of_images: Optional[int] = Field(default=None, description="""""")
   mode: Optional[str] = Field(default=None, description="""""")

@@ -7106,6 +7179,9 @@ class _UpscaleImageAPIConfigDict(TypedDict, total=False):
   output image will have be more different from the input image, but
   with finer details and less noise."""

+  labels: Optional[dict[str, str]]
+  """User specified labels to track billing usage."""
+
   number_of_images: Optional[int]
   """"""

@@ -7282,6 +7358,10 @@ class RecontextImageConfig(_common.BaseModel):
   enhance_prompt: Optional[bool] = Field(
       default=None, description="""Whether to use the prompt rewriting logic."""
   )
+  labels: Optional[dict[str, str]] = Field(
+      default=None,
+      description="""User specified labels to track billing usage.""",
+  )


 class RecontextImageConfigDict(TypedDict, total=False):
@@ -7323,6 +7403,9 @@ class RecontextImageConfigDict(TypedDict, total=False):
   enhance_prompt: Optional[bool]
   """Whether to use the prompt rewriting logic."""

+  labels: Optional[dict[str, str]]
+  """User specified labels to track billing usage."""
+

 RecontextImageConfigOrDict = Union[
     RecontextImageConfig, RecontextImageConfigDict
@@ -7473,6 +7556,10 @@ class SegmentImageConfig(_common.BaseModel):
       can be set to a decimal value between 0 and 255 non-inclusive.
       Set to -1 for no binary color thresholding.""",
   )
+  labels: Optional[dict[str, str]] = Field(
+      default=None,
+      description="""User specified labels to track billing usage.""",
+  )


 class SegmentImageConfigDict(TypedDict, total=False):
@@ -7503,6 +7590,9 @@ class SegmentImageConfigDict(TypedDict, total=False):
   can be set to a decimal value between 0 and 255 non-inclusive.
   Set to -1 for no binary color thresholding."""

+  labels: Optional[dict[str, str]]
+  """User specified labels to track billing usage."""
+

 SegmentImageConfigOrDict = Union[SegmentImageConfig, SegmentImageConfigDict]

@@ -8965,6 +9055,7 @@ class GenerateVideosOperation(_common.BaseModel, Operation):
       cls, api_response: Any, is_vertex_ai: bool = False
   ) -> Self:
     """Instantiates a GenerateVideosOperation from an API response."""
+
     if is_vertex_ai:
       response_dict = _GenerateVideosOperation_from_vertex(api_response)
     else:
@@ -10173,6 +10264,71 @@ PartnerModelTuningSpecOrDict = Union[
 ]


+class VeoHyperParameters(_common.BaseModel):
+  """Hyperparameters for Veo."""
+
+  epoch_count: Optional[int] = Field(
+      default=None,
+      description="""Optional. Number of complete passes the model makes over the entire training dataset during training.""",
+  )
+  learning_rate_multiplier: Optional[float] = Field(
+      default=None,
+      description="""Optional. Multiplier for adjusting the default learning rate.""",
+  )
+  tuning_task: Optional[TuningTask] = Field(
+      default=None,
+      description="""Optional. The tuning task. Either I2V or T2V.""",
+  )
+
+
+class VeoHyperParametersDict(TypedDict, total=False):
+  """Hyperparameters for Veo."""
+
+  epoch_count: Optional[int]
+  """Optional. Number of complete passes the model makes over the entire training dataset during training."""
+
+  learning_rate_multiplier: Optional[float]
+  """Optional. Multiplier for adjusting the default learning rate."""
+
+  tuning_task: Optional[TuningTask]
+  """Optional. The tuning task. Either I2V or T2V."""
+
+
+VeoHyperParametersOrDict = Union[VeoHyperParameters, VeoHyperParametersDict]
+
+
+class VeoTuningSpec(_common.BaseModel):
+  """Tuning Spec for Veo Model Tuning."""
+
+  hyper_parameters: Optional[VeoHyperParameters] = Field(
+      default=None, description="""Optional. Hyperparameters for Veo."""
+  )
+  training_dataset_uri: Optional[str] = Field(
+      default=None,
+      description="""Required. Training dataset used for tuning. The dataset can be specified as either a Cloud Storage path to a JSONL file or as the resource name of a Vertex Multimodal Dataset.""",
+  )
+  validation_dataset_uri: Optional[str] = Field(
+      default=None,
+      description="""Optional. Validation dataset used for tuning. The dataset can be specified as either a Cloud Storage path to a JSONL file or as the resource name of a Vertex Multimodal Dataset.""",
+  )
+
+
+class VeoTuningSpecDict(TypedDict, total=False):
+  """Tuning Spec for Veo Model Tuning."""
+
+  hyper_parameters: Optional[VeoHyperParametersDict]
+  """Optional. Hyperparameters for Veo."""
+
+  training_dataset_uri: Optional[str]
+  """Required. Training dataset used for tuning. The dataset can be specified as either a Cloud Storage path to a JSONL file or as the resource name of a Vertex Multimodal Dataset."""
+
+  validation_dataset_uri: Optional[str]
+  """Optional. Validation dataset used for tuning. The dataset can be specified as either a Cloud Storage path to a JSONL file or as the resource name of a Vertex Multimodal Dataset."""
+
+
+VeoTuningSpecOrDict = Union[VeoTuningSpec, VeoTuningSpecDict]
+
+
 class TuningJob(_common.BaseModel):
   """A tuning job."""

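A construction sketch for the new Veo tuning types; the GCS paths are placeholders, and how the spec is submitted to a tuning job is not shown in this diff:

```python
from google.genai import types

veo_spec = types.VeoTuningSpec(
    training_dataset_uri='gs://my-bucket/veo_train.jsonl',  # placeholder
    validation_dataset_uri='gs://my-bucket/veo_val.jsonl',  # placeholder
    hyper_parameters=types.VeoHyperParameters(
        epoch_count=1,
        learning_rate_multiplier=1.0,
        tuning_task=types.TuningTask.TUNING_TASK_T2V,
    ),
)
```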
@@ -10268,6 +10424,9 @@ class TuningJob(_common.BaseModel):
       default=None,
       description="""Optional. The display name of the TunedModel. The name can be up to 128 characters long and can consist of any UTF-8 characters.""",
   )
+  veo_tuning_spec: Optional[VeoTuningSpec] = Field(
+      default=None, description="""Tuning Spec for Veo Tuning."""
+  )

   @property
   def has_ended(self) -> bool:
@@ -10355,6 +10514,9 @@ class TuningJobDict(TypedDict, total=False):
   tuned_model_display_name: Optional[str]
   """Optional. The display name of the TunedModel. The name can be up to 128 characters long and can consist of any UTF-8 characters."""

+  veo_tuning_spec: Optional[VeoTuningSpecDict]
+  """Tuning Spec for Veo Tuning."""
+

 TuningJobOrDict = Union[TuningJob, TuningJobDict]

@@ -12914,6 +13076,10 @@ class UpscaleImageConfig(_common.BaseModel):
       output image will have be more different from the input image, but
       with finer details and less noise.""",
   )
+  labels: Optional[dict[str, str]] = Field(
+      default=None,
+      description="""User specified labels to track billing usage.""",
+  )


 class UpscaleImageConfigDict(TypedDict, total=False):
@@ -12952,6 +13118,9 @@ class UpscaleImageConfigDict(TypedDict, total=False):
   output image will have be more different from the input image, but
   with finer details and less noise."""

+  labels: Optional[dict[str, str]]
+  """User specified labels to track billing usage."""
+

 UpscaleImageConfigOrDict = Union[UpscaleImageConfig, UpscaleImageConfigDict]

@@ -14633,6 +14802,13 @@ class LiveConnectConfig(_common.BaseModel):
       description="""The speech generation configuration.
       """,
   )
+  thinking_config: Optional[ThinkingConfig] = Field(
+      default=None,
+      description="""Config for thinking features.
+      An error will be returned if this field is set for models that don't
+      support thinking.
+      """,
+  )
   enable_affective_dialog: Optional[bool] = Field(
       default=None,
       description="""If enabled, the model will detect emotions and adapt its responses accordingly.""",
@@ -14738,6 +14914,12 @@ class LiveConnectConfigDict(TypedDict, total=False):
   """The speech generation configuration.
   """

+  thinking_config: Optional[ThinkingConfigDict]
+  """Config for thinking features.
+  An error will be returned if this field is set for models that don't
+  support thinking.
+  """
+
   enable_affective_dialog: Optional[bool]
   """If enabled, the model will detect emotions and adapt its responses accordingly."""

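A sketch of wiring the new field into a Live session; `ThinkingConfig` already exists in `types`, while the model name and the commented connection flow are illustrative assumptions:

```python
from google.genai import types

live_config = types.LiveConnectConfig(
    response_modalities=['AUDIO'],
    thinking_config=types.ThinkingConfig(include_thoughts=True),
)

# Illustrative Live API usage:
# async with client.aio.live.connect(
#     model='gemini-2.5-flash-native-audio-preview-09-2025',  # assumed name
#     config=live_config,
# ) as session:
#     ...
```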
google/genai/version.py CHANGED
@@ -13,4 +13,4 @@
 # limitations under the License.
 #

-__version__ = '1.40.0'  # x-release-please-version
+__version__ = '1.42.0'  # x-release-please-version
google_genai-1.40.0.dist-info/METADATA → google_genai-1.42.0.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: google-genai
-Version: 1.40.0
+Version: 1.42.0
 Summary: GenAI Python SDK
 Author-email: Google LLC <googleapis-packages@google.com>
 License: Apache-2.0
@@ -296,7 +296,7 @@ See the 'Create a client' section above to initialize a client.

 ```python
 response = client.models.generate_content(
-    model='gemini-2.0-flash-001', contents='Why is the sky blue?'
+    model='gemini-2.5-flash', contents='Why is the sky blue?'
 )
 print(response.text)
 ```
@@ -313,7 +313,7 @@ python code.
 ```python
 file = client.files.upload(file='a11.txt')
 response = client.models.generate_content(
-    model='gemini-2.0-flash-001',
+    model='gemini-2.5-flash',
     contents=['Could you summarize this file?', file]
 )
 print(response.text)
@@ -617,7 +617,7 @@ print(async_pager[0])
 from google.genai import types

 response = client.models.generate_content(
-    model='gemini-2.0-flash-001',
+    model='gemini-2.5-flash',
     contents='Say something bad.',
     config=types.GenerateContentConfig(
         safety_settings=[
@@ -651,7 +651,7 @@ def get_current_weather(location: str) -> str:


 response = client.models.generate_content(
-    model='gemini-2.0-flash-001',
+    model='gemini-2.5-flash',
     contents='What is the weather like in Boston?',
     config=types.GenerateContentConfig(tools=[get_current_weather]),
 )
@@ -667,7 +667,7 @@ as follows:
 from google.genai import types

 response = client.models.generate_content(
-    model='gemini-2.0-flash-001',
+    model='gemini-2.5-flash',
     contents='What is the weather like in Boston?',
     config=types.GenerateContentConfig(
         tools=[get_current_weather],
@@ -714,7 +714,7 @@ function = types.FunctionDeclaration(
 tool = types.Tool(function_declarations=[function])

 response = client.models.generate_content(
-    model='gemini-2.0-flash-001',
+    model='gemini-2.5-flash',
     contents='What is the weather like in Boston?',
     config=types.GenerateContentConfig(tools=[tool]),
 )
@@ -758,7 +758,7 @@ function_response_content = types.Content(
 )

 response = client.models.generate_content(
-    model='gemini-2.0-flash-001',
+    model='gemini-2.5-flash',
     contents=[
         user_prompt_content,
         function_call_content,
@@ -793,7 +793,7 @@ def get_current_weather(location: str) -> str:
   return "sunny"

 response = client.models.generate_content(
-    model="gemini-2.0-flash-001",
+    model="gemini-2.5-flash",
     contents="What is the weather like in Boston?",
     config=types.GenerateContentConfig(
         tools=[get_current_weather],
@@ -823,7 +823,7 @@ def get_current_weather(location: str) -> str:
   return "sunny"

 response = client.models.generate_content(
-    model="gemini-2.0-flash-001",
+    model="gemini-2.5-flash",
     contents="What is the weather like in Boston?",
     config=types.GenerateContentConfig(
         tools=[get_current_weather],
@@ -913,7 +913,7 @@ user_profile = {
 }

 response = client.models.generate_content(
-    model='gemini-2.0-flash',
+    model='gemini-2.5-flash',
     contents='Give me a random user profile.',
     config={
         'response_mime_type': 'application/json',
@@ -943,7 +943,7 @@ class CountryInfo(BaseModel):


 response = client.models.generate_content(
-    model='gemini-2.0-flash-001',
+    model='gemini-2.5-flash',
     contents='Give me information for the United States.',
     config=types.GenerateContentConfig(
         response_mime_type='application/json',
@@ -957,7 +957,7 @@ print(response.text)
 from google.genai import types

 response = client.models.generate_content(
-    model='gemini-2.0-flash-001',
+    model='gemini-2.5-flash',
     contents='Give me information for the United States.',
     config=types.GenerateContentConfig(
         response_mime_type='application/json',
@@ -995,6 +995,8 @@ You can set response_mime_type to 'text/x.enum' to return one of those enum
 values as the response.

 ```python
+from enum import Enum
+
 class InstrumentEnum(Enum):
   PERCUSSION = 'Percussion'
   STRING = 'String'
@@ -1003,7 +1005,7 @@ class InstrumentEnum(Enum):
   KEYBOARD = 'Keyboard'

 response = client.models.generate_content(
-    model='gemini-2.0-flash-001',
+    model='gemini-2.5-flash',
     contents='What instrument plays multiple notes at once?',
     config={
         'response_mime_type': 'text/x.enum',
@@ -1029,7 +1031,7 @@ class InstrumentEnum(Enum):
   KEYBOARD = 'Keyboard'

 response = client.models.generate_content(
-    model='gemini-2.0-flash-001',
+    model='gemini-2.5-flash',
     contents='What instrument plays multiple notes at once?',
     config={
         'response_mime_type': 'application/json',
@@ -1048,7 +1050,7 @@ to you, rather than being returned as one chunk.

 ```python
 for chunk in client.models.generate_content_stream(
-    model='gemini-2.0-flash-001', contents='Tell me a story in 300 words.'
+    model='gemini-2.5-flash', contents='Tell me a story in 300 words.'
 ):
   print(chunk.text, end='')
 ```
@@ -1062,7 +1064,7 @@ you can use the `from_uri` class method to create a `Part` object.
 from google.genai import types

 for chunk in client.models.generate_content_stream(
-    model='gemini-2.0-flash-001',
+    model='gemini-2.5-flash',
     contents=[
         'What is this image about?',
         types.Part.from_uri(
@@ -1086,7 +1088,7 @@ with open(YOUR_IMAGE_PATH, 'rb') as f:
   image_bytes = f.read()

 for chunk in client.models.generate_content_stream(
-    model='gemini-2.0-flash-001',
+    model='gemini-2.5-flash',
     contents=[
         'What is this image about?',
         types.Part.from_bytes(data=image_bytes, mime_type=YOUR_IMAGE_MIME_TYPE),
@@ -1105,7 +1107,7 @@ of `client.models.generate_content`

 ```python
 response = await client.aio.models.generate_content(
-    model='gemini-2.0-flash-001', contents='Tell me a story in 300 words.'
+    model='gemini-2.5-flash', contents='Tell me a story in 300 words.'
 )

 print(response.text)
@@ -1116,7 +1118,7 @@ print(response.text)

 ```python
 async for chunk in await client.aio.models.generate_content_stream(
-    model='gemini-2.0-flash-001', contents='Tell me a story in 300 words.'
+    model='gemini-2.5-flash', contents='Tell me a story in 300 words.'
 ):
   print(chunk.text, end='')
 ```
@@ -1125,7 +1127,7 @@

 ```python
 response = client.models.count_tokens(
-    model='gemini-2.0-flash-001',
+    model='gemini-2.5-flash',
     contents='why is the sky blue?',
 )
 print(response)
@@ -1137,7 +1139,7 @@ Compute tokens is only supported in Vertex AI.

 ```python
 response = client.models.compute_tokens(
-    model='gemini-2.0-flash-001',
+    model='gemini-2.5-flash',
     contents='why is the sky blue?',
 )
 print(response)
@@ -1147,7 +1149,7 @@ print(response)

 ```python
 response = await client.aio.models.count_tokens(
-    model='gemini-2.0-flash-001',
+    model='gemini-2.5-flash',
     contents='why is the sky blue?',
 )
 print(response)
@@ -1156,14 +1158,14 @@ print(response)
 #### Local Count Tokens

 ```python
-tokenizer = genai.LocalTokenizer(model_name='gemini-2.0-flash-001')
+tokenizer = genai.LocalTokenizer(model_name='gemini-2.5-flash')
 result = tokenizer.count_tokens("What is your name?")
 ```

 #### Local Compute Tokens

 ```python
-tokenizer = genai.LocalTokenizer(model_name='gemini-2.0-flash-001')
+tokenizer = genai.LocalTokenizer(model_name='gemini-2.5-flash')
 result = tokenizer.compute_tokens("What is your name?")
 ```

@@ -1376,7 +1378,7 @@ that it can reflect on its previous responses (i.e., engage in an ongoing
 ### Send Message (Synchronous Non-Streaming)

 ```python
-chat = client.chats.create(model='gemini-2.0-flash-001')
+chat = client.chats.create(model='gemini-2.5-flash')
 response = chat.send_message('tell me a story')
 print(response.text)
 response = chat.send_message('summarize the story you told me in 1 sentence')
@@ -1386,7 +1388,7 @@ print(response.text)
 ### Send Message (Synchronous Streaming)

 ```python
-chat = client.chats.create(model='gemini-2.0-flash-001')
+chat = client.chats.create(model='gemini-2.5-flash')
 for chunk in chat.send_message_stream('tell me a story'):
   print(chunk.text)
 ```
@@ -1394,7 +1396,7 @@ for chunk in chat.send_message_stream('tell me a story'):
 ### Send Message (Asynchronous Non-Streaming)

 ```python
-chat = client.aio.chats.create(model='gemini-2.0-flash-001')
+chat = client.aio.chats.create(model='gemini-2.5-flash')
 response = await chat.send_message('tell me a story')
 print(response.text)
 ```
@@ -1402,7 +1404,7 @@ print(response.text)
 ### Send Message (Asynchronous Streaming)

 ```python
-chat = client.aio.chats.create(model='gemini-2.0-flash-001')
+chat = client.aio.chats.create(model='gemini-2.5-flash')
 async for chunk in await chat.send_message_stream('tell me a story'):
   print(chunk.text)
 ```
@@ -1461,7 +1463,7 @@ else:
 file_uris = [file1.uri, file2.uri]

 cached_content = client.caches.create(
-    model='gemini-2.0-flash-001',
+    model='gemini-2.5-flash',
     config=types.CreateCachedContentConfig(
         contents=[
             types.Content(
@@ -1496,7 +1498,7 @@ cached_content = client.caches.get(name=cached_content.name)
 from google.genai import types

 response = client.models.generate_content(
-    model='gemini-2.0-flash-001',
+    model='gemini-2.5-flash',
     contents='Summarize the pdfs',
     config=types.GenerateContentConfig(
         cached_content=cached_content.name,
@@ -1518,7 +1520,7 @@ section above to initialize a client.
 ```python
 from google.genai import types

-model = 'gemini-2.0-flash-001'
+model = 'gemini-2.5-flash'
 training_dataset = types.TuningDataset(
     # or gcs_uri=my_vertex_multimodal_dataset
     gcs_uri='gs://cloud-samples-data/ai-platform/generative_ai/gemini-1_5/text/sft_train_data.jsonl',
@@ -1672,7 +1674,7 @@ Vertex AI:
 ```python
 # Specify model and source file only, destination and job display name will be auto-populated
 job = client.batches.create(
-    model='gemini-2.0-flash-001',
+    model='gemini-2.5-flash',
     src='bq://my-project.my-dataset.my-table',  # or "gs://path/to/input/data"
 )

@@ -1684,7 +1686,7 @@ Gemini Developer API:
 ```python
 # Create a batch job with inlined requests
 batch_job = client.batches.create(
-    model="gemini-2.0-flash",
+    model="gemini-2.5-flash",
     src=[{
         "contents": [{
             "parts": [{
@@ -1699,7 +1701,7 @@ batch_job = client.batches.create(
 job
 ```

-In order to create a batch job with file name. Need to upload a jsonl file.
+In order to create a batch job with file name. Need to upload a json file.
 For example myrequests.json:

 ```
@@ -1712,14 +1714,14 @@ Then upload the file.
 ```python
 # Upload the file
 file = client.files.upload(
-    file='myrequest.json',
-    config=types.UploadFileConfig(display_name='test_json')
+    file='myrequests.json',
+    config=types.UploadFileConfig(display_name='test-json')
 )

 # Create a batch job with file name
 batch_job = client.batches.create(
     model="gemini-2.0-flash",
-    src="files/file_name",
+    src="files/test-json",
 )
 ```