google-genai 1.6.0__py3-none-any.whl → 1.8.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
google/genai/types.py CHANGED
@@ -137,10 +137,9 @@ class State(_common.CaseInSensitiveEnum):
 
 
  class FinishReason(_common.CaseInSensitiveEnum):
- """Output only.
+ """Output only. The reason why the model stopped generating tokens.
 
- The reason why the model stopped generating tokens. If empty, the model has
- not stopped generating the tokens.
+ If empty, the model has not stopped generating the tokens.
  """
 
  FINISH_REASON_UNSPECIFIED = 'FINISH_REASON_UNSPECIFIED'
@@ -153,6 +152,7 @@ class FinishReason(_common.CaseInSensitiveEnum):
  PROHIBITED_CONTENT = 'PROHIBITED_CONTENT'
  SPII = 'SPII'
  MALFORMED_FUNCTION_CALL = 'MALFORMED_FUNCTION_CALL'
+ IMAGE_SAFETY = 'IMAGE_SAFETY'
 
 
  class HarmProbability(_common.CaseInSensitiveEnum):
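
The new `IMAGE_SAFETY` member lets callers distinguish image-safety terminations from other stop reasons. A minimal sketch of checking for it (the client setup and model name are illustrative assumptions, not part of this diff):

```python
from google import genai
from google.genai import types

client = genai.Client()  # assumes an API key is configured in the environment

response = client.models.generate_content(
    model='gemini-2.0-flash',  # placeholder model name
    contents='Generate an image of a cat wearing a hat.',
)
candidate = response.candidates[0]
# FinishReason is a CaseInSensitiveEnum, so the server's casing does not matter.
if candidate.finish_reason == types.FinishReason.IMAGE_SAFETY:
    print('Generation stopped because the produced image was flagged.')
```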
@@ -185,6 +185,15 @@ class BlockedReason(_common.CaseInSensitiveEnum):
  PROHIBITED_CONTENT = 'PROHIBITED_CONTENT'
 
 
+ class Modality(_common.CaseInSensitiveEnum):
+ """Server content modalities."""
+
+ MODALITY_UNSPECIFIED = 'MODALITY_UNSPECIFIED'
+ TEXT = 'TEXT'
+ IMAGE = 'IMAGE'
+ AUDIO = 'AUDIO'
+
+
  class DeploymentResourcesType(_common.CaseInSensitiveEnum):
  """"""
 
@@ -334,13 +343,15 @@ class FileSource(_common.CaseInSensitiveEnum):
  GENERATED = 'GENERATED'
 
 
- class Modality(_common.CaseInSensitiveEnum):
+ class MediaModality(_common.CaseInSensitiveEnum):
  """Server content modalities."""
 
  MODALITY_UNSPECIFIED = 'MODALITY_UNSPECIFIED'
  TEXT = 'TEXT'
  IMAGE = 'IMAGE'
+ VIDEO = 'VIDEO'
  AUDIO = 'AUDIO'
+ DOCUMENT = 'DOCUMENT'
 
 
  class VideoMetadata(_common.BaseModel):
@@ -623,6 +634,11 @@ class Part(_common.BaseModel):
 
  @classmethod
  def from_video_metadata(cls, *, start_offset: str, end_offset: str) -> 'Part':
+ logger.warning("""Part.from_video_metadata will be deprecated soon.
+ Because a Part instance needs to include at least one of the fields:
+ text, file_data, inline_data, function_call, function_response, executable_code or code_execution_result.
+ A Part instance contains only video_metadata is not a valid Part.
+ """)
  video_metadata = VideoMetadata(
  end_offset=end_offset, start_offset=start_offset
  )
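
Since a Part carrying only `video_metadata` is headed for deprecation, one workaround is to attach the metadata to a Part that also carries `file_data`. A sketch under assumed names (the GCS URI is a placeholder, not from this diff):

```python
from google.genai import types

# Build a file-backed Part, then attach the trimming metadata to it instead of
# creating a metadata-only Part via the soon-to-be-deprecated helper.
part = types.Part.from_uri(
    file_uri='gs://my-bucket/clip.mp4',  # placeholder URI
    mime_type='video/mp4',
)
part.video_metadata = types.VideoMetadata(start_offset='0s', end_offset='10s')
```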
@@ -832,9 +848,6 @@ class Schema(_common.BaseModel):
  default=None,
  description="""Optional. Maximum length of the Type.STRING""",
  )
- title: Optional[str] = Field(
- default=None, description="""Optional. The title of the Schema."""
- )
  min_length: Optional[int] = Field(
  default=None,
  description="""Optional. SCHEMA FIELDS FOR TYPE STRING Minimum length of the Type.STRING""",
@@ -898,6 +911,9 @@ class Schema(_common.BaseModel):
  default=None,
  description="""Optional. Required properties of Type.OBJECT.""",
  )
+ title: Optional[str] = Field(
+ default=None, description="""Optional. The title of the Schema."""
+ )
  type: Optional[Type] = Field(
  default=None, description="""Optional. The type of the data."""
  )
@@ -921,9 +937,6 @@ class SchemaDict(TypedDict, total=False):
  max_length: Optional[int]
  """Optional. Maximum length of the Type.STRING"""
 
- title: Optional[str]
- """Optional. The title of the Schema."""
-
  min_length: Optional[int]
  """Optional. SCHEMA FIELDS FOR TYPE STRING Minimum length of the Type.STRING"""
 
@@ -972,6 +985,9 @@ class SchemaDict(TypedDict, total=False):
  required: Optional[list[str]]
  """Optional. Required properties of Type.OBJECT."""
 
+ title: Optional[str]
+ """Optional. The title of the Schema."""
+
  type: Optional[Type]
  """Optional. The type of the data."""
 
@@ -1210,25 +1226,34 @@ GoogleSearchRetrievalOrDict = Union[
 
 
  class VertexAISearch(_common.BaseModel):
- """Retrieve from Vertex AI Search datastore for grounding.
+ """Retrieve from Vertex AI Search datastore or engine for grounding.
 
- See https://cloud.google.com/products/agent-builder
+ datastore and engine are mutually exclusive. See
+ https://cloud.google.com/products/agent-builder
  """
 
  datastore: Optional[str] = Field(
  default=None,
- description="""Required. Fully-qualified Vertex AI Search data store resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}`""",
+ description="""Optional. Fully-qualified Vertex AI Search data store resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}`""",
+ )
+ engine: Optional[str] = Field(
+ default=None,
+ description="""Optional. Fully-qualified Vertex AI Search engine resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}`""",
  )
 
 
  class VertexAISearchDict(TypedDict, total=False):
- """Retrieve from Vertex AI Search datastore for grounding.
+ """Retrieve from Vertex AI Search datastore or engine for grounding.
 
- See https://cloud.google.com/products/agent-builder
+ datastore and engine are mutually exclusive. See
+ https://cloud.google.com/products/agent-builder
  """
 
  datastore: Optional[str]
- """Required. Fully-qualified Vertex AI Search data store resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}`"""
+ """Optional. Fully-qualified Vertex AI Search data store resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}`"""
+
+ engine: Optional[str]
+ """Optional. Fully-qualified Vertex AI Search engine resource ID. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}`"""
 
 
  VertexAISearchOrDict = Union[VertexAISearch, VertexAISearchDict]
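
With the new `engine` field, grounding can point at a Vertex AI Search engine instead of a datastore; the two fields are mutually exclusive. A sketch with a placeholder resource name:

```python
from google.genai import types

search_tool = types.Tool(
    retrieval=types.Retrieval(
        vertex_ai_search=types.VertexAISearch(
            # Exactly one of `datastore` or `engine` should be set.
            engine='projects/my-project/locations/global/collections/default_collection/engines/my-engine',
        )
    )
)
# Pass it via config=types.GenerateContentConfig(tools=[search_tool]) on generate_content.
```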
@@ -2740,13 +2765,15 @@ class Candidate(_common.BaseModel):
  description="""Number of tokens for this candidate.
  """,
  )
- avg_logprobs: Optional[float] = Field(
+ finish_reason: Optional[FinishReason] = Field(
  default=None,
- description="""Output only. Average log probability score of the candidate.""",
+ description="""The reason why the model stopped generating tokens.
+ If empty, the model has not stopped generating the tokens.
+ """,
  )
- finish_reason: Optional[FinishReason] = Field(
+ avg_logprobs: Optional[float] = Field(
  default=None,
- description="""Output only. The reason why the model stopped generating tokens. If empty, the model has not stopped generating the tokens.""",
+ description="""Output only. Average log probability score of the candidate.""",
  )
  grounding_metadata: Optional[GroundingMetadata] = Field(
  default=None,
@@ -2784,12 +2811,14 @@ class CandidateDict(TypedDict, total=False):
  """Number of tokens for this candidate.
  """
 
+ finish_reason: Optional[FinishReason]
+ """The reason why the model stopped generating tokens.
+ If empty, the model has not stopped generating the tokens.
+ """
+
  avg_logprobs: Optional[float]
  """Output only. Average log probability score of the candidate."""
 
- finish_reason: Optional[FinishReason]
- """Output only. The reason why the model stopped generating tokens. If empty, the model has not stopped generating the tokens."""
-
  grounding_metadata: Optional[GroundingMetadataDict]
  """Output only. Metadata specifies sources used to ground generated content."""
 
@@ -2840,9 +2869,38 @@ GenerateContentResponsePromptFeedbackOrDict = Union[
  ]
 
 
+ class ModalityTokenCount(_common.BaseModel):
+ """Represents token counting info for a single modality."""
+
+ modality: Optional[MediaModality] = Field(
+ default=None,
+ description="""The modality associated with this token count.""",
+ )
+ token_count: Optional[int] = Field(
+ default=None, description="""Number of tokens."""
+ )
+
+
+ class ModalityTokenCountDict(TypedDict, total=False):
+ """Represents token counting info for a single modality."""
+
+ modality: Optional[MediaModality]
+ """The modality associated with this token count."""
+
+ token_count: Optional[int]
+ """Number of tokens."""
+
+
+ ModalityTokenCountOrDict = Union[ModalityTokenCount, ModalityTokenCountDict]
+
+
  class GenerateContentResponseUsageMetadata(_common.BaseModel):
  """Usage metadata about response(s)."""
 
+ cache_tokens_details: Optional[list[ModalityTokenCount]] = Field(
+ default=None,
+ description="""Output only. List of modalities of the cached content in the request input.""",
+ )
  cached_content_token_count: Optional[int] = Field(
  default=None,
  description="""Output only. Number of tokens in the cached part in the input (the cached content).""",
@@ -2850,30 +2908,68 @@ class GenerateContentResponseUsageMetadata(_common.BaseModel):
  candidates_token_count: Optional[int] = Field(
  default=None, description="""Number of tokens in the response(s)."""
  )
+ candidates_tokens_details: Optional[list[ModalityTokenCount]] = Field(
+ default=None,
+ description="""Output only. List of modalities that were returned in the response.""",
+ )
  prompt_token_count: Optional[int] = Field(
  default=None,
  description="""Number of tokens in the request. When `cached_content` is set, this is still the total effective prompt size meaning this includes the number of tokens in the cached content.""",
  )
+ prompt_tokens_details: Optional[list[ModalityTokenCount]] = Field(
+ default=None,
+ description="""Output only. List of modalities that were processed in the request input.""",
+ )
+ thoughts_token_count: Optional[int] = Field(
+ default=None,
+ description="""Output only. Number of tokens present in thoughts output.""",
+ )
+ tool_use_prompt_token_count: Optional[int] = Field(
+ default=None,
+ description="""Output only. Number of tokens present in tool-use prompt(s).""",
+ )
+ tool_use_prompt_tokens_details: Optional[list[ModalityTokenCount]] = Field(
+ default=None,
+ description="""Output only. List of modalities that were processed for tool-use request inputs.""",
+ )
  total_token_count: Optional[int] = Field(
  default=None,
- description="""Total token count for prompt and response candidates.""",
+ description="""Total token count for prompt, response candidates, and tool-use prompts (if present).""",
  )
 
 
  class GenerateContentResponseUsageMetadataDict(TypedDict, total=False):
  """Usage metadata about response(s)."""
 
+ cache_tokens_details: Optional[list[ModalityTokenCountDict]]
+ """Output only. List of modalities of the cached content in the request input."""
+
  cached_content_token_count: Optional[int]
  """Output only. Number of tokens in the cached part in the input (the cached content)."""
 
  candidates_token_count: Optional[int]
  """Number of tokens in the response(s)."""
 
+ candidates_tokens_details: Optional[list[ModalityTokenCountDict]]
+ """Output only. List of modalities that were returned in the response."""
+
  prompt_token_count: Optional[int]
  """Number of tokens in the request. When `cached_content` is set, this is still the total effective prompt size meaning this includes the number of tokens in the cached content."""
 
+ prompt_tokens_details: Optional[list[ModalityTokenCountDict]]
+ """Output only. List of modalities that were processed in the request input."""
+
+ thoughts_token_count: Optional[int]
+ """Output only. Number of tokens present in thoughts output."""
+
+ tool_use_prompt_token_count: Optional[int]
+ """Output only. Number of tokens present in tool-use prompt(s)."""
+
+ tool_use_prompt_tokens_details: Optional[list[ModalityTokenCountDict]]
+ """Output only. List of modalities that were processed for tool-use request inputs."""
+
  total_token_count: Optional[int]
- """Total token count for prompt and response candidates."""
+ """Total token count for prompt, response candidates, and tool-use prompts (if present)."""
 
 
  GenerateContentResponseUsageMetadataOrDict = Union[
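
Usage metadata now breaks token counts down per modality and adds thought and tool-use counters. A sketch of reading the new fields from a response (the model name is a placeholder):

```python
from google import genai

client = genai.Client()  # assumes credentials are configured in the environment
response = client.models.generate_content(
    model='gemini-2.0-flash',  # placeholder model name
    contents='Summarize the attached document.',
)
usage = response.usage_metadata
if usage is not None:
    print('total:', usage.total_token_count)
    print('thoughts:', usage.thoughts_token_count)
    print('tool-use prompt:', usage.tool_use_prompt_token_count)
    # Per-modality breakdown of the prompt (TEXT, IMAGE, VIDEO, AUDIO, DOCUMENT).
    for detail in usage.prompt_tokens_details or []:
        print(detail.modality, detail.token_count)
```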
@@ -2914,12 +3010,14 @@ class GenerateContentResponse(_common.BaseModel):
  automatic_function_calling_history: Optional[list[Content]] = None
  parsed: Optional[Union[pydantic.BaseModel, dict, Enum]] = Field(
  default=None,
- description="""Parsed response if response_schema is provided. Not available for streaming.""",
+ description="""First candidate from the parsed response if response_schema is provided. Not available for streaming.""",
  )
 
- @property
- def text(self) -> Optional[str]:
- """Returns the concatenation of all text parts in the response."""
+ def _get_text(self, warn_property: str = 'text') -> Optional[str]:
+ """Returns the concatenation of all text parts in the response.
+
+ This is an internal method that allows customizing the warning message.
+ """
  if (
  not self.candidates
  or not self.candidates[0].content
@@ -2928,9 +3026,10 @@ class GenerateContentResponse(_common.BaseModel):
  return None
  if len(self.candidates) > 1:
  logger.warning(
- f'there are {len(self.candidates)} candidates, returning text from'
- ' the first candidate.Access response.candidates directly to get'
- ' text from other candidates.'
+ f'there are {len(self.candidates)} candidates, returning'
+ f' {warn_property} result from the first candidate. Access'
+ ' response.candidates directly to get the result from other'
+ ' candidates.'
  )
  text = ''
  any_text_part_text = False
@@ -2949,12 +3048,18 @@ class GenerateContentResponse(_common.BaseModel):
  if non_text_parts:
  logger.warning(
  'Warning: there are non-text parts in the response:'
- f' {non_text_parts},returning concatenated text from text parts,check'
- ' out the non text parts for full response from model.'
+ f' {non_text_parts},returning concatenated {warn_property} result'
+ ' from text parts,check out the non text parts for full response'
+ ' from model.'
  )
  # part.text == '' is different from part.text is None
  return text if any_text_part_text else None
 
+ @property
+ def text(self) -> Optional[str]:
+ """Returns the concatenation of all text parts in the response."""
+ return self._get_text(warn_property='text')
+
  @property
  def function_calls(self) -> Optional[list[FunctionCall]]:
  """Returns the list of function calls in the response."""
@@ -3037,16 +3142,23 @@ class GenerateContentResponse(_common.BaseModel):
  ):
  # Pydantic schema.
  try:
- if result.text is not None:
- result.parsed = response_schema.model_validate_json(result.text)
+ result_text = result._get_text(warn_property='parsed')
+ if result_text is not None:
+ result.parsed = response_schema.model_validate_json(result_text)
  # may not be a valid json per stream response
  except pydantic.ValidationError:
  pass
  except json.decoder.JSONDecodeError:
  pass
- elif isinstance(response_schema, EnumMeta) and result.text is not None:
+ elif (
+ isinstance(response_schema, EnumMeta)
+ and result._get_text(warn_property='parsed') is not None
+ ):
  # Enum with "application/json" returns response in double quotes.
- enum_value = result.text.replace('"', '')
+ result_text = result._get_text(warn_property='parsed')
+ if result_text is None:
+ raise ValueError('Response is empty.')
+ enum_value = result_text.replace('"', '')
  try:
  result.parsed = response_schema(enum_value)
  if (
@@ -3064,8 +3176,9 @@ class GenerateContentResponse(_common.BaseModel):
  placeholder: response_schema # type: ignore[valid-type]
 
  try:
- if result.text is not None:
- parsed = {'placeholder': json.loads(result.text)}
+ result_text = result._get_text(warn_property='parsed')
+ if result_text is not None:
+ parsed = {'placeholder': json.loads(result_text)}
  placeholder = Placeholder.model_validate(parsed)
  result.parsed = placeholder.placeholder
  except json.decoder.JSONDecodeError:
@@ -3080,8 +3193,9 @@ class GenerateContentResponse(_common.BaseModel):
  # want the result converted to. So just return json.
  # JSON schema.
  try:
- if result.text is not None:
- result.parsed = json.loads(result.text)
+ result_text = result._get_text(warn_property='parsed')
+ if result_text is not None:
+ result.parsed = json.loads(result_text)
  # may not be a valid json per stream response
  except json.decoder.JSONDecodeError:
  pass
@@ -3091,12 +3205,13 @@ class GenerateContentResponse(_common.BaseModel):
  for union_type in union_types:
  if issubclass(union_type, pydantic.BaseModel):
  try:
- if result.text is not None:
+ result_text = result._get_text(warn_property='parsed')
+ if result_text is not None:
 
  class Placeholder(pydantic.BaseModel): # type: ignore[no-redef]
  placeholder: response_schema # type: ignore[valid-type]
 
- parsed = {'placeholder': json.loads(result.text)}
+ parsed = {'placeholder': json.loads(result_text)}
  placeholder = Placeholder.model_validate(parsed)
  result.parsed = placeholder.placeholder
  except json.decoder.JSONDecodeError:
@@ -3105,8 +3220,9 @@ class GenerateContentResponse(_common.BaseModel):
  pass
  else:
  try:
- if result.text is not None:
- result.parsed = json.loads(result.text)
+ result_text = result._get_text(warn_property='parsed')
+ if result_text is not None:
+ result.parsed = json.loads(result_text)
  # may not be a valid json per stream response
  except json.decoder.JSONDecodeError:
  pass
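
The `parsed` field now documents that it holds only the first candidate, and the internal `_get_text` helper keeps the multi-candidate warning accurate for both `text` and `parsed`. A sketch of the structured-output flow this code path serves (model name is a placeholder):

```python
import pydantic
from google import genai


class Recipe(pydantic.BaseModel):
    name: str
    ingredients: list[str]


client = genai.Client()  # assumes credentials are configured in the environment
response = client.models.generate_content(
    model='gemini-2.0-flash',  # placeholder model name
    contents='Give me a simple cookie recipe.',
    config={
        'response_mime_type': 'application/json',
        'response_schema': Recipe,
    },
)
# `parsed` is built from the first candidate; with several candidates the SDK
# now warns about the `parsed` result rather than `text`.
recipe = response.parsed
print(recipe.name if isinstance(recipe, Recipe) else response.text)
```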
@@ -3436,7 +3552,8 @@ class GenerateImagesConfig(_common.BaseModel):
  )
  include_safety_attributes: Optional[bool] = Field(
  default=None,
- description="""Whether to report the safety scores of each image in the response.
+ description="""Whether to report the safety scores of each generated image and
+ the positive prompt in the response.
  """,
  )
  include_rai_reason: Optional[bool] = Field(
@@ -3515,7 +3632,8 @@ class GenerateImagesConfigDict(TypedDict, total=False):
  """
 
  include_safety_attributes: Optional[bool]
- """Whether to report the safety scores of each image in the response.
+ """Whether to report the safety scores of each generated image and
+ the positive prompt in the response.
  """
 
  include_rai_reason: Optional[bool]
@@ -3761,6 +3879,11 @@ class SafetyAttributes(_common.BaseModel):
  description="""List of scores of each categories.
  """,
  )
+ content_type: Optional[str] = Field(
+ default=None,
+ description="""Internal use only.
+ """,
+ )
 
 
  class SafetyAttributesDict(TypedDict, total=False):
@@ -3774,6 +3897,10 @@ class SafetyAttributesDict(TypedDict, total=False):
  """List of scores of each categories.
  """
 
+ content_type: Optional[str]
+ """Internal use only.
+ """
+
 
  SafetyAttributesOrDict = Union[SafetyAttributes, SafetyAttributesDict]
 
@@ -3840,6 +3967,12 @@ class GenerateImagesResponse(_common.BaseModel):
  description="""List of generated images.
  """,
  )
+ positive_prompt_safety_attributes: Optional[SafetyAttributes] = Field(
+ default=None,
+ description="""Safety attributes of the positive prompt. Only populated if
+ ``include_safety_attributes`` is set to True.
+ """,
+ )
 
 
  class GenerateImagesResponseDict(TypedDict, total=False):
@@ -3849,6 +3982,11 @@ class GenerateImagesResponseDict(TypedDict, total=False):
  """List of generated images.
  """
 
+ positive_prompt_safety_attributes: Optional[SafetyAttributesDict]
+ """Safety attributes of the positive prompt. Only populated if
+ ``include_safety_attributes`` is set to True.
+ """
+
 
  GenerateImagesResponseOrDict = Union[
  GenerateImagesResponse, GenerateImagesResponseDict
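
When `include_safety_attributes` is set, the response now also carries safety attributes for the positive prompt itself. A sketch (client setup and model name are placeholders):

```python
from google import genai
from google.genai import types

client = genai.Client()  # assumes credentials are configured in the environment
response = client.models.generate_images(
    model='imagen-3.0-generate-002',  # placeholder model name
    prompt='A watercolor painting of a lighthouse at dusk',
    config=types.GenerateImagesConfig(
        number_of_images=1,
        include_safety_attributes=True,
    ),
)
# New in this release: safety scores for the prompt, not just the images.
attrs = response.positive_prompt_safety_attributes
if attrs and attrs.categories and attrs.scores:
    print(dict(zip(attrs.categories, attrs.scores)))
```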
@@ -4085,7 +4223,8 @@ class EditImageConfig(_common.BaseModel):
  )
  include_safety_attributes: Optional[bool] = Field(
  default=None,
- description="""Whether to report the safety scores of each image in the response.
+ description="""Whether to report the safety scores of each generated image and
+ the positive prompt in the response.
  """,
  )
  include_rai_reason: Optional[bool] = Field(
@@ -4163,7 +4302,8 @@ class EditImageConfigDict(TypedDict, total=False):
  """
 
  include_safety_attributes: Optional[bool]
- """Whether to report the safety scores of each image in the response.
+ """Whether to report the safety scores of each generated image and
+ the positive prompt in the response.
  """
 
  include_rai_reason: Optional[bool]
@@ -5313,14 +5453,7 @@ GenerateVideosResponseOrDict = Union[
 
 
  class GenerateVideosOperation(_common.BaseModel):
- """A video generation operation.
-
- Use the following code to refresh the operation:
-
- ```
- operation = client.operations.get(operation)
- ```
- """
+ """A video generation operation."""
 
  name: Optional[str] = Field(
  default=None,
@@ -5348,14 +5481,7 @@ class GenerateVideosOperation(_common.BaseModel):
 
 
  class GenerateVideosOperationDict(TypedDict, total=False):
- """A video generation operation.
-
- Use the following code to refresh the operation:
-
- ```
- operation = client.operations.get(operation)
- ```
- """
+ """A video generation operation."""
 
  name: Optional[str]
  """The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`."""
@@ -6603,11 +6729,11 @@ class CreateCachedContentConfig(_common.BaseModel):
  )
  ttl: Optional[str] = Field(
  default=None,
- description="""The TTL for this resource. The expiration time is computed: now + TTL.""",
+ description="""The TTL for this resource. The expiration time is computed: now + TTL. It is a duration string, with up to nine fractional digits, terminated by 's'. Example: "3.5s".""",
  )
  expire_time: Optional[datetime.datetime] = Field(
  default=None,
- description="""Timestamp of when this resource is considered expired.""",
+ description="""Timestamp of when this resource is considered expired. Uses RFC 3339 format, Example: 2014-10-02T15:01:23Z.""",
  )
  display_name: Optional[str] = Field(
  default=None,
@@ -6643,10 +6769,10 @@ class CreateCachedContentConfigDict(TypedDict, total=False):
  """Used to override HTTP request options."""
 
  ttl: Optional[str]
- """The TTL for this resource. The expiration time is computed: now + TTL."""
+ """The TTL for this resource. The expiration time is computed: now + TTL. It is a duration string, with up to nine fractional digits, terminated by 's'. Example: "3.5s"."""
 
  expire_time: Optional[datetime.datetime]
- """Timestamp of when this resource is considered expired."""
+ """Timestamp of when this resource is considered expired. Uses RFC 3339 format, Example: 2014-10-02T15:01:23Z."""
 
  display_name: Optional[str]
  """The user-generated meaningful display name of the cached content.
@@ -6937,11 +7063,11 @@ class UpdateCachedContentConfig(_common.BaseModel):
  )
  ttl: Optional[str] = Field(
  default=None,
- description="""The TTL for this resource. The expiration time is computed: now + TTL.""",
+ description="""The TTL for this resource. The expiration time is computed: now + TTL. It is a duration string, with up to nine fractional digits, terminated by 's'. Example: "3.5s".""",
  )
  expire_time: Optional[datetime.datetime] = Field(
  default=None,
- description="""Timestamp of when this resource is considered expired.""",
+ description="""Timestamp of when this resource is considered expired. Uses RFC 3339 format, Example: 2014-10-02T15:01:23Z.""",
  )
 
 
@@ -6952,10 +7078,10 @@ class UpdateCachedContentConfigDict(TypedDict, total=False):
  """Used to override HTTP request options."""
 
  ttl: Optional[str]
- """The TTL for this resource. The expiration time is computed: now + TTL."""
+ """The TTL for this resource. The expiration time is computed: now + TTL. It is a duration string, with up to nine fractional digits, terminated by 's'. Example: "3.5s"."""
 
  expire_time: Optional[datetime.datetime]
- """Timestamp of when this resource is considered expired."""
+ """Timestamp of when this resource is considered expired. Uses RFC 3339 format, Example: 2014-10-02T15:01:23Z."""
 
 
  UpdateCachedContentConfigOrDict = Union[
@@ -7985,6 +8111,10 @@ class TestTableItem(_common.BaseModel):
  default=None,
  description="""When set to a reason string, this test will be skipped in the API mode. Use this flag for tests that can not be reproduced with the real API. E.g. a test that deletes a resource.""",
  )
+ ignore_keys: Optional[list[str]] = Field(
+ default=None,
+ description="""Keys to ignore when comparing the request and response. This is useful for tests that are not deterministic.""",
+ )
 
 
  class TestTableItemDict(TypedDict, total=False):
@@ -8010,6 +8140,9 @@ class TestTableItemDict(TypedDict, total=False):
  skip_in_api_mode: Optional[str]
  """When set to a reason string, this test will be skipped in the API mode. Use this flag for tests that can not be reproduced with the real API. E.g. a test that deletes a resource."""
 
+ ignore_keys: Optional[list[str]]
+ """Keys to ignore when comparing the request and response. This is useful for tests that are not deterministic."""
+
 
  TestTableItemOrDict = Union[TestTableItem, TestTableItemDict]
 
google/genai/version.py CHANGED
@@ -13,4 +13,4 @@
  # limitations under the License.
  #
 
- __version__ = '1.6.0' # x-release-please-version
+ __version__ = '1.8.0' # x-release-please-version