google-genai 1.26.0__py3-none-any.whl → 1.27.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
google/genai/batches.py CHANGED
@@ -1622,6 +1622,11 @@ def _ListBatchJobsResponse_from_mldev(
1622
1622
  parent_object: Optional[dict[str, Any]] = None,
1623
1623
  ) -> dict[str, Any]:
1624
1624
  to_object: dict[str, Any] = {}
1625
+ if getv(from_object, ['sdkHttpResponse']) is not None:
1626
+ setv(
1627
+ to_object, ['sdk_http_response'], getv(from_object, ['sdkHttpResponse'])
1628
+ )
1629
+
1625
1630
  if getv(from_object, ['nextPageToken']) is not None:
1626
1631
  setv(to_object, ['next_page_token'], getv(from_object, ['nextPageToken']))
1627
1632
 
@@ -1784,6 +1789,11 @@ def _ListBatchJobsResponse_from_vertex(
1784
1789
  parent_object: Optional[dict[str, Any]] = None,
1785
1790
  ) -> dict[str, Any]:
1786
1791
  to_object: dict[str, Any] = {}
1792
+ if getv(from_object, ['sdkHttpResponse']) is not None:
1793
+ setv(
1794
+ to_object, ['sdk_http_response'], getv(from_object, ['sdkHttpResponse'])
1795
+ )
1796
+
1787
1797
  if getv(from_object, ['nextPageToken']) is not None:
1788
1798
  setv(to_object, ['next_page_token'], getv(from_object, ['nextPageToken']))
1789
1799
 
@@ -2091,7 +2101,9 @@ class Batches(_api_module.BaseModule):
2091
2101
  return_value = types.ListBatchJobsResponse._from_response(
2092
2102
  response=response_dict, kwargs=parameter_model.model_dump()
2093
2103
  )
2094
-
2104
+ return_value.sdk_http_response = types.HttpResponse(
2105
+ headers=response.headers
2106
+ )
2095
2107
  self._api_client._verify_response(return_value)
2096
2108
  return return_value
2097
2109
 
@@ -2522,7 +2534,9 @@ class AsyncBatches(_api_module.BaseModule):
2522
2534
  return_value = types.ListBatchJobsResponse._from_response(
2523
2535
  response=response_dict, kwargs=parameter_model.model_dump()
2524
2536
  )
2525
-
2537
+ return_value.sdk_http_response = types.HttpResponse(
2538
+ headers=response.headers
2539
+ )
2526
2540
  self._api_client._verify_response(return_value)
2527
2541
  return return_value
2528
2542
 
google/genai/caches.py CHANGED
@@ -1327,6 +1327,11 @@ def _ListCachedContentsResponse_from_mldev(
1327
1327
  parent_object: Optional[dict[str, Any]] = None,
1328
1328
  ) -> dict[str, Any]:
1329
1329
  to_object: dict[str, Any] = {}
1330
+ if getv(from_object, ['sdkHttpResponse']) is not None:
1331
+ setv(
1332
+ to_object, ['sdk_http_response'], getv(from_object, ['sdkHttpResponse'])
1333
+ )
1334
+
1330
1335
  if getv(from_object, ['nextPageToken']) is not None:
1331
1336
  setv(to_object, ['next_page_token'], getv(from_object, ['nextPageToken']))
1332
1337
 
@@ -1386,6 +1391,11 @@ def _ListCachedContentsResponse_from_vertex(
1386
1391
  parent_object: Optional[dict[str, Any]] = None,
1387
1392
  ) -> dict[str, Any]:
1388
1393
  to_object: dict[str, Any] = {}
1394
+ if getv(from_object, ['sdkHttpResponse']) is not None:
1395
+ setv(
1396
+ to_object, ['sdk_http_response'], getv(from_object, ['sdkHttpResponse'])
1397
+ )
1398
+
1389
1399
  if getv(from_object, ['nextPageToken']) is not None:
1390
1400
  setv(to_object, ['next_page_token'], getv(from_object, ['nextPageToken']))
1391
1401
 
@@ -1773,7 +1783,9 @@ class Caches(_api_module.BaseModule):
1773
1783
  return_value = types.ListCachedContentsResponse._from_response(
1774
1784
  response=response_dict, kwargs=parameter_model.model_dump()
1775
1785
  )
1776
-
1786
+ return_value.sdk_http_response = types.HttpResponse(
1787
+ headers=response.headers
1788
+ )
1777
1789
  self._api_client._verify_response(return_value)
1778
1790
  return return_value
1779
1791
 
@@ -2165,7 +2177,9 @@ class AsyncCaches(_api_module.BaseModule):
2165
2177
  return_value = types.ListCachedContentsResponse._from_response(
2166
2178
  response=response_dict, kwargs=parameter_model.model_dump()
2167
2179
  )
2168
-
2180
+ return_value.sdk_http_response = types.HttpResponse(
2181
+ headers=response.headers
2182
+ )
2169
2183
  self._api_client._verify_response(return_value)
2170
2184
  return return_value
2171
2185
 
google/genai/files.py CHANGED
@@ -267,6 +267,11 @@ def _ListFilesResponse_from_mldev(
267
267
  parent_object: Optional[dict[str, Any]] = None,
268
268
  ) -> dict[str, Any]:
269
269
  to_object: dict[str, Any] = {}
270
+ if getv(from_object, ['sdkHttpResponse']) is not None:
271
+ setv(
272
+ to_object, ['sdk_http_response'], getv(from_object, ['sdkHttpResponse'])
273
+ )
274
+
270
275
  if getv(from_object, ['nextPageToken']) is not None:
271
276
  setv(to_object, ['next_page_token'], getv(from_object, ['nextPageToken']))
272
277
 
@@ -370,7 +375,9 @@ class Files(_api_module.BaseModule):
370
375
  return_value = types.ListFilesResponse._from_response(
371
376
  response=response_dict, kwargs=parameter_model.model_dump()
372
377
  )
373
-
378
+ return_value.sdk_http_response = types.HttpResponse(
379
+ headers=response.headers
380
+ )
374
381
  self._api_client._verify_response(return_value)
375
382
  return return_value
376
383
 
@@ -848,7 +855,9 @@ class AsyncFiles(_api_module.BaseModule):
848
855
  return_value = types.ListFilesResponse._from_response(
849
856
  response=response_dict, kwargs=parameter_model.model_dump()
850
857
  )
851
-
858
+ return_value.sdk_http_response = types.HttpResponse(
859
+ headers=response.headers
860
+ )
852
861
  self._api_client._verify_response(return_value)
853
862
  return return_value
854
863
 
google/genai/models.py CHANGED
@@ -996,6 +996,9 @@ def _GenerateImagesConfig_to_mldev(
996
996
  if getv(from_object, ['add_watermark']) is not None:
997
997
  raise ValueError('add_watermark parameter is not supported in Gemini API.')
998
998
 
999
+ if getv(from_object, ['image_size']) is not None:
1000
+ raise ValueError('image_size parameter is not supported in Gemini API.')
1001
+
999
1002
  if getv(from_object, ['enhance_prompt']) is not None:
1000
1003
  raise ValueError('enhance_prompt parameter is not supported in Gemini API.')
1001
1004
 
@@ -1239,6 +1242,15 @@ def _Image_to_mldev(
1239
1242
  return to_object
1240
1243
 
1241
1244
 
1245
+ def _GenerateVideosSource_to_mldev(
1246
+ from_object: Union[dict[str, Any], object],
1247
+ parent_object: Optional[dict[str, Any]] = None,
1248
+ ) -> dict[str, Any]:
1249
+ to_object: dict[str, Any] = {}
1250
+
1251
+ return to_object
1252
+
1253
+
1242
1254
  def _GenerateVideosConfig_to_mldev(
1243
1255
  from_object: Union[dict[str, Any], object],
1244
1256
  parent_object: Optional[dict[str, Any]] = None,
@@ -2388,6 +2400,13 @@ def _GenerateImagesConfig_to_vertex(
2388
2400
  getv(from_object, ['add_watermark']),
2389
2401
  )
2390
2402
 
2403
+ if getv(from_object, ['image_size']) is not None:
2404
+ setv(
2405
+ parent_object,
2406
+ ['parameters', 'sampleImageSize'],
2407
+ getv(from_object, ['image_size']),
2408
+ )
2409
+
2391
2410
  if getv(from_object, ['enhance_prompt']) is not None:
2392
2411
  setv(
2393
2412
  parent_object,
@@ -3071,6 +3090,15 @@ def _Video_to_vertex(
3071
3090
  return to_object
3072
3091
 
3073
3092
 
3093
+ def _GenerateVideosSource_to_vertex(
3094
+ from_object: Union[dict[str, Any], object],
3095
+ parent_object: Optional[dict[str, Any]] = None,
3096
+ ) -> dict[str, Any]:
3097
+ to_object: dict[str, Any] = {}
3098
+
3099
+ return to_object
3100
+
3101
+
3074
3102
  def _GenerateVideosConfig_to_vertex(
3075
3103
  from_object: Union[dict[str, Any], object],
3076
3104
  parent_object: Optional[dict[str, Any]] = None,
@@ -3709,6 +3737,11 @@ def _ListModelsResponse_from_mldev(
3709
3737
  parent_object: Optional[dict[str, Any]] = None,
3710
3738
  ) -> dict[str, Any]:
3711
3739
  to_object: dict[str, Any] = {}
3740
+ if getv(from_object, ['sdkHttpResponse']) is not None:
3741
+ setv(
3742
+ to_object, ['sdk_http_response'], getv(from_object, ['sdkHttpResponse'])
3743
+ )
3744
+
3712
3745
  if getv(from_object, ['nextPageToken']) is not None:
3713
3746
  setv(to_object, ['next_page_token'], getv(from_object, ['nextPageToken']))
3714
3747
 
@@ -4471,6 +4504,11 @@ def _ListModelsResponse_from_vertex(
4471
4504
  parent_object: Optional[dict[str, Any]] = None,
4472
4505
  ) -> dict[str, Any]:
4473
4506
  to_object: dict[str, Any] = {}
4507
+ if getv(from_object, ['sdkHttpResponse']) is not None:
4508
+ setv(
4509
+ to_object, ['sdk_http_response'], getv(from_object, ['sdkHttpResponse'])
4510
+ )
4511
+
4474
4512
  if getv(from_object, ['nextPageToken']) is not None:
4475
4513
  setv(to_object, ['next_page_token'], getv(from_object, ['nextPageToken']))
4476
4514
 
@@ -5233,7 +5271,9 @@ class Models(_api_module.BaseModule):
5233
5271
  return_value = types.ListModelsResponse._from_response(
5234
5272
  response=response_dict, kwargs=parameter_model.model_dump()
5235
5273
  )
5236
-
5274
+ return_value.sdk_http_response = types.HttpResponse(
5275
+ headers=response.headers
5276
+ )
5237
5277
  self._api_client._verify_response(return_value)
5238
5278
  return return_value
5239
5279
 
@@ -5539,7 +5579,7 @@ class Models(_api_module.BaseModule):
5539
5579
  self._api_client._verify_response(return_value)
5540
5580
  return return_value
5541
5581
 
5542
- def generate_videos(
5582
+ def _generate_videos(
5543
5583
  self,
5544
5584
  *,
5545
5585
  model: str,
@@ -5560,7 +5600,7 @@ class Models(_api_module.BaseModule):
5560
5600
  Args:
5561
5601
  model: The model to use.
5562
5602
  prompt: The text prompt for generating the videos. Optional for image to
5563
- video use cases.
5603
+ video and video extension use cases.
5564
5604
  image: The input image for generating the videos. Optional if prompt is
5565
5605
  provided.
5566
5606
  video: The input video for video extension use cases. Optional if prompt
@@ -6129,6 +6169,56 @@ class Models(_api_module.BaseModule):
6129
6169
  config=api_config,
6130
6170
  )
6131
6171
 
6172
+ def generate_videos(
6173
+ self,
6174
+ *,
6175
+ model: str,
6176
+ prompt: Optional[str] = None,
6177
+ image: Optional[types.ImageOrDict] = None,
6178
+ video: Optional[types.VideoOrDict] = None,
6179
+ config: Optional[types.GenerateVideosConfigOrDict] = None,
6180
+ ) -> types.GenerateVideosOperation:
6181
+ """Generates videos based on an input (text, image, or video) and configuration.
6182
+
6183
+ The following use cases are supported:
6184
+ 1. Text to video generation.
6185
+ 2a. Image to video generation (additional text prompt is optional).
6186
+ 2b. Image to video generation with frame interpolation (specify last_frame
6187
+ in config).
6188
+ 3. Video extension (additional text prompt is optional)
6189
+
6190
+ Args:
6191
+ model: The model to use.
6192
+ prompt: The text prompt for generating the videos. Optional for image to
6193
+ video and video extension use cases.
6194
+ image: The input image for generating the videos. Optional if prompt is
6195
+ provided.
6196
+ video: The input video for video extension use cases. Optional if prompt
6197
+ or image is provided.
6198
+ config: Configuration for generation.
6199
+
6200
+ Usage:
6201
+
6202
+ ```
6203
+ operation = client.models.generate_videos(
6204
+ model="veo-2.0-generate-001",
6205
+ prompt="A neon hologram of a cat driving at top speed",
6206
+ )
6207
+ while not operation.done:
6208
+ time.sleep(10)
6209
+ operation = client.operations.get(operation)
6210
+
6211
+ operation.result.generated_videos[0].video.uri
6212
+ ```
6213
+ """
6214
+ return self._generate_videos(
6215
+ model=model,
6216
+ prompt=prompt,
6217
+ image=image,
6218
+ video=video,
6219
+ config=config,
6220
+ )
6221
+
6132
6222
  def list(
6133
6223
  self,
6134
6224
  *,
@@ -6798,7 +6888,9 @@ class AsyncModels(_api_module.BaseModule):
6798
6888
  return_value = types.ListModelsResponse._from_response(
6799
6889
  response=response_dict, kwargs=parameter_model.model_dump()
6800
6890
  )
6801
-
6891
+ return_value.sdk_http_response = types.HttpResponse(
6892
+ headers=response.headers
6893
+ )
6802
6894
  self._api_client._verify_response(return_value)
6803
6895
  return return_value
6804
6896
 
@@ -7103,7 +7195,7 @@ class AsyncModels(_api_module.BaseModule):
7103
7195
  self._api_client._verify_response(return_value)
7104
7196
  return return_value
7105
7197
 
7106
- async def generate_videos(
7198
+ async def _generate_videos(
7107
7199
  self,
7108
7200
  *,
7109
7201
  model: str,
@@ -7124,7 +7216,7 @@ class AsyncModels(_api_module.BaseModule):
7124
7216
  Args:
7125
7217
  model: The model to use.
7126
7218
  prompt: The text prompt for generating the videos. Optional for image to
7127
- video use cases.
7219
+ video and video extension use cases.
7128
7220
  image: The input image for generating the videos. Optional if prompt is
7129
7221
  provided.
7130
7222
  video: The input video for video extension use cases. Optional if prompt
@@ -7724,3 +7816,53 @@ class AsyncModels(_api_module.BaseModule):
7724
7816
  upscale_factor=upscale_factor,
7725
7817
  config=api_config,
7726
7818
  )
7819
+
7820
+ async def generate_videos(
7821
+ self,
7822
+ *,
7823
+ model: str,
7824
+ prompt: Optional[str] = None,
7825
+ image: Optional[types.ImageOrDict] = None,
7826
+ video: Optional[types.VideoOrDict] = None,
7827
+ config: Optional[types.GenerateVideosConfigOrDict] = None,
7828
+ ) -> types.GenerateVideosOperation:
7829
+ """Generates videos based on an input (text, image, or video) and configuration.
7830
+
7831
+ The following use cases are supported:
7832
+ 1. Text to video generation.
7833
+ 2a. Image to video generation (additional text prompt is optional).
7834
+ 2b. Image to video generation with frame interpolation (specify last_frame
7835
+ in config).
7836
+ 3. Video extension (additional text prompt is optional)
7837
+
7838
+ Args:
7839
+ model: The model to use.
7840
+ prompt: The text prompt for generating the videos. Optional for image to
7841
+ video and video extension use cases.
7842
+ image: The input image for generating the videos. Optional if prompt is
7843
+ provided.
7844
+ video: The input video for video extension use cases. Optional if prompt
7845
+ or image is provided.
7846
+ config: Configuration for generation.
7847
+
7848
+ Usage:
7849
+
7850
+ ```
7851
+ operation = client.models.generate_videos(
7852
+ model="veo-2.0-generate-001",
7853
+ prompt="A neon hologram of a cat driving at top speed",
7854
+ )
7855
+ while not operation.done:
7856
+ time.sleep(10)
7857
+ operation = client.operations.get(operation)
7858
+
7859
+ operation.result.generated_videos[0].video.uri
7860
+ ```
7861
+ """
7862
+ return await self._generate_videos(
7863
+ model=model,
7864
+ prompt=prompt,
7865
+ image=image,
7866
+ video=video,
7867
+ config=config,
7868
+ )
google/genai/pagers.py CHANGED
@@ -18,7 +18,8 @@
18
18
  # pylint: disable=protected-access
19
19
 
20
20
  import copy
21
- from typing import Any, AsyncIterator,Awaitable, Callable, Generic, Iterator, Literal, TypeVar
21
+ from typing import Any, AsyncIterator,Awaitable, Callable, Generic, Iterator, Literal, TypeVar, Union
22
+ from . import types
22
23
 
23
24
  T = TypeVar('T')
24
25
 
@@ -43,6 +44,8 @@ class _BasePager(Generic[T]):
43
44
  self._page = getattr(response, self._name) or []
44
45
  self._idx = 0
45
46
 
47
+ self._sdk_http_response = getattr(response, 'sdk_http_response', None)
48
+
46
49
  if not config:
47
50
  request_config = {}
48
51
  elif isinstance(config, dict):
@@ -110,6 +113,13 @@ class _BasePager(Generic[T]):
110
113
 
111
114
  return self._page_size
112
115
 
116
+ @property
117
+ def sdk_http_response(self) -> Union[types.HttpResponse, None]:
118
+ """Returns the http response of the API response.
119
+ """
120
+
121
+ return self._sdk_http_response
122
+
113
123
  @property
114
124
  def config(self) -> dict[str, Any]:
115
125
  """Returns the configuration when making the API request for the next page.
google/genai/tunings.py CHANGED
@@ -506,6 +506,11 @@ def _ListTuningJobsResponse_from_mldev(
506
506
  parent_object: Optional[dict[str, Any]] = None,
507
507
  ) -> dict[str, Any]:
508
508
  to_object: dict[str, Any] = {}
509
+ if getv(from_object, ['sdkHttpResponse']) is not None:
510
+ setv(
511
+ to_object, ['sdk_http_response'], getv(from_object, ['sdkHttpResponse'])
512
+ )
513
+
509
514
  if getv(from_object, ['nextPageToken']) is not None:
510
515
  setv(to_object, ['next_page_token'], getv(from_object, ['nextPageToken']))
511
516
 
@@ -691,6 +696,11 @@ def _ListTuningJobsResponse_from_vertex(
691
696
  parent_object: Optional[dict[str, Any]] = None,
692
697
  ) -> dict[str, Any]:
693
698
  to_object: dict[str, Any] = {}
699
+ if getv(from_object, ['sdkHttpResponse']) is not None:
700
+ setv(
701
+ to_object, ['sdk_http_response'], getv(from_object, ['sdkHttpResponse'])
702
+ )
703
+
694
704
  if getv(from_object, ['nextPageToken']) is not None:
695
705
  setv(to_object, ['next_page_token'], getv(from_object, ['nextPageToken']))
696
706
 
@@ -839,7 +849,9 @@ class Tunings(_api_module.BaseModule):
839
849
  return_value = types.ListTuningJobsResponse._from_response(
840
850
  response=response_dict, kwargs=parameter_model.model_dump()
841
851
  )
842
-
852
+ return_value.sdk_http_response = types.HttpResponse(
853
+ headers=response.headers
854
+ )
843
855
  self._api_client._verify_response(return_value)
844
856
  return return_value
845
857
 
@@ -1183,7 +1195,9 @@ class AsyncTunings(_api_module.BaseModule):
1183
1195
  return_value = types.ListTuningJobsResponse._from_response(
1184
1196
  response=response_dict, kwargs=parameter_model.model_dump()
1185
1197
  )
1186
-
1198
+ return_value.sdk_http_response = types.HttpResponse(
1199
+ headers=response.headers
1200
+ )
1187
1201
  self._api_client._verify_response(return_value)
1188
1202
  return return_value
1189
1203
 
google/genai/types.py CHANGED
@@ -1270,7 +1270,10 @@ class HttpOptions(_common.BaseModel):
1270
1270
  )
1271
1271
  extra_body: Optional[dict[str, Any]] = Field(
1272
1272
  default=None,
1273
- description="""Extra parameters to add to the request body.""",
1273
+ description="""Extra parameters to add to the request body.
1274
+ The structure must match the backend API's request structure.
1275
+ - VertexAI backend API docs: https://cloud.google.com/vertex-ai/docs/reference/rest
1276
+ - GeminiAPI backend API docs: https://ai.google.dev/api/rest""",
1274
1277
  )
1275
1278
  retry_options: Optional[HttpRetryOptions] = Field(
1276
1279
  default=None, description="""HTTP retry options for the request."""
@@ -1299,7 +1302,10 @@ class HttpOptionsDict(TypedDict, total=False):
1299
1302
  """Args passed to the async HTTP client."""
1300
1303
 
1301
1304
  extra_body: Optional[dict[str, Any]]
1302
- """Extra parameters to add to the request body."""
1305
+ """Extra parameters to add to the request body.
1306
+ The structure must match the backend API's request structure.
1307
+ - VertexAI backend API docs: https://cloud.google.com/vertex-ai/docs/reference/rest
1308
+ - GeminiAPI backend API docs: https://ai.google.dev/api/rest"""
1303
1309
 
1304
1310
  retry_options: Optional[HttpRetryOptionsDict]
1305
1311
  """HTTP retry options for the request."""
@@ -5714,6 +5720,12 @@ class GenerateImagesConfig(_common.BaseModel):
5714
5720
  description="""Whether to add a watermark to the generated images.
5715
5721
  """,
5716
5722
  )
5723
+ image_size: Optional[str] = Field(
5724
+ default=None,
5725
+ description="""The size of the largest dimension of the generated image.
5726
+ Supported sizes are 1K and 2K (not supported for Imagen 3 models).
5727
+ """,
5728
+ )
5717
5729
  enhance_prompt: Optional[bool] = Field(
5718
5730
  default=None,
5719
5731
  description="""Whether to use the prompt rewriting logic.
@@ -5790,6 +5802,11 @@ class GenerateImagesConfigDict(TypedDict, total=False):
5790
5802
  """Whether to add a watermark to the generated images.
5791
5803
  """
5792
5804
 
5805
+ image_size: Optional[str]
5806
+ """The size of the largest dimension of the generated image.
5807
+ Supported sizes are 1K and 2K (not supported for Imagen 3 models).
5808
+ """
5809
+
5793
5810
  enhance_prompt: Optional[bool]
5794
5811
  """Whether to use the prompt rewriting logic.
5795
5812
  """
@@ -6967,12 +6984,18 @@ _ListModelsParametersOrDict = Union[
6967
6984
 
6968
6985
  class ListModelsResponse(_common.BaseModel):
6969
6986
 
6987
+ sdk_http_response: Optional[HttpResponse] = Field(
6988
+ default=None, description="""Used to retain the full HTTP response."""
6989
+ )
6970
6990
  next_page_token: Optional[str] = Field(default=None, description="""""")
6971
6991
  models: Optional[list[Model]] = Field(default=None, description="""""")
6972
6992
 
6973
6993
 
6974
6994
  class ListModelsResponseDict(TypedDict, total=False):
6975
6995
 
6996
+ sdk_http_response: Optional[HttpResponseDict]
6997
+ """Used to retain the full HTTP response."""
6998
+
6976
6999
  next_page_token: Optional[str]
6977
7000
  """"""
6978
7001
 
@@ -8976,6 +8999,9 @@ _ListTuningJobsParametersOrDict = Union[
8976
8999
  class ListTuningJobsResponse(_common.BaseModel):
8977
9000
  """Response for the list tuning jobs method."""
8978
9001
 
9002
+ sdk_http_response: Optional[HttpResponse] = Field(
9003
+ default=None, description="""Used to retain the full HTTP response."""
9004
+ )
8979
9005
  next_page_token: Optional[str] = Field(
8980
9006
  default=None,
8981
9007
  description="""A token to retrieve the next page of results. Pass to ListTuningJobsRequest.page_token to obtain that page.""",
@@ -8988,6 +9014,9 @@ class ListTuningJobsResponse(_common.BaseModel):
8988
9014
  class ListTuningJobsResponseDict(TypedDict, total=False):
8989
9015
  """Response for the list tuning jobs method."""
8990
9016
 
9017
+ sdk_http_response: Optional[HttpResponseDict]
9018
+ """Used to retain the full HTTP response."""
9019
+
8991
9020
  next_page_token: Optional[str]
8992
9021
  """A token to retrieve the next page of results. Pass to ListTuningJobsRequest.page_token to obtain that page."""
8993
9022
 
@@ -9710,6 +9739,9 @@ _ListCachedContentsParametersOrDict = Union[
9710
9739
 
9711
9740
  class ListCachedContentsResponse(_common.BaseModel):
9712
9741
 
9742
+ sdk_http_response: Optional[HttpResponse] = Field(
9743
+ default=None, description="""Used to retain the full HTTP response."""
9744
+ )
9713
9745
  next_page_token: Optional[str] = Field(default=None, description="""""")
9714
9746
  cached_contents: Optional[list[CachedContent]] = Field(
9715
9747
  default=None,
@@ -9720,6 +9752,9 @@ class ListCachedContentsResponse(_common.BaseModel):
9720
9752
 
9721
9753
  class ListCachedContentsResponseDict(TypedDict, total=False):
9722
9754
 
9755
+ sdk_http_response: Optional[HttpResponseDict]
9756
+ """Used to retain the full HTTP response."""
9757
+
9723
9758
  next_page_token: Optional[str]
9724
9759
  """"""
9725
9760
 
@@ -9783,6 +9818,9 @@ _ListFilesParametersOrDict = Union[
9783
9818
  class ListFilesResponse(_common.BaseModel):
9784
9819
  """Response for the list files method."""
9785
9820
 
9821
+ sdk_http_response: Optional[HttpResponse] = Field(
9822
+ default=None, description="""Used to retain the full HTTP response."""
9823
+ )
9786
9824
  next_page_token: Optional[str] = Field(
9787
9825
  default=None, description="""A token to retrieve next page of results."""
9788
9826
  )
@@ -9794,6 +9832,9 @@ class ListFilesResponse(_common.BaseModel):
9794
9832
  class ListFilesResponseDict(TypedDict, total=False):
9795
9833
  """Response for the list files method."""
9796
9834
 
9835
+ sdk_http_response: Optional[HttpResponseDict]
9836
+ """Used to retain the full HTTP response."""
9837
+
9797
9838
  next_page_token: Optional[str]
9798
9839
  """A token to retrieve next page of results."""
9799
9840
 
@@ -10584,6 +10625,9 @@ _ListBatchJobsParametersOrDict = Union[
10584
10625
  class ListBatchJobsResponse(_common.BaseModel):
10585
10626
  """Config for batches.list return value."""
10586
10627
 
10628
+ sdk_http_response: Optional[HttpResponse] = Field(
10629
+ default=None, description="""Used to retain the full HTTP response."""
10630
+ )
10587
10631
  next_page_token: Optional[str] = Field(default=None, description="""""")
10588
10632
  batch_jobs: Optional[list[BatchJob]] = Field(default=None, description="""""")
10589
10633
 
@@ -10591,6 +10635,9 @@ class ListBatchJobsResponse(_common.BaseModel):
10591
10635
  class ListBatchJobsResponseDict(TypedDict, total=False):
10592
10636
  """Config for batches.list return value."""
10593
10637
 
10638
+ sdk_http_response: Optional[HttpResponseDict]
10639
+ """Used to retain the full HTTP response."""
10640
+
10594
10641
  next_page_token: Optional[str]
10595
10642
  """"""
10596
10643
 
google/genai/version.py CHANGED
@@ -13,4 +13,4 @@
13
13
  # limitations under the License.
14
14
  #
15
15
 
16
- __version__ = '1.26.0' # x-release-please-version
16
+ __version__ = '1.27.0' # x-release-please-version
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: google-genai
3
- Version: 1.26.0
3
+ Version: 1.27.0
4
4
  Summary: GenAI Python SDK
5
5
  Author-email: Google LLC <googleapis-packages@google.com>
6
6
  License: Apache-2.0
@@ -1404,35 +1404,21 @@ print(response.text)
1404
1404
  ## Tunings
1405
1405
 
1406
1406
  `client.tunings` contains tuning job APIs and supports supervised fine
1407
- tuning through `tune`. See the 'Create a client' section above to initialize a
1408
- client.
1407
+ tuning through `tune`. Only supported in Vertex AI. See the 'Create a client'
1408
+ section above to initialize a client.
1409
1409
 
1410
1410
  ### Tune
1411
1411
 
1412
1412
  - Vertex AI supports tuning from GCS source or from a Vertex Multimodal Dataset
1413
- - Gemini Developer API supports tuning from inline examples
1414
1413
 
1415
1414
  ```python
1416
1415
  from google.genai import types
1417
1416
 
1418
- if client.vertexai:
1419
- model = 'gemini-2.0-flash-001'
1420
- training_dataset = types.TuningDataset(
1421
- # or gcs_uri=my_vertex_multimodal_dataset
1422
- gcs_uri='gs://cloud-samples-data/ai-platform/generative_ai/gemini-1_5/text/sft_train_data.jsonl',
1423
- )
1424
- else:
1425
- model = 'models/gemini-2.0-flash-001'
1426
- # or gcs_uri=my_vertex_multimodal_dataset.resource_name
1427
- training_dataset = types.TuningDataset(
1428
- examples=[
1429
- types.TuningExample(
1430
- text_input=f'Input text {i}',
1431
- output=f'Output text {i}',
1432
- )
1433
- for i in range(5)
1434
- ],
1435
- )
1417
+ model = 'gemini-2.0-flash-001'
1418
+ training_dataset = types.TuningDataset(
1419
+ # or gcs_uri=my_vertex_multimodal_dataset
1420
+ gcs_uri='gs://cloud-samples-data/ai-platform/generative_ai/gemini-1_5/text/sft_train_data.jsonl',
1421
+ )
1436
1422
  ```
1437
1423
 
1438
1424
  ```python
@@ -1458,14 +1444,15 @@ print(tuning_job)
1458
1444
  ```python
1459
1445
  import time
1460
1446
 
1461
- running_states = set(
1447
+ completed_states = set(
1462
1448
  [
1463
- 'JOB_STATE_PENDING',
1464
- 'JOB_STATE_RUNNING',
1449
+ 'JOB_STATE_SUCCEEDED',
1450
+ 'JOB_STATE_FAILED',
1451
+ 'JOB_STATE_CANCELLED',
1465
1452
  ]
1466
1453
  )
1467
1454
 
1468
- while tuning_job.state in running_states:
1455
+ while tuning_job.state not in completed_states:
1469
1456
  print(tuning_job.state)
1470
1457
  tuning_job = client.tunings.get(name=tuning_job.name)
1471
1458
  time.sleep(10)
@@ -1576,16 +1563,63 @@ initialize a client.
1576
1563
 
1577
1564
  ### Create
1578
1565
 
1566
+ Vertex AI:
1567
+
1579
1568
  ```python
1580
1569
  # Specify model and source file only, destination and job display name will be auto-populated
1581
1570
  job = client.batches.create(
1582
1571
  model='gemini-2.0-flash-001',
1583
- src='bq://my-project.my-dataset.my-table',
1572
+ src='bq://my-project.my-dataset.my-table', # or "gs://path/to/input/data"
1584
1573
  )
1585
1574
 
1586
1575
  job
1587
1576
  ```
1588
1577
 
1578
+ Gemini Developer API:
1579
+
1580
+ ```python
1581
+ # Create a batch job with inlined requests
1582
+ batch_job = client.batches.create(
1583
+ model="gemini-2.0-flash",
1584
+ src=[{
1585
+ "contents": [{
1586
+ "parts": [{
1587
+ "text": "Hello!",
1588
+ }],
1589
+ "role": "user",
1590
+ }],
1591
+ "config": {"response_modalities": ["text"]},
1592
+ }],
1593
+ )
1594
+
1595
+ batch_job
1596
+ ```
1597
+
1598
+ To create a batch job from a file, you first need to upload a JSONL file.
1599
+ For example, `myrequests.json`:
1600
+
1601
+ ```
1602
+ {"key":"request_1", "request": {"contents": [{"parts": [{"text":
1603
+ "Explain how AI works in a few words"}]}], "generation_config": {"response_modalities": ["TEXT"]}}}
1604
+ {"key":"request_2", "request": {"contents": [{"parts": [{"text": "Explain how Crypto works in a few words"}]}]}}
1605
+ ```
1606
+ Then upload the file.
1607
+
1608
+ ```python
1609
+ # Upload the file
1610
+ file = client.files.upload(
1611
+ file='myrequests.json',
1612
+ config=types.UploadFileConfig(display_name='test_json')
1613
+ )
1614
+
1615
+ # Create a batch job with file name
1616
+ batch_job = client.batches.create(
1617
+ model="gemini-2.0-flash",
1618
+ src="files/file_name",
1619
+ )
1620
+ ```
1621
+
1622
+
1589
1623
  ```python
1590
1624
  # Get a job by name
1591
1625
  job = client.batches.get(name=job.name)
@@ -12,24 +12,24 @@ google/genai/_replay_api_client.py,sha256=2ndavmUMySvjLIdYEvjPZIOPfc-IA5rbWQgEwW
12
12
  google/genai/_test_api_client.py,sha256=4ruFIy5_1qcbKqqIBu3HSQbpSOBrxiecBtDZaTGFR1s,4797
13
13
  google/genai/_tokens_converters.py,sha256=ClWTsgcqn91zSw_qTqLPTNSP1-_G8s-NlBCD8-DQniw,23803
14
14
  google/genai/_transformers.py,sha256=uTRSd9qrUZbJoFzLEWy3wGR_j1KqVg5cypGzQM1Mc4w,37357
15
- google/genai/batches.py,sha256=rat60rGEza3QIu4ExyUl0RAzQiyFZkcJ4excxpiq8FA,80594
16
- google/genai/caches.py,sha256=oL7vb_G1__Q1I4jdhLNNe20KJK6nRwZ7WeiQ8S1-oVg,64173
15
+ google/genai/batches.py,sha256=a8X0wi8D6d7WC8873C2oVtk_NBYbd8xeI77GSkww4Nc,81094
16
+ google/genai/caches.py,sha256=isEzVYJgQVOjHf0XkFl86HOXzoYFXB-PgEVqwuo1V4s,64673
17
17
  google/genai/chats.py,sha256=0QdOUeWEYDQgAWBy1f7a3z3yY9S8tXSowUzNrzazzj4,16651
18
18
  google/genai/client.py,sha256=wXnfZBSv9p-yKtX_gabUrfBXoYHuqHhzK_VgwRttMgY,10777
19
19
  google/genai/errors.py,sha256=IdSymOuUJDprfPRBhBtFDkc_XX81UvgNbWrOLR8L2GU,5582
20
- google/genai/files.py,sha256=mz3sZ6F90Kh5ftqq0uv8gcqJfRuwfoZGBFsmDmpAt9k,39809
20
+ google/genai/files.py,sha256=Z9CP2RLAZlZDE3zWXVNA2LF3x7wJTXOhNzDoSyHmr9k,40154
21
21
  google/genai/live.py,sha256=WvOPBFDwD2eyUx89XCW6oudKtlK7960RqQuk5-SY1Ac,39482
22
22
  google/genai/live_music.py,sha256=3GG9nsto8Vhkohcs-4CPMS4DFp1ZtMuLYzHfvEPYAeg,6971
23
- google/genai/models.py,sha256=0QkjP8oXmWcOvyFZFZZc4LMsEOqYdoS5hwAkhQtDF70,234958
23
+ google/genai/models.py,sha256=wdxOAA2OTklPBobfvKSDLqdihd74IIG6lco-7Z2S4xg,239491
24
24
  google/genai/operations.py,sha256=3eudPaItN6_JJKMWNT9lLIJLUGyAQfFK1xken7Rv8vQ,12814
25
- google/genai/pagers.py,sha256=nyVYxp92rS-UaewO_oBgP593knofeLU6yOn6RolNoGQ,6797
25
+ google/genai/pagers.py,sha256=hlfLtH8Fv_eU19bfqKaabvWqDZYhIYXhOVuoH2oBJkA,7077
26
26
  google/genai/py.typed,sha256=RsMFoLwBkAvY05t6izop4UHZtqOPLiKp3GkIEizzmQY,40
27
27
  google/genai/tokens.py,sha256=QGW1jI0Y5wXqiaad0-N6Utgh9sK4TK0todHf5h0GLeI,12490
28
- google/genai/tunings.py,sha256=-_Fti-aX8NyDg4FxWtABUPuc5wmxNm6f_l98KXUKa1s,48461
29
- google/genai/types.py,sha256=8etqHluvZ_FQh4ZZTkJhMCbRe_41szi7YI_AVyQglE0,472335
30
- google/genai/version.py,sha256=4aazDDnUY1hmPYa17XYQCaoZRiBorYIq6V0oRcsfpYU,627
31
- google_genai-1.26.0.dist-info/licenses/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
32
- google_genai-1.26.0.dist-info/METADATA,sha256=1cD530ebUHsbi0RM5576qQCC8qTphGLlx-Rd5JZINvI,42400
33
- google_genai-1.26.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
34
- google_genai-1.26.0.dist-info/top_level.txt,sha256=_1QvSJIhFAGfxb79D6DhB7SUw2X6T4rwnz_LLrbcD3c,7
35
- google_genai-1.26.0.dist-info/RECORD,,
28
+ google/genai/tunings.py,sha256=diQSMojun63gFI9b-bluxTYjwbOwEIjviMJwHvUb9A4,48961
29
+ google/genai/types.py,sha256=0otLX5sfNZoW98bgHI7D2NAYTnF5MKXwvRuKHEqQhv0,474334
30
+ google/genai/version.py,sha256=zQZWqve_yrf9cPtiYvuJgv4hhwEymigHvKuPVVKoisc,627
31
+ google_genai-1.27.0.dist-info/licenses/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
32
+ google_genai-1.27.0.dist-info/METADATA,sha256=3h-l2dUas9jaOIvWv6r6Vnd1gWz-Ppz6Hn7i4sHkOcM,43091
33
+ google_genai-1.27.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
34
+ google_genai-1.27.0.dist-info/top_level.txt,sha256=_1QvSJIhFAGfxb79D6DhB7SUw2X6T4rwnz_LLrbcD3c,7
35
+ google_genai-1.27.0.dist-info/RECORD,,