google-genai 1.5.0__py3-none-any.whl → 1.6.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
google/genai/models.py CHANGED
@@ -175,15 +175,9 @@ def _Schema_to_mldev(
175
175
  if getv(from_object, ['pattern']) is not None:
176
176
  raise ValueError('pattern parameter is not supported in Gemini API.')
177
177
 
178
- if getv(from_object, ['minimum']) is not None:
179
- raise ValueError('minimum parameter is not supported in Gemini API.')
180
-
181
178
  if getv(from_object, ['default']) is not None:
182
179
  raise ValueError('default parameter is not supported in Gemini API.')
183
180
 
184
- if getv(from_object, ['any_of']) is not None:
185
- raise ValueError('any_of parameter is not supported in Gemini API.')
186
-
187
181
  if getv(from_object, ['max_length']) is not None:
188
182
  raise ValueError('max_length parameter is not supported in Gemini API.')
189
183
 
@@ -196,12 +190,12 @@ def _Schema_to_mldev(
196
190
  if getv(from_object, ['min_properties']) is not None:
197
191
  raise ValueError('min_properties parameter is not supported in Gemini API.')
198
192
 
199
- if getv(from_object, ['maximum']) is not None:
200
- raise ValueError('maximum parameter is not supported in Gemini API.')
201
-
202
193
  if getv(from_object, ['max_properties']) is not None:
203
194
  raise ValueError('max_properties parameter is not supported in Gemini API.')
204
195
 
196
+ if getv(from_object, ['any_of']) is not None:
197
+ setv(to_object, ['anyOf'], getv(from_object, ['any_of']))
198
+
205
199
  if getv(from_object, ['description']) is not None:
206
200
  setv(to_object, ['description'], getv(from_object, ['description']))
207
201
 
@@ -217,9 +211,15 @@ def _Schema_to_mldev(
217
211
  if getv(from_object, ['max_items']) is not None:
218
212
  setv(to_object, ['maxItems'], getv(from_object, ['max_items']))
219
213
 
214
+ if getv(from_object, ['maximum']) is not None:
215
+ setv(to_object, ['maximum'], getv(from_object, ['maximum']))
216
+
220
217
  if getv(from_object, ['min_items']) is not None:
221
218
  setv(to_object, ['minItems'], getv(from_object, ['min_items']))
222
219
 
220
+ if getv(from_object, ['minimum']) is not None:
221
+ setv(to_object, ['minimum'], getv(from_object, ['minimum']))
222
+
223
223
  if getv(from_object, ['nullable']) is not None:
224
224
  setv(to_object, ['nullable'], getv(from_object, ['nullable']))
225
225
 
@@ -254,15 +254,9 @@ def _Schema_to_vertex(
254
254
  if getv(from_object, ['pattern']) is not None:
255
255
  setv(to_object, ['pattern'], getv(from_object, ['pattern']))
256
256
 
257
- if getv(from_object, ['minimum']) is not None:
258
- setv(to_object, ['minimum'], getv(from_object, ['minimum']))
259
-
260
257
  if getv(from_object, ['default']) is not None:
261
258
  setv(to_object, ['default'], getv(from_object, ['default']))
262
259
 
263
- if getv(from_object, ['any_of']) is not None:
264
- setv(to_object, ['anyOf'], getv(from_object, ['any_of']))
265
-
266
260
  if getv(from_object, ['max_length']) is not None:
267
261
  setv(to_object, ['maxLength'], getv(from_object, ['max_length']))
268
262
 
@@ -275,12 +269,12 @@ def _Schema_to_vertex(
275
269
  if getv(from_object, ['min_properties']) is not None:
276
270
  setv(to_object, ['minProperties'], getv(from_object, ['min_properties']))
277
271
 
278
- if getv(from_object, ['maximum']) is not None:
279
- setv(to_object, ['maximum'], getv(from_object, ['maximum']))
280
-
281
272
  if getv(from_object, ['max_properties']) is not None:
282
273
  setv(to_object, ['maxProperties'], getv(from_object, ['max_properties']))
283
274
 
275
+ if getv(from_object, ['any_of']) is not None:
276
+ setv(to_object, ['anyOf'], getv(from_object, ['any_of']))
277
+
284
278
  if getv(from_object, ['description']) is not None:
285
279
  setv(to_object, ['description'], getv(from_object, ['description']))
286
280
 
@@ -296,9 +290,15 @@ def _Schema_to_vertex(
296
290
  if getv(from_object, ['max_items']) is not None:
297
291
  setv(to_object, ['maxItems'], getv(from_object, ['max_items']))
298
292
 
293
+ if getv(from_object, ['maximum']) is not None:
294
+ setv(to_object, ['maximum'], getv(from_object, ['maximum']))
295
+
299
296
  if getv(from_object, ['min_items']) is not None:
300
297
  setv(to_object, ['minItems'], getv(from_object, ['min_items']))
301
298
 
299
+ if getv(from_object, ['minimum']) is not None:
300
+ setv(to_object, ['minimum'], getv(from_object, ['minimum']))
301
+
302
302
  if getv(from_object, ['nullable']) is not None:
303
303
  setv(to_object, ['nullable'], getv(from_object, ['nullable']))
304
304
 
@@ -1351,10 +1351,8 @@ def _GenerateImagesConfig_to_mldev(
1351
1351
  raise ValueError('output_gcs_uri parameter is not supported in Gemini API.')
1352
1352
 
1353
1353
  if getv(from_object, ['negative_prompt']) is not None:
1354
- setv(
1355
- parent_object,
1356
- ['parameters', 'negativePrompt'],
1357
- getv(from_object, ['negative_prompt']),
1354
+ raise ValueError(
1355
+ 'negative_prompt parameter is not supported in Gemini API.'
1358
1356
  )
1359
1357
 
1360
1358
  if getv(from_object, ['number_of_images']) is not None:
@@ -1910,10 +1908,8 @@ def _EditImageConfig_to_mldev(
1910
1908
  raise ValueError('output_gcs_uri parameter is not supported in Gemini API.')
1911
1909
 
1912
1910
  if getv(from_object, ['negative_prompt']) is not None:
1913
- setv(
1914
- parent_object,
1915
- ['parameters', 'negativePrompt'],
1916
- getv(from_object, ['negative_prompt']),
1911
+ raise ValueError(
1912
+ 'negative_prompt parameter is not supported in Gemini API.'
1917
1913
  )
1918
1914
 
1919
1915
  if getv(from_object, ['number_of_images']) is not None:
@@ -2003,6 +1999,9 @@ def _EditImageConfig_to_mldev(
2003
1999
  getv(from_object, ['edit_mode']),
2004
2000
  )
2005
2001
 
2002
+ if getv(from_object, ['base_steps']) is not None:
2003
+ raise ValueError('base_steps parameter is not supported in Gemini API.')
2004
+
2006
2005
  return to_object
2007
2006
 
2008
2007
 
@@ -2107,6 +2106,13 @@ def _EditImageConfig_to_vertex(
2107
2106
  getv(from_object, ['edit_mode']),
2108
2107
  )
2109
2108
 
2109
+ if getv(from_object, ['base_steps']) is not None:
2110
+ setv(
2111
+ parent_object,
2112
+ ['parameters', 'editConfig', 'baseSteps'],
2113
+ getv(from_object, ['base_steps']),
2114
+ )
2115
+
2110
2116
  return to_object
2111
2117
 
2112
2118
 
@@ -3561,6 +3567,48 @@ def _Image_from_vertex(
3561
3567
  return to_object
3562
3568
 
3563
3569
 
3570
+ def _SafetyAttributes_from_mldev(
3571
+ api_client: BaseApiClient,
3572
+ from_object: Union[dict, object],
3573
+ parent_object: Optional[dict] = None,
3574
+ ) -> dict:
3575
+ to_object: dict[str, Any] = {}
3576
+ if getv(from_object, ['safetyAttributes', 'categories']) is not None:
3577
+ setv(
3578
+ to_object,
3579
+ ['categories'],
3580
+ getv(from_object, ['safetyAttributes', 'categories']),
3581
+ )
3582
+
3583
+ if getv(from_object, ['safetyAttributes', 'scores']) is not None:
3584
+ setv(
3585
+ to_object, ['scores'], getv(from_object, ['safetyAttributes', 'scores'])
3586
+ )
3587
+
3588
+ return to_object
3589
+
3590
+
3591
+ def _SafetyAttributes_from_vertex(
3592
+ api_client: BaseApiClient,
3593
+ from_object: Union[dict, object],
3594
+ parent_object: Optional[dict] = None,
3595
+ ) -> dict:
3596
+ to_object: dict[str, Any] = {}
3597
+ if getv(from_object, ['safetyAttributes', 'categories']) is not None:
3598
+ setv(
3599
+ to_object,
3600
+ ['categories'],
3601
+ getv(from_object, ['safetyAttributes', 'categories']),
3602
+ )
3603
+
3604
+ if getv(from_object, ['safetyAttributes', 'scores']) is not None:
3605
+ setv(
3606
+ to_object, ['scores'], getv(from_object, ['safetyAttributes', 'scores'])
3607
+ )
3608
+
3609
+ return to_object
3610
+
3611
+
3564
3612
  def _GeneratedImage_from_mldev(
3565
3613
  api_client: BaseApiClient,
3566
3614
  from_object: Union[dict, object],
@@ -3581,6 +3629,15 @@ def _GeneratedImage_from_mldev(
3581
3629
  getv(from_object, ['raiFilteredReason']),
3582
3630
  )
3583
3631
 
3632
+ if getv(from_object, ['_self']) is not None:
3633
+ setv(
3634
+ to_object,
3635
+ ['safety_attributes'],
3636
+ _SafetyAttributes_from_mldev(
3637
+ api_client, getv(from_object, ['_self']), to_object
3638
+ ),
3639
+ )
3640
+
3584
3641
  return to_object
3585
3642
 
3586
3643
 
@@ -3604,6 +3661,15 @@ def _GeneratedImage_from_vertex(
3604
3661
  getv(from_object, ['raiFilteredReason']),
3605
3662
  )
3606
3663
 
3664
+ if getv(from_object, ['_self']) is not None:
3665
+ setv(
3666
+ to_object,
3667
+ ['safety_attributes'],
3668
+ _SafetyAttributes_from_vertex(
3669
+ api_client, getv(from_object, ['_self']), to_object
3670
+ ),
3671
+ )
3672
+
3607
3673
  if getv(from_object, ['prompt']) is not None:
3608
3674
  setv(to_object, ['enhanced_prompt'], getv(from_object, ['prompt']))
3609
3675
 
@@ -4980,7 +5046,7 @@ class Models(_api_module.BaseModule):
4980
5046
  .. code-block:: python
4981
5047
 
4982
5048
  response = client.models.count_tokens(
4983
- model='gemini-1.5-flash',
5049
+ model='gemini-2.0-flash',
4984
5050
  contents='What is your name?',
4985
5051
  )
4986
5052
  print(response)
@@ -5054,9 +5120,9 @@ class Models(_api_module.BaseModule):
5054
5120
  contents: Union[types.ContentListUnion, types.ContentListUnionDict],
5055
5121
  config: Optional[types.ComputeTokensConfigOrDict] = None,
5056
5122
  ) -> types.ComputeTokensResponse:
5057
- """Return a list of tokens based on the input contents.
5123
+ """Given a list of contents, returns a corresponding TokensInfo containing the
5058
5124
 
5059
- Only text is supported.
5125
+ list of tokens and list of token ids.
5060
5126
 
5061
5127
  This method is not supported by the Gemini Developer API.
5062
5128
 
@@ -5069,7 +5135,7 @@ class Models(_api_module.BaseModule):
5069
5135
  .. code-block:: python
5070
5136
 
5071
5137
  response = client.models.compute_tokens(
5072
- model='gemini-1.5-flash',
5138
+ model='gemini-2.0-flash',
5073
5139
  contents='What is your name?',
5074
5140
  )
5075
5141
  print(response)
@@ -5234,21 +5300,21 @@ class Models(_api_module.BaseModule):
5234
5300
  """Makes an API request to generate content using a model.
5235
5301
 
5236
5302
  For the `model` parameter, supported formats for Vertex AI API include:
5237
- - The Gemini model ID, for example: 'gemini-1.5-flash-002'
5303
+ - The Gemini model ID, for example: 'gemini-2.0-flash'
5238
5304
  - The full resource name starts with 'projects/', for example:
5239
- 'projects/my-project-id/locations/us-central1/publishers/google/models/gemini-1.5-flash-002'
5305
+ 'projects/my-project-id/locations/us-central1/publishers/google/models/gemini-2.0-flash'
5240
5306
  - The partial resource name with 'publishers/', for example:
5241
- 'publishers/google/models/gemini-1.5-flash-002' or
5307
+ 'publishers/google/models/gemini-2.0-flash' or
5242
5308
  'publishers/meta/models/llama-3.1-405b-instruct-maas'
5243
5309
  - `/` separated publisher and model name, for example:
5244
- 'google/gemini-1.5-flash-002' or 'meta/llama-3.1-405b-instruct-maas'
5310
+ 'google/gemini-2.0-flash' or 'meta/llama-3.1-405b-instruct-maas'
5245
5311
 
5246
5312
  For the `model` parameter, supported formats for Gemini API include:
5247
- - The Gemini model ID, for example: 'gemini-1.5-flash-002'
5313
+ - The Gemini model ID, for example: 'gemini-2.0-flash'
5248
5314
  - The model name starts with 'models/', for example:
5249
- 'models/gemini-1.5-flash-002'
5250
- - if you would like to use a tuned model, the model name starts with
5251
- 'tunedModels/', for example:
5315
+ 'models/gemini-2.0-flash'
5316
+ - For tuned models, the model name starts with 'tunedModels/',
5317
+ for example:
5252
5318
  'tunedModels/1234567890123456789'
5253
5319
 
5254
5320
  Some models support multimodal input and output.
@@ -5265,7 +5331,7 @@ class Models(_api_module.BaseModule):
5265
5331
  )
5266
5332
 
5267
5333
  response = client.models.generate_content(
5268
- model='gemini-1.5-flash-002',
5334
+ model='gemini-2.0-flash',
5269
5335
  contents='''What is a good name for a flower shop that specializes in
5270
5336
  selling bouquets of dried flowers?'''
5271
5337
  )
@@ -5276,7 +5342,7 @@ class Models(_api_module.BaseModule):
5276
5342
  # * Timeless Petals
5277
5343
 
5278
5344
  response = client.models.generate_content(
5279
- model='gemini-1.5-flash-002',
5345
+ model='gemini-2.0-flash',
5280
5346
  contents=[
5281
5347
  types.Part.from_text('What is shown in this image?'),
5282
5348
  types.Part.from_uri('gs://generativeai-downloads/images/scones.jpg',
@@ -5354,21 +5420,21 @@ class Models(_api_module.BaseModule):
5354
5420
  """Makes an API request to generate content using a model and yields the model's response in chunks.
5355
5421
 
5356
5422
  For the `model` parameter, supported formats for Vertex AI API include:
5357
- - The Gemini model ID, for example: 'gemini-1.5-flash-002'
5423
+ - The Gemini model ID, for example: 'gemini-2.0-flash'
5358
5424
  - The full resource name starts with 'projects/', for example:
5359
- 'projects/my-project-id/locations/us-central1/publishers/google/models/gemini-1.5-flash-002'
5425
+ 'projects/my-project-id/locations/us-central1/publishers/google/models/gemini-2.0-flash'
5360
5426
  - The partial resource name with 'publishers/', for example:
5361
- 'publishers/google/models/gemini-1.5-flash-002' or
5427
+ 'publishers/google/models/gemini-2.0-flash' or
5362
5428
  'publishers/meta/models/llama-3.1-405b-instruct-maas'
5363
5429
  - `/` separated publisher and model name, for example:
5364
- 'google/gemini-1.5-flash-002' or 'meta/llama-3.1-405b-instruct-maas'
5430
+ 'google/gemini-2.0-flash' or 'meta/llama-3.1-405b-instruct-maas'
5365
5431
 
5366
5432
  For the `model` parameter, supported formats for Gemini API include:
5367
- - The Gemini model ID, for example: 'gemini-1.5-flash-002'
5433
+ - The Gemini model ID, for example: 'gemini-2.0-flash'
5368
5434
  - The model name starts with 'models/', for example:
5369
- 'models/gemini-1.5-flash-002'
5370
- - If you would like to use a tuned model, the model name starts with
5371
- 'tunedModels/', for example:
5435
+ 'models/gemini-2.0-flash'
5436
+ - For tuned models, the model name starts with 'tunedModels/',
5437
+ for example:
5372
5438
  'tunedModels/1234567890123456789'
5373
5439
 
5374
5440
  Some models support multimodal input and output.
@@ -5385,7 +5451,7 @@ class Models(_api_module.BaseModule):
5385
5451
  )
5386
5452
 
5387
5453
  for chunk in client.models.generate_content_stream(
5388
- model='gemini-1.5-flash-002',
5454
+ model='gemini-2.0-flash',
5389
5455
  contents='''What is a good name for a flower shop that specializes in
5390
5456
  selling bouquets of dried flowers?'''
5391
5457
  ):
@@ -5396,7 +5462,7 @@ class Models(_api_module.BaseModule):
5396
5462
  # * Timeless Petals
5397
5463
 
5398
5464
  for chunk in client.models.generate_content_stream(
5399
- model='gemini-1.5-flash-002',
5465
+ model='gemini-2.0-flash',
5400
5466
  contents=[
5401
5467
  types.Part.from_text('What is shown in this image?'),
5402
5468
  types.Part.from_uri('gs://generativeai-downloads/images/scones.jpg',
@@ -6365,7 +6431,7 @@ class AsyncModels(_api_module.BaseModule):
6365
6431
  .. code-block:: python
6366
6432
 
6367
6433
  response = await client.aio.models.count_tokens(
6368
- model='gemini-1.5-flash',
6434
+ model='gemini-2.0-flash',
6369
6435
  contents='What is your name?',
6370
6436
  )
6371
6437
  print(response)
@@ -6439,11 +6505,10 @@ class AsyncModels(_api_module.BaseModule):
6439
6505
  contents: Union[types.ContentListUnion, types.ContentListUnionDict],
6440
6506
  config: Optional[types.ComputeTokensConfigOrDict] = None,
6441
6507
  ) -> types.ComputeTokensResponse:
6442
- """Return a list of tokens based on the input contents.
6508
+ """Given a list of contents, returns a corresponding TokensInfo containing the
6443
6509
 
6444
- Only text is supported.
6510
+ list of tokens and list of token ids.
6445
6511
 
6446
- This method is not supported by the Gemini Developer API.
6447
6512
 
6448
6513
  Args:
6449
6514
  model (str): The model to use.
@@ -6454,7 +6519,7 @@ class AsyncModels(_api_module.BaseModule):
6454
6519
  .. code-block:: python
6455
6520
 
6456
6521
  response = await client.aio.models.compute_tokens(
6457
- model='gemini-1.5-flash',
6522
+ model='gemini-2.0-flash',
6458
6523
  contents='What is your name?',
6459
6524
  )
6460
6525
  print(response)
@@ -6632,7 +6697,7 @@ class AsyncModels(_api_module.BaseModule):
6632
6697
  )
6633
6698
 
6634
6699
  response = await client.aio.models.generate_content(
6635
- model='gemini-1.5-flash-002',
6700
+ model='gemini-2.0-flash',
6636
6701
  contents='User input: I like bagels. Answer:',
6637
6702
  config=types.GenerateContentConfig(
6638
6703
  system_instruction=
@@ -6709,21 +6774,21 @@ class AsyncModels(_api_module.BaseModule):
6709
6774
  """Makes an API request to generate content using a model and yields the model's response in chunks.
6710
6775
 
6711
6776
  For the `model` parameter, supported formats for Vertex AI API include:
6712
- - The Gemini model ID, for example: 'gemini-1.5-flash-002'
6777
+ - The Gemini model ID, for example: 'gemini-2.0-flash'
6713
6778
  - The full resource name starts with 'projects/', for example:
6714
- 'projects/my-project-id/locations/us-central1/publishers/google/models/gemini-1.5-flash-002'
6779
+ 'projects/my-project-id/locations/us-central1/publishers/google/models/gemini-2.0-flash'
6715
6780
  - The partial resource name with 'publishers/', for example:
6716
- 'publishers/google/models/gemini-1.5-flash-002' or
6781
+ 'publishers/google/models/gemini-2.0-flash' or
6717
6782
  'publishers/meta/models/llama-3.1-405b-instruct-maas'
6718
6783
  - `/` separated publisher and model name, for example:
6719
- 'google/gemini-1.5-flash-002' or 'meta/llama-3.1-405b-instruct-maas'
6784
+ 'google/gemini-2.0-flash' or 'meta/llama-3.1-405b-instruct-maas'
6720
6785
 
6721
6786
  For the `model` parameter, supported formats for Gemini API include:
6722
- - The Gemini model ID, for example: 'gemini-1.5-flash-002'
6787
+ - The Gemini model ID, for example: 'gemini-2.0-flash'
6723
6788
  - The model name starts with 'models/', for example:
6724
- 'models/gemini-1.5-flash-002'
6725
- - If you would like to use a tuned model, the model name starts with
6726
- 'tunedModels/', for example:
6789
+ 'models/gemini-2.0-flash'
6790
+ - For tuned models, the model name starts with 'tunedModels/',
6791
+ for example:
6727
6792
  'tunedModels/1234567890123456789'
6728
6793
 
6729
6794
  Some models support multimodal input and output.
@@ -6740,7 +6805,7 @@ class AsyncModels(_api_module.BaseModule):
6740
6805
  )
6741
6806
 
6742
6807
  async for chunk in await client.aio.models.generate_content_stream(
6743
- model='gemini-1.5-flash-002',
6808
+ model='gemini-2.0-flash',
6744
6809
  contents='''What is a good name for a flower shop that specializes in
6745
6810
  selling bouquets of dried flowers?'''
6746
6811
  ):
@@ -6751,7 +6816,7 @@ class AsyncModels(_api_module.BaseModule):
6751
6816
  # * Timeless Petals
6752
6817
 
6753
6818
  async for chunk in await client.aio.models.generate_content_stream(
6754
- model='gemini-1.5-flash-002',
6819
+ model='gemini-2.0-flash',
6755
6820
  contents=[
6756
6821
  types.Part.from_text('What is shown in this image?'),
6757
6822
  types.Part.from_uri('gs://generativeai-downloads/images/scones.jpg',
@@ -465,10 +465,24 @@ class Operations(_api_module.BaseModule):
465
465
  # TODO(b/398233524): Cast operation types
466
466
  if self._api_client.vertexai:
467
467
  resource_name = operation_name.rpartition('/operations/')[0]
468
+ http_options = types.HttpOptions()
469
+ if isinstance(config, dict):
470
+ dict_options = config.get('http_options', None)
471
+ if dict_options is not None:
472
+ http_options = types.HttpOptions(**dict(dict_options))
473
+ elif isinstance(config, types.GetOperationConfig) and config is not None:
474
+ http_options = (
475
+ config.http_options
476
+ if config.http_options is not None
477
+ else types.HttpOptions()
478
+ )
479
+ fetch_operation_config = types.FetchPredictOperationConfig(
480
+ http_options=http_options
481
+ )
468
482
  return self._fetch_predict_operation(
469
483
  operation_name=operation_name,
470
484
  resource_name=resource_name,
471
- config=config,
485
+ config=fetch_operation_config,
472
486
  )
473
487
  else:
474
488
  return self._get_operation(
@@ -623,10 +637,24 @@ class AsyncOperations(_api_module.BaseModule):
623
637
 
624
638
  if self._api_client.vertexai:
625
639
  resource_name = operation_name.rpartition('/operations/')[0]
640
+ http_options = types.HttpOptions()
641
+ if isinstance(config, dict):
642
+ dict_options = config.get('http_options', None)
643
+ if dict_options is not None:
644
+ http_options = types.HttpOptions(**dict(dict_options))
645
+ elif isinstance(config, types.GetOperationConfig) and config is not None:
646
+ http_options = (
647
+ config.http_options
648
+ if config.http_options is not None
649
+ else types.HttpOptions()
650
+ )
651
+ fetch_operation_config = types.FetchPredictOperationConfig(
652
+ http_options=http_options
653
+ )
626
654
  return await self._fetch_predict_operation(
627
655
  operation_name=operation_name,
628
656
  resource_name=resource_name,
629
- config=config,
657
+ config=fetch_operation_config,
630
658
  )
631
659
  else:
632
660
  return await self._get_operation(
google/genai/pagers.py CHANGED
@@ -65,9 +65,9 @@ class _BasePager(Generic[T]):
65
65
 
66
66
  @property
67
67
  def page(self) -> list[T]:
68
- """Returns the current page, which is a list of items.
68
+ """Returns a subset of the entire list of items.
69
69
 
70
- The returned list of items is a subset of the entire list.
70
+ For the number of items returned, see `pageSize()`.
71
71
 
72
72
  Usage:
73
73
 
@@ -97,9 +97,7 @@ class _BasePager(Generic[T]):
97
97
 
98
98
  @property
99
99
  def page_size(self) -> int:
100
- """Returns the length of the page fetched each time by this pager.
101
-
102
- The number of items in the page is less than or equal to the page length.
100
+ """Returns the maximum number of items fetched by the pager at one time.
103
101
 
104
102
  Usage:
105
103
 
google/genai/tunings.py CHANGED
@@ -332,7 +332,7 @@ def _CreateTuningJobConfig_to_vertex(
332
332
 
333
333
  if getv(from_object, ['learning_rate_multiplier']) is not None:
334
334
  setv(
335
- to_object,
335
+ parent_object,
336
336
  ['supervisedTuningSpec', 'hyperParameters', 'learningRateMultiplier'],
337
337
  getv(from_object, ['learning_rate_multiplier']),
338
338
  )
@@ -781,13 +781,13 @@ class Tunings(_api_module.BaseModule):
781
781
  def _list(
782
782
  self, *, config: Optional[types.ListTuningJobsConfigOrDict] = None
783
783
  ) -> types.ListTuningJobsResponse:
784
- """Lists tuning jobs.
784
+ """Lists `TuningJob` objects.
785
785
 
786
786
  Args:
787
787
  config: The configuration for the list request.
788
788
 
789
789
  Returns:
790
- A list of tuning jobs.
790
+ A list of `TuningJob` objects.
791
791
  """
792
792
 
793
793
  parameter_model = types._ListTuningJobsParameters(
@@ -855,7 +855,7 @@ class Tunings(_api_module.BaseModule):
855
855
  training_dataset: types.TuningDatasetOrDict,
856
856
  config: Optional[types.CreateTuningJobConfigOrDict] = None,
857
857
  ) -> types.TuningJob:
858
- """Creates a supervised fine-tuning job.
858
+ """Creates a supervised fine-tuning job and returns the TuningJob object.
859
859
 
860
860
  Args:
861
861
  base_model: The name of the model to tune.
@@ -922,7 +922,7 @@ class Tunings(_api_module.BaseModule):
922
922
  training_dataset: types.TuningDatasetOrDict,
923
923
  config: Optional[types.CreateTuningJobConfigOrDict] = None,
924
924
  ) -> types.Operation:
925
- """Creates a supervised fine-tuning job.
925
+ """Creates a supervised fine-tuning job and returns the TuningJob object.
926
926
 
927
927
  Args:
928
928
  base_model: The name of the model to tune.
@@ -999,7 +999,11 @@ class Tunings(_api_module.BaseModule):
999
999
  config: Optional[types.GetTuningJobConfigOrDict] = None,
1000
1000
  ) -> types.TuningJob:
1001
1001
  job = self._get(name=name, config=config)
1002
- if job.experiment and self._api_client.vertexai:
1002
+ if (
1003
+ job.experiment
1004
+ and self._api_client.vertexai
1005
+ and self._api_client.project is not None
1006
+ ):
1003
1007
  _IpythonUtils.display_experiment_button(
1004
1008
  experiment=job.experiment,
1005
1009
  project=self._api_client.project,
@@ -1029,11 +1033,12 @@ class Tunings(_api_module.BaseModule):
1029
1033
  training_dataset=training_dataset,
1030
1034
  config=config,
1031
1035
  )
1032
- operation_dict = operation.to_json_dict()
1033
- try:
1034
- tuned_model_name = operation_dict['metadata']['tunedModel']
1035
- except KeyError:
1036
- tuned_model_name = operation_dict['name'].partition('/operations/')[0]
1036
+ if operation.metadata is not None and 'tunedModel' in operation.metadata:
1037
+ tuned_model_name = operation.metadata['tunedModel']
1038
+ else:
1039
+ if operation.name is None:
1040
+ raise ValueError('Operation name is required.')
1041
+ tuned_model_name = operation.name.partition('/operations/')[0]
1037
1042
  tuning_job = types.TuningJob(
1038
1043
  name=tuned_model_name,
1039
1044
  state=types.JobState.JOB_STATE_QUEUED,
@@ -1120,13 +1125,13 @@ class AsyncTunings(_api_module.BaseModule):
1120
1125
  async def _list(
1121
1126
  self, *, config: Optional[types.ListTuningJobsConfigOrDict] = None
1122
1127
  ) -> types.ListTuningJobsResponse:
1123
- """Lists tuning jobs.
1128
+ """Lists `TuningJob` objects.
1124
1129
 
1125
1130
  Args:
1126
1131
  config: The configuration for the list request.
1127
1132
 
1128
1133
  Returns:
1129
- A list of tuning jobs.
1134
+ A list of `TuningJob` objects.
1130
1135
  """
1131
1136
 
1132
1137
  parameter_model = types._ListTuningJobsParameters(
@@ -1194,7 +1199,7 @@ class AsyncTunings(_api_module.BaseModule):
1194
1199
  training_dataset: types.TuningDatasetOrDict,
1195
1200
  config: Optional[types.CreateTuningJobConfigOrDict] = None,
1196
1201
  ) -> types.TuningJob:
1197
- """Creates a supervised fine-tuning job.
1202
+ """Creates a supervised fine-tuning job and returns the TuningJob object.
1198
1203
 
1199
1204
  Args:
1200
1205
  base_model: The name of the model to tune.
@@ -1261,7 +1266,7 @@ class AsyncTunings(_api_module.BaseModule):
1261
1266
  training_dataset: types.TuningDatasetOrDict,
1262
1267
  config: Optional[types.CreateTuningJobConfigOrDict] = None,
1263
1268
  ) -> types.Operation:
1264
- """Creates a supervised fine-tuning job.
1269
+ """Creates a supervised fine-tuning job and returns the TuningJob object.
1265
1270
 
1266
1271
  Args:
1267
1272
  base_model: The name of the model to tune.
@@ -1338,7 +1343,11 @@ class AsyncTunings(_api_module.BaseModule):
1338
1343
  config: Optional[types.GetTuningJobConfigOrDict] = None,
1339
1344
  ) -> types.TuningJob:
1340
1345
  job = await self._get(name=name, config=config)
1341
- if job.experiment and self._api_client.vertexai:
1346
+ if (
1347
+ job.experiment
1348
+ and self._api_client.vertexai
1349
+ and self._api_client.project is not None
1350
+ ):
1342
1351
  _IpythonUtils.display_experiment_button(
1343
1352
  experiment=job.experiment,
1344
1353
  project=self._api_client.project,
@@ -1368,11 +1377,12 @@ class AsyncTunings(_api_module.BaseModule):
1368
1377
  training_dataset=training_dataset,
1369
1378
  config=config,
1370
1379
  )
1371
- operation_dict = operation.to_json_dict()
1372
- try:
1373
- tuned_model_name = operation_dict['metadata']['tunedModel']
1374
- except KeyError:
1375
- tuned_model_name = operation_dict['name'].partition('/operations/')[0]
1380
+ if operation.metadata is not None and 'tunedModel' in operation.metadata:
1381
+ tuned_model_name = operation.metadata['tunedModel']
1382
+ else:
1383
+ if operation.name is None:
1384
+ raise ValueError('Operation name is required.')
1385
+ tuned_model_name = operation.name.partition('/operations/')[0]
1376
1386
  tuning_job = types.TuningJob(
1377
1387
  name=tuned_model_name,
1378
1388
  state=types.JobState.JOB_STATE_QUEUED,