google-genai 1.59.0__py3-none-any.whl → 1.61.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (28)
  1. google/genai/_interactions/resources/interactions.py +60 -38
  2. google/genai/_interactions/types/__init__.py +2 -1
  3. google/genai/_interactions/types/content_delta.py +1 -1
  4. google/genai/_interactions/types/function_result_content.py +2 -1
  5. google/genai/_interactions/types/function_result_content_param.py +4 -4
  6. google/genai/_interactions/types/{interaction_event.py → interaction_complete_event.py} +3 -3
  7. google/genai/_interactions/types/interaction_create_params.py +6 -6
  8. google/genai/_interactions/types/interaction_get_params.py +3 -0
  9. google/genai/_interactions/types/interaction_sse_event.py +11 -2
  10. google/genai/_interactions/types/interaction_start_event.py +36 -0
  11. google/genai/batches.py +8 -0
  12. google/genai/files.py +15 -15
  13. google/genai/models.py +12 -0
  14. google/genai/tests/batches/test_create_with_inlined_requests.py +31 -15
  15. google/genai/tests/batches/test_get.py +1 -1
  16. google/genai/tests/client/test_client_close.py +0 -1
  17. google/genai/tests/files/test_register_table.py +1 -1
  18. google/genai/tests/models/test_generate_content.py +16 -0
  19. google/genai/tests/transformers/test_schema.py +10 -1
  20. google/genai/tests/tunings/test_tune.py +87 -0
  21. google/genai/tunings.py +163 -4
  22. google/genai/types.py +221 -14
  23. google/genai/version.py +1 -1
  24. {google_genai-1.59.0.dist-info → google_genai-1.61.0.dist-info}/METADATA +1 -1
  25. {google_genai-1.59.0.dist-info → google_genai-1.61.0.dist-info}/RECORD +28 -27
  26. {google_genai-1.59.0.dist-info → google_genai-1.61.0.dist-info}/WHEEL +1 -1
  27. {google_genai-1.59.0.dist-info → google_genai-1.61.0.dist-info}/licenses/LICENSE +0 -0
  28. {google_genai-1.59.0.dist-info → google_genai-1.61.0.dist-info}/top_level.txt +0 -0
google/genai/tunings.py CHANGED
@@ -188,6 +188,14 @@ def _CreateTuningJobConfig_to_mldev(
   if getv(from_object, ['adapter_size']) is not None:
     raise ValueError('adapter_size parameter is not supported in Gemini API.')
 
+  if getv(from_object, ['tuning_mode']) is not None:
+    raise ValueError('tuning_mode parameter is not supported in Gemini API.')
+
+  if getv(from_object, ['custom_base_model']) is not None:
+    raise ValueError(
+        'custom_base_model parameter is not supported in Gemini API.'
+    )
+
   if getv(from_object, ['batch_size']) is not None:
     setv(
         parent_object,
@@ -213,6 +221,24 @@ def _CreateTuningJobConfig_to_mldev(
   if getv(from_object, ['beta']) is not None:
     raise ValueError('beta parameter is not supported in Gemini API.')
 
+  if getv(from_object, ['base_teacher_model']) is not None:
+    raise ValueError(
+        'base_teacher_model parameter is not supported in Gemini API.'
+    )
+
+  if getv(from_object, ['tuned_teacher_model_source']) is not None:
+    raise ValueError(
+        'tuned_teacher_model_source parameter is not supported in Gemini API.'
+    )
+
+  if getv(from_object, ['sft_loss_weight_multiplier']) is not None:
+    raise ValueError(
+        'sft_loss_weight_multiplier parameter is not supported in Gemini API.'
+    )
+
+  if getv(from_object, ['output_uri']) is not None:
+    raise ValueError('output_uri parameter is not supported in Gemini API.')
+
   return to_object
 
 
@@ -246,6 +272,16 @@ def _CreateTuningJobConfig_to_vertex(
           ),
       )
 
+  elif discriminator == 'DISTILLATION':
+    if getv(from_object, ['validation_dataset']) is not None:
+      setv(
+          parent_object,
+          ['distillationSpec'],
+          _TuningValidationDataset_to_vertex(
+              getv(from_object, ['validation_dataset']), to_object, root_object
+          ),
+      )
+
   if getv(from_object, ['tuned_model_display_name']) is not None:
     setv(
         parent_object,
@@ -275,6 +311,14 @@ def _CreateTuningJobConfig_to_vertex(
           getv(from_object, ['epoch_count']),
       )
 
+  elif discriminator == 'DISTILLATION':
+    if getv(from_object, ['epoch_count']) is not None:
+      setv(
+          parent_object,
+          ['distillationSpec', 'hyperParameters', 'epochCount'],
+          getv(from_object, ['epoch_count']),
+      )
+
   discriminator = getv(root_object, ['config', 'method'])
   if discriminator is None:
     discriminator = 'SUPERVISED_FINE_TUNING'
@@ -298,6 +342,14 @@ def _CreateTuningJobConfig_to_vertex(
          getv(from_object, ['learning_rate_multiplier']),
      )
 
+  elif discriminator == 'DISTILLATION':
+    if getv(from_object, ['learning_rate_multiplier']) is not None:
+      setv(
+          parent_object,
+          ['distillationSpec', 'hyperParameters', 'learningRateMultiplier'],
+          getv(from_object, ['learning_rate_multiplier']),
+      )
+
   discriminator = getv(root_object, ['config', 'method'])
   if discriminator is None:
     discriminator = 'SUPERVISED_FINE_TUNING'
@@ -317,6 +369,14 @@ def _CreateTuningJobConfig_to_vertex(
          getv(from_object, ['export_last_checkpoint_only']),
      )
 
+  elif discriminator == 'DISTILLATION':
+    if getv(from_object, ['export_last_checkpoint_only']) is not None:
+      setv(
+          parent_object,
+          ['distillationSpec', 'exportLastCheckpointOnly'],
+          getv(from_object, ['export_last_checkpoint_only']),
+      )
+
   discriminator = getv(root_object, ['config', 'method'])
   if discriminator is None:
     discriminator = 'SUPERVISED_FINE_TUNING'
@@ -336,11 +396,53 @@ def _CreateTuningJobConfig_to_vertex(
          getv(from_object, ['adapter_size']),
      )
 
-  if getv(from_object, ['batch_size']) is not None:
-    raise ValueError('batch_size parameter is not supported in Vertex AI.')
+  elif discriminator == 'DISTILLATION':
+    if getv(from_object, ['adapter_size']) is not None:
+      setv(
+          parent_object,
+          ['distillationSpec', 'hyperParameters', 'adapterSize'],
+          getv(from_object, ['adapter_size']),
+      )
 
-  if getv(from_object, ['learning_rate']) is not None:
-    raise ValueError('learning_rate parameter is not supported in Vertex AI.')
+  discriminator = getv(root_object, ['config', 'method'])
+  if discriminator is None:
+    discriminator = 'SUPERVISED_FINE_TUNING'
+  if discriminator == 'SUPERVISED_FINE_TUNING':
+    if getv(from_object, ['tuning_mode']) is not None:
+      setv(
+          parent_object,
+          ['supervisedTuningSpec', 'tuningMode'],
+          getv(from_object, ['tuning_mode']),
+      )
+
+  if getv(from_object, ['custom_base_model']) is not None:
+    setv(
+        parent_object,
+        ['customBaseModel'],
+        getv(from_object, ['custom_base_model']),
+    )
+
+  discriminator = getv(root_object, ['config', 'method'])
+  if discriminator is None:
+    discriminator = 'SUPERVISED_FINE_TUNING'
+  if discriminator == 'SUPERVISED_FINE_TUNING':
+    if getv(from_object, ['batch_size']) is not None:
+      setv(
+          parent_object,
+          ['supervisedTuningSpec', 'hyperParameters', 'batchSize'],
+          getv(from_object, ['batch_size']),
+      )
+
+  discriminator = getv(root_object, ['config', 'method'])
+  if discriminator is None:
+    discriminator = 'SUPERVISED_FINE_TUNING'
+  if discriminator == 'SUPERVISED_FINE_TUNING':
+    if getv(from_object, ['learning_rate']) is not None:
+      setv(
+          parent_object,
+          ['supervisedTuningSpec', 'hyperParameters', 'learningRate'],
+          getv(from_object, ['learning_rate']),
+      )
 
   discriminator = getv(root_object, ['config', 'method'])
   if discriminator is None:
@@ -365,6 +467,16 @@ def _CreateTuningJobConfig_to_vertex(
          ),
      )
 
+  elif discriminator == 'DISTILLATION':
+    if getv(from_object, ['evaluation_config']) is not None:
+      setv(
+          parent_object,
+          ['distillationSpec', 'evaluationConfig'],
+          _EvaluationConfig_to_vertex(
+              getv(from_object, ['evaluation_config']), to_object, root_object
+          ),
+      )
+
   if getv(from_object, ['labels']) is not None:
     setv(parent_object, ['labels'], getv(from_object, ['labels']))
 
@@ -375,6 +487,30 @@ def _CreateTuningJobConfig_to_vertex(
         getv(from_object, ['beta']),
     )
 
+  if getv(from_object, ['base_teacher_model']) is not None:
+    setv(
+        parent_object,
+        ['distillationSpec', 'baseTeacherModel'],
+        getv(from_object, ['base_teacher_model']),
+    )
+
+  if getv(from_object, ['tuned_teacher_model_source']) is not None:
+    setv(
+        parent_object,
+        ['distillationSpec', 'tunedTeacherModelSource'],
+        getv(from_object, ['tuned_teacher_model_source']),
+    )
+
+  if getv(from_object, ['sft_loss_weight_multiplier']) is not None:
+    setv(
+        parent_object,
+        ['distillationSpec', 'hyperParameters', 'sftLossWeightMultiplier'],
+        getv(from_object, ['sft_loss_weight_multiplier']),
+    )
+
+  if getv(from_object, ['output_uri']) is not None:
+    setv(parent_object, ['outputUri'], getv(from_object, ['output_uri']))
+
   return to_object
 
 
@@ -920,6 +1056,14 @@ def _TuningDataset_to_vertex(
          getv(from_object, ['gcs_uri']),
      )
 
+  elif discriminator == 'DISTILLATION':
+    if getv(from_object, ['gcs_uri']) is not None:
+      setv(
+          parent_object,
+          ['distillationSpec', 'promptDatasetUri'],
+          getv(from_object, ['gcs_uri']),
+      )
+
   discriminator = getv(root_object, ['config', 'method'])
   if discriminator is None:
     discriminator = 'SUPERVISED_FINE_TUNING'
@@ -939,6 +1083,14 @@ def _TuningDataset_to_vertex(
          getv(from_object, ['vertex_dataset_resource']),
      )
 
+  elif discriminator == 'DISTILLATION':
+    if getv(from_object, ['vertex_dataset_resource']) is not None:
+      setv(
+          parent_object,
+          ['distillationSpec', 'promptDatasetUri'],
+          getv(from_object, ['vertex_dataset_resource']),
+      )
+
   if getv(from_object, ['examples']) is not None:
     raise ValueError('examples parameter is not supported in Vertex AI.')
 
@@ -1066,6 +1218,13 @@ def _TuningJob_from_vertex(
        getv(from_object, ['preferenceOptimizationSpec']),
    )
 
+  if getv(from_object, ['distillationSpec']) is not None:
+    setv(
+        to_object,
+        ['distillation_spec'],
+        getv(from_object, ['distillationSpec']),
+    )
+
   if getv(from_object, ['tuningDataStats']) is not None:
     setv(
         to_object, ['tuning_data_stats'], getv(from_object, ['tuningDataStats'])
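The new DISTILLATION branches above route config fields into the Vertex `distillationSpec` request payload. A minimal sketch of how this might be driven from the public API, assuming a Vertex AI client; the project, model ids and GCS paths below are placeholders, not values taken from this diff:

from google import genai
from google.genai import types

# Hedged sketch: all ids and paths are illustrative placeholders.
client = genai.Client(vertexai=True, project='my-project', location='us-central1')

job = client.tunings.tune(
    base_model='gemma-2b',  # illustrative student/base model id
    training_dataset=types.TuningDataset(
        gcs_uri='gs://my-bucket/prompts.jsonl',  # -> distillationSpec.promptDatasetUri
    ),
    config=types.CreateTuningJobConfig(
        method=types.TuningMethod.DISTILLATION,
        base_teacher_model='gemini-2.0-flash-001',   # illustrative teacher model
        epoch_count=3,                 # -> distillationSpec.hyperParameters.epochCount
        learning_rate_multiplier=1.0,  # -> distillationSpec.hyperParameters.learningRateMultiplier
        output_uri='gs://my-bucket/tuning-output',   # -> outputUri
        tuned_model_display_name='distilled-gemma',
    ),
)
print(job.name, job.state)
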
google/genai/types.py CHANGED
@@ -813,6 +813,8 @@ class TuningMethod(_common.CaseInSensitiveEnum):
   """Supervised fine tuning."""
   PREFERENCE_TUNING = 'PREFERENCE_TUNING'
   """Preference optimization tuning."""
+  DISTILLATION = 'DISTILLATION'
+  """Distillation tuning."""
 
 
 class DocumentState(_common.CaseInSensitiveEnum):
@@ -5003,6 +5005,38 @@ SpeechConfigUnion = Union[str, SpeechConfig]
 SpeechConfigUnionDict = Union[str, SpeechConfig, SpeechConfigDict]
 
 
+class ModelArmorConfig(_common.BaseModel):
+  """Configuration for Model Armor integrations of prompt and responses.
+
+  This data type is not supported in Gemini API.
+  """
+
+  prompt_template_name: Optional[str] = Field(
+      default=None,
+      description="""Optional. The name of the Model Armor template to use for prompt sanitization.""",
+  )
+  response_template_name: Optional[str] = Field(
+      default=None,
+      description="""Optional. The name of the Model Armor template to use for response sanitization.""",
+  )
+
+
+class ModelArmorConfigDict(TypedDict, total=False):
+  """Configuration for Model Armor integrations of prompt and responses.
+
+  This data type is not supported in Gemini API.
+  """
+
+  prompt_template_name: Optional[str]
+  """Optional. The name of the Model Armor template to use for prompt sanitization."""
+
+  response_template_name: Optional[str]
+  """Optional. The name of the Model Armor template to use for response sanitization."""
+
+
+ModelArmorConfigOrDict = Union[ModelArmorConfig, ModelArmorConfigDict]
+
+
 class GenerateContentConfig(_common.BaseModel):
   """Optional model configuration parameters.
 
@@ -5219,6 +5253,12 @@ class GenerateContentConfig(_common.BaseModel):
       models. This field is not supported in Vertex AI.
       """,
   )
+  model_armor_config: Optional[ModelArmorConfig] = Field(
+      default=None,
+      description="""Settings for prompt and response sanitization using the Model Armor
+      service. If supplied, safety_settings must not be supplied.
+      """,
+  )
 
   @pydantic.field_validator('response_schema', mode='before')
   @classmethod
@@ -5429,6 +5469,11 @@ class GenerateContentConfigDict(TypedDict, total=False):
   models. This field is not supported in Vertex AI.
   """
 
+  model_armor_config: Optional[ModelArmorConfigDict]
+  """Settings for prompt and response sanitization using the Model Armor
+  service. If supplied, safety_settings must not be supplied.
+  """
+
 
 GenerateContentConfigOrDict = Union[
     GenerateContentConfig, GenerateContentConfigDict
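A short sketch of how the new `model_armor_config` field might be passed on a Vertex request; the template resource names are illustrative placeholders, and per the docstring above `safety_settings` must not be supplied alongside it:

from google import genai
from google.genai import types

# Hedged sketch: Model Armor is described above as not supported in the Gemini API,
# so a Vertex-backed client is assumed; template names are placeholders.
client = genai.Client(vertexai=True, project='my-project', location='us-central1')

response = client.models.generate_content(
    model='gemini-2.0-flash-001',
    contents='Summarize this support ticket.',
    config=types.GenerateContentConfig(
        model_armor_config=types.ModelArmorConfig(
            prompt_template_name='projects/my-project/locations/us-central1/templates/my-prompt-template',
            response_template_name='projects/my-project/locations/us-central1/templates/my-response-template',
        ),
    ),
)
print(response.text)
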
@@ -10503,6 +10548,114 @@ PreferenceOptimizationSpecOrDict = Union[
 ]
 
 
+class DistillationHyperParameters(_common.BaseModel):
+  """Hyperparameters for Distillation.
+
+  This data type is not supported in Gemini API.
+  """
+
+  adapter_size: Optional[AdapterSize] = Field(
+      default=None, description="""Optional. Adapter size for distillation."""
+  )
+  epoch_count: Optional[int] = Field(
+      default=None,
+      description="""Optional. Number of complete passes the model makes over the entire training dataset during training.""",
+  )
+  learning_rate_multiplier: Optional[float] = Field(
+      default=None,
+      description="""Optional. Multiplier for adjusting the default learning rate.""",
+  )
+
+
+class DistillationHyperParametersDict(TypedDict, total=False):
+  """Hyperparameters for Distillation.
+
+  This data type is not supported in Gemini API.
+  """
+
+  adapter_size: Optional[AdapterSize]
+  """Optional. Adapter size for distillation."""
+
+  epoch_count: Optional[int]
+  """Optional. Number of complete passes the model makes over the entire training dataset during training."""
+
+  learning_rate_multiplier: Optional[float]
+  """Optional. Multiplier for adjusting the default learning rate."""
+
+
+DistillationHyperParametersOrDict = Union[
+    DistillationHyperParameters, DistillationHyperParametersDict
+]
+
+
+class DistillationSpec(_common.BaseModel):
+  """Distillation tuning spec for tuning."""
+
+  prompt_dataset_uri: Optional[str] = Field(
+      default=None,
+      description="""The GCS URI of the prompt dataset to use during distillation.""",
+  )
+  base_teacher_model: Optional[str] = Field(
+      default=None,
+      description="""The base teacher model that is being distilled. See [Supported models](https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/tuning#supported_models).""",
+  )
+  hyper_parameters: Optional[DistillationHyperParameters] = Field(
+      default=None,
+      description="""Optional. Hyperparameters for Distillation.""",
+  )
+  pipeline_root_directory: Optional[str] = Field(
+      default=None,
+      description="""Deprecated. A path in a Cloud Storage bucket, which will be treated as the root output directory of the distillation pipeline. It is used by the system to generate the paths of output artifacts.""",
+  )
+  student_model: Optional[str] = Field(
+      default=None,
+      description="""The student model that is being tuned, e.g., "google/gemma-2b-1.1-it". Deprecated. Use base_model instead.""",
+  )
+  training_dataset_uri: Optional[str] = Field(
+      default=None,
+      description="""Deprecated. Cloud Storage path to file containing training dataset for tuning. The dataset must be formatted as a JSONL file.""",
+  )
+  tuned_teacher_model_source: Optional[str] = Field(
+      default=None,
+      description="""The resource name of the Tuned teacher model. Format: `projects/{project}/locations/{location}/models/{model}`.""",
+  )
+  validation_dataset_uri: Optional[str] = Field(
+      default=None,
+      description="""Optional. Cloud Storage path to file containing validation dataset for tuning. The dataset must be formatted as a JSONL file.""",
+  )
+
+
+class DistillationSpecDict(TypedDict, total=False):
+  """Distillation tuning spec for tuning."""
+
+  prompt_dataset_uri: Optional[str]
+  """The GCS URI of the prompt dataset to use during distillation."""
+
+  base_teacher_model: Optional[str]
+  """The base teacher model that is being distilled. See [Supported models](https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/tuning#supported_models)."""
+
+  hyper_parameters: Optional[DistillationHyperParametersDict]
+  """Optional. Hyperparameters for Distillation."""
+
+  pipeline_root_directory: Optional[str]
+  """Deprecated. A path in a Cloud Storage bucket, which will be treated as the root output directory of the distillation pipeline. It is used by the system to generate the paths of output artifacts."""
+
+  student_model: Optional[str]
+  """The student model that is being tuned, e.g., "google/gemma-2b-1.1-it". Deprecated. Use base_model instead."""
+
+  training_dataset_uri: Optional[str]
+  """Deprecated. Cloud Storage path to file containing training dataset for tuning. The dataset must be formatted as a JSONL file."""
+
+  tuned_teacher_model_source: Optional[str]
+  """The resource name of the Tuned teacher model. Format: `projects/{project}/locations/{location}/models/{model}`."""
+
+  validation_dataset_uri: Optional[str]
+  """Optional. Cloud Storage path to file containing validation dataset for tuning. The dataset must be formatted as a JSONL file."""
+
+
+DistillationSpecOrDict = Union[DistillationSpec, DistillationSpecDict]
+
+
 class GcsDestination(_common.BaseModel):
   """The Google Cloud Storage location where the output is to be written to."""
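As with the other config types in this module, either the Pydantic model or its TypedDict form is accepted wherever an `OrDict` union is declared; a small sketch with placeholder values:

from google.genai import types

# Hedged sketch: the model id and GCS path are placeholders, not values from this diff.
spec = types.DistillationSpec(
    base_teacher_model='gemini-2.0-flash-001',
    prompt_dataset_uri='gs://my-bucket/prompts.jsonl',
    hyper_parameters={'epoch_count': 3, 'learning_rate_multiplier': 1.0},
)
print(spec.model_dump(exclude_none=True))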
 
@@ -11699,6 +11852,9 @@ class TuningJob(_common.BaseModel):
   preference_optimization_spec: Optional[PreferenceOptimizationSpec] = Field(
       default=None, description="""Tuning Spec for Preference Optimization."""
   )
+  distillation_spec: Optional[DistillationSpec] = Field(
+      default=None, description="""Tuning Spec for Distillation."""
+  )
   tuning_data_stats: Optional[TuningDataStats] = Field(
       default=None,
       description="""Output only. The tuning data statistics associated with this TuningJob.""",
@@ -11802,6 +11958,9 @@ class TuningJobDict(TypedDict, total=False):
   preference_optimization_spec: Optional[PreferenceOptimizationSpecDict]
   """Tuning Spec for Preference Optimization."""
 
+  distillation_spec: Optional[DistillationSpecDict]
+  """Tuning Spec for Distillation."""
+
   tuning_data_stats: Optional[TuningDataStatsDict]
   """Output only. The tuning data statistics associated with this TuningJob."""
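Together with the `_TuningJob_from_vertex` change in tunings.py, a job retrieved from Vertex can now surface this spec; a minimal sketch, assuming an existing distillation job (the job name and project are placeholders):

from google import genai

client = genai.Client(vertexai=True, project='my-project', location='us-central1')
job = client.tunings.get(
    name='projects/my-project/locations/us-central1/tuningJobs/123'  # placeholder
)
if job.distillation_spec is not None:
    print(job.distillation_spec.base_teacher_model)
    print(job.distillation_spec.hyper_parameters)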
 
@@ -12090,7 +12249,7 @@ class CreateTuningJobConfig(_common.BaseModel):
   )
   method: Optional[TuningMethod] = Field(
       default=None,
-      description="""The method to use for tuning (SUPERVISED_FINE_TUNING or PREFERENCE_TUNING). If not set, the default method (SFT) will be used.""",
+      description="""The method to use for tuning (SUPERVISED_FINE_TUNING or PREFERENCE_TUNING or DISTILLATION). If not set, the default method (SFT) will be used.""",
   )
   validation_dataset: Optional[TuningValidationDataset] = Field(
       default=None,
@@ -12109,7 +12268,7 @@ class CreateTuningJobConfig(_common.BaseModel):
   )
   learning_rate_multiplier: Optional[float] = Field(
       default=None,
-      description="""Multiplier for adjusting the default learning rate.""",
+      description="""Multiplier for adjusting the default learning rate. 1P models only. Mutually exclusive with learning_rate.""",
   )
   export_last_checkpoint_only: Optional[bool] = Field(
       default=None,
@@ -12122,13 +12281,20 @@ class CreateTuningJobConfig(_common.BaseModel):
   adapter_size: Optional[AdapterSize] = Field(
       default=None, description="""Adapter size for tuning."""
   )
+  tuning_mode: Optional[TuningMode] = Field(
+      default=None, description="""Tuning mode for SFT tuning."""
+  )
+  custom_base_model: Optional[str] = Field(
+      default=None,
+      description="""Custom base model for tuning. This is only supported for OSS models in Vertex.""",
+  )
   batch_size: Optional[int] = Field(
       default=None,
-      description="""The batch size hyperparameter for tuning. If not set, a default of 4 or 16 will be used based on the number of training examples.""",
+      description="""The batch size hyperparameter for tuning. This is only supported for OSS models in Vertex.""",
   )
   learning_rate: Optional[float] = Field(
       default=None,
-      description="""The learning rate hyperparameter for tuning. If not set, a default of 0.001 or 0.0002 will be calculated based on the number of training examples.""",
+      description="""The learning rate for tuning. OSS models only. Mutually exclusive with learning_rate_multiplier.""",
   )
   evaluation_config: Optional[EvaluationConfig] = Field(
       default=None, description="""Evaluation config for the tuning job."""
@@ -12141,6 +12307,22 @@ class CreateTuningJobConfig(_common.BaseModel):
       default=None,
       description="""Weight for KL Divergence regularization, Preference Optimization tuning only.""",
   )
+  base_teacher_model: Optional[str] = Field(
+      default=None,
+      description="""The base teacher model that is being distilled. Distillation only.""",
+  )
+  tuned_teacher_model_source: Optional[str] = Field(
+      default=None,
+      description="""The resource name of the Tuned teacher model. Distillation only.""",
+  )
+  sft_loss_weight_multiplier: Optional[float] = Field(
+      default=None,
+      description="""Multiplier for adjusting the weight of the SFT loss. Distillation only.""",
+  )
+  output_uri: Optional[str] = Field(
+      default=None,
+      description="""The Google Cloud Storage location where the tuning job outputs are written.""",
+  )
 
 
 class CreateTuningJobConfigDict(TypedDict, total=False):
@@ -12150,7 +12332,7 @@ class CreateTuningJobConfigDict(TypedDict, total=False):
   """Used to override HTTP request options."""
 
   method: Optional[TuningMethod]
-  """The method to use for tuning (SUPERVISED_FINE_TUNING or PREFERENCE_TUNING). If not set, the default method (SFT) will be used."""
+  """The method to use for tuning (SUPERVISED_FINE_TUNING or PREFERENCE_TUNING or DISTILLATION). If not set, the default method (SFT) will be used."""
 
   validation_dataset: Optional[TuningValidationDatasetDict]
   """Validation dataset for tuning. The dataset must be formatted as a JSONL file."""
@@ -12165,7 +12347,7 @@ class CreateTuningJobConfigDict(TypedDict, total=False):
   """Number of complete passes the model makes over the entire training dataset during training."""
 
   learning_rate_multiplier: Optional[float]
-  """Multiplier for adjusting the default learning rate."""
+  """Multiplier for adjusting the default learning rate. 1P models only. Mutually exclusive with learning_rate."""
 
   export_last_checkpoint_only: Optional[bool]
   """If set to true, disable intermediate checkpoints and only the last checkpoint will be exported. Otherwise, enable intermediate checkpoints."""
@@ -12176,11 +12358,17 @@ class CreateTuningJobConfigDict(TypedDict, total=False):
   adapter_size: Optional[AdapterSize]
   """Adapter size for tuning."""
 
+  tuning_mode: Optional[TuningMode]
+  """Tuning mode for SFT tuning."""
+
+  custom_base_model: Optional[str]
+  """Custom base model for tuning. This is only supported for OSS models in Vertex."""
+
   batch_size: Optional[int]
-  """The batch size hyperparameter for tuning. If not set, a default of 4 or 16 will be used based on the number of training examples."""
+  """The batch size hyperparameter for tuning. This is only supported for OSS models in Vertex."""
 
   learning_rate: Optional[float]
-  """The learning rate hyperparameter for tuning. If not set, a default of 0.001 or 0.0002 will be calculated based on the number of training examples."""
+  """The learning rate for tuning. OSS models only. Mutually exclusive with learning_rate_multiplier."""
 
   evaluation_config: Optional[EvaluationConfigDict]
   """Evaluation config for the tuning job."""
@@ -12191,6 +12379,18 @@ class CreateTuningJobConfigDict(TypedDict, total=False):
   beta: Optional[float]
   """Weight for KL Divergence regularization, Preference Optimization tuning only."""
 
+  base_teacher_model: Optional[str]
+  """The base teacher model that is being distilled. Distillation only."""
+
+  tuned_teacher_model_source: Optional[str]
+  """The resource name of the Tuned teacher model. Distillation only."""
+
+  sft_loss_weight_multiplier: Optional[float]
+  """Multiplier for adjusting the weight of the SFT loss. Distillation only."""
+
+  output_uri: Optional[str]
+  """The Google Cloud Storage location where the tuning job outputs are written."""
+
 
 CreateTuningJobConfigOrDict = Union[
     CreateTuningJobConfig, CreateTuningJobConfigDict
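Besides the distillation fields, the config also gains SFT-oriented knobs: `tuning_mode` and `custom_base_model`, while `batch_size` and `learning_rate` are now accepted on Vertex for OSS models instead of being rejected. A hedged sketch of a supervised job against a custom base model; every id and path is a placeholder, and the expected format of `custom_base_model` is not spelled out in this diff:

from google import genai
from google.genai import types

# Hedged sketch: ids and paths are illustrative placeholders.
client = genai.Client(vertexai=True, project='my-project', location='us-central1')

job = client.tunings.tune(
    base_model='meta/llama3-8b',  # illustrative OSS base model id
    training_dataset=types.TuningDataset(gcs_uri='gs://my-bucket/train.jsonl'),
    config=types.CreateTuningJobConfig(
        method=types.TuningMethod.SUPERVISED_FINE_TUNING,
        custom_base_model='my-custom-checkpoint',  # -> customBaseModel; value format is an assumption
        batch_size=16,       # OSS models only -> supervisedTuningSpec.hyperParameters.batchSize
        learning_rate=2e-5,  # OSS models only; mutually exclusive with learning_rate_multiplier
        output_uri='gs://my-bucket/tuning-output',
    ),
)
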
@@ -13983,8 +14183,8 @@ class RegisterFilesConfigDict(TypedDict, total=False):
 RegisterFilesConfigOrDict = Union[RegisterFilesConfig, RegisterFilesConfigDict]
 
 
-class _RegisterFilesParameters(_common.BaseModel):
-  """Generates the parameters for the private _Register method."""
+class _InternalRegisterFilesParameters(_common.BaseModel):
+  """Parameters for the private _Register method."""
 
   uris: Optional[list[str]] = Field(
       default=None,
@@ -13996,8 +14196,8 @@ class _RegisterFilesParameters(_common.BaseModel):
   )
 
 
-class _RegisterFilesParametersDict(TypedDict, total=False):
-  """Generates the parameters for the private _Register method."""
+class _InternalRegisterFilesParametersDict(TypedDict, total=False):
+  """Parameters for the private _Register method."""
 
   uris: Optional[list[str]]
   """The Google Cloud Storage URIs to register. Example: `gs://bucket/object`."""
@@ -14006,8 +14206,8 @@ class _RegisterFilesParametersDict(TypedDict, total=False):
   """Used to override the default configuration."""
 
 
-_RegisterFilesParametersOrDict = Union[
-    _RegisterFilesParameters, _RegisterFilesParametersDict
+_InternalRegisterFilesParametersOrDict = Union[
+    _InternalRegisterFilesParameters, _InternalRegisterFilesParametersDict
 ]
 
 
@@ -14182,6 +14382,10 @@ class InlinedResponse(_common.BaseModel):
       description="""The response to the request.
       """,
   )
+  metadata: Optional[dict[str, str]] = Field(
+      default=None,
+      description="""The metadata to be associated with the request.""",
+  )
   error: Optional[JobError] = Field(
       default=None,
       description="""The error encountered while processing the request.
@@ -14196,6 +14400,9 @@ class InlinedResponseDict(TypedDict, total=False):
   """The response to the request.
   """
 
+  metadata: Optional[dict[str, str]]
+  """The metadata to be associated with the request."""
+
   error: Optional[JobErrorDict]
   """The error encountered while processing the request.
   """
google/genai/version.py CHANGED
@@ -13,4 +13,4 @@
 # limitations under the License.
 #
 
-__version__ = '1.59.0'  # x-release-please-version
+__version__ = '1.61.0'  # x-release-please-version
{google_genai-1.59.0.dist-info → google_genai-1.61.0.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: google-genai
-Version: 1.59.0
+Version: 1.61.0
 Summary: GenAI Python SDK
 Author-email: Google LLC <googleapis-packages@google.com>
 License-Expression: Apache-2.0