google-genai 1.47.0__py3-none-any.whl → 1.49.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
google/genai/models.py CHANGED
@@ -2299,6 +2299,13 @@ def _GenerationConfig_to_vertex(
         getv(from_object, ['model_selection_config']),
     )
 
+  if getv(from_object, ['response_json_schema']) is not None:
+    setv(
+        to_object,
+        ['responseJsonSchema'],
+        getv(from_object, ['response_json_schema']),
+    )
+
   if getv(from_object, ['audio_timestamp']) is not None:
     setv(to_object, ['audioTimestamp'], getv(from_object, ['audio_timestamp']))
 
@@ -2337,13 +2344,6 @@ def _GenerationConfig_to_vertex(
         to_object, ['presencePenalty'], getv(from_object, ['presence_penalty'])
     )
 
-  if getv(from_object, ['response_json_schema']) is not None:
-    setv(
-        to_object,
-        ['responseJsonSchema'],
-        getv(from_object, ['response_json_schema']),
-    )
-
   if getv(from_object, ['response_logprobs']) is not None:
     setv(
         to_object,
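
These two hunks relocate the response_json_schema handling within _GenerationConfig_to_vertex; the field is still mapped to responseJsonSchema on the Vertex AI wire format. A minimal usage sketch of that mapping, assuming an illustrative project, model name, and schema (none of these values appear in the diff):

# Hedged sketch: response_json_schema on GenerateContentConfig is serialized
# as 'responseJsonSchema' for the Vertex AI backend.
from google import genai
from google.genai import types

client = genai.Client(
    vertexai=True, project='my-project', location='us-central1'  # illustrative
)

city_schema = {  # illustrative JSON Schema
    'type': 'object',
    'properties': {'city': {'type': 'string'}, 'population': {'type': 'integer'}},
    'required': ['city'],
}

response = client.models.generate_content(
    model='gemini-2.0-flash',  # illustrative model name
    contents='Name one large city and its population as JSON.',
    config=types.GenerateContentConfig(
        response_mime_type='application/json',
        response_json_schema=city_schema,
    ),
)
print(response.text)
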
@@ -2462,6 +2462,11 @@ def _GoogleSearch_to_mldev(
         'exclude_domains parameter is not supported in Gemini API.'
     )
 
+  if getv(from_object, ['blocking_confidence']) is not None:
+    raise ValueError(
+        'blocking_confidence parameter is not supported in Gemini API.'
+    )
+
   if getv(from_object, ['time_range_filter']) is not None:
     setv(
         to_object, ['timeRangeFilter'], getv(from_object, ['time_range_filter'])
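
_GoogleSearch_to_mldev now rejects blocking_confidence on the Gemini Developer API, mirroring the existing exclude_domains check. A small sketch that exercises the new check directly, assuming the converter keeps a (from_object, parent_object) signature; the field value is illustrative:

# Hedged sketch: the private converter raises for blocking_confidence when the
# request targets the Gemini Developer API.
from google.genai.models import _GoogleSearch_to_mldev

try:
  _GoogleSearch_to_mldev({'blocking_confidence': 'BLOCK_LOW_AND_ABOVE'}, {})  # value is illustrative
except ValueError as err:
  print(err)  # blocking_confidence parameter is not supported in Gemini API.
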
@@ -3316,16 +3321,12 @@ def _Tool_to_mldev(
         getv(from_object, ['google_search_retrieval']),
     )
 
-  if getv(from_object, ['google_maps']) is not None:
-    setv(
-        to_object,
-        ['googleMaps'],
-        _GoogleMaps_to_mldev(getv(from_object, ['google_maps']), to_object),
-    )
-
   if getv(from_object, ['computer_use']) is not None:
     setv(to_object, ['computerUse'], getv(from_object, ['computer_use']))
 
+  if getv(from_object, ['file_search']) is not None:
+    setv(to_object, ['fileSearch'], getv(from_object, ['file_search']))
+
   if getv(from_object, ['code_execution']) is not None:
     setv(to_object, ['codeExecution'], getv(from_object, ['code_execution']))
 
@@ -3334,6 +3335,13 @@ def _Tool_to_mldev(
         'enterprise_web_search parameter is not supported in Gemini API.'
     )
 
+  if getv(from_object, ['google_maps']) is not None:
+    setv(
+        to_object,
+        ['googleMaps'],
+        _GoogleMaps_to_mldev(getv(from_object, ['google_maps']), to_object),
+    )
+
   if getv(from_object, ['google_search']) is not None:
     setv(
         to_object,
@@ -3372,12 +3380,12 @@ def _Tool_to_vertex(
         getv(from_object, ['google_search_retrieval']),
     )
 
-  if getv(from_object, ['google_maps']) is not None:
-    setv(to_object, ['googleMaps'], getv(from_object, ['google_maps']))
-
   if getv(from_object, ['computer_use']) is not None:
     setv(to_object, ['computerUse'], getv(from_object, ['computer_use']))
 
+  if getv(from_object, ['file_search']) is not None:
+    raise ValueError('file_search parameter is not supported in Vertex AI.')
+
   if getv(from_object, ['code_execution']) is not None:
     setv(to_object, ['codeExecution'], getv(from_object, ['code_execution']))
 
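
Together, the _Tool_to_mldev and _Tool_to_vertex hunks add a file_search tool field: the Gemini Developer API converter forwards it as fileSearch, while the Vertex AI converter raises. A hedged sketch of passing such a tool; the dict shape, store name, and model are assumptions based only on the field name, not confirmed by this diff:

# Hedged sketch: a tool dict carrying a file_search entry (shape assumed, not
# shown in this diff).
from google import genai
from google.genai import types

client = genai.Client(api_key='YOUR_API_KEY')  # illustrative placeholder

file_search_tool = {
    'file_search': {'file_search_store_names': ['fileSearchStores/my-store']}  # assumed shape
}

response = client.models.generate_content(
    model='gemini-2.0-flash',  # illustrative model name
    contents='Summarize what my uploaded documents say about pricing.',
    config=types.GenerateContentConfig(tools=[file_search_tool]),
)

# With a Vertex AI client the same config is rejected by _Tool_to_vertex:
#   ValueError: file_search parameter is not supported in Vertex AI.
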
@@ -3388,6 +3396,9 @@ def _Tool_to_vertex(
         getv(from_object, ['enterprise_web_search']),
     )
 
+  if getv(from_object, ['google_maps']) is not None:
+    setv(to_object, ['googleMaps'], getv(from_object, ['google_maps']))
+
   if getv(from_object, ['google_search']) is not None:
     setv(to_object, ['googleSearch'], getv(from_object, ['google_search']))
 
@@ -4993,6 +5004,9 @@ class Models(_api_module.BaseModule):
         # scones.
     """
 
+    incompatible_tools_indexes = (
+        _extra_utils.find_afc_incompatible_tool_indexes(config)
+    )
     parsed_config = _extra_utils.parse_config_for_mcp_usage(config)
     if (
         parsed_config
@@ -5006,6 +5020,28 @@ class Models(_api_module.BaseModule):
       return self._generate_content(
           model=model, contents=contents, config=parsed_config
       )
+    if incompatible_tools_indexes:
+      original_tools_length = 0
+      if isinstance(config, types.GenerateContentConfig):
+        if config.tools:
+          original_tools_length = len(config.tools)
+      elif isinstance(config, dict):
+        tools = config.get('tools', [])
+        if tools:
+          original_tools_length = len(tools)
+      if len(incompatible_tools_indexes) != original_tools_length:
+        indices_str = ', '.join(map(str, incompatible_tools_indexes))
+        logger.warning(
+            'Tools at indices [%s] are not compatible with automatic function '
+            'calling (AFC). AFC is disabled. If AFC is intended, please '
+            'include python callables in the tool list, and do not include '
+            'function declaration in the tool list.',
+            indices_str,
+        )
+        return self._generate_content(
+            model=model, contents=contents, config=parsed_config
+        )
+
     remaining_remote_calls_afc = _extra_utils.get_max_remote_calls_afc(
         parsed_config
     )
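
generate_content now pre-screens the tool list with _extra_utils.find_afc_incompatible_tool_indexes: when tools that automatic function calling (AFC) cannot execute are mixed with Python callables, the SDK logs the warning above and falls back to a single request without the AFC loop. A hedged sketch of a config that would take this path; the callable, declaration, and model name are illustrative:

# Hedged sketch: mixing an AFC-compatible Python callable with a Tool built
# from a FunctionDeclaration triggers the new warning and the non-AFC fallback.
from google import genai
from google.genai import types


def get_weather(city: str) -> str:
  """Illustrative callable that AFC could execute."""
  return f'Sunny in {city}'


declared_tool = types.Tool(
    function_declarations=[
        types.FunctionDeclaration(
            name='get_stock_price',
            description='Illustrative declaration with no callable behind it.',
        )
    ]
)

client = genai.Client(api_key='YOUR_API_KEY')  # illustrative placeholder
response = client.models.generate_content(
    model='gemini-2.0-flash',  # illustrative model name
    contents='What is the weather in Paris?',
    config=types.GenerateContentConfig(tools=[get_weather, declared_tool]),
)
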
@@ -5129,6 +5165,9 @@ class Models(_api_module.BaseModule):
         # scones.
     """
 
+    incompatible_tools_indexes = (
+        _extra_utils.find_afc_incompatible_tool_indexes(config)
+    )
     parsed_config = _extra_utils.parse_config_for_mcp_usage(config)
     if (
         parsed_config
@@ -5144,6 +5183,27 @@ class Models(_api_module.BaseModule):
       )
       return
 
+    if incompatible_tools_indexes:
+      original_tools_length = 0
+      if isinstance(config, types.GenerateContentConfig):
+        if config.tools:
+          original_tools_length = len(config.tools)
+      elif isinstance(config, dict):
+        tools = config.get('tools', [])
+        if tools:
+          original_tools_length = len(tools)
+      if len(incompatible_tools_indexes) != original_tools_length:
+        indices_str = ', '.join(map(str, incompatible_tools_indexes))
+        logger.warning(
+            'Tools at indices [%s] are not compatible with automatic function '
+            'calling. AFC will be disabled.',
+            indices_str,
+        )
+        yield from self._generate_content_stream(
+            model=model, contents=contents, config=parsed_config
+        )
+        return
+
     remaining_remote_calls_afc = _extra_utils.get_max_remote_calls_afc(
         parsed_config
     )
@@ -5168,7 +5228,7 @@ class Models(_api_module.BaseModule):
       # Yield chunks only if there's no function response parts.
       for chunk in response:
         if not function_map:
-          _extra_utils.append_chunk_contents(contents, chunk)
+          contents = _extra_utils.append_chunk_contents(contents, chunk)  # type: ignore[assignment]
           yield chunk
         else:
           if (
@@ -5181,7 +5241,7 @@ class Models(_api_module.BaseModule):
               chunk, function_map
           )
           if not func_response_parts:
-            _extra_utils.append_chunk_contents(contents, chunk)
+            contents = _extra_utils.append_chunk_contents(contents, chunk)  # type: ignore[assignment]
             yield chunk
 
         else:
@@ -5191,7 +5251,7 @@ class Models(_api_module.BaseModule):
           chunk.automatic_function_calling_history = (
               automatic_function_calling_history
           )
-          _extra_utils.append_chunk_contents(contents, chunk)
+          contents = _extra_utils.append_chunk_contents(contents, chunk)  # type: ignore[assignment]
           yield chunk
           if (
               chunk is None
@@ -6759,6 +6819,9 @@ class AsyncModels(_api_module.BaseModule):
         # J'aime les bagels.
     """
     # Retrieve and cache any MCP sessions if provided.
+    incompatible_tools_indexes = (
+        _extra_utils.find_afc_incompatible_tool_indexes(config)
+    )
     parsed_config, mcp_to_genai_tool_adapters = (
         await _extra_utils.parse_config_for_mcp_sessions(config)
     )
@@ -6766,6 +6829,27 @@ class AsyncModels(_api_module.BaseModule):
       return await self._generate_content(
           model=model, contents=contents, config=parsed_config
       )
+    if incompatible_tools_indexes:
+      original_tools_length = 0
+      if isinstance(config, types.GenerateContentConfig):
+        if config.tools:
+          original_tools_length = len(config.tools)
+      elif isinstance(config, dict):
+        tools = config.get('tools', [])
+        if tools:
+          original_tools_length = len(tools)
+      if len(incompatible_tools_indexes) != original_tools_length:
+        indices_str = ', '.join(map(str, incompatible_tools_indexes))
+        logger.warning(
+            'Tools at indices [%s] are not compatible with automatic function '
+            'calling (AFC). AFC is disabled. If AFC is intended, please '
+            'include python callables in the tool list, and do not include '
+            'function declaration in the tool list.',
+            indices_str,
+        )
+        return await self._generate_content(
+            model=model, contents=contents, config=parsed_config
+        )
     remaining_remote_calls_afc = _extra_utils.get_max_remote_calls_afc(
         parsed_config
     )
@@ -6890,6 +6974,10 @@ class AsyncModels(_api_module.BaseModule):
         # scones.
     """
 
+    # Retrieve and cache any MCP sessions if provided.
+    incompatible_tools_indexes = (
+        _extra_utils.find_afc_incompatible_tool_indexes(config)
+    )
     # Retrieve and cache any MCP sessions if provided.
     parsed_config, mcp_to_genai_tool_adapters = (
         await _extra_utils.parse_config_for_mcp_sessions(config)
@@ -6905,6 +6993,34 @@ class AsyncModels(_api_module.BaseModule):
 
       return base_async_generator(model, contents, parsed_config)  # type: ignore[no-untyped-call, no-any-return]
 
+    if incompatible_tools_indexes:
+      original_tools_length = 0
+      if isinstance(config, types.GenerateContentConfig):
+        if config.tools:
+          original_tools_length = len(config.tools)
+      elif isinstance(config, dict):
+        tools = config.get('tools', [])
+        if tools:
+          original_tools_length = len(tools)
+      if len(incompatible_tools_indexes) != original_tools_length:
+        indices_str = ', '.join(map(str, incompatible_tools_indexes))
+        logger.warning(
+            'Tools at indices [%s] are not compatible with automatic function '
+            'calling (AFC). AFC is disabled. If AFC is intended, please '
+            'include python callables in the tool list, and do not include '
+            'function declaration in the tool list.',
+            indices_str,
+        )
+        response = await self._generate_content_stream(
+            model=model, contents=contents, config=parsed_config
+        )
+
+        async def base_async_generator(model, contents, config):  # type: ignore[no-untyped-def]
+          async for chunk in response:  # type: ignore[attr-defined]
+            yield chunk
+
+        return base_async_generator(model, contents, parsed_config)  # type: ignore[no-untyped-call, no-any-return]
+
     async def async_generator(model, contents, config):  # type: ignore[no-untyped-def]
       remaining_remote_calls_afc = _extra_utils.get_max_remote_calls_afc(config)
       logger.info(
@@ -6938,7 +7054,7 @@ class AsyncModels(_api_module.BaseModule):
         # Yield chunks only if there's no function response parts.
         async for chunk in response:  # type: ignore[attr-defined]
           if not function_map:
-            _extra_utils.append_chunk_contents(contents, chunk)
+            contents = _extra_utils.append_chunk_contents(contents, chunk)
             yield chunk
           else:
             if (
@@ -6953,7 +7069,7 @@ class AsyncModels(_api_module.BaseModule):
                 )
             )
             if not func_response_parts:
-              _extra_utils.append_chunk_contents(contents, chunk)
+              contents = _extra_utils.append_chunk_contents(contents, chunk)
              yield chunk
 
           else:
@@ -6964,7 +7080,7 @@ class AsyncModels(_api_module.BaseModule):
             chunk.automatic_function_calling_history = (
                 automatic_function_calling_history
             )
-            _extra_utils.append_chunk_contents(contents, chunk)
+            contents = _extra_utils.append_chunk_contents(contents, chunk)
             yield chunk
             if (
                 chunk is None
google/genai/pagers.py CHANGED
@@ -25,7 +25,13 @@ from . import types
 T = TypeVar('T')
 
 PagedItem = Literal[
-    'batch_jobs', 'models', 'tuning_jobs', 'files', 'cached_contents'
+    'batch_jobs',
+    'models',
+    'tuning_jobs',
+    'files',
+    'cached_contents',
+    'file_search_stores',
+    'documents',
 ]
 
 
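
pagers.py widens the PagedItem Literal so pagers over the new File Search resources type-check. A small sketch (variable names are illustrative):

# Sketch: PagedItem is the Literal of collection names a Pager can iterate;
# the two File Search collections are now valid members.
from google.genai.pagers import PagedItem

stores_item: PagedItem = 'file_search_stores'  # accepted after this change
documents_item: PagedItem = 'documents'        # accepted after this change
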
google/genai/tunings.py CHANGED
@@ -32,6 +32,60 @@ from .pagers import AsyncPager, Pager
 logger = logging.getLogger('google_genai.tunings')
 
 
+def _AutoraterConfig_from_vertex(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+    root_object: Optional[Union[dict[str, Any], object]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['samplingCount']) is not None:
+    setv(to_object, ['sampling_count'], getv(from_object, ['samplingCount']))
+
+  if getv(from_object, ['flipEnabled']) is not None:
+    setv(to_object, ['flip_enabled'], getv(from_object, ['flipEnabled']))
+
+  if getv(from_object, ['autoraterModel']) is not None:
+    setv(to_object, ['autorater_model'], getv(from_object, ['autoraterModel']))
+
+  if getv(from_object, ['generationConfig']) is not None:
+    setv(
+        to_object,
+        ['generation_config'],
+        _GenerationConfig_from_vertex(
+            getv(from_object, ['generationConfig']), to_object, root_object
+        ),
+    )
+
+  return to_object
+
+
+def _AutoraterConfig_to_vertex(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+    root_object: Optional[Union[dict[str, Any], object]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['sampling_count']) is not None:
+    setv(to_object, ['samplingCount'], getv(from_object, ['sampling_count']))
+
+  if getv(from_object, ['flip_enabled']) is not None:
+    setv(to_object, ['flipEnabled'], getv(from_object, ['flip_enabled']))
+
+  if getv(from_object, ['autorater_model']) is not None:
+    setv(to_object, ['autoraterModel'], getv(from_object, ['autorater_model']))
+
+  if getv(from_object, ['generation_config']) is not None:
+    setv(
+        to_object,
+        ['generationConfig'],
+        _GenerationConfig_to_vertex(
+            getv(from_object, ['generation_config']), to_object, root_object
+        ),
+    )
+
+  return to_object
+
+
 def _CancelTuningJobParameters_to_mldev(
     from_object: Union[dict[str, Any], object],
     parent_object: Optional[dict[str, Any]] = None,
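
The new _AutoraterConfig_from_vertex/_AutoraterConfig_to_vertex helpers convert autorater settings between the SDK's snake_case fields and Vertex AI's camelCase wire format. A minimal sketch of the mapping, using plain dicts and an illustrative model resource name:

# Hedged sketch: the converter maps autorater fields between naming styles
# (plain dicts stand in for the SDK types; values are illustrative).
from google.genai.tunings import _AutoraterConfig_to_vertex

snake_case = {
    'sampling_count': 4,
    'flip_enabled': True,
    'autorater_model': 'publishers/google/models/gemini-2.0-flash',  # illustrative
}

print(_AutoraterConfig_to_vertex(snake_case))
# -> {'samplingCount': 4, 'flipEnabled': True, 'autoraterModel': '...'}
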
@@ -360,7 +414,11 @@ def _EvaluationConfig_from_vertex(
 
   if getv(from_object, ['autoraterConfig']) is not None:
     setv(
-        to_object, ['autorater_config'], getv(from_object, ['autoraterConfig'])
+        to_object,
+        ['autorater_config'],
+        _AutoraterConfig_from_vertex(
+            getv(from_object, ['autoraterConfig']), to_object, root_object
+        ),
     )
 
   return to_object
@@ -380,7 +438,240 @@ def _EvaluationConfig_to_vertex(
 
   if getv(from_object, ['autorater_config']) is not None:
     setv(
-        to_object, ['autoraterConfig'], getv(from_object, ['autorater_config'])
+        to_object,
+        ['autoraterConfig'],
+        _AutoraterConfig_to_vertex(
+            getv(from_object, ['autorater_config']), to_object, root_object
+        ),
+    )
+
+  return to_object
+
+
+def _GenerationConfig_from_vertex(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+    root_object: Optional[Union[dict[str, Any], object]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['modelConfig']) is not None:
+    setv(
+        to_object,
+        ['model_selection_config'],
+        getv(from_object, ['modelConfig']),
+    )
+
+  if getv(from_object, ['responseJsonSchema']) is not None:
+    setv(
+        to_object,
+        ['response_json_schema'],
+        getv(from_object, ['responseJsonSchema']),
+    )
+
+  if getv(from_object, ['audioTimestamp']) is not None:
+    setv(to_object, ['audio_timestamp'], getv(from_object, ['audioTimestamp']))
+
+  if getv(from_object, ['candidateCount']) is not None:
+    setv(to_object, ['candidate_count'], getv(from_object, ['candidateCount']))
+
+  if getv(from_object, ['enableAffectiveDialog']) is not None:
+    setv(
+        to_object,
+        ['enable_affective_dialog'],
+        getv(from_object, ['enableAffectiveDialog']),
+    )
+
+  if getv(from_object, ['frequencyPenalty']) is not None:
+    setv(
+        to_object,
+        ['frequency_penalty'],
+        getv(from_object, ['frequencyPenalty']),
+    )
+
+  if getv(from_object, ['logprobs']) is not None:
+    setv(to_object, ['logprobs'], getv(from_object, ['logprobs']))
+
+  if getv(from_object, ['maxOutputTokens']) is not None:
+    setv(
+        to_object, ['max_output_tokens'], getv(from_object, ['maxOutputTokens'])
+    )
+
+  if getv(from_object, ['mediaResolution']) is not None:
+    setv(
+        to_object, ['media_resolution'], getv(from_object, ['mediaResolution'])
+    )
+
+  if getv(from_object, ['presencePenalty']) is not None:
+    setv(
+        to_object, ['presence_penalty'], getv(from_object, ['presencePenalty'])
+    )
+
+  if getv(from_object, ['responseLogprobs']) is not None:
+    setv(
+        to_object,
+        ['response_logprobs'],
+        getv(from_object, ['responseLogprobs']),
+    )
+
+  if getv(from_object, ['responseMimeType']) is not None:
+    setv(
+        to_object,
+        ['response_mime_type'],
+        getv(from_object, ['responseMimeType']),
+    )
+
+  if getv(from_object, ['responseModalities']) is not None:
+    setv(
+        to_object,
+        ['response_modalities'],
+        getv(from_object, ['responseModalities']),
+    )
+
+  if getv(from_object, ['responseSchema']) is not None:
+    setv(to_object, ['response_schema'], getv(from_object, ['responseSchema']))
+
+  if getv(from_object, ['routingConfig']) is not None:
+    setv(to_object, ['routing_config'], getv(from_object, ['routingConfig']))
+
+  if getv(from_object, ['seed']) is not None:
+    setv(to_object, ['seed'], getv(from_object, ['seed']))
+
+  if getv(from_object, ['speechConfig']) is not None:
+    setv(to_object, ['speech_config'], getv(from_object, ['speechConfig']))
+
+  if getv(from_object, ['stopSequences']) is not None:
+    setv(to_object, ['stop_sequences'], getv(from_object, ['stopSequences']))
+
+  if getv(from_object, ['temperature']) is not None:
+    setv(to_object, ['temperature'], getv(from_object, ['temperature']))
+
+  if getv(from_object, ['thinkingConfig']) is not None:
+    setv(to_object, ['thinking_config'], getv(from_object, ['thinkingConfig']))
+
+  if getv(from_object, ['topK']) is not None:
+    setv(to_object, ['top_k'], getv(from_object, ['topK']))
+
+  if getv(from_object, ['topP']) is not None:
+    setv(to_object, ['top_p'], getv(from_object, ['topP']))
+
+  return to_object
+
+
+def _GenerationConfig_to_vertex(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+    root_object: Optional[Union[dict[str, Any], object]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['model_selection_config']) is not None:
+    setv(
+        to_object,
+        ['modelConfig'],
+        getv(from_object, ['model_selection_config']),
+    )
+
+  if getv(from_object, ['response_json_schema']) is not None:
+    setv(
+        to_object,
+        ['responseJsonSchema'],
+        getv(from_object, ['response_json_schema']),
+    )
+
+  if getv(from_object, ['audio_timestamp']) is not None:
+    setv(to_object, ['audioTimestamp'], getv(from_object, ['audio_timestamp']))
+
+  if getv(from_object, ['candidate_count']) is not None:
+    setv(to_object, ['candidateCount'], getv(from_object, ['candidate_count']))
+
+  if getv(from_object, ['enable_affective_dialog']) is not None:
+    setv(
+        to_object,
+        ['enableAffectiveDialog'],
+        getv(from_object, ['enable_affective_dialog']),
+    )
+
+  if getv(from_object, ['frequency_penalty']) is not None:
+    setv(
+        to_object,
+        ['frequencyPenalty'],
+        getv(from_object, ['frequency_penalty']),
+    )
+
+  if getv(from_object, ['logprobs']) is not None:
+    setv(to_object, ['logprobs'], getv(from_object, ['logprobs']))
+
+  if getv(from_object, ['max_output_tokens']) is not None:
+    setv(
+        to_object, ['maxOutputTokens'], getv(from_object, ['max_output_tokens'])
+    )
+
+  if getv(from_object, ['media_resolution']) is not None:
+    setv(
+        to_object, ['mediaResolution'], getv(from_object, ['media_resolution'])
+    )
+
+  if getv(from_object, ['presence_penalty']) is not None:
+    setv(
+        to_object, ['presencePenalty'], getv(from_object, ['presence_penalty'])
+    )
+
+  if getv(from_object, ['response_logprobs']) is not None:
+    setv(
+        to_object,
+        ['responseLogprobs'],
+        getv(from_object, ['response_logprobs']),
+    )
+
+  if getv(from_object, ['response_mime_type']) is not None:
+    setv(
+        to_object,
+        ['responseMimeType'],
+        getv(from_object, ['response_mime_type']),
+    )
+
+  if getv(from_object, ['response_modalities']) is not None:
+    setv(
+        to_object,
+        ['responseModalities'],
+        getv(from_object, ['response_modalities']),
+    )
+
+  if getv(from_object, ['response_schema']) is not None:
+    setv(to_object, ['responseSchema'], getv(from_object, ['response_schema']))
+
+  if getv(from_object, ['routing_config']) is not None:
+    setv(to_object, ['routingConfig'], getv(from_object, ['routing_config']))
+
+  if getv(from_object, ['seed']) is not None:
+    setv(to_object, ['seed'], getv(from_object, ['seed']))
+
+  if getv(from_object, ['speech_config']) is not None:
+    setv(
+        to_object,
+        ['speechConfig'],
+        _SpeechConfig_to_vertex(
+            getv(from_object, ['speech_config']), to_object, root_object
+        ),
+    )
+
+  if getv(from_object, ['stop_sequences']) is not None:
+    setv(to_object, ['stopSequences'], getv(from_object, ['stop_sequences']))
+
+  if getv(from_object, ['temperature']) is not None:
+    setv(to_object, ['temperature'], getv(from_object, ['temperature']))
+
+  if getv(from_object, ['thinking_config']) is not None:
+    setv(to_object, ['thinkingConfig'], getv(from_object, ['thinking_config']))
+
+  if getv(from_object, ['top_k']) is not None:
+    setv(to_object, ['topK'], getv(from_object, ['top_k']))
+
+  if getv(from_object, ['top_p']) is not None:
+    setv(to_object, ['topP'], getv(from_object, ['top_p']))
+
+  if getv(from_object, ['enable_enhanced_civic_answers']) is not None:
+    raise ValueError(
+        'enable_enhanced_civic_answers parameter is not supported in Vertex AI.'
     )
 
   return to_object
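
tunings.py also gains its own _GenerationConfig_from_vertex/_GenerationConfig_to_vertex pair so an autorater's generation settings can round-trip; note that multi_speaker_voice_config (via _SpeechConfig_to_vertex, added below) and enable_enhanced_civic_answers are rejected for Vertex AI. A small sketch of the snake_case-to-camelCase conversion with plain dicts and illustrative values:

# Hedged sketch: generation settings for an autorater converted to the Vertex
# wire format by the new tunings._GenerationConfig_to_vertex helper.
from google.genai.tunings import _GenerationConfig_to_vertex

generation_config = {
    'temperature': 0.1,
    'max_output_tokens': 1024,
    'response_mime_type': 'application/json',
    'response_json_schema': {'type': 'object'},  # illustrative schema
}

print(_GenerationConfig_to_vertex(generation_config))
# -> {'responseJsonSchema': {'type': 'object'}, 'maxOutputTokens': 1024,
#     'responseMimeType': 'application/json', 'temperature': 0.1}
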
@@ -542,6 +833,26 @@ def _ListTuningJobsResponse_from_vertex(
   return to_object
 
 
+def _SpeechConfig_to_vertex(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+    root_object: Optional[Union[dict[str, Any], object]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['language_code']) is not None:
+    setv(to_object, ['languageCode'], getv(from_object, ['language_code']))
+
+  if getv(from_object, ['voice_config']) is not None:
+    setv(to_object, ['voiceConfig'], getv(from_object, ['voice_config']))
+
+  if getv(from_object, ['multi_speaker_voice_config']) is not None:
+    raise ValueError(
+        'multi_speaker_voice_config parameter is not supported in Vertex AI.'
+    )
+
+  return to_object
+
+
 def _TunedModel_from_mldev(
     from_object: Union[dict[str, Any], object],
     parent_object: Optional[dict[str, Any]] = None,