google-genai 1.41.0__py3-none-any.whl → 1.42.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- google/genai/_api_client.py +2 -1
- google/genai/_common.py +213 -77
- google/genai/_live_converters.py +720 -3119
- google/genai/_replay_api_client.py +8 -4
- google/genai/_tokens_converters.py +14 -443
- google/genai/_transformers.py +42 -12
- google/genai/batches.py +113 -1063
- google/genai/caches.py +67 -863
- google/genai/errors.py +9 -2
- google/genai/files.py +12 -171
- google/genai/live.py +10 -11
- google/genai/live_music.py +24 -27
- google/genai/models.py +322 -1835
- google/genai/operations.py +6 -32
- google/genai/tokens.py +2 -12
- google/genai/tunings.py +18 -197
- google/genai/types.py +86 -3
- google/genai/version.py +1 -1
- {google_genai-1.41.0.dist-info → google_genai-1.42.0.dist-info}/METADATA +40 -38
- google_genai-1.42.0.dist-info/RECORD +39 -0
- google_genai-1.41.0.dist-info/RECORD +0 -39
- {google_genai-1.41.0.dist-info → google_genai-1.42.0.dist-info}/WHEEL +0 -0
- {google_genai-1.41.0.dist-info → google_genai-1.42.0.dist-info}/licenses/LICENSE +0 -0
- {google_genai-1.41.0.dist-info → google_genai-1.42.0.dist-info}/top_level.txt +0 -0
google/genai/operations.py
CHANGED
@@ -91,26 +91,6 @@ def _GetProjectOperationParameters_to_vertex(
   return to_object
 
 
-def _ProjectOperation_from_vertex(
-    from_object: Union[dict[str, Any], object],
-    parent_object: Optional[dict[str, Any]] = None,
-) -> dict[str, Any]:
-  to_object: dict[str, Any] = {}
-  if getv(from_object, ['name']) is not None:
-    setv(to_object, ['name'], getv(from_object, ['name']))
-
-  if getv(from_object, ['metadata']) is not None:
-    setv(to_object, ['metadata'], getv(from_object, ['metadata']))
-
-  if getv(from_object, ['done']) is not None:
-    setv(to_object, ['done'], getv(from_object, ['done']))
-
-  if getv(from_object, ['error']) is not None:
-    setv(to_object, ['error'], getv(from_object, ['error']))
-
-  return to_object
-
-
 class Operations(_api_module.BaseModule):
 
   def _get_videos_operation(
@@ -158,7 +138,7 @@ class Operations(_api_module.BaseModule):
 
     response = self._api_client.request('get', path, request_dict, http_options)
 
-    response_dict =
+    response_dict = {} if not response.body else json.loads(response.body)
 
     return response_dict
 
@@ -208,7 +188,7 @@ class Operations(_api_module.BaseModule):
         'post', path, request_dict, http_options
     )
 
-    response_dict =
+    response_dict = {} if not response.body else json.loads(response.body)
 
     return response_dict
 
@@ -252,10 +232,7 @@ class Operations(_api_module.BaseModule):
 
     response = self._api_client.request('get', path, request_dict, http_options)
 
-    response_dict =
-
-    if self._api_client.vertexai:
-      response_dict = _ProjectOperation_from_vertex(response_dict)
+    response_dict = {} if not response.body else json.loads(response.body)
 
     return_value = types.ProjectOperation._from_response(
         response=response_dict, kwargs=parameter_model.model_dump()
@@ -366,7 +343,7 @@ class AsyncOperations(_api_module.BaseModule):
         'get', path, request_dict, http_options
     )
 
-    response_dict =
+    response_dict = {} if not response.body else json.loads(response.body)
 
    return response_dict
 
@@ -416,7 +393,7 @@ class AsyncOperations(_api_module.BaseModule):
         'post', path, request_dict, http_options
    )
 
-    response_dict =
+    response_dict = {} if not response.body else json.loads(response.body)
 
    return response_dict
 
@@ -462,10 +439,7 @@ class AsyncOperations(_api_module.BaseModule):
         'get', path, request_dict, http_options
    )
 
-    response_dict =
-
-    if self._api_client.vertexai:
-      response_dict = _ProjectOperation_from_vertex(response_dict)
+    response_dict = {} if not response.body else json.loads(response.body)
 
    return_value = types.ProjectOperation._from_response(
        response=response_dict, kwargs=parameter_model.model_dump()
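In each hunk above (and in the tokens.py and tunings.py hunks below), the regenerated request methods now parse an empty HTTP response body into an empty dict before handing it to `_from_response`, and the Vertex-specific `_ProjectOperation_from_vertex` post-processing is dropped. A minimal sketch of that parsing expression, using a made-up stand-in for the SDK's internal response object (`FakeResponse` below is illustrative, not part of google-genai):

import json
from dataclasses import dataclass


@dataclass
class FakeResponse:
  """Stand-in for the SDK's internal HTTP response object (illustrative only)."""

  body: str


def parse_body(response: FakeResponse) -> dict:
  # The expression introduced across these hunks: an empty body is treated as
  # an empty dict instead of being passed to json.loads.
  return {} if not response.body else json.loads(response.body)


print(parse_body(FakeResponse(body='')))                # -> {}
print(parse_body(FakeResponse(body='{"done": true}')))  # -> {'done': True}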
google/genai/tokens.py
CHANGED
@@ -257,12 +257,7 @@ class Tokens(_api_module.BaseModule):
     response = self._api_client.request(
         'post', path, request_dict, http_options
     )
-    response_dict =
-
-    if not self._api_client.vertexai:
-      response_dict = tokens_converters._AuthToken_from_mldev(
-          response_dict
-      )
+    response_dict = {} if not response.body else json.loads(response.body)
 
     return_value = types.AuthToken._from_response(
         response=response_dict, kwargs=parameter_model.model_dump()
@@ -358,12 +353,7 @@ class AsyncTokens(_api_module.BaseModule):
         request_dict,
         http_options=http_options,
     )
-    response_dict =
-
-    if not self._api_client.vertexai:
-      response_dict = tokens_converters._AuthToken_from_mldev(
-          response_dict
-      )
+    response_dict = {} if not response.body else json.loads(response.body)
 
     return_value = types.AuthToken._from_response(
         response=response_dict, kwargs=parameter_model.model_dump()
google/genai/tunings.py
CHANGED
@@ -32,40 +32,6 @@ from .pagers import AsyncPager, Pager
 logger = logging.getLogger('google_genai.tunings')
 
 
-def _AutoraterConfig_from_vertex(
-    from_object: Union[dict[str, Any], object],
-    parent_object: Optional[dict[str, Any]] = None,
-) -> dict[str, Any]:
-  to_object: dict[str, Any] = {}
-  if getv(from_object, ['samplingCount']) is not None:
-    setv(to_object, ['sampling_count'], getv(from_object, ['samplingCount']))
-
-  if getv(from_object, ['flipEnabled']) is not None:
-    setv(to_object, ['flip_enabled'], getv(from_object, ['flipEnabled']))
-
-  if getv(from_object, ['autoraterModel']) is not None:
-    setv(to_object, ['autorater_model'], getv(from_object, ['autoraterModel']))
-
-  return to_object
-
-
-def _AutoraterConfig_to_vertex(
-    from_object: Union[dict[str, Any], object],
-    parent_object: Optional[dict[str, Any]] = None,
-) -> dict[str, Any]:
-  to_object: dict[str, Any] = {}
-  if getv(from_object, ['sampling_count']) is not None:
-    setv(to_object, ['samplingCount'], getv(from_object, ['sampling_count']))
-
-  if getv(from_object, ['flip_enabled']) is not None:
-    setv(to_object, ['flipEnabled'], getv(from_object, ['flip_enabled']))
-
-  if getv(from_object, ['autorater_model']) is not None:
-    setv(to_object, ['autoraterModel'], getv(from_object, ['autorater_model']))
-
-  return to_object
-
-
 def _CancelTuningJobParameters_to_mldev(
     from_object: Union[dict[str, Any], object],
     parent_object: Optional[dict[str, Any]] = None,
@@ -297,21 +263,11 @@ def _EvaluationConfig_from_vertex(
     setv(to_object, ['metrics'], t.t_metrics(getv(from_object, ['metrics'])))
 
   if getv(from_object, ['outputConfig']) is not None:
-    setv(
-        to_object,
-        ['output_config'],
-        _OutputConfig_from_vertex(
-            getv(from_object, ['outputConfig']), to_object
-        ),
-    )
+    setv(to_object, ['output_config'], getv(from_object, ['outputConfig']))
 
   if getv(from_object, ['autoraterConfig']) is not None:
     setv(
-        to_object,
-        ['autorater_config'],
-        _AutoraterConfig_from_vertex(
-            getv(from_object, ['autoraterConfig']), to_object
-        ),
+        to_object, ['autorater_config'], getv(from_object, ['autoraterConfig'])
     )
 
   return to_object
@@ -326,47 +282,11 @@ def _EvaluationConfig_to_vertex(
     setv(to_object, ['metrics'], t.t_metrics(getv(from_object, ['metrics'])))
 
   if getv(from_object, ['output_config']) is not None:
-    setv(
-        to_object,
-        ['outputConfig'],
-        _OutputConfig_to_vertex(
-            getv(from_object, ['output_config']), to_object
-        ),
-    )
+    setv(to_object, ['outputConfig'], getv(from_object, ['output_config']))
 
   if getv(from_object, ['autorater_config']) is not None:
     setv(
-        to_object,
-        ['autoraterConfig'],
-        _AutoraterConfig_to_vertex(
-            getv(from_object, ['autorater_config']), to_object
-        ),
-    )
-
-  return to_object
-
-
-def _GcsDestination_from_vertex(
-    from_object: Union[dict[str, Any], object],
-    parent_object: Optional[dict[str, Any]] = None,
-) -> dict[str, Any]:
-  to_object: dict[str, Any] = {}
-  if getv(from_object, ['outputUriPrefix']) is not None:
-    setv(
-        to_object, ['output_uri_prefix'], getv(from_object, ['outputUriPrefix'])
-    )
-
-  return to_object
-
-
-def _GcsDestination_to_vertex(
-    from_object: Union[dict[str, Any], object],
-    parent_object: Optional[dict[str, Any]] = None,
-) -> dict[str, Any]:
-  to_object: dict[str, Any] = {}
-  if getv(from_object, ['output_uri_prefix']) is not None:
-    setv(
-        to_object, ['outputUriPrefix'], getv(from_object, ['output_uri_prefix'])
+        to_object, ['autoraterConfig'], getv(from_object, ['autorater_config'])
     )
 
   return to_object
@@ -516,60 +436,6 @@ def _ListTuningJobsResponse_from_vertex(
   return to_object
 
 
-def _OutputConfig_from_vertex(
-    from_object: Union[dict[str, Any], object],
-    parent_object: Optional[dict[str, Any]] = None,
-) -> dict[str, Any]:
-  to_object: dict[str, Any] = {}
-  if getv(from_object, ['gcsDestination']) is not None:
-    setv(
-        to_object,
-        ['gcs_destination'],
-        _GcsDestination_from_vertex(
-            getv(from_object, ['gcsDestination']), to_object
-        ),
-    )
-
-  return to_object
-
-
-def _OutputConfig_to_vertex(
-    from_object: Union[dict[str, Any], object],
-    parent_object: Optional[dict[str, Any]] = None,
-) -> dict[str, Any]:
-  to_object: dict[str, Any] = {}
-  if getv(from_object, ['gcs_destination']) is not None:
-    setv(
-        to_object,
-        ['gcsDestination'],
-        _GcsDestination_to_vertex(
-            getv(from_object, ['gcs_destination']), to_object
-        ),
-    )
-
-  return to_object
-
-
-def _TunedModelCheckpoint_from_vertex(
-    from_object: Union[dict[str, Any], object],
-    parent_object: Optional[dict[str, Any]] = None,
-) -> dict[str, Any]:
-  to_object: dict[str, Any] = {}
-  if getv(from_object, ['checkpointId']) is not None:
-    setv(to_object, ['checkpoint_id'], getv(from_object, ['checkpointId']))
-
-  if getv(from_object, ['epoch']) is not None:
-    setv(to_object, ['epoch'], getv(from_object, ['epoch']))
-
-  if getv(from_object, ['step']) is not None:
-    setv(to_object, ['step'], getv(from_object, ['step']))
-
-  if getv(from_object, ['endpoint']) is not None:
-    setv(to_object, ['endpoint'], getv(from_object, ['endpoint']))
-
-  return to_object
-
-
 def _TunedModel_from_mldev(
     from_object: Union[dict[str, Any], object],
     parent_object: Optional[dict[str, Any]] = None,
@@ -584,30 +450,6 @@ def _TunedModel_from_mldev(
   return to_object
 
 
-def _TunedModel_from_vertex(
-    from_object: Union[dict[str, Any], object],
-    parent_object: Optional[dict[str, Any]] = None,
-) -> dict[str, Any]:
-  to_object: dict[str, Any] = {}
-  if getv(from_object, ['model']) is not None:
-    setv(to_object, ['model'], getv(from_object, ['model']))
-
-  if getv(from_object, ['endpoint']) is not None:
-    setv(to_object, ['endpoint'], getv(from_object, ['endpoint']))
-
-  if getv(from_object, ['checkpoints']) is not None:
-    setv(
-        to_object,
-        ['checkpoints'],
-        [
-            _TunedModelCheckpoint_from_vertex(item, to_object)
-            for item in getv(from_object, ['checkpoints'])
-        ],
-    )
-
-  return to_object
-
-
 def _TuningDataset_to_mldev(
     from_object: Union[dict[str, Any], object],
     parent_object: Optional[dict[str, Any]] = None,
@@ -625,10 +467,7 @@ def _TuningDataset_to_mldev(
     setv(
         to_object,
         ['examples', 'examples'],
-        [
-            _TuningExample_to_mldev(item, to_object)
-            for item in getv(from_object, ['examples'])
-        ],
+        [item for item in getv(from_object, ['examples'])],
     )
 
   return to_object
@@ -659,20 +498,6 @@ def _TuningDataset_to_vertex(
   return to_object
 
 
-def _TuningExample_to_mldev(
-    from_object: Union[dict[str, Any], object],
-    parent_object: Optional[dict[str, Any]] = None,
-) -> dict[str, Any]:
-  to_object: dict[str, Any] = {}
-  if getv(from_object, ['text_input']) is not None:
-    setv(to_object, ['textInput'], getv(from_object, ['text_input']))
-
-  if getv(from_object, ['output']) is not None:
-    setv(to_object, ['output'], getv(from_object, ['output']))
-
-  return to_object
-
-
 def _TuningJob_from_mldev(
     from_object: Union[dict[str, Any], object],
     parent_object: Optional[dict[str, Any]] = None,
@@ -801,11 +626,7 @@ def _TuningJob_from_vertex(
     setv(to_object, ['base_model'], getv(from_object, ['baseModel']))
 
   if getv(from_object, ['tunedModel']) is not None:
-    setv(
-        to_object,
-        ['tuned_model'],
-        _TunedModel_from_vertex(getv(from_object, ['tunedModel']), to_object),
-    )
+    setv(to_object, ['tuned_model'], getv(from_object, ['tunedModel']))
 
   if getv(from_object, ['preTunedModel']) is not None:
     setv(to_object, ['pre_tuned_model'], getv(from_object, ['preTunedModel']))
@@ -973,12 +794,12 @@ class Tunings(_api_module.BaseModule):
 
     response = self._api_client.request('get', path, request_dict, http_options)
 
-    response_dict =
+    response_dict = {} if not response.body else json.loads(response.body)
 
     if self._api_client.vertexai:
       response_dict = _TuningJob_from_vertex(response_dict)
 
-    else:
+    if not self._api_client.vertexai:
       response_dict = _TuningJob_from_mldev(response_dict)
 
     return_value = types.TuningJob._from_response(
@@ -1040,12 +861,12 @@ class Tunings(_api_module.BaseModule):
 
     response = self._api_client.request('get', path, request_dict, http_options)
 
-    response_dict =
+    response_dict = {} if not response.body else json.loads(response.body)
 
     if self._api_client.vertexai:
       response_dict = _ListTuningJobsResponse_from_vertex(response_dict)
 
-    else:
+    if not self._api_client.vertexai:
       response_dict = _ListTuningJobsResponse_from_mldev(response_dict)
 
     return_value = types.ListTuningJobsResponse._from_response(
@@ -1169,7 +990,7 @@ class Tunings(_api_module.BaseModule):
         'post', path, request_dict, http_options
     )
 
-    response_dict =
+    response_dict = {} if not response.body else json.loads(response.body)
 
     if self._api_client.vertexai:
       response_dict = _TuningJob_from_vertex(response_dict)
@@ -1242,7 +1063,7 @@ class Tunings(_api_module.BaseModule):
         'post', path, request_dict, http_options
    )
 
-    response_dict =
+    response_dict = {} if not response.body else json.loads(response.body)
 
    if not self._api_client.vertexai:
      response_dict = _TuningOperation_from_mldev(response_dict)
@@ -1426,12 +1247,12 @@ class AsyncTunings(_api_module.BaseModule):
         'get', path, request_dict, http_options
    )
 
-    response_dict =
+    response_dict = {} if not response.body else json.loads(response.body)
 
    if self._api_client.vertexai:
      response_dict = _TuningJob_from_vertex(response_dict)
 
-    else:
+    if not self._api_client.vertexai:
      response_dict = _TuningJob_from_mldev(response_dict)
 
    return_value = types.TuningJob._from_response(
@@ -1495,12 +1316,12 @@ class AsyncTunings(_api_module.BaseModule):
         'get', path, request_dict, http_options
    )
 
-    response_dict =
+    response_dict = {} if not response.body else json.loads(response.body)
 
    if self._api_client.vertexai:
      response_dict = _ListTuningJobsResponse_from_vertex(response_dict)
 
-    else:
+    if not self._api_client.vertexai:
      response_dict = _ListTuningJobsResponse_from_mldev(response_dict)
 
    return_value = types.ListTuningJobsResponse._from_response(
@@ -1624,7 +1445,7 @@ class AsyncTunings(_api_module.BaseModule):
         'post', path, request_dict, http_options
    )
 
-    response_dict =
+    response_dict = {} if not response.body else json.loads(response.body)
 
    if self._api_client.vertexai:
      response_dict = _TuningJob_from_vertex(response_dict)
@@ -1697,7 +1518,7 @@ class AsyncTunings(_api_module.BaseModule):
         'post', path, request_dict, http_options
    )
 
-    response_dict =
+    response_dict = {} if not response.body else json.loads(response.body)
 
    if not self._api_client.vertexai:
      response_dict = _TuningOperation_from_mldev(response_dict)
google/genai/types.py
CHANGED
@@ -1117,6 +1117,40 @@ class FunctionResponsePart(_common.BaseModel):
       default=None, description="""Optional. URI based data."""
   )
 
+  @classmethod
+  def from_bytes(cls, *, data: bytes, mime_type: str) -> 'FunctionResponsePart':
+    """Creates a FunctionResponsePart from bytes and mime type.
+
+    Args:
+      data (bytes): The bytes of the data
+      mime_type (str): mime_type: The MIME type of the data.
+    """
+    inline_data = FunctionResponseBlob(
+        data=data,
+        mime_type=mime_type,
+    )
+    return cls(inline_data=inline_data)
+
+  @classmethod
+  def from_uri(
+      cls, *, file_uri: str, mime_type: Optional[str] = None
+  ) -> 'FunctionResponsePart':
+    """Creates a FunctionResponsePart from a file uri.
+
+    Args:
+      file_uri (str): The uri of the file
+      mime_type (str): mime_type: The MIME type of the file. If not provided,
+        the MIME type will be automatically determined.
+    """
+    if mime_type is None:
+      import mimetypes
+
+      mime_type, _ = mimetypes.guess_type(file_uri)
+    if not mime_type:
+      raise ValueError(f'Failed to determine mime type for file: {file_uri}')
+    file_data = FunctionResponseFileData(file_uri=file_uri, mime_type=mime_type)
+    return cls(file_data=file_data)
+
 
 class FunctionResponsePartDict(TypedDict, total=False):
   """A datatype containing media that is part of a `FunctionResponse` message.
@@ -1274,7 +1308,7 @@ class Part(_common.BaseModel):
 
     Args:
      file_uri (str): The uri of the file
-      mime_type (str): mime_type: The MIME type of the
+      mime_type (str): mime_type: The MIME type of the file. If not provided,
        the MIME type will be automatically determined.
    """
    if mime_type is None:
@@ -1305,9 +1339,15 @@ class Part(_common.BaseModel):
 
   @classmethod
   def from_function_response(
-      cls,
+      cls,
+      *,
+      name: str,
+      response: dict[str, Any],
+      parts: Optional[list[FunctionResponsePart]] = None,
   ) -> 'Part':
-    function_response = FunctionResponse(
+    function_response = FunctionResponse(
+        name=name, response=response, parts=parts
+    )
    return cls(function_response=function_response)
 
   @classmethod
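The hunks above add `FunctionResponsePart.from_bytes` and `FunctionResponsePart.from_uri` constructors and give `Part.from_function_response` keyword-only `name`, `response`, and optional `parts` arguments. A hedged usage sketch based only on the signatures shown in this diff; the function name, bucket path, and payloads below are placeholders:

from google.genai import types

# Build multimodal function-response parts with the new classmethods.
audio_part = types.FunctionResponsePart.from_bytes(
    data=b'...raw wav bytes...', mime_type='audio/wav'
)
image_part = types.FunctionResponsePart.from_uri(
    file_uri='gs://my-bucket/chart.png'  # MIME type guessed from the extension
)

# Part.from_function_response now accepts the parts list alongside name/response.
part = types.Part.from_function_response(
    name='render_chart',
    response={'status': 'ok'},
    parts=[audio_part, image_part],
)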
@@ -6259,6 +6299,10 @@ class GenerateImagesConfig(_common.BaseModel):
       default=None,
       description="""Whether to add a watermark to the generated images.""",
   )
+  labels: Optional[dict[str, str]] = Field(
+      default=None,
+      description="""User specified labels to track billing usage.""",
+  )
   image_size: Optional[str] = Field(
       default=None,
       description="""The size of the largest dimension of the generated image.
@@ -6324,6 +6368,9 @@ class GenerateImagesConfigDict(TypedDict, total=False):
   add_watermark: Optional[bool]
   """Whether to add a watermark to the generated images."""
 
+  labels: Optional[dict[str, str]]
+  """User specified labels to track billing usage."""
+
   image_size: Optional[str]
   """The size of the largest dimension of the generated image.
   Supported sizes are 1K and 2K (not supported for Imagen 3 models)."""
@@ -6901,6 +6948,10 @@ class EditImageConfig(_common.BaseModel):
       default=None,
       description="""Whether to add a watermark to the generated images.""",
   )
+  labels: Optional[dict[str, str]] = Field(
+      default=None,
+      description="""User specified labels to track billing usage.""",
+  )
   edit_mode: Optional[EditMode] = Field(
       default=None,
       description="""Describes the editing mode for the request.""",
@@ -6967,6 +7018,9 @@ class EditImageConfigDict(TypedDict, total=False):
   add_watermark: Optional[bool]
   """Whether to add a watermark to the generated images."""
 
+  labels: Optional[dict[str, str]]
+  """User specified labels to track billing usage."""
+
   edit_mode: Optional[EditMode]
   """Describes the editing mode for the request."""
 
@@ -7082,6 +7136,10 @@ class _UpscaleImageAPIConfig(_common.BaseModel):
       output image will have be more different from the input image, but
       with finer details and less noise.""",
   )
+  labels: Optional[dict[str, str]] = Field(
+      default=None,
+      description="""User specified labels to track billing usage.""",
+  )
   number_of_images: Optional[int] = Field(default=None, description="""""")
   mode: Optional[str] = Field(default=None, description="""""")
 
@@ -7121,6 +7179,9 @@ class _UpscaleImageAPIConfigDict(TypedDict, total=False):
       output image will have be more different from the input image, but
       with finer details and less noise."""
 
+  labels: Optional[dict[str, str]]
+  """User specified labels to track billing usage."""
+
   number_of_images: Optional[int]
   """"""
 
@@ -7297,6 +7358,10 @@ class RecontextImageConfig(_common.BaseModel):
   enhance_prompt: Optional[bool] = Field(
       default=None, description="""Whether to use the prompt rewriting logic."""
   )
+  labels: Optional[dict[str, str]] = Field(
+      default=None,
+      description="""User specified labels to track billing usage.""",
+  )
 
 
 class RecontextImageConfigDict(TypedDict, total=False):
@@ -7338,6 +7403,9 @@ class RecontextImageConfigDict(TypedDict, total=False):
   enhance_prompt: Optional[bool]
   """Whether to use the prompt rewriting logic."""
 
+  labels: Optional[dict[str, str]]
+  """User specified labels to track billing usage."""
+
 
 RecontextImageConfigOrDict = Union[
     RecontextImageConfig, RecontextImageConfigDict
@@ -7488,6 +7556,10 @@ class SegmentImageConfig(_common.BaseModel):
       can be set to a decimal value between 0 and 255 non-inclusive.
       Set to -1 for no binary color thresholding.""",
   )
+  labels: Optional[dict[str, str]] = Field(
+      default=None,
+      description="""User specified labels to track billing usage.""",
+  )
 
 
 class SegmentImageConfigDict(TypedDict, total=False):
@@ -7518,6 +7590,9 @@ class SegmentImageConfigDict(TypedDict, total=False):
       can be set to a decimal value between 0 and 255 non-inclusive.
      Set to -1 for no binary color thresholding."""
 
+  labels: Optional[dict[str, str]]
+  """User specified labels to track billing usage."""
+
 
 SegmentImageConfigOrDict = Union[SegmentImageConfig, SegmentImageConfigDict]
 
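Each image config model above (generate, edit, upscale, recontext, segment) gains an optional `labels` mapping documented as user-specified labels for tracking billing usage. A minimal sketch of populating it on `GenerateImagesConfig`; the label keys and values are placeholders, and this only exercises the new field rather than any particular backend behavior:

from google.genai import types

config = types.GenerateImagesConfig(
    number_of_images=1,
    labels={'team': 'growth', 'experiment': 'hero-banner'},  # placeholder labels
)

# The dict form mirrors GenerateImagesConfigDict, which gains the same key.
config_dict: types.GenerateImagesConfigDict = {
    'number_of_images': 1,
    'labels': {'team': 'growth', 'experiment': 'hero-banner'},
}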
@@ -8980,6 +9055,7 @@ class GenerateVideosOperation(_common.BaseModel, Operation):
       cls, api_response: Any, is_vertex_ai: bool = False
   ) -> Self:
     """Instantiates a GenerateVideosOperation from an API response."""
+
     if is_vertex_ai:
       response_dict = _GenerateVideosOperation_from_vertex(api_response)
     else:
@@ -13000,6 +13076,10 @@ class UpscaleImageConfig(_common.BaseModel):
       output image will have be more different from the input image, but
       with finer details and less noise.""",
   )
+  labels: Optional[dict[str, str]] = Field(
+      default=None,
+      description="""User specified labels to track billing usage.""",
+  )
 
 
 class UpscaleImageConfigDict(TypedDict, total=False):
@@ -13038,6 +13118,9 @@ class UpscaleImageConfigDict(TypedDict, total=False):
       output image will have be more different from the input image, but
      with finer details and less noise."""
 
+  labels: Optional[dict[str, str]]
+  """User specified labels to track billing usage."""
+
 
 UpscaleImageConfigOrDict = Union[UpscaleImageConfig, UpscaleImageConfigDict]
 
google/genai/version.py
CHANGED