google-genai 1.2.0__py3-none-any.whl → 1.4.0__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as published to the supported public registries. It is provided for informational purposes only.
- google/genai/_api_client.py +160 -59
- google/genai/_api_module.py +6 -1
- google/genai/_automatic_function_calling_util.py +12 -12
- google/genai/_common.py +14 -2
- google/genai/_extra_utils.py +14 -8
- google/genai/_replay_api_client.py +35 -3
- google/genai/_test_api_client.py +8 -8
- google/genai/_transformers.py +169 -48
- google/genai/batches.py +176 -127
- google/genai/caches.py +315 -214
- google/genai/chats.py +179 -35
- google/genai/client.py +16 -6
- google/genai/errors.py +19 -5
- google/genai/files.py +161 -115
- google/genai/live.py +137 -105
- google/genai/models.py +1553 -734
- google/genai/operations.py +635 -0
- google/genai/pagers.py +5 -5
- google/genai/tunings.py +166 -103
- google/genai/types.py +590 -142
- google/genai/version.py +1 -1
- {google_genai-1.2.0.dist-info → google_genai-1.4.0.dist-info}/METADATA +94 -12
- google_genai-1.4.0.dist-info/RECORD +27 -0
- {google_genai-1.2.0.dist-info → google_genai-1.4.0.dist-info}/WHEEL +1 -1
- google/genai/_operations.py +0 -365
- google_genai-1.2.0.dist-info/RECORD +0 -27
- {google_genai-1.2.0.dist-info → google_genai-1.4.0.dist-info}/LICENSE +0 -0
- {google_genai-1.2.0.dist-info → google_genai-1.4.0.dist-info}/top_level.txt +0 -0
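Most of the churn in `google/genai/models.py` below is one mechanical change repeated across the request-transformer helpers: `api_client` is now typed as `BaseApiClient` (imported from `._api_client`), `parent_object` becomes `Optional[dict]` rather than a bare `dict = None` default, and the local `to_object` accumulator gains an explicit annotation. A minimal sketch of the new signature pattern, with an illustrative function name and body that are not taken from the diff:

```python
from typing import Any, Optional, Union

# Hypothetical transformer skeleton showing the 1.4.0 signature pattern.
# 'BaseApiClient' is a string annotation here only because this sketch does
# not import the real class from google.genai._api_client.
def _Example_to_mldev(
    api_client: 'BaseApiClient',
    from_object: Union[dict, object],
    parent_object: Optional[dict] = None,  # was `parent_object: dict = None` in 1.2.0
) -> dict:
  to_object: dict[str, Any] = {}  # was `to_object = {}` in 1.2.0
  return to_object
```

The same file also switches the Imagen request builders from `['instances', ...]` to indexed `['instances[0]', ...]` paths and adds a `_GenerateVideosConfig_to_mldev` transformer; those hunks appear further down.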
google/genai/models.py
CHANGED
@@ -23,18 +23,20 @@ from . import _common
 from . import _extra_utils
 from . import _transformers as t
 from . import types
-from ._api_client import
+from ._api_client import BaseApiClient
 from ._common import get_value_by_path as getv
 from ._common import set_value_by_path as setv
 from .pagers import AsyncPager, Pager

+logger = logging.getLogger('google_genai.models')
+

 def _Part_to_mldev(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['video_metadata']) is not None:
     raise ValueError('video_metadata parameter is not supported in Gemini API.')

@@ -74,11 +76,11 @@ def _Part_to_mldev(


 def _Part_to_vertex(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['video_metadata']) is not None:
     setv(to_object, ['videoMetadata'], getv(from_object, ['video_metadata']))

@@ -118,11 +120,11 @@ def _Part_to_vertex(


 def _Content_to_mldev(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['parts']) is not None:
     setv(
         to_object,
@@ -140,11 +142,11 @@ def _Content_to_mldev(


 def _Content_to_vertex(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['parts']) is not None:
     setv(
         to_object,
@@ -162,24 +164,14 @@ def _Content_to_vertex(


 def _Schema_to_mldev(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
-  if getv(from_object, ['min_items']) is not None:
-    raise ValueError('min_items parameter is not supported in Gemini API.')
-
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['example']) is not None:
     raise ValueError('example parameter is not supported in Gemini API.')

-  if getv(from_object, ['property_ordering']) is not None:
-    setv(
-        to_object,
-        ['propertyOrdering'],
-        getv(from_object, ['property_ordering']),
-    )
-
   if getv(from_object, ['pattern']) is not None:
     raise ValueError('pattern parameter is not supported in Gemini API.')

@@ -204,21 +196,12 @@ def _Schema_to_mldev(
   if getv(from_object, ['min_properties']) is not None:
     raise ValueError('min_properties parameter is not supported in Gemini API.')

-  if getv(from_object, ['max_items']) is not None:
-    raise ValueError('max_items parameter is not supported in Gemini API.')
-
   if getv(from_object, ['maximum']) is not None:
     raise ValueError('maximum parameter is not supported in Gemini API.')

-  if getv(from_object, ['nullable']) is not None:
-    raise ValueError('nullable parameter is not supported in Gemini API.')
-
   if getv(from_object, ['max_properties']) is not None:
     raise ValueError('max_properties parameter is not supported in Gemini API.')

-  if getv(from_object, ['type']) is not None:
-    setv(to_object, ['type'], getv(from_object, ['type']))
-
   if getv(from_object, ['description']) is not None:
     setv(to_object, ['description'], getv(from_object, ['description']))

@@ -231,34 +214,43 @@ def _Schema_to_mldev(
   if getv(from_object, ['items']) is not None:
     setv(to_object, ['items'], getv(from_object, ['items']))

+  if getv(from_object, ['max_items']) is not None:
+    setv(to_object, ['maxItems'], getv(from_object, ['max_items']))
+
+  if getv(from_object, ['min_items']) is not None:
+    setv(to_object, ['minItems'], getv(from_object, ['min_items']))
+
+  if getv(from_object, ['nullable']) is not None:
+    setv(to_object, ['nullable'], getv(from_object, ['nullable']))
+
   if getv(from_object, ['properties']) is not None:
     setv(to_object, ['properties'], getv(from_object, ['properties']))

+  if getv(from_object, ['property_ordering']) is not None:
+    setv(
+        to_object,
+        ['propertyOrdering'],
+        getv(from_object, ['property_ordering']),
+    )
+
   if getv(from_object, ['required']) is not None:
     setv(to_object, ['required'], getv(from_object, ['required']))

+  if getv(from_object, ['type']) is not None:
+    setv(to_object, ['type'], getv(from_object, ['type']))
+
   return to_object


 def _Schema_to_vertex(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
-  if getv(from_object, ['min_items']) is not None:
-    setv(to_object, ['minItems'], getv(from_object, ['min_items']))
-
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['example']) is not None:
     setv(to_object, ['example'], getv(from_object, ['example']))

-  if getv(from_object, ['property_ordering']) is not None:
-    setv(
-        to_object,
-        ['propertyOrdering'],
-        getv(from_object, ['property_ordering']),
-    )
-
   if getv(from_object, ['pattern']) is not None:
     setv(to_object, ['pattern'], getv(from_object, ['pattern']))

@@ -283,21 +275,12 @@ def _Schema_to_vertex(
   if getv(from_object, ['min_properties']) is not None:
     setv(to_object, ['minProperties'], getv(from_object, ['min_properties']))

-  if getv(from_object, ['max_items']) is not None:
-    setv(to_object, ['maxItems'], getv(from_object, ['max_items']))
-
   if getv(from_object, ['maximum']) is not None:
     setv(to_object, ['maximum'], getv(from_object, ['maximum']))

-  if getv(from_object, ['nullable']) is not None:
-    setv(to_object, ['nullable'], getv(from_object, ['nullable']))
-
   if getv(from_object, ['max_properties']) is not None:
     setv(to_object, ['maxProperties'], getv(from_object, ['max_properties']))

-  if getv(from_object, ['type']) is not None:
-    setv(to_object, ['type'], getv(from_object, ['type']))
-
   if getv(from_object, ['description']) is not None:
     setv(to_object, ['description'], getv(from_object, ['description']))

@@ -310,21 +293,40 @@ def _Schema_to_vertex(
   if getv(from_object, ['items']) is not None:
     setv(to_object, ['items'], getv(from_object, ['items']))

+  if getv(from_object, ['max_items']) is not None:
+    setv(to_object, ['maxItems'], getv(from_object, ['max_items']))
+
+  if getv(from_object, ['min_items']) is not None:
+    setv(to_object, ['minItems'], getv(from_object, ['min_items']))
+
+  if getv(from_object, ['nullable']) is not None:
+    setv(to_object, ['nullable'], getv(from_object, ['nullable']))
+
   if getv(from_object, ['properties']) is not None:
     setv(to_object, ['properties'], getv(from_object, ['properties']))

+  if getv(from_object, ['property_ordering']) is not None:
+    setv(
+        to_object,
+        ['propertyOrdering'],
+        getv(from_object, ['property_ordering']),
+    )
+
   if getv(from_object, ['required']) is not None:
     setv(to_object, ['required'], getv(from_object, ['required']))

+  if getv(from_object, ['type']) is not None:
+    setv(to_object, ['type'], getv(from_object, ['type']))
+
   return to_object


 def _SafetySetting_to_mldev(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['method']) is not None:
     raise ValueError('method parameter is not supported in Gemini API.')

@@ -338,11 +340,11 @@ def _SafetySetting_to_mldev(


 def _SafetySetting_to_vertex(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['method']) is not None:
     setv(to_object, ['method'], getv(from_object, ['method']))

@@ -356,11 +358,11 @@ def _SafetySetting_to_vertex(


 def _FunctionDeclaration_to_mldev(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['response']) is not None:
     raise ValueError('response parameter is not supported in Gemini API.')

@@ -377,11 +379,11 @@ def _FunctionDeclaration_to_mldev(


 def _FunctionDeclaration_to_vertex(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['response']) is not None:
     setv(
         to_object,
@@ -404,31 +406,31 @@ def _FunctionDeclaration_to_vertex(


 def _GoogleSearch_to_mldev(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}

   return to_object


 def _GoogleSearch_to_vertex(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}

   return to_object


 def _DynamicRetrievalConfig_to_mldev(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['mode']) is not None:
     setv(to_object, ['mode'], getv(from_object, ['mode']))

@@ -443,11 +445,11 @@ def _DynamicRetrievalConfig_to_mldev(


 def _DynamicRetrievalConfig_to_vertex(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['mode']) is not None:
     setv(to_object, ['mode'], getv(from_object, ['mode']))

@@ -462,11 +464,11 @@ def _DynamicRetrievalConfig_to_vertex(


 def _GoogleSearchRetrieval_to_mldev(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['dynamic_retrieval_config']) is not None:
     setv(
         to_object,
@@ -482,11 +484,11 @@ def _GoogleSearchRetrieval_to_mldev(


 def _GoogleSearchRetrieval_to_vertex(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['dynamic_retrieval_config']) is not None:
     setv(
         to_object,
@@ -502,11 +504,11 @@ def _GoogleSearchRetrieval_to_vertex(


 def _Tool_to_mldev(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['function_declarations']) is not None:
     setv(
         to_object,
@@ -547,11 +549,11 @@ def _Tool_to_mldev(


 def _Tool_to_vertex(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['function_declarations']) is not None:
     setv(
         to_object,
@@ -592,11 +594,11 @@ def _Tool_to_vertex(


 def _FunctionCallingConfig_to_mldev(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['mode']) is not None:
     setv(to_object, ['mode'], getv(from_object, ['mode']))

@@ -611,11 +613,11 @@ def _FunctionCallingConfig_to_mldev(


 def _FunctionCallingConfig_to_vertex(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['mode']) is not None:
     setv(to_object, ['mode'], getv(from_object, ['mode']))

@@ -630,11 +632,11 @@ def _FunctionCallingConfig_to_vertex(


 def _ToolConfig_to_mldev(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['function_calling_config']) is not None:
     setv(
         to_object,
@@ -650,11 +652,11 @@ def _ToolConfig_to_mldev(


 def _ToolConfig_to_vertex(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['function_calling_config']) is not None:
     setv(
         to_object,
@@ -670,11 +672,11 @@ def _ToolConfig_to_vertex(


 def _PrebuiltVoiceConfig_to_mldev(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['voice_name']) is not None:
     setv(to_object, ['voiceName'], getv(from_object, ['voice_name']))

@@ -682,11 +684,11 @@ def _PrebuiltVoiceConfig_to_mldev(


 def _PrebuiltVoiceConfig_to_vertex(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['voice_name']) is not None:
     setv(to_object, ['voiceName'], getv(from_object, ['voice_name']))

@@ -694,11 +696,11 @@ def _PrebuiltVoiceConfig_to_vertex(


 def _VoiceConfig_to_mldev(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['prebuilt_voice_config']) is not None:
     setv(
         to_object,
@@ -712,11 +714,11 @@ def _VoiceConfig_to_mldev(


 def _VoiceConfig_to_vertex(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['prebuilt_voice_config']) is not None:
     setv(
         to_object,
@@ -730,11 +732,11 @@ def _VoiceConfig_to_vertex(


 def _SpeechConfig_to_mldev(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['voice_config']) is not None:
     setv(
         to_object,
@@ -748,11 +750,11 @@ def _SpeechConfig_to_mldev(


 def _SpeechConfig_to_vertex(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['voice_config']) is not None:
     setv(
         to_object,
@@ -766,11 +768,11 @@ def _SpeechConfig_to_vertex(


 def _ThinkingConfig_to_mldev(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['include_thoughts']) is not None:
     setv(
         to_object, ['includeThoughts'], getv(from_object, ['include_thoughts'])
@@ -780,11 +782,11 @@ def _ThinkingConfig_to_mldev(


 def _ThinkingConfig_to_vertex(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['include_thoughts']) is not None:
     setv(
         to_object, ['includeThoughts'], getv(from_object, ['include_thoughts'])
@@ -794,11 +796,11 @@ def _ThinkingConfig_to_vertex(


 def _GenerateContentConfig_to_mldev(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}

   if getv(from_object, ['system_instruction']) is not None:
     setv(
@@ -959,11 +961,11 @@ def _GenerateContentConfig_to_mldev(


 def _GenerateContentConfig_to_vertex(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}

   if getv(from_object, ['system_instruction']) is not None:
     setv(
@@ -1122,11 +1124,11 @@ def _GenerateContentConfig_to_vertex(


 def _GenerateContentParameters_to_mldev(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['model']) is not None:
     setv(
         to_object,
@@ -1159,11 +1161,11 @@ def _GenerateContentParameters_to_mldev(


 def _GenerateContentParameters_to_vertex(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['model']) is not None:
     setv(
         to_object,
@@ -1196,11 +1198,11 @@ def _GenerateContentParameters_to_vertex(


 def _EmbedContentConfig_to_mldev(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}

   if getv(from_object, ['task_type']) is not None:
     setv(
@@ -1229,11 +1231,11 @@ def _EmbedContentConfig_to_mldev(


 def _EmbedContentConfig_to_vertex(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}

   if getv(from_object, ['task_type']) is not None:
     setv(
@@ -1270,11 +1272,11 @@ def _EmbedContentConfig_to_vertex(


 def _EmbedContentParameters_to_mldev(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['model']) is not None:
     setv(
         to_object,
@@ -1307,11 +1309,11 @@ def _EmbedContentParameters_to_mldev(


 def _EmbedContentParameters_to_vertex(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['model']) is not None:
     setv(
         to_object,
@@ -1339,11 +1341,11 @@ def _EmbedContentParameters_to_vertex(


 def _GenerateImagesConfig_to_mldev(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}

   if getv(from_object, ['output_gcs_uri']) is not None:
     raise ValueError('output_gcs_uri parameter is not supported in Gemini API.')
@@ -1362,6 +1364,13 @@ def _GenerateImagesConfig_to_mldev(
         getv(from_object, ['number_of_images']),
     )

+  if getv(from_object, ['aspect_ratio']) is not None:
+    setv(
+        parent_object,
+        ['parameters', 'aspectRatio'],
+        getv(from_object, ['aspect_ratio']),
+    )
+
   if getv(from_object, ['guidance_scale']) is not None:
     setv(
         parent_object,
@@ -1430,13 +1439,6 @@ def _GenerateImagesConfig_to_mldev(
   if getv(from_object, ['add_watermark']) is not None:
     raise ValueError('add_watermark parameter is not supported in Gemini API.')

-  if getv(from_object, ['aspect_ratio']) is not None:
-    setv(
-        parent_object,
-        ['parameters', 'aspectRatio'],
-        getv(from_object, ['aspect_ratio']),
-    )
-
   if getv(from_object, ['enhance_prompt']) is not None:
     raise ValueError('enhance_prompt parameter is not supported in Gemini API.')

@@ -1444,11 +1446,11 @@ def _GenerateImagesConfig_to_mldev(


 def _GenerateImagesConfig_to_vertex(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}

   if getv(from_object, ['output_gcs_uri']) is not None:
     setv(
@@ -1471,6 +1473,13 @@ def _GenerateImagesConfig_to_vertex(
         getv(from_object, ['number_of_images']),
     )

+  if getv(from_object, ['aspect_ratio']) is not None:
+    setv(
+        parent_object,
+        ['parameters', 'aspectRatio'],
+        getv(from_object, ['aspect_ratio']),
+    )
+
   if getv(from_object, ['guidance_scale']) is not None:
     setv(
         parent_object,
@@ -1537,13 +1546,6 @@ def _GenerateImagesConfig_to_vertex(
         getv(from_object, ['add_watermark']),
     )

-  if getv(from_object, ['aspect_ratio']) is not None:
-    setv(
-        parent_object,
-        ['parameters', 'aspectRatio'],
-        getv(from_object, ['aspect_ratio']),
-    )
-
   if getv(from_object, ['enhance_prompt']) is not None:
     setv(
         parent_object,
@@ -1555,11 +1557,11 @@ def _GenerateImagesConfig_to_vertex(


 def _GenerateImagesParameters_to_mldev(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['model']) is not None:
     setv(
         to_object,
@@ -1568,7 +1570,7 @@ def _GenerateImagesParameters_to_mldev(
     )

   if getv(from_object, ['prompt']) is not None:
-    setv(to_object, ['instances', 'prompt'], getv(from_object, ['prompt']))
+    setv(to_object, ['instances[0]', 'prompt'], getv(from_object, ['prompt']))

   if getv(from_object, ['config']) is not None:
     setv(
@@ -1583,11 +1585,11 @@ def _GenerateImagesParameters_to_mldev(


 def _GenerateImagesParameters_to_vertex(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['model']) is not None:
     setv(
         to_object,
@@ -1596,7 +1598,7 @@ def _GenerateImagesParameters_to_vertex(
     )

   if getv(from_object, ['prompt']) is not None:
-    setv(to_object, ['instances', 'prompt'], getv(from_object, ['prompt']))
+    setv(to_object, ['instances[0]', 'prompt'], getv(from_object, ['prompt']))

   if getv(from_object, ['config']) is not None:
     setv(
@@ -1611,11 +1613,11 @@ def _GenerateImagesParameters_to_vertex(


 def _Image_to_mldev(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['gcs_uri']) is not None:
     raise ValueError('gcs_uri parameter is not supported in Gemini API.')

@@ -1633,11 +1635,11 @@ def _Image_to_mldev(


 def _Image_to_vertex(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['gcs_uri']) is not None:
     setv(to_object, ['gcsUri'], getv(from_object, ['gcs_uri']))

@@ -1655,11 +1657,11 @@ def _Image_to_vertex(


 def _MaskReferenceConfig_to_mldev(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['mask_mode']) is not None:
     raise ValueError('mask_mode parameter is not supported in Gemini API.')

@@ -1675,11 +1677,11 @@ def _MaskReferenceConfig_to_mldev(


 def _MaskReferenceConfig_to_vertex(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['mask_mode']) is not None:
     setv(to_object, ['maskMode'], getv(from_object, ['mask_mode']))

@@ -1695,11 +1697,11 @@ def _MaskReferenceConfig_to_vertex(


 def _ControlReferenceConfig_to_mldev(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['control_type']) is not None:
     raise ValueError('control_type parameter is not supported in Gemini API.')

@@ -1713,11 +1715,11 @@ def _ControlReferenceConfig_to_mldev(


 def _ControlReferenceConfig_to_vertex(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['control_type']) is not None:
     setv(to_object, ['controlType'], getv(from_object, ['control_type']))

@@ -1732,11 +1734,11 @@ def _ControlReferenceConfig_to_vertex(


 def _StyleReferenceConfig_to_mldev(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['style_description']) is not None:
     raise ValueError(
         'style_description parameter is not supported in Gemini API.'
@@ -1746,11 +1748,11 @@ def _StyleReferenceConfig_to_mldev(


 def _StyleReferenceConfig_to_vertex(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['style_description']) is not None:
     setv(
         to_object,
@@ -1762,11 +1764,11 @@ def _StyleReferenceConfig_to_vertex(


 def _SubjectReferenceConfig_to_mldev(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['subject_type']) is not None:
     raise ValueError('subject_type parameter is not supported in Gemini API.')

@@ -1779,11 +1781,11 @@ def _SubjectReferenceConfig_to_mldev(


 def _SubjectReferenceConfig_to_vertex(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['subject_type']) is not None:
     setv(to_object, ['subjectType'], getv(from_object, ['subject_type']))

@@ -1798,11 +1800,11 @@ def _SubjectReferenceConfig_to_vertex(


 def _ReferenceImageAPI_to_mldev(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['reference_image']) is not None:
     raise ValueError(
         'reference_image parameter is not supported in Gemini API.'
@@ -1838,11 +1840,11 @@ def _ReferenceImageAPI_to_mldev(


 def _ReferenceImageAPI_to_vertex(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['reference_image']) is not None:
     setv(
         to_object,
@@ -1898,11 +1900,11 @@ def _ReferenceImageAPI_to_vertex(


 def _EditImageConfig_to_mldev(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}

   if getv(from_object, ['output_gcs_uri']) is not None:
     raise ValueError('output_gcs_uri parameter is not supported in Gemini API.')
@@ -1921,6 +1923,13 @@ def _EditImageConfig_to_mldev(
         getv(from_object, ['number_of_images']),
     )

+  if getv(from_object, ['aspect_ratio']) is not None:
+    setv(
+        parent_object,
+        ['parameters', 'aspectRatio'],
+        getv(from_object, ['aspect_ratio']),
+    )
+
   if getv(from_object, ['guidance_scale']) is not None:
     setv(
         parent_object,
@@ -1998,11 +2007,11 @@ def _EditImageConfig_to_mldev(


 def _EditImageConfig_to_vertex(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}

   if getv(from_object, ['output_gcs_uri']) is not None:
     setv(
@@ -2025,6 +2034,13 @@ def _EditImageConfig_to_vertex(
         getv(from_object, ['number_of_images']),
     )

+  if getv(from_object, ['aspect_ratio']) is not None:
+    setv(
+        parent_object,
+        ['parameters', 'aspectRatio'],
+        getv(from_object, ['aspect_ratio']),
+    )
+
   if getv(from_object, ['guidance_scale']) is not None:
     setv(
         parent_object,
@@ -2095,11 +2111,11 @@ def _EditImageConfig_to_vertex(


 def _EditImageParameters_to_mldev(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['model']) is not None:
     setv(
         to_object,
@@ -2108,12 +2124,12 @@ def _EditImageParameters_to_mldev(
     )

   if getv(from_object, ['prompt']) is not None:
-    setv(to_object, ['instances', 'prompt'], getv(from_object, ['prompt']))
+    setv(to_object, ['instances[0]', 'prompt'], getv(from_object, ['prompt']))

   if getv(from_object, ['reference_images']) is not None:
     setv(
         to_object,
-        ['instances', 'referenceImages'],
+        ['instances[0]', 'referenceImages'],
         [
             _ReferenceImageAPI_to_mldev(api_client, item, to_object)
             for item in getv(from_object, ['reference_images'])
@@ -2133,11 +2149,11 @@ def _EditImageParameters_to_mldev(


 def _EditImageParameters_to_vertex(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['model']) is not None:
     setv(
         to_object,
@@ -2146,12 +2162,12 @@ def _EditImageParameters_to_vertex(
     )

   if getv(from_object, ['prompt']) is not None:
-    setv(to_object, ['instances', 'prompt'], getv(from_object, ['prompt']))
+    setv(to_object, ['instances[0]', 'prompt'], getv(from_object, ['prompt']))

   if getv(from_object, ['reference_images']) is not None:
     setv(
         to_object,
-        ['instances', 'referenceImages'],
+        ['instances[0]', 'referenceImages'],
         [
             _ReferenceImageAPI_to_vertex(api_client, item, to_object)
             for item in getv(from_object, ['reference_images'])
@@ -2171,11 +2187,11 @@ def _EditImageParameters_to_vertex(


 def _UpscaleImageAPIConfig_to_mldev(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}

   if getv(from_object, ['include_rai_reason']) is not None:
     setv(
@@ -2212,11 +2228,11 @@ def _UpscaleImageAPIConfig_to_mldev(


 def _UpscaleImageAPIConfig_to_vertex(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}

   if getv(from_object, ['include_rai_reason']) is not None:
     setv(
@@ -2253,11 +2269,11 @@ def _UpscaleImageAPIConfig_to_vertex(


 def _UpscaleImageAPIParameters_to_mldev(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['model']) is not None:
     setv(
         to_object,
@@ -2268,7 +2284,7 @@ def _UpscaleImageAPIParameters_to_mldev(
   if getv(from_object, ['image']) is not None:
     setv(
         to_object,
-        ['instances', 'image'],
+        ['instances[0]', 'image'],
         _Image_to_mldev(api_client, getv(from_object, ['image']), to_object),
     )

@@ -2292,11 +2308,11 @@ def _UpscaleImageAPIParameters_to_mldev(


 def _UpscaleImageAPIParameters_to_vertex(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['model']) is not None:
     setv(
         to_object,
@@ -2307,7 +2323,7 @@ def _UpscaleImageAPIParameters_to_vertex(
   if getv(from_object, ['image']) is not None:
     setv(
         to_object,
-        ['instances', 'image'],
+        ['instances[0]', 'image'],
         _Image_to_vertex(api_client, getv(from_object, ['image']), to_object),
     )

@@ -2331,11 +2347,11 @@ def _UpscaleImageAPIParameters_to_vertex(


 def _GetModelParameters_to_mldev(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['model']) is not None:
     setv(
         to_object,
@@ -2350,11 +2366,11 @@ def _GetModelParameters_to_mldev(


 def _GetModelParameters_to_vertex(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['model']) is not None:
     setv(
         to_object,
@@ -2369,11 +2385,11 @@ def _GetModelParameters_to_vertex(


 def _ListModelsConfig_to_mldev(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}

   if getv(from_object, ['page_size']) is not None:
     setv(
@@ -2401,11 +2417,11 @@ def _ListModelsConfig_to_mldev(


 def _ListModelsConfig_to_vertex(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}

   if getv(from_object, ['page_size']) is not None:
     setv(
@@ -2433,11 +2449,11 @@ def _ListModelsConfig_to_vertex(


 def _ListModelsParameters_to_mldev(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['config']) is not None:
     setv(
         to_object,
@@ -2451,11 +2467,11 @@ def _ListModelsParameters_to_mldev(


 def _ListModelsParameters_to_vertex(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['config']) is not None:
     setv(
         to_object,
@@ -2469,11 +2485,11 @@ def _ListModelsParameters_to_vertex(


 def _UpdateModelConfig_to_mldev(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}

   if getv(from_object, ['display_name']) is not None:
     setv(parent_object, ['displayName'], getv(from_object, ['display_name']))
@@ -2485,11 +2501,11 @@ def _UpdateModelConfig_to_mldev(


 def _UpdateModelConfig_to_vertex(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}

   if getv(from_object, ['display_name']) is not None:
     setv(parent_object, ['displayName'], getv(from_object, ['display_name']))
@@ -2501,11 +2517,11 @@ def _UpdateModelConfig_to_vertex(


 def _UpdateModelParameters_to_mldev(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['model']) is not None:
     setv(
         to_object,
@@ -2526,11 +2542,11 @@ def _UpdateModelParameters_to_mldev(


 def _UpdateModelParameters_to_vertex(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['model']) is not None:
     setv(
         to_object,
@@ -2551,11 +2567,11 @@ def _UpdateModelParameters_to_vertex(


 def _DeleteModelParameters_to_mldev(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['model']) is not None:
     setv(
         to_object,
@@ -2570,11 +2586,11 @@ def _DeleteModelParameters_to_mldev(


 def _DeleteModelParameters_to_vertex(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['model']) is not None:
     setv(
         to_object,
@@ -2589,32 +2605,19 @@ def _DeleteModelParameters_to_vertex(


 def _CountTokensConfig_to_mldev(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}

   if getv(from_object, ['system_instruction']) is not None:
-
-
-        ['generateContentRequest', 'systemInstruction'],
-        _Content_to_mldev(
-            api_client,
-            t.t_content(api_client, getv(from_object, ['system_instruction'])),
-            to_object,
-        ),
+    raise ValueError(
+        'system_instruction parameter is not supported in Gemini API.'
     )

   if getv(from_object, ['tools']) is not None:
-
-        parent_object,
-        ['generateContentRequest', 'tools'],
-        [
-            _Tool_to_mldev(api_client, item, to_object)
-            for item in getv(from_object, ['tools'])
-        ],
-    )
+    raise ValueError('tools parameter is not supported in Gemini API.')

   if getv(from_object, ['generation_config']) is not None:
     raise ValueError(
@@ -2625,11 +2628,11 @@ def _CountTokensConfig_to_mldev(


 def _CountTokensConfig_to_vertex(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}

   if getv(from_object, ['system_instruction']) is not None:
     setv(
@@ -2663,11 +2666,11 @@ def _CountTokensConfig_to_vertex(


 def _CountTokensParameters_to_mldev(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['model']) is not None:
     setv(
         to_object,
@@ -2700,11 +2703,11 @@ def _CountTokensParameters_to_mldev(


 def _CountTokensParameters_to_vertex(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['model']) is not None:
     setv(
         to_object,
@@ -2737,11 +2740,11 @@ def _CountTokensParameters_to_vertex(


 def _ComputeTokensParameters_to_mldev(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['model']) is not None:
     setv(
         to_object,
@@ -2759,11 +2762,11 @@ def _ComputeTokensParameters_to_mldev(


 def _ComputeTokensParameters_to_vertex(
-    api_client:
+    api_client: BaseApiClient,
     from_object: Union[dict, object],
-    parent_object: dict = None,
+    parent_object: Optional[dict] = None,
 ) -> dict:
-  to_object = {}
+  to_object: dict[str, Any] = {}
   if getv(from_object, ['model']) is not None:
     setv(
         to_object,
@@ -2789,129 +2792,325 @@ def _ComputeTokensParameters_to_vertex(
|
|
2789
2792
|
return to_object
|
2790
2793
|
|
2791
2794
|
|
2792
|
-
def _SafetyFilterLevel_to_mldev_enum_validate(enum_value: Any):
|
2793
|
-
if enum_value in set(['BLOCK_NONE']):
|
2794
|
-
raise ValueError(f'{enum_value} enum value is not supported in Gemini API.')
|
2795
|
-
|
2796
|
-
|
2797
|
-
def _PersonGeneration_to_mldev_enum_validate(enum_value: Any):
|
2798
|
-
if enum_value in set(['ALLOW_ALL']):
|
2799
|
-
raise ValueError(f'{enum_value} enum value is not supported in Gemini API.')
|
2800
|
-
|
2801
|
-
|
2802
|
-
def _MaskReferenceMode_to_mldev_enum_validate(enum_value: Any):
|
2803
|
-
if enum_value in set([
|
2804
|
-
'MASK_MODE_DEFAULT',
|
2805
|
-
'MASK_MODE_USER_PROVIDED',
|
2806
|
-
'MASK_MODE_BACKGROUND',
|
2807
|
-
'MASK_MODE_FOREGROUND',
|
2808
|
-
'MASK_MODE_SEMANTIC',
|
2809
|
-
]):
|
2810
|
-
raise ValueError(f'{enum_value} enum value is not supported in Gemini API.')
|
2811
|
-
|
2812
|
-
|
2813
|
-
def _ControlReferenceType_to_mldev_enum_validate(enum_value: Any):
|
2814
|
-
if enum_value in set([
|
2815
|
-
'CONTROL_TYPE_DEFAULT',
|
2816
|
-
'CONTROL_TYPE_CANNY',
|
2817
|
-
'CONTROL_TYPE_SCRIBBLE',
|
2818
|
-
'CONTROL_TYPE_FACE_MESH',
|
2819
|
-
]):
|
2820
|
-
raise ValueError(f'{enum_value} enum value is not supported in Gemini API.')
|
2821
|
-
|
2822
|
-
|
2823
|
-
def _SubjectReferenceType_to_mldev_enum_validate(enum_value: Any):
|
2824
|
-
if enum_value in set([
|
2825
|
-
'SUBJECT_TYPE_DEFAULT',
|
2826
|
-
'SUBJECT_TYPE_PERSON',
|
2827
|
-
'SUBJECT_TYPE_ANIMAL',
|
2828
|
-
'SUBJECT_TYPE_PRODUCT',
|
2829
|
-
]):
|
2830
|
-
raise ValueError(f'{enum_value} enum value is not supported in Gemini API.')
|
2795
|
+
def _GenerateVideosConfig_to_mldev(
|
2796
|
+
api_client: BaseApiClient,
|
2797
|
+
from_object: Union[dict, object],
|
2798
|
+
parent_object: Optional[dict] = None,
|
2799
|
+
) -> dict:
|
2800
|
+
to_object: dict[str, Any] = {}
|
2831
2801
|
|
2802
|
+
if getv(from_object, ['number_of_videos']) is not None:
|
2803
|
+
setv(
|
2804
|
+
parent_object,
|
2805
|
+
['parameters', 'sampleCount'],
|
2806
|
+
getv(from_object, ['number_of_videos']),
|
2807
|
+
)
|
2832
2808
|
|
2833
|
-
def _EditMode_to_mldev_enum_validate(enum_value: Any):
|
2834
|
-
if enum_value in set([
|
2835
|
-
'EDIT_MODE_DEFAULT',
|
2836
|
-
'EDIT_MODE_INPAINT_REMOVAL',
|
2837
|
-
'EDIT_MODE_INPAINT_INSERTION',
|
2838
|
-
'EDIT_MODE_OUTPAINT',
|
2839
|
-
'EDIT_MODE_CONTROLLED_EDITING',
|
2840
|
-
'EDIT_MODE_STYLE',
|
2841
|
-
'EDIT_MODE_BGSWAP',
|
2842
|
-
'EDIT_MODE_PRODUCT_IMAGE',
|
2843
|
-
]):
|
2844
|
-
raise ValueError(f'{enum_value} enum value is not supported in Gemini API.')
|
2809
|
+
if getv(from_object, ['output_gcs_uri']) is not None:
|
2810
|
+
raise ValueError('output_gcs_uri parameter is not supported in Gemini API.')
|
2845
2811
|
|
2812
|
+
if getv(from_object, ['fps']) is not None:
|
2813
|
+
raise ValueError('fps parameter is not supported in Gemini API.')
|
2846
2814
|
|
2847
|
-
def _Part_from_mldev(
|
2848
|
-
api_client: ApiClient,
|
2849
|
-
from_object: Union[dict, object],
|
2850
|
-
parent_object: dict = None,
|
2851
|
-
) -> dict:
|
2852
|
-
to_object = {}
|
2815
|
+
if getv(from_object, ['duration_seconds']) is not None:
|
2816
|
+
raise ValueError(
|
2817
|
+
'duration_seconds parameter is not supported in Gemini API.'
|
2818
|
+
)
|
2853
2819
|
|
2854
|
-
if getv(from_object, ['thought']) is not None:
|
2855
|
-
setv(to_object, ['thought'], getv(from_object, ['thought']))
|
2820
|
+
if getv(from_object, ['seed']) is not None:
|
2821
|
+
raise ValueError('seed parameter is not supported in Gemini API.')
|
2856
2822
|
|
2857
|
-
if getv(from_object, ['codeExecutionResult']) is not None:
|
2823
|
+
if getv(from_object, ['aspect_ratio']) is not None:
|
2858
2824
|
setv(
|
2859
|
-
to_object,
|
2860
|
-
['code_execution_result'],
|
2861
|
-
getv(from_object, ['codeExecutionResult']),
|
2825
|
+
parent_object,
|
2826
|
+
['parameters', 'aspectRatio'],
|
2827
|
+
getv(from_object, ['aspect_ratio']),
|
2862
2828
|
)
|
2863
2829
|
|
2864
|
-
if getv(from_object, ['executableCode']) is not None:
|
2865
|
-
setv(to_object, ['executable_code'], getv(from_object, ['executableCode']))
|
2830
|
+
if getv(from_object, ['resolution']) is not None:
|
2831
|
+
raise ValueError('resolution parameter is not supported in Gemini API.')
|
2866
2832
|
|
2867
|
-
if getv(from_object, ['fileData']) is not None:
|
2868
|
-
setv(to_object, ['file_data'], getv(from_object, ['fileData']))
|
2833
|
+
if getv(from_object, ['person_generation']) is not None:
|
2834
|
+
setv(
|
2835
|
+
parent_object,
|
2836
|
+
['parameters', 'personGeneration'],
|
2837
|
+
getv(from_object, ['person_generation']),
|
2838
|
+
)
|
2869
2839
|
|
2870
|
-
if getv(from_object, ['functionCall']) is not None:
|
2871
|
-
setv(to_object, ['function_call'], getv(from_object, ['functionCall']))
|
2840
|
+
if getv(from_object, ['pubsub_topic']) is not None:
|
2841
|
+
raise ValueError('pubsub_topic parameter is not supported in Gemini API.')
|
2872
2842
|
|
2873
|
-
if getv(from_object, ['
|
2843
|
+
if getv(from_object, ['negative_prompt']) is not None:
|
2874
2844
|
setv(
|
2875
|
-
|
2876
|
-
['
|
2877
|
-
getv(from_object, ['
|
2845
|
+
parent_object,
|
2846
|
+
['parameters', 'negativePrompt'],
|
2847
|
+
getv(from_object, ['negative_prompt']),
|
2878
2848
|
)
|
2879
2849
|
|
2880
|
-
if getv(from_object, ['
|
2881
|
-
|
2882
|
-
|
2883
|
-
if getv(from_object, ['text']) is not None:
|
2884
|
-
setv(to_object, ['text'], getv(from_object, ['text']))
|
2850
|
+
if getv(from_object, ['enhance_prompt']) is not None:
|
2851
|
+
raise ValueError('enhance_prompt parameter is not supported in Gemini API.')
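A small sketch of the mapping this converter performs on the Gemini API path (hand-built dicts for illustration; in practice it is called from _GenerateVideosParameters_to_mldev with the request under construction, and None is acceptable for the client only because this converter never uses it):

    request: dict = {}
    config = {'number_of_videos': 1, 'aspect_ratio': '16:9'}
    _GenerateVideosConfig_to_mldev(None, config, request)
    # request is now {'parameters': {'sampleCount': 1, 'aspectRatio': '16:9'}}
    # Vertex-only fields such as output_gcs_uri, fps or seed raise ValueError here.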
|
2885
2852
|
|
2886
2853
|
return to_object
|
2887
2854
|
|
2888
2855
|
|
2889
|
-
def _Part_from_vertex(
|
2890
|
-
api_client: ApiClient,
|
2856
|
+
def _GenerateVideosConfig_to_vertex(
|
2857
|
+
api_client: BaseApiClient,
|
2891
2858
|
from_object: Union[dict, object],
|
2892
|
-
parent_object: dict = None,
|
2859
|
+
parent_object: Optional[dict] = None,
|
2893
2860
|
) -> dict:
|
2894
|
-
to_object = {}
|
2895
|
-
if getv(from_object, ['videoMetadata']) is not None:
|
2896
|
-
setv(to_object, ['video_metadata'], getv(from_object, ['videoMetadata']))
|
2861
|
+
to_object: dict[str, Any] = {}
|
2897
2862
|
|
2898
|
-
if getv(from_object, ['thought']) is not None:
|
2899
|
-
setv(to_object, ['thought'], getv(from_object, ['thought']))
|
2900
|
-
|
2901
|
-
if getv(from_object, ['codeExecutionResult']) is not None:
|
2863
|
+
if getv(from_object, ['number_of_videos']) is not None:
|
2902
2864
|
setv(
|
2903
|
-
to_object,
|
2904
|
-
['code_execution_result'],
|
2905
|
-
getv(from_object, ['codeExecutionResult']),
|
2865
|
+
parent_object,
|
2866
|
+
['parameters', 'sampleCount'],
|
2867
|
+
getv(from_object, ['number_of_videos']),
|
2906
2868
|
)
|
2907
2869
|
|
2908
|
-
if getv(from_object, ['
|
2909
|
-
setv(
|
2870
|
+
if getv(from_object, ['output_gcs_uri']) is not None:
|
2871
|
+
setv(
|
2872
|
+
parent_object,
|
2873
|
+
['parameters', 'storageUri'],
|
2874
|
+
getv(from_object, ['output_gcs_uri']),
|
2875
|
+
)
|
2910
2876
|
|
2911
|
-
if getv(from_object, ['
|
2912
|
-
setv(
|
2877
|
+
if getv(from_object, ['fps']) is not None:
|
2878
|
+
setv(parent_object, ['parameters', 'fps'], getv(from_object, ['fps']))
|
2913
2879
|
|
2914
|
-
if getv(from_object, ['
|
2880
|
+
if getv(from_object, ['duration_seconds']) is not None:
|
2881
|
+
setv(
|
2882
|
+
parent_object,
|
2883
|
+
['parameters', 'durationSeconds'],
|
2884
|
+
getv(from_object, ['duration_seconds']),
|
2885
|
+
)
|
2886
|
+
|
2887
|
+
if getv(from_object, ['seed']) is not None:
|
2888
|
+
setv(parent_object, ['parameters', 'seed'], getv(from_object, ['seed']))
|
2889
|
+
|
2890
|
+
if getv(from_object, ['aspect_ratio']) is not None:
|
2891
|
+
setv(
|
2892
|
+
parent_object,
|
2893
|
+
['parameters', 'aspectRatio'],
|
2894
|
+
getv(from_object, ['aspect_ratio']),
|
2895
|
+
)
|
2896
|
+
|
2897
|
+
if getv(from_object, ['resolution']) is not None:
|
2898
|
+
setv(
|
2899
|
+
parent_object,
|
2900
|
+
['parameters', 'resolution'],
|
2901
|
+
getv(from_object, ['resolution']),
|
2902
|
+
)
|
2903
|
+
|
2904
|
+
if getv(from_object, ['person_generation']) is not None:
|
2905
|
+
setv(
|
2906
|
+
parent_object,
|
2907
|
+
['parameters', 'personGeneration'],
|
2908
|
+
getv(from_object, ['person_generation']),
|
2909
|
+
)
|
2910
|
+
|
2911
|
+
if getv(from_object, ['pubsub_topic']) is not None:
|
2912
|
+
setv(
|
2913
|
+
parent_object,
|
2914
|
+
['parameters', 'pubsubTopic'],
|
2915
|
+
getv(from_object, ['pubsub_topic']),
|
2916
|
+
)
|
2917
|
+
|
2918
|
+
if getv(from_object, ['negative_prompt']) is not None:
|
2919
|
+
setv(
|
2920
|
+
parent_object,
|
2921
|
+
['parameters', 'negativePrompt'],
|
2922
|
+
getv(from_object, ['negative_prompt']),
|
2923
|
+
)
|
2924
|
+
|
2925
|
+
if getv(from_object, ['enhance_prompt']) is not None:
|
2926
|
+
setv(
|
2927
|
+
parent_object,
|
2928
|
+
['parameters', 'enhancePrompt'],
|
2929
|
+
getv(from_object, ['enhance_prompt']),
|
2930
|
+
)
|
2931
|
+
|
2932
|
+
return to_object
|
2933
|
+
|
2934
|
+
|
2935
|
+
def _GenerateVideosParameters_to_mldev(
|
2936
|
+
api_client: BaseApiClient,
|
2937
|
+
from_object: Union[dict, object],
|
2938
|
+
parent_object: Optional[dict] = None,
|
2939
|
+
) -> dict:
|
2940
|
+
to_object: dict[str, Any] = {}
|
2941
|
+
if getv(from_object, ['model']) is not None:
|
2942
|
+
setv(
|
2943
|
+
to_object,
|
2944
|
+
['_url', 'model'],
|
2945
|
+
t.t_model(api_client, getv(from_object, ['model'])),
|
2946
|
+
)
|
2947
|
+
|
2948
|
+
if getv(from_object, ['prompt']) is not None:
|
2949
|
+
setv(to_object, ['instances[0]', 'prompt'], getv(from_object, ['prompt']))
|
2950
|
+
|
2951
|
+
if getv(from_object, ['config']) is not None:
|
2952
|
+
setv(
|
2953
|
+
to_object,
|
2954
|
+
['config'],
|
2955
|
+
_GenerateVideosConfig_to_mldev(
|
2956
|
+
api_client, getv(from_object, ['config']), to_object
|
2957
|
+
),
|
2958
|
+
)
|
2959
|
+
|
2960
|
+
return to_object
|
2961
|
+
|
2962
|
+
|
2963
|
+
def _GenerateVideosParameters_to_vertex(
|
2964
|
+
api_client: BaseApiClient,
|
2965
|
+
from_object: Union[dict, object],
|
2966
|
+
parent_object: Optional[dict] = None,
|
2967
|
+
) -> dict:
|
2968
|
+
to_object: dict[str, Any] = {}
|
2969
|
+
if getv(from_object, ['model']) is not None:
|
2970
|
+
setv(
|
2971
|
+
to_object,
|
2972
|
+
['_url', 'model'],
|
2973
|
+
t.t_model(api_client, getv(from_object, ['model'])),
|
2974
|
+
)
|
2975
|
+
|
2976
|
+
if getv(from_object, ['prompt']) is not None:
|
2977
|
+
setv(to_object, ['instances[0]', 'prompt'], getv(from_object, ['prompt']))
|
2978
|
+
|
2979
|
+
if getv(from_object, ['config']) is not None:
|
2980
|
+
setv(
|
2981
|
+
to_object,
|
2982
|
+
['config'],
|
2983
|
+
_GenerateVideosConfig_to_vertex(
|
2984
|
+
api_client, getv(from_object, ['config']), to_object
|
2985
|
+
),
|
2986
|
+
)
|
2987
|
+
|
2988
|
+
return to_object
|
2989
|
+
|
2990
|
+
|
2991
|
+
def _SafetyFilterLevel_to_mldev_enum_validate(enum_value: Any):
|
2992
|
+
if enum_value in set(['BLOCK_NONE']):
|
2993
|
+
raise ValueError(f'{enum_value} enum value is not supported in Gemini API.')
|
2994
|
+
|
2995
|
+
|
2996
|
+
def _PersonGeneration_to_mldev_enum_validate(enum_value: Any):
|
2997
|
+
if enum_value in set(['ALLOW_ALL']):
|
2998
|
+
raise ValueError(f'{enum_value} enum value is not supported in Gemini API.')
|
2999
|
+
|
3000
|
+
|
3001
|
+
def _MaskReferenceMode_to_mldev_enum_validate(enum_value: Any):
|
3002
|
+
if enum_value in set([
|
3003
|
+
'MASK_MODE_DEFAULT',
|
3004
|
+
'MASK_MODE_USER_PROVIDED',
|
3005
|
+
'MASK_MODE_BACKGROUND',
|
3006
|
+
'MASK_MODE_FOREGROUND',
|
3007
|
+
'MASK_MODE_SEMANTIC',
|
3008
|
+
]):
|
3009
|
+
raise ValueError(f'{enum_value} enum value is not supported in Gemini API.')
|
3010
|
+
|
3011
|
+
|
3012
|
+
def _ControlReferenceType_to_mldev_enum_validate(enum_value: Any):
|
3013
|
+
if enum_value in set([
|
3014
|
+
'CONTROL_TYPE_DEFAULT',
|
3015
|
+
'CONTROL_TYPE_CANNY',
|
3016
|
+
'CONTROL_TYPE_SCRIBBLE',
|
3017
|
+
'CONTROL_TYPE_FACE_MESH',
|
3018
|
+
]):
|
3019
|
+
raise ValueError(f'{enum_value} enum value is not supported in Gemini API.')
|
3020
|
+
|
3021
|
+
|
3022
|
+
def _SubjectReferenceType_to_mldev_enum_validate(enum_value: Any):
|
3023
|
+
if enum_value in set([
|
3024
|
+
'SUBJECT_TYPE_DEFAULT',
|
3025
|
+
'SUBJECT_TYPE_PERSON',
|
3026
|
+
'SUBJECT_TYPE_ANIMAL',
|
3027
|
+
'SUBJECT_TYPE_PRODUCT',
|
3028
|
+
]):
|
3029
|
+
raise ValueError(f'{enum_value} enum value is not supported in Gemini API.')
|
3030
|
+
|
3031
|
+
|
3032
|
+
def _EditMode_to_mldev_enum_validate(enum_value: Any):
|
3033
|
+
if enum_value in set([
|
3034
|
+
'EDIT_MODE_DEFAULT',
|
3035
|
+
'EDIT_MODE_INPAINT_REMOVAL',
|
3036
|
+
'EDIT_MODE_INPAINT_INSERTION',
|
3037
|
+
'EDIT_MODE_OUTPAINT',
|
3038
|
+
'EDIT_MODE_CONTROLLED_EDITING',
|
3039
|
+
'EDIT_MODE_STYLE',
|
3040
|
+
'EDIT_MODE_BGSWAP',
|
3041
|
+
'EDIT_MODE_PRODUCT_IMAGE',
|
3042
|
+
]):
|
3043
|
+
raise ValueError(f'{enum_value} enum value is not supported in Gemini API.')
|
3044
|
+
|
3045
|
+
|
3046
|
+
def _Part_from_mldev(
|
3047
|
+
api_client: BaseApiClient,
|
3048
|
+
from_object: Union[dict, object],
|
3049
|
+
parent_object: Optional[dict] = None,
|
3050
|
+
) -> dict:
|
3051
|
+
to_object: dict[str, Any] = {}
|
3052
|
+
|
3053
|
+
if getv(from_object, ['thought']) is not None:
|
3054
|
+
setv(to_object, ['thought'], getv(from_object, ['thought']))
|
3055
|
+
|
3056
|
+
if getv(from_object, ['codeExecutionResult']) is not None:
|
3057
|
+
setv(
|
3058
|
+
to_object,
|
3059
|
+
['code_execution_result'],
|
3060
|
+
getv(from_object, ['codeExecutionResult']),
|
3061
|
+
)
|
3062
|
+
|
3063
|
+
if getv(from_object, ['executableCode']) is not None:
|
3064
|
+
setv(to_object, ['executable_code'], getv(from_object, ['executableCode']))
|
3065
|
+
|
3066
|
+
if getv(from_object, ['fileData']) is not None:
|
3067
|
+
setv(to_object, ['file_data'], getv(from_object, ['fileData']))
|
3068
|
+
|
3069
|
+
if getv(from_object, ['functionCall']) is not None:
|
3070
|
+
setv(to_object, ['function_call'], getv(from_object, ['functionCall']))
|
3071
|
+
|
3072
|
+
if getv(from_object, ['functionResponse']) is not None:
|
3073
|
+
setv(
|
3074
|
+
to_object,
|
3075
|
+
['function_response'],
|
3076
|
+
getv(from_object, ['functionResponse']),
|
3077
|
+
)
|
3078
|
+
|
3079
|
+
if getv(from_object, ['inlineData']) is not None:
|
3080
|
+
setv(to_object, ['inline_data'], getv(from_object, ['inlineData']))
|
3081
|
+
|
3082
|
+
if getv(from_object, ['text']) is not None:
|
3083
|
+
setv(to_object, ['text'], getv(from_object, ['text']))
|
3084
|
+
|
3085
|
+
return to_object
|
3086
|
+
|
3087
|
+
|
3088
|
+
def _Part_from_vertex(
|
3089
|
+
api_client: BaseApiClient,
|
3090
|
+
from_object: Union[dict, object],
|
3091
|
+
parent_object: Optional[dict] = None,
|
3092
|
+
) -> dict:
|
3093
|
+
to_object: dict[str, Any] = {}
|
3094
|
+
if getv(from_object, ['videoMetadata']) is not None:
|
3095
|
+
setv(to_object, ['video_metadata'], getv(from_object, ['videoMetadata']))
|
3096
|
+
|
3097
|
+
if getv(from_object, ['thought']) is not None:
|
3098
|
+
setv(to_object, ['thought'], getv(from_object, ['thought']))
|
3099
|
+
|
3100
|
+
if getv(from_object, ['codeExecutionResult']) is not None:
|
3101
|
+
setv(
|
3102
|
+
to_object,
|
3103
|
+
['code_execution_result'],
|
3104
|
+
getv(from_object, ['codeExecutionResult']),
|
3105
|
+
)
|
3106
|
+
|
3107
|
+
if getv(from_object, ['executableCode']) is not None:
|
3108
|
+
setv(to_object, ['executable_code'], getv(from_object, ['executableCode']))
|
3109
|
+
|
3110
|
+
if getv(from_object, ['fileData']) is not None:
|
3111
|
+
setv(to_object, ['file_data'], getv(from_object, ['fileData']))
|
3112
|
+
|
3113
|
+
if getv(from_object, ['functionCall']) is not None:
|
2915
3114
|
setv(to_object, ['function_call'], getv(from_object, ['functionCall']))
|
2916
3115
|
|
2917
3116
|
if getv(from_object, ['functionResponse']) is not None:
|
@@ -2931,11 +3130,11 @@ def _Part_from_vertex(
|
|
2931
3130
|
|
2932
3131
|
|
2933
3132
|
def _Content_from_mldev(
|
2934
|
-
api_client:
|
3133
|
+
api_client: BaseApiClient,
|
2935
3134
|
from_object: Union[dict, object],
|
2936
|
-
parent_object: dict = None,
|
3135
|
+
parent_object: Optional[dict] = None,
|
2937
3136
|
) -> dict:
|
2938
|
-
to_object = {}
|
3137
|
+
to_object: dict[str, Any] = {}
|
2939
3138
|
if getv(from_object, ['parts']) is not None:
|
2940
3139
|
setv(
|
2941
3140
|
to_object,
|
@@ -2953,11 +3152,11 @@ def _Content_from_mldev(
|
|
2953
3152
|
|
2954
3153
|
|
2955
3154
|
def _Content_from_vertex(
|
2956
|
-
api_client:
|
3155
|
+
api_client: BaseApiClient,
|
2957
3156
|
from_object: Union[dict, object],
|
2958
|
-
parent_object: dict = None,
|
3157
|
+
parent_object: Optional[dict] = None,
|
2959
3158
|
) -> dict:
|
2960
|
-
to_object = {}
|
3159
|
+
to_object: dict[str, Any] = {}
|
2961
3160
|
if getv(from_object, ['parts']) is not None:
|
2962
3161
|
setv(
|
2963
3162
|
to_object,
|
@@ -2975,11 +3174,11 @@ def _Content_from_vertex(
|
|
2975
3174
|
|
2976
3175
|
|
2977
3176
|
def _CitationMetadata_from_mldev(
|
2978
|
-
api_client:
|
3177
|
+
api_client: BaseApiClient,
|
2979
3178
|
from_object: Union[dict, object],
|
2980
|
-
parent_object: dict = None,
|
3179
|
+
parent_object: Optional[dict] = None,
|
2981
3180
|
) -> dict:
|
2982
|
-
to_object = {}
|
3181
|
+
to_object: dict[str, Any] = {}
|
2983
3182
|
if getv(from_object, ['citationSources']) is not None:
|
2984
3183
|
setv(to_object, ['citations'], getv(from_object, ['citationSources']))
|
2985
3184
|
|
@@ -2987,11 +3186,11 @@ def _CitationMetadata_from_mldev(
|
|
2987
3186
|
|
2988
3187
|
|
2989
3188
|
def _CitationMetadata_from_vertex(
|
2990
|
-
api_client:
|
3189
|
+
api_client: BaseApiClient,
|
2991
3190
|
from_object: Union[dict, object],
|
2992
|
-
parent_object: dict = None,
|
3191
|
+
parent_object: Optional[dict] = None,
|
2993
3192
|
) -> dict:
|
2994
|
-
to_object = {}
|
3193
|
+
to_object: dict[str, Any] = {}
|
2995
3194
|
if getv(from_object, ['citations']) is not None:
|
2996
3195
|
setv(to_object, ['citations'], getv(from_object, ['citations']))
|
2997
3196
|
|
@@ -2999,11 +3198,11 @@ def _CitationMetadata_from_vertex(
|
|
2999
3198
|
|
3000
3199
|
|
3001
3200
|
def _Candidate_from_mldev(
|
3002
|
-
api_client:
|
3201
|
+
api_client: BaseApiClient,
|
3003
3202
|
from_object: Union[dict, object],
|
3004
|
-
parent_object: dict = None,
|
3203
|
+
parent_object: Optional[dict] = None,
|
3005
3204
|
) -> dict:
|
3006
|
-
to_object = {}
|
3205
|
+
to_object: dict[str, Any] = {}
|
3007
3206
|
if getv(from_object, ['content']) is not None:
|
3008
3207
|
setv(
|
3009
3208
|
to_object,
|
@@ -3051,11 +3250,11 @@ def _Candidate_from_mldev(
|
|
3051
3250
|
|
3052
3251
|
|
3053
3252
|
def _Candidate_from_vertex(
|
3054
|
-
api_client:
|
3253
|
+
api_client: BaseApiClient,
|
3055
3254
|
from_object: Union[dict, object],
|
3056
|
-
parent_object: dict = None,
|
3255
|
+
parent_object: Optional[dict] = None,
|
3057
3256
|
) -> dict:
|
3058
|
-
to_object = {}
|
3257
|
+
to_object: dict[str, Any] = {}
|
3059
3258
|
if getv(from_object, ['content']) is not None:
|
3060
3259
|
setv(
|
3061
3260
|
to_object,
|
@@ -3103,11 +3302,11 @@ def _Candidate_from_vertex(
|
|
3103
3302
|
|
3104
3303
|
|
3105
3304
|
def _GenerateContentResponse_from_mldev(
|
3106
|
-
api_client:
|
3305
|
+
api_client: BaseApiClient,
|
3107
3306
|
from_object: Union[dict, object],
|
3108
|
-
parent_object: dict = None,
|
3307
|
+
parent_object: Optional[dict] = None,
|
3109
3308
|
) -> dict:
|
3110
|
-
to_object = {}
|
3309
|
+
to_object: dict[str, Any] = {}
|
3111
3310
|
if getv(from_object, ['candidates']) is not None:
|
3112
3311
|
setv(
|
3113
3312
|
to_object,
|
@@ -3131,11 +3330,11 @@ def _GenerateContentResponse_from_mldev(
|
|
3131
3330
|
|
3132
3331
|
|
3133
3332
|
def _GenerateContentResponse_from_vertex(
|
3134
|
-
api_client:
|
3333
|
+
api_client: BaseApiClient,
|
3135
3334
|
from_object: Union[dict, object],
|
3136
|
-
parent_object: dict = None,
|
3335
|
+
parent_object: Optional[dict] = None,
|
3137
3336
|
) -> dict:
|
3138
|
-
to_object = {}
|
3337
|
+
to_object: dict[str, Any] = {}
|
3139
3338
|
if getv(from_object, ['candidates']) is not None:
|
3140
3339
|
setv(
|
3141
3340
|
to_object,
|
@@ -3146,6 +3345,12 @@ def _GenerateContentResponse_from_vertex(
|
|
3146
3345
|
],
|
3147
3346
|
)
|
3148
3347
|
|
3348
|
+
if getv(from_object, ['createTime']) is not None:
|
3349
|
+
setv(to_object, ['create_time'], getv(from_object, ['createTime']))
|
3350
|
+
|
3351
|
+
if getv(from_object, ['responseId']) is not None:
|
3352
|
+
setv(to_object, ['response_id'], getv(from_object, ['responseId']))
|
3353
|
+
|
3149
3354
|
if getv(from_object, ['modelVersion']) is not None:
|
3150
3355
|
setv(to_object, ['model_version'], getv(from_object, ['modelVersion']))
|
3151
3356
|
|
@@ -3159,21 +3364,21 @@ def _GenerateContentResponse_from_vertex(
|
|
3159
3364
|
|
3160
3365
|
|
3161
3366
|
def _ContentEmbeddingStatistics_from_mldev(
|
3162
|
-
api_client:
|
3367
|
+
api_client: BaseApiClient,
|
3163
3368
|
from_object: Union[dict, object],
|
3164
|
-
parent_object: dict = None,
|
3369
|
+
parent_object: Optional[dict] = None,
|
3165
3370
|
) -> dict:
|
3166
|
-
to_object = {}
|
3371
|
+
to_object: dict[str, Any] = {}
|
3167
3372
|
|
3168
3373
|
return to_object
|
3169
3374
|
|
3170
3375
|
|
3171
3376
|
def _ContentEmbeddingStatistics_from_vertex(
|
3172
|
-
api_client:
|
3377
|
+
api_client: BaseApiClient,
|
3173
3378
|
from_object: Union[dict, object],
|
3174
|
-
parent_object: dict = None,
|
3379
|
+
parent_object: Optional[dict] = None,
|
3175
3380
|
) -> dict:
|
3176
|
-
to_object = {}
|
3381
|
+
to_object: dict[str, Any] = {}
|
3177
3382
|
if getv(from_object, ['truncated']) is not None:
|
3178
3383
|
setv(to_object, ['truncated'], getv(from_object, ['truncated']))
|
3179
3384
|
|
@@ -3184,11 +3389,11 @@ def _ContentEmbeddingStatistics_from_vertex(
|
|
3184
3389
|
|
3185
3390
|
|
3186
3391
|
def _ContentEmbedding_from_mldev(
|
3187
|
-
api_client:
|
3392
|
+
api_client: BaseApiClient,
|
3188
3393
|
from_object: Union[dict, object],
|
3189
|
-
parent_object: dict = None,
|
3394
|
+
parent_object: Optional[dict] = None,
|
3190
3395
|
) -> dict:
|
3191
|
-
to_object = {}
|
3396
|
+
to_object: dict[str, Any] = {}
|
3192
3397
|
if getv(from_object, ['values']) is not None:
|
3193
3398
|
setv(to_object, ['values'], getv(from_object, ['values']))
|
3194
3399
|
|
@@ -3196,11 +3401,11 @@ def _ContentEmbedding_from_mldev(
|
|
3196
3401
|
|
3197
3402
|
|
3198
3403
|
def _ContentEmbedding_from_vertex(
|
3199
|
-
api_client:
|
3404
|
+
api_client: BaseApiClient,
|
3200
3405
|
from_object: Union[dict, object],
|
3201
|
-
parent_object: dict = None,
|
3406
|
+
parent_object: Optional[dict] = None,
|
3202
3407
|
) -> dict:
|
3203
|
-
to_object = {}
|
3408
|
+
to_object: dict[str, Any] = {}
|
3204
3409
|
if getv(from_object, ['values']) is not None:
|
3205
3410
|
setv(to_object, ['values'], getv(from_object, ['values']))
|
3206
3411
|
|
@@ -3217,21 +3422,21 @@ def _ContentEmbedding_from_vertex(
|
|
3217
3422
|
|
3218
3423
|
|
3219
3424
|
def _EmbedContentMetadata_from_mldev(
|
3220
|
-
api_client:
|
3425
|
+
api_client: BaseApiClient,
|
3221
3426
|
from_object: Union[dict, object],
|
3222
|
-
parent_object: dict = None,
|
3427
|
+
parent_object: Optional[dict] = None,
|
3223
3428
|
) -> dict:
|
3224
|
-
to_object = {}
|
3429
|
+
to_object: dict[str, Any] = {}
|
3225
3430
|
|
3226
3431
|
return to_object
|
3227
3432
|
|
3228
3433
|
|
3229
3434
|
def _EmbedContentMetadata_from_vertex(
|
3230
|
-
api_client:
|
3435
|
+
api_client: BaseApiClient,
|
3231
3436
|
from_object: Union[dict, object],
|
3232
|
-
parent_object: dict = None,
|
3437
|
+
parent_object: Optional[dict] = None,
|
3233
3438
|
) -> dict:
|
3234
|
-
to_object = {}
|
3439
|
+
to_object: dict[str, Any] = {}
|
3235
3440
|
if getv(from_object, ['billableCharacterCount']) is not None:
|
3236
3441
|
setv(
|
3237
3442
|
to_object,
|
@@ -3243,11 +3448,11 @@ def _EmbedContentMetadata_from_vertex(
|
|
3243
3448
|
|
3244
3449
|
|
3245
3450
|
def _EmbedContentResponse_from_mldev(
|
3246
|
-
api_client:
|
3451
|
+
api_client: BaseApiClient,
|
3247
3452
|
from_object: Union[dict, object],
|
3248
|
-
parent_object: dict = None,
|
3453
|
+
parent_object: Optional[dict] = None,
|
3249
3454
|
) -> dict:
|
3250
|
-
to_object = {}
|
3455
|
+
to_object: dict[str, Any] = {}
|
3251
3456
|
if getv(from_object, ['embeddings']) is not None:
|
3252
3457
|
setv(
|
3253
3458
|
to_object,
|
@@ -3271,11 +3476,11 @@ def _EmbedContentResponse_from_mldev(
|
|
3271
3476
|
|
3272
3477
|
|
3273
3478
|
def _EmbedContentResponse_from_vertex(
|
3274
|
-
api_client:
|
3479
|
+
api_client: BaseApiClient,
|
3275
3480
|
from_object: Union[dict, object],
|
3276
|
-
parent_object: dict = None,
|
3481
|
+
parent_object: Optional[dict] = None,
|
3277
3482
|
) -> dict:
|
3278
|
-
to_object = {}
|
3483
|
+
to_object: dict[str, Any] = {}
|
3279
3484
|
if getv(from_object, ['predictions[]', 'embeddings']) is not None:
|
3280
3485
|
setv(
|
3281
3486
|
to_object,
|
@@ -3299,11 +3504,11 @@ def _EmbedContentResponse_from_vertex(
|
|
3299
3504
|
|
3300
3505
|
|
3301
3506
|
def _Image_from_mldev(
|
3302
|
-
api_client:
|
3507
|
+
api_client: BaseApiClient,
|
3303
3508
|
from_object: Union[dict, object],
|
3304
|
-
parent_object: dict = None,
|
3509
|
+
parent_object: Optional[dict] = None,
|
3305
3510
|
) -> dict:
|
3306
|
-
to_object = {}
|
3511
|
+
to_object: dict[str, Any] = {}
|
3307
3512
|
|
3308
3513
|
if getv(from_object, ['bytesBase64Encoded']) is not None:
|
3309
3514
|
setv(
|
@@ -3319,11 +3524,11 @@ def _Image_from_mldev(
|
|
3319
3524
|
|
3320
3525
|
|
3321
3526
|
def _Image_from_vertex(
|
3322
|
-
api_client:
|
3527
|
+
api_client: BaseApiClient,
|
3323
3528
|
from_object: Union[dict, object],
|
3324
|
-
parent_object: dict = None,
|
3529
|
+
parent_object: Optional[dict] = None,
|
3325
3530
|
) -> dict:
|
3326
|
-
to_object = {}
|
3531
|
+
to_object: dict[str, Any] = {}
|
3327
3532
|
if getv(from_object, ['gcsUri']) is not None:
|
3328
3533
|
setv(to_object, ['gcs_uri'], getv(from_object, ['gcsUri']))
|
3329
3534
|
|
@@ -3341,11 +3546,11 @@ def _Image_from_vertex(
|
|
3341
3546
|
|
3342
3547
|
|
3343
3548
|
def _GeneratedImage_from_mldev(
|
3344
|
-
api_client:
|
3549
|
+
api_client: BaseApiClient,
|
3345
3550
|
from_object: Union[dict, object],
|
3346
|
-
parent_object: dict = None,
|
3551
|
+
parent_object: Optional[dict] = None,
|
3347
3552
|
) -> dict:
|
3348
|
-
to_object = {}
|
3553
|
+
to_object: dict[str, Any] = {}
|
3349
3554
|
if getv(from_object, ['_self']) is not None:
|
3350
3555
|
setv(
|
3351
3556
|
to_object,
|
@@ -3364,11 +3569,11 @@ def _GeneratedImage_from_mldev(
|
|
3364
3569
|
|
3365
3570
|
|
3366
3571
|
def _GeneratedImage_from_vertex(
|
3367
|
-
api_client:
|
3572
|
+
api_client: BaseApiClient,
|
3368
3573
|
from_object: Union[dict, object],
|
3369
|
-
parent_object: dict = None,
|
3574
|
+
parent_object: Optional[dict] = None,
|
3370
3575
|
) -> dict:
|
3371
|
-
to_object = {}
|
3576
|
+
to_object: dict[str, Any] = {}
|
3372
3577
|
if getv(from_object, ['_self']) is not None:
|
3373
3578
|
setv(
|
3374
3579
|
to_object,
|
@@ -3390,11 +3595,11 @@ def _GeneratedImage_from_vertex(
|
|
3390
3595
|
|
3391
3596
|
|
3392
3597
|
def _GenerateImagesResponse_from_mldev(
|
3393
|
-
api_client:
|
3598
|
+
api_client: BaseApiClient,
|
3394
3599
|
from_object: Union[dict, object],
|
3395
|
-
parent_object: dict = None,
|
3600
|
+
parent_object: Optional[dict] = None,
|
3396
3601
|
) -> dict:
|
3397
|
-
to_object = {}
|
3602
|
+
to_object: dict[str, Any] = {}
|
3398
3603
|
if getv(from_object, ['predictions']) is not None:
|
3399
3604
|
setv(
|
3400
3605
|
to_object,
|
@@ -3409,11 +3614,11 @@ def _GenerateImagesResponse_from_mldev(
|
|
3409
3614
|
|
3410
3615
|
|
3411
3616
|
def _GenerateImagesResponse_from_vertex(
|
3412
|
-
api_client:
|
3617
|
+
api_client: BaseApiClient,
|
3413
3618
|
from_object: Union[dict, object],
|
3414
|
-
parent_object: dict = None,
|
3619
|
+
parent_object: Optional[dict] = None,
|
3415
3620
|
) -> dict:
|
3416
|
-
to_object = {}
|
3621
|
+
to_object: dict[str, Any] = {}
|
3417
3622
|
if getv(from_object, ['predictions']) is not None:
|
3418
3623
|
setv(
|
3419
3624
|
to_object,
|
@@ -3428,11 +3633,11 @@ def _GenerateImagesResponse_from_vertex(
|
|
3428
3633
|
|
3429
3634
|
|
3430
3635
|
def _EditImageResponse_from_mldev(
|
3431
|
-
api_client:
|
3636
|
+
api_client: BaseApiClient,
|
3432
3637
|
from_object: Union[dict, object],
|
3433
|
-
parent_object: dict = None,
|
3638
|
+
parent_object: Optional[dict] = None,
|
3434
3639
|
) -> dict:
|
3435
|
-
to_object = {}
|
3640
|
+
to_object: dict[str, Any] = {}
|
3436
3641
|
if getv(from_object, ['predictions']) is not None:
|
3437
3642
|
setv(
|
3438
3643
|
to_object,
|
@@ -3447,11 +3652,11 @@ def _EditImageResponse_from_mldev(
|
|
3447
3652
|
|
3448
3653
|
|
3449
3654
|
def _EditImageResponse_from_vertex(
|
3450
|
-
api_client:
|
3655
|
+
api_client: BaseApiClient,
|
3451
3656
|
from_object: Union[dict, object],
|
3452
|
-
parent_object: dict = None,
|
3657
|
+
parent_object: Optional[dict] = None,
|
3453
3658
|
) -> dict:
|
3454
|
-
to_object = {}
|
3659
|
+
to_object: dict[str, Any] = {}
|
3455
3660
|
if getv(from_object, ['predictions']) is not None:
|
3456
3661
|
setv(
|
3457
3662
|
to_object,
|
@@ -3466,11 +3671,11 @@ def _EditImageResponse_from_vertex(
|
|
3466
3671
|
|
3467
3672
|
|
3468
3673
|
def _UpscaleImageResponse_from_mldev(
|
3469
|
-
api_client:
|
3674
|
+
api_client: BaseApiClient,
|
3470
3675
|
from_object: Union[dict, object],
|
3471
|
-
parent_object: dict = None,
|
3676
|
+
parent_object: Optional[dict] = None,
|
3472
3677
|
) -> dict:
|
3473
|
-
to_object = {}
|
3678
|
+
to_object: dict[str, Any] = {}
|
3474
3679
|
if getv(from_object, ['predictions']) is not None:
|
3475
3680
|
setv(
|
3476
3681
|
to_object,
|
@@ -3485,11 +3690,11 @@ def _UpscaleImageResponse_from_mldev(
|
|
3485
3690
|
|
3486
3691
|
|
3487
3692
|
def _UpscaleImageResponse_from_vertex(
|
3488
|
-
api_client:
|
3693
|
+
api_client: BaseApiClient,
|
3489
3694
|
from_object: Union[dict, object],
|
3490
|
-
parent_object: dict = None,
|
3695
|
+
parent_object: Optional[dict] = None,
|
3491
3696
|
) -> dict:
|
3492
|
-
to_object = {}
|
3697
|
+
to_object: dict[str, Any] = {}
|
3493
3698
|
if getv(from_object, ['predictions']) is not None:
|
3494
3699
|
setv(
|
3495
3700
|
to_object,
|
@@ -3504,21 +3709,21 @@ def _UpscaleImageResponse_from_vertex(
|
|
3504
3709
|
|
3505
3710
|
|
3506
3711
|
def _Endpoint_from_mldev(
|
3507
|
-
api_client:
|
3712
|
+
api_client: BaseApiClient,
|
3508
3713
|
from_object: Union[dict, object],
|
3509
|
-
parent_object: dict = None,
|
3714
|
+
parent_object: Optional[dict] = None,
|
3510
3715
|
) -> dict:
|
3511
|
-
to_object = {}
|
3716
|
+
to_object: dict[str, Any] = {}
|
3512
3717
|
|
3513
3718
|
return to_object
|
3514
3719
|
|
3515
3720
|
|
3516
3721
|
def _Endpoint_from_vertex(
|
3517
|
-
api_client:
|
3722
|
+
api_client: BaseApiClient,
|
3518
3723
|
from_object: Union[dict, object],
|
3519
|
-
parent_object: dict = None,
|
3724
|
+
parent_object: Optional[dict] = None,
|
3520
3725
|
) -> dict:
|
3521
|
-
to_object = {}
|
3726
|
+
to_object: dict[str, Any] = {}
|
3522
3727
|
if getv(from_object, ['endpoint']) is not None:
|
3523
3728
|
setv(to_object, ['name'], getv(from_object, ['endpoint']))
|
3524
3729
|
|
@@ -3531,11 +3736,11 @@ def _Endpoint_from_vertex(
|
|
3531
3736
|
|
3532
3737
|
|
3533
3738
|
def _TunedModelInfo_from_mldev(
|
3534
|
-
api_client:
|
3739
|
+
api_client: BaseApiClient,
|
3535
3740
|
from_object: Union[dict, object],
|
3536
|
-
parent_object: dict = None,
|
3741
|
+
parent_object: Optional[dict] = None,
|
3537
3742
|
) -> dict:
|
3538
|
-
to_object = {}
|
3743
|
+
to_object: dict[str, Any] = {}
|
3539
3744
|
if getv(from_object, ['baseModel']) is not None:
|
3540
3745
|
setv(to_object, ['base_model'], getv(from_object, ['baseModel']))
|
3541
3746
|
|
@@ -3549,11 +3754,11 @@ def _TunedModelInfo_from_mldev(
|
|
3549
3754
|
|
3550
3755
|
|
3551
3756
|
def _TunedModelInfo_from_vertex(
|
3552
|
-
api_client:
|
3757
|
+
api_client: BaseApiClient,
|
3553
3758
|
from_object: Union[dict, object],
|
3554
|
-
parent_object: dict = None,
|
3759
|
+
parent_object: Optional[dict] = None,
|
3555
3760
|
) -> dict:
|
3556
|
-
to_object = {}
|
3761
|
+
to_object: dict[str, Any] = {}
|
3557
3762
|
if (
|
3558
3763
|
getv(from_object, ['labels', 'google-vertex-llm-tuning-base-model-id'])
|
3559
3764
|
is not None
|
@@ -3574,11 +3779,11 @@ def _TunedModelInfo_from_vertex(
|
|
3574
3779
|
|
3575
3780
|
|
3576
3781
|
def _Model_from_mldev(
|
3577
|
-
api_client:
|
3782
|
+
api_client: BaseApiClient,
|
3578
3783
|
from_object: Union[dict, object],
|
3579
|
-
parent_object: dict = None,
|
3784
|
+
parent_object: Optional[dict] = None,
|
3580
3785
|
) -> dict:
|
3581
|
-
to_object = {}
|
3786
|
+
to_object: dict[str, Any] = {}
|
3582
3787
|
if getv(from_object, ['name']) is not None:
|
3583
3788
|
setv(to_object, ['name'], getv(from_object, ['name']))
|
3584
3789
|
|
@@ -3623,11 +3828,11 @@ def _Model_from_mldev(
|
|
3623
3828
|
|
3624
3829
|
|
3625
3830
|
def _Model_from_vertex(
|
3626
|
-
api_client:
|
3831
|
+
api_client: BaseApiClient,
|
3627
3832
|
from_object: Union[dict, object],
|
3628
|
-
parent_object: dict = None,
|
3833
|
+
parent_object: Optional[dict] = None,
|
3629
3834
|
) -> dict:
|
3630
|
-
to_object = {}
|
3835
|
+
to_object: dict[str, Any] = {}
|
3631
3836
|
if getv(from_object, ['name']) is not None:
|
3632
3837
|
setv(to_object, ['name'], getv(from_object, ['name']))
|
3633
3838
|
|
@@ -3666,11 +3871,11 @@ def _Model_from_vertex(
|
|
3666
3871
|
|
3667
3872
|
|
3668
3873
|
def _ListModelsResponse_from_mldev(
|
3669
|
-
api_client:
|
3874
|
+
api_client: BaseApiClient,
|
3670
3875
|
from_object: Union[dict, object],
|
3671
|
-
parent_object: dict = None,
|
3876
|
+
parent_object: Optional[dict] = None,
|
3672
3877
|
) -> dict:
|
3673
|
-
to_object = {}
|
3878
|
+
to_object: dict[str, Any] = {}
|
3674
3879
|
if getv(from_object, ['nextPageToken']) is not None:
|
3675
3880
|
setv(to_object, ['next_page_token'], getv(from_object, ['nextPageToken']))
|
3676
3881
|
|
@@ -3690,11 +3895,11 @@ def _ListModelsResponse_from_mldev(
|
|
3690
3895
|
|
3691
3896
|
|
3692
3897
|
def _ListModelsResponse_from_vertex(
|
3693
|
-
api_client:
|
3898
|
+
api_client: BaseApiClient,
|
3694
3899
|
from_object: Union[dict, object],
|
3695
|
-
parent_object: dict = None,
|
3900
|
+
parent_object: Optional[dict] = None,
|
3696
3901
|
) -> dict:
|
3697
|
-
to_object = {}
|
3902
|
+
to_object: dict[str, Any] = {}
|
3698
3903
|
if getv(from_object, ['nextPageToken']) is not None:
|
3699
3904
|
setv(to_object, ['next_page_token'], getv(from_object, ['nextPageToken']))
|
3700
3905
|
|
@@ -3714,76 +3919,286 @@ def _ListModelsResponse_from_vertex(
|
|
3714
3919
|
|
3715
3920
|
|
3716
3921
|
def _DeleteModelResponse_from_mldev(
|
3717
|
-
api_client:
|
3922
|
+
api_client: BaseApiClient,
|
3923
|
+
from_object: Union[dict, object],
|
3924
|
+
parent_object: Optional[dict] = None,
|
3925
|
+
) -> dict:
|
3926
|
+
to_object: dict[str, Any] = {}
|
3927
|
+
|
3928
|
+
return to_object
|
3929
|
+
|
3930
|
+
|
3931
|
+
def _DeleteModelResponse_from_vertex(
|
3932
|
+
api_client: BaseApiClient,
|
3933
|
+
from_object: Union[dict, object],
|
3934
|
+
parent_object: Optional[dict] = None,
|
3935
|
+
) -> dict:
|
3936
|
+
to_object: dict[str, Any] = {}
|
3937
|
+
|
3938
|
+
return to_object
|
3939
|
+
|
3940
|
+
|
3941
|
+
def _CountTokensResponse_from_mldev(
|
3942
|
+
api_client: BaseApiClient,
|
3718
3943
|
from_object: Union[dict, object],
|
3719
|
-
parent_object: dict = None,
|
3944
|
+
parent_object: Optional[dict] = None,
|
3720
3945
|
) -> dict:
|
3721
|
-
to_object = {}
|
3946
|
+
to_object: dict[str, Any] = {}
|
3947
|
+
if getv(from_object, ['totalTokens']) is not None:
|
3948
|
+
setv(to_object, ['total_tokens'], getv(from_object, ['totalTokens']))
|
3949
|
+
|
3950
|
+
if getv(from_object, ['cachedContentTokenCount']) is not None:
|
3951
|
+
setv(
|
3952
|
+
to_object,
|
3953
|
+
['cached_content_token_count'],
|
3954
|
+
getv(from_object, ['cachedContentTokenCount']),
|
3955
|
+
)
|
3956
|
+
|
3957
|
+
return to_object
|
3958
|
+
|
3959
|
+
|
3960
|
+
def _CountTokensResponse_from_vertex(
|
3961
|
+
api_client: BaseApiClient,
|
3962
|
+
from_object: Union[dict, object],
|
3963
|
+
parent_object: Optional[dict] = None,
|
3964
|
+
) -> dict:
|
3965
|
+
to_object: dict[str, Any] = {}
|
3966
|
+
if getv(from_object, ['totalTokens']) is not None:
|
3967
|
+
setv(to_object, ['total_tokens'], getv(from_object, ['totalTokens']))
|
3968
|
+
|
3969
|
+
return to_object
|
3970
|
+
|
3971
|
+
|
3972
|
+
def _ComputeTokensResponse_from_mldev(
|
3973
|
+
api_client: BaseApiClient,
|
3974
|
+
from_object: Union[dict, object],
|
3975
|
+
parent_object: Optional[dict] = None,
|
3976
|
+
) -> dict:
|
3977
|
+
to_object: dict[str, Any] = {}
|
3978
|
+
if getv(from_object, ['tokensInfo']) is not None:
|
3979
|
+
setv(to_object, ['tokens_info'], getv(from_object, ['tokensInfo']))
|
3980
|
+
|
3981
|
+
return to_object
|
3982
|
+
|
3983
|
+
|
3984
|
+
def _ComputeTokensResponse_from_vertex(
|
3985
|
+
api_client: BaseApiClient,
|
3986
|
+
from_object: Union[dict, object],
|
3987
|
+
parent_object: Optional[dict] = None,
|
3988
|
+
) -> dict:
|
3989
|
+
to_object: dict[str, Any] = {}
|
3990
|
+
if getv(from_object, ['tokensInfo']) is not None:
|
3991
|
+
setv(to_object, ['tokens_info'], getv(from_object, ['tokensInfo']))
|
3992
|
+
|
3993
|
+
return to_object
|
3994
|
+
|
3995
|
+
|
3996
|
+
def _Video_from_mldev(
|
3997
|
+
api_client: BaseApiClient,
|
3998
|
+
from_object: Union[dict, object],
|
3999
|
+
parent_object: Optional[dict] = None,
|
4000
|
+
) -> dict:
|
4001
|
+
to_object: dict[str, Any] = {}
|
4002
|
+
if getv(from_object, ['uri']) is not None:
|
4003
|
+
setv(to_object, ['uri'], getv(from_object, ['uri']))
|
4004
|
+
|
4005
|
+
if getv(from_object, ['encodedVideo']) is not None:
|
4006
|
+
setv(
|
4007
|
+
to_object,
|
4008
|
+
['video_bytes'],
|
4009
|
+
t.t_bytes(api_client, getv(from_object, ['encodedVideo'])),
|
4010
|
+
)
|
4011
|
+
|
4012
|
+
if getv(from_object, ['encoding']) is not None:
|
4013
|
+
setv(to_object, ['mime_type'], getv(from_object, ['encoding']))
|
4014
|
+
|
4015
|
+
return to_object
|
4016
|
+
|
4017
|
+
|
4018
|
+
def _Video_from_vertex(
|
4019
|
+
api_client: BaseApiClient,
|
4020
|
+
from_object: Union[dict, object],
|
4021
|
+
parent_object: Optional[dict] = None,
|
4022
|
+
) -> dict:
|
4023
|
+
to_object: dict[str, Any] = {}
|
4024
|
+
if getv(from_object, ['gcsUri']) is not None:
|
4025
|
+
setv(to_object, ['uri'], getv(from_object, ['gcsUri']))
|
4026
|
+
|
4027
|
+
if getv(from_object, ['bytesBase64Encoded']) is not None:
|
4028
|
+
setv(
|
4029
|
+
to_object,
|
4030
|
+
['video_bytes'],
|
4031
|
+
t.t_bytes(api_client, getv(from_object, ['bytesBase64Encoded'])),
|
4032
|
+
)
|
4033
|
+
|
4034
|
+
if getv(from_object, ['mimeType']) is not None:
|
4035
|
+
setv(to_object, ['mime_type'], getv(from_object, ['mimeType']))
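For orientation, a sketch of the shape this converter produces from a raw Vertex prediction (illustrative payload; None stands in for the client, which is only consulted when a base64 video field has to be decoded via t.t_bytes):

    raw = {'gcsUri': 'gs://bucket/video.mp4', 'mimeType': 'video/mp4'}
    video = _Video_from_vertex(None, raw)
    # video == {'uri': 'gs://bucket/video.mp4', 'mime_type': 'video/mp4'}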
|
4036
|
+
|
4037
|
+
return to_object
|
4038
|
+
|
4039
|
+
|
4040
|
+
def _GeneratedVideo_from_mldev(
|
4041
|
+
api_client: BaseApiClient,
|
4042
|
+
from_object: Union[dict, object],
|
4043
|
+
parent_object: Optional[dict] = None,
|
4044
|
+
) -> dict:
|
4045
|
+
to_object: dict[str, Any] = {}
|
4046
|
+
if getv(from_object, ['_self']) is not None:
|
4047
|
+
setv(
|
4048
|
+
to_object,
|
4049
|
+
['video'],
|
4050
|
+
_Video_from_mldev(api_client, getv(from_object, ['_self']), to_object),
|
4051
|
+
)
|
4052
|
+
|
4053
|
+
return to_object
|
4054
|
+
|
4055
|
+
|
4056
|
+
def _GeneratedVideo_from_vertex(
|
4057
|
+
api_client: BaseApiClient,
|
4058
|
+
from_object: Union[dict, object],
|
4059
|
+
parent_object: Optional[dict] = None,
|
4060
|
+
) -> dict:
|
4061
|
+
to_object: dict[str, Any] = {}
|
4062
|
+
if getv(from_object, ['_self']) is not None:
|
4063
|
+
setv(
|
4064
|
+
to_object,
|
4065
|
+
['video'],
|
4066
|
+
_Video_from_vertex(api_client, getv(from_object, ['_self']), to_object),
|
4067
|
+
)
|
4068
|
+
|
4069
|
+
return to_object
|
4070
|
+
|
4071
|
+
|
4072
|
+
def _GenerateVideosResponse_from_mldev(
|
4073
|
+
api_client: BaseApiClient,
|
4074
|
+
from_object: Union[dict, object],
|
4075
|
+
parent_object: Optional[dict] = None,
|
4076
|
+
) -> dict:
|
4077
|
+
to_object: dict[str, Any] = {}
|
4078
|
+
if getv(from_object, ['videos']) is not None:
|
4079
|
+
setv(
|
4080
|
+
to_object,
|
4081
|
+
['generated_videos'],
|
4082
|
+
[
|
4083
|
+
_GeneratedVideo_from_mldev(api_client, item, to_object)
|
4084
|
+
for item in getv(from_object, ['videos'])
|
4085
|
+
],
|
4086
|
+
)
|
4087
|
+
|
4088
|
+
if getv(from_object, ['raiMediaFilteredCount']) is not None:
|
4089
|
+
setv(
|
4090
|
+
to_object,
|
4091
|
+
['rai_media_filtered_count'],
|
4092
|
+
getv(from_object, ['raiMediaFilteredCount']),
|
4093
|
+
)
|
4094
|
+
|
4095
|
+
if getv(from_object, ['raiMediaFilteredReasons']) is not None:
|
4096
|
+
setv(
|
4097
|
+
to_object,
|
4098
|
+
['rai_media_filtered_reasons'],
|
4099
|
+
getv(from_object, ['raiMediaFilteredReasons']),
|
4100
|
+
)
|
4101
|
+
|
4102
|
+
return to_object
|
4103
|
+
|
4104
|
+
|
4105
|
+
def _GenerateVideosResponse_from_vertex(
|
4106
|
+
api_client: BaseApiClient,
|
4107
|
+
from_object: Union[dict, object],
|
4108
|
+
parent_object: Optional[dict] = None,
|
4109
|
+
) -> dict:
|
4110
|
+
to_object: dict[str, Any] = {}
|
4111
|
+
if getv(from_object, ['videos']) is not None:
|
4112
|
+
setv(
|
4113
|
+
to_object,
|
4114
|
+
['generated_videos'],
|
4115
|
+
[
|
4116
|
+
_GeneratedVideo_from_vertex(api_client, item, to_object)
|
4117
|
+
for item in getv(from_object, ['videos'])
|
4118
|
+
],
|
4119
|
+
)
|
4120
|
+
|
4121
|
+
if getv(from_object, ['raiMediaFilteredCount']) is not None:
|
4122
|
+
setv(
|
4123
|
+
to_object,
|
4124
|
+
['rai_media_filtered_count'],
|
4125
|
+
getv(from_object, ['raiMediaFilteredCount']),
|
4126
|
+
)
|
4127
|
+
|
4128
|
+
if getv(from_object, ['raiMediaFilteredReasons']) is not None:
|
4129
|
+
setv(
|
4130
|
+
to_object,
|
4131
|
+
['rai_media_filtered_reasons'],
|
4132
|
+
getv(from_object, ['raiMediaFilteredReasons']),
|
4133
|
+
)
|
3722
4134
|
|
3723
4135
|
return to_object
|
3724
4136
|
|
3725
4137
|
|
3726
|
-
def _DeleteModelResponse_from_vertex(
|
3727
|
-
api_client: ApiClient,
|
4138
|
+
def _GenerateVideosOperation_from_mldev(
|
4139
|
+
api_client: BaseApiClient,
|
3728
4140
|
from_object: Union[dict, object],
|
3729
|
-
parent_object: dict = None,
|
4141
|
+
parent_object: Optional[dict] = None,
|
3730
4142
|
) -> dict:
|
3731
|
-
to_object = {}
|
4143
|
+
to_object: dict[str, Any] = {}
|
4144
|
+
if getv(from_object, ['name']) is not None:
|
4145
|
+
setv(to_object, ['name'], getv(from_object, ['name']))
|
3732
4146
|
|
3733
|
-
|
4147
|
+
if getv(from_object, ['metadata']) is not None:
|
4148
|
+
setv(to_object, ['metadata'], getv(from_object, ['metadata']))
|
3734
4149
|
|
4150
|
+
if getv(from_object, ['done']) is not None:
|
4151
|
+
setv(to_object, ['done'], getv(from_object, ['done']))
|
3735
4152
|
|
3736
|
-
|
3737
|
-
|
3738
|
-
from_object: Union[dict, object],
|
3739
|
-
parent_object: dict = None,
|
3740
|
-
) -> dict:
|
3741
|
-
to_object = {}
|
3742
|
-
if getv(from_object, ['totalTokens']) is not None:
|
3743
|
-
setv(to_object, ['total_tokens'], getv(from_object, ['totalTokens']))
|
4153
|
+
if getv(from_object, ['error']) is not None:
|
4154
|
+
setv(to_object, ['error'], getv(from_object, ['error']))
|
3744
4155
|
|
3745
|
-
if getv(from_object, ['
|
4156
|
+
if getv(from_object, ['response']) is not None:
|
4157
|
+
setv(to_object, ['response'], getv(from_object, ['response']))
|
4158
|
+
|
4159
|
+
if getv(from_object, ['response', 'generateVideoResponse']) is not None:
|
3746
4160
|
setv(
|
3747
4161
|
to_object,
|
3748
|
-
['
|
3749
|
-
|
4162
|
+
['result'],
|
4163
|
+
_GenerateVideosResponse_from_mldev(
|
4164
|
+
api_client,
|
4165
|
+
getv(from_object, ['response', 'generateVideoResponse']),
|
4166
|
+
to_object,
|
4167
|
+
),
|
3750
4168
|
)
|
3751
4169
|
|
3752
4170
|
return to_object
|
3753
4171
|
|
3754
4172
|
|
3755
|
-
def
|
3756
|
-
api_client:
|
4173
|
+
def _GenerateVideosOperation_from_vertex(
|
4174
|
+
api_client: BaseApiClient,
|
3757
4175
|
from_object: Union[dict, object],
|
3758
|
-
parent_object: dict = None,
|
4176
|
+
parent_object: Optional[dict] = None,
|
3759
4177
|
) -> dict:
|
3760
|
-
to_object = {}
|
3761
|
-
if getv(from_object, ['
|
3762
|
-
setv(to_object, ['
|
3763
|
-
|
3764
|
-
return to_object
|
4178
|
+
to_object: dict[str, Any] = {}
|
4179
|
+
if getv(from_object, ['name']) is not None:
|
4180
|
+
setv(to_object, ['name'], getv(from_object, ['name']))
|
3765
4181
|
|
4182
|
+
if getv(from_object, ['metadata']) is not None:
|
4183
|
+
setv(to_object, ['metadata'], getv(from_object, ['metadata']))
|
3766
4184
|
|
3767
|
-
|
3768
|
-
|
3769
|
-
from_object: Union[dict, object],
|
3770
|
-
parent_object: dict = None,
|
3771
|
-
) -> dict:
|
3772
|
-
to_object = {}
|
3773
|
-
if getv(from_object, ['tokensInfo']) is not None:
|
3774
|
-
setv(to_object, ['tokens_info'], getv(from_object, ['tokensInfo']))
|
4185
|
+
if getv(from_object, ['done']) is not None:
|
4186
|
+
setv(to_object, ['done'], getv(from_object, ['done']))
|
3775
4187
|
|
3776
|
-
|
4188
|
+
if getv(from_object, ['error']) is not None:
|
4189
|
+
setv(to_object, ['error'], getv(from_object, ['error']))
|
3777
4190
|
|
4191
|
+
if getv(from_object, ['response']) is not None:
|
4192
|
+
setv(to_object, ['response'], getv(from_object, ['response']))
|
3778
4193
|
|
3779
|
-
|
3780
|
-
|
3781
|
-
|
3782
|
-
|
3783
|
-
|
3784
|
-
|
3785
|
-
|
3786
|
-
|
4194
|
+
if getv(from_object, ['response']) is not None:
|
4195
|
+
setv(
|
4196
|
+
to_object,
|
4197
|
+
['result'],
|
4198
|
+
_GenerateVideosResponse_from_vertex(
|
4199
|
+
api_client, getv(from_object, ['response']), to_object
|
4200
|
+
),
|
4201
|
+
)
|
3787
4202
|
|
3788
4203
|
return to_object
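A sketch of what the two operation converters above do with a finished long-running-operation payload (field names taken from the code above; the values and operation name are illustrative, and None is passed for the client because nothing in this path needs it):

    op = {
        'name': 'operations/generate-videos-123',
        'done': True,
        'response': {'generateVideoResponse': {'videos': []}},
    }
    converted = _GenerateVideosOperation_from_mldev(None, op)
    # converted['done'] is True, and converted['result'] holds the
    # _GenerateVideosResponse_from_mldev(...) translation of the inner payload.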
|
3789
4204
|
|
@@ -3803,23 +4218,33 @@ class Models(_api_module.BaseModule):
|
|
3803
4218
|
config=config,
|
3804
4219
|
)
|
3805
4220
|
|
4221
|
+
request_url_dict: Optional[dict[str, str]]
|
4222
|
+
|
3806
4223
|
if self._api_client.vertexai:
|
3807
4224
|
request_dict = _GenerateContentParameters_to_vertex(
|
3808
4225
|
self._api_client, parameter_model
|
3809
4226
|
)
|
3810
|
-
|
4227
|
+
request_url_dict = request_dict.get('_url')
|
4228
|
+
if request_url_dict:
|
4229
|
+
path = '{model}:generateContent'.format_map(request_url_dict)
|
4230
|
+
else:
|
4231
|
+
path = '{model}:generateContent'
|
3811
4232
|
else:
|
3812
4233
|
request_dict = _GenerateContentParameters_to_mldev(
|
3813
4234
|
self._api_client, parameter_model
|
3814
4235
|
)
|
3815
|
-
|
4236
|
+
request_url_dict = request_dict.get('_url')
|
4237
|
+
if request_url_dict:
|
4238
|
+
path = '{model}:generateContent'.format_map(request_url_dict)
|
4239
|
+
else:
|
4240
|
+
path = '{model}:generateContent'
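The path handling above is plain string templating; a short sketch with an assumed model name:

    request_url_dict = {'model': 'models/gemini-2.0-flash'}  # illustrative value
    path = '{model}:generateContent'.format_map(request_url_dict)
    # path == 'models/gemini-2.0-flash:generateContent'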
|
3816
4241
|
query_params = request_dict.get('_query')
|
3817
4242
|
if query_params:
|
3818
4243
|
path = f'{path}?{urlencode(query_params)}'
|
3819
4244
|
# TODO: remove the hack that pops config.
|
3820
4245
|
request_dict.pop('config', None)
|
3821
4246
|
|
3822
|
-
http_options = None
|
4247
|
+
http_options: Optional[types.HttpOptionsOrDict] = None
|
3823
4248
|
if isinstance(config, dict):
|
3824
4249
|
http_options = config.get('http_options', None)
|
3825
4250
|
elif hasattr(config, 'http_options'):
|
@@ -3842,7 +4267,7 @@ class Models(_api_module.BaseModule):
|
|
3842
4267
|
)
|
3843
4268
|
|
3844
4269
|
return_value = types.GenerateContentResponse._from_response(
|
3845
|
-
response=response_dict, kwargs=parameter_model
|
4270
|
+
response=response_dict, kwargs=parameter_model.model_dump()
|
3846
4271
|
)
|
3847
4272
|
self._api_client._verify_response(return_value)
|
3848
4273
|
return return_value
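The kwargs change above relies on pydantic's model_dump(), which turns the parameter model into a plain dict before it is handed to _from_response; a minimal sketch with a stand-in model (not the SDK's generated class):

    from pydantic import BaseModel

    class _Params(BaseModel):  # stand-in for the generated parameter model
      model: str
      contents: list

    p = _Params(model='gemini-2.0-flash', contents=['Hello'])
    p.model_dump()  # {'model': 'gemini-2.0-flash', 'contents': ['Hello']}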
|
@@ -3860,27 +4285,37 @@ class Models(_api_module.BaseModule):
|
|
3860
4285
|
config=config,
|
3861
4286
|
)
|
3862
4287
|
|
4288
|
+
request_url_dict: Optional[dict[str, str]]
|
4289
|
+
|
3863
4290
|
if self._api_client.vertexai:
|
3864
4291
|
request_dict = _GenerateContentParameters_to_vertex(
|
3865
4292
|
self._api_client, parameter_model
|
3866
4293
|
)
|
3867
|
-
|
3868
|
-
|
3869
|
-
|
4294
|
+
request_url_dict = request_dict.get('_url')
|
4295
|
+
if request_url_dict:
|
4296
|
+
path = '{model}:streamGenerateContent?alt=sse'.format_map(
|
4297
|
+
request_url_dict
|
4298
|
+
)
|
4299
|
+
else:
|
4300
|
+
path = '{model}:streamGenerateContent?alt=sse'
|
3870
4301
|
else:
|
3871
4302
|
request_dict = _GenerateContentParameters_to_mldev(
|
3872
4303
|
self._api_client, parameter_model
|
3873
4304
|
)
|
3874
|
-
|
3875
|
-
|
3876
|
-
|
4305
|
+
request_url_dict = request_dict.get('_url')
|
4306
|
+
if request_url_dict:
|
4307
|
+
path = '{model}:streamGenerateContent?alt=sse'.format_map(
|
4308
|
+
request_url_dict
|
4309
|
+
)
|
4310
|
+
else:
|
4311
|
+
path = '{model}:streamGenerateContent?alt=sse'
|
3877
4312
|
query_params = request_dict.get('_query')
|
3878
4313
|
if query_params:
|
3879
4314
|
path = f'{path}?{urlencode(query_params)}'
|
3880
4315
|
# TODO: remove the hack that pops config.
|
3881
4316
|
request_dict.pop('config', None)
|
3882
4317
|
|
3883
|
-
http_options = None
|
4318
|
+
http_options: Optional[types.HttpOptionsOrDict] = None
|
3884
4319
|
if isinstance(config, dict):
|
3885
4320
|
http_options = config.get('http_options', None)
|
3886
4321
|
elif hasattr(config, 'http_options'):
|
@@ -3903,7 +4338,7 @@ class Models(_api_module.BaseModule):
|
|
3903
4338
|
)
|
3904
4339
|
|
3905
4340
|
return_value = types.GenerateContentResponse._from_response(
|
3906
|
-
response=response_dict, kwargs=parameter_model
|
4341
|
+
response=response_dict, kwargs=parameter_model.model_dump()
|
3907
4342
|
)
|
3908
4343
|
self._api_client._verify_response(return_value)
|
3909
4344
|
yield return_value
|
@@ -3915,7 +4350,7 @@ class Models(_api_module.BaseModule):
|
|
3915
4350
|
contents: Union[types.ContentListUnion, types.ContentListUnionDict],
|
3916
4351
|
config: Optional[types.EmbedContentConfigOrDict] = None,
|
3917
4352
|
) -> types.EmbedContentResponse:
|
3918
|
-
"""Calculates embeddings for the given contents
|
4353
|
+
"""Calculates embeddings for the given contents. Only text is supported.
|
3919
4354
|
|
3920
4355
|
Args:
|
3921
4356
|
model (str): The model to use.
|
@@ -3944,23 +4379,33 @@ class Models(_api_module.BaseModule):
|
|
3944
4379
|
config=config,
|
3945
4380
|
)
|
3946
4381
|
|
4382
|
+
request_url_dict: Optional[dict[str, str]]
|
4383
|
+
|
3947
4384
|
if self._api_client.vertexai:
|
3948
4385
|
request_dict = _EmbedContentParameters_to_vertex(
|
3949
4386
|
self._api_client, parameter_model
|
3950
4387
|
)
|
3951
|
-
|
4388
|
+
request_url_dict = request_dict.get('_url')
|
4389
|
+
if request_url_dict:
|
4390
|
+
path = '{model}:predict'.format_map(request_url_dict)
|
4391
|
+
else:
|
4392
|
+
path = '{model}:predict'
|
3952
4393
|
else:
|
3953
4394
|
request_dict = _EmbedContentParameters_to_mldev(
|
3954
4395
|
self._api_client, parameter_model
|
3955
4396
|
)
|
3956
|
-
|
4397
|
+
request_url_dict = request_dict.get('_url')
|
4398
|
+
if request_url_dict:
|
4399
|
+
path = '{model}:batchEmbedContents'.format_map(request_url_dict)
|
4400
|
+
else:
|
4401
|
+
path = '{model}:batchEmbedContents'
|
3957
4402
|
query_params = request_dict.get('_query')
|
3958
4403
|
if query_params:
|
3959
4404
|
path = f'{path}?{urlencode(query_params)}'
|
3960
4405
|
# TODO: remove the hack that pops config.
|
3961
4406
|
request_dict.pop('config', None)
|
3962
4407
|
|
3963
|
-
http_options = None
|
4408
|
+
http_options: Optional[types.HttpOptionsOrDict] = None
|
3964
4409
|
if isinstance(config, dict):
|
3965
4410
|
http_options = config.get('http_options', None)
|
3966
4411
|
elif hasattr(config, 'http_options'):
|
@@ -3983,7 +4428,7 @@ class Models(_api_module.BaseModule):
|
|
3983
4428
|
)
|
3984
4429
|
|
3985
4430
|
return_value = types.EmbedContentResponse._from_response(
|
3986
|
-
response=response_dict, kwargs=parameter_model
|
4431
|
+
response=response_dict, kwargs=parameter_model.model_dump()
|
3987
4432
|
)
|
3988
4433
|
self._api_client._verify_response(return_value)
|
3989
4434
|
return return_value
|
@@ -4007,7 +4452,7 @@ class Models(_api_module.BaseModule):
|
|
4007
4452
|
.. code-block:: python
|
4008
4453
|
|
4009
4454
|
response = client.models.generate_images(
|
4010
|
-
model='imagen-3.0-generate-
|
4455
|
+
model='imagen-3.0-generate-002',
|
4011
4456
|
prompt='Man with a dog',
|
4012
4457
|
config=types.GenerateImagesConfig(
|
4013
4458
|
number_of_images= 1,
|
@@ -4024,23 +4469,33 @@ class Models(_api_module.BaseModule):
|
|
4024
4469
|
config=config,
|
4025
4470
|
)
|
4026
4471
|
|
4472
|
+
request_url_dict: Optional[dict[str, str]]
|
4473
|
+
|
4027
4474
|
if self._api_client.vertexai:
|
4028
4475
|
request_dict = _GenerateImagesParameters_to_vertex(
|
4029
4476
|
self._api_client, parameter_model
|
4030
4477
|
)
|
4031
|
-
|
4478
|
+
request_url_dict = request_dict.get('_url')
|
4479
|
+
if request_url_dict:
|
4480
|
+
path = '{model}:predict'.format_map(request_url_dict)
|
4481
|
+
else:
|
4482
|
+
path = '{model}:predict'
|
4032
4483
|
else:
|
4033
4484
|
request_dict = _GenerateImagesParameters_to_mldev(
|
4034
4485
|
self._api_client, parameter_model
|
4035
4486
|
)
|
4036
|
-
|
4487
|
+
request_url_dict = request_dict.get('_url')
|
4488
|
+
if request_url_dict:
|
4489
|
+
path = '{model}:predict'.format_map(request_url_dict)
|
4490
|
+
else:
|
4491
|
+
path = '{model}:predict'
|
4037
4492
|
query_params = request_dict.get('_query')
|
4038
4493
|
if query_params:
|
4039
4494
|
path = f'{path}?{urlencode(query_params)}'
|
4040
4495
|
# TODO: remove the hack that pops config.
|
4041
4496
|
request_dict.pop('config', None)
|
4042
4497
|
|
4043
|
-
http_options = None
|
4498
|
+
http_options: Optional[types.HttpOptionsOrDict] = None
|
4044
4499
|
if isinstance(config, dict):
|
4045
4500
|
http_options = config.get('http_options', None)
|
4046
4501
|
elif hasattr(config, 'http_options'):
|
@@ -4063,7 +4518,7 @@ class Models(_api_module.BaseModule):
|
|
4063
4518
|
)
|
4064
4519
|
|
4065
4520
|
return_value = types.GenerateImagesResponse._from_response(
|
4066
|
-
response=response_dict, kwargs=parameter_model
|
4521
|
+
response=response_dict, kwargs=parameter_model.model_dump()
|
4067
4522
|
)
|
4068
4523
|
self._api_client._verify_response(return_value)
|
4069
4524
|
return return_value
|
@@ -4105,7 +4560,7 @@ class Models(_api_module.BaseModule):
|
|
4105
4560
|
),
|
4106
4561
|
)
|
4107
4562
|
response = client.models.edit_image(
|
4108
|
-
model='imagen-3.0-capability-
|
4563
|
+
model='imagen-3.0-capability-001',
|
4109
4564
|
prompt='man with dog',
|
4110
4565
|
reference_images=[raw_ref_image, mask_ref_image],
|
4111
4566
|
config=types.EditImageConfig(
|
@@ -4125,13 +4580,18 @@ class Models(_api_module.BaseModule):
|
|
4125
4580
|
config=config,
|
4126
4581
|
)
|
4127
4582
|
|
4583
|
+
request_url_dict: Optional[dict[str, str]]
|
4128
4584
|
if not self._api_client.vertexai:
|
4129
4585
|
raise ValueError('This method is only supported in the Vertex AI client.')
|
4130
4586
|
else:
|
4131
4587
|
request_dict = _EditImageParameters_to_vertex(
|
4132
4588
|
self._api_client, parameter_model
|
4133
4589
|
)
|
4134
|
-
|
4590
|
+
request_url_dict = request_dict.get('_url')
|
4591
|
+
if request_url_dict:
|
4592
|
+
path = '{model}:predict'.format_map(request_url_dict)
|
4593
|
+
else:
|
4594
|
+
path = '{model}:predict'
|
4135
4595
|
|
4136
4596
|
query_params = request_dict.get('_query')
|
4137
4597
|
if query_params:
|
@@ -4139,7 +4599,7 @@ class Models(_api_module.BaseModule):
|
|
4139
4599
|
# TODO: remove the hack that pops config.
|
4140
4600
|
request_dict.pop('config', None)
|
4141
4601
|
|
4142
|
-
http_options = None
|
4602
|
+
http_options: Optional[types.HttpOptionsOrDict] = None
|
4143
4603
|
if isinstance(config, dict):
|
4144
4604
|
http_options = config.get('http_options', None)
|
4145
4605
|
elif hasattr(config, 'http_options'):
|
@@ -4162,7 +4622,7 @@ class Models(_api_module.BaseModule):
|
|
4162
4622
|
)
|
4163
4623
|
|
4164
4624
|
return_value = types.EditImageResponse._from_response(
|
4165
|
-
response=response_dict, kwargs=parameter_model
|
4625
|
+
response=response_dict, kwargs=parameter_model.model_dump()
|
4166
4626
|
)
|
4167
4627
|
self._api_client._verify_response(return_value)
|
4168
4628
|
return return_value
|
@@ -4191,13 +4651,18 @@ class Models(_api_module.BaseModule):
|
|
4191
4651
|
config=config,
|
4192
4652
|
)
|
4193
4653
|
|
4654
|
+
request_url_dict: Optional[dict[str, str]]
|
4194
4655
|
if not self._api_client.vertexai:
|
4195
4656
|
raise ValueError('This method is only supported in the Vertex AI client.')
|
4196
4657
|
else:
|
4197
4658
|
request_dict = _UpscaleImageAPIParameters_to_vertex(
|
4198
4659
|
self._api_client, parameter_model
|
4199
4660
|
)
|
4200
|
-
|
4661
|
+
request_url_dict = request_dict.get('_url')
|
4662
|
+
if request_url_dict:
|
4663
|
+
path = '{model}:predict'.format_map(request_url_dict)
|
4664
|
+
else:
|
4665
|
+
path = '{model}:predict'
|
4201
4666
|
|
4202
4667
|
query_params = request_dict.get('_query')
|
4203
4668
|
if query_params:
|
@@ -4205,7 +4670,7 @@ class Models(_api_module.BaseModule):
|
|
4205
4670
|
# TODO: remove the hack that pops config.
|
4206
4671
|
request_dict.pop('config', None)
|
4207
4672
|
|
4208
|
-
http_options = None
|
4673
|
+
http_options: Optional[types.HttpOptionsOrDict] = None
|
4209
4674
|
if isinstance(config, dict):
|
4210
4675
|
http_options = config.get('http_options', None)
|
4211
4676
|
elif hasattr(config, 'http_options'):
|
@@ -4228,7 +4693,7 @@ class Models(_api_module.BaseModule):
|
|
4228
4693
|
)
|
4229
4694
|
|
4230
4695
|
return_value = types.UpscaleImageResponse._from_response(
|
4231
|
-
response=response_dict, kwargs=parameter_model
|
4696
|
+
response=response_dict, kwargs=parameter_model.model_dump()
|
4232
4697
|
)
|
4233
4698
|
self._api_client._verify_response(return_value)
|
4234
4699
|
return return_value
|
@@ -4241,23 +4706,33 @@ class Models(_api_module.BaseModule):
|
|
4241
4706
|
config=config,
|
4242
4707
|
)
|
4243
4708
|
|
4709
|
+
request_url_dict: Optional[dict[str, str]]
|
4710
|
+
|
4244
4711
|
if self._api_client.vertexai:
|
4245
4712
|
request_dict = _GetModelParameters_to_vertex(
|
4246
4713
|
self._api_client, parameter_model
|
4247
4714
|
)
|
4248
|
-
|
4715
|
+
request_url_dict = request_dict.get('_url')
|
4716
|
+
if request_url_dict:
|
4717
|
+
path = '{name}'.format_map(request_url_dict)
|
4718
|
+
else:
|
4719
|
+
path = '{name}'
|
4249
4720
|
else:
|
4250
4721
|
request_dict = _GetModelParameters_to_mldev(
|
4251
4722
|
self._api_client, parameter_model
|
4252
4723
|
)
|
4253
|
-
|
4724
|
+
request_url_dict = request_dict.get('_url')
|
4725
|
+
if request_url_dict:
|
4726
|
+
path = '{name}'.format_map(request_url_dict)
|
4727
|
+
else:
|
4728
|
+
path = '{name}'
|
4254
4729
|
query_params = request_dict.get('_query')
|
4255
4730
|
if query_params:
|
4256
4731
|
path = f'{path}?{urlencode(query_params)}'
|
4257
4732
|
# TODO: remove the hack that pops config.
|
4258
4733
|
request_dict.pop('config', None)
|
4259
4734
|
|
4260
|
-
http_options = None
|
4735
|
+
http_options: Optional[types.HttpOptionsOrDict] = None
|
4261
4736
|
if isinstance(config, dict):
|
4262
4737
|
http_options = config.get('http_options', None)
|
4263
4738
|
elif hasattr(config, 'http_options'):
|
@@ -4276,7 +4751,7 @@ class Models(_api_module.BaseModule):
|
|
4276
4751
|
response_dict = _Model_from_mldev(self._api_client, response_dict)
|
4277
4752
|
|
4278
4753
|
return_value = types.Model._from_response(
|
4279
|
-
response=response_dict, kwargs=parameter_model
|
4754
|
+
response=response_dict, kwargs=parameter_model.model_dump()
|
4280
4755
|
)
|
4281
4756
|
self._api_client._verify_response(return_value)
|
4282
4757
|
return return_value
|
@@ -4288,23 +4763,33 @@ class Models(_api_module.BaseModule):
|
|
4288
4763
|
config=config,
|
4289
4764
|
)
|
4290
4765
|
|
4766
|
+
request_url_dict: Optional[dict[str, str]]
|
4767
|
+
|
4291
4768
|
if self._api_client.vertexai:
|
4292
4769
|
request_dict = _ListModelsParameters_to_vertex(
|
4293
4770
|
self._api_client, parameter_model
|
4294
4771
|
)
|
4295
|
-
|
4772
|
+
request_url_dict = request_dict.get('_url')
|
4773
|
+
if request_url_dict:
|
4774
|
+
path = '{models_url}'.format_map(request_url_dict)
|
4775
|
+
else:
|
4776
|
+
path = '{models_url}'
|
4296
4777
|
else:
|
4297
4778
|
request_dict = _ListModelsParameters_to_mldev(
|
4298
4779
|
self._api_client, parameter_model
|
4299
4780
|
)
|
4300
|
-
|
4781
|
+
request_url_dict = request_dict.get('_url')
|
4782
|
+
if request_url_dict:
|
4783
|
+
path = '{models_url}'.format_map(request_url_dict)
|
4784
|
+
else:
|
4785
|
+
path = '{models_url}'
|
4301
4786
|
query_params = request_dict.get('_query')
|
4302
4787
|
if query_params:
|
4303
4788
|
path = f'{path}?{urlencode(query_params)}'
|
4304
4789
|
# TODO: remove the hack that pops config.
|
4305
4790
|
request_dict.pop('config', None)
|
4306
4791
|
|
4307
|
-
http_options = None
|
4792
|
+
http_options: Optional[types.HttpOptionsOrDict] = None
|
4308
4793
|
if isinstance(config, dict):
|
4309
4794
|
http_options = config.get('http_options', None)
|
4310
4795
|
elif hasattr(config, 'http_options'):
|
@@ -4327,7 +4812,7 @@ class Models(_api_module.BaseModule):
|
|
4327
4812
|
)
|
4328
4813
|
|
4329
4814
|
return_value = types.ListModelsResponse._from_response(
|
4330
|
-
response=response_dict, kwargs=parameter_model
|
4815
|
+
response=response_dict, kwargs=parameter_model.model_dump()
|
4331
4816
|
)
|
4332
4817
|
self._api_client._verify_response(return_value)
|
4333
4818
|
return return_value
|
@@ -4343,23 +4828,33 @@ class Models(_api_module.BaseModule):
|
|
4343
4828
|
config=config,
|
4344
4829
|
)
|
4345
4830
|
|
4831
|
+
request_url_dict: Optional[dict[str, str]]
|
4832
|
+
|
4346
4833
|
if self._api_client.vertexai:
|
4347
4834
|
request_dict = _UpdateModelParameters_to_vertex(
|
4348
4835
|
self._api_client, parameter_model
|
4349
4836
|
)
|
4350
|
-
|
4837
|
+
request_url_dict = request_dict.get('_url')
|
4838
|
+
if request_url_dict:
|
4839
|
+
path = '{model}'.format_map(request_url_dict)
|
4840
|
+
else:
|
4841
|
+
path = '{model}'
|
4351
4842
|
else:
|
4352
4843
|
request_dict = _UpdateModelParameters_to_mldev(
|
4353
4844
|
self._api_client, parameter_model
|
4354
4845
|
)
|
4355
|
-
|
4846
|
+
request_url_dict = request_dict.get('_url')
|
4847
|
+
if request_url_dict:
|
4848
|
+
path = '{name}'.format_map(request_url_dict)
|
4849
|
+
else:
|
4850
|
+
path = '{name}'
|
4356
4851
|
query_params = request_dict.get('_query')
|
4357
4852
|
if query_params:
|
4358
4853
|
path = f'{path}?{urlencode(query_params)}'
|
4359
4854
|
# TODO: remove the hack that pops config.
|
4360
4855
|
request_dict.pop('config', None)
|
4361
4856
|
|
4362
|
-
http_options = None
|
4857
|
+
http_options: Optional[types.HttpOptionsOrDict] = None
|
4363
4858
|
if isinstance(config, dict):
|
4364
4859
|
http_options = config.get('http_options', None)
|
4365
4860
|
elif hasattr(config, 'http_options'):
|
@@ -4378,7 +4873,7 @@ class Models(_api_module.BaseModule):
|
|
4378
4873
|
response_dict = _Model_from_mldev(self._api_client, response_dict)
|
4379
4874
|
|
4380
4875
|
return_value = types.Model._from_response(
|
4381
|
-
response=response_dict, kwargs=parameter_model
|
4876
|
+
response=response_dict, kwargs=parameter_model.model_dump()
|
4382
4877
|
)
|
4383
4878
|
self._api_client._verify_response(return_value)
|
4384
4879
|
return return_value
|
@@ -4394,23 +4889,33 @@ class Models(_api_module.BaseModule):
|
|
4394
4889
|
config=config,
|
4395
4890
|
)
|
4396
4891
|
|
4892
|
+
request_url_dict: Optional[dict[str, str]]
|
4893
|
+
|
4397
4894
|
if self._api_client.vertexai:
|
4398
4895
|
request_dict = _DeleteModelParameters_to_vertex(
|
4399
4896
|
self._api_client, parameter_model
|
4400
4897
|
)
|
4401
|
-
|
4898
|
+
request_url_dict = request_dict.get('_url')
|
4899
|
+
if request_url_dict:
|
4900
|
+
path = '{name}'.format_map(request_url_dict)
|
4901
|
+
else:
|
4902
|
+
path = '{name}'
|
4402
4903
|
else:
|
4403
4904
|
request_dict = _DeleteModelParameters_to_mldev(
|
4404
4905
|
self._api_client, parameter_model
|
4405
4906
|
)
|
4406
|
-
|
4907
|
+
request_url_dict = request_dict.get('_url')
|
4908
|
+
if request_url_dict:
|
4909
|
+
path = '{name}'.format_map(request_url_dict)
|
4910
|
+
else:
|
4911
|
+
path = '{name}'
|
4407
4912
|
query_params = request_dict.get('_query')
|
4408
4913
|
if query_params:
|
4409
4914
|
path = f'{path}?{urlencode(query_params)}'
|
4410
4915
|
# TODO: remove the hack that pops config.
|
4411
4916
|
request_dict.pop('config', None)
|
4412
4917
|
|
4413
|
-
http_options = None
|
4918
|
+
http_options: Optional[types.HttpOptionsOrDict] = None
|
4414
4919
|
if isinstance(config, dict):
|
4415
4920
|
http_options = config.get('http_options', None)
|
4416
4921
|
elif hasattr(config, 'http_options'):
|
@@ -4433,7 +4938,7 @@ class Models(_api_module.BaseModule):
|
|
4433
4938
|
)
|
4434
4939
|
|
4435
4940
|
return_value = types.DeleteModelResponse._from_response(
|
4436
|
-
response=response_dict, kwargs=parameter_model
|
4941
|
+
response=response_dict, kwargs=parameter_model.model_dump()
|
4437
4942
|
)
|
4438
4943
|
self._api_client._verify_response(return_value)
|
4439
4944
|
return return_value
|
@@ -4447,10 +4952,11 @@ class Models(_api_module.BaseModule):
   ) -> types.CountTokensResponse:
     """Counts the number of tokens in the given content.

+    Multimodal input is supported for Gemini models.
+
     Args:
       model (str): The model to use for counting tokens.
       contents (list[types.Content]): The content to count tokens for.
-        Multimodal input is supported for Gemini models.
       config (CountTokensConfig): The configuration for counting tokens.

     Usage:
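With the multimodal note moved under the summary line, here is a hedged usage sketch for counting tokens over mixed text and image input. The bucket URI and model name are placeholders, and `Part.from_uri` is assumed to be available as in the SDK's other examples:

```python
from google import genai
from google.genai import types

client = genai.Client()
response = client.models.count_tokens(
    model='gemini-1.5-flash',
    contents=[
        'Describe this picture in one sentence.',
        # Placeholder Cloud Storage URI; any supported image reference works.
        types.Part.from_uri(file_uri='gs://my-bucket/cat.jpg', mime_type='image/jpeg'),
    ],
)
print(response.total_tokens)
```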
@@ -4471,23 +4977,33 @@ class Models(_api_module.BaseModule):
|
|
4471
4977
|
config=config,
|
4472
4978
|
)
|
4473
4979
|
|
4980
|
+
request_url_dict: Optional[dict[str, str]]
|
4981
|
+
|
4474
4982
|
if self._api_client.vertexai:
|
4475
4983
|
request_dict = _CountTokensParameters_to_vertex(
|
4476
4984
|
self._api_client, parameter_model
|
4477
4985
|
)
|
4478
|
-
|
4986
|
+
request_url_dict = request_dict.get('_url')
|
4987
|
+
if request_url_dict:
|
4988
|
+
path = '{model}:countTokens'.format_map(request_url_dict)
|
4989
|
+
else:
|
4990
|
+
path = '{model}:countTokens'
|
4479
4991
|
else:
|
4480
4992
|
request_dict = _CountTokensParameters_to_mldev(
|
4481
4993
|
self._api_client, parameter_model
|
4482
4994
|
)
|
4483
|
-
|
4995
|
+
request_url_dict = request_dict.get('_url')
|
4996
|
+
if request_url_dict:
|
4997
|
+
path = '{model}:countTokens'.format_map(request_url_dict)
|
4998
|
+
else:
|
4999
|
+
path = '{model}:countTokens'
|
4484
5000
|
query_params = request_dict.get('_query')
|
4485
5001
|
if query_params:
|
4486
5002
|
path = f'{path}?{urlencode(query_params)}'
|
4487
5003
|
# TODO: remove the hack that pops config.
|
4488
5004
|
request_dict.pop('config', None)
|
4489
5005
|
|
4490
|
-
http_options = None
|
5006
|
+
http_options: Optional[types.HttpOptionsOrDict] = None
|
4491
5007
|
if isinstance(config, dict):
|
4492
5008
|
http_options = config.get('http_options', None)
|
4493
5009
|
elif hasattr(config, 'http_options'):
|
@@ -4510,7 +5026,7 @@ class Models(_api_module.BaseModule):
|
|
4510
5026
|
)
|
4511
5027
|
|
4512
5028
|
return_value = types.CountTokensResponse._from_response(
|
4513
|
-
response=response_dict, kwargs=parameter_model
|
5029
|
+
response=response_dict, kwargs=parameter_model.model_dump()
|
4514
5030
|
)
|
4515
5031
|
self._api_client._verify_response(return_value)
|
4516
5032
|
return return_value
|
@@ -4522,14 +5038,15 @@ class Models(_api_module.BaseModule):
       contents: Union[types.ContentListUnion, types.ContentListUnionDict],
       config: Optional[types.ComputeTokensConfigOrDict] = None,
   ) -> types.ComputeTokensResponse:
-    """Return a list of tokens based on the input
+    """Return a list of tokens based on the input contents.
+
+    Only text is supported.

     This method is not supported by the Gemini Developer API.

     Args:
       model (str): The model to use.
-      contents (list[shared.Content]): The content to compute tokens for.
-        text is supported.
+      contents (list[shared.Content]): The content to compute tokens for.

     Usage:

@@ -4550,13 +5067,18 @@ class Models(_api_module.BaseModule):
|
|
4550
5067
|
config=config,
|
4551
5068
|
)
|
4552
5069
|
|
5070
|
+
request_url_dict: Optional[dict[str, str]]
|
4553
5071
|
if not self._api_client.vertexai:
|
4554
5072
|
raise ValueError('This method is only supported in the Vertex AI client.')
|
4555
5073
|
else:
|
4556
5074
|
request_dict = _ComputeTokensParameters_to_vertex(
|
4557
5075
|
self._api_client, parameter_model
|
4558
5076
|
)
|
4559
|
-
|
5077
|
+
request_url_dict = request_dict.get('_url')
|
5078
|
+
if request_url_dict:
|
5079
|
+
path = '{model}:computeTokens'.format_map(request_url_dict)
|
5080
|
+
else:
|
5081
|
+
path = '{model}:computeTokens'
|
4560
5082
|
|
4561
5083
|
query_params = request_dict.get('_query')
|
4562
5084
|
if query_params:
|
@@ -4564,7 +5086,7 @@ class Models(_api_module.BaseModule):
|
|
4564
5086
|
# TODO: remove the hack that pops config.
|
4565
5087
|
request_dict.pop('config', None)
|
4566
5088
|
|
4567
|
-
http_options = None
|
5089
|
+
http_options: Optional[types.HttpOptionsOrDict] = None
|
4568
5090
|
if isinstance(config, dict):
|
4569
5091
|
http_options = config.get('http_options', None)
|
4570
5092
|
elif hasattr(config, 'http_options'):
|
@@ -4587,7 +5109,99 @@ class Models(_api_module.BaseModule):
     )

     return_value = types.ComputeTokensResponse._from_response(
-        response=response_dict, kwargs=parameter_model
+        response=response_dict, kwargs=parameter_model.model_dump()
+    )
+    self._api_client._verify_response(return_value)
+    return return_value
+
+  @_common.experimental_warning(
+      'This method is experimental and may change in future versions.'
+  )
+  def generate_videos(
+      self,
+      *,
+      model: str,
+      prompt: Optional[str] = None,
+      config: Optional[types.GenerateVideosConfigOrDict] = None,
+  ) -> types.GenerateVideosOperation:
+    """Generates videos based on a text description and configuration.
+
+    Args:
+      model: The model to use.
+      instances: A list of prompts, images and videos to generate videos from.
+      config: Configuration for generation.
+
+    Usage:
+
+      ```
+      operation = client.models.generate_videos(
+          model="veo-2.0-generate-001",
+          prompt="A neon hologram of a cat driving at top speed",
+      )
+      while not operation.done:
+        time.sleep(10)
+        operation = client.operations.get(operation)
+
+      operation.result.generated_videos[0].video.uri
+      ```
+    """
+
+    parameter_model = types._GenerateVideosParameters(
+        model=model,
+        prompt=prompt,
+        config=config,
+    )
+
+    request_url_dict: Optional[dict[str, str]]
+
+    if self._api_client.vertexai:
+      request_dict = _GenerateVideosParameters_to_vertex(
+          self._api_client, parameter_model
+      )
+      request_url_dict = request_dict.get('_url')
+      if request_url_dict:
+        path = '{model}:predictLongRunning'.format_map(request_url_dict)
+      else:
+        path = '{model}:predictLongRunning'
+    else:
+      request_dict = _GenerateVideosParameters_to_mldev(
+          self._api_client, parameter_model
+      )
+      request_url_dict = request_dict.get('_url')
+      if request_url_dict:
+        path = '{model}:predictLongRunning'.format_map(request_url_dict)
+      else:
+        path = '{model}:predictLongRunning'
+    query_params = request_dict.get('_query')
+    if query_params:
+      path = f'{path}?{urlencode(query_params)}'
+    # TODO: remove the hack that pops config.
+    request_dict.pop('config', None)
+
+    http_options: Optional[types.HttpOptionsOrDict] = None
+    if isinstance(config, dict):
+      http_options = config.get('http_options', None)
+    elif hasattr(config, 'http_options'):
+      http_options = config.http_options
+
+    request_dict = _common.convert_to_dict(request_dict)
+    request_dict = _common.encode_unserializable_types(request_dict)
+
+    response_dict = self._api_client.request(
+        'post', path, request_dict, http_options
+    )
+
+    if self._api_client.vertexai:
+      response_dict = _GenerateVideosOperation_from_vertex(
+          self._api_client, response_dict
+      )
+    else:
+      response_dict = _GenerateVideosOperation_from_mldev(
+          self._api_client, response_dict
+      )
+
+    return_value = types.GenerateVideosOperation._from_response(
+        response=response_dict, kwargs=parameter_model.model_dump()
     )
     self._api_client._verify_response(return_value)
     return return_value
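The docstring example above can be expanded into a small self-contained script; the model name and prompt are just examples, and the long-running operation may take minutes to complete:

```python
import time

from google import genai

client = genai.Client()
operation = client.models.generate_videos(
    model='veo-2.0-generate-001',
    prompt='A neon hologram of a cat driving at top speed',
)
# Poll the long-running operation through the new operations module.
while not operation.done:
  time.sleep(10)
  operation = client.operations.get(operation)

print(operation.result.generated_videos[0].video.uri)
```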
@@ -4601,19 +5215,19 @@ class Models(_api_module.BaseModule):
   ) -> types.GenerateContentResponse:
     """Makes an API request to generate content using a model.

-    For the `model` parameter, supported
-    -
-    -
+    For the `model` parameter, supported formats for Vertex AI API include:
+    - The Gemini model ID, for example: 'gemini-1.5-flash-002'
+    - The full resource name starts with 'projects/', for example:
       'projects/my-project-id/locations/us-central1/publishers/google/models/gemini-1.5-flash-002'
-    -
+    - The partial resource name with 'publishers/', for example:
       'publishers/google/models/gemini-1.5-flash-002' or
       'publishers/meta/models/llama-3.1-405b-instruct-maas'
     - `/` separated publisher and model name, for example:
       'google/gemini-1.5-flash-002' or 'meta/llama-3.1-405b-instruct-maas'

-    For the `model` parameter, supported
-    -
-    -
+    For the `model` parameter, supported formats for Gemini API include:
+    - The Gemini model ID, for example: 'gemini-1.5-flash-002'
+    - The model name starts with 'models/', for example:
       'models/gemini-1.5-flash-002'
     - if you would like to use a tuned model, the model name starts with
       'tunedModels/', for example:
@@ -4661,10 +5275,10 @@ class Models(_api_module.BaseModule):
         model=model, contents=contents, config=config
     )
     remaining_remote_calls_afc = _extra_utils.get_max_remote_calls_afc(config)
-    logging.info(
+    logger.info(
         f'AFC is enabled with max remote calls: {remaining_remote_calls_afc}.'
     )
-    automatic_function_calling_history = []
+    automatic_function_calling_history: list[types.Content] = []
     response = None
     i = 0
     while remaining_remote_calls_afc > 0:
@@ -4672,10 +5286,10 @@ class Models(_api_module.BaseModule):
       response = self._generate_content(
           model=model, contents=contents, config=config
       )
-      logging.info(f'AFC remote call {i} is done.')
+      logger.info(f'AFC remote call {i} is done.')
       remaining_remote_calls_afc -= 1
       if remaining_remote_calls_afc == 0:
-        logging.info('Reached max remote calls for automatic function calling.')
+        logger.info('Reached max remote calls for automatic function calling.')

       function_map = _extra_utils.get_function_map(config)
       if not function_map:
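These automatic-function-calling progress messages are emitted at INFO level through the SDK's logger, so they stay silent until logging is configured. One way to surface them while debugging:

```python
import logging

# Show 'AFC is enabled with max remote calls: ...' and
# 'AFC remote call N is done.' messages during development.
logging.basicConfig(level=logging.INFO)
```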
@@ -4720,21 +5334,21 @@ class Models(_api_module.BaseModule):
|
|
4720
5334
|
) -> Iterator[types.GenerateContentResponse]:
|
4721
5335
|
"""Makes an API request to generate content using a model and yields the model's response in chunks.
|
4722
5336
|
|
4723
|
-
For the `model` parameter, supported
|
4724
|
-
-
|
4725
|
-
-
|
5337
|
+
For the `model` parameter, supported formats for Vertex AI API include:
|
5338
|
+
- The Gemini model ID, for example: 'gemini-1.5-flash-002'
|
5339
|
+
- The full resource name starts with 'projects/', for example:
|
4726
5340
|
'projects/my-project-id/locations/us-central1/publishers/google/models/gemini-1.5-flash-002'
|
4727
|
-
-
|
5341
|
+
- The partial resource name with 'publishers/', for example:
|
4728
5342
|
'publishers/google/models/gemini-1.5-flash-002' or
|
4729
5343
|
'publishers/meta/models/llama-3.1-405b-instruct-maas'
|
4730
5344
|
- `/` separated publisher and model name, for example:
|
4731
5345
|
'google/gemini-1.5-flash-002' or 'meta/llama-3.1-405b-instruct-maas'
|
4732
5346
|
|
4733
|
-
For the `model` parameter, supported
|
4734
|
-
-
|
4735
|
-
-
|
5347
|
+
For the `model` parameter, supported formats for Gemini API include:
|
5348
|
+
- The Gemini model ID, for example: 'gemini-1.5-flash-002'
|
5349
|
+
- The model name starts with 'models/', for example:
|
4736
5350
|
'models/gemini-1.5-flash-002'
|
4737
|
-
-
|
5351
|
+
- If you would like to use a tuned model, the model name starts with
|
4738
5352
|
'tunedModels/', for example:
|
4739
5353
|
'tunedModels/1234567890123456789'
|
4740
5354
|
|
@@ -4782,10 +5396,10 @@ class Models(_api_module.BaseModule):
|
|
4782
5396
|
return
|
4783
5397
|
|
4784
5398
|
remaining_remote_calls_afc = _extra_utils.get_max_remote_calls_afc(config)
|
4785
|
-
|
5399
|
+
logger.info(
|
4786
5400
|
f'AFC is enabled with max remote calls: {remaining_remote_calls_afc}.'
|
4787
5401
|
)
|
4788
|
-
automatic_function_calling_history = []
|
5402
|
+
automatic_function_calling_history: list[types.Content] = []
|
4789
5403
|
chunk = None
|
4790
5404
|
func_response_parts = None
|
4791
5405
|
i = 0
|
@@ -4794,10 +5408,10 @@ class Models(_api_module.BaseModule):
|
|
4794
5408
|
response = self._generate_content_stream(
|
4795
5409
|
model=model, contents=contents, config=config
|
4796
5410
|
)
|
4797
|
-
|
5411
|
+
logger.info(f'AFC remote call {i} is done.')
|
4798
5412
|
remaining_remote_calls_afc -= 1
|
4799
5413
|
if remaining_remote_calls_afc == 0:
|
4800
|
-
|
5414
|
+
logger.info('Reached max remote calls for automatic function calling.')
|
4801
5415
|
|
4802
5416
|
function_map = _extra_utils.get_function_map(config)
|
4803
5417
|
|
@@ -4809,6 +5423,12 @@ class Models(_api_module.BaseModule):
         if not function_map:
           yield chunk
         else:
+          if (
+              not chunk.candidates
+              or not chunk.candidates[0].content
+              or not chunk.candidates[0].content.parts
+          ):
+            break
           func_response_parts = _extra_utils.get_function_response_parts(
               chunk, function_map
           )
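The new guard ends the loop as soon as a streamed chunk carries no usable candidate content, instead of letting the part extraction below fail. The same check as a hypothetical standalone helper:

```python
def has_usable_parts(chunk) -> bool:
  # True only when the first candidate has content with at least one part;
  # mirrors the condition added above.
  return bool(
      chunk is not None
      and chunk.candidates
      and chunk.candidates[0].content
      and chunk.candidates[0].content.parts
  )
```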
@@ -4823,20 +5443,16 @@ class Models(_api_module.BaseModule):
              automatic_function_calling_history
          )
         yield chunk
+        if (
+            not chunk.candidates
+            or not chunk.candidates[0].content
+            or not chunk.candidates[0].content.parts
+        ):
+          break
         func_response_parts = _extra_utils.get_function_response_parts(
             chunk, function_map
         )

-      if not chunk:
-        break
-      if (
-          not chunk
-          or not chunk.candidates
-          or not chunk.candidates[0].content
-          or not chunk.candidates[0].content.parts
-      ):
-        break
-
       if not function_map:
         break
       if not func_response_parts:
@@ -4898,7 +5514,7 @@ class Models(_api_module.BaseModule):

     # Convert to API config.
     config = config or {}
-    config_dct = config if isinstance(config, dict) else config.
+    config_dct = config if isinstance(config, dict) else config.model_dump()
     api_config = types._UpscaleImageAPIConfigDict(**config_dct) # pylint: disable=protected-access

     # Provide default values through API config.
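Because the config is normalised with `model_dump()` before `_UpscaleImageAPIConfigDict` is built, a dict and a typed config now go through the same path. A hedged sketch of the two equivalent call styles (the field shown is only an example; use whichever `UpscaleImageConfig` fields you actually need):

```python
from google.genai import types

# Both forms end up as the same plain dict before the internal API config
# is constructed.
config_as_dict = {'output_mime_type': 'image/jpeg'}
config_as_model = types.UpscaleImageConfig(output_mime_type='image/jpeg')
```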
@@ -4978,23 +5594,33 @@ class AsyncModels(_api_module.BaseModule):
|
|
4978
5594
|
config=config,
|
4979
5595
|
)
|
4980
5596
|
|
5597
|
+
request_url_dict: Optional[dict[str, str]]
|
5598
|
+
|
4981
5599
|
if self._api_client.vertexai:
|
4982
5600
|
request_dict = _GenerateContentParameters_to_vertex(
|
4983
5601
|
self._api_client, parameter_model
|
4984
5602
|
)
|
4985
|
-
|
5603
|
+
request_url_dict = request_dict.get('_url')
|
5604
|
+
if request_url_dict:
|
5605
|
+
path = '{model}:generateContent'.format_map(request_url_dict)
|
5606
|
+
else:
|
5607
|
+
path = '{model}:generateContent'
|
4986
5608
|
else:
|
4987
5609
|
request_dict = _GenerateContentParameters_to_mldev(
|
4988
5610
|
self._api_client, parameter_model
|
4989
5611
|
)
|
4990
|
-
|
5612
|
+
request_url_dict = request_dict.get('_url')
|
5613
|
+
if request_url_dict:
|
5614
|
+
path = '{model}:generateContent'.format_map(request_url_dict)
|
5615
|
+
else:
|
5616
|
+
path = '{model}:generateContent'
|
4991
5617
|
query_params = request_dict.get('_query')
|
4992
5618
|
if query_params:
|
4993
5619
|
path = f'{path}?{urlencode(query_params)}'
|
4994
5620
|
# TODO: remove the hack that pops config.
|
4995
5621
|
request_dict.pop('config', None)
|
4996
5622
|
|
4997
|
-
http_options = None
|
5623
|
+
http_options: Optional[types.HttpOptionsOrDict] = None
|
4998
5624
|
if isinstance(config, dict):
|
4999
5625
|
http_options = config.get('http_options', None)
|
5000
5626
|
elif hasattr(config, 'http_options'):
|
@@ -5017,7 +5643,7 @@ class AsyncModels(_api_module.BaseModule):
|
|
5017
5643
|
)
|
5018
5644
|
|
5019
5645
|
return_value = types.GenerateContentResponse._from_response(
|
5020
|
-
response=response_dict, kwargs=parameter_model
|
5646
|
+
response=response_dict, kwargs=parameter_model.model_dump()
|
5021
5647
|
)
|
5022
5648
|
self._api_client._verify_response(return_value)
|
5023
5649
|
return return_value
|
@@ -5035,27 +5661,37 @@ class AsyncModels(_api_module.BaseModule):
|
|
5035
5661
|
config=config,
|
5036
5662
|
)
|
5037
5663
|
|
5664
|
+
request_url_dict: Optional[dict[str, str]]
|
5665
|
+
|
5038
5666
|
if self._api_client.vertexai:
|
5039
5667
|
request_dict = _GenerateContentParameters_to_vertex(
|
5040
5668
|
self._api_client, parameter_model
|
5041
5669
|
)
|
5042
|
-
|
5043
|
-
|
5044
|
-
|
5670
|
+
request_url_dict = request_dict.get('_url')
|
5671
|
+
if request_url_dict:
|
5672
|
+
path = '{model}:streamGenerateContent?alt=sse'.format_map(
|
5673
|
+
request_url_dict
|
5674
|
+
)
|
5675
|
+
else:
|
5676
|
+
path = '{model}:streamGenerateContent?alt=sse'
|
5045
5677
|
else:
|
5046
5678
|
request_dict = _GenerateContentParameters_to_mldev(
|
5047
5679
|
self._api_client, parameter_model
|
5048
5680
|
)
|
5049
|
-
|
5050
|
-
|
5051
|
-
|
5681
|
+
request_url_dict = request_dict.get('_url')
|
5682
|
+
if request_url_dict:
|
5683
|
+
path = '{model}:streamGenerateContent?alt=sse'.format_map(
|
5684
|
+
request_url_dict
|
5685
|
+
)
|
5686
|
+
else:
|
5687
|
+
path = '{model}:streamGenerateContent?alt=sse'
|
5052
5688
|
query_params = request_dict.get('_query')
|
5053
5689
|
if query_params:
|
5054
5690
|
path = f'{path}?{urlencode(query_params)}'
|
5055
5691
|
# TODO: remove the hack that pops config.
|
5056
5692
|
request_dict.pop('config', None)
|
5057
5693
|
|
5058
|
-
http_options = None
|
5694
|
+
http_options: Optional[types.HttpOptionsOrDict] = None
|
5059
5695
|
if isinstance(config, dict):
|
5060
5696
|
http_options = config.get('http_options', None)
|
5061
5697
|
elif hasattr(config, 'http_options'):
|
@@ -5081,7 +5717,7 @@ class AsyncModels(_api_module.BaseModule):
|
|
5081
5717
|
)
|
5082
5718
|
|
5083
5719
|
return_value = types.GenerateContentResponse._from_response(
|
5084
|
-
response=response_dict, kwargs=parameter_model
|
5720
|
+
response=response_dict, kwargs=parameter_model.model_dump()
|
5085
5721
|
)
|
5086
5722
|
self._api_client._verify_response(return_value)
|
5087
5723
|
yield return_value
|
@@ -5095,7 +5731,7 @@ class AsyncModels(_api_module.BaseModule):
|
|
5095
5731
|
contents: Union[types.ContentListUnion, types.ContentListUnionDict],
|
5096
5732
|
config: Optional[types.EmbedContentConfigOrDict] = None,
|
5097
5733
|
) -> types.EmbedContentResponse:
|
5098
|
-
"""Calculates embeddings for the given contents
|
5734
|
+
"""Calculates embeddings for the given contents. Only text is supported.
|
5099
5735
|
|
5100
5736
|
Args:
|
5101
5737
|
model (str): The model to use.
|
@@ -5106,7 +5742,7 @@ class AsyncModels(_api_module.BaseModule):
|
|
5106
5742
|
|
5107
5743
|
.. code-block:: python
|
5108
5744
|
|
5109
|
-
embeddings = client.models.embed_content(
|
5745
|
+
embeddings = await client.aio.models.embed_content(
|
5110
5746
|
model= 'text-embedding-004',
|
5111
5747
|
contents=[
|
5112
5748
|
'What is your name?',
|
@@ -5124,23 +5760,33 @@ class AsyncModels(_api_module.BaseModule):
|
|
5124
5760
|
config=config,
|
5125
5761
|
)
|
5126
5762
|
|
5763
|
+
request_url_dict: Optional[dict[str, str]]
|
5764
|
+
|
5127
5765
|
if self._api_client.vertexai:
|
5128
5766
|
request_dict = _EmbedContentParameters_to_vertex(
|
5129
5767
|
self._api_client, parameter_model
|
5130
5768
|
)
|
5131
|
-
|
5769
|
+
request_url_dict = request_dict.get('_url')
|
5770
|
+
if request_url_dict:
|
5771
|
+
path = '{model}:predict'.format_map(request_url_dict)
|
5772
|
+
else:
|
5773
|
+
path = '{model}:predict'
|
5132
5774
|
else:
|
5133
5775
|
request_dict = _EmbedContentParameters_to_mldev(
|
5134
5776
|
self._api_client, parameter_model
|
5135
5777
|
)
|
5136
|
-
|
5778
|
+
request_url_dict = request_dict.get('_url')
|
5779
|
+
if request_url_dict:
|
5780
|
+
path = '{model}:batchEmbedContents'.format_map(request_url_dict)
|
5781
|
+
else:
|
5782
|
+
path = '{model}:batchEmbedContents'
|
5137
5783
|
query_params = request_dict.get('_query')
|
5138
5784
|
if query_params:
|
5139
5785
|
path = f'{path}?{urlencode(query_params)}'
|
5140
5786
|
# TODO: remove the hack that pops config.
|
5141
5787
|
request_dict.pop('config', None)
|
5142
5788
|
|
5143
|
-
http_options = None
|
5789
|
+
http_options: Optional[types.HttpOptionsOrDict] = None
|
5144
5790
|
if isinstance(config, dict):
|
5145
5791
|
http_options = config.get('http_options', None)
|
5146
5792
|
elif hasattr(config, 'http_options'):
|
@@ -5163,7 +5809,7 @@ class AsyncModels(_api_module.BaseModule):
|
|
5163
5809
|
)
|
5164
5810
|
|
5165
5811
|
return_value = types.EmbedContentResponse._from_response(
|
5166
|
-
response=response_dict, kwargs=parameter_model
|
5812
|
+
response=response_dict, kwargs=parameter_model.model_dump()
|
5167
5813
|
)
|
5168
5814
|
self._api_client._verify_response(return_value)
|
5169
5815
|
return return_value
|
@@ -5186,8 +5832,8 @@ class AsyncModels(_api_module.BaseModule):
|
|
5186
5832
|
|
5187
5833
|
.. code-block:: python
|
5188
5834
|
|
5189
|
-
response = client.models.generate_images(
|
5190
|
-
model='imagen-3.0-generate-
|
5835
|
+
response = await client.aio.models.generate_images(
|
5836
|
+
model='imagen-3.0-generate-002',
|
5191
5837
|
prompt='Man with a dog',
|
5192
5838
|
config=types.GenerateImagesConfig(
|
5193
5839
|
number_of_images= 1,
|
@@ -5204,23 +5850,33 @@ class AsyncModels(_api_module.BaseModule):
|
|
5204
5850
|
config=config,
|
5205
5851
|
)
|
5206
5852
|
|
5853
|
+
request_url_dict: Optional[dict[str, str]]
|
5854
|
+
|
5207
5855
|
if self._api_client.vertexai:
|
5208
5856
|
request_dict = _GenerateImagesParameters_to_vertex(
|
5209
5857
|
self._api_client, parameter_model
|
5210
5858
|
)
|
5211
|
-
|
5859
|
+
request_url_dict = request_dict.get('_url')
|
5860
|
+
if request_url_dict:
|
5861
|
+
path = '{model}:predict'.format_map(request_url_dict)
|
5862
|
+
else:
|
5863
|
+
path = '{model}:predict'
|
5212
5864
|
else:
|
5213
5865
|
request_dict = _GenerateImagesParameters_to_mldev(
|
5214
5866
|
self._api_client, parameter_model
|
5215
5867
|
)
|
5216
|
-
|
5868
|
+
request_url_dict = request_dict.get('_url')
|
5869
|
+
if request_url_dict:
|
5870
|
+
path = '{model}:predict'.format_map(request_url_dict)
|
5871
|
+
else:
|
5872
|
+
path = '{model}:predict'
|
5217
5873
|
query_params = request_dict.get('_query')
|
5218
5874
|
if query_params:
|
5219
5875
|
path = f'{path}?{urlencode(query_params)}'
|
5220
5876
|
# TODO: remove the hack that pops config.
|
5221
5877
|
request_dict.pop('config', None)
|
5222
5878
|
|
5223
|
-
http_options = None
|
5879
|
+
http_options: Optional[types.HttpOptionsOrDict] = None
|
5224
5880
|
if isinstance(config, dict):
|
5225
5881
|
http_options = config.get('http_options', None)
|
5226
5882
|
elif hasattr(config, 'http_options'):
|
@@ -5243,7 +5899,7 @@ class AsyncModels(_api_module.BaseModule):
|
|
5243
5899
|
)
|
5244
5900
|
|
5245
5901
|
return_value = types.GenerateImagesResponse._from_response(
|
5246
|
-
response=response_dict, kwargs=parameter_model
|
5902
|
+
response=response_dict, kwargs=parameter_model.model_dump()
|
5247
5903
|
)
|
5248
5904
|
self._api_client._verify_response(return_value)
|
5249
5905
|
return return_value
|
@@ -5284,8 +5940,8 @@ class AsyncModels(_api_module.BaseModule):
|
|
5284
5940
|
mask_dilation=0.06,
|
5285
5941
|
),
|
5286
5942
|
)
|
5287
|
-
response = client.models.edit_image(
|
5288
|
-
model='imagen-3.0-capability-
|
5943
|
+
response = await client.aio.models.edit_image(
|
5944
|
+
model='imagen-3.0-capability-001',
|
5289
5945
|
prompt='man with dog',
|
5290
5946
|
reference_images=[raw_ref_image, mask_ref_image],
|
5291
5947
|
config=types.EditImageConfig(
|
@@ -5305,13 +5961,18 @@ class AsyncModels(_api_module.BaseModule):
|
|
5305
5961
|
config=config,
|
5306
5962
|
)
|
5307
5963
|
|
5964
|
+
request_url_dict: Optional[dict[str, str]]
|
5308
5965
|
if not self._api_client.vertexai:
|
5309
5966
|
raise ValueError('This method is only supported in the Vertex AI client.')
|
5310
5967
|
else:
|
5311
5968
|
request_dict = _EditImageParameters_to_vertex(
|
5312
5969
|
self._api_client, parameter_model
|
5313
5970
|
)
|
5314
|
-
|
5971
|
+
request_url_dict = request_dict.get('_url')
|
5972
|
+
if request_url_dict:
|
5973
|
+
path = '{model}:predict'.format_map(request_url_dict)
|
5974
|
+
else:
|
5975
|
+
path = '{model}:predict'
|
5315
5976
|
|
5316
5977
|
query_params = request_dict.get('_query')
|
5317
5978
|
if query_params:
|
@@ -5319,7 +5980,7 @@ class AsyncModels(_api_module.BaseModule):
|
|
5319
5980
|
# TODO: remove the hack that pops config.
|
5320
5981
|
request_dict.pop('config', None)
|
5321
5982
|
|
5322
|
-
http_options = None
|
5983
|
+
http_options: Optional[types.HttpOptionsOrDict] = None
|
5323
5984
|
if isinstance(config, dict):
|
5324
5985
|
http_options = config.get('http_options', None)
|
5325
5986
|
elif hasattr(config, 'http_options'):
|
@@ -5342,7 +6003,7 @@ class AsyncModels(_api_module.BaseModule):
|
|
5342
6003
|
)
|
5343
6004
|
|
5344
6005
|
return_value = types.EditImageResponse._from_response(
|
5345
|
-
response=response_dict, kwargs=parameter_model
|
6006
|
+
response=response_dict, kwargs=parameter_model.model_dump()
|
5346
6007
|
)
|
5347
6008
|
self._api_client._verify_response(return_value)
|
5348
6009
|
return return_value
|
@@ -5371,13 +6032,18 @@ class AsyncModels(_api_module.BaseModule):
|
|
5371
6032
|
config=config,
|
5372
6033
|
)
|
5373
6034
|
|
6035
|
+
request_url_dict: Optional[dict[str, str]]
|
5374
6036
|
if not self._api_client.vertexai:
|
5375
6037
|
raise ValueError('This method is only supported in the Vertex AI client.')
|
5376
6038
|
else:
|
5377
6039
|
request_dict = _UpscaleImageAPIParameters_to_vertex(
|
5378
6040
|
self._api_client, parameter_model
|
5379
6041
|
)
|
5380
|
-
|
6042
|
+
request_url_dict = request_dict.get('_url')
|
6043
|
+
if request_url_dict:
|
6044
|
+
path = '{model}:predict'.format_map(request_url_dict)
|
6045
|
+
else:
|
6046
|
+
path = '{model}:predict'
|
5381
6047
|
|
5382
6048
|
query_params = request_dict.get('_query')
|
5383
6049
|
if query_params:
|
@@ -5385,7 +6051,7 @@ class AsyncModels(_api_module.BaseModule):
|
|
5385
6051
|
# TODO: remove the hack that pops config.
|
5386
6052
|
request_dict.pop('config', None)
|
5387
6053
|
|
5388
|
-
http_options = None
|
6054
|
+
http_options: Optional[types.HttpOptionsOrDict] = None
|
5389
6055
|
if isinstance(config, dict):
|
5390
6056
|
http_options = config.get('http_options', None)
|
5391
6057
|
elif hasattr(config, 'http_options'):
|
@@ -5408,7 +6074,7 @@ class AsyncModels(_api_module.BaseModule):
|
|
5408
6074
|
)
|
5409
6075
|
|
5410
6076
|
return_value = types.UpscaleImageResponse._from_response(
|
5411
|
-
response=response_dict, kwargs=parameter_model
|
6077
|
+
response=response_dict, kwargs=parameter_model.model_dump()
|
5412
6078
|
)
|
5413
6079
|
self._api_client._verify_response(return_value)
|
5414
6080
|
return return_value
|
@@ -5421,23 +6087,33 @@ class AsyncModels(_api_module.BaseModule):
|
|
5421
6087
|
config=config,
|
5422
6088
|
)
|
5423
6089
|
|
6090
|
+
request_url_dict: Optional[dict[str, str]]
|
6091
|
+
|
5424
6092
|
if self._api_client.vertexai:
|
5425
6093
|
request_dict = _GetModelParameters_to_vertex(
|
5426
6094
|
self._api_client, parameter_model
|
5427
6095
|
)
|
5428
|
-
|
6096
|
+
request_url_dict = request_dict.get('_url')
|
6097
|
+
if request_url_dict:
|
6098
|
+
path = '{name}'.format_map(request_url_dict)
|
6099
|
+
else:
|
6100
|
+
path = '{name}'
|
5429
6101
|
else:
|
5430
6102
|
request_dict = _GetModelParameters_to_mldev(
|
5431
6103
|
self._api_client, parameter_model
|
5432
6104
|
)
|
5433
|
-
|
6105
|
+
request_url_dict = request_dict.get('_url')
|
6106
|
+
if request_url_dict:
|
6107
|
+
path = '{name}'.format_map(request_url_dict)
|
6108
|
+
else:
|
6109
|
+
path = '{name}'
|
5434
6110
|
query_params = request_dict.get('_query')
|
5435
6111
|
if query_params:
|
5436
6112
|
path = f'{path}?{urlencode(query_params)}'
|
5437
6113
|
# TODO: remove the hack that pops config.
|
5438
6114
|
request_dict.pop('config', None)
|
5439
6115
|
|
5440
|
-
http_options = None
|
6116
|
+
http_options: Optional[types.HttpOptionsOrDict] = None
|
5441
6117
|
if isinstance(config, dict):
|
5442
6118
|
http_options = config.get('http_options', None)
|
5443
6119
|
elif hasattr(config, 'http_options'):
|
@@ -5456,7 +6132,7 @@ class AsyncModels(_api_module.BaseModule):
|
|
5456
6132
|
response_dict = _Model_from_mldev(self._api_client, response_dict)
|
5457
6133
|
|
5458
6134
|
return_value = types.Model._from_response(
|
5459
|
-
response=response_dict, kwargs=parameter_model
|
6135
|
+
response=response_dict, kwargs=parameter_model.model_dump()
|
5460
6136
|
)
|
5461
6137
|
self._api_client._verify_response(return_value)
|
5462
6138
|
return return_value
|
@@ -5468,23 +6144,33 @@ class AsyncModels(_api_module.BaseModule):
|
|
5468
6144
|
config=config,
|
5469
6145
|
)
|
5470
6146
|
|
6147
|
+
request_url_dict: Optional[dict[str, str]]
|
6148
|
+
|
5471
6149
|
if self._api_client.vertexai:
|
5472
6150
|
request_dict = _ListModelsParameters_to_vertex(
|
5473
6151
|
self._api_client, parameter_model
|
5474
6152
|
)
|
5475
|
-
|
6153
|
+
request_url_dict = request_dict.get('_url')
|
6154
|
+
if request_url_dict:
|
6155
|
+
path = '{models_url}'.format_map(request_url_dict)
|
6156
|
+
else:
|
6157
|
+
path = '{models_url}'
|
5476
6158
|
else:
|
5477
6159
|
request_dict = _ListModelsParameters_to_mldev(
|
5478
6160
|
self._api_client, parameter_model
|
5479
6161
|
)
|
5480
|
-
|
6162
|
+
request_url_dict = request_dict.get('_url')
|
6163
|
+
if request_url_dict:
|
6164
|
+
path = '{models_url}'.format_map(request_url_dict)
|
6165
|
+
else:
|
6166
|
+
path = '{models_url}'
|
5481
6167
|
query_params = request_dict.get('_query')
|
5482
6168
|
if query_params:
|
5483
6169
|
path = f'{path}?{urlencode(query_params)}'
|
5484
6170
|
# TODO: remove the hack that pops config.
|
5485
6171
|
request_dict.pop('config', None)
|
5486
6172
|
|
5487
|
-
http_options = None
|
6173
|
+
http_options: Optional[types.HttpOptionsOrDict] = None
|
5488
6174
|
if isinstance(config, dict):
|
5489
6175
|
http_options = config.get('http_options', None)
|
5490
6176
|
elif hasattr(config, 'http_options'):
|
@@ -5507,7 +6193,7 @@ class AsyncModels(_api_module.BaseModule):
|
|
5507
6193
|
)
|
5508
6194
|
|
5509
6195
|
return_value = types.ListModelsResponse._from_response(
|
5510
|
-
response=response_dict, kwargs=parameter_model
|
6196
|
+
response=response_dict, kwargs=parameter_model.model_dump()
|
5511
6197
|
)
|
5512
6198
|
self._api_client._verify_response(return_value)
|
5513
6199
|
return return_value
|
@@ -5523,23 +6209,33 @@ class AsyncModels(_api_module.BaseModule):
|
|
5523
6209
|
config=config,
|
5524
6210
|
)
|
5525
6211
|
|
6212
|
+
request_url_dict: Optional[dict[str, str]]
|
6213
|
+
|
5526
6214
|
if self._api_client.vertexai:
|
5527
6215
|
request_dict = _UpdateModelParameters_to_vertex(
|
5528
6216
|
self._api_client, parameter_model
|
5529
6217
|
)
|
5530
|
-
|
6218
|
+
request_url_dict = request_dict.get('_url')
|
6219
|
+
if request_url_dict:
|
6220
|
+
path = '{model}'.format_map(request_url_dict)
|
6221
|
+
else:
|
6222
|
+
path = '{model}'
|
5531
6223
|
else:
|
5532
6224
|
request_dict = _UpdateModelParameters_to_mldev(
|
5533
6225
|
self._api_client, parameter_model
|
5534
6226
|
)
|
5535
|
-
|
6227
|
+
request_url_dict = request_dict.get('_url')
|
6228
|
+
if request_url_dict:
|
6229
|
+
path = '{name}'.format_map(request_url_dict)
|
6230
|
+
else:
|
6231
|
+
path = '{name}'
|
5536
6232
|
query_params = request_dict.get('_query')
|
5537
6233
|
if query_params:
|
5538
6234
|
path = f'{path}?{urlencode(query_params)}'
|
5539
6235
|
# TODO: remove the hack that pops config.
|
5540
6236
|
request_dict.pop('config', None)
|
5541
6237
|
|
5542
|
-
http_options = None
|
6238
|
+
http_options: Optional[types.HttpOptionsOrDict] = None
|
5543
6239
|
if isinstance(config, dict):
|
5544
6240
|
http_options = config.get('http_options', None)
|
5545
6241
|
elif hasattr(config, 'http_options'):
|
@@ -5558,7 +6254,7 @@ class AsyncModels(_api_module.BaseModule):
|
|
5558
6254
|
response_dict = _Model_from_mldev(self._api_client, response_dict)
|
5559
6255
|
|
5560
6256
|
return_value = types.Model._from_response(
|
5561
|
-
response=response_dict, kwargs=parameter_model
|
6257
|
+
response=response_dict, kwargs=parameter_model.model_dump()
|
5562
6258
|
)
|
5563
6259
|
self._api_client._verify_response(return_value)
|
5564
6260
|
return return_value
|
@@ -5574,23 +6270,33 @@ class AsyncModels(_api_module.BaseModule):
|
|
5574
6270
|
config=config,
|
5575
6271
|
)
|
5576
6272
|
|
6273
|
+
request_url_dict: Optional[dict[str, str]]
|
6274
|
+
|
5577
6275
|
if self._api_client.vertexai:
|
5578
6276
|
request_dict = _DeleteModelParameters_to_vertex(
|
5579
6277
|
self._api_client, parameter_model
|
5580
6278
|
)
|
5581
|
-
|
6279
|
+
request_url_dict = request_dict.get('_url')
|
6280
|
+
if request_url_dict:
|
6281
|
+
path = '{name}'.format_map(request_url_dict)
|
6282
|
+
else:
|
6283
|
+
path = '{name}'
|
5582
6284
|
else:
|
5583
6285
|
request_dict = _DeleteModelParameters_to_mldev(
|
5584
6286
|
self._api_client, parameter_model
|
5585
6287
|
)
|
5586
|
-
|
6288
|
+
request_url_dict = request_dict.get('_url')
|
6289
|
+
if request_url_dict:
|
6290
|
+
path = '{name}'.format_map(request_url_dict)
|
6291
|
+
else:
|
6292
|
+
path = '{name}'
|
5587
6293
|
query_params = request_dict.get('_query')
|
5588
6294
|
if query_params:
|
5589
6295
|
path = f'{path}?{urlencode(query_params)}'
|
5590
6296
|
# TODO: remove the hack that pops config.
|
5591
6297
|
request_dict.pop('config', None)
|
5592
6298
|
|
5593
|
-
http_options = None
|
6299
|
+
http_options: Optional[types.HttpOptionsOrDict] = None
|
5594
6300
|
if isinstance(config, dict):
|
5595
6301
|
http_options = config.get('http_options', None)
|
5596
6302
|
elif hasattr(config, 'http_options'):
|
@@ -5613,7 +6319,7 @@ class AsyncModels(_api_module.BaseModule):
|
|
5613
6319
|
)
|
5614
6320
|
|
5615
6321
|
return_value = types.DeleteModelResponse._from_response(
|
5616
|
-
response=response_dict, kwargs=parameter_model
|
6322
|
+
response=response_dict, kwargs=parameter_model.model_dump()
|
5617
6323
|
)
|
5618
6324
|
self._api_client._verify_response(return_value)
|
5619
6325
|
return return_value
|
@@ -5627,17 +6333,18 @@ class AsyncModels(_api_module.BaseModule):
|
|
5627
6333
|
) -> types.CountTokensResponse:
|
5628
6334
|
"""Counts the number of tokens in the given content.
|
5629
6335
|
|
6336
|
+
Multimodal input is supported for Gemini models.
|
6337
|
+
|
5630
6338
|
Args:
|
5631
6339
|
model (str): The model to use for counting tokens.
|
5632
6340
|
contents (list[types.Content]): The content to count tokens for.
|
5633
|
-
Multimodal input is supported for Gemini models.
|
5634
6341
|
config (CountTokensConfig): The configuration for counting tokens.
|
5635
6342
|
|
5636
6343
|
Usage:
|
5637
6344
|
|
5638
6345
|
.. code-block:: python
|
5639
6346
|
|
5640
|
-
response = client.models.count_tokens(
|
6347
|
+
response = await client.aio.models.count_tokens(
|
5641
6348
|
model='gemini-1.5-flash',
|
5642
6349
|
contents='What is your name?',
|
5643
6350
|
)
|
@@ -5651,23 +6358,33 @@ class AsyncModels(_api_module.BaseModule):
|
|
5651
6358
|
config=config,
|
5652
6359
|
)
|
5653
6360
|
|
6361
|
+
request_url_dict: Optional[dict[str, str]]
|
6362
|
+
|
5654
6363
|
if self._api_client.vertexai:
|
5655
6364
|
request_dict = _CountTokensParameters_to_vertex(
|
5656
6365
|
self._api_client, parameter_model
|
5657
6366
|
)
|
5658
|
-
|
6367
|
+
request_url_dict = request_dict.get('_url')
|
6368
|
+
if request_url_dict:
|
6369
|
+
path = '{model}:countTokens'.format_map(request_url_dict)
|
6370
|
+
else:
|
6371
|
+
path = '{model}:countTokens'
|
5659
6372
|
else:
|
5660
6373
|
request_dict = _CountTokensParameters_to_mldev(
|
5661
6374
|
self._api_client, parameter_model
|
5662
6375
|
)
|
5663
|
-
|
6376
|
+
request_url_dict = request_dict.get('_url')
|
6377
|
+
if request_url_dict:
|
6378
|
+
path = '{model}:countTokens'.format_map(request_url_dict)
|
6379
|
+
else:
|
6380
|
+
path = '{model}:countTokens'
|
5664
6381
|
query_params = request_dict.get('_query')
|
5665
6382
|
if query_params:
|
5666
6383
|
path = f'{path}?{urlencode(query_params)}'
|
5667
6384
|
# TODO: remove the hack that pops config.
|
5668
6385
|
request_dict.pop('config', None)
|
5669
6386
|
|
5670
|
-
http_options = None
|
6387
|
+
http_options: Optional[types.HttpOptionsOrDict] = None
|
5671
6388
|
if isinstance(config, dict):
|
5672
6389
|
http_options = config.get('http_options', None)
|
5673
6390
|
elif hasattr(config, 'http_options'):
|
@@ -5690,7 +6407,7 @@ class AsyncModels(_api_module.BaseModule):
|
|
5690
6407
|
)
|
5691
6408
|
|
5692
6409
|
return_value = types.CountTokensResponse._from_response(
|
5693
|
-
response=response_dict, kwargs=parameter_model
|
6410
|
+
response=response_dict, kwargs=parameter_model.model_dump()
|
5694
6411
|
)
|
5695
6412
|
self._api_client._verify_response(return_value)
|
5696
6413
|
return return_value
|
@@ -5702,20 +6419,21 @@ class AsyncModels(_api_module.BaseModule):
|
|
5702
6419
|
contents: Union[types.ContentListUnion, types.ContentListUnionDict],
|
5703
6420
|
config: Optional[types.ComputeTokensConfigOrDict] = None,
|
5704
6421
|
) -> types.ComputeTokensResponse:
|
5705
|
-
"""Return a list of tokens based on the input
|
6422
|
+
"""Return a list of tokens based on the input contents.
|
6423
|
+
|
6424
|
+
Only text is supported.
|
5706
6425
|
|
5707
6426
|
This method is not supported by the Gemini Developer API.
|
5708
6427
|
|
5709
6428
|
Args:
|
5710
6429
|
model (str): The model to use.
|
5711
|
-
contents (list[shared.Content]): The content to compute tokens for.
|
5712
|
-
text is supported.
|
6430
|
+
contents (list[shared.Content]): The content to compute tokens for.
|
5713
6431
|
|
5714
6432
|
Usage:
|
5715
6433
|
|
5716
6434
|
.. code-block:: python
|
5717
6435
|
|
5718
|
-
response = client.models.compute_tokens(
|
6436
|
+
response = await client.aio.models.compute_tokens(
|
5719
6437
|
model='gemini-1.5-flash',
|
5720
6438
|
contents='What is your name?',
|
5721
6439
|
)
|
@@ -5730,13 +6448,18 @@ class AsyncModels(_api_module.BaseModule):
|
|
5730
6448
|
config=config,
|
5731
6449
|
)
|
5732
6450
|
|
6451
|
+
request_url_dict: Optional[dict[str, str]]
|
5733
6452
|
if not self._api_client.vertexai:
|
5734
6453
|
raise ValueError('This method is only supported in the Vertex AI client.')
|
5735
6454
|
else:
|
5736
6455
|
request_dict = _ComputeTokensParameters_to_vertex(
|
5737
6456
|
self._api_client, parameter_model
|
5738
6457
|
)
|
5739
|
-
|
6458
|
+
request_url_dict = request_dict.get('_url')
|
6459
|
+
if request_url_dict:
|
6460
|
+
path = '{model}:computeTokens'.format_map(request_url_dict)
|
6461
|
+
else:
|
6462
|
+
path = '{model}:computeTokens'
|
5740
6463
|
|
5741
6464
|
query_params = request_dict.get('_query')
|
5742
6465
|
if query_params:
|
@@ -5744,7 +6467,7 @@ class AsyncModels(_api_module.BaseModule):
|
|
5744
6467
|
# TODO: remove the hack that pops config.
|
5745
6468
|
request_dict.pop('config', None)
|
5746
6469
|
|
5747
|
-
http_options = None
|
6470
|
+
http_options: Optional[types.HttpOptionsOrDict] = None
|
5748
6471
|
if isinstance(config, dict):
|
5749
6472
|
http_options = config.get('http_options', None)
|
5750
6473
|
elif hasattr(config, 'http_options'):
|
@@ -5767,7 +6490,99 @@ class AsyncModels(_api_module.BaseModule):
|
|
5767
6490
|
)
|
5768
6491
|
|
5769
6492
|
return_value = types.ComputeTokensResponse._from_response(
|
5770
|
-
response=response_dict, kwargs=parameter_model
|
6493
|
+
response=response_dict, kwargs=parameter_model.model_dump()
|
6494
|
+
)
|
6495
|
+
self._api_client._verify_response(return_value)
|
6496
|
+
return return_value
|
6497
|
+
|
6498
|
+
@_common.experimental_warning(
|
6499
|
+
'This method is experimental and may change in future versions.'
|
6500
|
+
)
|
6501
|
+
async def generate_videos(
|
6502
|
+
self,
|
6503
|
+
*,
|
6504
|
+
model: str,
|
6505
|
+
prompt: Optional[str] = None,
|
6506
|
+
config: Optional[types.GenerateVideosConfigOrDict] = None,
|
6507
|
+
) -> types.GenerateVideosOperation:
|
6508
|
+
"""Generates videos based on a text description and configuration.
|
6509
|
+
|
6510
|
+
Args:
|
6511
|
+
model: The model to use.
|
6512
|
+
instances: A list of prompts, images and videos to generate videos from.
|
6513
|
+
config: Configuration for generation.
|
6514
|
+
|
6515
|
+
Usage:
|
6516
|
+
|
6517
|
+
```
|
6518
|
+
operation = client.models.generate_videos(
|
6519
|
+
model="veo-2.0-generate-001",
|
6520
|
+
prompt="A neon hologram of a cat driving at top speed",
|
6521
|
+
)
|
6522
|
+
while not operation.done:
|
6523
|
+
time.sleep(10)
|
6524
|
+
operation = client.operations.get(operation)
|
6525
|
+
|
6526
|
+
operation.result.generated_videos[0].video.uri
|
6527
|
+
```
|
6528
|
+
"""
|
6529
|
+
|
6530
|
+
parameter_model = types._GenerateVideosParameters(
|
6531
|
+
model=model,
|
6532
|
+
prompt=prompt,
|
6533
|
+
config=config,
|
6534
|
+
)
|
6535
|
+
|
6536
|
+
request_url_dict: Optional[dict[str, str]]
|
6537
|
+
|
6538
|
+
if self._api_client.vertexai:
|
6539
|
+
request_dict = _GenerateVideosParameters_to_vertex(
|
6540
|
+
self._api_client, parameter_model
|
6541
|
+
)
|
6542
|
+
request_url_dict = request_dict.get('_url')
|
6543
|
+
if request_url_dict:
|
6544
|
+
path = '{model}:predictLongRunning'.format_map(request_url_dict)
|
6545
|
+
else:
|
6546
|
+
path = '{model}:predictLongRunning'
|
6547
|
+
else:
|
6548
|
+
request_dict = _GenerateVideosParameters_to_mldev(
|
6549
|
+
self._api_client, parameter_model
|
6550
|
+
)
|
6551
|
+
request_url_dict = request_dict.get('_url')
|
6552
|
+
if request_url_dict:
|
6553
|
+
path = '{model}:predictLongRunning'.format_map(request_url_dict)
|
6554
|
+
else:
|
6555
|
+
path = '{model}:predictLongRunning'
|
6556
|
+
query_params = request_dict.get('_query')
|
6557
|
+
if query_params:
|
6558
|
+
path = f'{path}?{urlencode(query_params)}'
|
6559
|
+
# TODO: remove the hack that pops config.
|
6560
|
+
request_dict.pop('config', None)
|
6561
|
+
|
6562
|
+
http_options: Optional[types.HttpOptionsOrDict] = None
|
6563
|
+
if isinstance(config, dict):
|
6564
|
+
http_options = config.get('http_options', None)
|
6565
|
+
elif hasattr(config, 'http_options'):
|
6566
|
+
http_options = config.http_options
|
6567
|
+
|
6568
|
+
request_dict = _common.convert_to_dict(request_dict)
|
6569
|
+
request_dict = _common.encode_unserializable_types(request_dict)
|
6570
|
+
|
6571
|
+
response_dict = await self._api_client.async_request(
|
6572
|
+
'post', path, request_dict, http_options
|
6573
|
+
)
|
6574
|
+
|
6575
|
+
if self._api_client.vertexai:
|
6576
|
+
response_dict = _GenerateVideosOperation_from_vertex(
|
6577
|
+
self._api_client, response_dict
|
6578
|
+
)
|
6579
|
+
else:
|
6580
|
+
response_dict = _GenerateVideosOperation_from_mldev(
|
6581
|
+
self._api_client, response_dict
|
6582
|
+
)
|
6583
|
+
|
6584
|
+
return_value = types.GenerateVideosOperation._from_response(
|
6585
|
+
response=response_dict, kwargs=parameter_model.model_dump()
|
5771
6586
|
)
|
5772
6587
|
self._api_client._verify_response(return_value)
|
5773
6588
|
return return_value
|
@@ -5813,10 +6628,10 @@ class AsyncModels(_api_module.BaseModule):
|
|
5813
6628
|
model=model, contents=contents, config=config
|
5814
6629
|
)
|
5815
6630
|
remaining_remote_calls_afc = _extra_utils.get_max_remote_calls_afc(config)
|
5816
|
-
|
6631
|
+
logger.info(
|
5817
6632
|
f'AFC is enabled with max remote calls: {remaining_remote_calls_afc}.'
|
5818
6633
|
)
|
5819
|
-
automatic_function_calling_history = []
|
6634
|
+
automatic_function_calling_history: list[types.Content] = []
|
5820
6635
|
response = None
|
5821
6636
|
while remaining_remote_calls_afc > 0:
|
5822
6637
|
response = await self._generate_content(
|
@@ -5824,7 +6639,7 @@ class AsyncModels(_api_module.BaseModule):
|
|
5824
6639
|
)
|
5825
6640
|
remaining_remote_calls_afc -= 1
|
5826
6641
|
if remaining_remote_calls_afc == 0:
|
5827
|
-
|
6642
|
+
logger.info('Reached max remote calls for automatic function calling.')
|
5828
6643
|
|
5829
6644
|
function_map = _extra_utils.get_function_map(config)
|
5830
6645
|
if not function_map:
|
@@ -5870,21 +6685,21 @@ class AsyncModels(_api_module.BaseModule):
|
|
5870
6685
|
) -> Awaitable[AsyncIterator[types.GenerateContentResponse]]:
|
5871
6686
|
"""Makes an API request to generate content using a model and yields the model's response in chunks.
|
5872
6687
|
|
5873
|
-
For the `model` parameter, supported
|
5874
|
-
-
|
5875
|
-
-
|
6688
|
+
For the `model` parameter, supported formats for Vertex AI API include:
|
6689
|
+
- The Gemini model ID, for example: 'gemini-1.5-flash-002'
|
6690
|
+
- The full resource name starts with 'projects/', for example:
|
5876
6691
|
'projects/my-project-id/locations/us-central1/publishers/google/models/gemini-1.5-flash-002'
|
5877
|
-
-
|
6692
|
+
- The partial resource name with 'publishers/', for example:
|
5878
6693
|
'publishers/google/models/gemini-1.5-flash-002' or
|
5879
6694
|
'publishers/meta/models/llama-3.1-405b-instruct-maas'
|
5880
6695
|
- `/` separated publisher and model name, for example:
|
5881
6696
|
'google/gemini-1.5-flash-002' or 'meta/llama-3.1-405b-instruct-maas'
|
5882
6697
|
|
5883
|
-
For the `model` parameter, supported
|
5884
|
-
-
|
5885
|
-
-
|
6698
|
+
For the `model` parameter, supported formats for Gemini API include:
|
6699
|
+
- The Gemini model ID, for example: 'gemini-1.5-flash-002'
|
6700
|
+
- The model name starts with 'models/', for example:
|
5886
6701
|
'models/gemini-1.5-flash-002'
|
5887
|
-
-
|
6702
|
+
- If you would like to use a tuned model, the model name starts with
|
5888
6703
|
'tunedModels/', for example:
|
5889
6704
|
'tunedModels/1234567890123456789'
|
5890
6705
|
|
@@ -5938,10 +6753,11 @@ class AsyncModels(_api_module.BaseModule):
|
|
5938
6753
|
|
5939
6754
|
async def async_generator(model, contents, config):
|
5940
6755
|
remaining_remote_calls_afc = _extra_utils.get_max_remote_calls_afc(config)
|
5941
|
-
|
6756
|
+
logger.info(
|
5942
6757
|
f'AFC is enabled with max remote calls: {remaining_remote_calls_afc}.'
|
5943
6758
|
)
|
5944
|
-
automatic_function_calling_history = []
|
6759
|
+
automatic_function_calling_history: list[types.Content] = []
|
6760
|
+
func_response_parts = None
|
5945
6761
|
chunk = None
|
5946
6762
|
i = 0
|
5947
6763
|
while remaining_remote_calls_afc > 0:
|
@@ -5949,10 +6765,10 @@ class AsyncModels(_api_module.BaseModule):
|
|
5949
6765
|
response = await self._generate_content_stream(
|
5950
6766
|
model=model, contents=contents, config=config
|
5951
6767
|
)
|
5952
|
-
|
6768
|
+
logger.info(f'AFC remote call {i} is done.')
|
5953
6769
|
remaining_remote_calls_afc -= 1
|
5954
6770
|
if remaining_remote_calls_afc == 0:
|
5955
|
-
|
6771
|
+
logger.info(
|
5956
6772
|
'Reached max remote calls for automatic function calling.'
|
5957
6773
|
)
|
5958
6774
|
|
@@ -5966,6 +6782,12 @@ class AsyncModels(_api_module.BaseModule):
|
|
5966
6782
|
if not function_map:
|
5967
6783
|
yield chunk
|
5968
6784
|
else:
|
6785
|
+
if (
|
6786
|
+
not chunk.candidates
|
6787
|
+
or not chunk.candidates[0].content
|
6788
|
+
or not chunk.candidates[0].content.parts
|
6789
|
+
):
|
6790
|
+
break
|
5969
6791
|
func_response_parts = _extra_utils.get_function_response_parts(
|
5970
6792
|
chunk, function_map
|
5971
6793
|
)
|
@@ -5981,18 +6803,15 @@ class AsyncModels(_api_module.BaseModule):
                automatic_function_calling_history
            )
           yield chunk
+          if (
+              not chunk.candidates
+              or not chunk.candidates[0].content
+              or not chunk.candidates[0].content.parts
+          ):
+            break
           func_response_parts = _extra_utils.get_function_response_parts(
               chunk, function_map
           )
-        if not chunk:
-          break
-        if (
-            not chunk
-            or not chunk.candidates
-            or not chunk.candidates[0].content
-            or not chunk.candidates[0].content.parts
-        ):
-          break
         if not function_map:
           break

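Both the sync and async streaming loops now stop on the same conditions once the remote-call budget is exhausted. For reference, a hedged sketch of how a caller bounds or disables automatic function calling through the request config (field names as defined by `types.AutomaticFunctionCallingConfig`):

```python
from google.genai import types

# Cap the number of automatic remote calls, or opt out of AFC entirely.
bounded = types.GenerateContentConfig(
    automatic_function_calling=types.AutomaticFunctionCallingConfig(
        maximum_remote_calls=3,
    ),
)
disabled = types.GenerateContentConfig(
    automatic_function_calling=types.AutomaticFunctionCallingConfig(disable=True),
)
```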