google-genai 0.3.0__py3-none-any.whl → 0.4.0__py3-none-any.whl
This diff compares publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their respective public registries.
- google/genai/__init__.py +2 -1
- google/genai/_api_client.py +85 -36
- google/genai/_automatic_function_calling_util.py +14 -14
- google/genai/_replay_api_client.py +22 -28
- google/genai/batches.py +16 -16
- google/genai/caches.py +18 -18
- google/genai/chats.py +2 -2
- google/genai/client.py +6 -3
- google/genai/files.py +22 -22
- google/genai/live.py +28 -5
- google/genai/models.py +97 -77
- google/genai/tunings.py +17 -17
- google/genai/types.py +150 -80
- google/genai/version.py +16 -0
- {google_genai-0.3.0.dist-info → google_genai-0.4.0.dist-info}/METADATA +57 -17
- google_genai-0.4.0.dist-info/RECORD +25 -0
- {google_genai-0.3.0.dist-info → google_genai-0.4.0.dist-info}/WHEEL +1 -1
- google_genai-0.3.0.dist-info/RECORD +0 -24
- {google_genai-0.3.0.dist-info → google_genai-0.4.0.dist-info}/LICENSE +0 -0
- {google_genai-0.3.0.dist-info → google_genai-0.4.0.dist-info}/top_level.txt +0 -0
google/genai/models.py
CHANGED
@@ -32,7 +32,7 @@ def _Part_to_mldev(
     parent_object: dict = None,
 ) -> dict:
   to_object = {}
-  if getv(from_object, ['video_metadata']):
+  if getv(from_object, ['video_metadata']) is not None:
     raise ValueError('video_metadata parameter is not supported in Google AI.')

   if getv(from_object, ['thought']) is not None:
@@ -164,51 +164,51 @@ def _Schema_to_mldev(
     parent_object: dict = None,
 ) -> dict:
   to_object = {}
-  if getv(from_object, ['min_items']):
+  if getv(from_object, ['min_items']) is not None:
     raise ValueError('min_items parameter is not supported in Google AI.')

-  if getv(from_object, ['example']):
+  if getv(from_object, ['example']) is not None:
     raise ValueError('example parameter is not supported in Google AI.')

-  if getv(from_object, ['property_ordering']):
+  if getv(from_object, ['property_ordering']) is not None:
     raise ValueError(
         'property_ordering parameter is not supported in Google AI.'
     )

-  if getv(from_object, ['pattern']):
+  if getv(from_object, ['pattern']) is not None:
     raise ValueError('pattern parameter is not supported in Google AI.')

-  if getv(from_object, ['minimum']):
+  if getv(from_object, ['minimum']) is not None:
     raise ValueError('minimum parameter is not supported in Google AI.')

-  if getv(from_object, ['default']):
+  if getv(from_object, ['default']) is not None:
     raise ValueError('default parameter is not supported in Google AI.')

-  if getv(from_object, ['any_of']):
+  if getv(from_object, ['any_of']) is not None:
     raise ValueError('any_of parameter is not supported in Google AI.')

-  if getv(from_object, ['max_length']):
+  if getv(from_object, ['max_length']) is not None:
     raise ValueError('max_length parameter is not supported in Google AI.')

-  if getv(from_object, ['title']):
+  if getv(from_object, ['title']) is not None:
     raise ValueError('title parameter is not supported in Google AI.')

-  if getv(from_object, ['min_length']):
+  if getv(from_object, ['min_length']) is not None:
     raise ValueError('min_length parameter is not supported in Google AI.')

-  if getv(from_object, ['min_properties']):
+  if getv(from_object, ['min_properties']) is not None:
     raise ValueError('min_properties parameter is not supported in Google AI.')

-  if getv(from_object, ['max_items']):
+  if getv(from_object, ['max_items']) is not None:
     raise ValueError('max_items parameter is not supported in Google AI.')

-  if getv(from_object, ['maximum']):
+  if getv(from_object, ['maximum']) is not None:
     raise ValueError('maximum parameter is not supported in Google AI.')

-  if getv(from_object, ['nullable']):
+  if getv(from_object, ['nullable']) is not None:
     raise ValueError('nullable parameter is not supported in Google AI.')

-  if getv(from_object, ['max_properties']):
+  if getv(from_object, ['max_properties']) is not None:
     raise ValueError('max_properties parameter is not supported in Google AI.')

   if getv(from_object, ['type']) is not None:
@@ -320,7 +320,7 @@ def _SafetySetting_to_mldev(
     parent_object: dict = None,
 ) -> dict:
   to_object = {}
-  if getv(from_object, ['method']):
+  if getv(from_object, ['method']) is not None:
     raise ValueError('method parameter is not supported in Google AI.')

   if getv(from_object, ['category']) is not None:
@@ -356,7 +356,7 @@ def _FunctionDeclaration_to_mldev(
     parent_object: dict = None,
 ) -> dict:
   to_object = {}
-  if getv(from_object, ['response']):
+  if getv(from_object, ['response']) is not None:
     raise ValueError('response parameter is not supported in Google AI.')

   if getv(from_object, ['description']) is not None:
@@ -512,7 +512,7 @@ def _Tool_to_mldev(
         ],
     )

-  if getv(from_object, ['retrieval']):
+  if getv(from_object, ['retrieval']) is not None:
     raise ValueError('retrieval parameter is not supported in Google AI.')

   if getv(from_object, ['google_search']) is not None:
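Behavioral note on the hunks above: 0.3.0 gated these "not supported in Google AI" errors on plain truthiness, so an explicitly set but falsy value (False, 0, an empty list) was silently ignored rather than rejected; 0.4.0's `is not None` check rejects any explicitly set value. A minimal sketch of the difference (the two helper functions below are illustrative, not SDK code):

# Illustrative only: contrasts the 0.3.0 truthiness check with the 0.4.0
# `is not None` check used throughout these converters.
def rejected_in_0_3(value) -> bool:
  # 0.3.0 style: `if getv(from_object, ['nullable']):`
  return bool(value)


def rejected_in_0_4(value) -> bool:
  # 0.4.0 style: `if getv(from_object, ['nullable']) is not None:`
  return value is not None


for value in (None, False, 0, True):
  print(value, rejected_in_0_3(value), rejected_in_0_4(value))
# None  -> neither version raises (parameter left unset)
# False -> 0.3.0 ignores it silently, 0.4.0 raises ValueError
# 0     -> 0.3.0 ignores it silently, 0.4.0 raises ValueError
# True  -> both versions raise ValueError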
@@ -797,13 +797,15 @@ def _GenerateContentConfig_to_mldev(
   if getv(from_object, ['stop_sequences']) is not None:
     setv(to_object, ['stopSequences'], getv(from_object, ['stop_sequences']))

-  if getv(from_object, ['response_logprobs']):
-
-
+  if getv(from_object, ['response_logprobs']) is not None:
+    setv(
+        to_object,
+        ['responseLogprobs'],
+        getv(from_object, ['response_logprobs']),
     )

-  if getv(from_object, ['logprobs']):
-
+  if getv(from_object, ['logprobs']) is not None:
+    setv(to_object, ['logprobs'], getv(from_object, ['logprobs']))

   if getv(from_object, ['presence_penalty']) is not None:
     setv(
@@ -838,7 +840,7 @@ def _GenerateContentConfig_to_mldev(
         ),
     )

-  if getv(from_object, ['routing_config']):
+  if getv(from_object, ['routing_config']) is not None:
     raise ValueError('routing_config parameter is not supported in Google AI.')

   if getv(from_object, ['safety_settings']) is not None:
@@ -886,7 +888,7 @@ def _GenerateContentConfig_to_mldev(
         getv(from_object, ['response_modalities']),
     )

-  if getv(from_object, ['media_resolution']):
+  if getv(from_object, ['media_resolution']) is not None:
     raise ValueError(
         'media_resolution parameter is not supported in Google AI.'
     )
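The `_GenerateContentConfig_to_mldev` hunks above now map `response_logprobs` and `logprobs` into the request body (`responseLogprobs` / `logprobs`) on the Gemini Developer API backend. A hedged sketch of a caller exercising those fields; the model name is a placeholder and logprobs availability depends on the chosen model:

from google import genai
from google.genai import types

client = genai.Client(api_key='YOUR_API_KEY')  # Gemini Developer API backend

response = client.models.generate_content(
    model='gemini-2.0-flash-exp',  # placeholder; pick a model that supports logprobs
    contents='Say hello.',
    config=types.GenerateContentConfig(
        response_logprobs=True,  # mapped to responseLogprobs as of 0.4.0
        logprobs=3,              # mapped to logprobs as of 0.4.0
    ),
)
# Logprobs, when returned, appear on the response candidates.
print(response.text)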
@@ -1152,10 +1154,10 @@ def _EmbedContentConfig_to_mldev(
         getv(from_object, ['output_dimensionality']),
     )

-  if getv(from_object, ['mime_type']):
+  if getv(from_object, ['mime_type']) is not None:
     raise ValueError('mime_type parameter is not supported in Google AI.')

-  if getv(from_object, ['auto_truncate']):
+  if getv(from_object, ['auto_truncate']) is not None:
     raise ValueError('auto_truncate parameter is not supported in Google AI.')

   return to_object
@@ -1282,7 +1284,7 @@ def _GenerateImageConfig_to_mldev(
   if getv(from_object, ['http_options']) is not None:
     setv(to_object, ['httpOptions'], getv(from_object, ['http_options']))

-  if getv(from_object, ['output_gcs_uri']):
+  if getv(from_object, ['output_gcs_uri']) is not None:
     raise ValueError('output_gcs_uri parameter is not supported in Google AI.')

   if getv(from_object, ['negative_prompt']) is not None:
@@ -1306,7 +1308,7 @@ def _GenerateImageConfig_to_mldev(
         getv(from_object, ['guidance_scale']),
     )

-  if getv(from_object, ['seed']):
+  if getv(from_object, ['seed']) is not None:
     raise ValueError('seed parameter is not supported in Google AI.')

   if getv(from_object, ['safety_filter_level']) is not None:
@@ -1358,7 +1360,7 @@ def _GenerateImageConfig_to_mldev(
         getv(from_object, ['output_compression_quality']),
     )

-  if getv(from_object, ['add_watermark']):
+  if getv(from_object, ['add_watermark']) is not None:
     raise ValueError('add_watermark parameter is not supported in Google AI.')

   if getv(from_object, ['aspect_ratio']) is not None:
@@ -1539,7 +1541,7 @@ def _Image_to_mldev(
     parent_object: dict = None,
 ) -> dict:
   to_object = {}
-  if getv(from_object, ['gcs_uri']):
+  if getv(from_object, ['gcs_uri']) is not None:
     raise ValueError('gcs_uri parameter is not supported in Google AI.')

   if getv(from_object, ['image_bytes']) is not None:
@@ -1569,15 +1571,15 @@ def _MaskReferenceConfig_to_mldev(
     parent_object: dict = None,
 ) -> dict:
   to_object = {}
-  if getv(from_object, ['mask_mode']):
+  if getv(from_object, ['mask_mode']) is not None:
     raise ValueError('mask_mode parameter is not supported in Google AI.')

-  if getv(from_object, ['segmentation_classes']):
+  if getv(from_object, ['segmentation_classes']) is not None:
     raise ValueError(
         'segmentation_classes parameter is not supported in Google AI.'
     )

-  if getv(from_object, ['mask_dilation']):
+  if getv(from_object, ['mask_dilation']) is not None:
     raise ValueError('mask_dilation parameter is not supported in Google AI.')

   return to_object
@@ -1609,10 +1611,10 @@ def _ControlReferenceConfig_to_mldev(
     parent_object: dict = None,
 ) -> dict:
   to_object = {}
-  if getv(from_object, ['control_type']):
+  if getv(from_object, ['control_type']) is not None:
     raise ValueError('control_type parameter is not supported in Google AI.')

-  if getv(from_object, ['enable_control_image_computation']):
+  if getv(from_object, ['enable_control_image_computation']) is not None:
     raise ValueError(
         'enable_control_image_computation parameter is not supported in'
         ' Google AI.'
@@ -1646,7 +1648,7 @@ def _StyleReferenceConfig_to_mldev(
     parent_object: dict = None,
 ) -> dict:
   to_object = {}
-  if getv(from_object, ['style_description']):
+  if getv(from_object, ['style_description']) is not None:
     raise ValueError(
         'style_description parameter is not supported in Google AI.'
     )
@@ -1676,10 +1678,10 @@ def _SubjectReferenceConfig_to_mldev(
     parent_object: dict = None,
 ) -> dict:
   to_object = {}
-  if getv(from_object, ['subject_type']):
+  if getv(from_object, ['subject_type']) is not None:
     raise ValueError('subject_type parameter is not supported in Google AI.')

-  if getv(from_object, ['subject_description']):
+  if getv(from_object, ['subject_description']) is not None:
     raise ValueError(
         'subject_description parameter is not supported in Google AI.'
     )
@@ -1712,31 +1714,31 @@ def _ReferenceImageAPI_to_mldev(
     parent_object: dict = None,
 ) -> dict:
   to_object = {}
-  if getv(from_object, ['reference_image']):
+  if getv(from_object, ['reference_image']) is not None:
     raise ValueError('reference_image parameter is not supported in Google AI.')

-  if getv(from_object, ['reference_id']):
+  if getv(from_object, ['reference_id']) is not None:
     raise ValueError('reference_id parameter is not supported in Google AI.')

-  if getv(from_object, ['reference_type']):
+  if getv(from_object, ['reference_type']) is not None:
     raise ValueError('reference_type parameter is not supported in Google AI.')

-  if getv(from_object, ['mask_image_config']):
+  if getv(from_object, ['mask_image_config']) is not None:
     raise ValueError(
         'mask_image_config parameter is not supported in Google AI.'
     )

-  if getv(from_object, ['control_image_config']):
+  if getv(from_object, ['control_image_config']) is not None:
     raise ValueError(
         'control_image_config parameter is not supported in Google AI.'
     )

-  if getv(from_object, ['style_image_config']):
+  if getv(from_object, ['style_image_config']) is not None:
     raise ValueError(
         'style_image_config parameter is not supported in Google AI.'
     )

-  if getv(from_object, ['subject_image_config']):
+  if getv(from_object, ['subject_image_config']) is not None:
     raise ValueError(
         'subject_image_config parameter is not supported in Google AI.'
     )
@@ -1813,7 +1815,7 @@ def _EditImageConfig_to_mldev(
   if getv(from_object, ['http_options']) is not None:
     setv(to_object, ['httpOptions'], getv(from_object, ['http_options']))

-  if getv(from_object, ['output_gcs_uri']):
+  if getv(from_object, ['output_gcs_uri']) is not None:
     raise ValueError('output_gcs_uri parameter is not supported in Google AI.')

   if getv(from_object, ['negative_prompt']) is not None:
@@ -1837,7 +1839,7 @@ def _EditImageConfig_to_mldev(
         getv(from_object, ['guidance_scale']),
     )

-  if getv(from_object, ['seed']):
+  if getv(from_object, ['seed']) is not None:
     raise ValueError('seed parameter is not supported in Google AI.')

   if getv(from_object, ['safety_filter_level']) is not None:
@@ -2083,13 +2085,6 @@ def _UpscaleImageAPIConfig_to_mldev(
   if getv(from_object, ['http_options']) is not None:
     setv(to_object, ['httpOptions'], getv(from_object, ['http_options']))

-  if getv(from_object, ['upscale_factor']) is not None:
-    setv(
-        parent_object,
-        ['parameters', 'upscaleConfig', 'upscaleFactor'],
-        getv(from_object, ['upscale_factor']),
-    )
-
   if getv(from_object, ['include_rai_reason']) is not None:
     setv(
         parent_object,
@@ -2133,13 +2128,6 @@ def _UpscaleImageAPIConfig_to_vertex(
   if getv(from_object, ['http_options']) is not None:
     setv(to_object, ['httpOptions'], getv(from_object, ['http_options']))

-  if getv(from_object, ['upscale_factor']) is not None:
-    setv(
-        parent_object,
-        ['parameters', 'upscaleConfig', 'upscaleFactor'],
-        getv(from_object, ['upscale_factor']),
-    )
-
   if getv(from_object, ['include_rai_reason']) is not None:
     setv(
         parent_object,
@@ -2194,6 +2182,13 @@ def _UpscaleImageAPIParameters_to_mldev(
         _Image_to_mldev(api_client, getv(from_object, ['image']), to_object),
     )

+  if getv(from_object, ['upscale_factor']) is not None:
+    setv(
+        to_object,
+        ['parameters', 'upscaleConfig', 'upscaleFactor'],
+        getv(from_object, ['upscale_factor']),
+    )
+
   if getv(from_object, ['config']) is not None:
     setv(
         to_object,
@@ -2226,6 +2221,13 @@ def _UpscaleImageAPIParameters_to_vertex(
         _Image_to_vertex(api_client, getv(from_object, ['image']), to_object),
     )

+  if getv(from_object, ['upscale_factor']) is not None:
+    setv(
+        to_object,
+        ['parameters', 'upscaleConfig', 'upscaleFactor'],
+        getv(from_object, ['upscale_factor']),
+    )
+
   if getv(from_object, ['config']) is not None:
     setv(
         to_object,
@@ -2496,7 +2498,7 @@ def _CountTokensConfig_to_mldev(
         ],
     )

-  if getv(from_object, ['generation_config']):
+  if getv(from_object, ['generation_config']) is not None:
     raise ValueError(
         'generation_config parameter is not supported in Google AI.'
     )
@@ -2655,7 +2657,7 @@ def _ComputeTokensParameters_to_mldev(
         t.t_model(api_client, getv(from_object, ['model'])),
     )

-  if getv(from_object, ['contents']):
+  if getv(from_object, ['contents']) is not None:
     raise ValueError('contents parameter is not supported in Google AI.')

   if getv(from_object, ['config']) is not None:
@@ -3984,6 +3986,7 @@ class Models(_common.BaseModule):
       *,
       model: str,
       image: types.ImageOrDict,
+      upscale_factor: str,
       config: Optional[types._UpscaleImageAPIConfigOrDict] = None,
   ) -> types.UpscaleImageResponse:
     """Upscales an image.
@@ -3991,12 +3994,14 @@ class Models(_common.BaseModule):
     Args:
       model (str): The model to use.
       image (Image): The input image for upscaling.
+      upscale_factor (str): The factor to upscale the image (x2 or x4).
       config (_UpscaleImageAPIConfig): Configuration for upscaling.
     """

     parameter_model = types._UpscaleImageAPIParameters(
         model=model,
         image=image,
+        upscale_factor=upscale_factor,
         config=config,
     )

@@ -4443,13 +4448,15 @@ class Models(_common.BaseModule):
       *,
       model: str,
       image: types.ImageOrDict,
-
+      upscale_factor: str,
+      config: Optional[types.UpscaleImageConfigOrDict] = None,
   ) -> types.UpscaleImageResponse:
     """Makes an API request to upscale a provided image.

     Args:
       model (str): The model to use.
       image (Image): The input image for upscaling.
+      upscale_factor (str): The factor to upscale the image (x2 or x4).
       config (UpscaleImageConfig): Configuration for upscaling.

     Usage:
@@ -4462,9 +4469,7 @@ class Models(_common.BaseModule):
         response=client.models.upscale_image(
             model='imagen-3.0-generate-001',
             image=types.Image.from_file(IMAGE_FILE_PATH),
-
-            'upscale_factor': 'x2',
-        }
+            upscale_factor='x2',
         )
         response.generated_images[0].image.show()
         # Opens my-image.png which is upscaled by a factor of 2.
@@ -4474,10 +4479,12 @@ class Models(_common.BaseModule):
     types.UpscaleImageParameters(
         model=model,
         image=image,
+        upscale_factor=upscale_factor,
         config=config,
     )

     # Convert to API config.
+    config = config or {}
     config_dct = config if isinstance(config, dict) else config.dict()
     api_config = types._UpscaleImageAPIConfigDict(**config_dct)  # pylint: disable=protected-access

@@ -4485,7 +4492,12 @@ class Models(_common.BaseModule):
     api_config['mode'] = 'upscale'
     api_config['number_of_images'] = 1

-    return self._upscale_image(
+    return self._upscale_image(
+        model=model,
+        image=image,
+        upscale_factor=upscale_factor,
+        config=api_config,
+    )

   def list(
       self,
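Net effect of the `Models.upscale_image` hunks: `upscale_factor` moves out of the config dict and becomes a required keyword argument, and it is threaded through to the private `_upscale_image` call. Expanding the updated docstring usage into a self-contained sketch (client construction, project, location, and the file path are placeholder assumptions; upscaling targets the Vertex AI backend):

from google import genai
from google.genai import types

# Assumed Vertex AI client setup; substitute your own project and location.
client = genai.Client(vertexai=True, project='my-project', location='us-central1')

IMAGE_FILE_PATH = 'my-image.png'  # placeholder path
response = client.models.upscale_image(
    model='imagen-3.0-generate-001',
    image=types.Image.from_file(IMAGE_FILE_PATH),
    upscale_factor='x2',  # new required keyword in 0.4.0
)
response.generated_images[0].image.show()
# Opens my-image.png which is upscaled by a factor of 2.

Callers that passed config={'upscale_factor': 'x2'} against 0.3.0 need to move that value to the new keyword argument.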
@@ -4882,6 +4894,7 @@ class AsyncModels(_common.BaseModule):
       *,
       model: str,
       image: types.ImageOrDict,
+      upscale_factor: str,
       config: Optional[types._UpscaleImageAPIConfigOrDict] = None,
   ) -> types.UpscaleImageResponse:
     """Upscales an image.
@@ -4889,12 +4902,14 @@ class AsyncModels(_common.BaseModule):
     Args:
       model (str): The model to use.
       image (Image): The input image for upscaling.
+      upscale_factor (str): The factor to upscale the image (x2 or x4).
       config (_UpscaleImageAPIConfig): Configuration for upscaling.
     """

     parameter_model = types._UpscaleImageAPIParameters(
         model=model,
         image=image,
+        upscale_factor=upscale_factor,
         config=config,
     )

@@ -5371,13 +5386,15 @@ class AsyncModels(_common.BaseModule):
       *,
       model: str,
       image: types.ImageOrDict,
-
+      upscale_factor: str,
+      config: Optional[types.UpscaleImageConfigOrDict] = None,
   ) -> types.UpscaleImageResponse:
     """Makes an API request to upscale a provided image.

     Args:
       model (str): The model to use.
       image (Image): The input image for upscaling.
+      upscale_factor (str): The factor to upscale the image (x2 or x4).
       config (UpscaleImageConfig): Configuration for upscaling.

     Usage:
@@ -5388,11 +5405,9 @@ class AsyncModels(_common.BaseModule):

         IMAGE_FILE_PATH="my-image.png"
         response = await client.aio.models.upscale_image(
-            model
-            image
-
-            'upscale_factor': 'x2',
-        }
+            model='imagen-3.0-generate-001',
+            image=types.Image.from_file(IMAGE_FILE_PATH),
+            upscale_factor='x2',
         )
         response.generated_images[0].image.show()
         # Opens my-image.png which is upscaled by a factor of 2.
@@ -5402,10 +5417,12 @@ class AsyncModels(_common.BaseModule):
     types.UpscaleImageParameters(
         model=model,
         image=image,
+        upscale_factor=upscale_factor,
         config=config,
     )

     # Convert to API config.
+    config = config or {}
     config_dct = config if isinstance(config, dict) else config.dict()
     api_config = types._UpscaleImageAPIConfigDict(**config_dct)  # pylint: disable=protected-access

@@ -5414,5 +5431,8 @@ class AsyncModels(_common.BaseModule):
     api_config['number_of_images'] = 1

     return await self._upscale_image(
-        model=model,
+        model=model,
+        image=image,
+        upscale_factor=upscale_factor,
+        config=api_config,
     )
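The `AsyncModels` hunks mirror the synchronous changes, including the corrected docstring usage. A short sketch under the same placeholder assumptions as the synchronous example above:

import asyncio

from google import genai
from google.genai import types


async def main() -> None:
  # Assumed Vertex AI client setup; substitute your own project and location.
  client = genai.Client(vertexai=True, project='my-project', location='us-central1')
  response = await client.aio.models.upscale_image(
      model='imagen-3.0-generate-001',
      image=types.Image.from_file('my-image.png'),  # placeholder path
      upscale_factor='x2',
  )
  response.generated_images[0].image.show()


asyncio.run(main())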
google/genai/tunings.py
CHANGED
@@ -195,10 +195,10 @@ def _TuningExample_to_vertex(
     parent_object: dict = None,
 ) -> dict:
   to_object = {}
-  if getv(from_object, ['text_input']):
+  if getv(from_object, ['text_input']) is not None:
     raise ValueError('text_input parameter is not supported in Vertex AI.')

-  if getv(from_object, ['output']):
+  if getv(from_object, ['output']) is not None:
     raise ValueError('output parameter is not supported in Vertex AI.')

   return to_object
@@ -210,7 +210,7 @@ def _TuningDataset_to_mldev(
     parent_object: dict = None,
 ) -> dict:
   to_object = {}
-  if getv(from_object, ['gcs_uri']):
+  if getv(from_object, ['gcs_uri']) is not None:
     raise ValueError('gcs_uri parameter is not supported in Google AI.')

   if getv(from_object, ['examples']) is not None:
@@ -239,7 +239,7 @@ def _TuningDataset_to_vertex(
         getv(from_object, ['gcs_uri']),
     )

-  if getv(from_object, ['examples']):
+  if getv(from_object, ['examples']) is not None:
     raise ValueError('examples parameter is not supported in Vertex AI.')

   return to_object
@@ -251,7 +251,7 @@ def _TuningValidationDataset_to_mldev(
     parent_object: dict = None,
 ) -> dict:
   to_object = {}
-  if getv(from_object, ['gcs_uri']):
+  if getv(from_object, ['gcs_uri']) is not None:
     raise ValueError('gcs_uri parameter is not supported in Google AI.')

   return to_object
@@ -278,7 +278,7 @@ def _CreateTuningJobConfig_to_mldev(
   if getv(from_object, ['http_options']) is not None:
     setv(to_object, ['httpOptions'], getv(from_object, ['http_options']))

-  if getv(from_object, ['validation_dataset']):
+  if getv(from_object, ['validation_dataset']) is not None:
     raise ValueError(
         'validation_dataset parameter is not supported in Google AI.'
     )
@@ -290,7 +290,7 @@ def _CreateTuningJobConfig_to_mldev(
         getv(from_object, ['tuned_model_display_name']),
     )

-  if getv(from_object, ['description']):
+  if getv(from_object, ['description']) is not None:
     raise ValueError('description parameter is not supported in Google AI.')

   if getv(from_object, ['epoch_count']) is not None:
@@ -307,7 +307,7 @@ def _CreateTuningJobConfig_to_mldev(
         getv(from_object, ['learning_rate_multiplier']),
     )

-  if getv(from_object, ['adapter_size']):
+  if getv(from_object, ['adapter_size']) is not None:
     raise ValueError('adapter_size parameter is not supported in Google AI.')

   if getv(from_object, ['batch_size']) is not None:
@@ -376,10 +376,10 @@ def _CreateTuningJobConfig_to_vertex(
         getv(from_object, ['adapter_size']),
     )

-  if getv(from_object, ['batch_size']):
+  if getv(from_object, ['batch_size']) is not None:
     raise ValueError('batch_size parameter is not supported in Vertex AI.')

-  if getv(from_object, ['learning_rate']):
+  if getv(from_object, ['learning_rate']) is not None:
     raise ValueError('learning_rate parameter is not supported in Vertex AI.')

   return to_object
@@ -451,7 +451,7 @@ def _DistillationDataset_to_mldev(
     parent_object: dict = None,
 ) -> dict:
   to_object = {}
-  if getv(from_object, ['gcs_uri']):
+  if getv(from_object, ['gcs_uri']) is not None:
     raise ValueError('gcs_uri parameter is not supported in Google AI.')

   return to_object
@@ -479,7 +479,7 @@ def _DistillationValidationDataset_to_mldev(
     parent_object: dict = None,
 ) -> dict:
   to_object = {}
-  if getv(from_object, ['gcs_uri']):
+  if getv(from_object, ['gcs_uri']) is not None:
     raise ValueError('gcs_uri parameter is not supported in Google AI.')

   return to_object
@@ -506,7 +506,7 @@ def _CreateDistillationJobConfig_to_mldev(
   if getv(from_object, ['http_options']) is not None:
     setv(to_object, ['httpOptions'], getv(from_object, ['http_options']))

-  if getv(from_object, ['validation_dataset']):
+  if getv(from_object, ['validation_dataset']) is not None:
     raise ValueError(
         'validation_dataset parameter is not supported in Google AI.'
     )
@@ -532,10 +532,10 @@ def _CreateDistillationJobConfig_to_mldev(
         getv(from_object, ['learning_rate_multiplier']),
     )

-  if getv(from_object, ['adapter_size']):
+  if getv(from_object, ['adapter_size']) is not None:
     raise ValueError('adapter_size parameter is not supported in Google AI.')

-  if getv(from_object, ['pipeline_root_directory']):
+  if getv(from_object, ['pipeline_root_directory']) is not None:
     raise ValueError(
         'pipeline_root_directory parameter is not supported in Google AI.'
     )
@@ -605,10 +605,10 @@ def _CreateDistillationJobParameters_to_mldev(
     parent_object: dict = None,
 ) -> dict:
   to_object = {}
-  if getv(from_object, ['student_model']):
+  if getv(from_object, ['student_model']) is not None:
     raise ValueError('student_model parameter is not supported in Google AI.')

-  if getv(from_object, ['teacher_model']):
+  if getv(from_object, ['teacher_model']) is not None:
     raise ValueError('teacher_model parameter is not supported in Google AI.')

   if getv(from_object, ['training_dataset']) is not None: