google-genai 1.7.0__py3-none-any.whl → 1.53.0__py3-none-any.whl
This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
- google/genai/__init__.py +4 -2
- google/genai/_adapters.py +55 -0
- google/genai/_api_client.py +1301 -299
- google/genai/_api_module.py +1 -1
- google/genai/_automatic_function_calling_util.py +54 -33
- google/genai/_base_transformers.py +26 -0
- google/genai/_base_url.py +50 -0
- google/genai/_common.py +560 -59
- google/genai/_extra_utils.py +371 -38
- google/genai/_live_converters.py +1467 -0
- google/genai/_local_tokenizer_loader.py +214 -0
- google/genai/_mcp_utils.py +117 -0
- google/genai/_operations_converters.py +394 -0
- google/genai/_replay_api_client.py +204 -92
- google/genai/_test_api_client.py +1 -1
- google/genai/_tokens_converters.py +520 -0
- google/genai/_transformers.py +633 -233
- google/genai/batches.py +1733 -538
- google/genai/caches.py +678 -1012
- google/genai/chats.py +48 -38
- google/genai/client.py +142 -15
- google/genai/documents.py +532 -0
- google/genai/errors.py +141 -35
- google/genai/file_search_stores.py +1296 -0
- google/genai/files.py +312 -744
- google/genai/live.py +617 -367
- google/genai/live_music.py +197 -0
- google/genai/local_tokenizer.py +395 -0
- google/genai/models.py +3598 -3116
- google/genai/operations.py +201 -362
- google/genai/pagers.py +23 -7
- google/genai/py.typed +1 -0
- google/genai/tokens.py +362 -0
- google/genai/tunings.py +1274 -496
- google/genai/types.py +14535 -5454
- google/genai/version.py +2 -2
- {google_genai-1.7.0.dist-info → google_genai-1.53.0.dist-info}/METADATA +736 -234
- google_genai-1.53.0.dist-info/RECORD +41 -0
- {google_genai-1.7.0.dist-info → google_genai-1.53.0.dist-info}/WHEEL +1 -1
- google_genai-1.7.0.dist-info/RECORD +0 -27
- {google_genai-1.7.0.dist-info → google_genai-1.53.0.dist-info/licenses}/LICENSE +0 -0
- {google_genai-1.7.0.dist-info → google_genai-1.53.0.dist-info}/top_level.txt +0 -0
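Not part of the upstream diff, but for orientation: you can confirm which of these two releases is installed in a given environment by reading the installed distribution metadata. A minimal sketch, assuming only that the package was installed under its published distribution name google-genai:

from importlib.metadata import version

# Prints the installed google-genai version string, e.g. "1.7.0" or "1.53.0".
print(version("google-genai"))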
google/genai/tunings.py
CHANGED
|
@@ -1,4 +1,4 @@
|
|
|
1
|
-
# Copyright
|
|
1
|
+
# Copyright 2025 Google LLC
|
|
2
2
|
#
|
|
3
3
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4
4
|
# you may not use this file except in compliance with the License.
|
|
@@ -15,409 +15,877 @@
|
|
|
15
15
|
|
|
16
16
|
# Code generated by the Google Gen AI SDK generator DO NOT EDIT.
|
|
17
17
|
|
|
18
|
+
import json
|
|
18
19
|
import logging
|
|
19
20
|
from typing import Any, Optional, Union
|
|
20
21
|
from urllib.parse import urlencode
|
|
22
|
+
|
|
21
23
|
from . import _api_module
|
|
22
24
|
from . import _common
|
|
23
25
|
from . import _transformers as t
|
|
24
26
|
from . import types
|
|
25
|
-
from ._api_client import BaseApiClient
|
|
26
27
|
from ._common import get_value_by_path as getv
|
|
27
28
|
from ._common import set_value_by_path as setv
|
|
28
29
|
from .pagers import AsyncPager, Pager
|
|
29
30
|
|
|
31
|
+
|
|
30
32
|
logger = logging.getLogger('google_genai.tunings')
|
|
31
33
|
|
|
32
34
|
|
|
33
|
-
def
|
|
34
|
-
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
) -> dict:
|
|
35
|
+
def _AutoraterConfig_from_vertex(
|
|
36
|
+
from_object: Union[dict[str, Any], object],
|
|
37
|
+
parent_object: Optional[dict[str, Any]] = None,
|
|
38
|
+
root_object: Optional[Union[dict[str, Any], object]] = None,
|
|
39
|
+
) -> dict[str, Any]:
|
|
40
|
+
to_object: dict[str, Any] = {}
|
|
41
|
+
if getv(from_object, ['samplingCount']) is not None:
|
|
42
|
+
setv(to_object, ['sampling_count'], getv(from_object, ['samplingCount']))
|
|
43
|
+
|
|
44
|
+
if getv(from_object, ['flipEnabled']) is not None:
|
|
45
|
+
setv(to_object, ['flip_enabled'], getv(from_object, ['flipEnabled']))
|
|
46
|
+
|
|
47
|
+
if getv(from_object, ['autoraterModel']) is not None:
|
|
48
|
+
setv(to_object, ['autorater_model'], getv(from_object, ['autoraterModel']))
|
|
49
|
+
|
|
50
|
+
if getv(from_object, ['generationConfig']) is not None:
|
|
51
|
+
setv(
|
|
52
|
+
to_object,
|
|
53
|
+
['generation_config'],
|
|
54
|
+
_GenerationConfig_from_vertex(
|
|
55
|
+
getv(from_object, ['generationConfig']), to_object, root_object
|
|
56
|
+
),
|
|
57
|
+
)
|
|
58
|
+
|
|
59
|
+
return to_object
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
def _AutoraterConfig_to_vertex(
|
|
63
|
+
from_object: Union[dict[str, Any], object],
|
|
64
|
+
parent_object: Optional[dict[str, Any]] = None,
|
|
65
|
+
root_object: Optional[Union[dict[str, Any], object]] = None,
|
|
66
|
+
) -> dict[str, Any]:
|
|
67
|
+
to_object: dict[str, Any] = {}
|
|
68
|
+
if getv(from_object, ['sampling_count']) is not None:
|
|
69
|
+
setv(to_object, ['samplingCount'], getv(from_object, ['sampling_count']))
|
|
70
|
+
|
|
71
|
+
if getv(from_object, ['flip_enabled']) is not None:
|
|
72
|
+
setv(to_object, ['flipEnabled'], getv(from_object, ['flip_enabled']))
|
|
73
|
+
|
|
74
|
+
if getv(from_object, ['autorater_model']) is not None:
|
|
75
|
+
setv(to_object, ['autoraterModel'], getv(from_object, ['autorater_model']))
|
|
76
|
+
|
|
77
|
+
if getv(from_object, ['generation_config']) is not None:
|
|
78
|
+
setv(
|
|
79
|
+
to_object,
|
|
80
|
+
['generationConfig'],
|
|
81
|
+
_GenerationConfig_to_vertex(
|
|
82
|
+
getv(from_object, ['generation_config']), to_object, root_object
|
|
83
|
+
),
|
|
84
|
+
)
|
|
85
|
+
|
|
86
|
+
return to_object
|
|
87
|
+
|
|
88
|
+
|
|
89
|
+
def _CancelTuningJobParameters_to_mldev(
|
|
90
|
+
from_object: Union[dict[str, Any], object],
|
|
91
|
+
parent_object: Optional[dict[str, Any]] = None,
|
|
92
|
+
root_object: Optional[Union[dict[str, Any], object]] = None,
|
|
93
|
+
) -> dict[str, Any]:
|
|
38
94
|
to_object: dict[str, Any] = {}
|
|
39
95
|
if getv(from_object, ['name']) is not None:
|
|
40
96
|
setv(to_object, ['_url', 'name'], getv(from_object, ['name']))
|
|
41
97
|
|
|
42
|
-
if getv(from_object, ['config']) is not None:
|
|
43
|
-
setv(to_object, ['config'], getv(from_object, ['config']))
|
|
44
|
-
|
|
45
98
|
return to_object
|
|
46
99
|
|
|
47
100
|
|
|
48
|
-
def
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
) -> dict:
|
|
101
|
+
def _CancelTuningJobParameters_to_vertex(
|
|
102
|
+
from_object: Union[dict[str, Any], object],
|
|
103
|
+
parent_object: Optional[dict[str, Any]] = None,
|
|
104
|
+
root_object: Optional[Union[dict[str, Any], object]] = None,
|
|
105
|
+
) -> dict[str, Any]:
|
|
53
106
|
to_object: dict[str, Any] = {}
|
|
54
107
|
if getv(from_object, ['name']) is not None:
|
|
55
108
|
setv(to_object, ['_url', 'name'], getv(from_object, ['name']))
|
|
56
109
|
|
|
57
|
-
|
|
58
|
-
|
|
110
|
+
return to_object
|
|
111
|
+
|
|
112
|
+
|
|
113
|
+
def _CancelTuningJobResponse_from_mldev(
|
|
114
|
+
from_object: Union[dict[str, Any], object],
|
|
115
|
+
parent_object: Optional[dict[str, Any]] = None,
|
|
116
|
+
root_object: Optional[Union[dict[str, Any], object]] = None,
|
|
117
|
+
) -> dict[str, Any]:
|
|
118
|
+
to_object: dict[str, Any] = {}
|
|
119
|
+
if getv(from_object, ['sdkHttpResponse']) is not None:
|
|
120
|
+
setv(
|
|
121
|
+
to_object, ['sdk_http_response'], getv(from_object, ['sdkHttpResponse'])
|
|
122
|
+
)
|
|
59
123
|
|
|
60
124
|
return to_object
|
|
61
125
|
|
|
62
126
|
|
|
63
|
-
def
|
|
64
|
-
|
|
65
|
-
|
|
66
|
-
|
|
67
|
-
) -> dict:
|
|
127
|
+
def _CancelTuningJobResponse_from_vertex(
|
|
128
|
+
from_object: Union[dict[str, Any], object],
|
|
129
|
+
parent_object: Optional[dict[str, Any]] = None,
|
|
130
|
+
root_object: Optional[Union[dict[str, Any], object]] = None,
|
|
131
|
+
) -> dict[str, Any]:
|
|
68
132
|
to_object: dict[str, Any] = {}
|
|
133
|
+
if getv(from_object, ['sdkHttpResponse']) is not None:
|
|
134
|
+
setv(
|
|
135
|
+
to_object, ['sdk_http_response'], getv(from_object, ['sdkHttpResponse'])
|
|
136
|
+
)
|
|
69
137
|
|
|
70
|
-
|
|
138
|
+
return to_object
|
|
139
|
+
|
|
140
|
+
|
|
141
|
+
def _CreateTuningJobConfig_to_mldev(
|
|
142
|
+
from_object: Union[dict[str, Any], object],
|
|
143
|
+
parent_object: Optional[dict[str, Any]] = None,
|
|
144
|
+
root_object: Optional[Union[dict[str, Any], object]] = None,
|
|
145
|
+
) -> dict[str, Any]:
|
|
146
|
+
to_object: dict[str, Any] = {}
|
|
147
|
+
|
|
148
|
+
if getv(from_object, ['validation_dataset']) is not None:
|
|
149
|
+
raise ValueError(
|
|
150
|
+
'validation_dataset parameter is not supported in Gemini API.'
|
|
151
|
+
)
|
|
152
|
+
|
|
153
|
+
if getv(from_object, ['tuned_model_display_name']) is not None:
|
|
71
154
|
setv(
|
|
72
|
-
parent_object,
|
|
155
|
+
parent_object,
|
|
156
|
+
['displayName'],
|
|
157
|
+
getv(from_object, ['tuned_model_display_name']),
|
|
73
158
|
)
|
|
74
159
|
|
|
75
|
-
if getv(from_object, ['
|
|
160
|
+
if getv(from_object, ['description']) is not None:
|
|
161
|
+
raise ValueError('description parameter is not supported in Gemini API.')
|
|
162
|
+
|
|
163
|
+
if getv(from_object, ['epoch_count']) is not None:
|
|
76
164
|
setv(
|
|
77
165
|
parent_object,
|
|
78
|
-
['
|
|
79
|
-
getv(from_object, ['
|
|
166
|
+
['tuningTask', 'hyperparameters', 'epochCount'],
|
|
167
|
+
getv(from_object, ['epoch_count']),
|
|
80
168
|
)
|
|
81
169
|
|
|
82
|
-
if getv(from_object, ['
|
|
83
|
-
setv(
|
|
170
|
+
if getv(from_object, ['learning_rate_multiplier']) is not None:
|
|
171
|
+
setv(
|
|
172
|
+
to_object,
|
|
173
|
+
['tuningTask', 'hyperparameters', 'learningRateMultiplier'],
|
|
174
|
+
getv(from_object, ['learning_rate_multiplier']),
|
|
175
|
+
)
|
|
84
176
|
|
|
85
|
-
|
|
177
|
+
if getv(from_object, ['export_last_checkpoint_only']) is not None:
|
|
178
|
+
raise ValueError(
|
|
179
|
+
'export_last_checkpoint_only parameter is not supported in Gemini API.'
|
|
180
|
+
)
|
|
86
181
|
|
|
182
|
+
if getv(from_object, ['pre_tuned_model_checkpoint_id']) is not None:
|
|
183
|
+
raise ValueError(
|
|
184
|
+
'pre_tuned_model_checkpoint_id parameter is not supported in Gemini'
|
|
185
|
+
' API.'
|
|
186
|
+
)
|
|
87
187
|
|
|
88
|
-
|
|
89
|
-
|
|
90
|
-
from_object: Union[dict, object],
|
|
91
|
-
parent_object: Optional[dict] = None,
|
|
92
|
-
) -> dict:
|
|
93
|
-
to_object: dict[str, Any] = {}
|
|
188
|
+
if getv(from_object, ['adapter_size']) is not None:
|
|
189
|
+
raise ValueError('adapter_size parameter is not supported in Gemini API.')
|
|
94
190
|
|
|
95
|
-
if getv(from_object, ['
|
|
191
|
+
if getv(from_object, ['batch_size']) is not None:
|
|
96
192
|
setv(
|
|
97
|
-
parent_object,
|
|
193
|
+
parent_object,
|
|
194
|
+
['tuningTask', 'hyperparameters', 'batchSize'],
|
|
195
|
+
getv(from_object, ['batch_size']),
|
|
98
196
|
)
|
|
99
197
|
|
|
100
|
-
if getv(from_object, ['
|
|
198
|
+
if getv(from_object, ['learning_rate']) is not None:
|
|
101
199
|
setv(
|
|
102
200
|
parent_object,
|
|
103
|
-
['
|
|
104
|
-
getv(from_object, ['
|
|
201
|
+
['tuningTask', 'hyperparameters', 'learningRate'],
|
|
202
|
+
getv(from_object, ['learning_rate']),
|
|
105
203
|
)
|
|
106
204
|
|
|
107
|
-
if getv(from_object, ['
|
|
108
|
-
|
|
205
|
+
if getv(from_object, ['evaluation_config']) is not None:
|
|
206
|
+
raise ValueError(
|
|
207
|
+
'evaluation_config parameter is not supported in Gemini API.'
|
|
208
|
+
)
|
|
209
|
+
|
|
210
|
+
if getv(from_object, ['labels']) is not None:
|
|
211
|
+
raise ValueError('labels parameter is not supported in Gemini API.')
|
|
212
|
+
|
|
213
|
+
if getv(from_object, ['beta']) is not None:
|
|
214
|
+
raise ValueError('beta parameter is not supported in Gemini API.')
|
|
109
215
|
|
|
110
216
|
return to_object
|
|
111
217
|
|
|
112
218
|
|
|
113
|
-
def
|
|
114
|
-
|
|
115
|
-
|
|
116
|
-
|
|
117
|
-
) -> dict:
|
|
219
|
+
def _CreateTuningJobConfig_to_vertex(
|
|
220
|
+
from_object: Union[dict[str, Any], object],
|
|
221
|
+
parent_object: Optional[dict[str, Any]] = None,
|
|
222
|
+
root_object: Optional[Union[dict[str, Any], object]] = None,
|
|
223
|
+
) -> dict[str, Any]:
|
|
118
224
|
to_object: dict[str, Any] = {}
|
|
119
|
-
|
|
225
|
+
|
|
226
|
+
discriminator = getv(root_object, ['config', 'method'])
|
|
227
|
+
if discriminator is None:
|
|
228
|
+
discriminator = 'SUPERVISED_FINE_TUNING'
|
|
229
|
+
if discriminator == 'SUPERVISED_FINE_TUNING':
|
|
230
|
+
if getv(from_object, ['validation_dataset']) is not None:
|
|
231
|
+
setv(
|
|
232
|
+
parent_object,
|
|
233
|
+
['supervisedTuningSpec'],
|
|
234
|
+
_TuningValidationDataset_to_vertex(
|
|
235
|
+
getv(from_object, ['validation_dataset']), to_object, root_object
|
|
236
|
+
),
|
|
237
|
+
)
|
|
238
|
+
|
|
239
|
+
elif discriminator == 'PREFERENCE_TUNING':
|
|
240
|
+
if getv(from_object, ['validation_dataset']) is not None:
|
|
241
|
+
setv(
|
|
242
|
+
parent_object,
|
|
243
|
+
['preferenceOptimizationSpec'],
|
|
244
|
+
_TuningValidationDataset_to_vertex(
|
|
245
|
+
getv(from_object, ['validation_dataset']), to_object, root_object
|
|
246
|
+
),
|
|
247
|
+
)
|
|
248
|
+
|
|
249
|
+
if getv(from_object, ['tuned_model_display_name']) is not None:
|
|
120
250
|
setv(
|
|
121
|
-
|
|
122
|
-
['
|
|
123
|
-
|
|
124
|
-
|
|
125
|
-
|
|
251
|
+
parent_object,
|
|
252
|
+
['tunedModelDisplayName'],
|
|
253
|
+
getv(from_object, ['tuned_model_display_name']),
|
|
254
|
+
)
|
|
255
|
+
|
|
256
|
+
if getv(from_object, ['description']) is not None:
|
|
257
|
+
setv(parent_object, ['description'], getv(from_object, ['description']))
|
|
258
|
+
|
|
259
|
+
discriminator = getv(root_object, ['config', 'method'])
|
|
260
|
+
if discriminator is None:
|
|
261
|
+
discriminator = 'SUPERVISED_FINE_TUNING'
|
|
262
|
+
if discriminator == 'SUPERVISED_FINE_TUNING':
|
|
263
|
+
if getv(from_object, ['epoch_count']) is not None:
|
|
264
|
+
setv(
|
|
265
|
+
parent_object,
|
|
266
|
+
['supervisedTuningSpec', 'hyperParameters', 'epochCount'],
|
|
267
|
+
getv(from_object, ['epoch_count']),
|
|
268
|
+
)
|
|
269
|
+
|
|
270
|
+
elif discriminator == 'PREFERENCE_TUNING':
|
|
271
|
+
if getv(from_object, ['epoch_count']) is not None:
|
|
272
|
+
setv(
|
|
273
|
+
parent_object,
|
|
274
|
+
['preferenceOptimizationSpec', 'hyperParameters', 'epochCount'],
|
|
275
|
+
getv(from_object, ['epoch_count']),
|
|
276
|
+
)
|
|
277
|
+
|
|
278
|
+
discriminator = getv(root_object, ['config', 'method'])
|
|
279
|
+
if discriminator is None:
|
|
280
|
+
discriminator = 'SUPERVISED_FINE_TUNING'
|
|
281
|
+
if discriminator == 'SUPERVISED_FINE_TUNING':
|
|
282
|
+
if getv(from_object, ['learning_rate_multiplier']) is not None:
|
|
283
|
+
setv(
|
|
284
|
+
parent_object,
|
|
285
|
+
['supervisedTuningSpec', 'hyperParameters', 'learningRateMultiplier'],
|
|
286
|
+
getv(from_object, ['learning_rate_multiplier']),
|
|
287
|
+
)
|
|
288
|
+
|
|
289
|
+
elif discriminator == 'PREFERENCE_TUNING':
|
|
290
|
+
if getv(from_object, ['learning_rate_multiplier']) is not None:
|
|
291
|
+
setv(
|
|
292
|
+
parent_object,
|
|
293
|
+
[
|
|
294
|
+
'preferenceOptimizationSpec',
|
|
295
|
+
'hyperParameters',
|
|
296
|
+
'learningRateMultiplier',
|
|
297
|
+
],
|
|
298
|
+
getv(from_object, ['learning_rate_multiplier']),
|
|
299
|
+
)
|
|
300
|
+
|
|
301
|
+
discriminator = getv(root_object, ['config', 'method'])
|
|
302
|
+
if discriminator is None:
|
|
303
|
+
discriminator = 'SUPERVISED_FINE_TUNING'
|
|
304
|
+
if discriminator == 'SUPERVISED_FINE_TUNING':
|
|
305
|
+
if getv(from_object, ['export_last_checkpoint_only']) is not None:
|
|
306
|
+
setv(
|
|
307
|
+
parent_object,
|
|
308
|
+
['supervisedTuningSpec', 'exportLastCheckpointOnly'],
|
|
309
|
+
getv(from_object, ['export_last_checkpoint_only']),
|
|
310
|
+
)
|
|
311
|
+
|
|
312
|
+
elif discriminator == 'PREFERENCE_TUNING':
|
|
313
|
+
if getv(from_object, ['export_last_checkpoint_only']) is not None:
|
|
314
|
+
setv(
|
|
315
|
+
parent_object,
|
|
316
|
+
['preferenceOptimizationSpec', 'exportLastCheckpointOnly'],
|
|
317
|
+
getv(from_object, ['export_last_checkpoint_only']),
|
|
318
|
+
)
|
|
319
|
+
|
|
320
|
+
discriminator = getv(root_object, ['config', 'method'])
|
|
321
|
+
if discriminator is None:
|
|
322
|
+
discriminator = 'SUPERVISED_FINE_TUNING'
|
|
323
|
+
if discriminator == 'SUPERVISED_FINE_TUNING':
|
|
324
|
+
if getv(from_object, ['adapter_size']) is not None:
|
|
325
|
+
setv(
|
|
326
|
+
parent_object,
|
|
327
|
+
['supervisedTuningSpec', 'hyperParameters', 'adapterSize'],
|
|
328
|
+
getv(from_object, ['adapter_size']),
|
|
329
|
+
)
|
|
330
|
+
|
|
331
|
+
elif discriminator == 'PREFERENCE_TUNING':
|
|
332
|
+
if getv(from_object, ['adapter_size']) is not None:
|
|
333
|
+
setv(
|
|
334
|
+
parent_object,
|
|
335
|
+
['preferenceOptimizationSpec', 'hyperParameters', 'adapterSize'],
|
|
336
|
+
getv(from_object, ['adapter_size']),
|
|
337
|
+
)
|
|
338
|
+
|
|
339
|
+
if getv(from_object, ['batch_size']) is not None:
|
|
340
|
+
raise ValueError('batch_size parameter is not supported in Vertex AI.')
|
|
341
|
+
|
|
342
|
+
if getv(from_object, ['learning_rate']) is not None:
|
|
343
|
+
raise ValueError('learning_rate parameter is not supported in Vertex AI.')
|
|
344
|
+
|
|
345
|
+
discriminator = getv(root_object, ['config', 'method'])
|
|
346
|
+
if discriminator is None:
|
|
347
|
+
discriminator = 'SUPERVISED_FINE_TUNING'
|
|
348
|
+
if discriminator == 'SUPERVISED_FINE_TUNING':
|
|
349
|
+
if getv(from_object, ['evaluation_config']) is not None:
|
|
350
|
+
setv(
|
|
351
|
+
parent_object,
|
|
352
|
+
['supervisedTuningSpec', 'evaluationConfig'],
|
|
353
|
+
_EvaluationConfig_to_vertex(
|
|
354
|
+
getv(from_object, ['evaluation_config']), to_object, root_object
|
|
355
|
+
),
|
|
356
|
+
)
|
|
357
|
+
|
|
358
|
+
elif discriminator == 'PREFERENCE_TUNING':
|
|
359
|
+
if getv(from_object, ['evaluation_config']) is not None:
|
|
360
|
+
setv(
|
|
361
|
+
parent_object,
|
|
362
|
+
['preferenceOptimizationSpec', 'evaluationConfig'],
|
|
363
|
+
_EvaluationConfig_to_vertex(
|
|
364
|
+
getv(from_object, ['evaluation_config']), to_object, root_object
|
|
365
|
+
),
|
|
366
|
+
)
|
|
367
|
+
|
|
368
|
+
if getv(from_object, ['labels']) is not None:
|
|
369
|
+
setv(parent_object, ['labels'], getv(from_object, ['labels']))
|
|
370
|
+
|
|
371
|
+
if getv(from_object, ['beta']) is not None:
|
|
372
|
+
setv(
|
|
373
|
+
parent_object,
|
|
374
|
+
['preferenceOptimizationSpec', 'hyperParameters', 'beta'],
|
|
375
|
+
getv(from_object, ['beta']),
|
|
126
376
|
)
|
|
127
377
|
|
|
128
378
|
return to_object
|
|
129
379
|
|
|
130
380
|
|
|
131
|
-
def
|
|
132
|
-
|
|
133
|
-
|
|
134
|
-
|
|
135
|
-
) -> dict:
|
|
381
|
+
def _CreateTuningJobParametersPrivate_to_mldev(
|
|
382
|
+
from_object: Union[dict[str, Any], object],
|
|
383
|
+
parent_object: Optional[dict[str, Any]] = None,
|
|
384
|
+
root_object: Optional[Union[dict[str, Any], object]] = None,
|
|
385
|
+
) -> dict[str, Any]:
|
|
136
386
|
to_object: dict[str, Any] = {}
|
|
387
|
+
if getv(from_object, ['base_model']) is not None:
|
|
388
|
+
setv(to_object, ['baseModel'], getv(from_object, ['base_model']))
|
|
389
|
+
|
|
390
|
+
if getv(from_object, ['pre_tuned_model']) is not None:
|
|
391
|
+
setv(to_object, ['preTunedModel'], getv(from_object, ['pre_tuned_model']))
|
|
392
|
+
|
|
393
|
+
if getv(from_object, ['training_dataset']) is not None:
|
|
394
|
+
_TuningDataset_to_mldev(
|
|
395
|
+
getv(from_object, ['training_dataset']), to_object, root_object
|
|
396
|
+
)
|
|
397
|
+
|
|
137
398
|
if getv(from_object, ['config']) is not None:
|
|
138
|
-
|
|
139
|
-
to_object,
|
|
140
|
-
['config'],
|
|
141
|
-
_ListTuningJobsConfig_to_vertex(
|
|
142
|
-
api_client, getv(from_object, ['config']), to_object
|
|
143
|
-
),
|
|
399
|
+
_CreateTuningJobConfig_to_mldev(
|
|
400
|
+
getv(from_object, ['config']), to_object, root_object
|
|
144
401
|
)
|
|
145
402
|
|
|
146
403
|
return to_object
|
|
147
404
|
|
|
148
405
|
|
|
149
|
-
def
|
|
150
|
-
|
|
151
|
-
|
|
152
|
-
|
|
153
|
-
) -> dict:
|
|
406
|
+
def _CreateTuningJobParametersPrivate_to_vertex(
|
|
407
|
+
from_object: Union[dict[str, Any], object],
|
|
408
|
+
parent_object: Optional[dict[str, Any]] = None,
|
|
409
|
+
root_object: Optional[Union[dict[str, Any], object]] = None,
|
|
410
|
+
) -> dict[str, Any]:
|
|
154
411
|
to_object: dict[str, Any] = {}
|
|
155
|
-
if getv(from_object, ['
|
|
156
|
-
setv(to_object, ['
|
|
412
|
+
if getv(from_object, ['base_model']) is not None:
|
|
413
|
+
setv(to_object, ['baseModel'], getv(from_object, ['base_model']))
|
|
157
414
|
|
|
158
|
-
if getv(from_object, ['
|
|
159
|
-
setv(to_object, ['
|
|
415
|
+
if getv(from_object, ['pre_tuned_model']) is not None:
|
|
416
|
+
setv(to_object, ['preTunedModel'], getv(from_object, ['pre_tuned_model']))
|
|
417
|
+
|
|
418
|
+
if getv(from_object, ['training_dataset']) is not None:
|
|
419
|
+
_TuningDataset_to_vertex(
|
|
420
|
+
getv(from_object, ['training_dataset']), to_object, root_object
|
|
421
|
+
)
|
|
422
|
+
|
|
423
|
+
if getv(from_object, ['config']) is not None:
|
|
424
|
+
_CreateTuningJobConfig_to_vertex(
|
|
425
|
+
getv(from_object, ['config']), to_object, root_object
|
|
426
|
+
)
|
|
160
427
|
|
|
161
428
|
return to_object
|
|
162
429
|
|
|
163
430
|
|
|
164
|
-
def
|
|
165
|
-
|
|
166
|
-
|
|
167
|
-
|
|
168
|
-
) -> dict:
|
|
431
|
+
def _EvaluationConfig_from_vertex(
|
|
432
|
+
from_object: Union[dict[str, Any], object],
|
|
433
|
+
parent_object: Optional[dict[str, Any]] = None,
|
|
434
|
+
root_object: Optional[Union[dict[str, Any], object]] = None,
|
|
435
|
+
) -> dict[str, Any]:
|
|
169
436
|
to_object: dict[str, Any] = {}
|
|
170
|
-
if getv(from_object, ['
|
|
171
|
-
|
|
437
|
+
if getv(from_object, ['metrics']) is not None:
|
|
438
|
+
setv(to_object, ['metrics'], t.t_metrics(getv(from_object, ['metrics'])))
|
|
439
|
+
|
|
440
|
+
if getv(from_object, ['outputConfig']) is not None:
|
|
441
|
+
setv(to_object, ['output_config'], getv(from_object, ['outputConfig']))
|
|
172
442
|
|
|
173
|
-
if getv(from_object, ['
|
|
174
|
-
|
|
443
|
+
if getv(from_object, ['autoraterConfig']) is not None:
|
|
444
|
+
setv(
|
|
445
|
+
to_object,
|
|
446
|
+
['autorater_config'],
|
|
447
|
+
_AutoraterConfig_from_vertex(
|
|
448
|
+
getv(from_object, ['autoraterConfig']), to_object, root_object
|
|
449
|
+
),
|
|
450
|
+
)
|
|
175
451
|
|
|
176
452
|
return to_object
|
|
177
453
|
|
|
178
454
|
|
|
179
|
-
def
|
|
180
|
-
|
|
181
|
-
|
|
182
|
-
|
|
183
|
-
) -> dict:
|
|
455
|
+
def _EvaluationConfig_to_vertex(
|
|
456
|
+
from_object: Union[dict[str, Any], object],
|
|
457
|
+
parent_object: Optional[dict[str, Any]] = None,
|
|
458
|
+
root_object: Optional[Union[dict[str, Any], object]] = None,
|
|
459
|
+
) -> dict[str, Any]:
|
|
184
460
|
to_object: dict[str, Any] = {}
|
|
185
|
-
if getv(from_object, ['
|
|
186
|
-
|
|
461
|
+
if getv(from_object, ['metrics']) is not None:
|
|
462
|
+
setv(to_object, ['metrics'], t.t_metrics(getv(from_object, ['metrics'])))
|
|
187
463
|
|
|
188
|
-
if getv(from_object, ['
|
|
464
|
+
if getv(from_object, ['output_config']) is not None:
|
|
465
|
+
setv(to_object, ['outputConfig'], getv(from_object, ['output_config']))
|
|
466
|
+
|
|
467
|
+
if getv(from_object, ['autorater_config']) is not None:
|
|
189
468
|
setv(
|
|
190
469
|
to_object,
|
|
191
|
-
['
|
|
192
|
-
|
|
193
|
-
|
|
194
|
-
|
|
195
|
-
],
|
|
470
|
+
['autoraterConfig'],
|
|
471
|
+
_AutoraterConfig_to_vertex(
|
|
472
|
+
getv(from_object, ['autorater_config']), to_object, root_object
|
|
473
|
+
),
|
|
196
474
|
)
|
|
197
475
|
|
|
198
476
|
return to_object
|
|
199
477
|
|
|
200
478
|
|
|
201
|
-
def
|
|
202
|
-
|
|
203
|
-
|
|
204
|
-
|
|
205
|
-
) -> dict:
|
|
479
|
+
def _GenerationConfig_from_vertex(
|
|
480
|
+
from_object: Union[dict[str, Any], object],
|
|
481
|
+
parent_object: Optional[dict[str, Any]] = None,
|
|
482
|
+
root_object: Optional[Union[dict[str, Any], object]] = None,
|
|
483
|
+
) -> dict[str, Any]:
|
|
206
484
|
to_object: dict[str, Any] = {}
|
|
207
|
-
if getv(from_object, ['
|
|
485
|
+
if getv(from_object, ['modelConfig']) is not None:
|
|
208
486
|
setv(
|
|
209
|
-
|
|
210
|
-
['
|
|
211
|
-
getv(from_object, ['
|
|
487
|
+
to_object,
|
|
488
|
+
['model_selection_config'],
|
|
489
|
+
getv(from_object, ['modelConfig']),
|
|
212
490
|
)
|
|
213
491
|
|
|
214
|
-
if getv(from_object, ['
|
|
215
|
-
|
|
492
|
+
if getv(from_object, ['responseJsonSchema']) is not None:
|
|
493
|
+
setv(
|
|
494
|
+
to_object,
|
|
495
|
+
['response_json_schema'],
|
|
496
|
+
getv(from_object, ['responseJsonSchema']),
|
|
497
|
+
)
|
|
216
498
|
|
|
217
|
-
|
|
499
|
+
if getv(from_object, ['audioTimestamp']) is not None:
|
|
500
|
+
setv(to_object, ['audio_timestamp'], getv(from_object, ['audioTimestamp']))
|
|
218
501
|
|
|
502
|
+
if getv(from_object, ['candidateCount']) is not None:
|
|
503
|
+
setv(to_object, ['candidate_count'], getv(from_object, ['candidateCount']))
|
|
219
504
|
|
|
220
|
-
|
|
221
|
-
|
|
222
|
-
|
|
223
|
-
|
|
224
|
-
)
|
|
225
|
-
|
|
226
|
-
if getv(from_object, ['gcs_uri']) is not None:
|
|
227
|
-
raise ValueError('gcs_uri parameter is not supported in Gemini API.')
|
|
505
|
+
if getv(from_object, ['enableAffectiveDialog']) is not None:
|
|
506
|
+
setv(
|
|
507
|
+
to_object,
|
|
508
|
+
['enable_affective_dialog'],
|
|
509
|
+
getv(from_object, ['enableAffectiveDialog']),
|
|
510
|
+
)
|
|
228
511
|
|
|
229
|
-
|
|
512
|
+
if getv(from_object, ['frequencyPenalty']) is not None:
|
|
513
|
+
setv(
|
|
514
|
+
to_object,
|
|
515
|
+
['frequency_penalty'],
|
|
516
|
+
getv(from_object, ['frequencyPenalty']),
|
|
517
|
+
)
|
|
230
518
|
|
|
519
|
+
if getv(from_object, ['logprobs']) is not None:
|
|
520
|
+
setv(to_object, ['logprobs'], getv(from_object, ['logprobs']))
|
|
231
521
|
|
|
232
|
-
|
|
233
|
-
|
|
234
|
-
|
|
235
|
-
|
|
236
|
-
|
|
237
|
-
|
|
238
|
-
|
|
239
|
-
|
|
522
|
+
if getv(from_object, ['maxOutputTokens']) is not None:
|
|
523
|
+
setv(
|
|
524
|
+
to_object, ['max_output_tokens'], getv(from_object, ['maxOutputTokens'])
|
|
525
|
+
)
|
|
526
|
+
|
|
527
|
+
if getv(from_object, ['mediaResolution']) is not None:
|
|
528
|
+
setv(
|
|
529
|
+
to_object, ['media_resolution'], getv(from_object, ['mediaResolution'])
|
|
530
|
+
)
|
|
531
|
+
|
|
532
|
+
if getv(from_object, ['presencePenalty']) is not None:
|
|
533
|
+
setv(
|
|
534
|
+
to_object, ['presence_penalty'], getv(from_object, ['presencePenalty'])
|
|
535
|
+
)
|
|
536
|
+
|
|
537
|
+
if getv(from_object, ['responseLogprobs']) is not None:
|
|
538
|
+
setv(
|
|
539
|
+
to_object,
|
|
540
|
+
['response_logprobs'],
|
|
541
|
+
getv(from_object, ['responseLogprobs']),
|
|
542
|
+
)
|
|
543
|
+
|
|
544
|
+
if getv(from_object, ['responseMimeType']) is not None:
|
|
545
|
+
setv(
|
|
546
|
+
to_object,
|
|
547
|
+
['response_mime_type'],
|
|
548
|
+
getv(from_object, ['responseMimeType']),
|
|
549
|
+
)
|
|
550
|
+
|
|
551
|
+
if getv(from_object, ['responseModalities']) is not None:
|
|
552
|
+
setv(
|
|
553
|
+
to_object,
|
|
554
|
+
['response_modalities'],
|
|
555
|
+
getv(from_object, ['responseModalities']),
|
|
556
|
+
)
|
|
557
|
+
|
|
558
|
+
if getv(from_object, ['responseSchema']) is not None:
|
|
559
|
+
setv(to_object, ['response_schema'], getv(from_object, ['responseSchema']))
|
|
560
|
+
|
|
561
|
+
if getv(from_object, ['routingConfig']) is not None:
|
|
562
|
+
setv(to_object, ['routing_config'], getv(from_object, ['routingConfig']))
|
|
563
|
+
|
|
564
|
+
if getv(from_object, ['seed']) is not None:
|
|
565
|
+
setv(to_object, ['seed'], getv(from_object, ['seed']))
|
|
566
|
+
|
|
567
|
+
if getv(from_object, ['speechConfig']) is not None:
|
|
568
|
+
setv(to_object, ['speech_config'], getv(from_object, ['speechConfig']))
|
|
569
|
+
|
|
570
|
+
if getv(from_object, ['stopSequences']) is not None:
|
|
571
|
+
setv(to_object, ['stop_sequences'], getv(from_object, ['stopSequences']))
|
|
572
|
+
|
|
573
|
+
if getv(from_object, ['temperature']) is not None:
|
|
574
|
+
setv(to_object, ['temperature'], getv(from_object, ['temperature']))
|
|
575
|
+
|
|
576
|
+
if getv(from_object, ['thinkingConfig']) is not None:
|
|
577
|
+
setv(to_object, ['thinking_config'], getv(from_object, ['thinkingConfig']))
|
|
578
|
+
|
|
579
|
+
if getv(from_object, ['topK']) is not None:
|
|
580
|
+
setv(to_object, ['top_k'], getv(from_object, ['topK']))
|
|
581
|
+
|
|
582
|
+
if getv(from_object, ['topP']) is not None:
|
|
583
|
+
setv(to_object, ['top_p'], getv(from_object, ['topP']))
|
|
240
584
|
|
|
241
585
|
return to_object
|
|
242
586
|
|
|
243
587
|
|
|
244
|
-
def
|
|
245
|
-
|
|
246
|
-
|
|
247
|
-
|
|
248
|
-
) -> dict:
|
|
588
|
+
def _GenerationConfig_to_vertex(
|
|
589
|
+
from_object: Union[dict[str, Any], object],
|
|
590
|
+
parent_object: Optional[dict[str, Any]] = None,
|
|
591
|
+
root_object: Optional[Union[dict[str, Any], object]] = None,
|
|
592
|
+
) -> dict[str, Any]:
|
|
249
593
|
to_object: dict[str, Any] = {}
|
|
594
|
+
if getv(from_object, ['model_selection_config']) is not None:
|
|
595
|
+
setv(
|
|
596
|
+
to_object,
|
|
597
|
+
['modelConfig'],
|
|
598
|
+
getv(from_object, ['model_selection_config']),
|
|
599
|
+
)
|
|
250
600
|
|
|
251
|
-
if getv(from_object, ['
|
|
252
|
-
|
|
253
|
-
|
|
601
|
+
if getv(from_object, ['response_json_schema']) is not None:
|
|
602
|
+
setv(
|
|
603
|
+
to_object,
|
|
604
|
+
['responseJsonSchema'],
|
|
605
|
+
getv(from_object, ['response_json_schema']),
|
|
254
606
|
)
|
|
255
607
|
|
|
256
|
-
if getv(from_object, ['
|
|
608
|
+
if getv(from_object, ['audio_timestamp']) is not None:
|
|
609
|
+
setv(to_object, ['audioTimestamp'], getv(from_object, ['audio_timestamp']))
|
|
610
|
+
|
|
611
|
+
if getv(from_object, ['candidate_count']) is not None:
|
|
612
|
+
setv(to_object, ['candidateCount'], getv(from_object, ['candidate_count']))
|
|
613
|
+
|
|
614
|
+
if getv(from_object, ['enable_affective_dialog']) is not None:
|
|
257
615
|
setv(
|
|
258
|
-
|
|
259
|
-
['
|
|
260
|
-
getv(from_object, ['
|
|
616
|
+
to_object,
|
|
617
|
+
['enableAffectiveDialog'],
|
|
618
|
+
getv(from_object, ['enable_affective_dialog']),
|
|
261
619
|
)
|
|
262
620
|
|
|
263
|
-
if getv(from_object, ['
|
|
264
|
-
|
|
621
|
+
if getv(from_object, ['frequency_penalty']) is not None:
|
|
622
|
+
setv(
|
|
623
|
+
to_object,
|
|
624
|
+
['frequencyPenalty'],
|
|
625
|
+
getv(from_object, ['frequency_penalty']),
|
|
626
|
+
)
|
|
265
627
|
|
|
266
|
-
if getv(from_object, ['
|
|
628
|
+
if getv(from_object, ['logprobs']) is not None:
|
|
629
|
+
setv(to_object, ['logprobs'], getv(from_object, ['logprobs']))
|
|
630
|
+
|
|
631
|
+
if getv(from_object, ['max_output_tokens']) is not None:
|
|
267
632
|
setv(
|
|
268
|
-
|
|
269
|
-
['tuningTask', 'hyperparameters', 'epochCount'],
|
|
270
|
-
getv(from_object, ['epoch_count']),
|
|
633
|
+
to_object, ['maxOutputTokens'], getv(from_object, ['max_output_tokens'])
|
|
271
634
|
)
|
|
272
635
|
|
|
273
|
-
if getv(from_object, ['
|
|
636
|
+
if getv(from_object, ['media_resolution']) is not None:
|
|
637
|
+
setv(
|
|
638
|
+
to_object, ['mediaResolution'], getv(from_object, ['media_resolution'])
|
|
639
|
+
)
|
|
640
|
+
|
|
641
|
+
if getv(from_object, ['presence_penalty']) is not None:
|
|
642
|
+
setv(
|
|
643
|
+
to_object, ['presencePenalty'], getv(from_object, ['presence_penalty'])
|
|
644
|
+
)
|
|
645
|
+
|
|
646
|
+
if getv(from_object, ['response_logprobs']) is not None:
|
|
274
647
|
setv(
|
|
275
648
|
to_object,
|
|
276
|
-
['
|
|
277
|
-
getv(from_object, ['
|
|
649
|
+
['responseLogprobs'],
|
|
650
|
+
getv(from_object, ['response_logprobs']),
|
|
278
651
|
)
|
|
279
652
|
|
|
280
|
-
if getv(from_object, ['
|
|
281
|
-
|
|
653
|
+
if getv(from_object, ['response_mime_type']) is not None:
|
|
654
|
+
setv(
|
|
655
|
+
to_object,
|
|
656
|
+
['responseMimeType'],
|
|
657
|
+
getv(from_object, ['response_mime_type']),
|
|
658
|
+
)
|
|
282
659
|
|
|
283
|
-
if getv(from_object, ['
|
|
660
|
+
if getv(from_object, ['response_modalities']) is not None:
|
|
284
661
|
setv(
|
|
285
|
-
|
|
286
|
-
['
|
|
287
|
-
getv(from_object, ['
|
|
662
|
+
to_object,
|
|
663
|
+
['responseModalities'],
|
|
664
|
+
getv(from_object, ['response_modalities']),
|
|
288
665
|
)
|
|
289
666
|
|
|
290
|
-
if getv(from_object, ['
|
|
667
|
+
if getv(from_object, ['response_schema']) is not None:
|
|
668
|
+
setv(to_object, ['responseSchema'], getv(from_object, ['response_schema']))
|
|
669
|
+
|
|
670
|
+
if getv(from_object, ['routing_config']) is not None:
|
|
671
|
+
setv(to_object, ['routingConfig'], getv(from_object, ['routing_config']))
|
|
672
|
+
|
|
673
|
+
if getv(from_object, ['seed']) is not None:
|
|
674
|
+
setv(to_object, ['seed'], getv(from_object, ['seed']))
|
|
675
|
+
|
|
676
|
+
if getv(from_object, ['speech_config']) is not None:
|
|
291
677
|
setv(
|
|
292
|
-
|
|
293
|
-
['
|
|
294
|
-
|
|
678
|
+
to_object,
|
|
679
|
+
['speechConfig'],
|
|
680
|
+
_SpeechConfig_to_vertex(
|
|
681
|
+
getv(from_object, ['speech_config']), to_object, root_object
|
|
682
|
+
),
|
|
683
|
+
)
|
|
684
|
+
|
|
685
|
+
if getv(from_object, ['stop_sequences']) is not None:
|
|
686
|
+
setv(to_object, ['stopSequences'], getv(from_object, ['stop_sequences']))
|
|
687
|
+
|
|
688
|
+
if getv(from_object, ['temperature']) is not None:
|
|
689
|
+
setv(to_object, ['temperature'], getv(from_object, ['temperature']))
|
|
690
|
+
|
|
691
|
+
if getv(from_object, ['thinking_config']) is not None:
|
|
692
|
+
setv(to_object, ['thinkingConfig'], getv(from_object, ['thinking_config']))
|
|
693
|
+
|
|
694
|
+
if getv(from_object, ['top_k']) is not None:
|
|
695
|
+
setv(to_object, ['topK'], getv(from_object, ['top_k']))
|
|
696
|
+
|
|
697
|
+
if getv(from_object, ['top_p']) is not None:
|
|
698
|
+
setv(to_object, ['topP'], getv(from_object, ['top_p']))
|
|
699
|
+
|
|
700
|
+
if getv(from_object, ['enable_enhanced_civic_answers']) is not None:
|
|
701
|
+
raise ValueError(
|
|
702
|
+
'enable_enhanced_civic_answers parameter is not supported in Vertex AI.'
|
|
295
703
|
)
|
|
296
704
|
|
|
297
705
|
return to_object
|
|
298
706
|
|
|
299
707
|
|
|
300
|
-
def
|
|
301
|
-
|
|
302
|
-
|
|
303
|
-
|
|
304
|
-
) -> dict:
|
|
708
|
+
def _GetTuningJobParameters_to_mldev(
|
|
709
|
+
from_object: Union[dict[str, Any], object],
|
|
710
|
+
parent_object: Optional[dict[str, Any]] = None,
|
|
711
|
+
root_object: Optional[Union[dict[str, Any], object]] = None,
|
|
712
|
+
) -> dict[str, Any]:
|
|
305
713
|
to_object: dict[str, Any] = {}
|
|
714
|
+
if getv(from_object, ['name']) is not None:
|
|
715
|
+
setv(to_object, ['_url', 'name'], getv(from_object, ['name']))
|
|
306
716
|
|
|
307
|
-
|
|
717
|
+
return to_object
|
|
718
|
+
|
|
719
|
+
|
|
720
|
+
def _GetTuningJobParameters_to_vertex(
|
|
721
|
+
from_object: Union[dict[str, Any], object],
|
|
722
|
+
parent_object: Optional[dict[str, Any]] = None,
|
|
723
|
+
root_object: Optional[Union[dict[str, Any], object]] = None,
|
|
724
|
+
) -> dict[str, Any]:
|
|
725
|
+
to_object: dict[str, Any] = {}
|
|
726
|
+
if getv(from_object, ['name']) is not None:
|
|
727
|
+
setv(to_object, ['_url', 'name'], getv(from_object, ['name']))
|
|
728
|
+
|
|
729
|
+
return to_object
|
|
730
|
+
|
|
731
|
+
|
|
732
|
+
def _ListTuningJobsConfig_to_mldev(
|
|
733
|
+
from_object: Union[dict[str, Any], object],
|
|
734
|
+
parent_object: Optional[dict[str, Any]] = None,
|
|
735
|
+
root_object: Optional[Union[dict[str, Any], object]] = None,
|
|
736
|
+
) -> dict[str, Any]:
|
|
737
|
+
to_object: dict[str, Any] = {}
|
|
738
|
+
|
|
739
|
+
if getv(from_object, ['page_size']) is not None:
|
|
308
740
|
setv(
|
|
309
|
-
parent_object,
|
|
310
|
-
['supervisedTuningSpec'],
|
|
311
|
-
_TuningValidationDataset_to_vertex(
|
|
312
|
-
api_client, getv(from_object, ['validation_dataset']), to_object
|
|
313
|
-
),
|
|
741
|
+
parent_object, ['_query', 'pageSize'], getv(from_object, ['page_size'])
|
|
314
742
|
)
|
|
315
743
|
|
|
316
|
-
if getv(from_object, ['
|
|
744
|
+
if getv(from_object, ['page_token']) is not None:
|
|
317
745
|
setv(
|
|
318
746
|
parent_object,
|
|
319
|
-
['
|
|
320
|
-
getv(from_object, ['
|
|
747
|
+
['_query', 'pageToken'],
|
|
748
|
+
getv(from_object, ['page_token']),
|
|
321
749
|
)
|
|
322
750
|
|
|
323
|
-
if getv(from_object, ['
|
|
324
|
-
setv(parent_object, ['
|
|
751
|
+
if getv(from_object, ['filter']) is not None:
|
|
752
|
+
setv(parent_object, ['_query', 'filter'], getv(from_object, ['filter']))
|
|
325
753
|
|
|
326
|
-
|
|
327
|
-
setv(
|
|
328
|
-
parent_object,
|
|
329
|
-
['supervisedTuningSpec', 'hyperParameters', 'epochCount'],
|
|
330
|
-
getv(from_object, ['epoch_count']),
|
|
331
|
-
)
|
|
754
|
+
return to_object
|
|
332
755
|
|
|
333
|
-
|
|
756
|
+
|
|
757
|
+
def _ListTuningJobsConfig_to_vertex(
|
|
758
|
+
from_object: Union[dict[str, Any], object],
|
|
759
|
+
parent_object: Optional[dict[str, Any]] = None,
|
|
760
|
+
root_object: Optional[Union[dict[str, Any], object]] = None,
|
|
761
|
+
) -> dict[str, Any]:
|
|
762
|
+
to_object: dict[str, Any] = {}
|
|
763
|
+
|
|
764
|
+
if getv(from_object, ['page_size']) is not None:
|
|
334
765
|
setv(
|
|
335
|
-
parent_object,
|
|
336
|
-
['supervisedTuningSpec', 'hyperParameters', 'learningRateMultiplier'],
|
|
337
|
-
getv(from_object, ['learning_rate_multiplier']),
|
|
766
|
+
parent_object, ['_query', 'pageSize'], getv(from_object, ['page_size'])
|
|
338
767
|
)
|
|
339
768
|
|
|
340
|
-
if getv(from_object, ['
|
|
769
|
+
if getv(from_object, ['page_token']) is not None:
|
|
341
770
|
setv(
|
|
342
771
|
parent_object,
|
|
343
|
-
['
|
|
344
|
-
getv(from_object, ['
|
|
772
|
+
['_query', 'pageToken'],
|
|
773
|
+
getv(from_object, ['page_token']),
|
|
345
774
|
)
|
|
346
775
|
|
|
347
|
-
if getv(from_object, ['
|
|
348
|
-
|
|
776
|
+
if getv(from_object, ['filter']) is not None:
|
|
777
|
+
setv(parent_object, ['_query', 'filter'], getv(from_object, ['filter']))
|
|
349
778
|
|
|
350
|
-
|
|
351
|
-
|
|
779
|
+
return to_object
|
|
780
|
+
|
|
781
|
+
|
|
782
|
+
def _ListTuningJobsParameters_to_mldev(
|
|
783
|
+
from_object: Union[dict[str, Any], object],
|
|
784
|
+
parent_object: Optional[dict[str, Any]] = None,
|
|
785
|
+
root_object: Optional[Union[dict[str, Any], object]] = None,
|
|
786
|
+
) -> dict[str, Any]:
|
|
787
|
+
to_object: dict[str, Any] = {}
|
|
788
|
+
if getv(from_object, ['config']) is not None:
|
|
789
|
+
_ListTuningJobsConfig_to_mldev(
|
|
790
|
+
getv(from_object, ['config']), to_object, root_object
|
|
791
|
+
)
|
|
352
792
|
|
|
353
793
|
return to_object
|
|
354
794
|
|
|
355
795
|
|
|
356
|
-
def
|
|
357
|
-
|
|
358
|
-
|
|
359
|
-
|
|
360
|
-
) -> dict:
|
|
796
|
+
def _ListTuningJobsParameters_to_vertex(
|
|
797
|
+
from_object: Union[dict[str, Any], object],
|
|
798
|
+
parent_object: Optional[dict[str, Any]] = None,
|
|
799
|
+
root_object: Optional[Union[dict[str, Any], object]] = None,
|
|
800
|
+
) -> dict[str, Any]:
|
|
361
801
|
to_object: dict[str, Any] = {}
|
|
362
|
-
if getv(from_object, ['
|
|
363
|
-
|
|
802
|
+
if getv(from_object, ['config']) is not None:
|
|
803
|
+
_ListTuningJobsConfig_to_vertex(
|
|
804
|
+
getv(from_object, ['config']), to_object, root_object
|
|
805
|
+
)
|
|
364
806
|
|
|
365
|
-
|
|
807
|
+
return to_object
|
|
808
|
+
|
|
809
|
+
|
|
810
|
+
def _ListTuningJobsResponse_from_mldev(
|
|
811
|
+
from_object: Union[dict[str, Any], object],
|
|
812
|
+
parent_object: Optional[dict[str, Any]] = None,
|
|
813
|
+
root_object: Optional[Union[dict[str, Any], object]] = None,
|
|
814
|
+
) -> dict[str, Any]:
|
|
815
|
+
to_object: dict[str, Any] = {}
|
|
816
|
+
if getv(from_object, ['sdkHttpResponse']) is not None:
|
|
366
817
|
setv(
|
|
367
|
-
to_object,
|
|
368
|
-
['tuningTask', 'trainingData'],
|
|
369
|
-
_TuningDataset_to_mldev(
|
|
370
|
-
api_client, getv(from_object, ['training_dataset']), to_object
|
|
371
|
-
),
|
|
818
|
+
to_object, ['sdk_http_response'], getv(from_object, ['sdkHttpResponse'])
|
|
372
819
|
)
|
|
373
820
|
|
|
374
|
-
if getv(from_object, ['
|
|
821
|
+
if getv(from_object, ['nextPageToken']) is not None:
|
|
822
|
+
setv(to_object, ['next_page_token'], getv(from_object, ['nextPageToken']))
|
|
823
|
+
|
|
824
|
+
if getv(from_object, ['tunedModels']) is not None:
|
|
375
825
|
setv(
|
|
376
826
|
to_object,
|
|
377
|
-
['
|
|
378
|
-
|
|
379
|
-
|
|
380
|
-
|
|
827
|
+
['tuning_jobs'],
|
|
828
|
+
[
|
|
829
|
+
_TuningJob_from_mldev(item, to_object, root_object)
|
|
830
|
+
for item in getv(from_object, ['tunedModels'])
|
|
831
|
+
],
|
|
381
832
|
)
|
|
382
833
|
|
|
383
834
|
return to_object
|
|
384
835
|
|
|
385
836
|
|
|
386
|
-
def
|
|
387
|
-
|
|
388
|
-
|
|
389
|
-
|
|
390
|
-
) -> dict:
|
|
837
|
+
def _ListTuningJobsResponse_from_vertex(
|
|
838
|
+
from_object: Union[dict[str, Any], object],
|
|
839
|
+
parent_object: Optional[dict[str, Any]] = None,
|
|
840
|
+
root_object: Optional[Union[dict[str, Any], object]] = None,
|
|
841
|
+
) -> dict[str, Any]:
|
|
391
842
|
to_object: dict[str, Any] = {}
|
|
392
|
-
if getv(from_object, ['
|
|
393
|
-
setv(to_object, ['baseModel'], getv(from_object, ['base_model']))
|
|
394
|
-
|
|
395
|
-
if getv(from_object, ['training_dataset']) is not None:
|
|
843
|
+
if getv(from_object, ['sdkHttpResponse']) is not None:
|
|
396
844
|
setv(
|
|
397
|
-
to_object,
|
|
398
|
-
['supervisedTuningSpec', 'trainingDatasetUri'],
|
|
399
|
-
_TuningDataset_to_vertex(
|
|
400
|
-
api_client, getv(from_object, ['training_dataset']), to_object
|
|
401
|
-
),
|
|
845
|
+
to_object, ['sdk_http_response'], getv(from_object, ['sdkHttpResponse'])
|
|
402
846
|
)
|
|
403
847
|
|
|
404
|
-
if getv(from_object, ['
|
|
848
|
+
if getv(from_object, ['nextPageToken']) is not None:
|
|
849
|
+
setv(to_object, ['next_page_token'], getv(from_object, ['nextPageToken']))
|
|
850
|
+
|
|
851
|
+
if getv(from_object, ['tuningJobs']) is not None:
|
|
405
852
|
setv(
|
|
406
853
|
to_object,
|
|
407
|
-
['
|
|
408
|
-
|
|
409
|
-
|
|
410
|
-
|
|
854
|
+
['tuning_jobs'],
|
|
855
|
+
[
|
|
856
|
+
_TuningJob_from_vertex(item, to_object, root_object)
|
|
857
|
+
for item in getv(from_object, ['tuningJobs'])
|
|
858
|
+
],
|
|
859
|
+
)
|
|
860
|
+
|
|
861
|
+
return to_object
|
|
862
|
+
|
|
863
|
+
|
|
864
|
+
def _SpeechConfig_to_vertex(
|
|
865
|
+
from_object: Union[dict[str, Any], object],
|
|
866
|
+
parent_object: Optional[dict[str, Any]] = None,
|
|
867
|
+
root_object: Optional[Union[dict[str, Any], object]] = None,
|
|
868
|
+
) -> dict[str, Any]:
|
|
869
|
+
to_object: dict[str, Any] = {}
|
|
870
|
+
if getv(from_object, ['language_code']) is not None:
|
|
871
|
+
setv(to_object, ['languageCode'], getv(from_object, ['language_code']))
|
|
872
|
+
|
|
873
|
+
if getv(from_object, ['voice_config']) is not None:
|
|
874
|
+
setv(to_object, ['voiceConfig'], getv(from_object, ['voice_config']))
|
|
875
|
+
|
|
876
|
+
if getv(from_object, ['multi_speaker_voice_config']) is not None:
|
|
877
|
+
raise ValueError(
|
|
878
|
+
'multi_speaker_voice_config parameter is not supported in Vertex AI.'
|
|
411
879
|
)
|
|
412
880
|
|
|
413
881
|
return to_object
|
|
414
882
|
|
|
415
883
|
|
|
416
884
|
def _TunedModel_from_mldev(
|
|
417
|
-
|
|
418
|
-
|
|
419
|
-
|
|
420
|
-
) -> dict:
|
|
885
|
+
from_object: Union[dict[str, Any], object],
|
|
886
|
+
parent_object: Optional[dict[str, Any]] = None,
|
|
887
|
+
root_object: Optional[Union[dict[str, Any], object]] = None,
|
|
888
|
+
) -> dict[str, Any]:
|
|
421
889
|
to_object: dict[str, Any] = {}
|
|
422
890
|
if getv(from_object, ['name']) is not None:
|
|
423
891
|
setv(to_object, ['model'], getv(from_object, ['name']))
|
|
@@ -428,27 +896,92 @@ def _TunedModel_from_mldev(
|
|
|
428
896
|
return to_object
|
|
429
897
|
|
|
430
898
|
|
|
431
|
-
def
|
|
432
|
-
|
|
433
|
-
|
|
434
|
-
|
|
435
|
-
) -> dict:
|
|
899
|
+
def _TuningDataset_to_mldev(
|
|
900
|
+
from_object: Union[dict[str, Any], object],
|
|
901
|
+
parent_object: Optional[dict[str, Any]] = None,
|
|
902
|
+
root_object: Optional[Union[dict[str, Any], object]] = None,
|
|
903
|
+
) -> dict[str, Any]:
|
|
904
|
+
to_object: dict[str, Any] = {}
|
|
905
|
+
if getv(from_object, ['gcs_uri']) is not None:
|
|
906
|
+
raise ValueError('gcs_uri parameter is not supported in Gemini API.')
|
|
907
|
+
|
|
908
|
+
if getv(from_object, ['vertex_dataset_resource']) is not None:
|
|
909
|
+
raise ValueError(
|
|
910
|
+
'vertex_dataset_resource parameter is not supported in Gemini API.'
|
|
911
|
+
)
|
|
912
|
+
|
|
913
|
+
if getv(from_object, ['examples']) is not None:
|
|
914
|
+
setv(
|
|
915
|
+
to_object,
|
|
916
|
+
['examples', 'examples'],
|
|
917
|
+
[item for item in getv(from_object, ['examples'])],
|
|
918
|
+
)
|
|
919
|
+
|
|
920
|
+
return to_object
|
|
921
|
+
|
|
922
|
+
|
|
923
|
+
def _TuningDataset_to_vertex(
|
|
924
|
+
from_object: Union[dict[str, Any], object],
|
|
925
|
+
parent_object: Optional[dict[str, Any]] = None,
|
|
926
|
+
root_object: Optional[Union[dict[str, Any], object]] = None,
|
|
927
|
+
) -> dict[str, Any]:
|
|
436
928
|
to_object: dict[str, Any] = {}
|
|
437
|
-
if getv(from_object, ['model']) is not None:
|
|
438
|
-
setv(to_object, ['model'], getv(from_object, ['model']))
|
|
439
929
|
|
|
440
|
-
|
|
441
|
-
|
|
930
|
+
discriminator = getv(root_object, ['config', 'method'])
|
|
931
|
+
if discriminator is None:
|
|
932
|
+
discriminator = 'SUPERVISED_FINE_TUNING'
|
|
933
|
+
if discriminator == 'SUPERVISED_FINE_TUNING':
|
|
934
|
+
if getv(from_object, ['gcs_uri']) is not None:
|
|
935
|
+
setv(
|
|
936
|
+
parent_object,
|
|
937
|
+
['supervisedTuningSpec', 'trainingDatasetUri'],
|
|
938
|
+
getv(from_object, ['gcs_uri']),
|
|
939
|
+
)
|
|
940
|
+
|
|
941
|
+
elif discriminator == 'PREFERENCE_TUNING':
|
|
942
|
+
if getv(from_object, ['gcs_uri']) is not None:
|
|
943
|
+
setv(
|
|
944
|
+
parent_object,
|
|
945
|
+
['preferenceOptimizationSpec', 'trainingDatasetUri'],
|
|
946
|
+
getv(from_object, ['gcs_uri']),
|
|
947
|
+
)
|
|
948
|
+
|
|
949
|
+
discriminator = getv(root_object, ['config', 'method'])
|
|
950
|
+
if discriminator is None:
|
|
951
|
+
discriminator = 'SUPERVISED_FINE_TUNING'
|
|
952
|
+
if discriminator == 'SUPERVISED_FINE_TUNING':
|
|
953
|
+
if getv(from_object, ['vertex_dataset_resource']) is not None:
|
|
954
|
+
setv(
|
|
955
|
+
parent_object,
|
|
956
|
+
['supervisedTuningSpec', 'trainingDatasetUri'],
|
|
957
|
+
getv(from_object, ['vertex_dataset_resource']),
|
|
958
|
+
)
|
|
959
|
+
|
|
960
|
+
elif discriminator == 'PREFERENCE_TUNING':
|
|
961
|
+
if getv(from_object, ['vertex_dataset_resource']) is not None:
|
|
962
|
+
setv(
|
|
963
|
+
parent_object,
|
|
964
|
+
['preferenceOptimizationSpec', 'trainingDatasetUri'],
|
|
965
|
+
getv(from_object, ['vertex_dataset_resource']),
|
|
966
|
+
)
|
|
967
|
+
|
|
968
|
+
if getv(from_object, ['examples']) is not None:
|
|
969
|
+
raise ValueError('examples parameter is not supported in Vertex AI.')
|
|
442
970
|
|
|
443
971
|
return to_object
|
|
444
972
|
|
|
445
973
|
|
|
446
974
|
def _TuningJob_from_mldev(
|
|
447
|
-
|
|
448
|
-
|
|
449
|
-
|
|
450
|
-
) -> dict:
|
|
975
|
+
from_object: Union[dict[str, Any], object],
|
|
976
|
+
parent_object: Optional[dict[str, Any]] = None,
|
|
977
|
+
root_object: Optional[Union[dict[str, Any], object]] = None,
|
|
978
|
+
) -> dict[str, Any]:
|
|
451
979
|
to_object: dict[str, Any] = {}
|
|
980
|
+
if getv(from_object, ['sdkHttpResponse']) is not None:
|
|
981
|
+
setv(
|
|
982
|
+
to_object, ['sdk_http_response'], getv(from_object, ['sdkHttpResponse'])
|
|
983
|
+
)
|
|
984
|
+
|
|
452
985
|
if getv(from_object, ['name']) is not None:
|
|
453
986
|
setv(to_object, ['name'], getv(from_object, ['name']))
|
|
454
987
|
|
|
@@ -456,7 +989,7 @@ def _TuningJob_from_mldev(
|
|
|
456
989
|
setv(
|
|
457
990
|
to_object,
|
|
458
991
|
['state'],
|
|
459
|
-
t.t_tuning_job_status(
|
|
992
|
+
t.t_tuning_job_status(getv(from_object, ['state'])),
|
|
460
993
|
)
|
|
461
994
|
|
|
462
995
|
if getv(from_object, ['createTime']) is not None:
|
|
@@ -490,42 +1023,24 @@ def _TuningJob_from_mldev(
|
|
|
490
1023
|
to_object,
|
|
491
1024
|
['tuned_model'],
|
|
492
1025
|
_TunedModel_from_mldev(
|
|
493
|
-
|
|
1026
|
+
getv(from_object, ['_self']), to_object, root_object
|
|
494
1027
|
),
|
|
495
1028
|
)
|
|
496
1029
|
|
|
497
|
-
if getv(from_object, ['distillationSpec']) is not None:
|
|
498
|
-
setv(
|
|
499
|
-
to_object,
|
|
500
|
-
['distillation_spec'],
|
|
501
|
-
getv(from_object, ['distillationSpec']),
|
|
502
|
-
)
|
|
503
|
-
|
|
504
|
-
if getv(from_object, ['experiment']) is not None:
|
|
505
|
-
setv(to_object, ['experiment'], getv(from_object, ['experiment']))
|
|
506
|
-
|
|
507
|
-
if getv(from_object, ['labels']) is not None:
|
|
508
|
-
setv(to_object, ['labels'], getv(from_object, ['labels']))
|
|
509
|
-
|
|
510
|
-
if getv(from_object, ['pipelineJob']) is not None:
|
|
511
|
-
setv(to_object, ['pipeline_job'], getv(from_object, ['pipelineJob']))
|
|
512
|
-
|
|
513
|
-
if getv(from_object, ['tunedModelDisplayName']) is not None:
|
|
514
|
-
setv(
|
|
515
|
-
to_object,
|
|
516
|
-
['tuned_model_display_name'],
|
|
517
|
-
getv(from_object, ['tunedModelDisplayName']),
|
|
518
|
-
)
|
|
519
|
-
|
|
520
1030
|
return to_object
|
|
521
1031
|
|
|
522
1032
|
|
|
523
1033
|
def _TuningJob_from_vertex(
|
|
524
|
-
|
|
525
|
-
|
|
526
|
-
|
|
527
|
-
) -> dict:
|
|
1034
|
+
from_object: Union[dict[str, Any], object],
|
|
1035
|
+
parent_object: Optional[dict[str, Any]] = None,
|
|
1036
|
+
root_object: Optional[Union[dict[str, Any], object]] = None,
|
|
1037
|
+
) -> dict[str, Any]:
|
|
528
1038
|
to_object: dict[str, Any] = {}
|
|
1039
|
+
if getv(from_object, ['sdkHttpResponse']) is not None:
|
|
1040
|
+
setv(
|
|
1041
|
+
to_object, ['sdk_http_response'], getv(from_object, ['sdkHttpResponse'])
|
|
1042
|
+
)
|
|
1043
|
+
|
|
529
1044
|
if getv(from_object, ['name']) is not None:
|
|
530
1045
|
setv(to_object, ['name'], getv(from_object, ['name']))
|
|
531
1046
|
|
|
@@ -533,7 +1048,7 @@ def _TuningJob_from_vertex(
|
|
|
533
1048
|
setv(
|
|
534
1049
|
to_object,
|
|
535
1050
|
['state'],
|
|
536
|
-
t.t_tuning_job_status(
|
|
1051
|
+
t.t_tuning_job_status(getv(from_object, ['state'])),
|
|
537
1052
|
)
|
|
538
1053
|
|
|
539
1054
|
if getv(from_object, ['createTime']) is not None:
|
|
@@ -558,19 +1073,23 @@ def _TuningJob_from_vertex(
|
|
|
558
1073
|
setv(to_object, ['base_model'], getv(from_object, ['baseModel']))
|
|
559
1074
|
|
|
560
1075
|
if getv(from_object, ['tunedModel']) is not None:
|
|
1076
|
+
setv(to_object, ['tuned_model'], getv(from_object, ['tunedModel']))
|
|
1077
|
+
|
|
1078
|
+
if getv(from_object, ['preTunedModel']) is not None:
|
|
1079
|
+
setv(to_object, ['pre_tuned_model'], getv(from_object, ['preTunedModel']))
|
|
1080
|
+
|
|
1081
|
+
if getv(from_object, ['supervisedTuningSpec']) is not None:
|
|
561
1082
|
setv(
|
|
562
1083
|
to_object,
|
|
563
|
-
['
|
|
564
|
-
|
|
565
|
-
api_client, getv(from_object, ['tunedModel']), to_object
|
|
566
|
-
),
|
|
1084
|
+
['supervised_tuning_spec'],
|
|
1085
|
+
getv(from_object, ['supervisedTuningSpec']),
|
|
567
1086
|
)
|
|
568
1087
|
|
|
569
|
-
if getv(from_object, ['
|
|
1088
|
+
if getv(from_object, ['preferenceOptimizationSpec']) is not None:
|
|
570
1089
|
setv(
|
|
571
1090
|
to_object,
|
|
572
|
-
['
|
|
573
|
-
getv(from_object, ['
|
|
1091
|
+
['preference_optimization_spec'],
|
|
1092
|
+
getv(from_object, ['preferenceOptimizationSpec']),
|
|
574
1093
|
)
|
|
575
1094
|
|
|
576
1095
|
if getv(from_object, ['tuningDataStats']) is not None:
|
|
@@ -588,11 +1107,18 @@ def _TuningJob_from_vertex(
|
|
|
588
1107
|
getv(from_object, ['partnerModelTuningSpec']),
|
|
589
1108
|
)
|
|
590
1109
|
|
|
591
|
-
if getv(from_object, ['
|
|
1110
|
+
if getv(from_object, ['evaluationConfig']) is not None:
|
|
592
1111
|
setv(
|
|
593
1112
|
to_object,
|
|
594
|
-
['
|
|
595
|
-
|
|
1113
|
+
['evaluation_config'],
|
|
1114
|
+
_EvaluationConfig_from_vertex(
|
|
1115
|
+
getv(from_object, ['evaluationConfig']), to_object, root_object
|
|
1116
|
+
),
|
|
1117
|
+
)
|
|
1118
|
+
|
|
1119
|
+
if getv(from_object, ['customBaseModel']) is not None:
|
|
1120
|
+
setv(
|
|
1121
|
+
to_object, ['custom_base_model'], getv(from_object, ['customBaseModel'])
|
|
596
1122
|
)
|
|
597
1123
|
|
|
598
1124
|
if getv(from_object, ['experiment']) is not None:
|
|
@@ -601,9 +1127,15 @@ def _TuningJob_from_vertex(
|
|
|
601
1127
|
if getv(from_object, ['labels']) is not None:
|
|
602
1128
|
setv(to_object, ['labels'], getv(from_object, ['labels']))
|
|
603
1129
|
|
|
1130
|
+
if getv(from_object, ['outputUri']) is not None:
|
|
1131
|
+
setv(to_object, ['output_uri'], getv(from_object, ['outputUri']))
|
|
1132
|
+
|
|
604
1133
|
if getv(from_object, ['pipelineJob']) is not None:
|
|
605
1134
|
setv(to_object, ['pipeline_job'], getv(from_object, ['pipelineJob']))
|
|
606
1135
|
|
|
1136
|
+
if getv(from_object, ['serviceAccount']) is not None:
|
|
1137
|
+
setv(to_object, ['service_account'], getv(from_object, ['serviceAccount']))
|
|
1138
|
+
|
|
607
1139
|
if getv(from_object, ['tunedModelDisplayName']) is not None:
|
|
608
1140
|
setv(
|
|
609
1141
|
to_object,
|
|
@@ -611,59 +1143,23 @@ def _TuningJob_from_vertex(
|
|
|
611
1143
|
getv(from_object, ['tunedModelDisplayName']),
|
|
612
1144
|
)
|
|
613
1145
|
|
|
614
|
-
|
|
615
|
-
|
|
616
|
-
|
|
617
|
-
def _ListTuningJobsResponse_from_mldev(
|
|
618
|
-
api_client: BaseApiClient,
|
|
619
|
-
from_object: Union[dict, object],
|
|
620
|
-
parent_object: Optional[dict] = None,
|
|
621
|
-
) -> dict:
|
|
622
|
-
to_object: dict[str, Any] = {}
|
|
623
|
-
if getv(from_object, ['nextPageToken']) is not None:
|
|
624
|
-
setv(to_object, ['next_page_token'], getv(from_object, ['nextPageToken']))
|
|
625
|
-
|
|
626
|
-
if getv(from_object, ['tunedModels']) is not None:
|
|
627
|
-
setv(
|
|
628
|
-
to_object,
|
|
629
|
-
['tuning_jobs'],
|
|
630
|
-
[
|
|
631
|
-
_TuningJob_from_mldev(api_client, item, to_object)
|
|
632
|
-
for item in getv(from_object, ['tunedModels'])
|
|
633
|
-
],
|
|
634
|
-
)
|
|
1146
|
+
if getv(from_object, ['veoTuningSpec']) is not None:
|
|
1147
|
+
setv(to_object, ['veo_tuning_spec'], getv(from_object, ['veoTuningSpec']))
|
|
635
1148
|
|
|
636
1149
|
return to_object
|
|
637
1150
|
|
|
638
1151
|
|
|
639
|
-
def
|
|
640
|
-
|
|
641
|
-
|
|
642
|
-
|
|
643
|
-
) -> dict:
|
|
1152
|
+
def _TuningOperation_from_mldev(
|
|
1153
|
+
from_object: Union[dict[str, Any], object],
|
|
1154
|
+
parent_object: Optional[dict[str, Any]] = None,
|
|
1155
|
+
root_object: Optional[Union[dict[str, Any], object]] = None,
|
|
1156
|
+
) -> dict[str, Any]:
|
|
644
1157
|
to_object: dict[str, Any] = {}
|
|
645
|
-
if getv(from_object, ['
|
|
646
|
-
setv(to_object, ['next_page_token'], getv(from_object, ['nextPageToken']))
|
|
647
|
-
|
|
648
|
-
if getv(from_object, ['tuningJobs']) is not None:
|
|
1158
|
+
if getv(from_object, ['sdkHttpResponse']) is not None:
|
|
649
1159
|
setv(
|
|
650
|
-
to_object,
|
|
651
|
-
['tuning_jobs'],
|
|
652
|
-
[
|
|
653
|
-
_TuningJob_from_vertex(api_client, item, to_object)
|
|
654
|
-
for item in getv(from_object, ['tuningJobs'])
|
|
655
|
-
],
|
|
1160
|
+
to_object, ['sdk_http_response'], getv(from_object, ['sdkHttpResponse'])
|
|
656
1161
|
)
|
|
657
1162
|
|
|
658
|
-
return to_object
|
|
659
|
-
|
|
660
|
-
|
|
661
|
-
def _Operation_from_mldev(
|
|
662
|
-
api_client: BaseApiClient,
|
|
663
|
-
from_object: Union[dict, object],
|
|
664
|
-
parent_object: Optional[dict] = None,
|
|
665
|
-
) -> dict:
|
|
666
|
-
to_object: dict[str, Any] = {}
|
|
667
1163
|
if getv(from_object, ['name']) is not None:
|
|
668
1164
|
setv(to_object, ['name'], getv(from_object, ['name']))
|
|
669
1165
|
|
|
@@ -676,32 +1172,24 @@ def _Operation_from_mldev(
   if getv(from_object, ['error']) is not None:
     setv(to_object, ['error'], getv(from_object, ['error']))
 
-  if getv(from_object, ['response']) is not None:
-    setv(to_object, ['response'], getv(from_object, ['response']))
-
   return to_object
 
 
-def _Operation_from_vertex(
-    api_client: BaseApiClient,
-    from_object: Union[dict, object],
-    parent_object: Optional[dict] = None,
-) -> dict:
+def _TuningValidationDataset_to_vertex(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+    root_object: Optional[Union[dict[str, Any], object]] = None,
+) -> dict[str, Any]:
   to_object: dict[str, Any] = {}
-  if getv(from_object, ['name']) is not None:
-    setv(to_object, ['name'], getv(from_object, ['name']))
-
-  if getv(from_object, ['metadata']) is not None:
-    setv(to_object, ['metadata'], getv(from_object, ['metadata']))
-
-  if getv(from_object, ['done']) is not None:
-    setv(to_object, ['done'], getv(from_object, ['done']))
-
-  if getv(from_object, ['error']) is not None:
-    setv(to_object, ['error'], getv(from_object, ['error']))
+  if getv(from_object, ['gcs_uri']) is not None:
+    setv(to_object, ['validationDatasetUri'], getv(from_object, ['gcs_uri']))
 
-  if getv(from_object, ['response']) is not None:
-    setv(to_object, ['response'], getv(from_object, ['response']))
+  if getv(from_object, ['vertex_dataset_resource']) is not None:
+    setv(
+        to_object,
+        ['validationDatasetUri'],
+        getv(from_object, ['vertex_dataset_resource']),
+    )
 
   return to_object
 
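Note on the new `_TuningValidationDataset_to_vertex` helper above: both `gcs_uri` and `vertex_dataset_resource` are written to the single Vertex request field `validationDatasetUri`, with the dataset resource overwriting the GCS URI when both are present. A standalone sketch of that mapping (illustrative only, not the SDK function itself):

  def to_validation_dataset_uri(dataset: dict) -> dict:
    # Whichever source field is present wins; vertex_dataset_resource overrides gcs_uri.
    out: dict = {}
    if dataset.get('gcs_uri') is not None:
      out['validationDatasetUri'] = dataset['gcs_uri']
    if dataset.get('vertex_dataset_resource') is not None:
      out['validationDatasetUri'] = dataset['vertex_dataset_resource']
    return out

  print(to_validation_dataset_uri({'gcs_uri': 'gs://my-bucket/val.jsonl'}))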
@@ -732,7 +1220,7 @@ class Tunings(_api_module.BaseModule):
 
     if self._api_client.vertexai:
       request_dict = _GetTuningJobParameters_to_vertex(
-          self._api_client, parameter_model
+          parameter_model, None, parameter_model
       )
       request_url_dict = request_dict.get('_url')
       if request_url_dict:
@@ -741,7 +1229,7 @@ class Tunings(_api_module.BaseModule):
         path = '{name}'
     else:
       request_dict = _GetTuningJobParameters_to_mldev(
-          self._api_client, parameter_model
+          parameter_model, None, parameter_model
       )
       request_url_dict = request_dict.get('_url')
       if request_url_dict:
@@ -754,42 +1242,38 @@ class Tunings(_api_module.BaseModule):
     # TODO: remove the hack that pops config.
     request_dict.pop('config', None)
 
-    http_options: Optional[types.HttpOptionsOrDict] = None
-    if isinstance(parameter_model.config, dict):
-      http_options = parameter_model.config.get('http_options', None)
-    elif hasattr(parameter_model.config, 'http_options'):
-      http_options = parameter_model.config.http_options
+    http_options: Optional[types.HttpOptions] = None
+    if (
+        parameter_model.config is not None
+        and parameter_model.config.http_options is not None
+    ):
+      http_options = parameter_model.config.http_options
 
     request_dict = _common.convert_to_dict(request_dict)
     request_dict = _common.encode_unserializable_types(request_dict)
 
-    response_dict = self._api_client.request(
-        'get', path, request_dict, http_options
-    )
+    response = self._api_client.request('get', path, request_dict, http_options)
+
+    response_dict = {} if not response.body else json.loads(response.body)
 
     if self._api_client.vertexai:
-      response_dict = _TuningJob_from_vertex(
-          self._api_client, response_dict
-      )
+      response_dict = _TuningJob_from_vertex(response_dict)
+
+    if not self._api_client.vertexai:
+      response_dict = _TuningJob_from_mldev(response_dict)
 
     return_value = types.TuningJob._from_response(
         response=response_dict, kwargs=parameter_model.model_dump()
     )
+    return_value.sdk_http_response = types.HttpResponse(
+        headers=response.headers
+    )
     self._api_client._verify_response(return_value)
     return return_value
 
   def _list(
       self, *, config: Optional[types.ListTuningJobsConfigOrDict] = None
   ) -> types.ListTuningJobsResponse:
-    """Lists `TuningJob` objects.
-
-    Args:
-      config: The configuration for the list request.
-
-    Returns:
-      A list of `TuningJob` objects.
-    """
-
     parameter_model = types._ListTuningJobsParameters(
         config=config,
     )
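Note: with the rewrite above, `get` now calls `self._api_client.request(...)`, JSON-decodes `response.body` itself, and attaches the HTTP headers to the returned job via `sdk_http_response`. A minimal usage sketch (the job name is a placeholder):

  from google import genai

  client = genai.Client()
  job = client.tunings.get(name='tunedModels/my-tuned-model-123')  # placeholder name
  print(job.state)
  print(job.sdk_http_response.headers)  # headers captured from the raw HTTP response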
@@ -798,7 +1282,7 @@ class Tunings(_api_module.BaseModule):
 
     if self._api_client.vertexai:
       request_dict = _ListTuningJobsParameters_to_vertex(
-          self._api_client, parameter_model
+          parameter_model, None, parameter_model
       )
       request_url_dict = request_dict.get('_url')
       if request_url_dict:
@@ -807,7 +1291,7 @@ class Tunings(_api_module.BaseModule):
         path = 'tuningJobs'
     else:
       request_dict = _ListTuningJobsParameters_to_mldev(
-          self._api_client, parameter_model
+          parameter_model, None, parameter_model
       )
       request_url_dict = request_dict.get('_url')
       if request_url_dict:
@@ -820,42 +1304,118 @@ class Tunings(_api_module.BaseModule):
     # TODO: remove the hack that pops config.
     request_dict.pop('config', None)
 
-    http_options: Optional[types.HttpOptionsOrDict] = None
-    if isinstance(parameter_model.config, dict):
-      http_options = parameter_model.config.get('http_options', None)
-    elif hasattr(parameter_model.config, 'http_options'):
-      http_options = parameter_model.config.http_options
+    http_options: Optional[types.HttpOptions] = None
+    if (
+        parameter_model.config is not None
+        and parameter_model.config.http_options is not None
+    ):
+      http_options = parameter_model.config.http_options
 
     request_dict = _common.convert_to_dict(request_dict)
     request_dict = _common.encode_unserializable_types(request_dict)
 
-    response_dict = self._api_client.request(
-        'get', path, request_dict, http_options
+    response = self._api_client.request('get', path, request_dict, http_options)
+
+    response_dict = {} if not response.body else json.loads(response.body)
+
+    if self._api_client.vertexai:
+      response_dict = _ListTuningJobsResponse_from_vertex(response_dict)
+
+    if not self._api_client.vertexai:
+      response_dict = _ListTuningJobsResponse_from_mldev(response_dict)
+
+    return_value = types.ListTuningJobsResponse._from_response(
+        response=response_dict, kwargs=parameter_model.model_dump()
+    )
+    return_value.sdk_http_response = types.HttpResponse(
+        headers=response.headers
+    )
+    self._api_client._verify_response(return_value)
+    return return_value
+
+  def cancel(
+      self,
+      *,
+      name: str,
+      config: Optional[types.CancelTuningJobConfigOrDict] = None,
+  ) -> types.CancelTuningJobResponse:
+    """Cancels a tuning job.
+
+    Args:
+      name (str): TuningJob resource name.
+    """
+
+    parameter_model = types._CancelTuningJobParameters(
+        name=name,
+        config=config,
     )
 
+    request_url_dict: Optional[dict[str, str]]
+
     if self._api_client.vertexai:
-      response_dict = _ListTuningJobsResponse_from_vertex(
-          self._api_client, response_dict
+      request_dict = _CancelTuningJobParameters_to_vertex(
+          parameter_model, None, parameter_model
       )
+      request_url_dict = request_dict.get('_url')
+      if request_url_dict:
+        path = '{name}:cancel'.format_map(request_url_dict)
+      else:
+        path = '{name}:cancel'
     else:
-      response_dict = _ListTuningJobsResponse_from_mldev(
-          self._api_client, response_dict
+      request_dict = _CancelTuningJobParameters_to_mldev(
+          parameter_model, None, parameter_model
       )
+      request_url_dict = request_dict.get('_url')
+      if request_url_dict:
+        path = '{name}:cancel'.format_map(request_url_dict)
+      else:
+        path = '{name}:cancel'
+    query_params = request_dict.get('_query')
+    if query_params:
+      path = f'{path}?{urlencode(query_params)}'
+    # TODO: remove the hack that pops config.
+    request_dict.pop('config', None)
 
-    return_value = types.ListTuningJobsResponse._from_response(
+    http_options: Optional[types.HttpOptions] = None
+    if (
+        parameter_model.config is not None
+        and parameter_model.config.http_options is not None
+    ):
+      http_options = parameter_model.config.http_options
+
+    request_dict = _common.convert_to_dict(request_dict)
+    request_dict = _common.encode_unserializable_types(request_dict)
+
+    response = self._api_client.request(
+        'post', path, request_dict, http_options
+    )
+
+    response_dict = {} if not response.body else json.loads(response.body)
+
+    if self._api_client.vertexai:
+      response_dict = _CancelTuningJobResponse_from_vertex(response_dict)
+
+    if not self._api_client.vertexai:
+      response_dict = _CancelTuningJobResponse_from_mldev(response_dict)
+
+    return_value = types.CancelTuningJobResponse._from_response(
         response=response_dict, kwargs=parameter_model.model_dump()
     )
+    return_value.sdk_http_response = types.HttpResponse(
+        headers=response.headers
+    )
     self._api_client._verify_response(return_value)
     return return_value
 
   def _tune(
       self,
       *,
-      base_model: str,
+      base_model: Optional[str] = None,
+      pre_tuned_model: Optional[types.PreTunedModelOrDict] = None,
       training_dataset: types.TuningDatasetOrDict,
       config: Optional[types.CreateTuningJobConfigOrDict] = None,
   ) -> types.TuningJob:
-    """Creates a supervised fine-tuning job.
+    """Creates a tuning job and returns the TuningJob object.
 
     Args:
       base_model: The name of the model to tune.
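Note: `cancel` is a new public method in 1.53.0; it issues a POST to `{name}:cancel` on both the Vertex AI and Gemini Developer backends. A minimal usage sketch (placeholder job name):

  from google import genai

  client = genai.Client()
  client.tunings.cancel(name='tunedModels/my-tuned-model-123')  # placeholder name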
@@ -866,8 +1426,9 @@ class Tunings(_api_module.BaseModule):
       A TuningJob object.
     """
 
-    parameter_model = types._CreateTuningJobParameters(
+    parameter_model = types._CreateTuningJobParametersPrivate(
         base_model=base_model,
+        pre_tuned_model=pre_tuned_model,
         training_dataset=training_dataset,
         config=config,
     )
@@ -876,8 +1437,8 @@ class Tunings(_api_module.BaseModule):
     if not self._api_client.vertexai:
       raise ValueError('This method is only supported in the Vertex AI client.')
     else:
-      request_dict = _CreateTuningJobParameters_to_vertex(
-          self._api_client, parameter_model
+      request_dict = _CreateTuningJobParametersPrivate_to_vertex(
+          parameter_model, None, parameter_model
       )
       request_url_dict = request_dict.get('_url')
       if request_url_dict:
@@ -891,38 +1452,43 @@ class Tunings(_api_module.BaseModule):
     # TODO: remove the hack that pops config.
     request_dict.pop('config', None)
 
-    http_options: Optional[types.HttpOptionsOrDict] = None
-    if isinstance(parameter_model.config, dict):
-      http_options = parameter_model.config.get('http_options', None)
-    elif hasattr(parameter_model.config, 'http_options'):
-      http_options = parameter_model.config.http_options
+    http_options: Optional[types.HttpOptions] = None
+    if (
+        parameter_model.config is not None
+        and parameter_model.config.http_options is not None
+    ):
+      http_options = parameter_model.config.http_options
 
     request_dict = _common.convert_to_dict(request_dict)
     request_dict = _common.encode_unserializable_types(request_dict)
 
-    response_dict = self._api_client.request(
+    response = self._api_client.request(
         'post', path, request_dict, http_options
     )
 
+    response_dict = {} if not response.body else json.loads(response.body)
+
     if self._api_client.vertexai:
-      response_dict = _TuningJob_from_vertex(self._api_client, response_dict)
-    else:
-      response_dict = _TuningJob_from_mldev(self._api_client, response_dict)
+      response_dict = _TuningJob_from_vertex(response_dict)
 
     return_value = types.TuningJob._from_response(
         response=response_dict, kwargs=parameter_model.model_dump()
     )
+    return_value.sdk_http_response = types.HttpResponse(
+        headers=response.headers
+    )
     self._api_client._verify_response(return_value)
     return return_value
 
   def _tune_mldev(
       self,
       *,
-      base_model: str,
+      base_model: Optional[str] = None,
+      pre_tuned_model: Optional[types.PreTunedModelOrDict] = None,
       training_dataset: types.TuningDatasetOrDict,
       config: Optional[types.CreateTuningJobConfigOrDict] = None,
-  ) -> types.Operation:
-    """Creates a supervised fine-tuning job.
+  ) -> types.TuningOperation:
+    """Creates a tuning job and returns the TuningJob object.
 
     Args:
       base_model: The name of the model to tune.
@@ -933,18 +1499,21 @@ class Tunings(_api_module.BaseModule):
       A TuningJob operation.
     """
 
-    parameter_model = types._CreateTuningJobParameters(
+    parameter_model = types._CreateTuningJobParametersPrivate(
         base_model=base_model,
+        pre_tuned_model=pre_tuned_model,
         training_dataset=training_dataset,
         config=config,
    )
 
     request_url_dict: Optional[dict[str, str]]
     if self._api_client.vertexai:
-      raise ValueError('This method is only supported in the Gemini Developer client.')
+      raise ValueError(
+          'This method is only supported in the Gemini Developer client.'
+      )
     else:
-      request_dict = _CreateTuningJobParameters_to_mldev(
-          self._api_client, parameter_model
+      request_dict = _CreateTuningJobParametersPrivate_to_mldev(
+          parameter_model, None, parameter_model
      )
       request_url_dict = request_dict.get('_url')
       if request_url_dict:
@@ -958,36 +1527,57 @@ class Tunings(_api_module.BaseModule):
     # TODO: remove the hack that pops config.
     request_dict.pop('config', None)
 
-    http_options: Optional[types.HttpOptionsOrDict] = None
-    if isinstance(parameter_model.config, dict):
-      http_options = parameter_model.config.get('http_options', None)
-    elif hasattr(parameter_model.config, 'http_options'):
-      http_options = parameter_model.config.http_options
+    http_options: Optional[types.HttpOptions] = None
+    if (
+        parameter_model.config is not None
+        and parameter_model.config.http_options is not None
+    ):
+      http_options = parameter_model.config.http_options
 
     request_dict = _common.convert_to_dict(request_dict)
     request_dict = _common.encode_unserializable_types(request_dict)
 
-    response_dict = self._api_client.request(
+    response = self._api_client.request(
         'post', path, request_dict, http_options
     )
 
-    if self._api_client.vertexai:
-      response_dict = _Operation_from_vertex(self._api_client, response_dict)
-    else:
-      response_dict = _Operation_from_mldev(self._api_client, response_dict)
+    response_dict = {} if not response.body else json.loads(response.body)
 
-    return_value = types.Operation._from_response(
+    if not self._api_client.vertexai:
+      response_dict = _TuningOperation_from_mldev(response_dict)
+
+    return_value = types.TuningOperation._from_response(
         response=response_dict, kwargs=parameter_model.model_dump()
     )
+    return_value.sdk_http_response = types.HttpResponse(
+        headers=response.headers
+    )
     self._api_client._verify_response(return_value)
     return return_value
 
   def list(
       self, *, config: Optional[types.ListTuningJobsConfigOrDict] = None
   ) -> Pager[types.TuningJob]:
+    """Lists `TuningJob` objects.
+
+    Args:
+      config: The configuration for the list request.
+
+    Returns:
+      A Pager object that contains one page of tuning jobs. When iterating over
+      the pager, it automatically fetches the next page if there are more.
+
+    Usage:
+
+    .. code-block:: python
+      for tuning_job in client.tunings.list():
+        print(tuning_job.name)
+    """
+
+    list_request = self._list
     return Pager(
         'tuning_jobs',
-        self._list,
+        list_request,
         self._list(config=config),
         config,
     )
@@ -1022,11 +1612,55 @@ class Tunings(_api_module.BaseModule):
       config: Optional[types.CreateTuningJobConfigOrDict] = None,
   ) -> types.TuningJob:
     if self._api_client.vertexai:
-      tuning_job = self._tune(
-          base_model=base_model,
-          training_dataset=training_dataset,
-          config=config,
-      )
+      if base_model.startswith('projects/'):  # Pre-tuned model
+        checkpoint_id = None
+        if config:
+          checkpoint_id = getattr(config, 'pre_tuned_model_checkpoint_id', None)
+        pre_tuned_model = types.PreTunedModel(
+            tuned_model_name=base_model, checkpoint_id=checkpoint_id
+        )
+        tuning_job = self._tune(
+            pre_tuned_model=pre_tuned_model,
+            training_dataset=training_dataset,
+            config=config,
+        )
+      else:
+        validated_evaluation_config: Optional[types.EvaluationConfig] = None
+        if (
+            config is not None
+            and getattr(config, 'evaluation_config', None) is not None
+        ):
+          evaluation_config = getattr(config, 'evaluation_config')
+          if isinstance(evaluation_config, dict):
+            evaluation_config = types.EvaluationConfig(**evaluation_config)
+          if (
+              not evaluation_config.metrics
+              or not evaluation_config.output_config
+          ):
+            raise ValueError(
+                'Evaluation config must have at least one metric and an output'
+                ' config.'
+            )
+          for i in range(len(evaluation_config.metrics)):
+            if isinstance(evaluation_config.metrics[i], dict):
+              evaluation_config.metrics[i] = types.Metric.model_validate(
+                  evaluation_config.metrics[i]
+              )
+          if isinstance(config, dict):
+            config['evaluation_config'] = evaluation_config
+          else:
+            config.evaluation_config = evaluation_config
+          validated_evaluation_config = evaluation_config
+        tuning_job = self._tune(
+            base_model=base_model,
+            training_dataset=training_dataset,
+            config=config,
+        )
+        if (
+            config is not None
+            and getattr(config, 'evaluation_config', None) is not None
+        ):
+          tuning_job.evaluation_config = validated_evaluation_config
     else:
       operation = self._tune_mldev(
           base_model=base_model,
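Note: on the Vertex AI client, `tune` now branches on `base_model`: a `projects/...` resource is wrapped in `types.PreTunedModel`, while other models go through the regular path with an optional, pre-validated `evaluation_config`. A hedged usage sketch (project, bucket and model names are placeholders):

  from google import genai
  from google.genai import types

  client = genai.Client(vertexai=True, project='my-project', location='us-central1')
  job = client.tunings.tune(
      base_model='gemini-2.0-flash-001',  # or a 'projects/.../models/...' pre-tuned model
      training_dataset=types.TuningDataset(gcs_uri='gs://my-bucket/train.jsonl'),
      config=types.CreateTuningJobConfig(tuned_model_display_name='example-tuned-model'),
  )
  print(job.name, job.state)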
@@ -1076,7 +1710,7 @@ class AsyncTunings(_api_module.BaseModule):
 
     if self._api_client.vertexai:
       request_dict = _GetTuningJobParameters_to_vertex(
-          self._api_client, parameter_model
+          parameter_model, None, parameter_model
       )
       request_url_dict = request_dict.get('_url')
       if request_url_dict:
@@ -1085,7 +1719,7 @@ class AsyncTunings(_api_module.BaseModule):
         path = '{name}'
     else:
       request_dict = _GetTuningJobParameters_to_mldev(
-          self._api_client, parameter_model
+          parameter_model, None, parameter_model
       )
       request_url_dict = request_dict.get('_url')
       if request_url_dict:
@@ -1098,42 +1732,40 @@ class AsyncTunings(_api_module.BaseModule):
     # TODO: remove the hack that pops config.
     request_dict.pop('config', None)
 
-    http_options: Optional[types.HttpOptionsOrDict] = None
-    if isinstance(parameter_model.config, dict):
-      http_options = parameter_model.config.get('http_options', None)
-    elif hasattr(parameter_model.config, 'http_options'):
-      http_options = parameter_model.config.http_options
+    http_options: Optional[types.HttpOptions] = None
+    if (
+        parameter_model.config is not None
+        and parameter_model.config.http_options is not None
+    ):
+      http_options = parameter_model.config.http_options
 
     request_dict = _common.convert_to_dict(request_dict)
     request_dict = _common.encode_unserializable_types(request_dict)
 
-    response_dict = await self._api_client.async_request(
+    response = await self._api_client.async_request(
         'get', path, request_dict, http_options
     )
 
+    response_dict = {} if not response.body else json.loads(response.body)
+
     if self._api_client.vertexai:
-      response_dict = _TuningJob_from_vertex(
-          self._api_client, response_dict
-      )
+      response_dict = _TuningJob_from_vertex(response_dict)
+
+    if not self._api_client.vertexai:
+      response_dict = _TuningJob_from_mldev(response_dict)
 
     return_value = types.TuningJob._from_response(
         response=response_dict, kwargs=parameter_model.model_dump()
     )
+    return_value.sdk_http_response = types.HttpResponse(
+        headers=response.headers
+    )
     self._api_client._verify_response(return_value)
     return return_value
 
   async def _list(
       self, *, config: Optional[types.ListTuningJobsConfigOrDict] = None
   ) -> types.ListTuningJobsResponse:
-    """Lists `TuningJob` objects.
-
-    Args:
-      config: The configuration for the list request.
-
-    Returns:
-      A list of `TuningJob` objects.
-    """
-
     parameter_model = types._ListTuningJobsParameters(
         config=config,
     )
@@ -1142,7 +1774,7 @@ class AsyncTunings(_api_module.BaseModule):
 
     if self._api_client.vertexai:
       request_dict = _ListTuningJobsParameters_to_vertex(
-          self._api_client, parameter_model
+          parameter_model, None, parameter_model
       )
       request_url_dict = request_dict.get('_url')
       if request_url_dict:
@@ -1151,7 +1783,7 @@ class AsyncTunings(_api_module.BaseModule):
         path = 'tuningJobs'
     else:
       request_dict = _ListTuningJobsParameters_to_mldev(
-          self._api_client, parameter_model
+          parameter_model, None, parameter_model
       )
       request_url_dict = request_dict.get('_url')
       if request_url_dict:
@@ -1164,42 +1796,120 @@ class AsyncTunings(_api_module.BaseModule):
     # TODO: remove the hack that pops config.
     request_dict.pop('config', None)
 
-    http_options: Optional[types.HttpOptionsOrDict] = None
-    if isinstance(parameter_model.config, dict):
-      http_options = parameter_model.config.get('http_options', None)
-    elif hasattr(parameter_model.config, 'http_options'):
-      http_options = parameter_model.config.http_options
+    http_options: Optional[types.HttpOptions] = None
+    if (
+        parameter_model.config is not None
+        and parameter_model.config.http_options is not None
+    ):
+      http_options = parameter_model.config.http_options
 
     request_dict = _common.convert_to_dict(request_dict)
     request_dict = _common.encode_unserializable_types(request_dict)
 
-    response_dict = await self._api_client.async_request(
+    response = await self._api_client.async_request(
         'get', path, request_dict, http_options
     )
 
+    response_dict = {} if not response.body else json.loads(response.body)
+
+    if self._api_client.vertexai:
+      response_dict = _ListTuningJobsResponse_from_vertex(response_dict)
+
+    if not self._api_client.vertexai:
+      response_dict = _ListTuningJobsResponse_from_mldev(response_dict)
+
+    return_value = types.ListTuningJobsResponse._from_response(
+        response=response_dict, kwargs=parameter_model.model_dump()
+    )
+    return_value.sdk_http_response = types.HttpResponse(
+        headers=response.headers
+    )
+    self._api_client._verify_response(return_value)
+    return return_value
+
+  async def cancel(
+      self,
+      *,
+      name: str,
+      config: Optional[types.CancelTuningJobConfigOrDict] = None,
+  ) -> types.CancelTuningJobResponse:
+    """Cancels a tuning job asynchronously.
+
+    Args:
+      name (str): A TuningJob resource name.
+    """
+
+    parameter_model = types._CancelTuningJobParameters(
+        name=name,
+        config=config,
+    )
+
+    request_url_dict: Optional[dict[str, str]]
+
     if self._api_client.vertexai:
-      response_dict = _ListTuningJobsResponse_from_vertex(
-          self._api_client, response_dict
+      request_dict = _CancelTuningJobParameters_to_vertex(
+          parameter_model, None, parameter_model
       )
+      request_url_dict = request_dict.get('_url')
+      if request_url_dict:
+        path = '{name}:cancel'.format_map(request_url_dict)
+      else:
+        path = '{name}:cancel'
     else:
-      response_dict = _ListTuningJobsResponse_from_mldev(
-          self._api_client, response_dict
+      request_dict = _CancelTuningJobParameters_to_mldev(
+          parameter_model, None, parameter_model
      )
+      request_url_dict = request_dict.get('_url')
+      if request_url_dict:
+        path = '{name}:cancel'.format_map(request_url_dict)
+      else:
+        path = '{name}:cancel'
+    query_params = request_dict.get('_query')
+    if query_params:
+      path = f'{path}?{urlencode(query_params)}'
+    # TODO: remove the hack that pops config.
+    request_dict.pop('config', None)
 
-    return_value = types.ListTuningJobsResponse._from_response(
+    http_options: Optional[types.HttpOptions] = None
+    if (
+        parameter_model.config is not None
+        and parameter_model.config.http_options is not None
+    ):
+      http_options = parameter_model.config.http_options
+
+    request_dict = _common.convert_to_dict(request_dict)
+    request_dict = _common.encode_unserializable_types(request_dict)
+
+    response = await self._api_client.async_request(
+        'post', path, request_dict, http_options
+    )
+
+    response_dict = {} if not response.body else json.loads(response.body)
+
+    if self._api_client.vertexai:
+      response_dict = _CancelTuningJobResponse_from_vertex(response_dict)
+
+    if not self._api_client.vertexai:
+      response_dict = _CancelTuningJobResponse_from_mldev(response_dict)
+
+    return_value = types.CancelTuningJobResponse._from_response(
         response=response_dict, kwargs=parameter_model.model_dump()
     )
+    return_value.sdk_http_response = types.HttpResponse(
+        headers=response.headers
+    )
     self._api_client._verify_response(return_value)
     return return_value
 
   async def _tune(
       self,
       *,
-      base_model: str,
+      base_model: Optional[str] = None,
+      pre_tuned_model: Optional[types.PreTunedModelOrDict] = None,
       training_dataset: types.TuningDatasetOrDict,
       config: Optional[types.CreateTuningJobConfigOrDict] = None,
   ) -> types.TuningJob:
-    """Creates a supervised fine-tuning job.
+    """Creates a tuning job and returns the TuningJob object.
 
     Args:
       base_model: The name of the model to tune.
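Note: the async client gains the same `cancel` entry point via `client.aio`. A minimal sketch (placeholder job name):

  import asyncio

  from google import genai

  async def main() -> None:
    client = genai.Client()
    await client.aio.tunings.cancel(name='tunedModels/my-tuned-model-123')  # placeholder

  asyncio.run(main())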
@@ -1210,8 +1920,9 @@ class AsyncTunings(_api_module.BaseModule):
       A TuningJob object.
     """
 
-    parameter_model = types._CreateTuningJobParameters(
+    parameter_model = types._CreateTuningJobParametersPrivate(
         base_model=base_model,
+        pre_tuned_model=pre_tuned_model,
         training_dataset=training_dataset,
         config=config,
     )
@@ -1220,8 +1931,8 @@ class AsyncTunings(_api_module.BaseModule):
     if not self._api_client.vertexai:
       raise ValueError('This method is only supported in the Vertex AI client.')
     else:
-      request_dict = _CreateTuningJobParameters_to_vertex(
-          self._api_client, parameter_model
+      request_dict = _CreateTuningJobParametersPrivate_to_vertex(
+          parameter_model, None, parameter_model
       )
       request_url_dict = request_dict.get('_url')
       if request_url_dict:
@@ -1235,38 +1946,43 @@ class AsyncTunings(_api_module.BaseModule):
     # TODO: remove the hack that pops config.
     request_dict.pop('config', None)
 
-    http_options: Optional[types.HttpOptionsOrDict] = None
-    if isinstance(parameter_model.config, dict):
-      http_options = parameter_model.config.get('http_options', None)
-    elif hasattr(parameter_model.config, 'http_options'):
-      http_options = parameter_model.config.http_options
+    http_options: Optional[types.HttpOptions] = None
+    if (
+        parameter_model.config is not None
+        and parameter_model.config.http_options is not None
+    ):
+      http_options = parameter_model.config.http_options
 
     request_dict = _common.convert_to_dict(request_dict)
     request_dict = _common.encode_unserializable_types(request_dict)
 
-    response_dict = await self._api_client.async_request(
+    response = await self._api_client.async_request(
         'post', path, request_dict, http_options
     )
 
+    response_dict = {} if not response.body else json.loads(response.body)
+
     if self._api_client.vertexai:
-      response_dict = _TuningJob_from_vertex(self._api_client, response_dict)
-    else:
-      response_dict = _TuningJob_from_mldev(self._api_client, response_dict)
+      response_dict = _TuningJob_from_vertex(response_dict)
 
     return_value = types.TuningJob._from_response(
         response=response_dict, kwargs=parameter_model.model_dump()
     )
+    return_value.sdk_http_response = types.HttpResponse(
+        headers=response.headers
+    )
     self._api_client._verify_response(return_value)
     return return_value
 
   async def _tune_mldev(
      self,
       *,
-      base_model: str,
+      base_model: Optional[str] = None,
+      pre_tuned_model: Optional[types.PreTunedModelOrDict] = None,
       training_dataset: types.TuningDatasetOrDict,
       config: Optional[types.CreateTuningJobConfigOrDict] = None,
-  ) -> types.Operation:
-    """Creates a supervised fine-tuning job.
+  ) -> types.TuningOperation:
+    """Creates a tuning job and returns the TuningJob object.
 
     Args:
       base_model: The name of the model to tune.
@@ -1277,18 +1993,21 @@ class AsyncTunings(_api_module.BaseModule):
       A TuningJob operation.
     """
 
-    parameter_model = types._CreateTuningJobParameters(
+    parameter_model = types._CreateTuningJobParametersPrivate(
         base_model=base_model,
+        pre_tuned_model=pre_tuned_model,
         training_dataset=training_dataset,
         config=config,
     )
 
     request_url_dict: Optional[dict[str, str]]
     if self._api_client.vertexai:
-      raise ValueError('This method is only supported in the Gemini Developer client.')
+      raise ValueError(
+          'This method is only supported in the Gemini Developer client.'
+      )
     else:
-      request_dict = _CreateTuningJobParameters_to_mldev(
-          self._api_client, parameter_model
+      request_dict = _CreateTuningJobParametersPrivate_to_mldev(
+          parameter_model, None, parameter_model
       )
       request_url_dict = request_dict.get('_url')
       if request_url_dict:
@@ -1302,36 +2021,57 @@ class AsyncTunings(_api_module.BaseModule):
     # TODO: remove the hack that pops config.
     request_dict.pop('config', None)
 
-    http_options: Optional[types.HttpOptionsOrDict] = None
-    if isinstance(parameter_model.config, dict):
-      http_options = parameter_model.config.get('http_options', None)
-    elif hasattr(parameter_model.config, 'http_options'):
-      http_options = parameter_model.config.http_options
+    http_options: Optional[types.HttpOptions] = None
+    if (
+        parameter_model.config is not None
+        and parameter_model.config.http_options is not None
+    ):
+      http_options = parameter_model.config.http_options
 
     request_dict = _common.convert_to_dict(request_dict)
     request_dict = _common.encode_unserializable_types(request_dict)
 
-    response_dict = await self._api_client.async_request(
+    response = await self._api_client.async_request(
         'post', path, request_dict, http_options
     )
 
-    if self._api_client.vertexai:
-      response_dict = _Operation_from_vertex(self._api_client, response_dict)
-    else:
-      response_dict = _Operation_from_mldev(self._api_client, response_dict)
+    response_dict = {} if not response.body else json.loads(response.body)
+
+    if not self._api_client.vertexai:
+      response_dict = _TuningOperation_from_mldev(response_dict)
 
-    return_value = types.Operation._from_response(
+    return_value = types.TuningOperation._from_response(
         response=response_dict, kwargs=parameter_model.model_dump()
     )
+    return_value.sdk_http_response = types.HttpResponse(
+        headers=response.headers
+    )
     self._api_client._verify_response(return_value)
     return return_value
 
   async def list(
       self, *, config: Optional[types.ListTuningJobsConfigOrDict] = None
   ) -> AsyncPager[types.TuningJob]:
+    """Lists `TuningJob` objects asynchronously.
+
+    Args:
+      config: The configuration for the list request.
+
+    Returns:
+      A Pager object that contains one page of tuning jobs. When iterating over
+      the pager, it automatically fetches the next page if there are more.
+
+    Usage:
+
+    .. code-block:: python
+      async for tuning_job in await client.aio.tunings.list():
+        print(tuning_job.name)
+    """
+
+    list_request = self._list
     return AsyncPager(
         'tuning_jobs',
-        self._list,
+        list_request,
         await self._list(config=config),
         config,
     )
@@ -1366,11 +2106,49 @@ class AsyncTunings(_api_module.BaseModule):
       config: Optional[types.CreateTuningJobConfigOrDict] = None,
   ) -> types.TuningJob:
     if self._api_client.vertexai:
-      tuning_job = await self._tune(
-          base_model=base_model,
-          training_dataset=training_dataset,
-          config=config,
-      )
+      if base_model.startswith('projects/'):  # Pre-tuned model
+        checkpoint_id = None
+        if config:
+          checkpoint_id = getattr(config, 'pre_tuned_model_checkpoint_id', None)
+        pre_tuned_model = types.PreTunedModel(
+            tuned_model_name=base_model, checkpoint_id=checkpoint_id
+        )
+
+        tuning_job = await self._tune(
+            pre_tuned_model=pre_tuned_model,
+            training_dataset=training_dataset,
+            config=config,
+        )
+      else:
+        if (
+            config is not None
+            and getattr(config, 'evaluation_config', None) is not None
+        ):
+          evaluation_config = getattr(config, 'evaluation_config')
+          if isinstance(evaluation_config, dict):
+            evaluation_config = types.EvaluationConfig(**evaluation_config)
+          if (
+              not evaluation_config.metrics
+              or not evaluation_config.output_config
+          ):
+            raise ValueError(
+                'Evaluation config must have at least one metric and an output'
+                ' config.'
+            )
+          for i in range(len(evaluation_config.metrics)):
+            if isinstance(evaluation_config.metrics[i], dict):
+              evaluation_config.metrics[i] = types.Metric.model_validate(
+                  evaluation_config.metrics[i]
+              )
+          if isinstance(config, dict):
+            config['evaluation_config'] = evaluation_config
+          else:
+            config.evaluation_config = evaluation_config
+        tuning_job = await self._tune(
+            base_model=base_model,
+            training_dataset=training_dataset,
+            config=config,
+        )
     else:
       operation = await self._tune_mldev(
           base_model=base_model,
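Note: the async `tune` mirrors the sync branching shown earlier, awaiting `self._tune` in both branches. A short usage sketch (all resource names are placeholders):

  import asyncio

  from google import genai
  from google.genai import types

  async def main() -> None:
    client = genai.Client(vertexai=True, project='my-project', location='us-central1')
    job = await client.aio.tunings.tune(
        base_model='gemini-2.0-flash-001',
        training_dataset=types.TuningDataset(gcs_uri='gs://my-bucket/train.jsonl'),
    )
    print(job.name)

  asyncio.run(main())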
@@ -1400,7 +2178,7 @@ class _IpythonUtils:
   displayed_experiments: set[str] = set()
 
   @staticmethod
-  def _get_ipython_shell_name() -> str:
+  def _get_ipython_shell_name() -> Union[str, Any]:
     import sys
 
     if 'IPython' in sys.modules:
@@ -1522,7 +2300,7 @@ class _IpythonUtils:
     </script>
     """
 
-    from IPython.core.display import display
+    from IPython.display import display
     from IPython.display import HTML
 
     display(HTML(html))