google-genai 1.7.0__py3-none-any.whl → 1.53.0__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in their respective public registries.
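A comparison like this can be reproduced locally: wheels are plain zip archives, so downloading both versions and diffing a member file is enough. Below is a hedged sketch (assuming `pip` is on PATH and both version pins still exist on PyPI; the directory names are illustrative):

```python
# Sketch: rebuild the batches.py diff from the two published wheels.
import difflib
import subprocess
import zipfile

for version in ('1.7.0', '1.53.0'):
    subprocess.run(
        ['pip', 'download', f'google-genai=={version}',
         '--no-deps', '-d', f'wheels-{version}'],
        check=True,
    )

def read_member(version: str, member: str) -> list[str]:
    # Wheel filename follows the pattern shown in this page's title.
    path = f'wheels-{version}/google_genai-{version}-py3-none-any.whl'
    with zipfile.ZipFile(path) as wheel:
        return wheel.read(member).decode('utf-8').splitlines(keepends=True)

old = read_member('1.7.0', 'google/genai/batches.py')
new = read_member('1.53.0', 'google/genai/batches.py')
print(''.join(difflib.unified_diff(old, new, 'a/batches.py', 'b/batches.py')))
```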
Files changed (42)
  1. google/genai/__init__.py +4 -2
  2. google/genai/_adapters.py +55 -0
  3. google/genai/_api_client.py +1301 -299
  4. google/genai/_api_module.py +1 -1
  5. google/genai/_automatic_function_calling_util.py +54 -33
  6. google/genai/_base_transformers.py +26 -0
  7. google/genai/_base_url.py +50 -0
  8. google/genai/_common.py +560 -59
  9. google/genai/_extra_utils.py +371 -38
  10. google/genai/_live_converters.py +1467 -0
  11. google/genai/_local_tokenizer_loader.py +214 -0
  12. google/genai/_mcp_utils.py +117 -0
  13. google/genai/_operations_converters.py +394 -0
  14. google/genai/_replay_api_client.py +204 -92
  15. google/genai/_test_api_client.py +1 -1
  16. google/genai/_tokens_converters.py +520 -0
  17. google/genai/_transformers.py +633 -233
  18. google/genai/batches.py +1733 -538
  19. google/genai/caches.py +678 -1012
  20. google/genai/chats.py +48 -38
  21. google/genai/client.py +142 -15
  22. google/genai/documents.py +532 -0
  23. google/genai/errors.py +141 -35
  24. google/genai/file_search_stores.py +1296 -0
  25. google/genai/files.py +312 -744
  26. google/genai/live.py +617 -367
  27. google/genai/live_music.py +197 -0
  28. google/genai/local_tokenizer.py +395 -0
  29. google/genai/models.py +3598 -3116
  30. google/genai/operations.py +201 -362
  31. google/genai/pagers.py +23 -7
  32. google/genai/py.typed +1 -0
  33. google/genai/tokens.py +362 -0
  34. google/genai/tunings.py +1274 -496
  35. google/genai/types.py +14535 -5454
  36. google/genai/version.py +2 -2
  37. {google_genai-1.7.0.dist-info → google_genai-1.53.0.dist-info}/METADATA +736 -234
  38. google_genai-1.53.0.dist-info/RECORD +41 -0
  39. {google_genai-1.7.0.dist-info → google_genai-1.53.0.dist-info}/WHEEL +1 -1
  40. google_genai-1.7.0.dist-info/RECORD +0 -27
  41. {google_genai-1.7.0.dist-info → google_genai-1.53.0.dist-info/licenses}/LICENSE +0 -0
  42. {google_genai-1.7.0.dist-info → google_genai-1.53.0.dist-info}/top_level.txt +0 -0
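Every converter in the batches.py diff below is built from the same three path helpers imported from `._common`: `getv` (`get_value_by_path`), `setv` (`set_value_by_path`), and, new in this release, `movev` (`move_value_by_path`), which re-roots keys such as `requests[].*`. As a rough mental model, here is a minimal sketch of the two core helpers — an illustration under stated assumptions, not the SDK's actual implementation:

```python
# A minimal sketch of the `_common` path helpers (illustration only; the real
# implementations in google/genai/_common.py also handle list wildcards such
# as 'requests[]', Pydantic models, and special keys like '_url' and '_query'
# that are consumed by the request layer rather than sent in the body).
from typing import Any, Union


def get_value_by_path(data: Union[dict, object], keys: list[str]) -> Any:
  """Walks `keys` into nested dicts/attributes; returns None on any miss."""
  current: Any = data
  for key in keys:
    if isinstance(current, dict):
      current = current.get(key)
    else:
      current = getattr(current, key, None)
    if current is None:
      return None
  return current


def set_value_by_path(data: dict, keys: list[str], value: Any) -> None:
  """Creates intermediate dicts as needed and writes `value` at the leaf."""
  for key in keys[:-1]:
    data = data.setdefault(key, {})
  data[keys[-1]] = value


# Mirrors _BatchJobDestination_from_vertex below: camelCase wire fields are
# copied into snake_case SDK fields.
raw = {'gcsDestination': {'outputUriPrefix': 'gs://bucket/out'}}
dest: dict[str, Any] = {}
if get_value_by_path(raw, ['gcsDestination', 'outputUriPrefix']) is not None:
  set_value_by_path(
      dest,
      ['gcs_uri'],
      get_value_by_path(raw, ['gcsDestination', 'outputUriPrefix']),
  )
assert dest == {'gcs_uri': 'gs://bucket/out'}
```

The naming convention carries the rest: `_X_to_mldev` / `_X_to_vertex` serialize SDK-style snake_case objects onto the Gemini API or Vertex AI wire format, `_X_from_mldev` / `_X_from_vertex` parse responses back, and fields unsupported by a given backend raise `ValueError` instead of being silently dropped.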
google/genai/batches.py CHANGED
@@ -1,4 +1,4 @@
-# Copyright 2024 Google LLC
+# Copyright 2025 Google LLC
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -15,9 +15,11 @@
 
 # Code generated by the Google Gen AI SDK generator DO NOT EDIT.
 
+import json
 import logging
 from typing import Any, Optional, Union
 from urllib.parse import urlencode
+
 from . import _api_module
 from . import _common
 from . import _extra_utils
@@ -25,75 +27,83 @@ from . import _transformers as t
 from . import types
 from ._api_client import BaseApiClient
 from ._common import get_value_by_path as getv
+from ._common import move_value_by_path as movev
 from ._common import set_value_by_path as setv
 from .pagers import AsyncPager, Pager
 
+
 logger = logging.getLogger('google_genai.batches')
 
 
-def _BatchJobSource_to_mldev(
-    api_client: BaseApiClient,
-    from_object: Union[dict, object],
-    parent_object: Optional[dict] = None,
-) -> dict:
+def _BatchJobDestination_from_mldev(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
   to_object: dict[str, Any] = {}
-  if getv(from_object, ['format']) is not None:
-    raise ValueError('format parameter is not supported in Gemini API.')
-
-  if getv(from_object, ['gcs_uri']) is not None:
-    raise ValueError('gcs_uri parameter is not supported in Gemini API.')
-
-  if getv(from_object, ['bigquery_uri']) is not None:
-    raise ValueError('bigquery_uri parameter is not supported in Gemini API.')
-
-  return to_object
-
 
-def _BatchJobSource_to_vertex(
-    api_client: BaseApiClient,
-    from_object: Union[dict, object],
-    parent_object: Optional[dict] = None,
-) -> dict:
-  to_object: dict[str, Any] = {}
-  if getv(from_object, ['format']) is not None:
-    setv(to_object, ['instancesFormat'], getv(from_object, ['format']))
+  if getv(from_object, ['responsesFile']) is not None:
+    setv(to_object, ['file_name'], getv(from_object, ['responsesFile']))
 
-  if getv(from_object, ['gcs_uri']) is not None:
-    setv(to_object, ['gcsSource', 'uris'], getv(from_object, ['gcs_uri']))
+  if getv(from_object, ['inlinedResponses', 'inlinedResponses']) is not None:
+    setv(
+        to_object,
+        ['inlined_responses'],
+        [
+            _InlinedResponse_from_mldev(item, to_object)
+            for item in getv(
+                from_object, ['inlinedResponses', 'inlinedResponses']
+            )
+        ],
+    )
 
-  if getv(from_object, ['bigquery_uri']) is not None:
+  if (
+      getv(from_object, ['inlinedEmbedContentResponses', 'inlinedResponses'])
+      is not None
+  ):
     setv(
         to_object,
-        ['bigquerySource', 'inputUri'],
-        getv(from_object, ['bigquery_uri']),
+        ['inlined_embed_content_responses'],
+        [
+            item
+            for item in getv(
+                from_object,
+                ['inlinedEmbedContentResponses', 'inlinedResponses'],
+            )
+        ],
     )
 
   return to_object
 
 
-def _BatchJobDestination_to_mldev(
-    api_client: BaseApiClient,
-    from_object: Union[dict, object],
-    parent_object: Optional[dict] = None,
-) -> dict:
+def _BatchJobDestination_from_vertex(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
   to_object: dict[str, Any] = {}
-  if getv(from_object, ['format']) is not None:
-    raise ValueError('format parameter is not supported in Gemini API.')
+  if getv(from_object, ['predictionsFormat']) is not None:
+    setv(to_object, ['format'], getv(from_object, ['predictionsFormat']))
 
-  if getv(from_object, ['gcs_uri']) is not None:
-    raise ValueError('gcs_uri parameter is not supported in Gemini API.')
+  if getv(from_object, ['gcsDestination', 'outputUriPrefix']) is not None:
+    setv(
+        to_object,
+        ['gcs_uri'],
+        getv(from_object, ['gcsDestination', 'outputUriPrefix']),
+    )
 
-  if getv(from_object, ['bigquery_uri']) is not None:
-    raise ValueError('bigquery_uri parameter is not supported in Gemini API.')
+  if getv(from_object, ['bigqueryDestination', 'outputUri']) is not None:
+    setv(
+        to_object,
+        ['bigquery_uri'],
+        getv(from_object, ['bigqueryDestination', 'outputUri']),
+    )
 
   return to_object
 
 
 def _BatchJobDestination_to_vertex(
-    api_client: BaseApiClient,
-    from_object: Union[dict, object],
-    parent_object: Optional[dict] = None,
-) -> dict:
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
   to_object: dict[str, Any] = {}
   if getv(from_object, ['format']) is not None:
     setv(to_object, ['predictionsFormat'], getv(from_object, ['format']))
@@ -112,163 +122,258 @@ def _BatchJobDestination_to_vertex(
         getv(from_object, ['bigquery_uri']),
     )
 
+  if getv(from_object, ['file_name']) is not None:
+    raise ValueError('file_name parameter is not supported in Vertex AI.')
+
+  if getv(from_object, ['inlined_responses']) is not None:
+    raise ValueError(
+        'inlined_responses parameter is not supported in Vertex AI.'
+    )
+
+  if getv(from_object, ['inlined_embed_content_responses']) is not None:
+    raise ValueError(
+        'inlined_embed_content_responses parameter is not supported in'
+        ' Vertex AI.'
+    )
+
   return to_object
 
 
-def _CreateBatchJobConfig_to_mldev(
-    api_client: BaseApiClient,
-    from_object: Union[dict, object],
-    parent_object: Optional[dict] = None,
-) -> dict:
+def _BatchJobSource_from_vertex(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
   to_object: dict[str, Any] = {}
+  if getv(from_object, ['instancesFormat']) is not None:
+    setv(to_object, ['format'], getv(from_object, ['instancesFormat']))
 
-  if getv(from_object, ['display_name']) is not None:
-    setv(parent_object, ['displayName'], getv(from_object, ['display_name']))
+  if getv(from_object, ['gcsSource', 'uris']) is not None:
+    setv(to_object, ['gcs_uri'], getv(from_object, ['gcsSource', 'uris']))
 
-  if getv(from_object, ['dest']) is not None:
-    raise ValueError('dest parameter is not supported in Gemini API.')
+  if getv(from_object, ['bigquerySource', 'inputUri']) is not None:
+    setv(
+        to_object,
+        ['bigquery_uri'],
+        getv(from_object, ['bigquerySource', 'inputUri']),
+    )
 
   return to_object
 
 
-def _CreateBatchJobConfig_to_vertex(
+def _BatchJobSource_to_mldev(
     api_client: BaseApiClient,
-    from_object: Union[dict, object],
-    parent_object: Optional[dict] = None,
-) -> dict:
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
   to_object: dict[str, Any] = {}
+  if getv(from_object, ['format']) is not None:
+    raise ValueError('format parameter is not supported in Gemini API.')
 
-  if getv(from_object, ['display_name']) is not None:
-    setv(parent_object, ['displayName'], getv(from_object, ['display_name']))
+  if getv(from_object, ['gcs_uri']) is not None:
+    raise ValueError('gcs_uri parameter is not supported in Gemini API.')
 
-  if getv(from_object, ['dest']) is not None:
+  if getv(from_object, ['bigquery_uri']) is not None:
+    raise ValueError('bigquery_uri parameter is not supported in Gemini API.')
+
+  if getv(from_object, ['file_name']) is not None:
+    setv(to_object, ['fileName'], getv(from_object, ['file_name']))
+
+  if getv(from_object, ['inlined_requests']) is not None:
     setv(
-        parent_object,
-        ['outputConfig'],
-        _BatchJobDestination_to_vertex(
-            api_client,
-            t.t_batch_job_destination(api_client, getv(from_object, ['dest'])),
-            to_object,
-        ),
+        to_object,
+        ['requests', 'requests'],
+        [
+            _InlinedRequest_to_mldev(api_client, item, to_object)
+            for item in getv(from_object, ['inlined_requests'])
+        ],
     )
 
   return to_object
 
 
-def _CreateBatchJobParameters_to_mldev(
-    api_client: BaseApiClient,
-    from_object: Union[dict, object],
-    parent_object: Optional[dict] = None,
-) -> dict:
+def _BatchJobSource_to_vertex(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
   to_object: dict[str, Any] = {}
-  if getv(from_object, ['model']) is not None:
-    raise ValueError('model parameter is not supported in Gemini API.')
+  if getv(from_object, ['format']) is not None:
+    setv(to_object, ['instancesFormat'], getv(from_object, ['format']))
 
-  if getv(from_object, ['src']) is not None:
-    raise ValueError('src parameter is not supported in Gemini API.')
+  if getv(from_object, ['gcs_uri']) is not None:
+    setv(to_object, ['gcsSource', 'uris'], getv(from_object, ['gcs_uri']))
 
-  if getv(from_object, ['config']) is not None:
+  if getv(from_object, ['bigquery_uri']) is not None:
     setv(
         to_object,
-        ['config'],
-        _CreateBatchJobConfig_to_mldev(
-            api_client, getv(from_object, ['config']), to_object
-        ),
+        ['bigquerySource', 'inputUri'],
+        getv(from_object, ['bigquery_uri']),
+    )
+
+  if getv(from_object, ['file_name']) is not None:
+    raise ValueError('file_name parameter is not supported in Vertex AI.')
+
+  if getv(from_object, ['inlined_requests']) is not None:
+    raise ValueError(
+        'inlined_requests parameter is not supported in Vertex AI.'
     )
 
   return to_object
 
 
-def _CreateBatchJobParameters_to_vertex(
-    api_client: BaseApiClient,
-    from_object: Union[dict, object],
-    parent_object: Optional[dict] = None,
-) -> dict:
+def _BatchJob_from_mldev(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
   to_object: dict[str, Any] = {}
-  if getv(from_object, ['model']) is not None:
+  if getv(from_object, ['name']) is not None:
+    setv(to_object, ['name'], getv(from_object, ['name']))
+
+  if getv(from_object, ['metadata', 'displayName']) is not None:
     setv(
         to_object,
-        ['model'],
-        t.t_model(api_client, getv(from_object, ['model'])),
+        ['display_name'],
+        getv(from_object, ['metadata', 'displayName']),
     )
 
-  if getv(from_object, ['src']) is not None:
+  if getv(from_object, ['metadata', 'state']) is not None:
     setv(
         to_object,
-        ['inputConfig'],
-        _BatchJobSource_to_vertex(
-            api_client,
-            t.t_batch_job_source(api_client, getv(from_object, ['src'])),
-            to_object,
-        ),
+        ['state'],
+        t.t_job_state(getv(from_object, ['metadata', 'state'])),
     )
 
-  if getv(from_object, ['config']) is not None:
+  if getv(from_object, ['metadata', 'createTime']) is not None:
+    setv(
+        to_object,
+        ['create_time'],
+        getv(from_object, ['metadata', 'createTime']),
+    )
+
+  if getv(from_object, ['metadata', 'endTime']) is not None:
+    setv(to_object, ['end_time'], getv(from_object, ['metadata', 'endTime']))
+
+  if getv(from_object, ['metadata', 'updateTime']) is not None:
+    setv(
+        to_object,
+        ['update_time'],
+        getv(from_object, ['metadata', 'updateTime']),
+    )
+
+  if getv(from_object, ['metadata', 'model']) is not None:
+    setv(to_object, ['model'], getv(from_object, ['metadata', 'model']))
+
+  if getv(from_object, ['metadata', 'output']) is not None:
     setv(
         to_object,
-        ['config'],
-        _CreateBatchJobConfig_to_vertex(
-            api_client, getv(from_object, ['config']), to_object
+        ['dest'],
+        _BatchJobDestination_from_mldev(
+            t.t_recv_batch_job_destination(
+                getv(from_object, ['metadata', 'output'])
+            ),
+            to_object,
         ),
     )
 
   return to_object
 
 
-def _GetBatchJobParameters_to_mldev(
-    api_client: BaseApiClient,
-    from_object: Union[dict, object],
-    parent_object: Optional[dict] = None,
-) -> dict:
+def _BatchJob_from_vertex(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
   to_object: dict[str, Any] = {}
   if getv(from_object, ['name']) is not None:
-    raise ValueError('name parameter is not supported in Gemini API.')
+    setv(to_object, ['name'], getv(from_object, ['name']))
 
-  if getv(from_object, ['config']) is not None:
-    setv(to_object, ['config'], getv(from_object, ['config']))
+  if getv(from_object, ['displayName']) is not None:
+    setv(to_object, ['display_name'], getv(from_object, ['displayName']))
 
-  return to_object
+  if getv(from_object, ['state']) is not None:
+    setv(to_object, ['state'], t.t_job_state(getv(from_object, ['state'])))
 
+  if getv(from_object, ['error']) is not None:
+    setv(to_object, ['error'], getv(from_object, ['error']))
 
-def _GetBatchJobParameters_to_vertex(
-    api_client: BaseApiClient,
-    from_object: Union[dict, object],
-    parent_object: Optional[dict] = None,
-) -> dict:
-  to_object: dict[str, Any] = {}
-  if getv(from_object, ['name']) is not None:
+  if getv(from_object, ['createTime']) is not None:
+    setv(to_object, ['create_time'], getv(from_object, ['createTime']))
+
+  if getv(from_object, ['startTime']) is not None:
+    setv(to_object, ['start_time'], getv(from_object, ['startTime']))
+
+  if getv(from_object, ['endTime']) is not None:
+    setv(to_object, ['end_time'], getv(from_object, ['endTime']))
+
+  if getv(from_object, ['updateTime']) is not None:
+    setv(to_object, ['update_time'], getv(from_object, ['updateTime']))
+
+  if getv(from_object, ['model']) is not None:
+    setv(to_object, ['model'], getv(from_object, ['model']))
+
+  if getv(from_object, ['inputConfig']) is not None:
     setv(
         to_object,
-        ['_url', 'name'],
-        t.t_batch_job_name(api_client, getv(from_object, ['name'])),
+        ['src'],
+        _BatchJobSource_from_vertex(
+            getv(from_object, ['inputConfig']), to_object
+        ),
     )
 
-  if getv(from_object, ['config']) is not None:
-    setv(to_object, ['config'], getv(from_object, ['config']))
+  if getv(from_object, ['outputConfig']) is not None:
+    setv(
+        to_object,
+        ['dest'],
+        _BatchJobDestination_from_vertex(
+            t.t_recv_batch_job_destination(getv(from_object, ['outputConfig'])),
+            to_object,
+        ),
+    )
+
+  if getv(from_object, ['completionStats']) is not None:
+    setv(
+        to_object, ['completion_stats'], getv(from_object, ['completionStats'])
+    )
+
+  return to_object
+
+
+def _Blob_to_mldev(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['data']) is not None:
+    setv(to_object, ['data'], getv(from_object, ['data']))
+
+  if getv(from_object, ['display_name']) is not None:
+    raise ValueError('display_name parameter is not supported in Gemini API.')
+
+  if getv(from_object, ['mime_type']) is not None:
+    setv(to_object, ['mimeType'], getv(from_object, ['mime_type']))
 
   return to_object
 
 
 def _CancelBatchJobParameters_to_mldev(
     api_client: BaseApiClient,
-    from_object: Union[dict, object],
-    parent_object: Optional[dict] = None,
-) -> dict:
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
   to_object: dict[str, Any] = {}
   if getv(from_object, ['name']) is not None:
-    raise ValueError('name parameter is not supported in Gemini API.')
-
-  if getv(from_object, ['config']) is not None:
-    setv(to_object, ['config'], getv(from_object, ['config']))
+    setv(
+        to_object,
+        ['_url', 'name'],
+        t.t_batch_job_name(api_client, getv(from_object, ['name'])),
+    )
 
   return to_object
 
 
 def _CancelBatchJobParameters_to_vertex(
     api_client: BaseApiClient,
-    from_object: Union[dict, object],
-    parent_object: Optional[dict] = None,
-) -> dict:
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
   to_object: dict[str, Any] = {}
   if getv(from_object, ['name']) is not None:
     setv(
@@ -277,112 +382,267 @@ def _CancelBatchJobParameters_to_vertex(
         t.t_batch_job_name(api_client, getv(from_object, ['name'])),
     )
 
-  if getv(from_object, ['config']) is not None:
-    setv(to_object, ['config'], getv(from_object, ['config']))
-
   return to_object
 
 
-def _ListBatchJobsConfig_to_mldev(
-    api_client: BaseApiClient,
-    from_object: Union[dict, object],
-    parent_object: Optional[dict] = None,
-) -> dict:
+def _Candidate_from_mldev(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
   to_object: dict[str, Any] = {}
+  if getv(from_object, ['content']) is not None:
+    setv(to_object, ['content'], getv(from_object, ['content']))
 
-  if getv(from_object, ['page_size']) is not None:
+  if getv(from_object, ['citationMetadata']) is not None:
     setv(
-        parent_object, ['_query', 'pageSize'], getv(from_object, ['page_size'])
+        to_object,
+        ['citation_metadata'],
+        _CitationMetadata_from_mldev(
+            getv(from_object, ['citationMetadata']), to_object
+        ),
     )
 
-  if getv(from_object, ['page_token']) is not None:
-    setv(
-        parent_object,
-        ['_query', 'pageToken'],
-        getv(from_object, ['page_token']),
-    )
+  if getv(from_object, ['tokenCount']) is not None:
+    setv(to_object, ['token_count'], getv(from_object, ['tokenCount']))
 
-  if getv(from_object, ['filter']) is not None:
-    raise ValueError('filter parameter is not supported in Gemini API.')
+  if getv(from_object, ['finishReason']) is not None:
+    setv(to_object, ['finish_reason'], getv(from_object, ['finishReason']))
 
-  return to_object
+  if getv(from_object, ['avgLogprobs']) is not None:
+    setv(to_object, ['avg_logprobs'], getv(from_object, ['avgLogprobs']))
 
+  if getv(from_object, ['groundingMetadata']) is not None:
+    setv(
+        to_object,
+        ['grounding_metadata'],
+        getv(from_object, ['groundingMetadata']),
+    )
 
-def _ListBatchJobsConfig_to_vertex(
-    api_client: BaseApiClient,
-    from_object: Union[dict, object],
-    parent_object: Optional[dict] = None,
-) -> dict:
-  to_object: dict[str, Any] = {}
+  if getv(from_object, ['index']) is not None:
+    setv(to_object, ['index'], getv(from_object, ['index']))
 
-  if getv(from_object, ['page_size']) is not None:
+  if getv(from_object, ['logprobsResult']) is not None:
+    setv(to_object, ['logprobs_result'], getv(from_object, ['logprobsResult']))
+
+  if getv(from_object, ['safetyRatings']) is not None:
     setv(
-        parent_object, ['_query', 'pageSize'], getv(from_object, ['page_size'])
+        to_object,
+        ['safety_ratings'],
+        [item for item in getv(from_object, ['safetyRatings'])],
     )
 
-  if getv(from_object, ['page_token']) is not None:
+  if getv(from_object, ['urlContextMetadata']) is not None:
     setv(
-        parent_object,
-        ['_query', 'pageToken'],
-        getv(from_object, ['page_token']),
+        to_object,
+        ['url_context_metadata'],
+        getv(from_object, ['urlContextMetadata']),
     )
 
-  if getv(from_object, ['filter']) is not None:
-    setv(parent_object, ['_query', 'filter'], getv(from_object, ['filter']))
-
   return to_object
 
 
-def _ListBatchJobsParameters_to_mldev(
-    api_client: BaseApiClient,
-    from_object: Union[dict, object],
-    parent_object: Optional[dict] = None,
-) -> dict:
+def _CitationMetadata_from_mldev(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
   to_object: dict[str, Any] = {}
-  if getv(from_object, ['config']) is not None:
-    raise ValueError('config parameter is not supported in Gemini API.')
+  if getv(from_object, ['citationSources']) is not None:
+    setv(
+        to_object,
+        ['citations'],
+        [item for item in getv(from_object, ['citationSources'])],
+    )
 
   return to_object
 
 
-def _ListBatchJobsParameters_to_vertex(
-    api_client: BaseApiClient,
-    from_object: Union[dict, object],
-    parent_object: Optional[dict] = None,
-) -> dict:
+def _Content_to_mldev(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
   to_object: dict[str, Any] = {}
-  if getv(from_object, ['config']) is not None:
+  if getv(from_object, ['parts']) is not None:
     setv(
         to_object,
-        ['config'],
-        _ListBatchJobsConfig_to_vertex(
-            api_client, getv(from_object, ['config']), to_object
-        ),
+        ['parts'],
+        [
+            _Part_to_mldev(item, to_object)
+            for item in getv(from_object, ['parts'])
+        ],
     )
 
+  if getv(from_object, ['role']) is not None:
+    setv(to_object, ['role'], getv(from_object, ['role']))
+
   return to_object
 
 
-def _DeleteBatchJobParameters_to_mldev(
+def _CreateBatchJobConfig_to_mldev(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+
+  if getv(from_object, ['display_name']) is not None:
+    setv(
+        parent_object,
+        ['batch', 'displayName'],
+        getv(from_object, ['display_name']),
+    )
+
+  if getv(from_object, ['dest']) is not None:
+    raise ValueError('dest parameter is not supported in Gemini API.')
+
+  return to_object
+
+
+def _CreateBatchJobConfig_to_vertex(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+
+  if getv(from_object, ['display_name']) is not None:
+    setv(parent_object, ['displayName'], getv(from_object, ['display_name']))
+
+  if getv(from_object, ['dest']) is not None:
+    setv(
+        parent_object,
+        ['outputConfig'],
+        _BatchJobDestination_to_vertex(
+            t.t_batch_job_destination(getv(from_object, ['dest'])), to_object
+        ),
+    )
+
+  return to_object
+
+
+def _CreateBatchJobParameters_to_mldev(
     api_client: BaseApiClient,
-    from_object: Union[dict, object],
-    parent_object: Optional[dict] = None,
-) -> dict:
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
   to_object: dict[str, Any] = {}
-  if getv(from_object, ['name']) is not None:
-    raise ValueError('name parameter is not supported in Gemini API.')
+  if getv(from_object, ['model']) is not None:
+    setv(
+        to_object,
+        ['_url', 'model'],
+        t.t_model(api_client, getv(from_object, ['model'])),
+    )
+
+  if getv(from_object, ['src']) is not None:
+    setv(
+        to_object,
+        ['batch', 'inputConfig'],
+        _BatchJobSource_to_mldev(
+            api_client,
+            t.t_batch_job_source(api_client, getv(from_object, ['src'])),
+            to_object,
+        ),
+    )
 
   if getv(from_object, ['config']) is not None:
-    setv(to_object, ['config'], getv(from_object, ['config']))
+    _CreateBatchJobConfig_to_mldev(getv(from_object, ['config']), to_object)
+
+  return to_object
+
+
+def _CreateBatchJobParameters_to_vertex(
+    api_client: BaseApiClient,
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['model']) is not None:
+    setv(
+        to_object,
+        ['model'],
+        t.t_model(api_client, getv(from_object, ['model'])),
+    )
+
+  if getv(from_object, ['src']) is not None:
+    setv(
+        to_object,
+        ['inputConfig'],
+        _BatchJobSource_to_vertex(
+            t.t_batch_job_source(api_client, getv(from_object, ['src'])),
+            to_object,
+        ),
+    )
+
+  if getv(from_object, ['config']) is not None:
+    _CreateBatchJobConfig_to_vertex(getv(from_object, ['config']), to_object)
+
+  return to_object
+
+
+def _CreateEmbeddingsBatchJobConfig_to_mldev(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+
+  if getv(from_object, ['display_name']) is not None:
+    setv(
+        parent_object,
+        ['batch', 'displayName'],
+        getv(from_object, ['display_name']),
+    )
+
+  return to_object
+
+
+def _CreateEmbeddingsBatchJobParameters_to_mldev(
+    api_client: BaseApiClient,
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['model']) is not None:
+    setv(
+        to_object,
+        ['_url', 'model'],
+        t.t_model(api_client, getv(from_object, ['model'])),
+    )
+
+  if getv(from_object, ['src']) is not None:
+    setv(
+        to_object,
+        ['batch', 'inputConfig'],
+        _EmbeddingsBatchJobSource_to_mldev(
+            api_client, getv(from_object, ['src']), to_object
+        ),
+    )
+
+  if getv(from_object, ['config']) is not None:
+    _CreateEmbeddingsBatchJobConfig_to_mldev(
+        getv(from_object, ['config']), to_object
+    )
+
+  return to_object
+
+
+def _DeleteBatchJobParameters_to_mldev(
+    api_client: BaseApiClient,
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['name']) is not None:
+    setv(
+        to_object,
+        ['_url', 'name'],
+        t.t_batch_job_name(api_client, getv(from_object, ['name'])),
+    )
 
   return to_object
 
 
 def _DeleteBatchJobParameters_to_vertex(
     api_client: BaseApiClient,
-    from_object: Union[dict, object],
-    parent_object: Optional[dict] = None,
-) -> dict:
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
   to_object: dict[str, Any] = {}
   if getv(from_object, ['name']) is not None:
     setv(
@@ -391,243 +651,833 @@ def _DeleteBatchJobParameters_to_vertex(
         t.t_batch_job_name(api_client, getv(from_object, ['name'])),
     )
 
+  return to_object
+
+
+def _DeleteResourceJob_from_mldev(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['sdkHttpResponse']) is not None:
+    setv(
+        to_object, ['sdk_http_response'], getv(from_object, ['sdkHttpResponse'])
+    )
+
+  if getv(from_object, ['name']) is not None:
+    setv(to_object, ['name'], getv(from_object, ['name']))
+
+  if getv(from_object, ['done']) is not None:
+    setv(to_object, ['done'], getv(from_object, ['done']))
+
+  if getv(from_object, ['error']) is not None:
+    setv(to_object, ['error'], getv(from_object, ['error']))
+
+  return to_object
+
+
+def _DeleteResourceJob_from_vertex(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['sdkHttpResponse']) is not None:
+    setv(
+        to_object, ['sdk_http_response'], getv(from_object, ['sdkHttpResponse'])
+    )
+
+  if getv(from_object, ['name']) is not None:
+    setv(to_object, ['name'], getv(from_object, ['name']))
+
+  if getv(from_object, ['done']) is not None:
+    setv(to_object, ['done'], getv(from_object, ['done']))
+
+  if getv(from_object, ['error']) is not None:
+    setv(to_object, ['error'], getv(from_object, ['error']))
+
+  return to_object
+
+
+def _EmbedContentBatch_to_mldev(
+    api_client: BaseApiClient,
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['contents']) is not None:
+    setv(
+        to_object,
+        ['requests[]', 'request', 'content'],
+        [
+            item
+            for item in t.t_contents_for_embed(
+                api_client, getv(from_object, ['contents'])
+            )
+        ],
+    )
+
   if getv(from_object, ['config']) is not None:
-    setv(to_object, ['config'], getv(from_object, ['config']))
+    setv(
+        to_object,
+        ['_self'],
+        _EmbedContentConfig_to_mldev(getv(from_object, ['config']), to_object),
+    )
+  movev(to_object, {'requests[].*': 'requests[].request.*'})
+  return to_object
+
+
+def _EmbedContentConfig_to_mldev(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+
+  if getv(from_object, ['task_type']) is not None:
+    setv(
+        parent_object,
+        ['requests[]', 'taskType'],
+        getv(from_object, ['task_type']),
+    )
+
+  if getv(from_object, ['title']) is not None:
+    setv(parent_object, ['requests[]', 'title'], getv(from_object, ['title']))
+
+  if getv(from_object, ['output_dimensionality']) is not None:
+    setv(
+        parent_object,
+        ['requests[]', 'outputDimensionality'],
+        getv(from_object, ['output_dimensionality']),
+    )
+
+  if getv(from_object, ['mime_type']) is not None:
+    raise ValueError('mime_type parameter is not supported in Gemini API.')
+
+  if getv(from_object, ['auto_truncate']) is not None:
+    raise ValueError('auto_truncate parameter is not supported in Gemini API.')
 
   return to_object
 
 
-def _JobError_from_mldev(
+def _EmbeddingsBatchJobSource_to_mldev(
     api_client: BaseApiClient,
-    from_object: Union[dict, object],
-    parent_object: Optional[dict] = None,
-) -> dict:
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
   to_object: dict[str, Any] = {}
+  if getv(from_object, ['file_name']) is not None:
+    setv(to_object, ['file_name'], getv(from_object, ['file_name']))
+
+  if getv(from_object, ['inlined_requests']) is not None:
+    setv(
+        to_object,
+        ['requests'],
+        _EmbedContentBatch_to_mldev(
+            api_client, getv(from_object, ['inlined_requests']), to_object
+        ),
+    )
 
   return to_object
 
 
-def _JobError_from_vertex(
+def _FileData_to_mldev(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['display_name']) is not None:
+    raise ValueError('display_name parameter is not supported in Gemini API.')
+
+  if getv(from_object, ['file_uri']) is not None:
+    setv(to_object, ['fileUri'], getv(from_object, ['file_uri']))
+
+  if getv(from_object, ['mime_type']) is not None:
+    setv(to_object, ['mimeType'], getv(from_object, ['mime_type']))
+
+  return to_object
+
+
+def _FunctionCall_to_mldev(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['id']) is not None:
+    setv(to_object, ['id'], getv(from_object, ['id']))
+
+  if getv(from_object, ['args']) is not None:
+    setv(to_object, ['args'], getv(from_object, ['args']))
+
+  if getv(from_object, ['name']) is not None:
+    setv(to_object, ['name'], getv(from_object, ['name']))
+
+  if getv(from_object, ['partial_args']) is not None:
+    raise ValueError('partial_args parameter is not supported in Gemini API.')
+
+  if getv(from_object, ['will_continue']) is not None:
+    raise ValueError('will_continue parameter is not supported in Gemini API.')
+
+  return to_object
+
+
+def _FunctionCallingConfig_to_mldev(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['mode']) is not None:
+    setv(to_object, ['mode'], getv(from_object, ['mode']))
+
+  if getv(from_object, ['allowed_function_names']) is not None:
+    setv(
+        to_object,
+        ['allowedFunctionNames'],
+        getv(from_object, ['allowed_function_names']),
+    )
+
+  if getv(from_object, ['stream_function_call_arguments']) is not None:
+    raise ValueError(
+        'stream_function_call_arguments parameter is not supported in Gemini'
+        ' API.'
+    )
+
+  return to_object
+
+
+ def _GenerateContentConfig_to_mldev(
411
847
  api_client: BaseApiClient,
412
- from_object: Union[dict, object],
413
- parent_object: Optional[dict] = None,
414
- ) -> dict:
848
+ from_object: Union[dict[str, Any], object],
849
+ parent_object: Optional[dict[str, Any]] = None,
850
+ ) -> dict[str, Any]:
851
+ to_object: dict[str, Any] = {}
852
+
853
+ if getv(from_object, ['system_instruction']) is not None:
854
+ setv(
855
+ parent_object,
856
+ ['systemInstruction'],
857
+ _Content_to_mldev(
858
+ t.t_content(getv(from_object, ['system_instruction'])), to_object
859
+ ),
860
+ )
861
+
862
+ if getv(from_object, ['temperature']) is not None:
863
+ setv(to_object, ['temperature'], getv(from_object, ['temperature']))
864
+
865
+ if getv(from_object, ['top_p']) is not None:
866
+ setv(to_object, ['topP'], getv(from_object, ['top_p']))
867
+
868
+ if getv(from_object, ['top_k']) is not None:
869
+ setv(to_object, ['topK'], getv(from_object, ['top_k']))
870
+
871
+ if getv(from_object, ['candidate_count']) is not None:
872
+ setv(to_object, ['candidateCount'], getv(from_object, ['candidate_count']))
873
+
874
+ if getv(from_object, ['max_output_tokens']) is not None:
875
+ setv(
876
+ to_object, ['maxOutputTokens'], getv(from_object, ['max_output_tokens'])
877
+ )
878
+
879
+ if getv(from_object, ['stop_sequences']) is not None:
880
+ setv(to_object, ['stopSequences'], getv(from_object, ['stop_sequences']))
881
+
882
+ if getv(from_object, ['response_logprobs']) is not None:
883
+ setv(
884
+ to_object,
885
+ ['responseLogprobs'],
886
+ getv(from_object, ['response_logprobs']),
887
+ )
888
+
889
+ if getv(from_object, ['logprobs']) is not None:
890
+ setv(to_object, ['logprobs'], getv(from_object, ['logprobs']))
891
+
892
+ if getv(from_object, ['presence_penalty']) is not None:
893
+ setv(
894
+ to_object, ['presencePenalty'], getv(from_object, ['presence_penalty'])
895
+ )
896
+
897
+ if getv(from_object, ['frequency_penalty']) is not None:
898
+ setv(
899
+ to_object,
900
+ ['frequencyPenalty'],
901
+ getv(from_object, ['frequency_penalty']),
902
+ )
903
+
904
+ if getv(from_object, ['seed']) is not None:
905
+ setv(to_object, ['seed'], getv(from_object, ['seed']))
906
+
907
+ if getv(from_object, ['response_mime_type']) is not None:
908
+ setv(
909
+ to_object,
910
+ ['responseMimeType'],
911
+ getv(from_object, ['response_mime_type']),
912
+ )
913
+
914
+ if getv(from_object, ['response_schema']) is not None:
915
+ setv(
916
+ to_object,
917
+ ['responseSchema'],
918
+ t.t_schema(api_client, getv(from_object, ['response_schema'])),
919
+ )
920
+
921
+ if getv(from_object, ['response_json_schema']) is not None:
922
+ setv(
923
+ to_object,
924
+ ['responseJsonSchema'],
925
+ getv(from_object, ['response_json_schema']),
926
+ )
927
+
928
+ if getv(from_object, ['routing_config']) is not None:
929
+ raise ValueError('routing_config parameter is not supported in Gemini API.')
930
+
931
+ if getv(from_object, ['model_selection_config']) is not None:
932
+ raise ValueError(
933
+ 'model_selection_config parameter is not supported in Gemini API.'
934
+ )
935
+
936
+ if getv(from_object, ['safety_settings']) is not None:
937
+ setv(
938
+ parent_object,
939
+ ['safetySettings'],
940
+ [
941
+ _SafetySetting_to_mldev(item, to_object)
942
+ for item in getv(from_object, ['safety_settings'])
943
+ ],
944
+ )
945
+
946
+ if getv(from_object, ['tools']) is not None:
947
+ setv(
948
+ parent_object,
949
+ ['tools'],
950
+ [
951
+ _Tool_to_mldev(t.t_tool(api_client, item), to_object)
952
+ for item in t.t_tools(api_client, getv(from_object, ['tools']))
953
+ ],
954
+ )
955
+
956
+ if getv(from_object, ['tool_config']) is not None:
957
+ setv(
958
+ parent_object,
959
+ ['toolConfig'],
960
+ _ToolConfig_to_mldev(getv(from_object, ['tool_config']), to_object),
961
+ )
962
+
963
+ if getv(from_object, ['labels']) is not None:
964
+ raise ValueError('labels parameter is not supported in Gemini API.')
965
+
966
+ if getv(from_object, ['cached_content']) is not None:
967
+ setv(
968
+ parent_object,
969
+ ['cachedContent'],
970
+ t.t_cached_content_name(
971
+ api_client, getv(from_object, ['cached_content'])
972
+ ),
973
+ )
974
+
975
+ if getv(from_object, ['response_modalities']) is not None:
976
+ setv(
977
+ to_object,
978
+ ['responseModalities'],
979
+ getv(from_object, ['response_modalities']),
980
+ )
981
+
982
+ if getv(from_object, ['media_resolution']) is not None:
983
+ setv(
984
+ to_object, ['mediaResolution'], getv(from_object, ['media_resolution'])
985
+ )
986
+
987
+ if getv(from_object, ['speech_config']) is not None:
988
+ setv(
989
+ to_object,
990
+ ['speechConfig'],
991
+ t.t_speech_config(getv(from_object, ['speech_config'])),
992
+ )
993
+
994
+ if getv(from_object, ['audio_timestamp']) is not None:
995
+ raise ValueError(
996
+ 'audio_timestamp parameter is not supported in Gemini API.'
997
+ )
998
+
999
+ if getv(from_object, ['thinking_config']) is not None:
1000
+ setv(to_object, ['thinkingConfig'], getv(from_object, ['thinking_config']))
1001
+
1002
+ if getv(from_object, ['image_config']) is not None:
1003
+ setv(
1004
+ to_object,
1005
+ ['imageConfig'],
1006
+ _ImageConfig_to_mldev(getv(from_object, ['image_config']), to_object),
1007
+ )
1008
+
1009
+ return to_object
1010
+
1011
+
1012
+ def _GenerateContentResponse_from_mldev(
1013
+ from_object: Union[dict[str, Any], object],
1014
+ parent_object: Optional[dict[str, Any]] = None,
1015
+ ) -> dict[str, Any]:
415
1016
  to_object: dict[str, Any] = {}
416
- if getv(from_object, ['details']) is not None:
417
- setv(to_object, ['details'], getv(from_object, ['details']))
1017
+ if getv(from_object, ['sdkHttpResponse']) is not None:
1018
+ setv(
1019
+ to_object, ['sdk_http_response'], getv(from_object, ['sdkHttpResponse'])
1020
+ )
1021
+
1022
+ if getv(from_object, ['candidates']) is not None:
1023
+ setv(
1024
+ to_object,
1025
+ ['candidates'],
1026
+ [
1027
+ _Candidate_from_mldev(item, to_object)
1028
+ for item in getv(from_object, ['candidates'])
1029
+ ],
1030
+ )
1031
+
1032
+ if getv(from_object, ['modelVersion']) is not None:
1033
+ setv(to_object, ['model_version'], getv(from_object, ['modelVersion']))
1034
+
1035
+ if getv(from_object, ['promptFeedback']) is not None:
1036
+ setv(to_object, ['prompt_feedback'], getv(from_object, ['promptFeedback']))
418
1037
 
419
- if getv(from_object, ['code']) is not None:
420
- setv(to_object, ['code'], getv(from_object, ['code']))
1038
+ if getv(from_object, ['responseId']) is not None:
1039
+ setv(to_object, ['response_id'], getv(from_object, ['responseId']))
421
1040
 
422
- if getv(from_object, ['message']) is not None:
423
- setv(to_object, ['message'], getv(from_object, ['message']))
1041
+ if getv(from_object, ['usageMetadata']) is not None:
1042
+ setv(to_object, ['usage_metadata'], getv(from_object, ['usageMetadata']))
424
1043
 
425
1044
  return to_object
426
1045
 
427
1046
 
428
- def _BatchJobSource_from_mldev(
1047
+ def _GetBatchJobParameters_to_mldev(
429
1048
  api_client: BaseApiClient,
430
- from_object: Union[dict, object],
431
- parent_object: Optional[dict] = None,
432
- ) -> dict:
1049
+ from_object: Union[dict[str, Any], object],
1050
+ parent_object: Optional[dict[str, Any]] = None,
1051
+ ) -> dict[str, Any]:
433
1052
  to_object: dict[str, Any] = {}
1053
+ if getv(from_object, ['name']) is not None:
1054
+ setv(
1055
+ to_object,
1056
+ ['_url', 'name'],
1057
+ t.t_batch_job_name(api_client, getv(from_object, ['name'])),
1058
+ )
434
1059
 
435
1060
  return to_object
436
1061
 
437
1062
 
438
- def _BatchJobSource_from_vertex(
1063
+ def _GetBatchJobParameters_to_vertex(
439
1064
  api_client: BaseApiClient,
440
- from_object: Union[dict, object],
441
- parent_object: Optional[dict] = None,
442
- ) -> dict:
1065
+ from_object: Union[dict[str, Any], object],
1066
+ parent_object: Optional[dict[str, Any]] = None,
1067
+ ) -> dict[str, Any]:
443
1068
  to_object: dict[str, Any] = {}
444
- if getv(from_object, ['instancesFormat']) is not None:
445
- setv(to_object, ['format'], getv(from_object, ['instancesFormat']))
1069
+ if getv(from_object, ['name']) is not None:
1070
+ setv(
1071
+ to_object,
1072
+ ['_url', 'name'],
1073
+ t.t_batch_job_name(api_client, getv(from_object, ['name'])),
1074
+ )
446
1075
 
447
- if getv(from_object, ['gcsSource', 'uris']) is not None:
448
- setv(to_object, ['gcs_uri'], getv(from_object, ['gcsSource', 'uris']))
1076
+ return to_object
449
1077
 
450
- if getv(from_object, ['bigquerySource', 'inputUri']) is not None:
1078
+
1079
+ def _GoogleMaps_to_mldev(
1080
+ from_object: Union[dict[str, Any], object],
1081
+ parent_object: Optional[dict[str, Any]] = None,
1082
+ ) -> dict[str, Any]:
1083
+ to_object: dict[str, Any] = {}
1084
+ if getv(from_object, ['auth_config']) is not None:
1085
+ raise ValueError('auth_config parameter is not supported in Gemini API.')
1086
+
1087
+ if getv(from_object, ['enable_widget']) is not None:
1088
+ setv(to_object, ['enableWidget'], getv(from_object, ['enable_widget']))
1089
+
1090
+ return to_object
1091
+
1092
+
1093
+ def _GoogleSearch_to_mldev(
1094
+ from_object: Union[dict[str, Any], object],
1095
+ parent_object: Optional[dict[str, Any]] = None,
1096
+ ) -> dict[str, Any]:
1097
+ to_object: dict[str, Any] = {}
1098
+ if getv(from_object, ['exclude_domains']) is not None:
1099
+ raise ValueError(
1100
+ 'exclude_domains parameter is not supported in Gemini API.'
1101
+ )
1102
+
1103
+ if getv(from_object, ['blocking_confidence']) is not None:
1104
+ raise ValueError(
1105
+ 'blocking_confidence parameter is not supported in Gemini API.'
1106
+ )
1107
+
1108
+ if getv(from_object, ['time_range_filter']) is not None:
1109
+ setv(
1110
+ to_object, ['timeRangeFilter'], getv(from_object, ['time_range_filter'])
1111
+ )
1112
+
1113
+ return to_object
1114
+
1115
+
1116
+ def _ImageConfig_to_mldev(
1117
+ from_object: Union[dict[str, Any], object],
1118
+ parent_object: Optional[dict[str, Any]] = None,
1119
+ ) -> dict[str, Any]:
1120
+ to_object: dict[str, Any] = {}
1121
+ if getv(from_object, ['aspect_ratio']) is not None:
1122
+ setv(to_object, ['aspectRatio'], getv(from_object, ['aspect_ratio']))
1123
+
1124
+ if getv(from_object, ['image_size']) is not None:
1125
+ setv(to_object, ['imageSize'], getv(from_object, ['image_size']))
1126
+
1127
+ if getv(from_object, ['output_mime_type']) is not None:
1128
+ raise ValueError(
1129
+ 'output_mime_type parameter is not supported in Gemini API.'
1130
+ )
1131
+
1132
+ if getv(from_object, ['output_compression_quality']) is not None:
1133
+ raise ValueError(
1134
+ 'output_compression_quality parameter is not supported in Gemini API.'
1135
+ )
1136
+
1137
+ return to_object
1138
+
1139
+
1140
+ def _InlinedRequest_to_mldev(
1141
+ api_client: BaseApiClient,
1142
+ from_object: Union[dict[str, Any], object],
1143
+ parent_object: Optional[dict[str, Any]] = None,
1144
+ ) -> dict[str, Any]:
1145
+ to_object: dict[str, Any] = {}
1146
+ if getv(from_object, ['model']) is not None:
451
1147
  setv(
452
1148
  to_object,
453
- ['bigquery_uri'],
454
- getv(from_object, ['bigquerySource', 'inputUri']),
1149
+ ['request', 'model'],
1150
+ t.t_model(api_client, getv(from_object, ['model'])),
1151
+ )
1152
+
1153
+ if getv(from_object, ['contents']) is not None:
1154
+ setv(
1155
+ to_object,
1156
+ ['request', 'contents'],
1157
+ [
1158
+ _Content_to_mldev(item, to_object)
1159
+ for item in t.t_contents(getv(from_object, ['contents']))
1160
+ ],
455
1161
  )
456
1162
 
1163
+ if getv(from_object, ['metadata']) is not None:
1164
+ setv(to_object, ['metadata'], getv(from_object, ['metadata']))
1165
+
1166
+ if getv(from_object, ['config']) is not None:
1167
+ setv(
1168
+ to_object,
1169
+ ['request', 'generationConfig'],
1170
+ _GenerateContentConfig_to_mldev(
1171
+ api_client,
1172
+ getv(from_object, ['config']),
1173
+ getv(to_object, ['request'], default_value={}),
1174
+ ),
1175
+ )
1176
+
1177
+ return to_object
1178
+
1179
+
1180
+ def _InlinedResponse_from_mldev(
1181
+ from_object: Union[dict[str, Any], object],
1182
+ parent_object: Optional[dict[str, Any]] = None,
1183
+ ) -> dict[str, Any]:
1184
+ to_object: dict[str, Any] = {}
1185
+ if getv(from_object, ['response']) is not None:
1186
+ setv(
1187
+ to_object,
1188
+ ['response'],
1189
+ _GenerateContentResponse_from_mldev(
1190
+ getv(from_object, ['response']), to_object
1191
+ ),
1192
+ )
1193
+
1194
+ if getv(from_object, ['error']) is not None:
1195
+ setv(to_object, ['error'], getv(from_object, ['error']))
1196
+
1197
+ return to_object
1198
+
1199
+
1200
+ def _ListBatchJobsConfig_to_mldev(
1201
+ from_object: Union[dict[str, Any], object],
1202
+ parent_object: Optional[dict[str, Any]] = None,
1203
+ ) -> dict[str, Any]:
1204
+ to_object: dict[str, Any] = {}
1205
+
1206
+ if getv(from_object, ['page_size']) is not None:
1207
+ setv(
1208
+ parent_object, ['_query', 'pageSize'], getv(from_object, ['page_size'])
1209
+ )
1210
+
1211
+ if getv(from_object, ['page_token']) is not None:
1212
+ setv(
1213
+ parent_object,
1214
+ ['_query', 'pageToken'],
1215
+ getv(from_object, ['page_token']),
1216
+ )
1217
+
1218
+ if getv(from_object, ['filter']) is not None:
1219
+ raise ValueError('filter parameter is not supported in Gemini API.')
1220
+
1221
+ return to_object
1222
+
1223
+
1224
+ def _ListBatchJobsConfig_to_vertex(
1225
+ from_object: Union[dict[str, Any], object],
1226
+ parent_object: Optional[dict[str, Any]] = None,
1227
+ ) -> dict[str, Any]:
1228
+ to_object: dict[str, Any] = {}
1229
+
1230
+ if getv(from_object, ['page_size']) is not None:
1231
+ setv(
1232
+ parent_object, ['_query', 'pageSize'], getv(from_object, ['page_size'])
1233
+ )
1234
+
1235
+ if getv(from_object, ['page_token']) is not None:
1236
+ setv(
1237
+ parent_object,
1238
+ ['_query', 'pageToken'],
1239
+ getv(from_object, ['page_token']),
1240
+ )
1241
+
1242
+ if getv(from_object, ['filter']) is not None:
1243
+ setv(parent_object, ['_query', 'filter'], getv(from_object, ['filter']))
1244
+
457
1245
  return to_object
458
1246
 
459
1247
 
460
- def _BatchJobDestination_from_mldev(
461
- api_client: BaseApiClient,
462
- from_object: Union[dict, object],
463
- parent_object: Optional[dict] = None,
464
- ) -> dict:
1248
+ def _ListBatchJobsParameters_to_mldev(
1249
+ from_object: Union[dict[str, Any], object],
1250
+ parent_object: Optional[dict[str, Any]] = None,
1251
+ ) -> dict[str, Any]:
465
1252
  to_object: dict[str, Any] = {}
1253
+ if getv(from_object, ['config']) is not None:
1254
+ _ListBatchJobsConfig_to_mldev(getv(from_object, ['config']), to_object)
466
1255
 
467
1256
  return to_object
468
1257
 
469
1258
 
470
- def _BatchJobDestination_from_vertex(
471
- api_client: BaseApiClient,
472
- from_object: Union[dict, object],
473
- parent_object: Optional[dict] = None,
474
- ) -> dict:
1259
+ def _ListBatchJobsParameters_to_vertex(
1260
+ from_object: Union[dict[str, Any], object],
1261
+ parent_object: Optional[dict[str, Any]] = None,
1262
+ ) -> dict[str, Any]:
475
1263
  to_object: dict[str, Any] = {}
476
- if getv(from_object, ['predictionsFormat']) is not None:
477
- setv(to_object, ['format'], getv(from_object, ['predictionsFormat']))
1264
+ if getv(from_object, ['config']) is not None:
1265
+ _ListBatchJobsConfig_to_vertex(getv(from_object, ['config']), to_object)
478
1266
 
479
- if getv(from_object, ['gcsDestination', 'outputUriPrefix']) is not None:
1267
+ return to_object
1268
+
1269
+
1270
+ def _ListBatchJobsResponse_from_mldev(
1271
+ from_object: Union[dict[str, Any], object],
1272
+ parent_object: Optional[dict[str, Any]] = None,
1273
+ ) -> dict[str, Any]:
1274
+ to_object: dict[str, Any] = {}
1275
+ if getv(from_object, ['sdkHttpResponse']) is not None:
480
1276
  setv(
481
- to_object,
482
- ['gcs_uri'],
483
- getv(from_object, ['gcsDestination', 'outputUriPrefix']),
1277
+ to_object, ['sdk_http_response'], getv(from_object, ['sdkHttpResponse'])
484
1278
  )
485
1279
 
486
- if getv(from_object, ['bigqueryDestination', 'outputUri']) is not None:
1280
+ if getv(from_object, ['nextPageToken']) is not None:
1281
+ setv(to_object, ['next_page_token'], getv(from_object, ['nextPageToken']))
1282
+
1283
+ if getv(from_object, ['operations']) is not None:
487
1284
  setv(
488
1285
  to_object,
489
- ['bigquery_uri'],
490
- getv(from_object, ['bigqueryDestination', 'outputUri']),
1286
+ ['batch_jobs'],
1287
+ [
1288
+ _BatchJob_from_mldev(item, to_object)
1289
+ for item in getv(from_object, ['operations'])
1290
+ ],
491
1291
  )
492
1292
 
493
1293
  return to_object
494
1294
 
495
1295
 
496
- def _BatchJob_from_mldev(
497
- api_client: BaseApiClient,
498
- from_object: Union[dict, object],
499
- parent_object: Optional[dict] = None,
500
- ) -> dict:
1296
+ def _ListBatchJobsResponse_from_vertex(
1297
+ from_object: Union[dict[str, Any], object],
1298
+ parent_object: Optional[dict[str, Any]] = None,
1299
+ ) -> dict[str, Any]:
501
1300
  to_object: dict[str, Any] = {}
1301
+ if getv(from_object, ['sdkHttpResponse']) is not None:
1302
+ setv(
1303
+ to_object, ['sdk_http_response'], getv(from_object, ['sdkHttpResponse'])
1304
+ )
502
1305
 
503
- return to_object
1306
+ if getv(from_object, ['nextPageToken']) is not None:
1307
+ setv(to_object, ['next_page_token'], getv(from_object, ['nextPageToken']))
504
1308
 
1309
+ if getv(from_object, ['batchPredictionJobs']) is not None:
1310
+ setv(
1311
+ to_object,
1312
+ ['batch_jobs'],
1313
+ [
1314
+ _BatchJob_from_vertex(item, to_object)
1315
+ for item in getv(from_object, ['batchPredictionJobs'])
1316
+ ],
1317
+ )
505
1318
 
506
- def _BatchJob_from_vertex(
507
- api_client: BaseApiClient,
508
- from_object: Union[dict, object],
509
- parent_object: Optional[dict] = None,
510
- ) -> dict:
511
- to_object: dict[str, Any] = {}
512
- if getv(from_object, ['name']) is not None:
513
- setv(to_object, ['name'], getv(from_object, ['name']))
1319
+ return to_object
514
1320
 
515
- if getv(from_object, ['displayName']) is not None:
516
- setv(to_object, ['display_name'], getv(from_object, ['displayName']))
517
1321
 
518
- if getv(from_object, ['state']) is not None:
519
- setv(to_object, ['state'], getv(from_object, ['state']))
1322
+ def _Part_to_mldev(
1323
+ from_object: Union[dict[str, Any], object],
1324
+ parent_object: Optional[dict[str, Any]] = None,
1325
+ ) -> dict[str, Any]:
1326
+ to_object: dict[str, Any] = {}
1327
+ if getv(from_object, ['media_resolution']) is not None:
1328
+ setv(
1329
+ to_object, ['mediaResolution'], getv(from_object, ['media_resolution'])
1330
+ )
520
1331
 
521
- if getv(from_object, ['error']) is not None:
1332
+ if getv(from_object, ['code_execution_result']) is not None:
522
1333
  setv(
523
1334
  to_object,
524
- ['error'],
525
- _JobError_from_vertex(
526
- api_client, getv(from_object, ['error']), to_object
527
- ),
1335
+ ['codeExecutionResult'],
1336
+ getv(from_object, ['code_execution_result']),
528
1337
  )
529
1338
 
530
- if getv(from_object, ['createTime']) is not None:
531
- setv(to_object, ['create_time'], getv(from_object, ['createTime']))
532
-
533
- if getv(from_object, ['startTime']) is not None:
534
- setv(to_object, ['start_time'], getv(from_object, ['startTime']))
1339
+ if getv(from_object, ['executable_code']) is not None:
1340
+ setv(to_object, ['executableCode'], getv(from_object, ['executable_code']))
535
1341
 
536
- if getv(from_object, ['endTime']) is not None:
537
- setv(to_object, ['end_time'], getv(from_object, ['endTime']))
1342
+ if getv(from_object, ['file_data']) is not None:
1343
+ setv(
1344
+ to_object,
1345
+ ['fileData'],
1346
+ _FileData_to_mldev(getv(from_object, ['file_data']), to_object),
1347
+ )
538
1348
 
539
- if getv(from_object, ['updateTime']) is not None:
540
- setv(to_object, ['update_time'], getv(from_object, ['updateTime']))
1349
+ if getv(from_object, ['function_call']) is not None:
1350
+ setv(
1351
+ to_object,
1352
+ ['functionCall'],
1353
+ _FunctionCall_to_mldev(getv(from_object, ['function_call']), to_object),
1354
+ )
541
1355
 
542
- if getv(from_object, ['model']) is not None:
543
- setv(to_object, ['model'], getv(from_object, ['model']))
1356
+ if getv(from_object, ['function_response']) is not None:
1357
+ setv(
1358
+ to_object,
1359
+ ['functionResponse'],
1360
+ getv(from_object, ['function_response']),
1361
+ )
544
1362
 
545
- if getv(from_object, ['inputConfig']) is not None:
1363
+ if getv(from_object, ['inline_data']) is not None:
546
1364
  setv(
547
1365
  to_object,
548
- ['src'],
549
- _BatchJobSource_from_vertex(
550
- api_client, getv(from_object, ['inputConfig']), to_object
551
- ),
1366
+ ['inlineData'],
1367
+ _Blob_to_mldev(getv(from_object, ['inline_data']), to_object),
552
1368
  )
553
1369
 
554
- if getv(from_object, ['outputConfig']) is not None:
1370
+ if getv(from_object, ['text']) is not None:
1371
+ setv(to_object, ['text'], getv(from_object, ['text']))
1372
+
1373
+ if getv(from_object, ['thought']) is not None:
1374
+ setv(to_object, ['thought'], getv(from_object, ['thought']))
1375
+
1376
+ if getv(from_object, ['thought_signature']) is not None:
555
1377
  setv(
556
1378
  to_object,
557
- ['dest'],
558
- _BatchJobDestination_from_vertex(
559
- api_client, getv(from_object, ['outputConfig']), to_object
560
- ),
1379
+ ['thoughtSignature'],
1380
+ getv(from_object, ['thought_signature']),
561
1381
  )
562
1382
 
1383
+ if getv(from_object, ['video_metadata']) is not None:
1384
+ setv(to_object, ['videoMetadata'], getv(from_object, ['video_metadata']))
1385
+
563
1386
  return to_object
564
1387
 
565
1388
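Converters like `_Part_to_mldev` above are built almost entirely out of the `getv`/`setv` path helpers imported from `._common` at the top of this module. A rough sketch of the nested-dict behavior those calls rely on (a simplified stand-in, not the SDK's actual helpers, which also handle lists and model objects):

.. code-block:: python

    from typing import Any, Optional, Union

    def getv(obj: Union[dict, object], path: list) -> Optional[Any]:
        # Walk the key path through nested dicts; None if any key is absent.
        for key in path:
            if not isinstance(obj, dict) or key not in obj:
                return None
            obj = obj[key]
        return obj

    def setv(obj: dict, path: list, value: Any) -> None:
        # Create intermediate dicts as needed, then assign the leaf key.
        for key in path[:-1]:
            obj = obj.setdefault(key, {})
        obj[path[-1]] = value

    part: dict = {}
    setv(part, ['codeExecutionResult', 'output'], 'ok')
    assert getv(part, ['codeExecutionResult', 'output']) == 'ok'
    assert getv(part, ['missing', 'key']) is None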
 
566
- def _ListBatchJobsResponse_from_mldev(
567
- api_client: BaseApiClient,
568
- from_object: Union[dict, object],
569
- parent_object: Optional[dict] = None,
570
- ) -> dict:
1389
+ def _SafetySetting_to_mldev(
1390
+ from_object: Union[dict[str, Any], object],
1391
+ parent_object: Optional[dict[str, Any]] = None,
1392
+ ) -> dict[str, Any]:
571
1393
  to_object: dict[str, Any] = {}
572
- if getv(from_object, ['nextPageToken']) is not None:
573
- setv(to_object, ['next_page_token'], getv(from_object, ['nextPageToken']))
1394
+ if getv(from_object, ['category']) is not None:
1395
+ setv(to_object, ['category'], getv(from_object, ['category']))
1396
+
1397
+ if getv(from_object, ['method']) is not None:
1398
+ raise ValueError('method parameter is not supported in Gemini API.')
1399
+
1400
+ if getv(from_object, ['threshold']) is not None:
1401
+ setv(to_object, ['threshold'], getv(from_object, ['threshold']))
574
1402
 
575
1403
  return to_object
576
1404
 
577
1405
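Note the failure mode introduced above: the Gemini API path rejects the Vertex-only `method` field outright instead of silently dropping it. A self-contained sketch of that gate (an illustrative reimplementation, not the module's actual helper):

.. code-block:: python

    from typing import Any

    def safety_setting_to_mldev(setting: dict) -> dict[str, Any]:
        # Mirrors the gate above: 'method' is a Vertex AI-only field.
        if setting.get('method') is not None:
            raise ValueError('method parameter is not supported in Gemini API.')
        return {k: v for k, v in setting.items()
                if k in ('category', 'threshold') and v is not None}

    try:
        safety_setting_to_mldev({
            'category': 'HARM_CATEGORY_HARASSMENT',
            'method': 'SEVERITY',  # Vertex AI-only knob
            'threshold': 'BLOCK_ONLY_HIGH',
        })
    except ValueError as err:
        print(err)  # method parameter is not supported in Gemini API.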
 
578
- def _ListBatchJobsResponse_from_vertex(
579
- api_client: BaseApiClient,
580
- from_object: Union[dict, object],
581
- parent_object: Optional[dict] = None,
582
- ) -> dict:
1406
+ def _ToolConfig_to_mldev(
1407
+ from_object: Union[dict[str, Any], object],
1408
+ parent_object: Optional[dict[str, Any]] = None,
1409
+ ) -> dict[str, Any]:
583
1410
  to_object: dict[str, Any] = {}
584
- if getv(from_object, ['nextPageToken']) is not None:
585
- setv(to_object, ['next_page_token'], getv(from_object, ['nextPageToken']))
586
-
587
- if getv(from_object, ['batchPredictionJobs']) is not None:
1411
+ if getv(from_object, ['function_calling_config']) is not None:
588
1412
  setv(
589
1413
  to_object,
590
- ['batch_jobs'],
591
- [
592
- _BatchJob_from_vertex(api_client, item, to_object)
593
- for item in getv(from_object, ['batchPredictionJobs'])
594
- ],
1414
+ ['functionCallingConfig'],
1415
+ _FunctionCallingConfig_to_mldev(
1416
+ getv(from_object, ['function_calling_config']), to_object
1417
+ ),
1418
+ )
1419
+
1420
+ if getv(from_object, ['retrieval_config']) is not None:
1421
+ setv(
1422
+ to_object, ['retrievalConfig'], getv(from_object, ['retrieval_config'])
595
1423
  )
596
1424
 
597
1425
  return to_object
598
1426
 
599
1427
 
600
- def _DeleteResourceJob_from_mldev(
601
- api_client: BaseApiClient,
602
- from_object: Union[dict, object],
603
- parent_object: Optional[dict] = None,
604
- ) -> dict:
1428
+ def _Tool_to_mldev(
1429
+ from_object: Union[dict[str, Any], object],
1430
+ parent_object: Optional[dict[str, Any]] = None,
1431
+ ) -> dict[str, Any]:
605
1432
  to_object: dict[str, Any] = {}
1433
+ if getv(from_object, ['function_declarations']) is not None:
1434
+ setv(
1435
+ to_object,
1436
+ ['functionDeclarations'],
1437
+ [item for item in getv(from_object, ['function_declarations'])],
1438
+ )
606
1439
 
607
- return to_object
1440
+ if getv(from_object, ['retrieval']) is not None:
1441
+ raise ValueError('retrieval parameter is not supported in Gemini API.')
608
1442
 
1443
+ if getv(from_object, ['google_search_retrieval']) is not None:
1444
+ setv(
1445
+ to_object,
1446
+ ['googleSearchRetrieval'],
1447
+ getv(from_object, ['google_search_retrieval']),
1448
+ )
609
1449
 
610
- def _DeleteResourceJob_from_vertex(
611
- api_client: BaseApiClient,
612
- from_object: Union[dict, object],
613
- parent_object: Optional[dict] = None,
614
- ) -> dict:
615
- to_object: dict[str, Any] = {}
616
- if getv(from_object, ['name']) is not None:
617
- setv(to_object, ['name'], getv(from_object, ['name']))
1450
+ if getv(from_object, ['computer_use']) is not None:
1451
+ setv(to_object, ['computerUse'], getv(from_object, ['computer_use']))
618
1452
 
619
- if getv(from_object, ['done']) is not None:
620
- setv(to_object, ['done'], getv(from_object, ['done']))
1453
+ if getv(from_object, ['file_search']) is not None:
1454
+ setv(to_object, ['fileSearch'], getv(from_object, ['file_search']))
621
1455
 
622
- if getv(from_object, ['error']) is not None:
1456
+ if getv(from_object, ['code_execution']) is not None:
1457
+ setv(to_object, ['codeExecution'], getv(from_object, ['code_execution']))
1458
+
1459
+ if getv(from_object, ['enterprise_web_search']) is not None:
1460
+ raise ValueError(
1461
+ 'enterprise_web_search parameter is not supported in Gemini API.'
1462
+ )
1463
+
1464
+ if getv(from_object, ['google_maps']) is not None:
623
1465
  setv(
624
1466
  to_object,
625
- ['error'],
626
- _JobError_from_vertex(
627
- api_client, getv(from_object, ['error']), to_object
628
- ),
1467
+ ['googleMaps'],
1468
+ _GoogleMaps_to_mldev(getv(from_object, ['google_maps']), to_object),
1469
+ )
1470
+
1471
+ if getv(from_object, ['google_search']) is not None:
1472
+ setv(
1473
+ to_object,
1474
+ ['googleSearch'],
1475
+ _GoogleSearch_to_mldev(getv(from_object, ['google_search']), to_object),
629
1476
  )
630
1477
 
1478
+ if getv(from_object, ['url_context']) is not None:
1479
+ setv(to_object, ['urlContext'], getv(from_object, ['url_context']))
1480
+
631
1481
  return to_object
632
1482
 
633
1483
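`_Tool_to_mldev` applies the same policy at the tool level: snake_case fields are camelCased, and Vertex-only tools (`retrieval`, `enterprise_web_search`) raise rather than being dropped. A condensed sketch of that behavior (illustrative reimplementation with a subset of the renames):

.. code-block:: python

    VERTEX_ONLY = ('retrieval', 'enterprise_web_search')
    RENAMES = {
        'google_search': 'googleSearch',
        'url_context': 'urlContext',
        'code_execution': 'codeExecution',
        'computer_use': 'computerUse',
        'file_search': 'fileSearch',
    }

    def tool_to_mldev(tool: dict) -> dict:
        for field in VERTEX_ONLY:
            if tool.get(field) is not None:
                raise ValueError(
                    f'{field} parameter is not supported in Gemini API.')
        return {RENAMES.get(k, k): v for k, v in tool.items() if v is not None}

    print(tool_to_mldev({'google_search': {}, 'retrieval': None}))
    # {'googleSearch': {}}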
 
@@ -636,8 +1486,8 @@ class Batches(_api_module.BaseModule):
636
1486
  def _create(
637
1487
  self,
638
1488
  *,
639
- model: str,
640
- src: str,
1489
+ model: Optional[str] = None,
1490
+ src: Union[types.BatchJobSourceUnion, types.BatchJobSourceUnionDict],
641
1491
  config: Optional[types.CreateBatchJobConfigOrDict] = None,
642
1492
  ) -> types.BatchJob:
643
1493
  parameter_model = types._CreateBatchJobParameters(
@@ -647,9 +1497,8 @@ class Batches(_api_module.BaseModule):
647
1497
  )
648
1498
 
649
1499
  request_url_dict: Optional[dict[str, str]]
650
- if not self._api_client.vertexai:
651
- raise ValueError('This method is only supported in the Vertex AI client.')
652
- else:
1500
+
1501
+ if self._api_client.vertexai:
653
1502
  request_dict = _CreateBatchJobParameters_to_vertex(
654
1503
  self._api_client, parameter_model
655
1504
  )
@@ -658,34 +1507,107 @@ class Batches(_api_module.BaseModule):
658
1507
  path = 'batchPredictionJobs'.format_map(request_url_dict)
659
1508
  else:
660
1509
  path = 'batchPredictionJobs'
661
-
1510
+ else:
1511
+ request_dict = _CreateBatchJobParameters_to_mldev(
1512
+ self._api_client, parameter_model
1513
+ )
1514
+ request_url_dict = request_dict.get('_url')
1515
+ if request_url_dict:
1516
+ path = '{model}:batchGenerateContent'.format_map(request_url_dict)
1517
+ else:
1518
+ path = '{model}:batchGenerateContent'
662
1519
  query_params = request_dict.get('_query')
663
1520
  if query_params:
664
1521
  path = f'{path}?{urlencode(query_params)}'
665
1522
  # TODO: remove the hack that pops config.
666
1523
  request_dict.pop('config', None)
667
1524
 
668
- http_options: Optional[types.HttpOptionsOrDict] = None
669
- if isinstance(config, dict):
670
- http_options = config.get('http_options', None)
671
- elif hasattr(config, 'http_options') and config is not None:
672
- http_options = config.http_options
1525
+ http_options: Optional[types.HttpOptions] = None
1526
+ if (
1527
+ parameter_model.config is not None
1528
+ and parameter_model.config.http_options is not None
1529
+ ):
1530
+ http_options = parameter_model.config.http_options
673
1531
 
674
1532
  request_dict = _common.convert_to_dict(request_dict)
675
1533
  request_dict = _common.encode_unserializable_types(request_dict)
676
1534
 
677
- response_dict = self._api_client.request(
1535
+ response = self._api_client.request(
678
1536
  'post', path, request_dict, http_options
679
1537
  )
680
1538
 
1539
+ response_dict = {} if not response.body else json.loads(response.body)
1540
+
1541
+ if self._api_client.vertexai:
1542
+ response_dict = _BatchJob_from_vertex(response_dict)
1543
+
1544
+ if not self._api_client.vertexai:
1545
+ response_dict = _BatchJob_from_mldev(response_dict)
1546
+
1547
+ return_value = types.BatchJob._from_response(
1548
+ response=response_dict, kwargs=parameter_model.model_dump()
1549
+ )
1550
+
1551
+ self._api_client._verify_response(return_value)
1552
+ return return_value
1553
+
1554
+ def _create_embeddings(
1555
+ self,
1556
+ *,
1557
+ model: Optional[str] = None,
1558
+ src: types.EmbeddingsBatchJobSourceOrDict,
1559
+ config: Optional[types.CreateEmbeddingsBatchJobConfigOrDict] = None,
1560
+ ) -> types.BatchJob:
1561
+ parameter_model = types._CreateEmbeddingsBatchJobParameters(
1562
+ model=model,
1563
+ src=src,
1564
+ config=config,
1565
+ )
1566
+
1567
+ request_url_dict: Optional[dict[str, str]]
681
1568
  if self._api_client.vertexai:
682
- response_dict = _BatchJob_from_vertex(self._api_client, response_dict)
1569
+ raise ValueError(
1570
+ 'This method is only supported in the Gemini Developer client.'
1571
+ )
683
1572
  else:
684
- response_dict = _BatchJob_from_mldev(self._api_client, response_dict)
1573
+ request_dict = _CreateEmbeddingsBatchJobParameters_to_mldev(
1574
+ self._api_client, parameter_model
1575
+ )
1576
+ request_url_dict = request_dict.get('_url')
1577
+ if request_url_dict:
1578
+ path = '{model}:asyncBatchEmbedContent'.format_map(request_url_dict)
1579
+ else:
1580
+ path = '{model}:asyncBatchEmbedContent'
1581
+
1582
+ query_params = request_dict.get('_query')
1583
+ if query_params:
1584
+ path = f'{path}?{urlencode(query_params)}'
1585
+ # TODO: remove the hack that pops config.
1586
+ request_dict.pop('config', None)
1587
+
1588
+ http_options: Optional[types.HttpOptions] = None
1589
+ if (
1590
+ parameter_model.config is not None
1591
+ and parameter_model.config.http_options is not None
1592
+ ):
1593
+ http_options = parameter_model.config.http_options
1594
+
1595
+ request_dict = _common.convert_to_dict(request_dict)
1596
+ request_dict = _common.encode_unserializable_types(request_dict)
1597
+
1598
+ response = self._api_client.request(
1599
+ 'post', path, request_dict, http_options
1600
+ )
1601
+
1602
+ response_dict = {} if not response.body else json.loads(response.body)
1603
+
1604
+ if not self._api_client.vertexai:
1605
+ response_dict = _BatchJob_from_mldev(response_dict)
685
1606
 
686
1607
  return_value = types.BatchJob._from_response(
687
1608
  response=response_dict, kwargs=parameter_model.model_dump()
688
1609
  )
1610
+
689
1611
  self._api_client._verify_response(return_value)
690
1612
  return return_value
691
1613
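A structural change that runs through every method in this file: `_api_client.request()` now returns the raw HTTP response instead of a pre-parsed dict, and each method decodes the JSON body itself, treating an empty body as `{}` so the converters never see `None`. The guard in isolation (with a stand-in response type; the SDK's actual response class lives in `types`):

.. code-block:: python

    import json
    from dataclasses import dataclass
    from typing import Optional

    @dataclass
    class StubResponse:  # stand-in for the SDK's HTTP response object
        body: Optional[str]

    def decode(response: StubResponse) -> dict:
        # Same guard as above: empty or missing body -> empty dict.
        return {} if not response.body else json.loads(response.body)

    print(decode(StubResponse(body=None)))                       # {}
    print(decode(StubResponse(body='{"name": "batches/abc"}')))  # {'name': ...}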
 
@@ -697,7 +1619,8 @@ class Batches(_api_module.BaseModule):
697
1619
  Args:
698
1620
  name (str): A fully-qualified BatchJob resource name or ID.
699
1621
  Example: "projects/.../locations/.../batchPredictionJobs/456" or "456"
700
- when project and location are initialized in the client.
1622
+ when project and location are initialized in the Vertex AI client, or
1623
+ "batches/abc" when using the Gemini Developer API client.
701
1624
 
702
1625
  Returns:
703
1626
  A BatchJob object that contains details about the batch job.
@@ -716,9 +1639,8 @@ class Batches(_api_module.BaseModule):
716
1639
  )
717
1640
 
718
1641
  request_url_dict: Optional[dict[str, str]]
719
- if not self._api_client.vertexai:
720
- raise ValueError('This method is only supported in the Vertex AI client.')
721
- else:
1642
+
1643
+ if self._api_client.vertexai:
722
1644
  request_dict = _GetBatchJobParameters_to_vertex(
723
1645
  self._api_client, parameter_model
724
1646
  )
@@ -727,34 +1649,45 @@ class Batches(_api_module.BaseModule):
727
1649
  path = 'batchPredictionJobs/{name}'.format_map(request_url_dict)
728
1650
  else:
729
1651
  path = 'batchPredictionJobs/{name}'
730
-
1652
+ else:
1653
+ request_dict = _GetBatchJobParameters_to_mldev(
1654
+ self._api_client, parameter_model
1655
+ )
1656
+ request_url_dict = request_dict.get('_url')
1657
+ if request_url_dict:
1658
+ path = 'batches/{name}'.format_map(request_url_dict)
1659
+ else:
1660
+ path = 'batches/{name}'
731
1661
  query_params = request_dict.get('_query')
732
1662
  if query_params:
733
1663
  path = f'{path}?{urlencode(query_params)}'
734
1664
  # TODO: remove the hack that pops config.
735
1665
  request_dict.pop('config', None)
736
1666
 
737
- http_options: Optional[types.HttpOptionsOrDict] = None
738
- if isinstance(config, dict):
739
- http_options = config.get('http_options', None)
740
- elif hasattr(config, 'http_options') and config is not None:
741
- http_options = config.http_options
1667
+ http_options: Optional[types.HttpOptions] = None
1668
+ if (
1669
+ parameter_model.config is not None
1670
+ and parameter_model.config.http_options is not None
1671
+ ):
1672
+ http_options = parameter_model.config.http_options
742
1673
 
743
1674
  request_dict = _common.convert_to_dict(request_dict)
744
1675
  request_dict = _common.encode_unserializable_types(request_dict)
745
1676
 
746
- response_dict = self._api_client.request(
747
- 'get', path, request_dict, http_options
748
- )
1677
+ response = self._api_client.request('get', path, request_dict, http_options)
1678
+
1679
+ response_dict = {} if not response.body else json.loads(response.body)
749
1680
 
750
1681
  if self._api_client.vertexai:
751
- response_dict = _BatchJob_from_vertex(self._api_client, response_dict)
752
- else:
753
- response_dict = _BatchJob_from_mldev(self._api_client, response_dict)
1682
+ response_dict = _BatchJob_from_vertex(response_dict)
1683
+
1684
+ if not self._api_client.vertexai:
1685
+ response_dict = _BatchJob_from_mldev(response_dict)
754
1686
 
755
1687
  return_value = types.BatchJob._from_response(
756
1688
  response=response_dict, kwargs=parameter_model.model_dump()
757
1689
  )
1690
+
758
1691
  self._api_client._verify_response(return_value)
759
1692
  return return_value
760
1693
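With the mldev branch wired up, `get` now works against both backends; only the resource-name shape differs. A usage sketch, assuming credentials are configured in the environment and using placeholder names:

.. code-block:: python

    from google import genai

    # Gemini Developer API: names look like "batches/abc".
    client = genai.Client()
    job = client.batches.get(name='batches/abc')
    print(job.state)

    # Vertex AI: a bare ID is resolved under the configured project/location.
    vertex_client = genai.Client(
        vertexai=True, project='my-project', location='us-central1'
    )
    print(vertex_client.batches.get(name='456').name)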
 
@@ -770,8 +1703,9 @@ class Batches(_api_module.BaseModule):
770
1703
 
771
1704
  Args:
772
1705
  name (str): A fully-qualified BatchJob resource name or ID.
773
- Example: "projects/.../locations/.../batchPredictionJobs/123456789" or
774
- "123456789" when project and location are initialized in the client.
1706
+ Example: "projects/.../locations/.../batchPredictionJobs/456" or "456"
1707
+ when project and location are initialized in the Vertex AI client, or
1708
+ "batches/abc" when using the Gemini Developer API client.
775
1709
 
776
1710
  Usage:
777
1711
 
@@ -786,9 +1720,8 @@ class Batches(_api_module.BaseModule):
786
1720
  )
787
1721
 
788
1722
  request_url_dict: Optional[dict[str, str]]
789
- if not self._api_client.vertexai:
790
- raise ValueError('This method is only supported in the Vertex AI client.')
791
- else:
1723
+
1724
+ if self._api_client.vertexai:
792
1725
  request_dict = _CancelBatchJobParameters_to_vertex(
793
1726
  self._api_client, parameter_model
794
1727
  )
@@ -797,77 +1730,90 @@ class Batches(_api_module.BaseModule):
797
1730
  path = 'batchPredictionJobs/{name}:cancel'.format_map(request_url_dict)
798
1731
  else:
799
1732
  path = 'batchPredictionJobs/{name}:cancel'
800
-
1733
+ else:
1734
+ request_dict = _CancelBatchJobParameters_to_mldev(
1735
+ self._api_client, parameter_model
1736
+ )
1737
+ request_url_dict = request_dict.get('_url')
1738
+ if request_url_dict:
1739
+ path = 'batches/{name}:cancel'.format_map(request_url_dict)
1740
+ else:
1741
+ path = 'batches/{name}:cancel'
801
1742
  query_params = request_dict.get('_query')
802
1743
  if query_params:
803
1744
  path = f'{path}?{urlencode(query_params)}'
804
1745
  # TODO: remove the hack that pops config.
805
1746
  request_dict.pop('config', None)
806
1747
 
807
- http_options: Optional[types.HttpOptionsOrDict] = None
808
- if isinstance(config, dict):
809
- http_options = config.get('http_options', None)
810
- elif hasattr(config, 'http_options') and config is not None:
811
- http_options = config.http_options
1748
+ http_options: Optional[types.HttpOptions] = None
1749
+ if (
1750
+ parameter_model.config is not None
1751
+ and parameter_model.config.http_options is not None
1752
+ ):
1753
+ http_options = parameter_model.config.http_options
812
1754
 
813
1755
  request_dict = _common.convert_to_dict(request_dict)
814
1756
  request_dict = _common.encode_unserializable_types(request_dict)
815
1757
 
816
- response_dict = self._api_client.request(
1758
+ response = self._api_client.request(
817
1759
  'post', path, request_dict, http_options
818
1760
  )
819
1761
 
820
1762
  def _list(
821
- self, *, config: types.ListBatchJobsConfigOrDict
1763
+ self, *, config: Optional[types.ListBatchJobsConfigOrDict] = None
822
1764
  ) -> types.ListBatchJobsResponse:
823
1765
  parameter_model = types._ListBatchJobsParameters(
824
1766
  config=config,
825
1767
  )
826
1768
 
827
1769
  request_url_dict: Optional[dict[str, str]]
828
- if not self._api_client.vertexai:
829
- raise ValueError('This method is only supported in the Vertex AI client.')
830
- else:
831
- request_dict = _ListBatchJobsParameters_to_vertex(
832
- self._api_client, parameter_model
833
- )
1770
+
1771
+ if self._api_client.vertexai:
1772
+ request_dict = _ListBatchJobsParameters_to_vertex(parameter_model)
834
1773
  request_url_dict = request_dict.get('_url')
835
1774
  if request_url_dict:
836
1775
  path = 'batchPredictionJobs'.format_map(request_url_dict)
837
1776
  else:
838
1777
  path = 'batchPredictionJobs'
839
-
1778
+ else:
1779
+ request_dict = _ListBatchJobsParameters_to_mldev(parameter_model)
1780
+ request_url_dict = request_dict.get('_url')
1781
+ if request_url_dict:
1782
+ path = 'batches'.format_map(request_url_dict)
1783
+ else:
1784
+ path = 'batches'
840
1785
  query_params = request_dict.get('_query')
841
1786
  if query_params:
842
1787
  path = f'{path}?{urlencode(query_params)}'
843
1788
  # TODO: remove the hack that pops config.
844
1789
  request_dict.pop('config', None)
845
1790
 
846
- http_options: Optional[types.HttpOptionsOrDict] = None
847
- if isinstance(config, dict):
848
- http_options = config.get('http_options', None)
849
- elif hasattr(config, 'http_options') and config is not None:
850
- http_options = config.http_options
1791
+ http_options: Optional[types.HttpOptions] = None
1792
+ if (
1793
+ parameter_model.config is not None
1794
+ and parameter_model.config.http_options is not None
1795
+ ):
1796
+ http_options = parameter_model.config.http_options
851
1797
 
852
1798
  request_dict = _common.convert_to_dict(request_dict)
853
1799
  request_dict = _common.encode_unserializable_types(request_dict)
854
1800
 
855
- response_dict = self._api_client.request(
856
- 'get', path, request_dict, http_options
857
- )
1801
+ response = self._api_client.request('get', path, request_dict, http_options)
1802
+
1803
+ response_dict = {} if not response.body else json.loads(response.body)
858
1804
 
859
1805
  if self._api_client.vertexai:
860
- response_dict = _ListBatchJobsResponse_from_vertex(
861
- self._api_client, response_dict
862
- )
863
- else:
864
- response_dict = _ListBatchJobsResponse_from_mldev(
865
- self._api_client, response_dict
866
- )
1806
+ response_dict = _ListBatchJobsResponse_from_vertex(response_dict)
1807
+
1808
+ if not self._api_client.vertexai:
1809
+ response_dict = _ListBatchJobsResponse_from_mldev(response_dict)
867
1810
 
868
1811
  return_value = types.ListBatchJobsResponse._from_response(
869
1812
  response=response_dict, kwargs=parameter_model.model_dump()
870
1813
  )
1814
+ return_value.sdk_http_response = types.HttpResponse(
1815
+ headers=response.headers
1816
+ )
871
1817
  self._api_client._verify_response(return_value)
872
1818
  return return_value
873
1819
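`_list` now also attaches the raw response headers to the returned object via `sdk_http_response`, and the public `list` wraps pages in a `Pager` that fetches follow-up pages on demand. A short sketch of typical iteration (assumes a configured client):

.. code-block:: python

    from google import genai

    client = genai.Client()  # assumes credentials in the environment
    pager = client.batches.list(config={'page_size': 5})
    print(len(pager.page))   # items on the current page only
    for job in pager:        # iteration pulls later pages automatically
        print(job.name, job.state)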
 
@@ -900,66 +1846,105 @@ class Batches(_api_module.BaseModule):
900
1846
  )
901
1847
 
902
1848
  request_url_dict: Optional[dict[str, str]]
903
- if not self._api_client.vertexai:
904
- raise ValueError('This method is only supported in the Vertex AI client.')
1849
+
1850
+ if self._api_client.vertexai:
1851
+ request_dict = _DeleteBatchJobParameters_to_vertex(
1852
+ self._api_client, parameter_model
1853
+ )
1854
+ request_url_dict = request_dict.get('_url')
1855
+ if request_url_dict:
1856
+ path = 'batchPredictionJobs/{name}'.format_map(request_url_dict)
1857
+ else:
1858
+ path = 'batchPredictionJobs/{name}'
905
1859
  else:
906
- request_dict = _DeleteBatchJobParameters_to_vertex(
1860
+ request_dict = _DeleteBatchJobParameters_to_mldev(
907
1861
  self._api_client, parameter_model
908
1862
  )
909
1863
  request_url_dict = request_dict.get('_url')
910
1864
  if request_url_dict:
911
- path = 'batchPredictionJobs/{name}'.format_map(request_url_dict)
1865
+ path = 'batches/{name}'.format_map(request_url_dict)
912
1866
  else:
913
- path = 'batchPredictionJobs/{name}'
914
-
1867
+ path = 'batches/{name}'
915
1868
  query_params = request_dict.get('_query')
916
1869
  if query_params:
917
1870
  path = f'{path}?{urlencode(query_params)}'
918
1871
  # TODO: remove the hack that pops config.
919
1872
  request_dict.pop('config', None)
920
1873
 
921
- http_options: Optional[types.HttpOptionsOrDict] = None
922
- if isinstance(config, dict):
923
- http_options = config.get('http_options', None)
924
- elif hasattr(config, 'http_options') and config is not None:
925
- http_options = config.http_options
1874
+ http_options: Optional[types.HttpOptions] = None
1875
+ if (
1876
+ parameter_model.config is not None
1877
+ and parameter_model.config.http_options is not None
1878
+ ):
1879
+ http_options = parameter_model.config.http_options
926
1880
 
927
1881
  request_dict = _common.convert_to_dict(request_dict)
928
1882
  request_dict = _common.encode_unserializable_types(request_dict)
929
1883
 
930
- response_dict = self._api_client.request(
1884
+ response = self._api_client.request(
931
1885
  'delete', path, request_dict, http_options
932
1886
  )
933
1887
 
1888
+ response_dict = {} if not response.body else json.loads(response.body)
1889
+
934
1890
  if self._api_client.vertexai:
935
- response_dict = _DeleteResourceJob_from_vertex(
936
- self._api_client, response_dict
937
- )
938
- else:
939
- response_dict = _DeleteResourceJob_from_mldev(
940
- self._api_client, response_dict
941
- )
1891
+ response_dict = _DeleteResourceJob_from_vertex(response_dict)
1892
+
1893
+ if not self._api_client.vertexai:
1894
+ response_dict = _DeleteResourceJob_from_mldev(response_dict)
942
1895
 
943
1896
  return_value = types.DeleteResourceJob._from_response(
944
1897
  response=response_dict, kwargs=parameter_model.model_dump()
945
1898
  )
1899
+ return_value.sdk_http_response = types.HttpResponse(
1900
+ headers=response.headers
1901
+ )
946
1902
  self._api_client._verify_response(return_value)
947
1903
  return return_value
948
1904
 
1905
+ def list(
1906
+ self, *, config: Optional[types.ListBatchJobsConfigOrDict] = None
1907
+ ) -> Pager[types.BatchJob]:
1908
+ """Lists batch jobs.
1909
+
1910
+ Args:
1911
+ config (ListBatchJobsConfig): Optional configuration for the list request.
1912
+
1913
+ Returns:
1914
+ A Pager object that contains one page of batch jobs. When iterating over
1915
+ the pager, it automatically fetches the next page if there are more.
1916
+
1917
+ Usage:
1918
+
1919
+ .. code-block:: python
1920
+ config = {'page_size': 10}
1921
+ for batch_job in client.batches.list(config=config):
1922
+ print(batch_job.name)
1923
+ """
1924
+
1925
+ list_request = self._list
1926
+ return Pager(
1927
+ 'batch_jobs',
1928
+ list_request,
1929
+ self._list(config=config),
1930
+ config,
1931
+ )
1932
+
949
1933
  def create(
950
1934
  self,
951
1935
  *,
952
1936
  model: str,
953
- src: str,
1937
+ src: types.BatchJobSourceUnionDict,
954
1938
  config: Optional[types.CreateBatchJobConfigOrDict] = None,
955
1939
  ) -> types.BatchJob:
956
1940
  """Creates a batch job.
957
1941
 
958
1942
  Args:
959
1943
  model (str): The model to use for the batch job.
960
- src (str): The source of the batch job. Currently supports GCS URI(-s) or
961
- BigQuery URI. Example: "gs://path/to/input/data" or
962
- "bq://projectId.bqDatasetId.bqTableId".
1944
+ src: The source of the batch job. Currently Vertex AI supports GCS URI(-s)
1945
+ or BigQuery URI. Example: "gs://path/to/input/data" or
1946
+ "bq://projectId.bqDatasetId.bqTableId". Gemini Developer API supports
1947
+ List of inlined_request, or file name. Example: "files/file_name".
963
1948
  config (CreateBatchJobConfig): Optional configuration for the batch job.
964
1949
 
965
1950
  Returns:
@@ -970,42 +1955,75 @@ class Batches(_api_module.BaseModule):
970
1955
  .. code-block:: python
971
1956
 
972
1957
  batch_job = client.batches.create(
973
- model="gemini-1.5-flash",
1958
+ model="gemini-2.0-flash-001",
974
1959
  src="gs://path/to/input/data",
975
1960
  )
976
1961
  print(batch_job.state)
977
1962
  """
978
- config = _extra_utils.format_destination(src, config)
979
- return self._create(model=model, src=src, config=config)
1963
+ src = t.t_batch_job_source(self._api_client, src)
980
1964
 
981
- def list(
982
- self, *, config: Optional[types.ListBatchJobsConfigOrDict] = None
983
- ) -> Pager[types.BatchJob]:
984
- """Lists batch jobs.
1965
+ # Convert all dicts to Pydantic objects.
1966
+ parameter_model = types._CreateBatchJobParameters(
1967
+ model=model,
1968
+ src=src,
1969
+ config=config,
1970
+ )
1971
+
1972
+ if self._api_client.vertexai:
1973
+ config = _extra_utils.format_destination(src, parameter_model.config)
1974
+ return self._create(model=model, src=src, config=config)
1975
+ else:
1976
+ return self._create(model=model, src=src, config=config)
1977
+
1978
+ def create_embeddings(
1979
+ self,
1980
+ *,
1981
+ model: str,
1982
+ src: types.EmbeddingsBatchJobSourceOrDict,
1983
+ config: Optional[types.CreateEmbeddingsBatchJobConfigOrDict] = None,
1984
+ ) -> types.BatchJob:
1985
+ """**Experimental** Creates an embedding batch job.
985
1986
 
986
1987
  Args:
987
- config (ListBatchJobsConfig): Optional configuration for the list request.
1988
+ model (str): The model to use for the batch job.
1989
+ src: The Gemini Developer API supports a list of inlined requests or a
1990
+ file name. Example: "files/file_name".
1991
+ config (CreateEmbeddingsBatchJobConfig): Optional configuration for the batch job.
988
1992
 
989
1993
  Returns:
990
- A Pager object that contains one page of batch jobs. When iterating over
991
- the pager, it automatically fetches the next page if there are more.
1994
+ A BatchJob object that contains details about the batch job.
992
1995
 
993
1996
  Usage:
994
1997
 
995
1998
  .. code-block:: python
996
1999
 
997
- batch_jobs = client.batches.list(config={"page_size": 10})
998
- for batch_job in batch_jobs:
999
- print(f"Batch job: {batch_job.name}, state {batch_job.state}")
2000
+ batch_job = client.batches.create_embeddings(
2001
+ model="text-embedding-004",
2002
+ src="files/my_embedding_input",
2003
+ )
2004
+ print(batch_job.state)
1000
2005
  """
1001
- if config is None:
1002
- config = types.ListBatchJobsConfig()
1003
- return Pager(
1004
- 'batch_jobs',
1005
- self._list,
1006
- self._list(config=config),
1007
- config,
2006
+ import warnings
2007
+
2008
+ warnings.warn(
2009
+ 'batches.create_embeddings() is experimental and may change without'
2010
+ ' notice.',
2011
+ category=_common.ExperimentalWarning,
2012
+ stacklevel=2,  # Attribute the warning to the caller, not this module.
1008
2013
  )
2014
+ src = t.t_embedding_batch_job_source(self._api_client, src)
2015
+
2016
+ # Convert all dicts to Pydantic objects.
2017
+ parameter_model = types._CreateEmbeddingsBatchJobParameters(
2018
+ model=model,
2019
+ src=src,
2020
+ config=config,
2021
+ )
2022
+
2023
+ if self._api_client.vertexai:
2024
+ raise ValueError('Vertex AI does not support batches.create_embeddings.')
2025
+ else:
2026
+ return self._create_embeddings(model=model, src=src, config=config)
1009
2027
 
1010
2028
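Taken together, the new synchronous `create` accepts the full source union — GCS or BigQuery URIs on Vertex AI, inlined requests or an uploaded file name on the Gemini Developer API — while `create_embeddings` is Gemini-only and emits an `ExperimentalWarning`. A hedged sketch of both calls (model and file names are placeholders, and the inlined-request shape is an assumption based on the docstrings above):

.. code-block:: python

    import warnings
    from google import genai

    client = genai.Client()  # Gemini Developer API; assumes GOOGLE_API_KEY

    # One inlined request per list entry, instead of a GCS/BigQuery URI.
    job = client.batches.create(
        model='gemini-2.0-flash-001',
        src=[{
            'contents': [{
                'parts': [{'text': 'Say hello in one short sentence.'}],
                'role': 'user',
            }],
        }],
    )
    print(job.name, job.state)

    # create_embeddings is experimental; silence its warning deliberately.
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        emb_job = client.batches.create_embeddings(
            model='text-embedding-004',
            src='files/my_embedding_input',  # placeholder uploaded file
        )
    print(emb_job.state)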
 
1011
2029
  class AsyncBatches(_api_module.BaseModule):
@@ -1013,8 +2031,8 @@ class AsyncBatches(_api_module.BaseModule):
1013
2031
  async def _create(
1014
2032
  self,
1015
2033
  *,
1016
- model: str,
1017
- src: str,
2034
+ model: Optional[str] = None,
2035
+ src: Union[types.BatchJobSourceUnion, types.BatchJobSourceUnionDict],
1018
2036
  config: Optional[types.CreateBatchJobConfigOrDict] = None,
1019
2037
  ) -> types.BatchJob:
1020
2038
  parameter_model = types._CreateBatchJobParameters(
@@ -1024,9 +2042,8 @@ class AsyncBatches(_api_module.BaseModule):
1024
2042
  )
1025
2043
 
1026
2044
  request_url_dict: Optional[dict[str, str]]
1027
- if not self._api_client.vertexai:
1028
- raise ValueError('This method is only supported in the Vertex AI client.')
1029
- else:
2045
+
2046
+ if self._api_client.vertexai:
1030
2047
  request_dict = _CreateBatchJobParameters_to_vertex(
1031
2048
  self._api_client, parameter_model
1032
2049
  )
@@ -1035,34 +2052,107 @@ class AsyncBatches(_api_module.BaseModule):
1035
2052
  path = 'batchPredictionJobs'.format_map(request_url_dict)
1036
2053
  else:
1037
2054
  path = 'batchPredictionJobs'
1038
-
2055
+ else:
2056
+ request_dict = _CreateBatchJobParameters_to_mldev(
2057
+ self._api_client, parameter_model
2058
+ )
2059
+ request_url_dict = request_dict.get('_url')
2060
+ if request_url_dict:
2061
+ path = '{model}:batchGenerateContent'.format_map(request_url_dict)
2062
+ else:
2063
+ path = '{model}:batchGenerateContent'
1039
2064
  query_params = request_dict.get('_query')
1040
2065
  if query_params:
1041
2066
  path = f'{path}?{urlencode(query_params)}'
1042
2067
  # TODO: remove the hack that pops config.
1043
2068
  request_dict.pop('config', None)
1044
2069
 
1045
- http_options: Optional[types.HttpOptionsOrDict] = None
1046
- if isinstance(config, dict):
1047
- http_options = config.get('http_options', None)
1048
- elif hasattr(config, 'http_options') and config is not None:
1049
- http_options = config.http_options
2070
+ http_options: Optional[types.HttpOptions] = None
2071
+ if (
2072
+ parameter_model.config is not None
2073
+ and parameter_model.config.http_options is not None
2074
+ ):
2075
+ http_options = parameter_model.config.http_options
1050
2076
 
1051
2077
  request_dict = _common.convert_to_dict(request_dict)
1052
2078
  request_dict = _common.encode_unserializable_types(request_dict)
1053
2079
 
1054
- response_dict = await self._api_client.async_request(
2080
+ response = await self._api_client.async_request(
1055
2081
  'post', path, request_dict, http_options
1056
2082
  )
1057
2083
 
2084
+ response_dict = {} if not response.body else json.loads(response.body)
2085
+
2086
+ if self._api_client.vertexai:
2087
+ response_dict = _BatchJob_from_vertex(response_dict)
2088
+
2089
+ if not self._api_client.vertexai:
2090
+ response_dict = _BatchJob_from_mldev(response_dict)
2091
+
2092
+ return_value = types.BatchJob._from_response(
2093
+ response=response_dict, kwargs=parameter_model.model_dump()
2094
+ )
2095
+
2096
+ self._api_client._verify_response(return_value)
2097
+ return return_value
2098
+
2099
+ async def _create_embeddings(
2100
+ self,
2101
+ *,
2102
+ model: Optional[str] = None,
2103
+ src: types.EmbeddingsBatchJobSourceOrDict,
2104
+ config: Optional[types.CreateEmbeddingsBatchJobConfigOrDict] = None,
2105
+ ) -> types.BatchJob:
2106
+ parameter_model = types._CreateEmbeddingsBatchJobParameters(
2107
+ model=model,
2108
+ src=src,
2109
+ config=config,
2110
+ )
2111
+
2112
+ request_url_dict: Optional[dict[str, str]]
1058
2113
  if self._api_client.vertexai:
1059
- response_dict = _BatchJob_from_vertex(self._api_client, response_dict)
2114
+ raise ValueError(
2115
+ 'This method is only supported in the Gemini Developer client.'
2116
+ )
1060
2117
  else:
1061
- response_dict = _BatchJob_from_mldev(self._api_client, response_dict)
2118
+ request_dict = _CreateEmbeddingsBatchJobParameters_to_mldev(
2119
+ self._api_client, parameter_model
2120
+ )
2121
+ request_url_dict = request_dict.get('_url')
2122
+ if request_url_dict:
2123
+ path = '{model}:asyncBatchEmbedContent'.format_map(request_url_dict)
2124
+ else:
2125
+ path = '{model}:asyncBatchEmbedContent'
2126
+
2127
+ query_params = request_dict.get('_query')
2128
+ if query_params:
2129
+ path = f'{path}?{urlencode(query_params)}'
2130
+ # TODO: remove the hack that pops config.
2131
+ request_dict.pop('config', None)
2132
+
2133
+ http_options: Optional[types.HttpOptions] = None
2134
+ if (
2135
+ parameter_model.config is not None
2136
+ and parameter_model.config.http_options is not None
2137
+ ):
2138
+ http_options = parameter_model.config.http_options
2139
+
2140
+ request_dict = _common.convert_to_dict(request_dict)
2141
+ request_dict = _common.encode_unserializable_types(request_dict)
2142
+
2143
+ response = await self._api_client.async_request(
2144
+ 'post', path, request_dict, http_options
2145
+ )
2146
+
2147
+ response_dict = {} if not response.body else json.loads(response.body)
2148
+
2149
+ if not self._api_client.vertexai:
2150
+ response_dict = _BatchJob_from_mldev(response_dict)
1062
2151
 
1063
2152
  return_value = types.BatchJob._from_response(
1064
2153
  response=response_dict, kwargs=parameter_model.model_dump()
1065
2154
  )
2155
+
1066
2156
  self._api_client._verify_response(return_value)
1067
2157
  return return_value
1068
2158
 
@@ -1074,7 +2164,8 @@ class AsyncBatches(_api_module.BaseModule):
1074
2164
  Args:
1075
2165
  name (str): A fully-qualified BatchJob resource name or ID.
1076
2166
  Example: "projects/.../locations/.../batchPredictionJobs/456" or "456"
1077
- when project and location are initialized in the client.
2167
+ when project and location are initialized in the Vertex AI client, or
2168
+ "batches/abc" when using the Gemini Developer API client.
1078
2169
 
1079
2170
  Returns:
1080
2171
  A BatchJob object that contains details about the batch job.
@@ -1093,9 +2184,8 @@ class AsyncBatches(_api_module.BaseModule):
1093
2184
  )
1094
2185
 
1095
2186
  request_url_dict: Optional[dict[str, str]]
1096
- if not self._api_client.vertexai:
1097
- raise ValueError('This method is only supported in the Vertex AI client.')
1098
- else:
2187
+
2188
+ if self._api_client.vertexai:
1099
2189
  request_dict = _GetBatchJobParameters_to_vertex(
1100
2190
  self._api_client, parameter_model
1101
2191
  )
@@ -1104,34 +2194,47 @@ class AsyncBatches(_api_module.BaseModule):
1104
2194
  path = 'batchPredictionJobs/{name}'.format_map(request_url_dict)
1105
2195
  else:
1106
2196
  path = 'batchPredictionJobs/{name}'
1107
-
2197
+ else:
2198
+ request_dict = _GetBatchJobParameters_to_mldev(
2199
+ self._api_client, parameter_model
2200
+ )
2201
+ request_url_dict = request_dict.get('_url')
2202
+ if request_url_dict:
2203
+ path = 'batches/{name}'.format_map(request_url_dict)
2204
+ else:
2205
+ path = 'batches/{name}'
1108
2206
  query_params = request_dict.get('_query')
1109
2207
  if query_params:
1110
2208
  path = f'{path}?{urlencode(query_params)}'
1111
2209
  # TODO: remove the hack that pops config.
1112
2210
  request_dict.pop('config', None)
1113
2211
 
1114
- http_options: Optional[types.HttpOptionsOrDict] = None
1115
- if isinstance(config, dict):
1116
- http_options = config.get('http_options', None)
1117
- elif hasattr(config, 'http_options') and config is not None:
1118
- http_options = config.http_options
2212
+ http_options: Optional[types.HttpOptions] = None
2213
+ if (
2214
+ parameter_model.config is not None
2215
+ and parameter_model.config.http_options is not None
2216
+ ):
2217
+ http_options = parameter_model.config.http_options
1119
2218
 
1120
2219
  request_dict = _common.convert_to_dict(request_dict)
1121
2220
  request_dict = _common.encode_unserializable_types(request_dict)
1122
2221
 
1123
- response_dict = await self._api_client.async_request(
2222
+ response = await self._api_client.async_request(
1124
2223
  'get', path, request_dict, http_options
1125
2224
  )
1126
2225
 
2226
+ response_dict = {} if not response.body else json.loads(response.body)
2227
+
1127
2228
  if self._api_client.vertexai:
1128
- response_dict = _BatchJob_from_vertex(self._api_client, response_dict)
1129
- else:
1130
- response_dict = _BatchJob_from_mldev(self._api_client, response_dict)
2229
+ response_dict = _BatchJob_from_vertex(response_dict)
2230
+
2231
+ if not self._api_client.vertexai:
2232
+ response_dict = _BatchJob_from_mldev(response_dict)
1131
2233
 
1132
2234
  return_value = types.BatchJob._from_response(
1133
2235
  response=response_dict, kwargs=parameter_model.model_dump()
1134
2236
  )
2237
+
1135
2238
  self._api_client._verify_response(return_value)
1136
2239
  return return_value
1137
2240
 
@@ -1147,8 +2250,9 @@ class AsyncBatches(_api_module.BaseModule):
1147
2250
 
1148
2251
  Args:
1149
2252
  name (str): A fully-qualified BatchJob resource name or ID.
1150
- Example: "projects/.../locations/.../batchPredictionJobs/123456789" or
1151
- "123456789" when project and location are initialized in the client.
2253
+ Example: "projects/.../locations/.../batchPredictionJobs/456" or "456"
2254
+ when project and location are initialized in the Vertex AI client, or
2255
+ "batches/abc" when using the Gemini Developer API client.
1152
2256
 
1153
2257
  Usage:
1154
2258
 
@@ -1163,9 +2267,8 @@ class AsyncBatches(_api_module.BaseModule):
1163
2267
  )
1164
2268
 
1165
2269
  request_url_dict: Optional[dict[str, str]]
1166
- if not self._api_client.vertexai:
1167
- raise ValueError('This method is only supported in the Vertex AI client.')
1168
- else:
2270
+
2271
+ if self._api_client.vertexai:
1169
2272
  request_dict = _CancelBatchJobParameters_to_vertex(
1170
2273
  self._api_client, parameter_model
1171
2274
  )
@@ -1174,77 +2277,92 @@ class AsyncBatches(_api_module.BaseModule):
1174
2277
  path = 'batchPredictionJobs/{name}:cancel'.format_map(request_url_dict)
1175
2278
  else:
1176
2279
  path = 'batchPredictionJobs/{name}:cancel'
1177
-
2280
+ else:
2281
+ request_dict = _CancelBatchJobParameters_to_mldev(
2282
+ self._api_client, parameter_model
2283
+ )
2284
+ request_url_dict = request_dict.get('_url')
2285
+ if request_url_dict:
2286
+ path = 'batches/{name}:cancel'.format_map(request_url_dict)
2287
+ else:
2288
+ path = 'batches/{name}:cancel'
1178
2289
  query_params = request_dict.get('_query')
1179
2290
  if query_params:
1180
2291
  path = f'{path}?{urlencode(query_params)}'
1181
2292
  # TODO: remove the hack that pops config.
1182
2293
  request_dict.pop('config', None)
1183
2294
 
1184
- http_options: Optional[types.HttpOptionsOrDict] = None
1185
- if isinstance(config, dict):
1186
- http_options = config.get('http_options', None)
1187
- elif hasattr(config, 'http_options') and config is not None:
1188
- http_options = config.http_options
2295
+ http_options: Optional[types.HttpOptions] = None
2296
+ if (
2297
+ parameter_model.config is not None
2298
+ and parameter_model.config.http_options is not None
2299
+ ):
2300
+ http_options = parameter_model.config.http_options
1189
2301
 
1190
2302
  request_dict = _common.convert_to_dict(request_dict)
1191
2303
  request_dict = _common.encode_unserializable_types(request_dict)
1192
2304
 
1193
- response_dict = await self._api_client.async_request(
2305
+ response = await self._api_client.async_request(
1194
2306
  'post', path, request_dict, http_options
1195
2307
  )
1196
2308
 
1197
2309
  async def _list(
1198
- self, *, config: types.ListBatchJobsConfigOrDict
2310
+ self, *, config: Optional[types.ListBatchJobsConfigOrDict] = None
1199
2311
  ) -> types.ListBatchJobsResponse:
1200
2312
  parameter_model = types._ListBatchJobsParameters(
1201
2313
  config=config,
1202
2314
  )
1203
2315
 
1204
2316
  request_url_dict: Optional[dict[str, str]]
1205
- if not self._api_client.vertexai:
1206
- raise ValueError('This method is only supported in the Vertex AI client.')
1207
- else:
1208
- request_dict = _ListBatchJobsParameters_to_vertex(
1209
- self._api_client, parameter_model
1210
- )
2317
+
2318
+ if self._api_client.vertexai:
2319
+ request_dict = _ListBatchJobsParameters_to_vertex(parameter_model)
1211
2320
  request_url_dict = request_dict.get('_url')
1212
2321
  if request_url_dict:
1213
2322
  path = 'batchPredictionJobs'.format_map(request_url_dict)
1214
2323
  else:
1215
2324
  path = 'batchPredictionJobs'
1216
-
2325
+ else:
2326
+ request_dict = _ListBatchJobsParameters_to_mldev(parameter_model)
2327
+ request_url_dict = request_dict.get('_url')
2328
+ if request_url_dict:
2329
+ path = 'batches'.format_map(request_url_dict)
2330
+ else:
2331
+ path = 'batches'
1217
2332
  query_params = request_dict.get('_query')
1218
2333
  if query_params:
1219
2334
  path = f'{path}?{urlencode(query_params)}'
1220
2335
  # TODO: remove the hack that pops config.
1221
2336
  request_dict.pop('config', None)
1222
2337
 
1223
- http_options: Optional[types.HttpOptionsOrDict] = None
1224
- if isinstance(config, dict):
1225
- http_options = config.get('http_options', None)
1226
- elif hasattr(config, 'http_options') and config is not None:
1227
- http_options = config.http_options
2338
+ http_options: Optional[types.HttpOptions] = None
2339
+ if (
2340
+ parameter_model.config is not None
2341
+ and parameter_model.config.http_options is not None
2342
+ ):
2343
+ http_options = parameter_model.config.http_options
1228
2344
 
1229
2345
  request_dict = _common.convert_to_dict(request_dict)
1230
2346
  request_dict = _common.encode_unserializable_types(request_dict)
1231
2347
 
1232
- response_dict = await self._api_client.async_request(
2348
+ response = await self._api_client.async_request(
1233
2349
  'get', path, request_dict, http_options
1234
2350
  )
1235
2351
 
2352
+ response_dict = {} if not response.body else json.loads(response.body)
2353
+
1236
2354
  if self._api_client.vertexai:
1237
- response_dict = _ListBatchJobsResponse_from_vertex(
1238
- self._api_client, response_dict
1239
- )
1240
- else:
1241
- response_dict = _ListBatchJobsResponse_from_mldev(
1242
- self._api_client, response_dict
1243
- )
2355
+ response_dict = _ListBatchJobsResponse_from_vertex(response_dict)
2356
+
2357
+ if not self._api_client.vertexai:
2358
+ response_dict = _ListBatchJobsResponse_from_mldev(response_dict)
1244
2359
 
1245
2360
  return_value = types.ListBatchJobsResponse._from_response(
1246
2361
  response=response_dict, kwargs=parameter_model.model_dump()
1247
2362
  )
2363
+ return_value.sdk_http_response = types.HttpResponse(
2364
+ headers=response.headers
2365
+ )
1248
2366
  self._api_client._verify_response(return_value)
1249
2367
  return return_value
1250
2368
 
@@ -1277,9 +2395,8 @@ class AsyncBatches(_api_module.BaseModule):
1277
2395
  )
1278
2396
 
1279
2397
  request_url_dict: Optional[dict[str, str]]
1280
- if not self._api_client.vertexai:
1281
- raise ValueError('This method is only supported in the Vertex AI client.')
1282
- else:
2398
+
2399
+ if self._api_client.vertexai:
1283
2400
  request_dict = _DeleteBatchJobParameters_to_vertex(
1284
2401
  self._api_client, parameter_model
1285
2402
  )
@@ -1288,55 +2405,94 @@ class AsyncBatches(_api_module.BaseModule):
1288
2405
  path = 'batchPredictionJobs/{name}'.format_map(request_url_dict)
1289
2406
  else:
1290
2407
  path = 'batchPredictionJobs/{name}'
1291
-
2408
+ else:
2409
+ request_dict = _DeleteBatchJobParameters_to_mldev(
2410
+ self._api_client, parameter_model
2411
+ )
2412
+ request_url_dict = request_dict.get('_url')
2413
+ if request_url_dict:
2414
+ path = 'batches/{name}'.format_map(request_url_dict)
2415
+ else:
2416
+ path = 'batches/{name}'
1292
2417
  query_params = request_dict.get('_query')
1293
2418
  if query_params:
1294
2419
  path = f'{path}?{urlencode(query_params)}'
1295
2420
  # TODO: remove the hack that pops config.
1296
2421
  request_dict.pop('config', None)
1297
2422
 
1298
- http_options: Optional[types.HttpOptionsOrDict] = None
1299
- if isinstance(config, dict):
1300
- http_options = config.get('http_options', None)
1301
- elif hasattr(config, 'http_options') and config is not None:
1302
- http_options = config.http_options
2423
+ http_options: Optional[types.HttpOptions] = None
2424
+ if (
2425
+ parameter_model.config is not None
2426
+ and parameter_model.config.http_options is not None
2427
+ ):
2428
+ http_options = parameter_model.config.http_options
1303
2429
 
1304
2430
  request_dict = _common.convert_to_dict(request_dict)
1305
2431
  request_dict = _common.encode_unserializable_types(request_dict)
1306
2432
 
1307
- response_dict = await self._api_client.async_request(
2433
+ response = await self._api_client.async_request(
1308
2434
  'delete', path, request_dict, http_options
1309
2435
  )
1310
2436
 
2437
+ response_dict = {} if not response.body else json.loads(response.body)
2438
+
1311
2439
  if self._api_client.vertexai:
1312
- response_dict = _DeleteResourceJob_from_vertex(
1313
- self._api_client, response_dict
1314
- )
1315
- else:
1316
- response_dict = _DeleteResourceJob_from_mldev(
1317
- self._api_client, response_dict
1318
- )
2440
+ response_dict = _DeleteResourceJob_from_vertex(response_dict)
2441
+
2442
+ if not self._api_client.vertexai:
2443
+ response_dict = _DeleteResourceJob_from_mldev(response_dict)
1319
2444
 
1320
2445
  return_value = types.DeleteResourceJob._from_response(
1321
2446
  response=response_dict, kwargs=parameter_model.model_dump()
1322
2447
  )
2448
+ return_value.sdk_http_response = types.HttpResponse(
2449
+ headers=response.headers
2450
+ )
1323
2451
  self._api_client._verify_response(return_value)
1324
2452
  return return_value
1325
2453
 
2454
+ async def list(
2455
+ self, *, config: Optional[types.ListBatchJobsConfigOrDict] = None
2456
+ ) -> AsyncPager[types.BatchJob]:
2457
+ """Lists batch jobs asynchronously.
2458
+
2459
+ Args:
2460
+ config (ListBatchJobsConfig): Optional configuration for the list request.
2461
+
2462
+ Returns:
2463
+ An AsyncPager object that contains one page of batch jobs. When iterating
2464
+ the pager, it automatically fetches the next page if there are more.
2465
+
2466
+ Usage:
2467
+
2468
+ .. code-block:: python
2469
+ async for batch_job in await client.aio.batches.list():
2470
+ print(batch_job.name)
2471
+ """
2472
+
2473
+ list_request = self._list
2474
+ return AsyncPager(
2475
+ 'batch_jobs',
2476
+ list_request,
2477
+ await self._list(config=config),
2478
+ config,
2479
+ )
2480
+
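Unlike the synchronous version, the async `list` coroutine must itself be awaited before the pager can be iterated; the `page`/`next_page()` idiom from the removed docstring still applies to the returned `AsyncPager`. A sketch (assumes a configured client):

.. code-block:: python

    import asyncio
    from google import genai

    async def main() -> None:
        client = genai.Client()  # assumes credentials in the environment
        pager = await client.aio.batches.list(config={'page_size': 5})
        print(f'first page holds {len(pager.page)} jobs')
        async for job in pager:  # later pages are fetched on demand
            print(job.name)

    asyncio.run(main())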
1326
2481
  async def create(
1327
2482
  self,
1328
2483
  *,
1329
2484
  model: str,
1330
- src: str,
2485
+ src: types.BatchJobSourceUnionDict,
1331
2486
  config: Optional[types.CreateBatchJobConfigOrDict] = None,
1332
2487
  ) -> types.BatchJob:
1333
2488
  """Creates a batch job asynchronously.
1334
2489
 
1335
2490
  Args:
1336
2491
  model (str): The model to use for the batch job.
1337
- src (str): The source of the batch job. Currently supports GCS URI(-s) or
1338
- BigQuery URI. Example: "gs://path/to/input/data" or
1339
- "bq://projectId.bqDatasetId.bqTableId".
2492
+ src: The source of the batch job. Currently Vertex AI supports GCS URI(-s)
2493
+ or BigQuery URI. Example: "gs://path/to/input/data" or
2494
+ "bq://projectId.bqDatasetId.bqTableId". Gemini Develop API supports List
2495
+ of inlined_request, or file name. Example: "files/file_name".
1340
2496
  config (CreateBatchJobConfig): Optional configuration for the batch job.
1341
2497
 
1342
2498
  Returns:
@@ -1347,39 +2503,78 @@ class AsyncBatches(_api_module.BaseModule):
1347
2503
  .. code-block:: python
1348
2504
 
1349
2505
  batch_job = await client.aio.batches.create(
1350
- model="gemini-1.5-flash",
2506
+ model="gemini-2.0-flash-001",
1351
2507
  src="gs://path/to/input/data",
1352
2508
  )
1353
2509
  """
1354
- config = _extra_utils.format_destination(src, config)
1355
- return await self._create(model=model, src=src, config=config)
2510
+ src = t.t_batch_job_source(self._api_client, src)
1356
2511
 
1357
- async def list(
1358
- self, *, config: Optional[types.ListBatchJobsConfigOrDict] = None
1359
- ) -> AsyncPager[types.BatchJob]:
1360
- """Lists batch jobs asynchronously.
2512
+ # Convert all dicts to Pydantic objects.
2513
+ parameter_model = types._CreateBatchJobParameters(
2514
+ model=model,
2515
+ src=src,
2516
+ config=config,
2517
+ )
2518
+
2519
+ if self._api_client.vertexai:
2520
+ config = _extra_utils.format_destination(src, parameter_model.config)
2521
+ return await self._create(model=model, src=src, config=config)
2522
+ else:
2523
+ return await self._create(model=model, src=src, config=config)
2524
+
2525
+ async def create_embeddings(
2526
+ self,
2527
+ *,
2528
+ model: str,
2529
+ src: types.EmbeddingsBatchJobSourceOrDict,
2530
+ config: Optional[types.CreateEmbeddingsBatchJobConfigOrDict] = None,
2531
+ ) -> types.BatchJob:
2532
+ """**Experimental** Creates an asynchronously embedding batch job.
1361
2533
 
1362
2534
  Args:
1363
- config (ListBatchJobsConfig): Optional configuration for the list request.
2535
+ model (str): The model to use for the batch job.
2536
+ src: Gemini Developer API supports inlined_requests, or file name.
2537
+ Example: "files/file_name".
2538
+ config (CreateBatchJobConfig): Optional configuration for the batch job.
1364
2539
 
1365
2540
  Returns:
1366
- A Pager object that contains one page of batch jobs. When iterating over
1367
- the pager, it automatically fetches the next page if there are more.
2541
+ A BatchJob object that contains details about the batch job.
1368
2542
 
1369
2543
  Usage:
1370
2544
 
1371
2545
  .. code-block:: python
1372
2546
 
1373
- batch_jobs = await client.aio.batches.list(config={'page_size': 5})
1374
- print(f"current page: {batch_jobs.page}")
1375
- await batch_jobs_pager.next_page()
1376
- print(f"next page: {batch_jobs_pager.page}")
2547
+ batch_job = await client.aio.batches.create_embeddings(
2548
+ model="text-embedding-004",
2549
+ src="files/my_embedding_input",
2550
+ )
2551
+ print(batch_job.state)
1377
2552
  """
1378
- if config is None:
1379
- config = types.ListBatchJobsConfig()
1380
- return AsyncPager(
1381
- 'batch_jobs',
1382
- self._list,
1383
- await self._list(config=config),
1384
- config,
2553
+ import warnings
2554
+
2555
+ warnings.warn(
2556
+ 'batches.create_embeddings() is experimental and may change without'
2557
+ ' notice.',
2558
+ category=_common.ExperimentalWarning,
2559
+ stacklevel=2,  # Attribute the warning to the caller, not this module.
2560
+ )
2561
+ src = t.t_embedding_batch_job_source(self._api_client, src)
2562
+
2563
+ # Convert all dicts to Pydantic objects.
2564
+ parameter_model = types._CreateEmbeddingsBatchJobParameters(
2565
+ model=model,
2566
+ src=src,
2567
+ config=config,
1385
2568
  )
2569
+
2570
+ http_options: Optional[types.HttpOptions] = None
2571
+ if (
2572
+ parameter_model.config is not None
2573
+ and parameter_model.config.http_options is not None
2574
+ ):
2575
+ http_options = parameter_model.config.http_options
2576
+
2577
+ if self._api_client.vertexai:
2578
+ raise ValueError('Vertex AI does not support batches.create_embeddings.')
2579
+ else:
2580
+ return await self._create_embeddings(model=model, src=src, config=config)
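End to end, the async embedding entry point mirrors the sync one: Gemini Developer API only, experimental, and delegating to `_create_embeddings`. A final usage sketch with placeholder names:

.. code-block:: python

    import asyncio
    from google import genai

    async def main() -> None:
        client = genai.Client()  # Gemini Developer API; assumes GOOGLE_API_KEY
        job = await client.aio.batches.create_embeddings(
            model='text-embedding-004',
            src='files/my_embedding_input',  # placeholder uploaded file
        )
        print(job.name, job.state)

    asyncio.run(main())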