google-genai 1.33.0__py3-none-any.whl → 1.34.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
google/genai/batches.py CHANGED
@@ -85,6 +85,23 @@ def _FileData_to_mldev(
   return to_object


+def _FunctionCall_to_mldev(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['id']) is not None:
+    setv(to_object, ['id'], getv(from_object, ['id']))
+
+  if getv(from_object, ['args']) is not None:
+    setv(to_object, ['args'], getv(from_object, ['args']))
+
+  if getv(from_object, ['name']) is not None:
+    setv(to_object, ['name'], getv(from_object, ['name']))
+
+  return to_object
+
+
 def _Part_to_mldev(
     from_object: Union[dict[str, Any], object],
     parent_object: Optional[dict[str, Any]] = None,
@@ -123,6 +140,13 @@ def _Part_to_mldev(
         getv(from_object, ['thought_signature']),
     )

+  if getv(from_object, ['function_call']) is not None:
+    setv(
+        to_object,
+        ['functionCall'],
+        _FunctionCall_to_mldev(getv(from_object, ['function_call']), to_object),
+    )
+
   if getv(from_object, ['code_execution_result']) is not None:
     setv(
         to_object,
@@ -133,9 +157,6 @@ def _Part_to_mldev(
   if getv(from_object, ['executable_code']) is not None:
     setv(to_object, ['executableCode'], getv(from_object, ['executable_code']))

-  if getv(from_object, ['function_call']) is not None:
-    setv(to_object, ['functionCall'], getv(from_object, ['function_call']))
-
   if getv(from_object, ['function_response']) is not None:
     setv(
         to_object,
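
Note: the three `_Part_to_mldev` hunks above change how a function-call part is serialized. Instead of copying the `function_call` value verbatim, the converter now routes it through the new `_FunctionCall_to_mldev`, which keeps only the `id`, `args`, and `name` fields. A minimal sketch of the resulting mapping (the input dict is invented for illustration):

    # Hypothetical input part (SDK snake_case):
    part = {'function_call': {'id': 'call-1', 'name': 'get_weather',
                              'args': {'city': 'Paris'}}}
    # After this change, _Part_to_mldev emits (wire camelCase):
    # {'functionCall': {'id': 'call-1', 'name': 'get_weather',
    #                   'args': {'city': 'Paris'}}}
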
@@ -940,6 +961,132 @@ def _CreateBatchJobParameters_to_mldev(
   return to_object


+def _EmbedContentConfig_to_mldev(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+
+  if getv(from_object, ['task_type']) is not None:
+    setv(
+        parent_object,
+        ['requests[]', 'taskType'],
+        getv(from_object, ['task_type']),
+    )
+
+  if getv(from_object, ['title']) is not None:
+    setv(parent_object, ['requests[]', 'title'], getv(from_object, ['title']))
+
+  if getv(from_object, ['output_dimensionality']) is not None:
+    setv(
+        parent_object,
+        ['requests[]', 'outputDimensionality'],
+        getv(from_object, ['output_dimensionality']),
+    )
+
+  if getv(from_object, ['mime_type']) is not None:
+    raise ValueError('mime_type parameter is not supported in Gemini API.')
+
+  if getv(from_object, ['auto_truncate']) is not None:
+    raise ValueError('auto_truncate parameter is not supported in Gemini API.')
+
+  return to_object
+
+
+def _EmbedContentBatch_to_mldev(
+    api_client: BaseApiClient,
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['contents']) is not None:
+    setv(
+        to_object,
+        ['requests[]', 'request', 'content'],
+        t.t_contents_for_embed(api_client, getv(from_object, ['contents'])),
+    )
+
+  if getv(from_object, ['config']) is not None:
+    setv(
+        to_object,
+        ['config'],
+        _EmbedContentConfig_to_mldev(getv(from_object, ['config']), to_object),
+    )
+
+  return to_object
+
+
+def _EmbeddingsBatchJobSource_to_mldev(
+    api_client: BaseApiClient,
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['file_name']) is not None:
+    setv(to_object, ['file_name'], getv(from_object, ['file_name']))
+
+  if getv(from_object, ['inlined_requests']) is not None:
+    setv(
+        to_object,
+        ['requests'],
+        _EmbedContentBatch_to_mldev(
+            api_client, getv(from_object, ['inlined_requests']), to_object
+        ),
+    )
+
+  return to_object
+
+
+def _CreateEmbeddingsBatchJobConfig_to_mldev(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+
+  if getv(from_object, ['display_name']) is not None:
+    setv(
+        parent_object,
+        ['batch', 'displayName'],
+        getv(from_object, ['display_name']),
+    )
+
+  return to_object
+
+
+def _CreateEmbeddingsBatchJobParameters_to_mldev(
+    api_client: BaseApiClient,
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['model']) is not None:
+    setv(
+        to_object,
+        ['_url', 'model'],
+        t.t_model(api_client, getv(from_object, ['model'])),
+    )
+
+  if getv(from_object, ['src']) is not None:
+    setv(
+        to_object,
+        ['batch', 'inputConfig'],
+        _EmbeddingsBatchJobSource_to_mldev(
+            api_client, getv(from_object, ['src']), to_object
+        ),
+    )
+
+  if getv(from_object, ['config']) is not None:
+    setv(
+        to_object,
+        ['config'],
+        _CreateEmbeddingsBatchJobConfig_to_mldev(
+            getv(from_object, ['config']), to_object
+        ),
+    )
+
+  return to_object
+
+
 def _GetBatchJobParameters_to_mldev(
     api_client: BaseApiClient,
     from_object: Union[dict[str, Any], object],
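
Note: taken together, the converters added above assemble the `asyncBatchEmbedContent` request body. A hand-worked sketch of the mapping, assuming an inlined source (values are invented; the nesting follows the `setv` paths above):

    params = {
        'model': 'text-embedding-004',
        'src': {'inlined_requests': {'contents': ['hello world']}},
        'config': {'display_name': 'my-batch'},
    }
    # _CreateEmbeddingsBatchJobParameters_to_mldev produces roughly:
    # {
    #     '_url': {'model': 'models/text-embedding-004'},
    #     'batch': {
    #         'displayName': 'my-batch',
    #         'inputConfig': {
    #             'requests': {'requests': [{'request': {'content': ...}}]},
    #         },
    #     },
    # }
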
@@ -1095,6 +1242,12 @@ def _BatchJobDestination_to_vertex(
         'inlined_responses parameter is not supported in Vertex AI.'
     )

+  if getv(from_object, ['inlined_embed_content_responses']) is not None:
+    raise ValueError(
+        'inlined_embed_content_responses parameter is not supported in'
+        ' Vertex AI.'
+    )
+
   return to_object


@@ -1299,6 +1452,23 @@ def _FileData_from_mldev(
   return to_object


+def _FunctionCall_from_mldev(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['id']) is not None:
+    setv(to_object, ['id'], getv(from_object, ['id']))
+
+  if getv(from_object, ['args']) is not None:
+    setv(to_object, ['args'], getv(from_object, ['args']))
+
+  if getv(from_object, ['name']) is not None:
+    setv(to_object, ['name'], getv(from_object, ['name']))
+
+  return to_object
+
+
 def _Part_from_mldev(
     from_object: Union[dict[str, Any], object],
     parent_object: Optional[dict[str, Any]] = None,
@@ -1337,6 +1507,15 @@ def _Part_from_mldev(
         getv(from_object, ['thoughtSignature']),
     )

+  if getv(from_object, ['functionCall']) is not None:
+    setv(
+        to_object,
+        ['function_call'],
+        _FunctionCall_from_mldev(
+            getv(from_object, ['functionCall']), to_object
+        ),
+    )
+
   if getv(from_object, ['codeExecutionResult']) is not None:
     setv(
         to_object,
@@ -1347,9 +1526,6 @@ def _Part_from_mldev(
   if getv(from_object, ['executableCode']) is not None:
     setv(to_object, ['executable_code'], getv(from_object, ['executableCode']))

-  if getv(from_object, ['functionCall']) is not None:
-    setv(to_object, ['function_call'], getv(from_object, ['functionCall']))
-
   if getv(from_object, ['functionResponse']) is not None:
     setv(
         to_object,
@@ -1565,6 +1741,61 @@ def _InlinedResponse_from_mldev(
   return to_object


+def _ContentEmbedding_from_mldev(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['values']) is not None:
+    setv(to_object, ['values'], getv(from_object, ['values']))
+
+  return to_object
+
+
+def _SingleEmbedContentResponse_from_mldev(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['embedding']) is not None:
+    setv(
+        to_object,
+        ['embedding'],
+        _ContentEmbedding_from_mldev(
+            getv(from_object, ['embedding']), to_object
+        ),
+    )
+
+  if getv(from_object, ['tokenCount']) is not None:
+    setv(to_object, ['token_count'], getv(from_object, ['tokenCount']))
+
+  return to_object
+
+
+def _InlinedEmbedContentResponse_from_mldev(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['response']) is not None:
+    setv(
+        to_object,
+        ['response'],
+        _SingleEmbedContentResponse_from_mldev(
+            getv(from_object, ['response']), to_object
+        ),
+    )
+
+  if getv(from_object, ['error']) is not None:
+    setv(
+        to_object,
+        ['error'],
+        _JobError_from_mldev(getv(from_object, ['error']), to_object),
+    )
+
+  return to_object
+
+
 def _BatchJobDestination_from_mldev(
     from_object: Union[dict[str, Any], object],
     parent_object: Optional[dict[str, Any]] = None,
@@ -1586,6 +1817,22 @@ def _BatchJobDestination_from_mldev(
         ],
     )

+  if (
+      getv(from_object, ['inlinedEmbedContentResponses', 'inlinedResponses'])
+      is not None
+  ):
+    setv(
+        to_object,
+        ['inlined_embed_content_responses'],
+        [
+            _InlinedEmbedContentResponse_from_mldev(item, to_object)
+            for item in getv(
+                from_object,
+                ['inlinedEmbedContentResponses', 'inlinedResponses'],
+            )
+        ],
+    )
+
   return to_object


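
Note: the new destination field above converts the wire response back to SDK snake_case. A hand-worked example (wire payload invented):

    wire = {
        'inlinedEmbedContentResponses': {
            'inlinedResponses': [
                {'response': {'embedding': {'values': [0.1, 0.2]},
                              'tokenCount': 2}}
            ]
        }
    }
    # _BatchJobDestination_from_mldev(wire) yields:
    # {
    #     'inlined_embed_content_responses': [
    #         {'response': {'embedding': {'values': [0.1, 0.2]},
    #                       'token_count': 2}}
    #     ]
    # }
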
@@ -1636,7 +1883,10 @@ def _BatchJob_from_mldev(
         to_object,
         ['dest'],
         _BatchJobDestination_from_mldev(
-            getv(from_object, ['metadata', 'output']), to_object
+            t.t_recv_batch_job_destination(
+                getv(from_object, ['metadata', 'output'])
+            ),
+            to_object,
         ),
     )

@@ -1808,7 +2058,8 @@ def _BatchJob_from_vertex(
         to_object,
         ['dest'],
         _BatchJobDestination_from_vertex(
-            getv(from_object, ['outputConfig']), to_object
+            t.t_recv_batch_job_destination(getv(from_object, ['outputConfig'])),
+            to_object,
         ),
     )

@@ -1937,6 +2188,66 @@ class Batches(_api_module.BaseModule):
     self._api_client._verify_response(return_value)
     return return_value

+  def _create_embeddings(
+      self,
+      *,
+      model: Optional[str] = None,
+      src: types.EmbeddingsBatchJobSourceOrDict,
+      config: Optional[types.CreateEmbeddingsBatchJobConfigOrDict] = None,
+  ) -> types.BatchJob:
+    parameter_model = types._CreateEmbeddingsBatchJobParameters(
+        model=model,
+        src=src,
+        config=config,
+    )
+
+    request_url_dict: Optional[dict[str, str]]
+    if self._api_client.vertexai:
+      raise ValueError(
+          'This method is only supported in the Gemini Developer client.'
+      )
+    else:
+      request_dict = _CreateEmbeddingsBatchJobParameters_to_mldev(
+          self._api_client, parameter_model
+      )
+      request_url_dict = request_dict.get('_url')
+      if request_url_dict:
+        path = '{model}:asyncBatchEmbedContent'.format_map(request_url_dict)
+      else:
+        path = '{model}:asyncBatchEmbedContent'
+
+    query_params = request_dict.get('_query')
+    if query_params:
+      path = f'{path}?{urlencode(query_params)}'
+    # TODO: remove the hack that pops config.
+    request_dict.pop('config', None)
+
+    http_options: Optional[types.HttpOptions] = None
+    if (
+        parameter_model.config is not None
+        and parameter_model.config.http_options is not None
+    ):
+      http_options = parameter_model.config.http_options
+
+    request_dict = _common.convert_to_dict(request_dict)
+    request_dict = _common.encode_unserializable_types(request_dict)
+
+    response = self._api_client.request(
+        'post', path, request_dict, http_options
+    )
+
+    response_dict = '' if not response.body else json.loads(response.body)
+
+    if not self._api_client.vertexai:
+      response_dict = _BatchJob_from_mldev(response_dict)
+
+    return_value = types.BatchJob._from_response(
+        response=response_dict, kwargs=parameter_model.model_dump()
+    )
+
+    self._api_client._verify_response(return_value)
+    return return_value
+
   def get(
       self, *, name: str, config: Optional[types.GetBatchJobConfigOrDict] = None
   ) -> types.BatchJob:
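
Note: the endpoint path in `_create_embeddings` is built by substituting the `_url` entries into a template; a one-line illustration (model name invented):

    path = '{model}:asyncBatchEmbedContent'.format_map(
        {'model': 'models/text-embedding-004'}
    )
    # -> 'models/text-embedding-004:asyncBatchEmbedContent'
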
@@ -2232,7 +2543,7 @@ class Batches(_api_module.BaseModule):
       self,
       *,
       model: str,
-      src: Union[types.BatchJobSourceUnion, types.BatchJobSourceUnionDict],
+      src: types.BatchJobSourceUnionDict,
       config: Optional[types.CreateBatchJobConfigOrDict] = None,
   ) -> types.BatchJob:
     """Creates a batch job.
@@ -2258,85 +2569,120 @@ class Batches(_api_module.BaseModule):
       )
       print(batch_job.state)
     """
+    src = t.t_batch_job_source(self._api_client, src)
+
+    # Convert all dicts to Pydantic objects.
     parameter_model = types._CreateBatchJobParameters(
         model=model,
         src=src,
         config=config,
     )
+
     http_options: Optional[types.HttpOptions] = None
     if (
         parameter_model.config is not None
         and parameter_model.config.http_options is not None
     ):
       http_options = parameter_model.config.http_options
+
     if self._api_client.vertexai:
-      if isinstance(src, list):
-        raise ValueError(
-            'inlined_requests is not supported in Vertex AI. Please use'
-            ' Google Cloud Storage URI or BigQuery URI instead.'
-        )
+      config = _extra_utils.format_destination(src, parameter_model.config)
+      return self._create(model=model, src=src, config=config)
+    elif src.inlined_requests is None:
+      return self._create(model=model, src=src, config=config)

-      config = _extra_utils.format_destination(src, config)
-    else:
-      if isinstance(parameter_model.src, list) or (
-          not isinstance(parameter_model.src, str)
-          and parameter_model.src
-          and parameter_model.src.inlined_requests
-      ):
-        # Handle system instruction in InlinedRequests.
-        request_url_dict: Optional[dict[str, str]]
-        request_dict: dict[str, Any] = _CreateBatchJobParameters_to_mldev(
-            self._api_client, parameter_model
-        )
-        request_url_dict = request_dict.get('_url')
-        if request_url_dict:
-          path = '{model}:batchGenerateContent'.format_map(request_url_dict)
-        else:
-          path = '{model}:batchGenerateContent'
-        query_params = request_dict.get('_query')
-        if query_params:
-          path = f'{path}?{urlencode(query_params)}'
-        request_dict.pop('config', None)
-
-        request_dict = _common.convert_to_dict(request_dict)
-        request_dict = _common.encode_unserializable_types(request_dict)
-        # Move system instruction to 'request':
-        # {'systemInstruction': system_instruction}
-        requests = []
-        batch_dict = request_dict.get('batch')
-        if batch_dict and isinstance(batch_dict, dict):
-          input_config_dict = batch_dict.get('inputConfig')
-          if input_config_dict and isinstance(input_config_dict, dict):
-            requests_dict = input_config_dict.get('requests')
-            if requests_dict and isinstance(requests_dict, dict):
-              requests = requests_dict.get('requests')
-        new_requests = []
-        if requests:
-          for req in requests:
-            if req.get('systemInstruction'):
-              value = req.pop('systemInstruction')
-              req['request'].update({'systemInstruction': value})
-            new_requests.append(req)
-          request_dict['batch']['inputConfig']['requests'][  # type: ignore
-              'requests'
-          ] = new_requests
-
-        response = self._api_client.request(
-            'post', path, request_dict, http_options
-        )
-
-        response_dict = '' if not response.body else json.loads(response.body)
-
-        response_dict = _BatchJob_from_mldev(response_dict)
-
-        return_value = types.BatchJob._from_response(
-            response=response_dict, kwargs=parameter_model.model_dump()
-        )
-
-        self._api_client._verify_response(return_value)
-        return return_value
-
-    return self._create(model=model, src=src, config=config)
+    path, request_dict = _create_inlined_generate_content_request_dict(
+        self._api_client, parameter_model
+    )
+
+    response = self._api_client.request(
+        'post', path, request_dict, http_options
+    )
+
+    response_dict = '' if not response.body else json.loads(response.body)
+    response_dict = _BatchJob_from_mldev(response_dict)
+
+    return_value = types.BatchJob._from_response(
+        response=response_dict, kwargs=parameter_model.model_dump()
+    )
+
+    self._api_client._verify_response(return_value)
+    return return_value
+
+  def create_embeddings(
+      self,
+      *,
+      model: str,
+      src: types.EmbeddingsBatchJobSourceOrDict,
+      config: Optional[types.CreateEmbeddingsBatchJobConfigOrDict] = None,
+  ) -> types.BatchJob:
+    """**Experimental** Creates an embedding batch job.
+
+    Args:
+      model (str): The model to use for the batch job.
+      src: The Gemini Developer API supports a list of inlined requests or a
+        file name. Example: "files/file_name".
+      config (CreateEmbeddingsBatchJobConfig): Optional configuration for the
+        batch job.
+
+    Returns:
+      A BatchJob object that contains details about the batch job.
+
+    Usage:
+
+    .. code-block:: python
+
+      batch_job = client.batches.create_embeddings(
+          model="text-embedding-004",
+          src="files/my_embedding_input",
+      )
+      print(batch_job.state)
+    """
+    import warnings
+
+    warnings.warn(
+        'batches.create_embeddings() is experimental and may change without'
+        ' notice.',
+        category=_common.ExperimentalWarning,
+        stacklevel=2,  # This is crucial!
+    )
+    src = t.t_embedding_batch_job_source(self._api_client, src)
+
+    # Convert all dicts to Pydantic objects.
+    parameter_model = types._CreateEmbeddingsBatchJobParameters(
+        model=model,
+        src=src,
+        config=config,
+    )
+
+    http_options: Optional[types.HttpOptions] = None
+    if (
+        parameter_model.config is not None
+        and parameter_model.config.http_options is not None
+    ):
+      http_options = parameter_model.config.http_options
+
+    if self._api_client.vertexai:
+      raise ValueError('Vertex AI does not support batches.create_embeddings.')
+    elif src.inlined_requests is None:
+      return self._create_embeddings(model=model, src=src, config=config)
+
+    path, request_dict = _create_inlined_embedding_request_dict(
+        self._api_client, parameter_model
+    )
+
+    response = self._api_client.request(
+        'post', path, request_dict, http_options
+    )
+
+    response_dict = '' if not response.body else json.loads(response.body)
+    response_dict = _BatchJob_from_mldev(response_dict)
+
+    return_value = types.BatchJob._from_response(
+        response=response_dict, kwargs=parameter_model.model_dump()
+    )
+
+    self._api_client._verify_response(return_value)
+    return return_value

   def list(
       self, *, config: Optional[types.ListBatchJobsConfigOrDict] = None
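
Note: the docstring above shows the file-based source; an inlined source should also be accepted via `t.t_embedding_batch_job_source`. A hedged usage sketch (the dict shape mirrors `EmbeddingsBatchJobSource` as used by the converters; polling is left to the caller):

    from google import genai

    client = genai.Client()  # Gemini Developer API; Vertex AI raises ValueError
    job = client.batches.create_embeddings(
        model='text-embedding-004',
        src={'inlined_requests': {'contents': ['hello', 'world']}},
    )
    job = client.batches.get(name=job.name)  # repeat until job.state is terminal
    if job.dest and job.dest.inlined_embed_content_responses:
        for item in job.dest.inlined_embed_content_responses:
            print(item.response.embedding.values)
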
@@ -2438,6 +2784,66 @@ class AsyncBatches(_api_module.BaseModule):
     self._api_client._verify_response(return_value)
     return return_value

+  async def _create_embeddings(
+      self,
+      *,
+      model: Optional[str] = None,
+      src: types.EmbeddingsBatchJobSourceOrDict,
+      config: Optional[types.CreateEmbeddingsBatchJobConfigOrDict] = None,
+  ) -> types.BatchJob:
+    parameter_model = types._CreateEmbeddingsBatchJobParameters(
+        model=model,
+        src=src,
+        config=config,
+    )
+
+    request_url_dict: Optional[dict[str, str]]
+    if self._api_client.vertexai:
+      raise ValueError(
+          'This method is only supported in the Gemini Developer client.'
+      )
+    else:
+      request_dict = _CreateEmbeddingsBatchJobParameters_to_mldev(
+          self._api_client, parameter_model
+      )
+      request_url_dict = request_dict.get('_url')
+      if request_url_dict:
+        path = '{model}:asyncBatchEmbedContent'.format_map(request_url_dict)
+      else:
+        path = '{model}:asyncBatchEmbedContent'
+
+    query_params = request_dict.get('_query')
+    if query_params:
+      path = f'{path}?{urlencode(query_params)}'
+    # TODO: remove the hack that pops config.
+    request_dict.pop('config', None)
+
+    http_options: Optional[types.HttpOptions] = None
+    if (
+        parameter_model.config is not None
+        and parameter_model.config.http_options is not None
+    ):
+      http_options = parameter_model.config.http_options
+
+    request_dict = _common.convert_to_dict(request_dict)
+    request_dict = _common.encode_unserializable_types(request_dict)
+
+    response = await self._api_client.async_request(
+        'post', path, request_dict, http_options
+    )
+
+    response_dict = '' if not response.body else json.loads(response.body)
+
+    if not self._api_client.vertexai:
+      response_dict = _BatchJob_from_mldev(response_dict)
+
+    return_value = types.BatchJob._from_response(
+        response=response_dict, kwargs=parameter_model.model_dump()
+    )
+
+    self._api_client._verify_response(return_value)
+    return return_value
+
   async def get(
       self, *, name: str, config: Optional[types.GetBatchJobConfigOrDict] = None
   ) -> types.BatchJob:
@@ -2737,7 +3143,7 @@ class AsyncBatches(_api_module.BaseModule):
       self,
       *,
       model: str,
-      src: Union[types.BatchJobSourceUnion, types.BatchJobSourceUnionDict],
+      src: types.BatchJobSourceUnionDict,
       config: Optional[types.CreateBatchJobConfigOrDict] = None,
   ) -> types.BatchJob:
     """Creates a batch job asynchronously.
@@ -2762,85 +3168,120 @@ class AsyncBatches(_api_module.BaseModule):
           src="gs://path/to/input/data",
       )
     """
+    src = t.t_batch_job_source(self._api_client, src)
+
+    # Convert all dicts to Pydantic objects.
     parameter_model = types._CreateBatchJobParameters(
         model=model,
         src=src,
         config=config,
     )
+
     http_options: Optional[types.HttpOptions] = None
     if (
         parameter_model.config is not None
         and parameter_model.config.http_options is not None
     ):
       http_options = parameter_model.config.http_options
+
     if self._api_client.vertexai:
-      if isinstance(src, list):
-        raise ValueError(
-            'inlined_requests is not supported in Vertex AI. Please use'
-            ' Google Cloud Storage URI or BigQuery URI instead.'
-        )
+      config = _extra_utils.format_destination(src, parameter_model.config)
+      return await self._create(model=model, src=src, config=config)
+    elif src.inlined_requests is None:
+      return await self._create(model=model, src=src, config=config)

-      config = _extra_utils.format_destination(src, config)
-    else:
-      if isinstance(parameter_model.src, list) or (
-          not isinstance(parameter_model.src, str)
-          and parameter_model.src
-          and parameter_model.src.inlined_requests
-      ):
-        # Handle system instruction in InlinedRequests.
-        request_url_dict: Optional[dict[str, str]]
-        request_dict: dict[str, Any] = _CreateBatchJobParameters_to_mldev(
-            self._api_client, parameter_model
-        )
-        request_url_dict = request_dict.get('_url')
-        if request_url_dict:
-          path = '{model}:batchGenerateContent'.format_map(request_url_dict)
-        else:
-          path = '{model}:batchGenerateContent'
-        query_params = request_dict.get('_query')
-        if query_params:
-          path = f'{path}?{urlencode(query_params)}'
-        request_dict.pop('config', None)
-
-        request_dict = _common.convert_to_dict(request_dict)
-        request_dict = _common.encode_unserializable_types(request_dict)
-        # Move system instruction to 'request':
-        # {'systemInstruction': system_instruction}
-        requests = []
-        batch_dict = request_dict.get('batch')
-        if batch_dict and isinstance(batch_dict, dict):
-          input_config_dict = batch_dict.get('inputConfig')
-          if input_config_dict and isinstance(input_config_dict, dict):
-            requests_dict = input_config_dict.get('requests')
-            if requests_dict and isinstance(requests_dict, dict):
-              requests = requests_dict.get('requests')
-        new_requests = []
-        if requests:
-          for req in requests:
-            if req.get('systemInstruction'):
-              value = req.pop('systemInstruction')
-              req['request'].update({'systemInstruction': value})
-            new_requests.append(req)
-          request_dict['batch']['inputConfig']['requests'][  # type: ignore
-              'requests'
-          ] = new_requests
-
-        response = await self._api_client.async_request(
-            'post', path, request_dict, http_options
-        )
-
-        response_dict = '' if not response.body else json.loads(response.body)
-
-        response_dict = _BatchJob_from_mldev(response_dict)
-
-        return_value = types.BatchJob._from_response(
-            response=response_dict, kwargs=parameter_model.model_dump()
-        )
-
-        self._api_client._verify_response(return_value)
-        return return_value
-
-    return await self._create(model=model, src=src, config=config)
+    path, request_dict = _create_inlined_generate_content_request_dict(
+        self._api_client, parameter_model
+    )
+
+    response = await self._api_client.async_request(
+        'post', path, request_dict, http_options
+    )
+
+    response_dict = '' if not response.body else json.loads(response.body)
+    response_dict = _BatchJob_from_mldev(response_dict)
+
+    return_value = types.BatchJob._from_response(
+        response=response_dict, kwargs=parameter_model.model_dump()
+    )
+
+    self._api_client._verify_response(return_value)
+    return return_value
+
+  async def create_embeddings(
+      self,
+      *,
+      model: str,
+      src: types.EmbeddingsBatchJobSourceOrDict,
+      config: Optional[types.CreateEmbeddingsBatchJobConfigOrDict] = None,
+  ) -> types.BatchJob:
+    """**Experimental** Creates an embedding batch job asynchronously.
+
+    Args:
+      model (str): The model to use for the batch job.
+      src: The Gemini Developer API supports inlined requests or a file name.
+        Example: "files/file_name".
+      config (CreateEmbeddingsBatchJobConfig): Optional configuration for the
+        batch job.
+
+    Returns:
+      A BatchJob object that contains details about the batch job.
+
+    Usage:
+
+    .. code-block:: python
+
+      batch_job = await client.aio.batches.create_embeddings(
+          model="text-embedding-004",
+          src="files/my_embedding_input",
+      )
+      print(batch_job.state)
+    """
+    import warnings
+
+    warnings.warn(
+        'batches.create_embeddings() is experimental and may change without'
+        ' notice.',
+        category=_common.ExperimentalWarning,
+        stacklevel=2,  # This is crucial!
+    )
+    src = t.t_embedding_batch_job_source(self._api_client, src)
+
+    # Convert all dicts to Pydantic objects.
+    parameter_model = types._CreateEmbeddingsBatchJobParameters(
+        model=model,
+        src=src,
+        config=config,
+    )
+
+    http_options: Optional[types.HttpOptions] = None
+    if (
+        parameter_model.config is not None
+        and parameter_model.config.http_options is not None
+    ):
+      http_options = parameter_model.config.http_options
+
+    if self._api_client.vertexai:
+      raise ValueError('Vertex AI does not support batches.create_embeddings.')
+    elif src.inlined_requests is None:
+      return await self._create_embeddings(model=model, src=src, config=config)
+
+    path, request_dict = _create_inlined_embedding_request_dict(
+        self._api_client, parameter_model
+    )
+
+    response = await self._api_client.async_request(
+        'post', path, request_dict, http_options
+    )
+
+    response_dict = '' if not response.body else json.loads(response.body)
+    response_dict = _BatchJob_from_mldev(response_dict)
+
+    return_value = types.BatchJob._from_response(
+        response=response_dict, kwargs=parameter_model.model_dump()
+    )
+
+    self._api_client._verify_response(return_value)
+    return return_value

   async def list(
       self, *, config: Optional[types.ListBatchJobsConfigOrDict] = None
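
Note: the async variant mirrors the sync call; a minimal usage sketch, assuming an API-key client and an event loop:

    import asyncio
    from google import genai

    async def main() -> None:
        client = genai.Client()
        job = await client.aio.batches.create_embeddings(
            model='text-embedding-004',
            src='files/my_embedding_input',
        )
        print(job.name, job.state)

    asyncio.run(main())
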
@@ -2871,3 +3312,100 @@ class AsyncBatches(_api_module.BaseModule):
         await self._list(config=config),
         config,
     )
+
+
+def _create_inlined_generate_content_request_dict(
+    client: BaseApiClient, parameter_model: types._CreateBatchJobParameters
+) -> tuple[str, dict[str, Any]]:
+  request_url_dict: Optional[dict[str, str]]
+
+  request_dict: dict[str, Any] = _CreateBatchJobParameters_to_mldev(
+      client, parameter_model
+  )
+
+  request_url_dict = request_dict.get('_url')
+  if request_url_dict:
+    path = '{model}:batchGenerateContent'.format_map(request_url_dict)
+  else:
+    path = '{model}:batchGenerateContent'
+  query_params = request_dict.get('_query')
+  if query_params:
+    path = f'{path}?{urlencode(query_params)}'
+  request_dict.pop('config', None)
+
+  request_dict = _common.convert_to_dict(request_dict)
+  request_dict = _common.encode_unserializable_types(request_dict)
+  # Move system instruction to 'request':
+  # {'systemInstruction': system_instruction}
+  requests = []
+  batch_dict = request_dict.get('batch')
+  if batch_dict and isinstance(batch_dict, dict):
+    input_config_dict = batch_dict.get('inputConfig')
+    if input_config_dict and isinstance(input_config_dict, dict):
+      requests_dict = input_config_dict.get('requests')
+      if requests_dict and isinstance(requests_dict, dict):
+        requests = requests_dict.get('requests')
+  new_requests = []
+  if requests:
+    for req in requests:
+      if req.get('systemInstruction'):
+        value = req.pop('systemInstruction')
+        req['request'].update({'systemInstruction': value})
+      new_requests.append(req)
+    request_dict['batch']['inputConfig']['requests'][  # type: ignore
+        'requests'
+    ] = new_requests
+  return path, request_dict
+
+
+def _create_inlined_embedding_request_dict(
+    client: BaseApiClient,
+    parameter_model: types._CreateEmbeddingsBatchJobParameters,
+) -> tuple[str, dict[str, Any]]:
+  src = parameter_model.src
+  if not isinstance(src, types.EmbeddingsBatchJobSource):
+    raise ValueError(f'Invalid batch job source: {src}.')
+
+  request_url_dict: Optional[dict[str, str]]
+
+  request_dict: dict[str, Any] = _CreateEmbeddingsBatchJobParameters_to_mldev(
+      client, parameter_model
+  )
+
+  request_url_dict = request_dict.get('_url')
+  if request_url_dict:
+    path = '{model}:asyncBatchEmbedContent'.format_map(request_url_dict)
+  else:
+    path = '{model}:asyncBatchEmbedContent'
+  query_params = request_dict.get('_query')
+  if query_params:
+    path = f'{path}?{urlencode(query_params)}'
+
+  request_dict.pop('config', None)
+  request_dict.get('batch', {}).get('inputConfig', {}).get('requests', {}).pop(
+      'config', None
+  )
+
+  request_dict = _common.convert_to_dict(request_dict)
+  request_dict = _common.encode_unserializable_types(request_dict)
+
+  requests = []
+  batch_dict = request_dict.get('batch')
+  if batch_dict and isinstance(batch_dict, dict):
+    input_config_dict = batch_dict.get('inputConfig')
+    if input_config_dict and isinstance(input_config_dict, dict):
+      requests_dict = input_config_dict.get('requests')
+      if requests_dict and isinstance(requests_dict, dict):
+        requests = requests_dict.get('requests')
+  new_requests = []
+  if requests:
+    for req in requests:
+      for k in list(req.keys()):
+        if k != 'request':
+          req['request'][k] = req.pop(k)
+      new_requests.append(req)
+    request_dict['batch']['inputConfig']['requests'][  # type: ignore
+        'requests'
+    ] = new_requests
+
+  return path, request_dict
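
Note: the final loop in `_create_inlined_embedding_request_dict` folds sibling keys (e.g. `taskType`, written by `_EmbedContentConfig_to_mldev` under `requests[]`) into each nested `request` object. A worked example of that rewrite (values invented):

    req = {
        'request': {'content': {'parts': [{'text': 'hi'}]}},
        'taskType': 'RETRIEVAL_QUERY',
    }
    for k in list(req.keys()):
        if k != 'request':
            req['request'][k] = req.pop(k)
    # req == {'request': {'content': {'parts': [{'text': 'hi'}]},
    #                     'taskType': 'RETRIEVAL_QUERY'}}
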