google-genai 0.1.0__py3-none-any.whl → 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
google/genai/__init__.py CHANGED
@@ -17,6 +17,6 @@
 
 from .client import Client
 
-__version__ = '0.1.0'
+__version__ = '0.2.0'
 
 __all__ = ['Client']
google/genai/_api_client.py CHANGED
@@ -24,7 +24,7 @@ import json
 import os
 import sys
 from typing import Any, Optional, TypedDict, Union
-import urllib
+from urllib.parse import urlparse, urlunparse
 
 import google.auth
 import google.auth.credentials
@@ -51,7 +51,7 @@ class HttpOptions(TypedDict):
 def _append_library_version_headers(headers: dict[str, str]) -> None:
   """Appends the telemetry header to the headers dict."""
   # TODO: Automate revisions to the SDK library version.
-  library_label = f'google-genai-sdk/0.1.0'
+  library_label = f'google-genai-sdk/0.2.0'
   language_label = 'gl-python/' + sys.version.split()[0]
   version_header_value = f'{library_label} {language_label}'
   if (
@@ -89,6 +89,13 @@ def _patch_http_options(
   return copy_option
 
 
+def _join_url_path(base_url: str, path: str) -> str:
+  parsed_base = urlparse(base_url)
+  base_path = parsed_base.path[:-1] if parsed_base.path.endswith('/') else parsed_base.path
+  path = path[1:] if path.startswith('/') else path
+  return urlunparse(parsed_base._replace(path=base_path + '/' + path))
+
+
 @dataclass
 class HttpRequest:
   headers: dict[str, str]
@@ -216,9 +223,10 @@ class ApiClient:
     patched_http_options = self._http_options
     if self.vertexai and not path.startswith('projects/'):
       path = f'projects/{self.project}/locations/{self.location}/' + path
-    url = urllib.parse.urljoin(
+    url = _join_url_path(
         patched_http_options['base_url'],
-        patched_http_options['api_version'] + '/' + path)
+        patched_http_options['api_version'] + '/' + path,
+    )
     return HttpRequest(
         method=http_method,
         url=url,
@@ -265,13 +273,13 @@ class ApiClient:
     data = http_request.data
 
     http_session = requests.Session()
-    async_request = requests.Request(
+    request = requests.Request(
         method=http_request.method,
         url=http_request.url,
         headers=http_request.headers,
         data=data,
     ).prepare()
-    response = http_session.send(async_request, stream=stream)
+    response = http_session.send(request, stream=stream)
     errors.APIError.raise_for_response(response)
     return HttpResponse(
         response.headers, response if stream else [response.text]
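The switch from `urllib.parse.urljoin` to the new `_join_url_path` helper matters when the configured `base_url` carries its own path prefix, which `urljoin` silently drops unless the base ends with a slash. A minimal, self-contained sketch of the difference (the example URLs are illustrative, not taken from the package):

``` python
from urllib.parse import urljoin, urlparse, urlunparse

def join_url_path(base_url: str, path: str) -> str:
  # Mirrors the new helper: keep the base URL's path prefix and append the request path.
  parsed_base = urlparse(base_url)
  base_path = parsed_base.path[:-1] if parsed_base.path.endswith('/') else parsed_base.path
  path = path[1:] if path.startswith('/') else path
  return urlunparse(parsed_base._replace(path=base_path + '/' + path))

# urljoin drops the 'api' prefix because the base lacks a trailing slash:
print(urljoin('https://example.com/api', 'v1beta/models'))        # https://example.com/v1beta/models
print(join_url_path('https://example.com/api', 'v1beta/models'))  # https://example.com/api/v1beta/models
```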
google/genai/batches.py CHANGED
@@ -213,6 +213,30 @@ def _CreateBatchJobParameters_to_vertex(
   return to_object
 
 
+def _GetBatchJobConfig_to_mldev(
+    api_client: ApiClient,
+    from_object: Union[dict, object],
+    parent_object: dict = None,
+) -> dict:
+  to_object = {}
+  if getv(from_object, ['http_options']) is not None:
+    setv(to_object, ['httpOptions'], getv(from_object, ['http_options']))
+
+  return to_object
+
+
+def _GetBatchJobConfig_to_vertex(
+    api_client: ApiClient,
+    from_object: Union[dict, object],
+    parent_object: dict = None,
+) -> dict:
+  to_object = {}
+  if getv(from_object, ['http_options']) is not None:
+    setv(to_object, ['httpOptions'], getv(from_object, ['http_options']))
+
+  return to_object
+
+
 def _GetBatchJobParameters_to_mldev(
     api_client: ApiClient,
     from_object: Union[dict, object],
@@ -222,6 +246,15 @@ def _GetBatchJobParameters_to_mldev(
   if getv(from_object, ['name']):
     raise ValueError('name parameter is not supported in Google AI.')
 
+  if getv(from_object, ['config']) is not None:
+    setv(
+        to_object,
+        ['config'],
+        _GetBatchJobConfig_to_mldev(
+            api_client, getv(from_object, ['config']), to_object
+        ),
+    )
+
   return to_object
 
 
@@ -238,6 +271,39 @@ def _GetBatchJobParameters_to_vertex(
         t.t_batch_job_name(api_client, getv(from_object, ['name'])),
     )
 
+  if getv(from_object, ['config']) is not None:
+    setv(
+        to_object,
+        ['config'],
+        _GetBatchJobConfig_to_vertex(
+            api_client, getv(from_object, ['config']), to_object
+        ),
+    )
+
+  return to_object
+
+
+def _CancelBatchJobConfig_to_mldev(
+    api_client: ApiClient,
+    from_object: Union[dict, object],
+    parent_object: dict = None,
+) -> dict:
+  to_object = {}
+  if getv(from_object, ['http_options']) is not None:
+    setv(to_object, ['httpOptions'], getv(from_object, ['http_options']))
+
+  return to_object
+
+
+def _CancelBatchJobConfig_to_vertex(
+    api_client: ApiClient,
+    from_object: Union[dict, object],
+    parent_object: dict = None,
+) -> dict:
+  to_object = {}
+  if getv(from_object, ['http_options']) is not None:
+    setv(to_object, ['httpOptions'], getv(from_object, ['http_options']))
+
   return to_object
 
 
@@ -250,6 +316,15 @@ def _CancelBatchJobParameters_to_mldev(
   if getv(from_object, ['name']):
     raise ValueError('name parameter is not supported in Google AI.')
 
+  if getv(from_object, ['config']) is not None:
+    setv(
+        to_object,
+        ['config'],
+        _CancelBatchJobConfig_to_mldev(
+            api_client, getv(from_object, ['config']), to_object
+        ),
+    )
+
   return to_object
 
 
@@ -266,6 +341,15 @@ def _CancelBatchJobParameters_to_vertex(
         t.t_batch_job_name(api_client, getv(from_object, ['name'])),
     )
 
+  if getv(from_object, ['config']) is not None:
+    setv(
+        to_object,
+        ['config'],
+        _CancelBatchJobConfig_to_vertex(
+            api_client, getv(from_object, ['config']), to_object
+        ),
+    )
+
   return to_object
 
 
@@ -660,7 +744,9 @@ class Batches(_common.BaseModule):
     self.api_client._verify_response(return_value)
     return return_value
 
-  def get(self, *, name: str) -> types.BatchJob:
+  def get(
+      self, *, name: str, config: Optional[types.GetBatchJobConfigOrDict] = None
+  ) -> types.BatchJob:
     """Gets a batch job.
 
     Args:
@@ -681,6 +767,7 @@ class Batches(_common.BaseModule):
 
     parameter_model = types._GetBatchJobParameters(
         name=name,
+        config=config,
     )
 
     if not self.api_client.vertexai:
@@ -713,9 +800,15 @@ class Batches(_common.BaseModule):
     self.api_client._verify_response(return_value)
     return return_value
 
-  def cancel(self, *, name: str) -> None:
+  def cancel(
+      self,
+      *,
+      name: str,
+      config: Optional[types.CancelBatchJobConfigOrDict] = None,
+  ) -> None:
     parameter_model = types._CancelBatchJobParameters(
         name=name,
+        config=config,
     )
 
     if not self.api_client.vertexai:
@@ -947,7 +1040,9 @@ class AsyncBatches(_common.BaseModule):
     self.api_client._verify_response(return_value)
     return return_value
 
-  async def get(self, *, name: str) -> types.BatchJob:
+  async def get(
+      self, *, name: str, config: Optional[types.GetBatchJobConfigOrDict] = None
+  ) -> types.BatchJob:
     """Gets a batch job.
 
     Args:
@@ -968,6 +1063,7 @@ class AsyncBatches(_common.BaseModule):
 
     parameter_model = types._GetBatchJobParameters(
         name=name,
+        config=config,
    )
 
     if not self.api_client.vertexai:
@@ -1000,9 +1096,15 @@ class AsyncBatches(_common.BaseModule):
     self.api_client._verify_response(return_value)
     return return_value
 
-  async def cancel(self, *, name: str) -> None:
+  async def cancel(
+      self,
+      *,
+      name: str,
+      config: Optional[types.CancelBatchJobConfigOrDict] = None,
+  ) -> None:
     parameter_model = types._CancelBatchJobParameters(
         name=name,
+        config=config,
     )
 
     if not self.api_client.vertexai:
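Both the sync and async batch modules now accept an optional `config` whose `http_options` dict overrides HTTP request options for a single call. A hedged usage sketch, not taken from the package: the project, location, job name, and the `api_version` value are placeholders, and batch APIs are documented as Vertex AI only.

``` python
from google import genai

client = genai.Client(vertexai=True, project='my-project', location='us-central1')  # placeholders

# Per-request override through the new optional config argument.
job = client.batches.get(
    name='123456789',  # placeholder batch job name
    config={'http_options': {'api_version': 'v1'}},
)
client.batches.cancel(name=job.name, config={'http_options': {'api_version': 'v1'}})
```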
google/genai/chats.py CHANGED
@@ -54,6 +54,7 @@ class Chat(_BaseChat):
     Usage:
 
     .. code-block:: python
+
       chat = client.chats.create(model='gemini-1.5-flash')
       response = chat.send_message('tell me a story')
     """
@@ -132,6 +133,7 @@ class AsyncChat(_BaseChat):
     Usage:
 
     .. code-block:: python
+
       chat = client.chats.create(model='gemini-1.5-flash')
       response = chat.send_message('tell me a story')
     """
google/genai/files.py CHANGED
@@ -605,6 +605,7 @@ class Files(_common.BaseModule):
     Usage:
 
     .. code-block:: python
+
       pager = client.files.list(config={'page_size': 10})
       for file in pager.page:
         print(file.name)
@@ -712,6 +713,7 @@ class Files(_common.BaseModule):
     Usage:
 
     .. code-block:: python
+
       file = client.files.get(name='files/...')
       print(file.uri)
     """
@@ -766,6 +768,7 @@ class Files(_common.BaseModule):
     Usage:
 
     .. code-block:: python
+
       client.files.delete(name='files/...')
     """
 
@@ -916,6 +919,7 @@ class AsyncFiles(_common.BaseModule):
     Usage:
 
     .. code-block:: python
+
       pager = client.files.list(config={'page_size': 10})
       for file in pager.page:
         print(file.name)
@@ -1023,6 +1027,7 @@ class AsyncFiles(_common.BaseModule):
     Usage:
 
     .. code-block:: python
+
       file = client.files.get(name='files/...')
       print(file.uri)
     """
@@ -1077,6 +1082,7 @@ class AsyncFiles(_common.BaseModule):
     Usage:
 
     .. code-block:: python
+
       client.files.delete(name='files/...')
     """
 
google/genai/models.py CHANGED
@@ -880,6 +880,11 @@ def _GenerateContentConfig_to_mldev(
         getv(from_object, ['response_modalities']),
     )
 
+  if getv(from_object, ['media_resolution']):
+    raise ValueError(
+        'media_resolution parameter is not supported in Google AI.'
+    )
+
   if getv(from_object, ['speech_config']) is not None:
     setv(
         to_object,
@@ -1022,6 +1027,11 @@ def _GenerateContentConfig_to_vertex(
         getv(from_object, ['response_modalities']),
     )
 
+  if getv(from_object, ['media_resolution']) is not None:
+    setv(
+        to_object, ['mediaResolution'], getv(from_object, ['media_resolution'])
+    )
+
   if getv(from_object, ['speech_config']) is not None:
     setv(
         to_object,
@@ -3733,6 +3743,7 @@ class Models(_common.BaseModule):
     Usage:
 
     .. code-block:: python
+
       embeddings = client.models.embed_content(
           model= 'text-embedding-004',
           contents=[
@@ -4630,6 +4641,7 @@ class AsyncModels(_common.BaseModule):
     Usage:
 
    .. code-block:: python
+
       embeddings = client.models.embed_content(
           model= 'text-embedding-004',
          contents=[
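`GenerateContentConfig` gains a `media_resolution` field that is forwarded as `mediaResolution` on Vertex AI and rejected with a `ValueError` on the Google AI (ML Dev) backend. A hedged sketch of how it might be passed; the project, location, and prompt are placeholders, and the prompt is purely illustrative:

``` python
from google import genai
from google.genai import types

client = genai.Client(vertexai=True, project='my-project', location='us-central1')  # placeholders

response = client.models.generate_content(
    model='gemini-1.5-flash-002',
    contents='Summarize the attached clip.',  # illustrative prompt
    config=types.GenerateContentConfig(media_resolution='MEDIA_RESOLUTION_LOW'),
)
print(response.text)
```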
google/genai/tunings.py CHANGED
@@ -24,6 +24,30 @@ from ._common import set_value_by_path as setv
 from .pagers import AsyncPager, Pager
 
 
+def _GetTuningJobConfig_to_mldev(
+    api_client: ApiClient,
+    from_object: Union[dict, object],
+    parent_object: dict = None,
+) -> dict:
+  to_object = {}
+  if getv(from_object, ['http_options']) is not None:
+    setv(to_object, ['httpOptions'], getv(from_object, ['http_options']))
+
+  return to_object
+
+
+def _GetTuningJobConfig_to_vertex(
+    api_client: ApiClient,
+    from_object: Union[dict, object],
+    parent_object: dict = None,
+) -> dict:
+  to_object = {}
+  if getv(from_object, ['http_options']) is not None:
+    setv(to_object, ['httpOptions'], getv(from_object, ['http_options']))
+
+  return to_object
+
+
 def _GetTuningJobParameters_to_mldev(
     api_client: ApiClient,
     from_object: Union[dict, object],
@@ -33,6 +57,15 @@ def _GetTuningJobParameters_to_mldev(
   if getv(from_object, ['name']) is not None:
     setv(to_object, ['_url', 'name'], getv(from_object, ['name']))
 
+  if getv(from_object, ['config']) is not None:
+    setv(
+        to_object,
+        ['config'],
+        _GetTuningJobConfig_to_mldev(
+            api_client, getv(from_object, ['config']), to_object
+        ),
+    )
+
   return to_object
 
 
@@ -45,6 +78,15 @@ def _GetTuningJobParameters_to_vertex(
   if getv(from_object, ['name']) is not None:
     setv(to_object, ['_url', 'name'], getv(from_object, ['name']))
 
+  if getv(from_object, ['config']) is not None:
+    setv(
+        to_object,
+        ['config'],
+        _GetTuningJobConfig_to_vertex(
+            api_client, getv(from_object, ['config']), to_object
+        ),
+    )
+
   return to_object
 
 
@@ -233,6 +275,9 @@ def _CreateTuningJobConfig_to_mldev(
     parent_object: dict = None,
 ) -> dict:
   to_object = {}
+  if getv(from_object, ['http_options']) is not None:
+    setv(to_object, ['httpOptions'], getv(from_object, ['http_options']))
+
   if getv(from_object, ['validation_dataset']):
     raise ValueError(
         'validation_dataset parameter is not supported in Google AI.'
@@ -288,6 +333,9 @@ def _CreateTuningJobConfig_to_vertex(
     parent_object: dict = None,
 ) -> dict:
   to_object = {}
+  if getv(from_object, ['http_options']) is not None:
+    setv(to_object, ['httpOptions'], getv(from_object, ['http_options']))
+
   if getv(from_object, ['validation_dataset']) is not None:
     setv(
         parent_object,
@@ -455,6 +503,9 @@ def _CreateDistillationJobConfig_to_mldev(
     parent_object: dict = None,
 ) -> dict:
   to_object = {}
+  if getv(from_object, ['http_options']) is not None:
+    setv(to_object, ['httpOptions'], getv(from_object, ['http_options']))
+
   if getv(from_object, ['validation_dataset']):
     raise ValueError(
         'validation_dataset parameter is not supported in Google AI.'
@@ -498,6 +549,9 @@ def _CreateDistillationJobConfig_to_vertex(
     parent_object: dict = None,
 ) -> dict:
   to_object = {}
+  if getv(from_object, ['http_options']) is not None:
+    setv(to_object, ['httpOptions'], getv(from_object, ['http_options']))
+
   if getv(from_object, ['validation_dataset']) is not None:
     setv(
         parent_object,
@@ -896,7 +950,12 @@ def _TuningJobOrOperation_from_vertex(
 
 class Tunings(_common.BaseModule):
 
-  def _get(self, *, name: str) -> types.TuningJob:
+  def _get(
+      self,
+      *,
+      name: str,
+      config: Optional[types.GetTuningJobConfigOrDict] = None,
+  ) -> types.TuningJob:
     """Gets a TuningJob.
 
     Args:
@@ -908,6 +967,7 @@ class Tunings(_common.BaseModule):
 
     parameter_model = types._GetTuningJobParameters(
         name=name,
+        config=config,
     )
 
     if self.api_client.vertexai:
@@ -1129,8 +1189,13 @@ class Tunings(_common.BaseModule):
         config,
     )
 
-  def get(self, *, name: str) -> types.TuningJob:
-    job = self._get(name=name)
+  def get(
+      self,
+      *,
+      name: str,
+      config: Optional[types.GetTuningJobConfigOrDict] = None,
+  ) -> types.TuningJob:
+    job = self._get(name=name, config=config)
     if job.experiment and self.api_client.vertexai:
       _IpythonUtils.display_experiment_button(
           experiment=job.experiment,
@@ -1157,7 +1222,12 @@
 
 class AsyncTunings(_common.BaseModule):
 
-  async def _get(self, *, name: str) -> types.TuningJob:
+  async def _get(
+      self,
+      *,
+      name: str,
+      config: Optional[types.GetTuningJobConfigOrDict] = None,
+  ) -> types.TuningJob:
     """Gets a TuningJob.
 
     Args:
@@ -1169,6 +1239,7 @@ class AsyncTunings(_common.BaseModule):
 
     parameter_model = types._GetTuningJobParameters(
         name=name,
+        config=config,
     )
 
     if self.api_client.vertexai:
@@ -1390,8 +1461,13 @@ class AsyncTunings(_common.BaseModule):
         config,
     )
 
-  async def get(self, *, name: str) -> types.TuningJob:
-    job = await self._get(name=name)
+  async def get(
+      self,
+      *,
+      name: str,
+      config: Optional[types.GetTuningJobConfigOrDict] = None,
+  ) -> types.TuningJob:
+    job = await self._get(name=name, config=config)
     if job.experiment and self.api_client.vertexai:
       _IpythonUtils.display_experiment_button(
           experiment=job.experiment,
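With `http_options` now accepted on `CreateTuningJobConfig` and a `config` argument on `get`, callers can override request options per tuning call. A hedged sketch under stated assumptions: the `tune` signature follows the README usage shown later in this diff, the GCS path is the sample dataset from that README, and the project, location, and `api_version` value are placeholders.

``` python
from google import genai
from google.genai import types

client = genai.Client(vertexai=True, project='my-project', location='us-central1')  # placeholders

tuning_job = client.tunings.tune(
    base_model='gemini-1.5-pro-002',
    training_dataset=types.TuningDataset(
        gcs_uri='gs://cloud-samples-data/ai-platform/generative_ai/gemini-1_5/text/sft_train_data.jsonl',
    ),
    config=types.CreateTuningJobConfig(http_options={'api_version': 'v1'}),
)
tuning_job = client.tunings.get(
    name=tuning_job.name, config={'http_options': {'api_version': 'v1'}}
)
```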
google/genai/types.py CHANGED
@@ -154,6 +154,14 @@ DynamicRetrievalConfigMode = Literal["MODE_UNSPECIFIED", "MODE_DYNAMIC"]
 FunctionCallingConfigMode = Literal["MODE_UNSPECIFIED", "AUTO", "ANY", "NONE"]
 
 
+MediaResolution = Literal[
+    "MEDIA_RESOLUTION_UNSPECIFIED",
+    "MEDIA_RESOLUTION_LOW",
+    "MEDIA_RESOLUTION_MEDIUM",
+    "MEDIA_RESOLUTION_HIGH",
+]
+
+
 SafetyFilterLevel = Literal[
     "BLOCK_LOW_AND_ABOVE",
     "BLOCK_MEDIUM_AND_ABOVE",
@@ -1524,6 +1532,11 @@ class GenerateContentConfig(_common.BaseModel):
       modalities that the model can return.
       """,
   )
+  media_resolution: Optional[MediaResolution] = Field(
+      default=None,
+      description="""If specified, the media resolution specified will be used.
+      """,
+  )
   speech_config: Optional[SpeechConfigUnion] = Field(
       default=None,
       description="""The speech generation configuration.
@@ -1647,6 +1660,10 @@ class GenerateContentConfigDict(TypedDict, total=False):
      modalities that the model can return.
  """
 
+  media_resolution: Optional[MediaResolution]
+  """If specified, the media resolution specified will be used.
+  """
+
  speech_config: Optional[SpeechConfigUnionDict]
  """The speech generation configuration.
  """
@@ -4210,10 +4227,31 @@ ComputeTokensResponseOrDict = Union[
 ]
 
 
+class GetTuningJobConfig(_common.BaseModel):
+  """Optional parameters for tunings.get method."""
+
+  http_options: Optional[dict[str, Any]] = Field(
+      default=None, description="""Used to override HTTP request options."""
+  )
+
+
+class GetTuningJobConfigDict(TypedDict, total=False):
+  """Optional parameters for tunings.get method."""
+
+  http_options: Optional[dict[str, Any]]
+  """Used to override HTTP request options."""
+
+
+GetTuningJobConfigOrDict = Union[GetTuningJobConfig, GetTuningJobConfigDict]
+
+
 class _GetTuningJobParameters(_common.BaseModel):
   """Parameters for the get method."""
 
   name: Optional[str] = Field(default=None, description="""""")
+  config: Optional[GetTuningJobConfig] = Field(
+      default=None, description="""Optional parameters for the request."""
+  )
 
 
 class _GetTuningJobParametersDict(TypedDict, total=False):
@@ -4222,6 +4260,9 @@ class _GetTuningJobParametersDict(TypedDict, total=False):
   name: Optional[str]
   """"""
 
+  config: Optional[GetTuningJobConfigDict]
+  """Optional parameters for the request."""
+
 
 _GetTuningJobParametersOrDict = Union[
     _GetTuningJobParameters, _GetTuningJobParametersDict
@@ -5242,6 +5283,9 @@ TuningValidationDatasetOrDict = Union[
 class CreateTuningJobConfig(_common.BaseModel):
   """Supervised fine-tuning job creation request - optional fields."""
 
+  http_options: Optional[dict[str, Any]] = Field(
+      default=None, description="""Used to override HTTP request options."""
+  )
   validation_dataset: Optional[TuningValidationDataset] = Field(
       default=None,
       description="""Cloud Storage path to file containing training dataset for tuning. The dataset must be formatted as a JSONL file.""",
@@ -5277,6 +5321,9 @@ class CreateTuningJobConfig(_common.BaseModel):
 class CreateTuningJobConfigDict(TypedDict, total=False):
   """Supervised fine-tuning job creation request - optional fields."""
 
+  http_options: Optional[dict[str, Any]]
+  """Used to override HTTP request options."""
+
   validation_dataset: Optional[TuningValidationDatasetDict]
   """Cloud Storage path to file containing training dataset for tuning. The dataset must be formatted as a JSONL file."""
 
@@ -5402,6 +5449,9 @@ DistillationValidationDatasetOrDict = Union[
 class CreateDistillationJobConfig(_common.BaseModel):
   """Distillation job creation request - optional fields."""
 
+  http_options: Optional[dict[str, Any]] = Field(
+      default=None, description="""Used to override HTTP request options."""
+  )
   validation_dataset: Optional[DistillationValidationDataset] = Field(
       default=None,
       description="""Cloud Storage path to file containing validation dataset for tuning. The dataset must be formatted as a JSONL file.""",
@@ -5430,6 +5480,9 @@ class CreateDistillationJobConfig(_common.BaseModel):
 class CreateDistillationJobConfigDict(TypedDict, total=False):
   """Distillation job creation request - optional fields."""
 
+  http_options: Optional[dict[str, Any]]
+  """Used to override HTTP request options."""
+
   validation_dataset: Optional[DistillationValidationDatasetDict]
   """Cloud Storage path to file containing validation dataset for tuning. The dataset must be formatted as a JSONL file."""
 
@@ -6621,6 +6674,24 @@ class BatchJobDict(TypedDict, total=False):
 BatchJobOrDict = Union[BatchJob, BatchJobDict]
 
 
+class GetBatchJobConfig(_common.BaseModel):
+  """Optional parameters."""
+
+  http_options: Optional[dict[str, Any]] = Field(
+      default=None, description="""Used to override HTTP request options."""
+  )
+
+
+class GetBatchJobConfigDict(TypedDict, total=False):
+  """Optional parameters."""
+
+  http_options: Optional[dict[str, Any]]
+  """Used to override HTTP request options."""
+
+
+GetBatchJobConfigOrDict = Union[GetBatchJobConfig, GetBatchJobConfigDict]
+
+
 class _GetBatchJobParameters(_common.BaseModel):
   """Config class for batches.get parameters."""
 
@@ -6631,6 +6702,9 @@ class _GetBatchJobParameters(_common.BaseModel):
       or "456" when project and location are initialized in the client.
       """,
   )
+  config: Optional[GetBatchJobConfig] = Field(
+      default=None, description="""Optional parameters for the request."""
+  )
 
 
 class _GetBatchJobParametersDict(TypedDict, total=False):
@@ -6642,12 +6716,35 @@ class _GetBatchJobParametersDict(TypedDict, total=False):
      or "456" when project and location are initialized in the client.
      """
 
+  config: Optional[GetBatchJobConfigDict]
+  """Optional parameters for the request."""
+
 
 _GetBatchJobParametersOrDict = Union[
     _GetBatchJobParameters, _GetBatchJobParametersDict
 ]
 
 
+class CancelBatchJobConfig(_common.BaseModel):
+  """Optional parameters."""
+
+  http_options: Optional[dict[str, Any]] = Field(
+      default=None, description="""Used to override HTTP request options."""
+  )
+
+
+class CancelBatchJobConfigDict(TypedDict, total=False):
+  """Optional parameters."""
+
+  http_options: Optional[dict[str, Any]]
+  """Used to override HTTP request options."""
+
+
+CancelBatchJobConfigOrDict = Union[
+    CancelBatchJobConfig, CancelBatchJobConfigDict
+]
+
+
 class _CancelBatchJobParameters(_common.BaseModel):
   """Config class for batches.cancel parameters."""
 
@@ -6658,6 +6755,9 @@ class _CancelBatchJobParameters(_common.BaseModel):
       or "456" when project and location are initialized in the client.
       """,
   )
+  config: Optional[CancelBatchJobConfig] = Field(
+      default=None, description="""Optional parameters for the request."""
+  )
 
 
 class _CancelBatchJobParametersDict(TypedDict, total=False):
@@ -6669,6 +6769,9 @@ class _CancelBatchJobParametersDict(TypedDict, total=False):
      or "456" when project and location are initialized in the client.
      """
 
+  config: Optional[CancelBatchJobConfigDict]
+  """Optional parameters for the request."""
+
 
 _CancelBatchJobParametersOrDict = Union[
     _CancelBatchJobParameters, _CancelBatchJobParametersDict
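Each of the new config types is declared twice, as a Pydantic model and as a TypedDict, with an `...OrDict` union tying them together, so the same override can be passed in either form. A brief sketch (the `api_version` value is a placeholder):

``` python
from google.genai import types

# Two equivalent spellings of the same per-request override.
as_model = types.GetBatchJobConfig(http_options={'api_version': 'v1'})
as_dict: types.GetBatchJobConfigDict = {'http_options': {'api_version': 'v1'}}
```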
google_genai-0.1.0.dist-info/METADATA → google_genai-0.2.0.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: google-genai
-Version: 0.1.0
+Version: 0.2.0
 Summary: GenAI Python SDK
 Author-email: Google LLC <googleapis-packages@google.com>
 License: Apache-2.0
@@ -28,6 +28,13 @@ Requires-Dist: websockets<15.0dev,>=13.0
 
 # Google Gen AI SDK
 
+[![PyPI version](https://img.shields.io/pypi/v/google-genai.svg)](https://pypi.org/project/google-genai/)
+
+--------
+**Documentation:** https://googleapis.github.io/python-genai/
+
+-----
+
 ## Imports
 
 ``` python
@@ -504,7 +511,7 @@ client.models.generate_content(
 ## Tunings
 
 `client.tunings` contains tuning job APIs and supports supervised fine
-tuning through `tune` and distiallation through `distill`
+tuning through `tune` and distillation through `distill`
 
 ### Tune
 
@@ -512,7 +519,7 @@ tuning through `tune` and distillation through `distill`
 - Google AI supports tuning from inline examples
 
 ``` python
-if client._api_client.vertexai:
+if client.vertexai:
     model = 'gemini-1.5-pro-002'
     training_dataset=types.TuningDataset(
         gcs_uri='gs://cloud-samples-data/ai-platform/generative_ai/gemini-1_5/text/sft_train_data.jsonl',
@@ -591,9 +598,9 @@ for model in client.models.list(config={'page_size': 10}):
 ``` python
 pager = client.models.list(config={'page_size': 10})
 print(pager.page_size)
-print(pager.page[0])
+print(pager[0])
 pager.next_page()
-print(pager.page[0])
+print(pager[0])
 ```
 
 #### Async
@@ -606,15 +613,15 @@ async for job in await client.aio.models.list(config={'page_size': 10}):
 ``` python
 async_pager = await client.aio.models.list(config={'page_size': 10})
 print(async_pager.page_size)
-print(async_pager.page[0])
+print(async_pager[0])
 await async_pager.next_page()
-print(async_pager.page[0])
+print(async_pager[0])
 ```
 
 ### Update Tuned Model
 
 ``` python
-model = pager.page[0]
+model = pager[0]
 
 model = client.models.update(
     model=model.name,
@@ -674,9 +681,9 @@ for job in client.tunings.list(config={'page_size': 10}):
 ``` python
 pager = client.tunings.list(config={'page_size': 10})
 print(pager.page_size)
-print(pager.page[0])
+print(pager[0])
 pager.next_page()
-print(pager.page[0])
+print(pager[0])
 ```
 
 #### Async
@@ -689,9 +696,9 @@ async for job in await client.aio.tunings.list(config={'page_size': 10}):
 ``` python
 async_pager = await client.aio.tunings.list(config={'page_size': 10})
 print(async_pager.page_size)
-print(async_pager.page[0])
+print(async_pager[0])
 await async_pager.next_page()
-print(async_pager.page[0])
+print(async_pager[0])
 ```
 
 ## Batch Prediction
@@ -705,6 +712,8 @@ Only supported in Vertex AI.
 job = client.batches.create(
     model='gemini-1.5-flash-002',
     src='bq://my-project.my-dataset.my-table',
+)
+
 job
 ```
 
@@ -741,9 +750,9 @@ for job in client.batches.list(config={'page_size': 10}):
 ``` python
 pager = client.batches.list(config={'page_size': 10})
 print(pager.page_size)
-print(pager.page[0])
+print(pager[0])
 pager.next_page()
-print(pager.page[0])
+print(pager[0])
 ```
 
 #### Async
@@ -754,11 +763,11 @@ async for job in await client.aio.batches.list(config={'page_size': 10}):
 ```
 
 ``` python
-async_pager = await client.aio.tunings.list(config={'page_size': 10})
+async_pager = await client.aio.batches.list(config={'page_size': 10})
 print(async_pager.page_size)
-print(async_pager.page[0])
+print(async_pager[0])
 await async_pager.next_page()
-print(async_pager.page[0])
+print(async_pager[0])
 ```
 
 ### Delete
google_genai-0.1.0.dist-info/RECORD → google_genai-0.2.0.dist-info/RECORD CHANGED
@@ -1,24 +1,24 @@
-google/genai/__init__.py,sha256=BkkJfmiA_SwVZvz_tSjJaEYEVh75HXC9UcH91FRSjUU,674
-google/genai/_api_client.py,sha256=9fSVC8elIVV-EwU1-GmC95OxHptdWrIrLRNs6Dvcs8Y,15638
+google/genai/__init__.py,sha256=bO4TBLSOack_93tDti_USNTkE8cvkLUn45TH-WS-HOE,674
+google/genai/_api_client.py,sha256=74qm-UTxDQ3-KCb61eqD3SAM379MrsQw8ly6GBLoDB8,15963
 google/genai/_automatic_function_calling_util.py,sha256=E25_66RH3DbDIucq7x-93XWPPBwB9FnzwD1NCGyPrjM,10242
 google/genai/_common.py,sha256=Yj5cBkq5QRNFSBqvpB949Rjo7cbIhdtKp5dJxMW_I6I,7971
 google/genai/_extra_utils.py,sha256=GQZnraFCrMffqrBEpurdcBmgrltRsnYgMizt-Ok6xX8,11098
 google/genai/_replay_api_client.py,sha256=QPNg4SBpOLS58bx-kuJQngxy1tbjMpCpJzmImCwYePA,16226
 google/genai/_test_api_client.py,sha256=p771T27icmzENxKtyNDwPG1sTI7jaoJNFPwlwq9GK6o,4759
 google/genai/_transformers.py,sha256=_zWNr7zFTrUFniECYaZUn0n4TdioLpj783l3-z1XvIE,13443
-google/genai/batches.py,sha256=gAuFZOKosJj-GYHvftFdbBuw6Y9VkFC9scOjeUssAC0,34632
+google/genai/batches.py,sha256=Wi4Kptampp2WepAqv_AawwNCR6MKVhLKmzJdYXDQ_aE,37148
 google/genai/caches.py,sha256=LJm2raykec7_iCHsVbEtX4v942mR-OSQvxTVKcBN2RA,53434
-google/genai/chats.py,sha256=QcOqW87D5bpGQ_78xTu9y1G28dQx9iUEyqNFwPsAFdU,5686
+google/genai/chats.py,sha256=x-vCXrsxZ8kdEZ_0ZDfrBQnQ9urCr42x3urP0OXHyTo,5688
 google/genai/client.py,sha256=HH_lYnjPOwW-4Vgynyw4K8cwurT2g578Dc51H_uk7GY,9244
 google/genai/errors.py,sha256=TrlUk1jz7r1aN1lrL3FZZ30LU4iMfSonm1ZwEAk07k4,3048
-google/genai/files.py,sha256=9B1Xsb2R7jBAU3Ot-1Z4YJjC9gQEywzAfLT24yHN4EI,35567
+google/genai/files.py,sha256=dn3q8P9aTN9OG3PtA4AYDs9hF6Uk-jkMjgAW7dSlt_4,35573
 google/genai/live.py,sha256=T-pOtq7k43wE2VjQzqLrx-kqhotS66I2PY_NHBdv9G8,22056
-google/genai/models.py,sha256=1slDoZuGq91B7erpxeBaFGfv1PAcIPV4j1Gqg-aHhEs,154534
+google/genai/models.py,sha256=t5XgwlgkNrQKb6eww0oBGzjMiMQaj-BQedc8lVdJHz4,154834
 google/genai/pagers.py,sha256=hSHd-gLvEzYWwK85i8EcFNWUMKtszUs7Nw2r3L7d6_U,6686
-google/genai/tunings.py,sha256=EYA9FykWZSkhcHkHcCO7VvYkFBVzQCWYX2umtIhlhQQ,47018
-google/genai/types.py,sha256=73tKrlpCmV4PRb2HMYHVZXmiHTS2iK-CL89uJvNE54Q,260869
-google_genai-0.1.0.dist-info/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
-google_genai-0.1.0.dist-info/METADATA,sha256=1AAUKL2kZ6sGbkfQdUr-rSWttYg1qFAVXe9mneSikSc,17260
-google_genai-0.1.0.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
-google_genai-0.1.0.dist-info/top_level.txt,sha256=_1QvSJIhFAGfxb79D6DhB7SUw2X6T4rwnz_LLrbcD3c,7
-google_genai-0.1.0.dist-info/RECORD,,
+google/genai/tunings.py,sha256=tFTSEaECKZ6xeYcxUTIKUmXqPoDymYP3eyTcEKjnPa4,49010
+google/genai/types.py,sha256=mIjtCSXbp6CRL5iEhtdxczoMTtyQ1EKYpBlzLvGIedY,263841
+google_genai-0.2.0.dist-info/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
+google_genai-0.2.0.dist-info/METADATA,sha256=qhc4AtoMxFa_-BIEzNTw36s-cE1imA3zOoILYIpL7as,17371
+google_genai-0.2.0.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
+google_genai-0.2.0.dist-info/top_level.txt,sha256=_1QvSJIhFAGfxb79D6DhB7SUw2X6T4rwnz_LLrbcD3c,7
+google_genai-0.2.0.dist-info/RECORD,,