google-genai 1.25.0__py3-none-any.whl → 1.27.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
google/genai/types.py CHANGED
@@ -15,6 +15,7 @@
15
15
 
16
16
  # Code generated by the Google Gen AI SDK generator DO NOT EDIT.
17
17
 
18
+ from abc import ABC, abstractmethod
18
19
  import datetime
19
20
  from enum import Enum, EnumMeta
20
21
  import inspect
@@ -26,7 +27,7 @@ import typing
26
27
  from typing import Any, Callable, Literal, Optional, Sequence, Union, _UnionGenericAlias # type: ignore
27
28
  import pydantic
28
29
  from pydantic import Field
29
- from typing_extensions import TypedDict
30
+ from typing_extensions import Self, TypedDict
30
31
  from . import _common
31
32
 
32
33
  if sys.version_info >= (3, 10):
@@ -1269,7 +1270,10 @@ class HttpOptions(_common.BaseModel):
1269
1270
  )
1270
1271
  extra_body: Optional[dict[str, Any]] = Field(
1271
1272
  default=None,
1272
- description="""Extra parameters to add to the request body.""",
1273
+ description="""Extra parameters to add to the request body.
1274
+ The structure must match the backend API's request structure.
1275
+ - VertexAI backend API docs: https://cloud.google.com/vertex-ai/docs/reference/rest
1276
+ - GeminiAPI backend API docs: https://ai.google.dev/api/rest""",
1273
1277
  )
1274
1278
  retry_options: Optional[HttpRetryOptions] = Field(
1275
1279
  default=None, description="""HTTP retry options for the request."""
@@ -1298,7 +1302,10 @@ class HttpOptionsDict(TypedDict, total=False):
1298
1302
  """Args passed to the async HTTP client."""
1299
1303
 
1300
1304
  extra_body: Optional[dict[str, Any]]
1301
- """Extra parameters to add to the request body."""
1305
+ """Extra parameters to add to the request body.
1306
+ The structure must match the backend API's request structure.
1307
+ - VertexAI backend API docs: https://cloud.google.com/vertex-ai/docs/reference/rest
1308
+ - GeminiAPI backend API docs: https://ai.google.dev/api/rest"""
1302
1309
 
1303
1310
  retry_options: Optional[HttpRetryOptionsDict]
1304
1311
  """HTTP retry options for the request."""
@@ -5713,6 +5720,12 @@ class GenerateImagesConfig(_common.BaseModel):
5713
5720
  description="""Whether to add a watermark to the generated images.
5714
5721
  """,
5715
5722
  )
5723
+ image_size: Optional[str] = Field(
5724
+ default=None,
5725
+ description="""The size of the largest dimension of the generated image.
5726
+ Supported sizes are 1K and 2K (not supported for Imagen 3 models).
5727
+ """,
5728
+ )
5716
5729
  enhance_prompt: Optional[bool] = Field(
5717
5730
  default=None,
5718
5731
  description="""Whether to use the prompt rewriting logic.
@@ -5789,6 +5802,11 @@ class GenerateImagesConfigDict(TypedDict, total=False):
5789
5802
  """Whether to add a watermark to the generated images.
5790
5803
  """
5791
5804
 
5805
+ image_size: Optional[str]
5806
+ """The size of the largest dimension of the generated image.
5807
+ Supported sizes are 1K and 2K (not supported for Imagen 3 models).
5808
+ """
5809
+
5792
5810
  enhance_prompt: Optional[bool]
5793
5811
  """Whether to use the prompt rewriting logic.
5794
5812
  """
@@ -6381,6 +6399,11 @@ class EditImageConfig(_common.BaseModel):
6381
6399
  only).
6382
6400
  """,
6383
6401
  )
6402
+ add_watermark: Optional[bool] = Field(
6403
+ default=None,
6404
+ description="""Whether to add a watermark to the generated images.
6405
+ """,
6406
+ )
6384
6407
  edit_mode: Optional[EditMode] = Field(
6385
6408
  default=None,
6386
6409
  description="""Describes the editing mode for the request.""",
@@ -6457,6 +6480,10 @@ class EditImageConfigDict(TypedDict, total=False):
6457
6480
  only).
6458
6481
  """
6459
6482
 
6483
+ add_watermark: Optional[bool]
6484
+ """Whether to add a watermark to the generated images.
6485
+ """
6486
+
6460
6487
  edit_mode: Optional[EditMode]
6461
6488
  """Describes the editing mode for the request."""
6462
6489
 
@@ -6957,12 +6984,18 @@ _ListModelsParametersOrDict = Union[
6957
6984
 
6958
6985
  class ListModelsResponse(_common.BaseModel):
6959
6986
 
6987
+ sdk_http_response: Optional[HttpResponse] = Field(
6988
+ default=None, description="""Used to retain the full HTTP response."""
6989
+ )
6960
6990
  next_page_token: Optional[str] = Field(default=None, description="""""")
6961
6991
  models: Optional[list[Model]] = Field(default=None, description="""""")
6962
6992
 
6963
6993
 
6964
6994
  class ListModelsResponseDict(TypedDict, total=False):
6965
6995
 
6996
+ sdk_http_response: Optional[HttpResponseDict]
6997
+ """Used to retain the full HTTP response."""
6998
+
6966
6999
  next_page_token: Optional[str]
6967
7000
  """"""
6968
7001
 
@@ -7613,7 +7646,7 @@ class GenerateVideosConfig(_common.BaseModel):
7613
7646
  )
7614
7647
  resolution: Optional[str] = Field(
7615
7648
  default=None,
7616
- description="""The resolution for the generated video. 1280x720, 1920x1080 are supported.""",
7649
+ description="""The resolution for the generated video. 720p and 1080p are supported.""",
7617
7650
  )
7618
7651
  person_generation: Optional[str] = Field(
7619
7652
  default=None,
@@ -7669,7 +7702,7 @@ class GenerateVideosConfigDict(TypedDict, total=False):
7669
7702
  """The aspect ratio for the generated video. 16:9 (landscape) and 9:16 (portrait) are supported."""
7670
7703
 
7671
7704
  resolution: Optional[str]
7672
- """The resolution for the generated video. 1280x720, 1920x1080 are supported."""
7705
+ """The resolution for the generated video. 720p and 1080p are supported."""
7673
7706
 
7674
7707
  person_generation: Optional[str]
7675
7708
  """Whether allow to generate person videos, and restrict to specific ages. Supported values are: dont_allow, allow_adult."""
@@ -7785,26 +7818,8 @@ class GenerateVideosResponse(_common.BaseModel):
7785
7818
  )
7786
7819
 
7787
7820
 
7788
- class GenerateVideosResponseDict(TypedDict, total=False):
7789
- """Response with generated videos."""
7790
-
7791
- generated_videos: Optional[list[GeneratedVideoDict]]
7792
- """List of the generated videos"""
7793
-
7794
- rai_media_filtered_count: Optional[int]
7795
- """Returns if any videos were filtered due to RAI policies."""
7796
-
7797
- rai_media_filtered_reasons: Optional[list[str]]
7798
- """Returns rai failure reasons if any."""
7799
-
7800
-
7801
- GenerateVideosResponseOrDict = Union[
7802
- GenerateVideosResponse, GenerateVideosResponseDict
7803
- ]
7804
-
7805
-
7806
- class GenerateVideosOperation(_common.BaseModel):
7807
- """A video generation operation."""
7821
+ class Operation(ABC):
7822
+ """A long-running operation."""
7808
7823
 
7809
7824
  name: Optional[str] = Field(
7810
7825
  default=None,
@@ -7822,38 +7837,101 @@ class GenerateVideosOperation(_common.BaseModel):
7822
7837
  default=None,
7823
7838
  description="""The error result of the operation in case of failure or cancellation.""",
7824
7839
  )
7840
+
7841
+ @classmethod
7842
+ @abstractmethod
7843
+ def from_api_response(
7844
+ cls, api_response: Any, is_vertex_ai: bool = False
7845
+ ) -> Self:
7846
+ """Creates an Operation from an API response."""
7847
+ pass
7848
+
7849
+
7850
+ class GenerateVideosOperation(_common.BaseModel, Operation):
7851
+ """A video generation operation."""
7852
+
7825
7853
  response: Optional[GenerateVideosResponse] = Field(
7826
7854
  default=None, description="""The generated videos."""
7827
7855
  )
7856
+
7828
7857
  result: Optional[GenerateVideosResponse] = Field(
7829
7858
  default=None, description="""The generated videos."""
7830
7859
  )
7831
7860
 
7861
+ @classmethod
7862
+ def from_api_response(
7863
+ cls, api_response: Any, is_vertex_ai: bool = False
7864
+ ) -> Self:
7865
+ """Instantiates a GenerateVideosOperation from an API response."""
7866
+ new_operation = cls()
7867
+ new_operation.name = api_response.get('name', None)
7868
+ new_operation.metadata = api_response.get('metadata', None)
7869
+ new_operation.done = api_response.get('done', None)
7870
+ new_operation.error = api_response.get('error', None)
7871
+
7872
+ if is_vertex_ai:
7873
+ if api_response.get('response', None) is not None:
7874
+ new_operation.response = GenerateVideosResponse(
7875
+ generated_videos=[
7876
+ GeneratedVideo(
7877
+ video=Video(
7878
+ uri=video.get('gcsUri', None),
7879
+ video_bytes=video.get('bytesBase64Encoded', None),
7880
+ mime_type=video.get('mimeType', None),
7881
+ )
7882
+ )
7883
+ for video in api_response.get('response', {}).get('videos', [])
7884
+ ],
7885
+ rai_media_filtered_count=api_response.get('response', {}).get(
7886
+ 'raiMediaFilteredCount', None
7887
+ ),
7888
+ rai_media_filtered_reasons=api_response.get('response', {}).get(
7889
+ 'raiMediaFilteredReasons', None
7890
+ ),
7891
+ )
7892
+ else:
7893
+ if api_response.get('response', None) is not None:
7894
+ new_operation.response = GenerateVideosResponse(
7895
+ generated_videos=[
7896
+ GeneratedVideo(
7897
+ video=Video(
7898
+ uri=video.get('video', {}).get('uri', None),
7899
+ video_bytes=video.get('video', {}).get(
7900
+ 'encodedVideo', None
7901
+ ),
7902
+ mime_type=video.get('encoding', None),
7903
+ )
7904
+ )
7905
+ for video in api_response.get('response', {})
7906
+ .get('generateVideoResponse', {})
7907
+ .get('generatedSamples', [])
7908
+ ],
7909
+ rai_media_filtered_count=api_response.get('response', {})
7910
+ .get('generateVideoResponse', {})
7911
+ .get('raiMediaFilteredCount', None),
7912
+ rai_media_filtered_reasons=api_response.get('response', {})
7913
+ .get('generateVideoResponse', {})
7914
+ .get('raiMediaFilteredReasons', None),
7915
+ )
7916
+ new_operation.result = new_operation.response
7917
+ return new_operation
7832
7918
 
7833
- class GenerateVideosOperationDict(TypedDict, total=False):
7834
- """A video generation operation."""
7835
-
7836
- name: Optional[str]
7837
- """The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`."""
7838
-
7839
- metadata: Optional[dict[str, Any]]
7840
- """Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any."""
7841
7919
 
7842
- done: Optional[bool]
7843
- """If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available."""
7920
+ class GenerateVideosResponseDict(TypedDict, total=False):
7921
+ """Response with generated videos."""
7844
7922
 
7845
- error: Optional[dict[str, Any]]
7846
- """The error result of the operation in case of failure or cancellation."""
7923
+ generated_videos: Optional[list[GeneratedVideoDict]]
7924
+ """List of the generated videos"""
7847
7925
 
7848
- response: Optional[GenerateVideosResponseDict]
7849
- """The generated videos."""
7926
+ rai_media_filtered_count: Optional[int]
7927
+ """Returns if any videos were filtered due to RAI policies."""
7850
7928
 
7851
- result: Optional[GenerateVideosResponseDict]
7852
- """The generated videos."""
7929
+ rai_media_filtered_reasons: Optional[list[str]]
7930
+ """Returns rai failure reasons if any."""
7853
7931
 
7854
7932
 
7855
- GenerateVideosOperationOrDict = Union[
7856
- GenerateVideosOperation, GenerateVideosOperationDict
7933
+ GenerateVideosResponseOrDict = Union[
7934
+ GenerateVideosResponse, GenerateVideosResponseDict
7857
7935
  ]
7858
7936
 
7859
7937
 
@@ -8921,6 +8999,9 @@ _ListTuningJobsParametersOrDict = Union[
8921
8999
  class ListTuningJobsResponse(_common.BaseModel):
8922
9000
  """Response for the list tuning jobs method."""
8923
9001
 
9002
+ sdk_http_response: Optional[HttpResponse] = Field(
9003
+ default=None, description="""Used to retain the full HTTP response."""
9004
+ )
8924
9005
  next_page_token: Optional[str] = Field(
8925
9006
  default=None,
8926
9007
  description="""A token to retrieve the next page of results. Pass to ListTuningJobsRequest.page_token to obtain that page.""",
@@ -8933,6 +9014,9 @@ class ListTuningJobsResponse(_common.BaseModel):
8933
9014
  class ListTuningJobsResponseDict(TypedDict, total=False):
8934
9015
  """Response for the list tuning jobs method."""
8935
9016
 
9017
+ sdk_http_response: Optional[HttpResponseDict]
9018
+ """Used to retain the full HTTP response."""
9019
+
8936
9020
  next_page_token: Optional[str]
8937
9021
  """A token to retrieve the next page of results. Pass to ListTuningJobsRequest.page_token to obtain that page."""
8938
9022
 
@@ -9141,7 +9225,7 @@ _CreateTuningJobParametersOrDict = Union[
9141
9225
  ]
9142
9226
 
9143
9227
 
9144
- class Operation(_common.BaseModel):
9228
+ class TuningOperation(_common.BaseModel):
9145
9229
  """A long-running operation."""
9146
9230
 
9147
9231
  name: Optional[str] = Field(
@@ -9162,7 +9246,7 @@ class Operation(_common.BaseModel):
9162
9246
  )
9163
9247
 
9164
9248
 
9165
- class OperationDict(TypedDict, total=False):
9249
+ class TuningOperationDict(TypedDict, total=False):
9166
9250
  """A long-running operation."""
9167
9251
 
9168
9252
  name: Optional[str]
@@ -9178,7 +9262,7 @@ class OperationDict(TypedDict, total=False):
9178
9262
  """The error result of the operation in case of failure or cancellation."""
9179
9263
 
9180
9264
 
9181
- OperationOrDict = Union[Operation, OperationDict]
9265
+ TuningOperationOrDict = Union[TuningOperation, TuningOperationDict]
9182
9266
 
9183
9267
 
9184
9268
  class CreateCachedContentConfig(_common.BaseModel):
@@ -9655,6 +9739,9 @@ _ListCachedContentsParametersOrDict = Union[
9655
9739
 
9656
9740
  class ListCachedContentsResponse(_common.BaseModel):
9657
9741
 
9742
+ sdk_http_response: Optional[HttpResponse] = Field(
9743
+ default=None, description="""Used to retain the full HTTP response."""
9744
+ )
9658
9745
  next_page_token: Optional[str] = Field(default=None, description="""""")
9659
9746
  cached_contents: Optional[list[CachedContent]] = Field(
9660
9747
  default=None,
@@ -9665,6 +9752,9 @@ class ListCachedContentsResponse(_common.BaseModel):
9665
9752
 
9666
9753
  class ListCachedContentsResponseDict(TypedDict, total=False):
9667
9754
 
9755
+ sdk_http_response: Optional[HttpResponseDict]
9756
+ """Used to retain the full HTTP response."""
9757
+
9668
9758
  next_page_token: Optional[str]
9669
9759
  """"""
9670
9760
 
@@ -9728,6 +9818,9 @@ _ListFilesParametersOrDict = Union[
9728
9818
  class ListFilesResponse(_common.BaseModel):
9729
9819
  """Response for the list files method."""
9730
9820
 
9821
+ sdk_http_response: Optional[HttpResponse] = Field(
9822
+ default=None, description="""Used to retain the full HTTP response."""
9823
+ )
9731
9824
  next_page_token: Optional[str] = Field(
9732
9825
  default=None, description="""A token to retrieve next page of results."""
9733
9826
  )
@@ -9739,6 +9832,9 @@ class ListFilesResponse(_common.BaseModel):
9739
9832
  class ListFilesResponseDict(TypedDict, total=False):
9740
9833
  """Response for the list files method."""
9741
9834
 
9835
+ sdk_http_response: Optional[HttpResponseDict]
9836
+ """Used to retain the full HTTP response."""
9837
+
9742
9838
  next_page_token: Optional[str]
9743
9839
  """A token to retrieve next page of results."""
9744
9840
 
@@ -10166,6 +10262,14 @@ class BatchJobDestinationDict(TypedDict, total=False):
10166
10262
  BatchJobDestinationOrDict = Union[BatchJobDestination, BatchJobDestinationDict]
10167
10263
 
10168
10264
 
10265
+ BatchJobDestinationUnion = Union[BatchJobDestination, str]
10266
+
10267
+
10268
+ BatchJobDestinationUnionDict = Union[
10269
+ BatchJobDestinationUnion, BatchJobDestinationDict
10270
+ ]
10271
+
10272
+
10169
10273
  class CreateBatchJobConfig(_common.BaseModel):
10170
10274
  """Config for optional parameters."""
10171
10275
 
@@ -10177,7 +10281,7 @@ class CreateBatchJobConfig(_common.BaseModel):
10177
10281
  description="""The user-defined name of this BatchJob.
10178
10282
  """,
10179
10283
  )
10180
- dest: Optional[str] = Field(
10284
+ dest: Optional[BatchJobDestinationUnion] = Field(
10181
10285
  default=None,
10182
10286
  description="""GCS or BigQuery URI prefix for the output predictions. Example:
10183
10287
  "gs://path/to/output/data" or "bq://projectId.bqDatasetId.bqTableId".
@@ -10195,7 +10299,7 @@ class CreateBatchJobConfigDict(TypedDict, total=False):
10195
10299
  """The user-defined name of this BatchJob.
10196
10300
  """
10197
10301
 
10198
- dest: Optional[str]
10302
+ dest: Optional[BatchJobDestinationUnionDict]
10199
10303
  """GCS or BigQuery URI prefix for the output predictions. Example:
10200
10304
  "gs://path/to/output/data" or "bq://projectId.bqDatasetId.bqTableId".
10201
10305
  """
@@ -10521,6 +10625,9 @@ _ListBatchJobsParametersOrDict = Union[
10521
10625
  class ListBatchJobsResponse(_common.BaseModel):
10522
10626
  """Config for batches.list return value."""
10523
10627
 
10628
+ sdk_http_response: Optional[HttpResponse] = Field(
10629
+ default=None, description="""Used to retain the full HTTP response."""
10630
+ )
10524
10631
  next_page_token: Optional[str] = Field(default=None, description="""""")
10525
10632
  batch_jobs: Optional[list[BatchJob]] = Field(default=None, description="""""")
10526
10633
 
@@ -10528,6 +10635,9 @@ class ListBatchJobsResponse(_common.BaseModel):
10528
10635
  class ListBatchJobsResponseDict(TypedDict, total=False):
10529
10636
  """Config for batches.list return value."""
10530
10637
 
10638
+ sdk_http_response: Optional[HttpResponseDict]
10639
+ """Used to retain the full HTTP response."""
10640
+
10531
10641
  next_page_token: Optional[str]
10532
10642
  """"""
10533
10643
 
google/genai/version.py CHANGED
@@ -13,4 +13,4 @@
13
13
  # limitations under the License.
14
14
  #
15
15
 
16
- __version__ = '1.25.0' # x-release-please-version
16
+ __version__ = '1.27.0' # x-release-please-version
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: google-genai
3
- Version: 1.25.0
3
+ Version: 1.27.0
4
4
  Summary: GenAI Python SDK
5
5
  Author-email: Google LLC <googleapis-packages@google.com>
6
6
  License: Apache-2.0
@@ -609,16 +609,16 @@ from google.genai import types
609
609
  function = types.FunctionDeclaration(
610
610
  name='get_current_weather',
611
611
  description='Get the current weather in a given location',
612
- parameters=types.Schema(
613
- type='OBJECT',
614
- properties={
615
- 'location': types.Schema(
616
- type='STRING',
617
- description='The city and state, e.g. San Francisco, CA',
618
- ),
612
+ parameters_json_schema={
613
+ 'type': 'object',
614
+ 'properties': {
615
+ 'location': {
616
+ 'type': 'string',
617
+ 'description': 'The city and state, e.g. San Francisco, CA',
618
+ }
619
619
  },
620
- required=['location'],
621
- ),
620
+ 'required': ['location'],
621
+ },
622
622
  )
623
623
 
624
624
  tool = types.Tool(function_declarations=[function])
@@ -799,6 +799,40 @@ However you define your schema, don't duplicate it in your input prompt,
799
799
  including by giving examples of expected JSON output. If you do, the generated
800
800
  output might be lower in quality.
801
801
 
802
+ #### JSON Schema support
803
+ Schemas can be provided as standard JSON schema.
804
+ ```python
805
+ user_profile = {
806
+ 'properties': {
807
+ 'age': {
808
+ 'anyOf': [
809
+ {'maximum': 20, 'minimum': 0, 'type': 'integer'},
810
+ {'type': 'null'},
811
+ ],
812
+ 'title': 'Age',
813
+ },
814
+ 'username': {
815
+ 'description': "User's unique name",
816
+ 'title': 'Username',
817
+ 'type': 'string',
818
+ },
819
+ },
820
+ 'required': ['username', 'age'],
821
+ 'title': 'User Schema',
822
+ 'type': 'object',
823
+ }
824
+
825
+ response = client.models.generate_content(
826
+ model='gemini-2.0-flash',
827
+ contents='Give me information of the United States.',
828
+ config={
829
+ 'response_mime_type': 'application/json',
830
+ 'response_json_schema': user_profile
831
+ },
832
+ )
833
+ print(response.parsed)
834
+ ```
835
+
802
836
  #### Pydantic Model Schema support
803
837
 
804
838
  Schemas can be provided as Pydantic Models.
@@ -1370,35 +1404,21 @@ print(response.text)
1370
1404
  ## Tunings
1371
1405
 
1372
1406
  `client.tunings` contains tuning job APIs and supports supervised fine
1373
- tuning through `tune`. See the 'Create a client' section above to initialize a
1374
- client.
1407
+ tuning through `tune`. Only supported in Vertex AI. See the 'Create a client'
1408
+ section above to initialize a client.
1375
1409
 
1376
1410
  ### Tune
1377
1411
 
1378
1412
  - Vertex AI supports tuning from GCS source or from a Vertex Multimodal Dataset
1379
- - Gemini Developer API supports tuning from inline examples
1380
1413
 
1381
1414
  ```python
1382
1415
  from google.genai import types
1383
1416
 
1384
- if client.vertexai:
1385
- model = 'gemini-2.0-flash-001'
1386
- training_dataset = types.TuningDataset(
1387
- # or gcs_uri=my_vertex_multimodal_dataset
1388
- gcs_uri='gs://cloud-samples-data/ai-platform/generative_ai/gemini-1_5/text/sft_train_data.jsonl',
1389
- )
1390
- else:
1391
- model = 'models/gemini-2.0-flash-001'
1392
- # or gcs_uri=my_vertex_multimodal_dataset.resource_name
1393
- training_dataset = types.TuningDataset(
1394
- examples=[
1395
- types.TuningExample(
1396
- text_input=f'Input text {i}',
1397
- output=f'Output text {i}',
1398
- )
1399
- for i in range(5)
1400
- ],
1401
- )
1417
+ model = 'gemini-2.0-flash-001'
1418
+ training_dataset = types.TuningDataset(
1419
+ # or gcs_uri=my_vertex_multimodal_dataset
1420
+ gcs_uri='gs://cloud-samples-data/ai-platform/generative_ai/gemini-1_5/text/sft_train_data.jsonl',
1421
+ )
1402
1422
  ```
1403
1423
 
1404
1424
  ```python
@@ -1424,14 +1444,15 @@ print(tuning_job)
1424
1444
  ```python
1425
1445
  import time
1426
1446
 
1427
- running_states = set(
1447
+ completed_states = set(
1428
1448
  [
1429
- 'JOB_STATE_PENDING',
1430
- 'JOB_STATE_RUNNING',
1449
+ 'JOB_STATE_SUCCEEDED',
1450
+ 'JOB_STATE_FAILED',
1451
+ 'JOB_STATE_CANCELLED',
1431
1452
  ]
1432
1453
  )
1433
1454
 
1434
- while tuning_job.state in running_states:
1455
+ while tuning_job.state not in completed_states:
1435
1456
  print(tuning_job.state)
1436
1457
  tuning_job = client.tunings.get(name=tuning_job.name)
1437
1458
  time.sleep(10)
@@ -1542,16 +1563,63 @@ initialize a client.
1542
1563
 
1543
1564
  ### Create
1544
1565
 
1566
+ Vertex AI:
1567
+
1545
1568
  ```python
1546
1569
  # Specify model and source file only, destination and job display name will be auto-populated
1547
1570
  job = client.batches.create(
1548
1571
  model='gemini-2.0-flash-001',
1549
- src='bq://my-project.my-dataset.my-table',
1572
+ src='bq://my-project.my-dataset.my-table', # or "gs://path/to/input/data"
1573
+ )
1574
+
1575
+ job
1576
+ ```
1577
+
1578
+ Gemini Developer API:
1579
+
1580
+ ```python
1581
+ # Create a batch job with inlined requests
1582
+ batch_job = client.batches.create(
1583
+ model="gemini-2.0-flash",
1584
+ src=[{
1585
+ "contents": [{
1586
+ "parts": [{
1587
+ "text": "Hello!",
1588
+ }],
1589
+ "role": "user",
1590
+ }],
1591
+ "config": {"response_modalities": ["text"]},
1592
+ }],
1550
1593
  )
1551
1594
 
1552
1595
  job
1553
1596
  ```
1554
1597
 
1598
+ To create a batch job from a file, you first need to upload a JSONL file.
1599
+ For example myrequests.json:
1600
+
1601
+ ```
1602
+ {"key":"request_1", "request": {"contents": [{"parts": [{"text":
1603
+ "Explain how AI works in a few words"}]}], "generation_config": {"response_modalities": ["TEXT"]}}}
1604
+ {"key":"request_2", "request": {"contents": [{"parts": [{"text": "Explain how Crypto works in a few words"}]}]}}
1605
+ ```
1606
+ Then upload the file.
1607
+
1608
+ ```python
1609
+ # Upload the file
1610
+ file = client.files.upload(
1611
+ file='myrequests.json',
1612
+ config=types.UploadFileConfig(display_name='test_json')
1613
+ )
1614
+
1615
+ # Create a batch job with file name
1616
+ batch_job = client.batches.create(
1617
+ model="gemini-2.0-flash",
1618
+ src="files/file_name",
1619
+ )
1620
+ ```
1621
+
1622
+
1555
1623
  ```python
1556
1624
  # Get a job by name
1557
1625
  job = client.batches.get(name=job.name)
@@ -0,0 +1,35 @@
1
+ google/genai/__init__.py,sha256=SYTxz3Ho06pP2TBlvDU0FkUJz8ytbR3MgEpS9HvVYq4,709
2
+ google/genai/_adapters.py,sha256=Kok38miNYJff2n--l0zEK_hbq0y2rWOH7k75J7SMYbQ,1744
3
+ google/genai/_api_client.py,sha256=X9JVR_XBnedZgkOKbBcvJjMuHupA75XXJkmlrVZqTw0,53102
4
+ google/genai/_api_module.py,sha256=lj8eUWx8_LBGBz-49qz6_ywWm3GYp3d8Bg5JoOHbtbI,902
5
+ google/genai/_automatic_function_calling_util.py,sha256=IJkPq2fT9pYxYm5Pbu5-e0nBoZKoZla7yT4_txWRKLs,10324
6
+ google/genai/_base_url.py,sha256=E5H4dew14Y16qfnB3XRnjSCi19cJVlkaMNoM_8ip-PM,1597
7
+ google/genai/_common.py,sha256=sJpzeoEJ6dZSPPfHb2Dsar-F5KmpwLjFqNSkxxxVfS8,19876
8
+ google/genai/_extra_utils.py,sha256=jWhJIdaFoVsrvExl6L7of3Bu8miSKvVidx4pbQ6EO2A,20571
9
+ google/genai/_live_converters.py,sha256=pIcEfsAxjV1zeWmxMNigDD7ETHiuutp-a2y0HQThdmU,100162
10
+ google/genai/_mcp_utils.py,sha256=khECx-DMuHemKzOQQ3msWp7FivPeEOnl3n1lvWc_b5o,3833
11
+ google/genai/_replay_api_client.py,sha256=2ndavmUMySvjLIdYEvjPZIOPfc-IA5rbWQgEwWuWpfc,21567
12
+ google/genai/_test_api_client.py,sha256=4ruFIy5_1qcbKqqIBu3HSQbpSOBrxiecBtDZaTGFR1s,4797
13
+ google/genai/_tokens_converters.py,sha256=ClWTsgcqn91zSw_qTqLPTNSP1-_G8s-NlBCD8-DQniw,23803
14
+ google/genai/_transformers.py,sha256=uTRSd9qrUZbJoFzLEWy3wGR_j1KqVg5cypGzQM1Mc4w,37357
15
+ google/genai/batches.py,sha256=a8X0wi8D6d7WC8873C2oVtk_NBYbd8xeI77GSkww4Nc,81094
16
+ google/genai/caches.py,sha256=isEzVYJgQVOjHf0XkFl86HOXzoYFXB-PgEVqwuo1V4s,64673
17
+ google/genai/chats.py,sha256=0QdOUeWEYDQgAWBy1f7a3z3yY9S8tXSowUzNrzazzj4,16651
18
+ google/genai/client.py,sha256=wXnfZBSv9p-yKtX_gabUrfBXoYHuqHhzK_VgwRttMgY,10777
19
+ google/genai/errors.py,sha256=IdSymOuUJDprfPRBhBtFDkc_XX81UvgNbWrOLR8L2GU,5582
20
+ google/genai/files.py,sha256=Z9CP2RLAZlZDE3zWXVNA2LF3x7wJTXOhNzDoSyHmr9k,40154
21
+ google/genai/live.py,sha256=WvOPBFDwD2eyUx89XCW6oudKtlK7960RqQuk5-SY1Ac,39482
22
+ google/genai/live_music.py,sha256=3GG9nsto8Vhkohcs-4CPMS4DFp1ZtMuLYzHfvEPYAeg,6971
23
+ google/genai/models.py,sha256=wdxOAA2OTklPBobfvKSDLqdihd74IIG6lco-7Z2S4xg,239491
24
+ google/genai/operations.py,sha256=3eudPaItN6_JJKMWNT9lLIJLUGyAQfFK1xken7Rv8vQ,12814
25
+ google/genai/pagers.py,sha256=hlfLtH8Fv_eU19bfqKaabvWqDZYhIYXhOVuoH2oBJkA,7077
26
+ google/genai/py.typed,sha256=RsMFoLwBkAvY05t6izop4UHZtqOPLiKp3GkIEizzmQY,40
27
+ google/genai/tokens.py,sha256=QGW1jI0Y5wXqiaad0-N6Utgh9sK4TK0todHf5h0GLeI,12490
28
+ google/genai/tunings.py,sha256=diQSMojun63gFI9b-bluxTYjwbOwEIjviMJwHvUb9A4,48961
29
+ google/genai/types.py,sha256=0otLX5sfNZoW98bgHI7D2NAYTnF5MKXwvRuKHEqQhv0,474334
30
+ google/genai/version.py,sha256=zQZWqve_yrf9cPtiYvuJgv4hhwEymigHvKuPVVKoisc,627
31
+ google_genai-1.27.0.dist-info/licenses/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
32
+ google_genai-1.27.0.dist-info/METADATA,sha256=3h-l2dUas9jaOIvWv6r6Vnd1gWz-Ppz6Hn7i4sHkOcM,43091
33
+ google_genai-1.27.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
34
+ google_genai-1.27.0.dist-info/top_level.txt,sha256=_1QvSJIhFAGfxb79D6DhB7SUw2X6T4rwnz_LLrbcD3c,7
35
+ google_genai-1.27.0.dist-info/RECORD,,