google-genai 0.8.0__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
google/genai/types.py CHANGED
@@ -20,12 +20,22 @@ from enum import Enum, EnumMeta
20
20
  import inspect
21
21
  import json
22
22
  import logging
23
+ import sys
23
24
  import typing
24
25
  from typing import Any, Callable, GenericAlias, Literal, Optional, Type, TypedDict, Union
25
26
  import pydantic
26
27
  from pydantic import Field
27
28
  from . import _common
28
29
 
30
+ if sys.version_info >= (3, 10):
31
+ # Supports both Union[t1, t2] and t1 | t2
32
+ VersionedUnionType = Union[typing.types.UnionType, typing._UnionGenericAlias]
33
+ _UNION_TYPES = (typing.Union, typing.types.UnionType)
34
+ else:
35
+ # Supports only Union[t1, t2]
36
+ VersionedUnionType = typing._UnionGenericAlias
37
+ _UNION_TYPES = (typing.Union,)
38
+
29
39
  _is_pillow_image_imported = False
30
40
  if typing.TYPE_CHECKING:
31
41
  import PIL.Image
@@ -713,10 +723,6 @@ class HttpOptions(_common.BaseModel):
713
723
  timeout: Optional[int] = Field(
714
724
  default=None, description="""Timeout for the request in milliseconds."""
715
725
  )
716
- deprecated_response_payload: Optional[dict[str, any]] = Field(
717
- default=None,
718
- description="""This field is deprecated. If set, the response payload will be returned in the supplied dict.""",
719
- )
720
726
 
721
727
 
722
728
  class HttpOptionsDict(TypedDict, total=False):
@@ -734,9 +740,6 @@ class HttpOptionsDict(TypedDict, total=False):
734
740
  timeout: Optional[int]
735
741
  """Timeout for the request in milliseconds."""
736
742
 
737
- deprecated_response_payload: Optional[dict[str, any]]
738
- """This field is deprecated. If set, the response payload will be returned in the supplied dict."""
739
-
740
743
 
741
744
  HttpOptionsOrDict = Union[HttpOptions, HttpOptionsDict]
742
745
 
@@ -1656,7 +1659,7 @@ ContentUnion = Union[Content, list[PartUnion], PartUnion]
1656
1659
  ContentUnionDict = Union[ContentUnion, ContentDict]
1657
1660
 
1658
1661
 
1659
- SchemaUnion = Union[dict, type, Schema, GenericAlias]
1662
+ SchemaUnion = Union[dict, type, Schema, GenericAlias, VersionedUnionType]
1660
1663
 
1661
1664
 
1662
1665
  SchemaUnionDict = Union[SchemaUnion, SchemaDict]
@@ -1863,6 +1866,10 @@ class GenerateContentConfig(_common.BaseModel):
1863
1866
  description="""Associates model output to a specific function call.
1864
1867
  """,
1865
1868
  )
1869
+ labels: Optional[dict[str, str]] = Field(
1870
+ default=None,
1871
+ description="""Labels with user-defined metadata to break down billed charges.""",
1872
+ )
1866
1873
  cached_content: Optional[str] = Field(
1867
1874
  default=None,
1868
1875
  description="""Resource name of a context cache that can be used in subsequent
@@ -2007,6 +2014,9 @@ class GenerateContentConfigDict(TypedDict, total=False):
2007
2014
  """Associates model output to a specific function call.
2008
2015
  """
2009
2016
 
2017
+ labels: Optional[dict[str, str]]
2018
+ """Labels with user-defined metadata to break down billed charges."""
2019
+
2010
2020
  cached_content: Optional[str]
2011
2021
  """Resource name of a context cache that can be used in subsequent
2012
2022
  requests.
@@ -2902,6 +2912,29 @@ class GenerateContentResponse(_common.BaseModel):
2902
2912
  # may not be a valid json per stream response
2903
2913
  except json.decoder.JSONDecodeError:
2904
2914
  pass
2915
+ elif typing.get_origin(response_schema) in _UNION_TYPES:
2916
+ # Union schema.
2917
+ union_types = typing.get_args(response_schema)
2918
+ for union_type in union_types:
2919
+ if issubclass(union_type, pydantic.BaseModel):
2920
+ try:
2921
+
2922
+ class Placeholder(pydantic.BaseModel):
2923
+ placeholder: response_schema
2924
+
2925
+ parsed = {'placeholder': json.loads(result.text)}
2926
+ placeholder = Placeholder.model_validate(parsed)
2927
+ result.parsed = placeholder.placeholder
2928
+ except json.decoder.JSONDecodeError:
2929
+ pass
2930
+ except pydantic.ValidationError:
2931
+ pass
2932
+ else:
2933
+ try:
2934
+ result.parsed = json.loads(result.text)
2935
+ # may not be a valid json per stream response
2936
+ except json.decoder.JSONDecodeError:
2937
+ pass
2905
2938
 
2906
2939
  return result
2907
2940
 
@@ -5921,22 +5954,51 @@ _CreateTuningJobParametersOrDict = Union[
5921
5954
  ]
5922
5955
 
5923
5956
 
5924
- class TuningJobOrOperation(_common.BaseModel):
5925
- """A tuning job or a long-running operation that resolves to a tuning job."""
5957
+ class Operation(_common.BaseModel):
5958
+ """A long-running operation."""
5926
5959
 
5927
- tuning_job: Optional[TuningJob] = Field(default=None, description="""""")
5960
+ name: Optional[str] = Field(
5961
+ default=None,
5962
+ description="""The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.""",
5963
+ )
5964
+ metadata: Optional[dict[str, Any]] = Field(
5965
+ default=None,
5966
+ description="""Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.""",
5967
+ )
5968
+ done: Optional[bool] = Field(
5969
+ default=None,
5970
+ description="""If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.""",
5971
+ )
5972
+ error: Optional[dict[str, Any]] = Field(
5973
+ default=None,
5974
+ description="""The error result of the operation in case of failure or cancellation.""",
5975
+ )
5976
+ response: Optional[dict[str, Any]] = Field(
5977
+ default=None,
5978
+ description="""The normal response of the operation in case of success.""",
5979
+ )
5928
5980
 
5929
5981
 
5930
- class TuningJobOrOperationDict(TypedDict, total=False):
5931
- """A tuning job or a long-running operation that resolves to a tuning job."""
5982
+ class OperationDict(TypedDict, total=False):
5983
+ """A long-running operation."""
5932
5984
 
5933
- tuning_job: Optional[TuningJobDict]
5934
- """"""
5985
+ name: Optional[str]
5986
+ """The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`."""
5935
5987
 
5988
+ metadata: Optional[dict[str, Any]]
5989
+ """Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any."""
5936
5990
 
5937
- TuningJobOrOperationOrDict = Union[
5938
- TuningJobOrOperation, TuningJobOrOperationDict
5939
- ]
5991
+ done: Optional[bool]
5992
+ """If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available."""
5993
+
5994
+ error: Optional[dict[str, Any]]
5995
+ """The error result of the operation in case of failure or cancellation."""
5996
+
5997
+ response: Optional[dict[str, Any]]
5998
+ """The normal response of the operation in case of success."""
5999
+
6000
+
6001
+ OperationOrDict = Union[Operation, OperationDict]
5940
6002
 
5941
6003
 
5942
6004
  class CreateCachedContentConfig(_common.BaseModel):
@@ -6543,13 +6605,17 @@ _CreateFileParametersOrDict = Union[
6543
6605
  class CreateFileResponse(_common.BaseModel):
6544
6606
  """Response for the create file method."""
6545
6607
 
6546
- pass
6608
+ http_headers: Optional[dict[str, str]] = Field(
6609
+ default=None,
6610
+ description="""Used to retain the HTTP headers in the request""",
6611
+ )
6547
6612
 
6548
6613
 
6549
6614
  class CreateFileResponseDict(TypedDict, total=False):
6550
6615
  """Response for the create file method."""
6551
6616
 
6552
- pass
6617
+ http_headers: Optional[dict[str, str]]
6618
+ """Used to retain the HTTP headers in the request"""
6553
6619
 
6554
6620
 
6555
6621
  CreateFileResponseOrDict = Union[CreateFileResponse, CreateFileResponseDict]
@@ -7245,53 +7311,6 @@ _GetOperationParametersOrDict = Union[
7245
7311
  ]
7246
7312
 
7247
7313
 
7248
- class Operation(_common.BaseModel):
7249
- """A long-running operation."""
7250
-
7251
- name: Optional[str] = Field(
7252
- default=None,
7253
- description="""The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.""",
7254
- )
7255
- metadata: Optional[dict[str, Any]] = Field(
7256
- default=None,
7257
- description="""Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.""",
7258
- )
7259
- done: Optional[bool] = Field(
7260
- default=None,
7261
- description="""If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.""",
7262
- )
7263
- error: Optional[dict[str, Any]] = Field(
7264
- default=None,
7265
- description="""The error result of the operation in case of failure or cancellation.""",
7266
- )
7267
- response: Optional[dict[str, Any]] = Field(
7268
- default=None,
7269
- description="""The normal response of the operation in case of success.""",
7270
- )
7271
-
7272
-
7273
- class OperationDict(TypedDict, total=False):
7274
- """A long-running operation."""
7275
-
7276
- name: Optional[str]
7277
- """The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`."""
7278
-
7279
- metadata: Optional[dict[str, Any]]
7280
- """Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any."""
7281
-
7282
- done: Optional[bool]
7283
- """If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available."""
7284
-
7285
- error: Optional[dict[str, Any]]
7286
- """The error result of the operation in case of failure or cancellation."""
7287
-
7288
- response: Optional[dict[str, Any]]
7289
- """The normal response of the operation in case of success."""
7290
-
7291
-
7292
- OperationOrDict = Union[Operation, OperationDict]
7293
-
7294
-
7295
7314
  class FetchPredictOperationConfig(_common.BaseModel):
7296
7315
 
7297
7316
  http_options: Optional[HttpOptions] = Field(
google/genai/version.py CHANGED
@@ -13,4 +13,4 @@
13
13
  # limitations under the License.
14
14
  #
15
15
 
16
- __version__ = '0.8.0' # x-release-please-version
16
+ __version__ = '1.0.0' # x-release-please-version
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.2
2
2
  Name: google-genai
3
- Version: 0.8.0
3
+ Version: 1.0.0
4
4
  Summary: GenAI Python SDK
5
5
  Author-email: Google LLC <googleapis-packages@google.com>
6
6
  License: Apache-2.0
@@ -34,7 +34,7 @@ Requires-Dist: websockets<15.0dev,>=13.0
34
34
 
35
35
  -----
36
36
 
37
- Google Gen AI Python SDK provides an interface for developers to integrate Google's generative models into their Python applications. It supports the [Gemini Developer API](https://ai.google.dev/gemini-api/docs) and [Vertex AI](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/overview) APIs. This is an early release. API is subject to change. Please do not use this SDK in production environments at this stage.
37
+ Google Gen AI Python SDK provides an interface for developers to integrate Google's generative models into their Python applications. It supports the [Gemini Developer API](https://ai.google.dev/gemini-api/docs) and [Vertex AI](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/overview) APIs.
38
38
 
39
39
  ## Installation
40
40
 
@@ -66,6 +66,23 @@ client = genai.Client(
66
66
  )
67
67
  ```
68
68
 
69
+ To set the API version use `http_options`. For example, to set the API version
70
+ to `v1` for Vertex AI:
71
+
72
+ ```python
73
+ client = genai.Client(
74
+ vertexai=True, project='your-project-id', location='us-central1',
75
+ http_options={'api_version': 'v1'}
76
+ )
77
+ ```
78
+
79
+ To set the API version to `v1alpha` for the Gemini API:
80
+
81
+ ```python
82
+ client = genai.Client(api_key='GEMINI_API_KEY',
83
+ http_options={'api_version': 'v1alpha'})
84
+ ```
85
+
69
86
  ## Types
70
87
 
71
88
  Parameter types can be specified as either dictionaries(`TypedDict`) or
@@ -144,53 +161,17 @@ response = client.models.generate_content(
144
161
  print(response.text)
145
162
  ```
146
163
 
147
- ### Thinking
148
-
149
- The Gemini 2.0 Flash Thinking model is an experimental model that could return
150
- "thoughts" as part of its response.
151
-
152
- #### Gemini Developer API
153
-
154
- Thinking config is only available in v1alpha for Gemini AI API.
155
-
156
- ```python
157
- response = client.models.generate_content(
158
- model='gemini-2.0-flash-thinking-exp',
159
- contents='What is the sum of natural numbers from 1 to 100?',
160
- config=types.GenerateContentConfig(
161
- thinking_config=types.ThinkingConfig(include_thoughts=True),
162
- http_options=types.HttpOptions(api_version='v1alpha'),
163
- )
164
- )
165
- for part in response.candidates[0].content.parts:
166
- print(part)
167
- ```
168
-
169
- #### Vertex AI API
170
-
171
- ```python
172
- response = client.models.generate_content(
173
- model='gemini-2.0-flash-thinking-exp-01-21',
174
- contents='What is the sum of natural numbers from 1 to 100?',
175
- config=types.GenerateContentConfig(
176
- thinking_config=types.ThinkingConfig(include_thoughts=True),
177
- )
178
- )
179
- for part in response.candidates[0].content.parts:
180
- print(part)
181
- ```
182
-
183
164
  ### List Base Models
184
165
 
185
166
  To retrieve tuned models, see [list tuned models](#list-tuned-models).
186
167
 
187
168
  ```python
188
- for model in client.models.list(config={'query_base':True}):
169
+ for model in client.models.list():
189
170
  print(model)
190
171
  ```
191
172
 
192
173
  ```python
193
- pager = client.models.list(config={"page_size": 10, 'query_base':True})
174
+ pager = client.models.list(config={"page_size": 10})
194
175
  print(pager.page_size)
195
176
  print(pager[0])
196
177
  pager.next_page()
@@ -200,12 +181,12 @@ print(pager[0])
200
181
  #### Async
201
182
 
202
183
  ```python
203
- async for job in await client.aio.models.list(config={'query_base':True}):
184
+ async for job in await client.aio.models.list():
204
185
  print(job)
205
186
  ```
206
187
 
207
188
  ```python
208
- async_pager = await client.aio.models.list(config={"page_size": 10, 'query_base':True})
189
+ async_pager = await client.aio.models.list(config={"page_size": 10})
209
190
  print(async_pager.page_size)
210
191
  print(async_pager[0])
211
192
  await async_pager.next_page()
@@ -338,6 +319,66 @@ response = client.models.generate_content(
338
319
  print(response.text)
339
320
  ```
340
321
 
322
+ #### Function calling with `ANY` tools config mode
323
+
324
+ If you configure function calling mode to be `ANY`, then the model will always
325
+ return function call parts. If you also pass a Python function as a tool, by
326
+ default the SDK will perform automatic function calling until the remote calls exceed the
327
+ maximum number of remote calls for automatic function calling (defaults to 10).
328
+
329
+ If you'd like to disable automatic function calling in `ANY` mode:
330
+
331
+ ```python
332
+ def get_current_weather(location: str) -> str:
333
+ """Returns the current weather.
334
+
335
+ Args:
336
+ location: The city and state, e.g. San Francisco, CA
337
+ """
338
+ return "sunny"
339
+
340
+ response = client.models.generate_content(
341
+ model="gemini-2.0-flash-exp",
342
+ contents="What is the weather like in Boston?",
343
+ config=types.GenerateContentConfig(
344
+ tools=[get_current_weather],
345
+ automatic_function_calling=types.AutomaticFunctionCallingConfig(
346
+ disable=True
347
+ ),
348
+ tool_config=types.ToolConfig(
349
+ function_calling_config=types.FunctionCallingConfig(mode='ANY')
350
+ ),
351
+ ),
352
+ )
353
+ ```
354
+
355
+ If you'd like to allow `x` turns of automatic function calling, you can
+ configure the maximum remote calls to be `x + 1`.
+ For example, assuming you prefer `1` turn of automatic function calling:
358
+
359
+ ```python
360
+ def get_current_weather(location: str) -> str:
361
+ """Returns the current weather.
362
+
363
+ Args:
364
+ location: The city and state, e.g. San Francisco, CA
365
+ """
366
+ return "sunny"
367
+
368
+ response = client.models.generate_content(
369
+ model="gemini-2.0-flash-exp",
370
+ contents="What is the weather like in Boston?",
371
+ config=types.GenerateContentConfig(
372
+ tools=[get_current_weather],
373
+ automatic_function_calling=types.AutomaticFunctionCallingConfig(
374
+ maximum_remote_calls=2
375
+ ),
376
+ tool_config=types.ToolConfig(
377
+ function_calling_config=types.FunctionCallingConfig(mode='ANY')
378
+ ),
379
+ ),
380
+ )
381
+ ```
341
382
  ### JSON Response Schema
342
383
 
343
384
  #### Pydantic Model Schema support
@@ -864,12 +905,12 @@ print(tuned_model)
864
905
  To retrieve base models, see [list base models](#list-base-models).
865
906
 
866
907
  ```python
867
- for model in client.models.list(config={"page_size": 10}):
908
+ for model in client.models.list(config={"page_size": 10, "query_base": False}):
868
909
  print(model)
869
910
  ```
870
911
 
871
912
  ```python
872
- pager = client.models.list(config={"page_size": 10})
913
+ pager = client.models.list(config={"page_size": 10, "query_base": False})
873
914
  print(pager.page_size)
874
915
  print(pager[0])
875
916
  pager.next_page()
@@ -879,12 +920,12 @@ print(pager[0])
879
920
  #### Async
880
921
 
881
922
  ```python
882
- async for job in await client.aio.models.list(config={"page_size": 10}):
923
+ async for job in await client.aio.models.list(config={"page_size": 10, "query_base": False}):
883
924
  print(job)
884
925
  ```
885
926
 
886
927
  ```python
887
- async_pager = await client.aio.models.list(config={"page_size": 10})
928
+ async_pager = await client.aio.models.list(config={"page_size": 10, "query_base": False})
888
929
  print(async_pager.page_size)
889
930
  print(async_pager[0])
890
931
  await async_pager.next_page()
@@ -0,0 +1,27 @@
1
+ google/genai/__init__.py,sha256=IYw-PcsdgjSpS1mU_ZcYkTfPocsJ4aVmrDxP7vX7c6Y,709
2
+ google/genai/_api_client.py,sha256=wLDcPY7qZi3caUlN-uIzbir6gzKQZg-zP8cxBp9Yq5g,22534
3
+ google/genai/_api_module.py,sha256=9bxmtcSTpT8Ht6VwJyw7fQqiR0jJXz7350dWGl-bC5E,780
4
+ google/genai/_automatic_function_calling_util.py,sha256=c7t7AbM1KqbImGWKWedRta6KW63TjOZmn0YbkXHjabI,10693
5
+ google/genai/_common.py,sha256=8vhUVofxerNfqL2STOkOt10hzE4wYelydxYO27j5ISs,9547
6
+ google/genai/_extra_utils.py,sha256=HxiS6rRjqWXKDOyK-ok0JlpRUykRyz3V6aJQfNCHkV4,11518
7
+ google/genai/_operations.py,sha256=KaDgwqG5g6Odd1P6ftvIUT9gF3ov08drm01jE1CjB_s,11490
8
+ google/genai/_replay_api_client.py,sha256=f_znGdDKXUOEN8lkRBzVZ6LDSGwrWHzy9N-Sk6pUU4E,14963
9
+ google/genai/_test_api_client.py,sha256=2PvDcW3h01U4UOSoj7TUo6TwdBHSEN_lO2tXjBoh5Fw,4765
10
+ google/genai/_transformers.py,sha256=Jwgbwl7DufqRZGeoPc1GClRQj1X6WM4avZEFfq9WGwA,22690
11
+ google/genai/batches.py,sha256=jv8pW_g_cZee6ol5ER5bQRUkXsj4IUcZC5cMo-YAnt0,38033
12
+ google/genai/caches.py,sha256=jsiclHO71kIa2CNrds3O8PL2fCNr_dlhUSPjhiRsjNE,53152
13
+ google/genai/chats.py,sha256=GyufXQPtyP_v4L3943xaKXMpo1Us9sBTdMSTUV4P6s8,7827
14
+ google/genai/client.py,sha256=MTZ3DOXk1_xgljaHlvF16jr_SKVPRfU8lZ1eH_dfDeQ,9334
15
+ google/genai/errors.py,sha256=dea3cQecyGFMoV5oIvUfKeMY904HzlcT4oiPWQzDCZo,3746
16
+ google/genai/files.py,sha256=U3qaa31AX7dKcZh6RkZSAkrDO7FVitukMW67wxL70kQ,42074
17
+ google/genai/live.py,sha256=xDu1wV8iQ5lI2i4_AmtOQOuiiPXBt6WykV_rXfjb0Sc,23467
18
+ google/genai/models.py,sha256=pj67WnH0Qn6K0u-qeOheM7sJHNdh8GAayEEpnd_v2wo,178201
19
+ google/genai/pagers.py,sha256=hSHd-gLvEzYWwK85i8EcFNWUMKtszUs7Nw2r3L7d6_U,6686
20
+ google/genai/tunings.py,sha256=OCuzmfjUK1iIElaOGxY3nQrh0gWNkk2xGqbLdBcHZr4,47009
21
+ google/genai/types.py,sha256=WkcW0qvewDO2CTERusZ75ZU0brE44HV45qxRTMjA7AM,279319
22
+ google/genai/version.py,sha256=9ko0qza0b8zl2aUxsnPkYRfwTKWmgtyAq6ngYDI-O6E,626
23
+ google_genai-1.0.0.dist-info/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
24
+ google_genai-1.0.0.dist-info/METADATA,sha256=8tnXFyxbncOQ-VVdpRLmaID64CYrBgF-WvckQt7NYnU,25185
25
+ google_genai-1.0.0.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
26
+ google_genai-1.0.0.dist-info/top_level.txt,sha256=_1QvSJIhFAGfxb79D6DhB7SUw2X6T4rwnz_LLrbcD3c,7
27
+ google_genai-1.0.0.dist-info/RECORD,,
@@ -1,27 +0,0 @@
1
- google/genai/__init__.py,sha256=IYw-PcsdgjSpS1mU_ZcYkTfPocsJ4aVmrDxP7vX7c6Y,709
2
- google/genai/_api_client.py,sha256=ZXiLrTI0wWVZaGEC1BHU9fLVp1MZKpnY8J3wV59VOk4,22806
3
- google/genai/_api_module.py,sha256=9bxmtcSTpT8Ht6VwJyw7fQqiR0jJXz7350dWGl-bC5E,780
4
- google/genai/_automatic_function_calling_util.py,sha256=sEaDAeHjv-H71o1L3_P8sqOslK4TK0Rybn4WPymeEBk,10665
5
- google/genai/_common.py,sha256=Q-3n5U7GCDDfOU_7uBkGYkEcEH2VcMa_NuLcyNzWExM,9017
6
- google/genai/_extra_utils.py,sha256=y-6Jr2GN2BKZV67I6fTgDtwfsOTQs7QlLDBQdmW_jKk,11258
7
- google/genai/_operations.py,sha256=KaDgwqG5g6Odd1P6ftvIUT9gF3ov08drm01jE1CjB_s,11490
8
- google/genai/_replay_api_client.py,sha256=yV2vJJk5Nvdqyi0QmNhupMdm2jrzdYoHVFnVMAx-l0M,14834
9
- google/genai/_test_api_client.py,sha256=2PvDcW3h01U4UOSoj7TUo6TwdBHSEN_lO2tXjBoh5Fw,4765
10
- google/genai/_transformers.py,sha256=AiSVoQML3MK6AP5xTStIiJUOlrZO4m_qBULOjgdZHC0,21963
11
- google/genai/batches.py,sha256=jv8pW_g_cZee6ol5ER5bQRUkXsj4IUcZC5cMo-YAnt0,38033
12
- google/genai/caches.py,sha256=jsiclHO71kIa2CNrds3O8PL2fCNr_dlhUSPjhiRsjNE,53152
13
- google/genai/chats.py,sha256=GyufXQPtyP_v4L3943xaKXMpo1Us9sBTdMSTUV4P6s8,7827
14
- google/genai/client.py,sha256=MTZ3DOXk1_xgljaHlvF16jr_SKVPRfU8lZ1eH_dfDeQ,9334
15
- google/genai/errors.py,sha256=DtpDZT5UDqumk2cTRUlg3k4ypmO_0tkMNzJgA3qzCmc,3666
16
- google/genai/files.py,sha256=arkka0MaNKjfKyVXdQ6-Llnr9W6J4NQYgxHdN69yAwQ,41885
17
- google/genai/live.py,sha256=wxz8ebqcPR6JJs39OOVz8zPzfAf31Zol7sGE7byQEyI,23302
18
- google/genai/models.py,sha256=GP80VHjWRizW8fypNDUvMIWsObmQMDYuqIHEhLKB0lk,166870
19
- google/genai/pagers.py,sha256=hSHd-gLvEzYWwK85i8EcFNWUMKtszUs7Nw2r3L7d6_U,6686
20
- google/genai/tunings.py,sha256=iJJYn1O_wjFKTIL8VS2zpIRqpfCNRrO2REP2ztgFW6M,39144
21
- google/genai/types.py,sha256=1xYY4hxaoi86aoc9n2J8CcoQcAg8HQE1m8Idq9nYbDQ,278477
22
- google/genai/version.py,sha256=0XfCLnUKjoi5AZ_PK2-LMgByDDmYvGy8YPU9SF8C5Bo,626
23
- google_genai-0.8.0.dist-info/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
24
- google_genai-0.8.0.dist-info/METADATA,sha256=M_NzvfnF5Oaeuf06FISINgAbXOgEYVhD2UjJK3nSeMU,23962
25
- google_genai-0.8.0.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
26
- google_genai-0.8.0.dist-info/top_level.txt,sha256=_1QvSJIhFAGfxb79D6DhB7SUw2X6T4rwnz_LLrbcD3c,7
27
- google_genai-0.8.0.dist-info/RECORD,,