google-genai 0.2.2__py3-none-any.whl → 0.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
google/genai/__init__.py CHANGED
@@ -17,6 +17,6 @@
 
 from .client import Client
 
-__version__ = '0.2.2'
+__version__ = '0.3.0'
 
 __all__ = ['Client']
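The only code change in `__init__.py` is the version bump. A quick way to confirm which version is active in an environment (standard import; `__version__` is defined right in this file):

``` python
from google import genai

# After upgrading, this should print 0.3.0.
print(genai.__version__)
```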
google/genai/_api_client.py CHANGED
@@ -51,7 +51,7 @@ class HttpOptions(TypedDict):
 def _append_library_version_headers(headers: dict[str, str]) -> None:
   """Appends the telemetry header to the headers dict."""
   # TODO: Automate revisions to the SDK library version.
-  library_label = f'google-genai-sdk/0.2.2'
+  library_label = f'google-genai-sdk/0.3.0'
   language_label = 'gl-python/' + sys.version.split()[0]
   version_header_value = f'{library_label} {language_label}'
   if (
@@ -241,7 +241,9 @@ class ApiClient:
   ) -> HttpResponse:
     if self.vertexai:
       if not self._credentials:
-        self._credentials, _ = google.auth.default()
+        self._credentials, _ = google.auth.default(
+            scopes=["https://www.googleapis.com/auth/cloud-platform"],
+        )
       authed_session = AuthorizedSession(self._credentials)
       authed_session.stream = stream
       response = authed_session.request(
@@ -290,7 +292,9 @@ class ApiClient:
   ):
     if self.vertexai:
      if not self._credentials:
-        self._credentials, _ = google.auth.default()
+        self._credentials, _ = google.auth.default(
+            scopes=["https://www.googleapis.com/auth/cloud-platform"],
+        )
     return await asyncio.to_thread(
         self._request,
         http_request,
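Both the sync and async Vertex AI request paths now ask Application Default Credentials for an explicit `cloud-platform` scope instead of relying on unscoped defaults. A minimal standalone sketch of the equivalent lookup, assuming `google-auth` is installed and ADC is configured:

``` python
import google.auth
from google.auth.transport.requests import AuthorizedSession

# Scoped ADC lookup, mirroring what ApiClient now does internally.
credentials, project = google.auth.default(
    scopes=['https://www.googleapis.com/auth/cloud-platform']
)
session = AuthorizedSession(credentials)
```

Explicit scoping matters for credential types that refuse to mint tokens without a scope.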
google/genai/_automatic_function_calling_util.py CHANGED
@@ -170,7 +170,6 @@ def _parse_schema_from_parameter(
   ):
     if not _is_default_value_compatible(param.default, param.annotation):
       raise ValueError(default_value_error_msg)
-    # TODO: b/379715133 - handle pydantic model default value
     schema.default = param.default
   _raise_if_schema_unsupported(client, schema)
   return schema
@@ -258,12 +257,11 @@ def _parse_schema_from_parameter(
       # for user defined class, we only support pydantic model
       and issubclass(param.annotation, pydantic.BaseModel)
   ):
-    if param.default is not inspect.Parameter.empty:
-      # TODO: b/379715133 - handle pydantic model default value
-      raise ValueError(
-          f'Default value {param.default} of Pydantic model{param} of function'
-          f' {func_name} is not supported.'
-      )
+    if (
+        param.default is not inspect.Parameter.empty
+        and param.default is not None
+    ):
+      schema.default = param.default
     schema.type = 'OBJECT'
     schema.properties = {}
     for field_name, field_info in param.annotation.model_fields.items():
@@ -294,4 +292,3 @@ def _get_required_fields(schema: types.Schema) -> list[str]:
       for field_name, field_schema in schema.properties.items()
       if not field_schema.nullable and field_schema.default is None
   ]
-
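Net effect of the two `_parse_schema_from_parameter` hunks: a Pydantic-model parameter with a default value no longer raises during automatic-function-calling schema parsing; the default is recorded on the generated schema instead. A hedged illustration (the model class and function are hypothetical):

``` python
import pydantic

class Location(pydantic.BaseModel):
  city: str = 'Paris'

def get_weather(location: Location = Location()) -> str:
  """Returns a forecast for the given location."""
  return f'Sunny in {location.city}'

# Under 0.2.2, registering get_weather as a tool raised
# "Default value ... of Pydantic model ... is not supported.";
# under 0.3.0 the default is kept in the schema instead.
```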
google/genai/_replay_api_client.py CHANGED
@@ -282,7 +282,7 @@ class ReplayApiClient(ApiClient):
     with open(replay_file_path, 'w') as f:
       f.write(
           json.dumps(
-              self.replay_session.model_dump(), indent=2, cls=RequestJsonEncoder
+              self.replay_session.model_dump(), indent=2, cls=ResponseJsonEncoder
           )
       )
     self.replay_session = None
google/genai/_transformers.py CHANGED
@@ -24,6 +24,7 @@ import time
 from typing import Any, Optional, Union
 
 import PIL.Image
+import PIL.PngImagePlugin
 
 from . import _api_client
 from . import types
@@ -298,6 +299,20 @@ def t_speech_config(
           prebuilt_voice_config=types.PrebuiltVoiceConfig(voice_name=origin)
       )
   )
+  if (
+      isinstance(origin, dict)
+      and 'voice_config' in origin
+      and 'prebuilt_voice_config' in origin['voice_config']
+  ):
+    return types.SpeechConfig(
+        voice_config=types.VoiceConfig(
+            prebuilt_voice_config=types.PrebuiltVoiceConfig(
+                voice_name=origin['voice_config']['prebuilt_voice_config'].get(
+                    'voice_name'
+                )
+            )
+        )
+    )
   raise ValueError(f'Unsupported speechConfig type: {type(origin)}')
 
 
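With this hunk, `t_speech_config` also normalizes the nested dict spelling of a speech config, not just the voice-name string shorthand. A hedged sketch of the two equivalent inputs (voice name illustrative):

``` python
from google.genai import types

# String shorthand, already accepted in 0.2.2:
config_a = 'Kore'

# Nested dict form, newly accepted in 0.3.0; both should normalize to the
# same types.SpeechConfig.
config_b = {
    'voice_config': {
        'prebuilt_voice_config': {'voice_name': 'Kore'}
    }
}
```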
google/genai/caches.py CHANGED
@@ -33,6 +33,9 @@ def _Part_to_mldev(
   if getv(from_object, ['video_metadata']):
     raise ValueError('video_metadata parameter is not supported in Google AI.')
 
+  if getv(from_object, ['thought']) is not None:
+    setv(to_object, ['thought'], getv(from_object, ['thought']))
+
   if getv(from_object, ['code_execution_result']) is not None:
     setv(
         to_object,
@@ -74,6 +77,9 @@ def _Part_to_vertex(
   if getv(from_object, ['video_metadata']) is not None:
     setv(to_object, ['videoMetadata'], getv(from_object, ['video_metadata']))
 
+  if getv(from_object, ['thought']) is not None:
+    setv(to_object, ['thought'], getv(from_object, ['thought']))
+
   if getv(from_object, ['code_execution_result']) is not None:
     setv(
         to_object,
@@ -638,6 +644,18 @@ def _CreateCachedContentConfig_to_mldev(
   if getv(from_object, ['display_name']) is not None:
     setv(parent_object, ['displayName'], getv(from_object, ['display_name']))
 
+  if getv(from_object, ['contents']) is not None:
+    setv(
+        parent_object,
+        ['contents'],
+        [
+            _Content_to_mldev(api_client, item, to_object)
+            for item in t.t_contents(
+                api_client, getv(from_object, ['contents'])
+            )
+        ],
+    )
+
   if getv(from_object, ['system_instruction']) is not None:
     setv(
         parent_object,
@@ -689,6 +707,18 @@ def _CreateCachedContentConfig_to_vertex(
   if getv(from_object, ['display_name']) is not None:
     setv(parent_object, ['displayName'], getv(from_object, ['display_name']))
 
+  if getv(from_object, ['contents']) is not None:
+    setv(
+        parent_object,
+        ['contents'],
+        [
+            _Content_to_vertex(api_client, item, to_object)
+            for item in t.t_contents(
+                api_client, getv(from_object, ['contents'])
+            )
+        ],
+    )
+
   if getv(from_object, ['system_instruction']) is not None:
     setv(
         parent_object,
@@ -735,18 +765,6 @@ def _CreateCachedContentParameters_to_mldev(
       t.t_caches_model(api_client, getv(from_object, ['model'])),
   )
 
-  if getv(from_object, ['contents']) is not None:
-    setv(
-        to_object,
-        ['contents'],
-        [
-            _Content_to_mldev(api_client, item, to_object)
-            for item in t.t_contents(
-                api_client, getv(from_object, ['contents'])
-            )
-        ],
-    )
-
   if getv(from_object, ['config']) is not None:
     setv(
         to_object,
@@ -772,18 +790,6 @@ def _CreateCachedContentParameters_to_vertex(
       t.t_caches_model(api_client, getv(from_object, ['model'])),
   )
 
-  if getv(from_object, ['contents']) is not None:
-    setv(
-        to_object,
-        ['contents'],
-        [
-            _Content_to_vertex(api_client, item, to_object)
-            for item in t.t_contents(
-                api_client, getv(from_object, ['contents'])
-            )
-        ],
-    )
-
   if getv(from_object, ['config']) is not None:
     setv(
         to_object,
@@ -1238,7 +1244,6 @@ class Caches(_common.BaseModule):
       self,
       *,
       model: str,
-      contents: Union[types.ContentListUnion, types.ContentListUnionDict],
       config: Optional[types.CreateCachedContentConfigOrDict] = None,
   ) -> types.CachedContent:
     """Creates cached content, this call will initialize the cached
@@ -1264,7 +1269,6 @@ class Caches(_common.BaseModule):
 
     parameter_model = types._CreateCachedContentParameters(
         model=model,
-        contents=contents,
         config=config,
     )
 
@@ -1542,7 +1546,6 @@ class AsyncCaches(_common.BaseModule):
       self,
       *,
       model: str,
-      contents: Union[types.ContentListUnion, types.ContentListUnionDict],
       config: Optional[types.CreateCachedContentConfigOrDict] = None,
   ) -> types.CachedContent:
     """Creates cached content, this call will initialize the cached
@@ -1568,7 +1571,6 @@ class AsyncCaches(_common.BaseModule):
 
     parameter_model = types._CreateCachedContentParameters(
         model=model,
-        contents=contents,
        config=config,
     )
 
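Taken together, the caches.py hunks move `contents` off the `create()` signature and into the config (the wire mappers move correspondingly from the parameters converters into `_CreateCachedContentConfig_to_*`). A hedged migration sketch, assuming `client` and `contents` are already in scope:

``` python
from google.genai import types

# 0.2.2: cached = client.caches.create(model=..., contents=contents, config=...)
# 0.3.0: contents now travels inside the config object.
cached = client.caches.create(
    model='gemini-1.5-flash-002',
    config=types.CreateCachedContentConfig(
        display_name='test cache',
        contents=contents,  # same value as before, new location
        ttl='3600s',
    ),
)
```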
google/genai/chats.py CHANGED
@@ -18,7 +18,25 @@ from typing import Union
 
 from . import _transformers as t
 from .models import AsyncModels, Models
-from .types import Content, ContentDict, GenerateContentConfigOrDict, GenerateContentResponse, PartUnionDict
+from .types import Content, ContentDict, GenerateContentConfigOrDict, GenerateContentResponse, Part, PartUnionDict
+
+
+def _validate_response(
+    response: GenerateContentResponse
+) -> bool:
+  if not response.candidates:
+    return False
+  if not response.candidates[0].content:
+    return False
+  if not response.candidates[0].content.parts:
+    return False
+  for part in response.candidates[0].content.parts:
+    if part == Part():
+      return False
+    if part.text is not None and part.text == "":
+      return False
+  return True
+
 
 class _BaseChat:
   """Base chat session."""
@@ -65,7 +83,7 @@ class Chat(_BaseChat):
         contents=self._curated_history + [input_content],
         config=self._config,
     )
-    if response.candidates and response.candidates[0].content:
+    if _validate_response(response):
       if response.automatic_function_calling_history:
         self._curated_history.extend(
             response.automatic_function_calling_history
@@ -75,14 +93,42 @@ class Chat(_BaseChat):
       self._curated_history.append(response.candidates[0].content)
     return response
 
-  def _send_message_stream(self, message: Union[list[ContentDict], str]):
-    for content in t.t_contents(self._modules.api_client, message):
-      self._curated_history.append(content)
+  def send_message_stream(
+      self, message: Union[list[PartUnionDict], PartUnionDict]
+  ):
+    """Sends the conversation history with the additional message and yields the model's response in chunks.
+
+    Args:
+      message: The message to send to the model.
+
+    Yields:
+      The model's response in chunks.
+
+    Usage:
+
+    .. code-block:: python
+
+      chat = client.chats.create(model='gemini-1.5-flash')
+      for chunk in chat.send_message_stream('tell me a story'):
+        print(chunk.text)
+    """
+
+    input_content = t.t_content(self._modules.api_client, message)
+    output_contents = []
+    finish_reason = None
     for chunk in self._modules.generate_content_stream(
-        model=self._model, contents=self._curated_history, config=self._config
+        model=self._model,
+        contents=self._curated_history + [input_content],
+        config=self._config,
     ):
-      # TODO(b/381089069): add successful response to history
+      if _validate_response(chunk):
+        output_contents.append(chunk.candidates[0].content)
+      if chunk.candidates and chunk.candidates[0].finish_reason:
+        finish_reason = chunk.candidates[0].finish_reason
       yield chunk
+    if output_contents and finish_reason:
+      self._curated_history.append(input_content)
+      self._curated_history.extend(output_contents)
 
 
 class Chats:
@@ -134,8 +180,8 @@ class AsyncChat(_BaseChat):
 
     .. code-block:: python
 
-      chat = client.chats.create(model='gemini-1.5-flash')
-      response = chat.send_message('tell me a story')
+      chat = client.aio.chats.create(model='gemini-1.5-flash')
+      response = await chat.send_message('tell me a story')
     """
 
     input_content = t.t_content(self._modules.api_client, message)
@@ -144,7 +190,7 @@ class AsyncChat(_BaseChat):
         contents=self._curated_history + [input_content],
         config=self._config,
     )
-    if response.candidates and response.candidates[0].content:
+    if _validate_response(response):
       if response.automatic_function_calling_history:
         self._curated_history.extend(
             response.automatic_function_calling_history
@@ -154,14 +200,41 @@ class AsyncChat(_BaseChat):
       self._curated_history.append(response.candidates[0].content)
     return response
 
-  async def _send_message_stream(self, message: Union[list[ContentDict], str]):
-    for content in t.t_contents(self._modules.api_client, message):
-      self._curated_history.append(content)
+  async def send_message_stream(
+      self, message: Union[list[PartUnionDict], PartUnionDict]
+  ):
+    """Sends the conversation history with the additional message and yields the model's response in chunks.
+
+    Args:
+      message: The message to send to the model.
+
+    Yields:
+      The model's response in chunks.
+
+    Usage:
+
+    .. code-block:: python
+      chat = client.aio.chats.create(model='gemini-1.5-flash')
+      async for chunk in chat.send_message_stream('tell me a story'):
+        print(chunk.text)
+    """
+
+    input_content = t.t_content(self._modules.api_client, message)
+    output_contents = []
+    finish_reason = None
     async for chunk in self._modules.generate_content_stream(
-        model=self._model, contents=self._curated_history, config=self._config
+        model=self._model,
+        contents=self._curated_history + [input_content],
+        config=self._config,
     ):
-      # TODO(b/381089069): add successful response to history
+      if _validate_response(chunk):
+        output_contents.append(chunk.candidates[0].content)
+      if chunk.candidates and chunk.candidates[0].finish_reason:
+        finish_reason = chunk.candidates[0].finish_reason
       yield chunk
+    if output_contents and finish_reason:
+      self._curated_history.append(input_content)
+      self._curated_history.extend(output_contents)
 
 
 class AsyncChats:
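The previously private `_send_message_stream`, which appended the request to history before the call and never recorded the reply, is now public and history-safe: chunks that pass `_validate_response` are buffered and committed, together with the input, only once a `finish_reason` arrives. A hedged usage sketch (assumes `GOOGLE_API_KEY` or the Vertex AI environment variables are configured):

``` python
from google import genai

client = genai.Client()
chat = client.chats.create(model='gemini-1.5-flash')
for chunk in chat.send_message_stream('tell me a story'):
    print(chunk.text, end='')

# The streamed turn is now part of the curated history, so a follow-up
# message sees the full story.
response = chat.send_message('summarize it in one sentence')
```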
google/genai/models.py CHANGED
@@ -35,6 +35,9 @@ def _Part_to_mldev(
   if getv(from_object, ['video_metadata']):
     raise ValueError('video_metadata parameter is not supported in Google AI.')
 
+  if getv(from_object, ['thought']) is not None:
+    setv(to_object, ['thought'], getv(from_object, ['thought']))
+
   if getv(from_object, ['code_execution_result']) is not None:
     setv(
         to_object,
@@ -76,6 +79,9 @@ def _Part_to_vertex(
   if getv(from_object, ['video_metadata']) is not None:
     setv(to_object, ['videoMetadata'], getv(from_object, ['video_metadata']))
 
+  if getv(from_object, ['thought']) is not None:
+    setv(to_object, ['thought'], getv(from_object, ['thought']))
+
   if getv(from_object, ['code_execution_result']) is not None:
     setv(
         to_object,
@@ -2708,6 +2714,9 @@ def _Part_from_mldev(
 ) -> dict:
   to_object = {}
 
+  if getv(from_object, ['thought']) is not None:
+    setv(to_object, ['thought'], getv(from_object, ['thought']))
+
   if getv(from_object, ['codeExecutionResult']) is not None:
     setv(
         to_object,
@@ -2749,6 +2758,9 @@ def _Part_from_vertex(
   if getv(from_object, ['videoMetadata']) is not None:
     setv(to_object, ['video_metadata'], getv(from_object, ['videoMetadata']))
 
+  if getv(from_object, ['thought']) is not None:
+    setv(to_object, ['thought'], getv(from_object, ['thought']))
+
   if getv(from_object, ['codeExecutionResult']) is not None:
     setv(
         to_object,
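All four `Part` mappers now carry the new `thought` flag in both directions; per these hunks the wire field is `thought` in the Google AI and Vertex formats alike. A hedged sketch of a thought-marked part built with the public types:

``` python
from google.genai import types

# thought=True marks a reasoning part emitted by the model; the mappers
# above now preserve the flag instead of dropping it.
part = types.Part(text='First, compare the two totals...', thought=True)
content = types.Content(role='model', parts=[part])
```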
google/genai/types.py CHANGED
@@ -441,6 +441,10 @@ class Part(_common.BaseModel):
   video_metadata: Optional[VideoMetadata] = Field(
       default=None, description="""Metadata for a given video."""
   )
+  thought: Optional[bool] = Field(
+      default=None,
+      description="""Indicates if the part is thought from the model.""",
+  )
   code_execution_result: Optional[CodeExecutionResult] = Field(
       default=None,
       description="""Optional. Result of executing the [ExecutableCode].""",
@@ -525,6 +529,9 @@ class PartDict(TypedDict, total=False):
   video_metadata: Optional[VideoMetadataDict]
   """Metadata for a given video."""
 
+  thought: Optional[bool]
+  """Indicates if the part is thought from the model."""
+
   code_execution_result: Optional[CodeExecutionResultDict]
   """Optional. Result of executing the [ExecutableCode]."""
 
@@ -2430,13 +2437,17 @@ class GenerateContentResponse(_common.BaseModel):
     text = ""
     any_text_part_text = False
     for part in self.candidates[0].content.parts:
-      for field_name, field_value in part.dict(exclude={"text"}).items():
+      for field_name, field_value in part.dict(
+          exclude={"text", "thought"}
+      ).items():
         if field_value is not None:
           raise ValueError(
               "GenerateContentResponse.text only supports text parts, but got"
               f" {field_name} part{part}"
           )
       if isinstance(part.text, str):
+        if isinstance(part.thought, bool) and part.thought:
+          continue
         any_text_part_text = True
         text += part.text
     # part.text == '' is different from part.text is None
@@ -5567,6 +5578,11 @@ class CreateCachedContentConfig(_common.BaseModel):
       description="""The user-generated meaningful display name of the cached content.
   """,
   )
+  contents: Optional[ContentListUnion] = Field(
+      default=None,
+      description="""The content to cache.
+  """,
+  )
   system_instruction: Optional[ContentUnion] = Field(
       default=None,
       description="""Developer set system instruction.
@@ -5600,6 +5616,10 @@ class CreateCachedContentConfigDict(TypedDict, total=False):
   """The user-generated meaningful display name of the cached content.
   """
 
+  contents: Optional[ContentListUnionDict]
+  """The content to cache.
+  """
+
   system_instruction: Optional[ContentUnionDict]
   """Developer set system instruction.
   """
@@ -5625,11 +5645,6 @@ class _CreateCachedContentParameters(_common.BaseModel):
       default=None,
       description="""ID of the model to use. Example: gemini-1.5-flash""",
   )
-  contents: Optional[ContentListUnion] = Field(
-      default=None,
-      description="""The content to cache.
-  """,
-  )
   config: Optional[CreateCachedContentConfig] = Field(
       default=None,
       description="""Configuration that contains optional parameters.
@@ -5643,10 +5658,6 @@ class _CreateCachedContentParametersDict(TypedDict, total=False):
   model: Optional[str]
   """ID of the model to use. Example: gemini-1.5-flash"""
 
-  contents: Optional[ContentListUnionDict]
-  """The content to cache.
-  """
-
   config: Optional[CreateCachedContentConfigDict]
   """Configuration that contains optional parameters.
   """
@@ -7699,6 +7710,8 @@ class LiveServerMessage(_common.BaseModel):
     text = ""
     for part in self.server_content.model_turn.parts:
       if isinstance(part.text, str):
+        if isinstance(part.thought, bool) and part.thought:
+          continue
         text += part.text
     return text if text else None
 
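`Part.thought` is threaded through both text aggregators: `GenerateContentResponse.text` and `LiveServerMessage.text` now skip thought parts. A hedged illustration, assuming `response` is a `GenerateContentResponse` already in hand:

``` python
# response.text concatenates only non-thought text parts; to see the
# model's reasoning you read the parts directly.
for part in response.candidates[0].content.parts:
    if part.thought:
        print('[thought]', part.text)
    elif part.text is not None:
        print(part.text)
```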
google_genai-{0.2.2 → 0.3.0}.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: google-genai
-Version: 0.2.2
+Version: 0.3.0
 Summary: GenAI Python SDK
 Author-email: Google LLC <googleapis-packages@google.com>
 License: Apache-2.0
@@ -35,6 +35,12 @@ Requires-Dist: websockets<15.0dev,>=13.0
 
 -----
 
+## Installation
+
+``` cmd
+pip install google-genai
+```
+
 ## Imports
 
 ``` python
@@ -455,6 +461,8 @@ response2.generated_images[0].image.show()
 
 #### Edit Image
 
+Edit image uses a separate model from generate and upscale.
+
 Edit image is not supported in Google AI.
 
 ``` python
@@ -475,7 +483,7 @@ mask_ref_image = MaskReferenceImage(
 )
 
 response3 = client.models.edit_image(
-    model='imagen-3.0-capability-preview-0930',
+    model='imagen-3.0-capability-001',
     prompt='Sunlight and clear sky',
     reference_images=[raw_ref_image, mask_ref_image],
     config=types.EditImageConfig(
@@ -542,9 +550,9 @@ cached_content = client.caches.create(
             file_uri=file_uris[1],
             mime_type='application/pdf',)])
     ],
+    system_instruction='What is the sum of the two pdfs?',
     config=types.CreateCachedContentConfig(
         display_name='test cache',
-        system_instruction='What is the sum of the two pdfs?',
         ttl='3600s',
     ),
 )
google_genai-0.3.0.dist-info/RECORD ADDED
@@ -0,0 +1,24 @@
+google/genai/__init__.py,sha256=rFj7z7zuHpBIR70hkGCm9mGZstrO4TgxaJcv-89xokQ,674
+google/genai/_api_client.py,sha256=05gZvxU9r1LcXmHEY9qscuDCm0O8GsxepYm8wpnO_3Y,16110
+google/genai/_automatic_function_calling_util.py,sha256=aiAIsHMyW6NM3ROS7J7n6BhTG_DuMeeTBVGZbAaCJFs,10048
+google/genai/_common.py,sha256=Yj5cBkq5QRNFSBqvpB949Rjo7cbIhdtKp5dJxMW_I6I,7971
+google/genai/_extra_utils.py,sha256=GQZnraFCrMffqrBEpurdcBmgrltRsnYgMizt-Ok6xX8,11098
+google/genai/_replay_api_client.py,sha256=D9AedHL5jgJAojDxJaLuD-HmDSpBjfV_6gt-ZTFfzLo,16227
+google/genai/_test_api_client.py,sha256=p771T27icmzENxKtyNDwPG1sTI7jaoJNFPwlwq9GK6o,4759
+google/genai/_transformers.py,sha256=_2p1HbZYeDYfQiu24gLBRwMh5HqzSRgDHy9XvZTvogQ,13900
+google/genai/batches.py,sha256=Wi4Kptampp2WepAqv_AawwNCR6MKVhLKmzJdYXDQ_aE,37148
+google/genai/caches.py,sha256=YSzKMwnxbiwev9TqPlUjCvxq8-3Ez-LMqYZiI5eSE_M,53468
+google/genai/chats.py,sha256=NK3zHE64odk22TJYEY2ywFqsCxBiCGWfU17GQkavEPk,7643
+google/genai/client.py,sha256=HH_lYnjPOwW-4Vgynyw4K8cwurT2g578Dc51H_uk7GY,9244
+google/genai/errors.py,sha256=ZqJvfuJ7oS334WBrq3otzdZfmhEgcM1OBZhHccYzDok,3665
+google/genai/files.py,sha256=dn3q8P9aTN9OG3PtA4AYDs9hF6Uk-jkMjgAW7dSlt_4,35573
+google/genai/live.py,sha256=T-pOtq7k43wE2VjQzqLrx-kqhotS66I2PY_NHBdv9G8,22056
+google/genai/models.py,sha256=T-ElWMqfzzhpcVaOuiTlqrwyxNGhaTyO8eAWULDYxvc,155294
+google/genai/pagers.py,sha256=hSHd-gLvEzYWwK85i8EcFNWUMKtszUs7Nw2r3L7d6_U,6686
+google/genai/tunings.py,sha256=tFTSEaECKZ6xeYcxUTIKUmXqPoDymYP3eyTcEKjnPa4,49010
+google/genai/types.py,sha256=TclDP-B52YJHt7mxabOQY8M9Sd2IMHJgADcYWYVsvew,264243
+google_genai-0.3.0.dist-info/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
+google_genai-0.3.0.dist-info/METADATA,sha256=yYY6ejmQZExyP6h__U0ihpstvnuMIOu_XpcmT1-eVck,19278
+google_genai-0.3.0.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
+google_genai-0.3.0.dist-info/top_level.txt,sha256=_1QvSJIhFAGfxb79D6DhB7SUw2X6T4rwnz_LLrbcD3c,7
+google_genai-0.3.0.dist-info/RECORD,,
google_genai-0.2.2.dist-info/RECORD DELETED
@@ -1,24 +0,0 @@
-google/genai/__init__.py,sha256=R7sy9MQmlItVERIQEWt25bApyNXxDIkYP4nl3-EtX50,674
-google/genai/_api_client.py,sha256=naJy-6OjgjUNlciZNxDconmSOFhewD_0QsggNY1aCik,15950
-google/genai/_automatic_function_calling_util.py,sha256=E25_66RH3DbDIucq7x-93XWPPBwB9FnzwD1NCGyPrjM,10242
-google/genai/_common.py,sha256=Yj5cBkq5QRNFSBqvpB949Rjo7cbIhdtKp5dJxMW_I6I,7971
-google/genai/_extra_utils.py,sha256=GQZnraFCrMffqrBEpurdcBmgrltRsnYgMizt-Ok6xX8,11098
-google/genai/_replay_api_client.py,sha256=QPNg4SBpOLS58bx-kuJQngxy1tbjMpCpJzmImCwYePA,16226
-google/genai/_test_api_client.py,sha256=p771T27icmzENxKtyNDwPG1sTI7jaoJNFPwlwq9GK6o,4759
-google/genai/_transformers.py,sha256=_zWNr7zFTrUFniECYaZUn0n4TdioLpj783l3-z1XvIE,13443
-google/genai/batches.py,sha256=Wi4Kptampp2WepAqv_AawwNCR6MKVhLKmzJdYXDQ_aE,37148
-google/genai/caches.py,sha256=LJm2raykec7_iCHsVbEtX4v942mR-OSQvxTVKcBN2RA,53434
-google/genai/chats.py,sha256=x-vCXrsxZ8kdEZ_0ZDfrBQnQ9urCr42x3urP0OXHyTo,5688
-google/genai/client.py,sha256=HH_lYnjPOwW-4Vgynyw4K8cwurT2g578Dc51H_uk7GY,9244
-google/genai/errors.py,sha256=ZqJvfuJ7oS334WBrq3otzdZfmhEgcM1OBZhHccYzDok,3665
-google/genai/files.py,sha256=dn3q8P9aTN9OG3PtA4AYDs9hF6Uk-jkMjgAW7dSlt_4,35573
-google/genai/live.py,sha256=T-pOtq7k43wE2VjQzqLrx-kqhotS66I2PY_NHBdv9G8,22056
-google/genai/models.py,sha256=t5XgwlgkNrQKb6eww0oBGzjMiMQaj-BQedc8lVdJHz4,154834
-google/genai/pagers.py,sha256=hSHd-gLvEzYWwK85i8EcFNWUMKtszUs7Nw2r3L7d6_U,6686
-google/genai/tunings.py,sha256=tFTSEaECKZ6xeYcxUTIKUmXqPoDymYP3eyTcEKjnPa4,49010
-google/genai/types.py,sha256=JC7CBQVRzVwImsT03t6Qv_vMYq8V58z3SF-rzvrUJHc,263839
-google_genai-0.2.2.dist-info/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
-google_genai-0.2.2.dist-info/METADATA,sha256=Q1eNLWWM0fqko3S5gfudpPYrlmnebdnXXLOJGjAtmG0,19175
-google_genai-0.2.2.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
-google_genai-0.2.2.dist-info/top_level.txt,sha256=_1QvSJIhFAGfxb79D6DhB7SUw2X6T4rwnz_LLrbcD3c,7
-google_genai-0.2.2.dist-info/RECORD,,