google-genai 1.0.0rc0__py3-none-any.whl → 1.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
google/genai/chats.py CHANGED
@@ -57,12 +57,16 @@ class Chat(_BaseChat):
57
57
  """Chat session."""
58
58
 
59
59
  def send_message(
60
- self, message: Union[list[PartUnionDict], PartUnionDict]
60
+ self,
61
+ message: Union[list[PartUnionDict], PartUnionDict],
62
+ config: Optional[GenerateContentConfigOrDict] = None,
61
63
  ) -> GenerateContentResponse:
62
64
  """Sends the conversation history with the additional message and returns the model's response.
63
65
 
64
66
  Args:
65
67
  message: The message to send to the model.
68
+ config: Optional config to override the default Chat config for this
69
+ request.
66
70
 
67
71
  Returns:
68
72
  The model's response.
@@ -79,7 +83,7 @@ class Chat(_BaseChat):
79
83
  response = self._modules.generate_content(
80
84
  model=self._model,
81
85
  contents=self._curated_history + [input_content],
82
- config=self._config,
86
+ config=config if config else self._config,
83
87
  )
84
88
  if _validate_response(response):
85
89
  if response.automatic_function_calling_history:
@@ -92,12 +96,16 @@ class Chat(_BaseChat):
92
96
  return response
93
97
 
94
98
  def send_message_stream(
95
- self, message: Union[list[PartUnionDict], PartUnionDict]
99
+ self,
100
+ message: Union[list[PartUnionDict], PartUnionDict],
101
+ config: Optional[GenerateContentConfigOrDict] = None,
96
102
  ):
97
103
  """Sends the conversation history with the additional message and yields the model's response in chunks.
98
104
 
99
105
  Args:
100
106
  message: The message to send to the model.
107
+ config: Optional config to override the default Chat config for this
108
+ request.
101
109
 
102
110
  Yields:
103
111
  The model's response in chunks.
@@ -117,7 +125,7 @@ class Chat(_BaseChat):
117
125
  for chunk in self._modules.generate_content_stream(
118
126
  model=self._model,
119
127
  contents=self._curated_history + [input_content],
120
- config=self._config,
128
+ config=config if config else self._config,
121
129
  ):
122
130
  if _validate_response(chunk):
123
131
  output_contents.append(chunk.candidates[0].content)
@@ -164,12 +172,16 @@ class AsyncChat(_BaseChat):
164
172
  """Async chat session."""
165
173
 
166
174
  async def send_message(
167
- self, message: Union[list[PartUnionDict], PartUnionDict]
175
+ self,
176
+ message: Union[list[PartUnionDict], PartUnionDict],
177
+ config: Optional[GenerateContentConfigOrDict] = None,
168
178
  ) -> GenerateContentResponse:
169
179
  """Sends the conversation history with the additional message and returns model's response.
170
180
 
171
181
  Args:
172
182
  message: The message to send to the model.
183
+ config: Optional config to override the default Chat config for this
184
+ request.
173
185
 
174
186
  Returns:
175
187
  The model's response.
@@ -186,7 +198,7 @@ class AsyncChat(_BaseChat):
186
198
  response = await self._modules.generate_content(
187
199
  model=self._model,
188
200
  contents=self._curated_history + [input_content],
189
- config=self._config,
201
+ config=config if config else self._config,
190
202
  )
191
203
  if _validate_response(response):
192
204
  if response.automatic_function_calling_history:
@@ -199,12 +211,16 @@ class AsyncChat(_BaseChat):
199
211
  return response
200
212
 
201
213
  async def send_message_stream(
202
- self, message: Union[list[PartUnionDict], PartUnionDict]
214
+ self,
215
+ message: Union[list[PartUnionDict], PartUnionDict],
216
+ config: Optional[GenerateContentConfigOrDict] = None,
203
217
  ) -> Awaitable[AsyncIterator[GenerateContentResponse]]:
204
218
  """Sends the conversation history with the additional message and yields the model's response in chunks.
205
219
 
206
220
  Args:
207
221
  message: The message to send to the model.
222
+ config: Optional config to override the default Chat config for this
223
+ request.
208
224
 
209
225
  Yields:
210
226
  The model's response in chunks.
@@ -225,7 +241,7 @@ class AsyncChat(_BaseChat):
225
241
  async for chunk in await self._modules.generate_content_stream(
226
242
  model=self._model,
227
243
  contents=self._curated_history + [input_content],
228
- config=self._config,
244
+ config=config if config else self._config,
229
245
  ):
230
246
  if _validate_response(chunk):
231
247
  output_contents.append(chunk.candidates[0].content)
google/genai/client.py CHANGED
@@ -94,6 +94,17 @@ class Client:
94
94
  Use this client to make a request to the Gemini Developer API or Vertex AI
95
95
  API and then wait for the response.
96
96
 
97
+ To initialize the client, provide the required arguments either directly
98
+ or by using environment variables. Gemini API users and Vertex AI users in
99
+ express mode can provide an API key by passing the input argument
100
+ `api_key="your-api-key"` or by defining `GOOGLE_API_KEY="your-api-key"` as an
101
+ environment variable.
102
+
103
+ Vertex AI API users can provide input arguments as `vertexai=True,
104
+ project="your-project-id", location="us-central1"` or by defining
105
+ `GOOGLE_GENAI_USE_VERTEXAI=true`, `GOOGLE_CLOUD_PROJECT` and
106
+ `GOOGLE_CLOUD_LOCATION` environment variables.
107
+
97
108
  Attributes:
98
109
  api_key: The `API key <https://ai.google.dev/gemini-api/docs/api-key>`_ to
99
110
  use for authentication. Applies to the Gemini Developer API only.
@@ -173,21 +184,11 @@ class Client:
173
184
  debug_config (DebugConfig): Config settings that control network behavior
174
185
  of the client. This is typically used when running test code.
175
186
  http_options (Union[HttpOptions, HttpOptionsDict]): Http options to use
176
- for the client. The field deprecated_response_payload should not be set
177
- in http_options.
187
+ for the client.
178
188
  """
179
189
 
180
190
  self._debug_config = debug_config or DebugConfig()
181
191
 
182
- # Throw ValueError if deprecated_response_payload is set in http_options
183
- # due to unpredictable behavior when running multiple coroutines through
184
- # client.aio.
185
- if http_options and 'deprecated_response_payload' in http_options:
186
- raise ValueError(
187
- 'Setting deprecated_response_payload in http_options is not'
188
- ' supported.'
189
- )
190
-
191
192
  self._api_client = self._get_api_client(
192
193
  vertexai=vertexai,
193
194
  api_key=api_key,
@@ -272,4 +273,4 @@ class Client:
272
273
  @property
273
274
  def vertexai(self) -> bool:
274
275
  """Returns whether the client is using the Vertex AI API."""
275
- return self._api_client.vertexai or False
276
+ return self._api_client.vertexai or False
google/genai/errors.py CHANGED
@@ -128,3 +128,7 @@ class FunctionInvocationError(ValueError):
128
128
  """Raised when the function cannot be invoked with the given arguments."""
129
129
 
130
130
  pass
131
+
132
+
133
+ class ExperimentalWarning(Warning):
134
+ """Warning for experimental features."""
google/genai/files.py CHANGED
@@ -494,6 +494,8 @@ def _CreateFileResponse_from_mldev(
494
494
  parent_object: dict = None,
495
495
  ) -> dict:
496
496
  to_object = {}
497
+ if getv(from_object, ['httpHeaders']) is not None:
498
+ setv(to_object, ['http_headers'], getv(from_object, ['httpHeaders']))
497
499
 
498
500
  return to_object
499
501
 
@@ -504,6 +506,8 @@ def _CreateFileResponse_from_vertex(
504
506
  parent_object: dict = None,
505
507
  ) -> dict:
506
508
  to_object = {}
509
+ if getv(from_object, ['httpHeaders']) is not None:
510
+ setv(to_object, ['http_headers'], getv(from_object, ['httpHeaders']))
507
511
 
508
512
  return to_object
509
513
 
@@ -840,7 +844,7 @@ class Files(_api_module.BaseModule):
840
844
  'Unknown mime type: Could not determine the mimetype for your'
841
845
  ' file\n please set the `mime_type` argument'
842
846
  )
843
- response = {}
847
+
844
848
  if config_model and config_model.http_options:
845
849
  http_options = config_model.http_options
846
850
  else:
@@ -853,19 +857,20 @@ class Files(_api_module.BaseModule):
853
857
  'X-Goog-Upload-Header-Content-Length': f'{file_obj.size_bytes}',
854
858
  'X-Goog-Upload-Header-Content-Type': f'{file_obj.mime_type}',
855
859
  },
856
- 'deprecated_response_payload': response,
857
860
  }
858
- self._create(file=file_obj, config={'http_options': http_options})
861
+ response = self._create(
862
+ file=file_obj, config={'http_options': http_options}
863
+ )
859
864
 
860
865
  if (
861
- 'headers' not in response
862
- or 'X-Goog-Upload-URL' not in response['headers']
866
+ response.http_headers is None
867
+ or 'X-Goog-Upload-URL' not in response.http_headers
863
868
  ):
864
869
  raise KeyError(
865
870
  'Failed to create file. Upload URL did not returned from the create'
866
871
  ' file request.'
867
872
  )
868
- upload_url = response['headers']['X-Goog-Upload-URL']
873
+ upload_url = response.http_headers['X-Goog-Upload-URL']
869
874
 
870
875
  if isinstance(file, io.IOBase):
871
876
  return_file = self._api_client.upload_file(
@@ -1272,7 +1277,6 @@ class AsyncFiles(_api_module.BaseModule):
1272
1277
  ' file\n please set the `mime_type` argument'
1273
1278
  )
1274
1279
 
1275
- response = {}
1276
1280
  if config_model and config_model.http_options:
1277
1281
  http_options = config_model.http_options
1278
1282
  else:
@@ -1285,18 +1289,20 @@ class AsyncFiles(_api_module.BaseModule):
1285
1289
  'X-Goog-Upload-Header-Content-Length': f'{file_obj.size_bytes}',
1286
1290
  'X-Goog-Upload-Header-Content-Type': f'{file_obj.mime_type}',
1287
1291
  },
1288
- 'deprecated_response_payload': response,
1289
1292
  }
1290
- await self._create(file=file_obj, config={'http_options': http_options})
1293
+ response = await self._create(
1294
+ file=file_obj, config={'http_options': http_options}
1295
+ )
1296
+
1291
1297
  if (
1292
- 'headers' not in response
1293
- or 'X-Goog-Upload-URL' not in response['headers']
1298
+ response.http_headers is None
1299
+ or 'X-Goog-Upload-URL' not in response.http_headers
1294
1300
  ):
1295
1301
  raise KeyError(
1296
1302
  'Failed to create file. Upload URL did not returned from the create'
1297
1303
  ' file request.'
1298
1304
  )
1299
- upload_url = response['headers']['X-Goog-Upload-URL']
1305
+ upload_url = response.http_headers['X-Goog-Upload-URL']
1300
1306
 
1301
1307
  if isinstance(file, io.IOBase):
1302
1308
  return_file = await self._api_client.async_upload_file(
google/genai/live.py CHANGED
@@ -29,8 +29,10 @@ from . import _api_module
29
29
  from . import _common
30
30
  from . import _transformers as t
31
31
  from . import client
32
+ from . import errors
32
33
  from . import types
33
34
  from ._api_client import ApiClient
35
+ from ._common import experimental_warning
34
36
  from ._common import get_value_by_path as getv
35
37
  from ._common import set_value_by_path as setv
36
38
  from .models import _Content_from_mldev
@@ -633,6 +635,9 @@ class AsyncLive(_api_module.BaseModule):
633
635
  return_value['setup'].update(to_object)
634
636
  return return_value
635
637
 
638
+ @experimental_warning(
639
+ "The live API is experimental and may change in future versions.",
640
+ )
636
641
  @contextlib.asynccontextmanager
637
642
  async def connect(
638
643
  self,