google-genai 0.2.2__py3-none-any.whl → 0.4.0__py3-none-any.whl

This diff shows the changes between two publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
google/genai/caches.py CHANGED
@@ -30,9 +30,12 @@ def _Part_to_mldev(
     parent_object: dict = None,
 ) -> dict:
   to_object = {}
-  if getv(from_object, ['video_metadata']):
+  if getv(from_object, ['video_metadata']) is not None:
     raise ValueError('video_metadata parameter is not supported in Google AI.')
 
+  if getv(from_object, ['thought']) is not None:
+    setv(to_object, ['thought'], getv(from_object, ['thought']))
+
   if getv(from_object, ['code_execution_result']) is not None:
     setv(
         to_object,
@@ -74,6 +77,9 @@ def _Part_to_vertex(
   if getv(from_object, ['video_metadata']) is not None:
     setv(to_object, ['videoMetadata'], getv(from_object, ['video_metadata']))
 
+  if getv(from_object, ['thought']) is not None:
+    setv(to_object, ['thought'], getv(from_object, ['thought']))
+
   if getv(from_object, ['code_execution_result']) is not None:
     setv(
         to_object,
@@ -156,51 +162,51 @@ def _Schema_to_mldev(
     parent_object: dict = None,
 ) -> dict:
   to_object = {}
-  if getv(from_object, ['min_items']):
+  if getv(from_object, ['min_items']) is not None:
     raise ValueError('min_items parameter is not supported in Google AI.')
 
-  if getv(from_object, ['example']):
+  if getv(from_object, ['example']) is not None:
     raise ValueError('example parameter is not supported in Google AI.')
 
-  if getv(from_object, ['property_ordering']):
+  if getv(from_object, ['property_ordering']) is not None:
     raise ValueError(
         'property_ordering parameter is not supported in Google AI.'
     )
 
-  if getv(from_object, ['pattern']):
+  if getv(from_object, ['pattern']) is not None:
     raise ValueError('pattern parameter is not supported in Google AI.')
 
-  if getv(from_object, ['minimum']):
+  if getv(from_object, ['minimum']) is not None:
     raise ValueError('minimum parameter is not supported in Google AI.')
 
-  if getv(from_object, ['default']):
+  if getv(from_object, ['default']) is not None:
     raise ValueError('default parameter is not supported in Google AI.')
 
-  if getv(from_object, ['any_of']):
+  if getv(from_object, ['any_of']) is not None:
     raise ValueError('any_of parameter is not supported in Google AI.')
 
-  if getv(from_object, ['max_length']):
+  if getv(from_object, ['max_length']) is not None:
     raise ValueError('max_length parameter is not supported in Google AI.')
 
-  if getv(from_object, ['title']):
+  if getv(from_object, ['title']) is not None:
     raise ValueError('title parameter is not supported in Google AI.')
 
-  if getv(from_object, ['min_length']):
+  if getv(from_object, ['min_length']) is not None:
     raise ValueError('min_length parameter is not supported in Google AI.')
 
-  if getv(from_object, ['min_properties']):
+  if getv(from_object, ['min_properties']) is not None:
     raise ValueError('min_properties parameter is not supported in Google AI.')
 
-  if getv(from_object, ['max_items']):
+  if getv(from_object, ['max_items']) is not None:
     raise ValueError('max_items parameter is not supported in Google AI.')
 
-  if getv(from_object, ['maximum']):
+  if getv(from_object, ['maximum']) is not None:
     raise ValueError('maximum parameter is not supported in Google AI.')
 
-  if getv(from_object, ['nullable']):
+  if getv(from_object, ['nullable']) is not None:
     raise ValueError('nullable parameter is not supported in Google AI.')
 
-  if getv(from_object, ['max_properties']):
+  if getv(from_object, ['max_properties']) is not None:
     raise ValueError('max_properties parameter is not supported in Google AI.')
 
   if getv(from_object, ['type']) is not None:
@@ -312,7 +318,7 @@ def _FunctionDeclaration_to_mldev(
     parent_object: dict = None,
 ) -> dict:
   to_object = {}
-  if getv(from_object, ['response']):
+  if getv(from_object, ['response']) is not None:
     raise ValueError('response parameter is not supported in Google AI.')
 
   if getv(from_object, ['description']) is not None:
@@ -468,7 +474,7 @@ def _Tool_to_mldev(
         ],
     )
 
-  if getv(from_object, ['retrieval']):
+  if getv(from_object, ['retrieval']) is not None:
     raise ValueError('retrieval parameter is not supported in Google AI.')
 
   if getv(from_object, ['google_search']) is not None:
@@ -638,6 +644,18 @@ def _CreateCachedContentConfig_to_mldev(
   if getv(from_object, ['display_name']) is not None:
     setv(parent_object, ['displayName'], getv(from_object, ['display_name']))
 
+  if getv(from_object, ['contents']) is not None:
+    setv(
+        parent_object,
+        ['contents'],
+        [
+            _Content_to_mldev(api_client, item, to_object)
+            for item in t.t_contents(
+                api_client, getv(from_object, ['contents'])
+            )
+        ],
+    )
+
   if getv(from_object, ['system_instruction']) is not None:
     setv(
         parent_object,
@@ -689,6 +707,18 @@ def _CreateCachedContentConfig_to_vertex(
   if getv(from_object, ['display_name']) is not None:
     setv(parent_object, ['displayName'], getv(from_object, ['display_name']))
 
+  if getv(from_object, ['contents']) is not None:
+    setv(
+        parent_object,
+        ['contents'],
+        [
+            _Content_to_vertex(api_client, item, to_object)
+            for item in t.t_contents(
+                api_client, getv(from_object, ['contents'])
+            )
+        ],
+    )
+
   if getv(from_object, ['system_instruction']) is not None:
     setv(
         parent_object,
@@ -735,18 +765,6 @@ def _CreateCachedContentParameters_to_mldev(
         t.t_caches_model(api_client, getv(from_object, ['model'])),
     )
 
-  if getv(from_object, ['contents']) is not None:
-    setv(
-        to_object,
-        ['contents'],
-        [
-            _Content_to_mldev(api_client, item, to_object)
-            for item in t.t_contents(
-                api_client, getv(from_object, ['contents'])
-            )
-        ],
-    )
-
   if getv(from_object, ['config']) is not None:
     setv(
         to_object,
@@ -772,18 +790,6 @@ def _CreateCachedContentParameters_to_vertex(
         t.t_caches_model(api_client, getv(from_object, ['model'])),
     )
 
-  if getv(from_object, ['contents']) is not None:
-    setv(
-        to_object,
-        ['contents'],
-        [
-            _Content_to_vertex(api_client, item, to_object)
-            for item in t.t_contents(
-                api_client, getv(from_object, ['contents'])
-            )
-        ],
-    )
-
   if getv(from_object, ['config']) is not None:
     setv(
         to_object,
@@ -1238,7 +1244,6 @@ class Caches(_common.BaseModule):
       self,
       *,
       model: str,
-      contents: Union[types.ContentListUnion, types.ContentListUnionDict],
       config: Optional[types.CreateCachedContentConfigOrDict] = None,
   ) -> types.CachedContent:
     """Creates cached content, this call will initialize the cached
@@ -1264,7 +1269,6 @@ class Caches(_common.BaseModule):
 
     parameter_model = types._CreateCachedContentParameters(
         model=model,
-        contents=contents,
         config=config,
     )
 
@@ -1542,7 +1546,6 @@ class AsyncCaches(_common.BaseModule):
       self,
       *,
       model: str,
-      contents: Union[types.ContentListUnion, types.ContentListUnionDict],
       config: Optional[types.CreateCachedContentConfigOrDict] = None,
   ) -> types.CachedContent:
     """Creates cached content, this call will initialize the cached
@@ -1568,7 +1571,6 @@ class AsyncCaches(_common.BaseModule):
 
     parameter_model = types._CreateCachedContentParameters(
         model=model,
-        contents=contents,
         config=config,
     )
 
google/genai/chats.py CHANGED
@@ -18,7 +18,25 @@ from typing import Union
 
 from . import _transformers as t
 from .models import AsyncModels, Models
-from .types import Content, ContentDict, GenerateContentConfigOrDict, GenerateContentResponse, PartUnionDict
+from .types import Content, ContentDict, GenerateContentConfigOrDict, GenerateContentResponse, Part, PartUnionDict
+
+
+def _validate_response(
+    response: GenerateContentResponse
+) -> bool:
+  if not response.candidates:
+    return False
+  if not response.candidates[0].content:
+    return False
+  if not response.candidates[0].content.parts:
+    return False
+  for part in response.candidates[0].content.parts:
+    if part == Part():
+      return False
+    if part.text is not None and part.text == "":
+      return False
+  return True
+
 
 class _BaseChat:
   """Base chat session."""
@@ -65,7 +83,7 @@ class Chat(_BaseChat):
         contents=self._curated_history + [input_content],
         config=self._config,
     )
-    if response.candidates and response.candidates[0].content:
+    if _validate_response(response):
       if response.automatic_function_calling_history:
         self._curated_history.extend(
             response.automatic_function_calling_history
@@ -75,14 +93,42 @@ class Chat(_BaseChat):
       self._curated_history.append(response.candidates[0].content)
     return response
 
-  def _send_message_stream(self, message: Union[list[ContentDict], str]):
-    for content in t.t_contents(self._modules.api_client, message):
-      self._curated_history.append(content)
+  def send_message_stream(
+      self, message: Union[list[PartUnionDict], PartUnionDict]
+  ):
+    """Sends the conversation history with the additional message and yields the model's response in chunks.
+
+    Args:
+      message: The message to send to the model.
+
+    Yields:
+      The model's response in chunks.
+
+    Usage:
+
+    .. code-block:: python
+
+      chat = client.chats.create(model='gemini-1.5-flash')
+      for chunk in chat.send_message_stream('tell me a story'):
+        print(chunk.text)
+    """
+
+    input_content = t.t_content(self._modules.api_client, message)
+    output_contents = []
+    finish_reason = None
     for chunk in self._modules.generate_content_stream(
-        model=self._model, contents=self._curated_history, config=self._config
+        model=self._model,
+        contents=self._curated_history + [input_content],
+        config=self._config,
     ):
-      # TODO(b/381089069): add successful response to history
+      if _validate_response(chunk):
+        output_contents.append(chunk.candidates[0].content)
+      if chunk.candidates and chunk.candidates[0].finish_reason:
+        finish_reason = chunk.candidates[0].finish_reason
       yield chunk
+    if output_contents and finish_reason:
+      self._curated_history.append(input_content)
+      self._curated_history.extend(output_contents)
 
 
 class Chats:
@@ -134,8 +180,8 @@ class AsyncChat(_BaseChat):
 
     .. code-block:: python
 
-      chat = client.chats.create(model='gemini-1.5-flash')
-      response = chat.send_message('tell me a story')
+      chat = client.aio.chats.create(model='gemini-1.5-flash')
+      response = await chat.send_message('tell me a story')
     """
 
     input_content = t.t_content(self._modules.api_client, message)
@@ -144,7 +190,7 @@ class AsyncChat(_BaseChat):
         contents=self._curated_history + [input_content],
         config=self._config,
     )
-    if response.candidates and response.candidates[0].content:
+    if _validate_response(response):
       if response.automatic_function_calling_history:
         self._curated_history.extend(
             response.automatic_function_calling_history
@@ -154,14 +200,41 @@ class AsyncChat(_BaseChat):
       self._curated_history.append(response.candidates[0].content)
     return response
 
-  async def _send_message_stream(self, message: Union[list[ContentDict], str]):
-    for content in t.t_contents(self._modules.api_client, message):
-      self._curated_history.append(content)
+  async def send_message_stream(
+      self, message: Union[list[PartUnionDict], PartUnionDict]
+  ):
+    """Sends the conversation history with the additional message and yields the model's response in chunks.
+
+    Args:
+      message: The message to send to the model.
+
+    Yields:
+      The model's response in chunks.
+
+    Usage:
+
+    .. code-block:: python
+      chat = client.aio.chats.create(model='gemini-1.5-flash')
+      async for chunk in chat.send_message_stream('tell me a story'):
+        print(chunk.text)
+    """
+
+    input_content = t.t_content(self._modules.api_client, message)
+    output_contents = []
+    finish_reason = None
     async for chunk in self._modules.generate_content_stream(
-        model=self._model, contents=self._curated_history, config=self._config
+        model=self._model,
+        contents=self._curated_history + [input_content],
+        config=self._config,
     ):
-      # TODO(b/381089069): add successful response to history
+      if _validate_response(chunk):
+        output_contents.append(chunk.candidates[0].content)
+      if chunk.candidates and chunk.candidates[0].finish_reason:
+        finish_reason = chunk.candidates[0].finish_reason
       yield chunk
+    if output_contents and finish_reason:
+      self._curated_history.append(input_content)
+      self._curated_history.extend(output_contents)
 
 
 class AsyncChats:
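
Note: chats.py promotes the private _send_message_stream to a public send_message_stream on both Chat and AsyncChat, and every response now passes through _validate_response before being recorded in the curated history. A short sketch of the new public streaming API (API key and prompts are placeholders):

    from google import genai

    client = genai.Client(api_key='...')  # placeholder key
    chat = client.chats.create(model='gemini-1.5-flash')

    # The streamed turn is appended to history only after a chunk with a
    # finish_reason arrives, so an interrupted stream does not pollute it.
    for chunk in chat.send_message_stream('tell me a story'):
        print(chunk.text, end='')

    # The follow-up sees the streamed exchange in the curated history.
    response = chat.send_message('summarize that story in one line')
    print(response.text)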
google/genai/client.py CHANGED
@@ -14,12 +14,12 @@
 #
 
 import os
-from typing import Optional
+from typing import Optional, Union
 
 import google.auth
 import pydantic
 
-from ._api_client import ApiClient, HttpOptions
+from ._api_client import ApiClient, HttpOptions, HttpOptionsDict
 from ._replay_api_client import ReplayApiClient
 from .batches import AsyncBatches, Batches
 from .caches import AsyncCaches, Caches
@@ -145,7 +145,7 @@ class Client:
       project: Optional[str] = None,
       location: Optional[str] = None,
       debug_config: Optional[DebugConfig] = None,
-      http_options: Optional[HttpOptions] = None,
+      http_options: Optional[Union[HttpOptions, HttpOptionsDict]] = None,
   ):
     """Initializes the client.
 
@@ -179,6 +179,9 @@ class Client:
       debug_config (DebugConfig):
         Config settings that control network
         behavior of the client. This is typically used when running test code.
+      http_options (Union[HttpOptions, HttpOptionsDict]):
+        Http options to use for the client. Response_payload can't be
+        set when passing to the client constructor.
     """
 
     self._debug_config = debug_config or DebugConfig()
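
Note: the Client constructor now accepts http_options either as an HttpOptions instance or as a plain dict (HttpOptionsDict), with the caveat from the docstring that response_payload must not be set when passing options to the constructor. A sketch, assuming the api_version field exposed by HttpOptions:

    from google import genai

    # A plain dict now works in place of an HttpOptions instance.
    client = genai.Client(
        api_key='...',  # placeholder key
        http_options={'api_version': 'v1alpha'},  # assumed field name
    )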
google/genai/files.py CHANGED
@@ -98,7 +98,7 @@ def _ListFilesParameters_to_vertex(
     parent_object: dict = None,
 ) -> dict:
   to_object = {}
-  if getv(from_object, ['config']):
+  if getv(from_object, ['config']) is not None:
     raise ValueError('config parameter is not supported in Vertex AI.')
 
   return to_object
@@ -128,13 +128,13 @@ def _FileStatus_to_vertex(
     parent_object: dict = None,
 ) -> dict:
   to_object = {}
-  if getv(from_object, ['details']):
+  if getv(from_object, ['details']) is not None:
     raise ValueError('details parameter is not supported in Vertex AI.')
 
-  if getv(from_object, ['message']):
+  if getv(from_object, ['message']) is not None:
     raise ValueError('message parameter is not supported in Vertex AI.')
 
-  if getv(from_object, ['code']):
+  if getv(from_object, ['code']) is not None:
     raise ValueError('code parameter is not supported in Vertex AI.')
 
   return to_object
@@ -197,40 +197,40 @@ def _File_to_vertex(
     parent_object: dict = None,
 ) -> dict:
   to_object = {}
-  if getv(from_object, ['name']):
+  if getv(from_object, ['name']) is not None:
     raise ValueError('name parameter is not supported in Vertex AI.')
 
-  if getv(from_object, ['display_name']):
+  if getv(from_object, ['display_name']) is not None:
     raise ValueError('display_name parameter is not supported in Vertex AI.')
 
-  if getv(from_object, ['mime_type']):
+  if getv(from_object, ['mime_type']) is not None:
     raise ValueError('mime_type parameter is not supported in Vertex AI.')
 
-  if getv(from_object, ['size_bytes']):
+  if getv(from_object, ['size_bytes']) is not None:
     raise ValueError('size_bytes parameter is not supported in Vertex AI.')
 
-  if getv(from_object, ['create_time']):
+  if getv(from_object, ['create_time']) is not None:
     raise ValueError('create_time parameter is not supported in Vertex AI.')
 
-  if getv(from_object, ['expiration_time']):
+  if getv(from_object, ['expiration_time']) is not None:
     raise ValueError('expiration_time parameter is not supported in Vertex AI.')
 
-  if getv(from_object, ['update_time']):
+  if getv(from_object, ['update_time']) is not None:
     raise ValueError('update_time parameter is not supported in Vertex AI.')
 
-  if getv(from_object, ['sha256_hash']):
+  if getv(from_object, ['sha256_hash']) is not None:
     raise ValueError('sha256_hash parameter is not supported in Vertex AI.')
 
-  if getv(from_object, ['uri']):
+  if getv(from_object, ['uri']) is not None:
     raise ValueError('uri parameter is not supported in Vertex AI.')
 
-  if getv(from_object, ['state']):
+  if getv(from_object, ['state']) is not None:
     raise ValueError('state parameter is not supported in Vertex AI.')
 
-  if getv(from_object, ['video_metadata']):
+  if getv(from_object, ['video_metadata']) is not None:
     raise ValueError('video_metadata parameter is not supported in Vertex AI.')
 
-  if getv(from_object, ['error']):
+  if getv(from_object, ['error']) is not None:
     raise ValueError('error parameter is not supported in Vertex AI.')
 
   return to_object
@@ -291,10 +291,10 @@ def _CreateFileParameters_to_vertex(
     parent_object: dict = None,
 ) -> dict:
   to_object = {}
-  if getv(from_object, ['file']):
+  if getv(from_object, ['file']) is not None:
     raise ValueError('file parameter is not supported in Vertex AI.')
 
-  if getv(from_object, ['config']):
+  if getv(from_object, ['config']) is not None:
     raise ValueError('config parameter is not supported in Vertex AI.')
 
   return to_object
@@ -355,10 +355,10 @@ def _GetFileParameters_to_vertex(
     parent_object: dict = None,
 ) -> dict:
   to_object = {}
-  if getv(from_object, ['name']):
+  if getv(from_object, ['name']) is not None:
     raise ValueError('name parameter is not supported in Vertex AI.')
 
-  if getv(from_object, ['config']):
+  if getv(from_object, ['config']) is not None:
     raise ValueError('config parameter is not supported in Vertex AI.')
 
   return to_object
@@ -419,10 +419,10 @@ def _DeleteFileParameters_to_vertex(
     parent_object: dict = None,
 ) -> dict:
   to_object = {}
-  if getv(from_object, ['name']):
+  if getv(from_object, ['name']) is not None:
     raise ValueError('name parameter is not supported in Vertex AI.')
 
-  if getv(from_object, ['config']):
+  if getv(from_object, ['config']) is not None:
     raise ValueError('config parameter is not supported in Vertex AI.')
 
   return to_object
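
Note: every change in files.py (and many in caches.py) replaces a truthiness test with an explicit "is not None" check. The difference matters for falsy-but-set values such as 0, False, or '', which the old guards silently let through instead of raising the unsupported-parameter error. A self-contained illustration with a simplified stand-in for the SDK's getv helper (the real helper's behavior is assumed here):

    def getv(obj, path):
        # Simplified stand-in: walk a key path, returning None if absent.
        for key in path:
            if not isinstance(obj, dict):
                return None
            obj = obj.get(key)
        return obj

    params = {'code': 0}  # a legitimate, falsy status code
    print(bool(getv(params, ['code'])))        # False: old check skips it
    print(getv(params, ['code']) is not None)  # True: new check raises as intended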
google/genai/live.py CHANGED
@@ -68,6 +68,7 @@ class AsyncSession:
 
   async def send(
       self,
+      *,
       input: Union[
           types.ContentListUnion,
           types.ContentListUnionDict,
@@ -80,6 +81,25 @@ class AsyncSession:
       ],
       end_of_turn: Optional[bool] = False,
   ):
+    """Send input to the model.
+
+    The method will send the input request to the server.
+
+    Args:
+      input: The input request to the model.
+      end_of_turn: Whether the input is the last message in a turn.
+
+    Example usage:
+
+    .. code-block:: python
+
+      client = genai.Client(api_key=API_KEY)
+
+      async with client.aio.live.connect(model='...') as session:
+        await session.send(input='Hello world!', end_of_turn=True)
+        async for message in session.receive():
+          print(message)
+    """
     client_message = self._parse_client_message(input, end_of_turn)
     await self._ws.send(json.dumps(client_message))
 
@@ -113,7 +133,7 @@ class AsyncSession:
       yield result
 
   async def start_stream(
-      self, stream: AsyncIterator[bytes], mime_type: str
+      self, *, stream: AsyncIterator[bytes], mime_type: str
   ) -> AsyncIterator[types.LiveServerMessage]:
     """start a live session from a data stream.
 
@@ -199,7 +219,7 @@ class AsyncSession:
   ):
     async for data in data_stream:
       input = {'data': data, 'mimeType': mime_type}
-      await self.send(input)
+      await self.send(input=input)
       # Give a chance for the receive loop to process responses.
       await asyncio.sleep(10**-12)
     # Give a chance for the receiver to process the last response.
@@ -599,7 +619,10 @@ class AsyncLive(_common.BaseModule):
 
   @contextlib.asynccontextmanager
   async def connect(
-      self, model: str, config: Optional[types.LiveConnectConfigOrDict] = None
+      self,
+      *,
+      model: str,
+      config: Optional[types.LiveConnectConfigOrDict] = None,
   ) -> AsyncSession:
     """Connect to the live server.
 
@@ -609,9 +632,9 @@ class AsyncLive(_common.BaseModule):
 
       client = genai.Client(api_key=API_KEY)
       config = {}
-      async with client.aio.live.connect(model='gemini-1.0-pro-002', config=config) as session:
+      async with client.aio.live.connect(model='...', config=config) as session:
         await session.send(input='Hello world!', end_of_turn=True)
-        async for message in session:
+        async for message in session.receive():
           print(message)
     """
     base_url = self.api_client._websocket_base_url()
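
Note: live.py makes send, start_stream, and connect keyword-only, so a positional call such as session.send(input) now raises TypeError (the internal start_stream loop was updated to send(input=input) accordingly). A sketch mirroring the docstring example; the API key and model name are placeholders:

    import asyncio
    from google import genai

    async def main() -> None:
        client = genai.Client(api_key='...')  # placeholder key
        # connect() and send() must now be called with keyword arguments.
        async with client.aio.live.connect(model='...', config={}) as session:
            await session.send(input='Hello world!', end_of_turn=True)
            async for message in session.receive():
                print(message)

    asyncio.run(main())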