gemini-webapi 1.17.3__py3-none-any.whl → 1.18.0__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the public registry.
- gemini_webapi/client.py +644 -306
- gemini_webapi/components/gem_mixin.py +35 -20
- gemini_webapi/constants.py +11 -8
- gemini_webapi/types/candidate.py +2 -0
- gemini_webapi/types/image.py +8 -7
- gemini_webapi/types/modeloutput.py +8 -0
- gemini_webapi/utils/__init__.py +1 -6
- gemini_webapi/utils/decorators.py +75 -30
- gemini_webapi/utils/get_access_token.py +55 -34
- gemini_webapi/utils/parsing.py +207 -37
- gemini_webapi/utils/rotate_1psidts.py +40 -21
- gemini_webapi/utils/upload_file.py +51 -18
- {gemini_webapi-1.17.3.dist-info → gemini_webapi-1.18.0.dist-info}/METADATA +33 -9
- gemini_webapi-1.18.0.dist-info/RECORD +25 -0
- {gemini_webapi-1.17.3.dist-info → gemini_webapi-1.18.0.dist-info}/WHEEL +1 -1
- gemini_webapi-1.17.3.dist-info/RECORD +0 -25
- {gemini_webapi-1.17.3.dist-info → gemini_webapi-1.18.0.dist-info}/licenses/LICENSE +0 -0
- {gemini_webapi-1.17.3.dist-info → gemini_webapi-1.18.0.dist-info}/top_level.txt +0 -0
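The headline change in this release is a streaming generation API (`generate_content_stream` on the client, `send_message_stream` on chat sessions) layered on top of the existing one-shot `generate_content`, plus a reworked cookie/auth lifecycle. For orientation, a minimal quickstart is sketched below; the cookie values are placeholders you must copy from a logged-in gemini.google.com browser session, and the call pattern follows the public method signatures visible in this diff.

    import asyncio

    from gemini_webapi import GeminiClient

    async def main():
        # Placeholder cookie values; supply your own from the browser.
        client = GeminiClient("<__Secure-1PSID>", "<__Secure-1PSIDTS>")
        await client.init(timeout=120, auto_refresh=True)

        # One-shot generation, unchanged public behavior in 1.18.0.
        response = await client.generate_content("Hello Gemini!")
        print(response.text)

        await client.close()

    asyncio.run(main())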
gemini_webapi/client.py
CHANGED
@@ -1,19 +1,21 @@
 import asyncio
+import codecs
+import io
+import random
 import re
 from asyncio import Task
 from pathlib import Path
-from typing import Any, Optional
+from typing import Any, AsyncGenerator, Optional

 import orjson as json
-from httpx import AsyncClient, ReadTimeout, Response
+from httpx import AsyncClient, Cookies, ReadTimeout, Response

 from .components import GemMixin
-from .constants import Endpoint, ErrorCode, Headers, Model
+from .constants import Endpoint, ErrorCode, GRPC, Headers, Model
 from .exceptions import (
     APIError,
     AuthError,
     GeminiError,
-    ImageGenerationError,
     ModelInvalid,
     TemporarilyBlocked,
     TimeoutError,
@@ -28,13 +30,12 @@ from .types import (
     WebImage,
 )
 from .utils import (
-    extract_json_from_response,
     get_access_token,
     get_nested_value,
     logger,
     parse_file_name,
+    parse_stream_frames,
     rotate_1psidts,
-    rotate_tasks,
     running,
     upload_file,
 )
@@ -52,7 +53,7 @@ class GeminiClient(GemMixin):
     secure_1psid: `str`, optional
         __Secure-1PSID cookie value.
     secure_1psidts: `str`, optional
-        __Secure-1PSIDTS cookie value, some
+        __Secure-1PSIDTS cookie value, some Google accounts don't require this value, provide only if it's in the cookie list.
     proxy: `str`, optional
         Proxy URL.
     kwargs: `dict`, optional
@@ -71,12 +72,18 @@ class GeminiClient(GemMixin):
         "_running",
         "client",
         "access_token",
+        "build_label",
+        "session_id",
         "timeout",
         "auto_close",
         "close_delay",
         "close_task",
         "auto_refresh",
         "refresh_interval",
+        "refresh_task",
+        "verbose",
+        "_lock",
+        "_reqid",
         "_gems",  # From GemMixin
         "kwargs",
     ]
@@ -89,23 +96,31 @@ class GeminiClient(GemMixin):
         **kwargs,
     ):
         super().__init__()
-        self.cookies =
+        self.cookies = Cookies()
         self.proxy = proxy
         self._running: bool = False
         self.client: AsyncClient | None = None
         self.access_token: str | None = None
+        self.build_label: str | None = None
+        self.session_id: str | None = None
         self.timeout: float = 300
         self.auto_close: bool = False
         self.close_delay: float = 300
         self.close_task: Task | None = None
         self.auto_refresh: bool = True
         self.refresh_interval: float = 540
+        self.refresh_task: Task | None = None
+        self.verbose: bool = True
+        self._lock = asyncio.Lock()
+        self._reqid: int = random.randint(10000, 99999)
         self.kwargs = kwargs

         if secure_1psid:
-            self.cookies
+            self.cookies.set("__Secure-1PSID", secure_1psid, domain=".google.com")
         if secure_1psidts:
-            self.cookies
+            self.cookies.set(
+                "__Secure-1PSIDTS", secure_1psidts, domain=".google.com"
+            )

     async def init(
         self,
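The constructor now keeps credentials in an `httpx.Cookies` jar instead of a bare value, scoping them to `.google.com`. A standalone sketch of that jar behavior (standard httpx API; values are placeholders):

    from httpx import AsyncClient, Cookies

    cookies = Cookies()
    cookies.set("__Secure-1PSID", "<value>", domain=".google.com")
    cookies.set("__Secure-1PSIDTS", "<value>", domain=".google.com")

    # Seeding a client with the jar makes httpx attach the cookies to any
    # request whose host matches the .google.com domain scope.
    client = AsyncClient(cookies=cookies)
    print(cookies.get("__Secure-1PSID", domain=".google.com"))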
@@ -129,50 +144,64 @@ class GeminiClient(GemMixin):
         close_delay: `float`, optional
             Time to wait before auto-closing the client in seconds. Effective only if `auto_close` is `True`.
         auto_refresh: `bool`, optional
-            If `True`, will schedule a task to automatically refresh cookies in the background.
+            If `True`, will schedule a task to automatically refresh cookies and access token in the background.
         refresh_interval: `float`, optional
-            Time interval for background cookie refresh in seconds. Effective only if `auto_refresh` is `True`.
+            Time interval for background cookie and access token refresh in seconds. Effective only if `auto_refresh` is `True`.
         verbose: `bool`, optional
             If `True`, will print more infomation in logs.
         """

-
-
-
-            )
+        async with self._lock:
+            if self._running:
+                return

-
-
-
-
-
-
-
-
-        self.cookies = valid_cookies
-        self._running = True
-
-        self.timeout = timeout
-        self.auto_close = auto_close
-        self.close_delay = close_delay
-        if self.auto_close:
-            await self.reset_close_task()
-
-        self.auto_refresh = auto_refresh
-        self.refresh_interval = refresh_interval
-        if task := rotate_tasks.get(self.cookies["__Secure-1PSID"]):
-            task.cancel()
-        if self.auto_refresh:
-            rotate_tasks[self.cookies["__Secure-1PSID"]] = asyncio.create_task(
-                self.start_auto_refresh()
+            try:
+                self.verbose = verbose
+                access_token, build_label, session_id, valid_cookies = (
+                    await get_access_token(
+                        base_cookies=self.cookies,
+                        proxy=self.proxy,
+                        verbose=self.verbose,
+                        verify=self.kwargs.get("verify", True),
+                    )
                 )

-
-
-
-
-
+                self.client = AsyncClient(
+                    http2=True,
+                    timeout=timeout,
+                    proxy=self.proxy,
+                    follow_redirects=True,
+                    headers=Headers.GEMINI.value,
+                    cookies=valid_cookies,
+                    **self.kwargs,
+                )
+                self.access_token = access_token
+                self.cookies = valid_cookies
+                self.build_label = build_label
+                self.session_id = session_id
+                self._running = True
+
+                self.timeout = timeout
+                self.auto_close = auto_close
+                self.close_delay = close_delay
+                if self.auto_close:
+                    await self.reset_close_task()
+
+                self.auto_refresh = auto_refresh
+                self.refresh_interval = refresh_interval
+
+                if self.refresh_task:
+                    self.refresh_task.cancel()
+                    self.refresh_task = None
+
+                if self.auto_refresh:
+                    self.refresh_task = asyncio.create_task(self.start_auto_refresh())
+
+                if self.verbose:
+                    logger.success("Gemini client initialized successfully.")
+            except Exception:
+                await self.close()
+                raise

     async def close(self, delay: float = 0) -> None:
         """
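`init()` is now a guarded initialization: an `asyncio.Lock` makes concurrent calls idempotent, and any failure tears the half-built client back down before re-raising. The shape of that pattern, distilled into a self-contained sketch (names are illustrative, not from the package):

    import asyncio

    class Service:
        def __init__(self):
            self._lock = asyncio.Lock()
            self._running = False

        async def init(self):
            async with self._lock:
                if self._running:
                    return  # A concurrent caller already finished init.
                try:
                    await asyncio.sleep(0)  # ...fetch tokens, open clients...
                    self._running = True
                except Exception:
                    await self.close()  # Leave no half-open state behind.
                    raise

        async def close(self):
            self._running = False

    asyncio.run(Service().init())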
@@ -193,6 +222,10 @@ class GeminiClient(GemMixin):
             self.close_task.cancel()
             self.close_task = None

+        if self.refresh_task:
+            self.refresh_task.cancel()
+            self.refresh_task = None
+
         if self.client:
             await self.client.aclose()

@@ -211,34 +244,49 @@ class GeminiClient(GemMixin):
         """
         Start the background task to automatically refresh cookies.
         """
+        if self.refresh_interval < 60:
+            self.refresh_interval = 60
+
+        while self._running:
+            await asyncio.sleep(self.refresh_interval)
+
+            if not self._running:
+                break

-        while True:
-            new_1psidts: str | None = None
             try:
-
+                async with self._lock:
+                    # Refresh all cookies in the background to keep the session alive.
+                    new_1psidts, rotated_cookies = await rotate_1psidts(
+                        self.cookies, self.proxy
+                    )
+                    if rotated_cookies:
+                        self.cookies.update(rotated_cookies)
+                        if self.client:
+                            self.client.cookies.update(rotated_cookies)
+
+                if new_1psidts:
+                    if rotated_cookies:
+                        logger.debug("Cookies refreshed (network update).")
+                    else:
+                        logger.debug("Cookies are up to date (cached).")
+                else:
+                    logger.warning(
+                        "Rotation response did not contain a new __Secure-1PSIDTS. "
+                        "Session might expire soon if this persists."
+                    )
+            except asyncio.CancelledError:
+                raise
             except AuthError:
-                if task := rotate_tasks.get(self.cookies.get("__Secure-1PSID", "")):
-                    task.cancel()
                 logger.warning(
-                    "AuthError: Failed to refresh cookies.
+                    "AuthError: Failed to refresh cookies. Retrying in next interval."
                 )
-
-
-                logger.warning(f"Unexpected error while refreshing cookies: {exc}")
-
-            if new_1psidts:
-                self.cookies["__Secure-1PSIDTS"] = new_1psidts
-                if self._running:
-                    self.client.cookies.set("__Secure-1PSIDTS", new_1psidts)
-                logger.debug("Cookies refreshed. New __Secure-1PSIDTS applied.")
+            except Exception as e:
+                logger.warning(f"Unexpected error while refreshing cookies: {e}")

-        await asyncio.sleep(self.refresh_interval)
-
-    @running(retry=2)
     async def generate_content(
         self,
         prompt: str,
-        files: list[str | Path] | None = None,
+        files: list[str | Path | bytes | io.BytesIO] | None = None,
         model: Model | str | dict = Model.UNSPECIFIED,
         gem: Gem | str | None = None,
         chat: Optional["ChatSession"] = None,
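The refresh loop also changes ownership: instead of a module-level `rotate_tasks` registry keyed by cookie value, each client owns a single `refresh_task` that `close()` cancels. A distilled sketch of that lifecycle, under the assumption that the loop must swallow ordinary errors but always propagate cancellation:

    import asyncio

    async def rotate():
        ...  # placeholder for the real cookie-rotation call

    async def refresh_loop(interval: float):
        while True:
            await asyncio.sleep(interval)
            try:
                await rotate()
            except asyncio.CancelledError:
                raise  # Never swallow cancellation inside the loop.
            except Exception as e:
                print(f"refresh failed, retrying next interval: {e}")

    async def main():
        task = asyncio.create_task(refresh_loop(0.1))
        await asyncio.sleep(0.35)
        task.cancel()  # close() mirrors this: cancel, then drop the reference.
        try:
            await task
        except asyncio.CancelledError:
            pass

    asyncio.run(main())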
@@ -285,6 +333,182 @@ class GeminiClient(GemMixin):
             - If response structure is invalid and failed to parse.
         """

+        if self.auto_close:
+            await self.reset_close_task()
+
+        if not (isinstance(chat, ChatSession) and chat.cid):
+            self._reqid = random.randint(10000, 99999)
+
+        file_data = None
+        if files:
+            await self._batch_execute(
+                [
+                    RPCData(
+                        rpcid=GRPC.BARD_ACTIVITY,
+                        payload='[[["bard_activity_enabled"]]]',
+                    )
+                ]
+            )
+
+            uploaded_urls = await asyncio.gather(
+                *(upload_file(file, self.proxy) for file in files)
+            )
+            file_data = [
+                [[url], parse_file_name(file)]
+                for url, file in zip(uploaded_urls, files)
+            ]
+
+        try:
+            await self._batch_execute(
+                [
+                    RPCData(
+                        rpcid=GRPC.BARD_ACTIVITY,
+                        payload='[[["bard_activity_enabled"]]]',
+                    )
+                ]
+            )
+
+            output = None
+            async for output in self._generate(
+                prompt=prompt,
+                req_file_data=file_data,
+                model=model,
+                gem=gem,
+                chat=chat,
+                **kwargs,
+            ):
+                pass
+
+            if output is None:
+                raise GeminiError(
+                    "Failed to generate contents. No output data found in response."
+                )
+
+            if isinstance(chat, ChatSession):
+                chat.last_output = output
+
+            return output
+
+        finally:
+            if files:
+                for file in files:
+                    if isinstance(file, io.BytesIO):
+                        file.close()
+
+    async def generate_content_stream(
+        self,
+        prompt: str,
+        files: list[str | Path | bytes | io.BytesIO] | None = None,
+        model: Model | str | dict = Model.UNSPECIFIED,
+        gem: Gem | str | None = None,
+        chat: Optional["ChatSession"] = None,
+        **kwargs,
+    ) -> AsyncGenerator[ModelOutput, None]:
+        """
+        Generates contents with prompt in streaming mode.
+
+        This method sends a request to Gemini and yields partial responses as they arrive.
+        It automatically calculates the text delta (new characters) to provide a smooth
+        streaming experience. It also continuously updates chat metadata and candidate IDs.
+
+        Parameters
+        ----------
+        prompt: `str`
+            Prompt provided by user.
+        files: `list[str | Path | bytes | io.BytesIO]`, optional
+            List of file paths or byte streams to be attached.
+        model: `Model | str | dict`, optional
+            Specify the model to use for generation.
+        gem: `Gem | str`, optional
+            Specify a gem to use as system prompt for the chat session.
+        chat: `ChatSession`, optional
+            Chat data to retrieve conversation history.
+        kwargs: `dict`, optional
+            Additional arguments passed to `httpx.AsyncClient.stream`.
+
+        Yields
+        ------
+        :class:`ModelOutput`
+            Partial output data. The `text` attribute contains only the NEW characters
+            received since the last yield.
+
+        Raises
+        ------
+        `gemini_webapi.APIError`
+            If the request fails or response structure is invalid.
+        `gemini_webapi.TimeoutError`
+            If the stream request times out.
+        """
+
+        if self.auto_close:
+            await self.reset_close_task()
+
+        if not (isinstance(chat, ChatSession) and chat.cid):
+            self._reqid = random.randint(10000, 99999)
+
+        file_data = None
+        if files:
+            await self._batch_execute(
+                [
+                    RPCData(
+                        rpcid=GRPC.BARD_ACTIVITY,
+                        payload='[[["bard_activity_enabled"]]]',
+                    )
+                ]
+            )
+
+            uploaded_urls = await asyncio.gather(
+                *(upload_file(file, self.proxy) for file in files)
+            )
+            file_data = [
+                [[url], parse_file_name(file)]
+                for url, file in zip(uploaded_urls, files)
+            ]
+
+        try:
+            await self._batch_execute(
+                [
+                    RPCData(
+                        rpcid=GRPC.BARD_ACTIVITY,
+                        payload='[[["bard_activity_enabled"]]]',
+                    )
+                ]
+            )
+
+            output = None
+            async for output in self._generate(
+                prompt=prompt,
+                req_file_data=file_data,
+                model=model,
+                gem=gem,
+                chat=chat,
+                **kwargs,
+            ):
+                yield output
+
+            if output and isinstance(chat, ChatSession):
+                chat.last_output = output
+
+        finally:
+            if files:
+                for file in files:
+                    if isinstance(file, io.BytesIO):
+                        file.close()
+
+    @running(retry=5)
+    async def _generate(
+        self,
+        prompt: str,
+        req_file_data: list[Any] | None = None,
+        model: Model | str | dict = Model.UNSPECIFIED,
+        gem: Gem | str | None = None,
+        chat: Optional["ChatSession"] = None,
+        **kwargs,
+    ) -> AsyncGenerator[ModelOutput, None]:
+        """
+        Internal method which actually sends content generation requests.
+        """
+
         assert prompt, "Prompt cannot be empty."

         if isinstance(model, str):
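With `_generate` shared between the one-shot and streaming paths, consuming the stream looks like the sketch below. It assumes, per the `Candidate` changes later in this diff, that each yielded `ModelOutput` carries candidates with both the cumulative `text` and the incremental `text_delta`; the cookie value is a placeholder.

    import asyncio

    from gemini_webapi import GeminiClient

    async def main():
        client = GeminiClient("<__Secure-1PSID>")
        await client.init()
        async for chunk in client.generate_content_stream("Write a haiku"):
            # Print only the newly received characters (typewriter effect).
            print(chunk.candidates[0].text_delta, end="", flush=True)
        print()
        await client.close()

    asyncio.run(main())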
@@ -297,253 +521,298 @@ class GeminiClient(GemMixin):
                 f"string, or dictionary; got `{type(model).__name__}`"
             )

-
-
-        else:
-            gem_id = gem
+        _reqid = self._reqid
+        self._reqid += 100000

-        if
-            await self.reset_close_task()
+        gem_id = gem.id if isinstance(gem, Gem) else gem

         try:
-
-
+            message_content = [
+                prompt,
+                0,
+                None,
+                req_file_data,
+                None,
+                None,
+                0,
+            ]
+
+            params: dict[str, Any] = {"_reqid": _reqid, "rt": "c"}
+            if self.build_label:
+                params["bl"] = self.build_label
+            if self.session_id:
+                params["f.sid"] = self.session_id
+
+            inner_req_list: list[Any] = [None] * 69
+            inner_req_list[0] = message_content
+            inner_req_list[2] = (
+                chat.metadata
+                if chat
+                else ["", "", "", None, None, None, None, None, None, ""]
+            )
+            inner_req_list[7] = 1  # Enable Snapshot Streaming
+            if gem_id:
+                inner_req_list[19] = gem_id
+
+            request_data = {
+                "at": self.access_token,
+                "f.req": json.dumps(
+                    [
+                        None,
+                        json.dumps(inner_req_list).decode("utf-8"),
+                    ]
+                ).decode("utf-8"),
+            }
+
+            async with self.client.stream(
+                "POST",
+                Endpoint.GENERATE,
+                params=params,
                 headers=model.model_header,
-                data=
-                    "at": self.access_token,
-                    "f.req": json.dumps(
-                        [
-                            None,
-                            json.dumps(
-                                [
-                                    files
-                                    and [
-                                        prompt,
-                                        0,
-                                        None,
-                                        [
-                                            [
-                                                [await upload_file(file, self.proxy)],
-                                                parse_file_name(file),
-                                            ]
-                                            for file in files
-                                        ],
-                                    ]
-                                    or [prompt],
-                                    None,
-                                    chat and chat.metadata,
-                                ]
-                                + (gem_id and [None] * 16 + [gem_id] or [])
-                            ).decode(),
-                        ]
-                    ).decode(),
-                },
+                data=request_data,
                 **kwargs,
-            )
-
-
-
-
-
-
-            if response.status_code != 200:
-                await self.close()
-                raise APIError(
-                    f"Failed to generate contents. Request failed with status code {response.status_code}"
-                )
-            else:
-                response_json: list[Any] = []
-                body: list[Any] = []
-                body_index = 0
+            ) as response:
+                if response.status_code != 200:
+                    await self.close()
+                    raise APIError(
+                        f"Failed to generate contents. Status: {response.status_code}"
+                    )

-
-
+                if self.client:
+                    self.cookies.update(self.client.cookies)

-
-
-                    part_body = get_nested_value(part, [2])
-                    if not part_body:
-                        continue
+                buffer = ""
+                decoder = codecs.getincrementaldecoder("utf-8")(errors="replace")

-
-
-
-                        break
-                    except json.JSONDecodeError:
-                        continue
+                # Track last seen content for each candidate by rcid
+                last_texts: dict[str, str] = {}
+                last_thoughts: dict[str, str] = {}

-
-
-        except Exception:
-            await self.close()
+                is_busy = False
+                has_candidates = False

-
-
-
-
-                        raise UsageLimitExceeded(
-                            f"Failed to generate contents. Usage limit of {model.model_name} model has exceeded. Please try switching to another model."
-                        )
-                    case ErrorCode.MODEL_INCONSISTENT:
-                        raise ModelInvalid(
-                            "Failed to generate contents. The specified model is inconsistent with the chat history. Please make sure to pass the same "
-                            "`model` parameter when starting a chat session with previous metadata."
-                        )
-                    case ErrorCode.MODEL_HEADER_INVALID:
-                        raise ModelInvalid(
-                            "Failed to generate contents. The specified model is not available. Please update gemini_webapi to the latest version. "
-                            "If the error persists and is caused by the package, please report it on GitHub."
-                        )
-                    case ErrorCode.IP_TEMPORARILY_BLOCKED:
-                        raise TemporarilyBlocked(
-                            "Failed to generate contents. Your IP address is temporarily blocked by Google. Please try using a proxy or waiting for a while."
-                        )
-                    case _:
-                        raise Exception
-            except GeminiError:
-                raise
-            except Exception:
-                logger.debug(f"Invalid response: {response.text}")
-                raise APIError(
-                    "Failed to generate contents. Invalid response data received. Client will try to re-initialize on next request."
-                )
-
-        try:
-            candidate_list: list[Any] = get_nested_value(body, [4], [])
-            output_candidates: list[Candidate] = []
-
-            for candidate_index, candidate in enumerate(candidate_list):
-                rcid = get_nested_value(candidate, [0])
-                if not rcid:
-                    continue  # Skip candidate if it has no rcid
-
-                # Text output and thoughts
-                text = get_nested_value(candidate, [1, 0], "")
-                if re.match(
-                    r"^http://googleusercontent\.com/card_content/\d+", text
-                ):
-                    text = get_nested_value(candidate, [22, 0]) or text
-
-                thoughts = get_nested_value(candidate, [37, 0, 0])
-
-                # Web images
-                web_images = []
-                for web_img_data in get_nested_value(candidate, [12, 1], []):
-                    url = get_nested_value(web_img_data, [0, 0, 0])
-                    if not url:
-                        continue
+                async for chunk in response.aiter_bytes():
+                    buffer += decoder.decode(chunk, final=False)
+                    if buffer.startswith(")]}'"):
+                        buffer = buffer[4:].lstrip()

-
-                        WebImage(
-                            url=url,
-                            title=get_nested_value(web_img_data, [7, 0], ""),
-                            alt=get_nested_value(web_img_data, [0, 4], ""),
-                            proxy=self.proxy,
-                        )
-                    )
+                    parsed_parts, buffer = parse_stream_frames(buffer)

-
-
-
-
-
-                    if img_part_index < body_index:
-                        continue
+                    for part in parsed_parts:
+                        part_json = None
+                        # 0. Update chat metadata first whenever available to support follow-up polls
+                        inner_json_str = get_nested_value(part, [2])
+                        if inner_json_str:
                             try:
-
-
-
-
-
-                    if
-
+                                part_json = json.loads(inner_json_str)
+                                m_data = get_nested_value(part_json, [1])
+                                if m_data and isinstance(chat, ChatSession):
+                                    chat.metadata = m_data
+
+                                # Update context string from index 25 if available
+                                context_str = get_nested_value(part_json, [25])
+                                if isinstance(context_str, str) and isinstance(
+                                    chat, ChatSession
                                 ):
-
-                        break
+                                    chat.metadata = [None] * 9 + [context_str]
                             except json.JSONDecodeError:
-
+                                pass
+
+                        # 1. Check for fatal error codes in any part
+                        error_code = get_nested_value(part, [5, 2, 0, 1, 0])
+                        if error_code:
+                            await self.close()
+                            match error_code:
+                                case ErrorCode.USAGE_LIMIT_EXCEEDED:
+                                    raise UsageLimitExceeded(
+                                        f"Usage limit exceeded for model '{model.model_name}'. Please wait a few minutes, "
+                                        "switch to a different model (e.g., Gemini Flash), or check your account limits on gemini.google.com."
+                                    )
+                                case ErrorCode.MODEL_INCONSISTENT:
+                                    raise ModelInvalid(
+                                        "The specified model is inconsistent with the conversation history. "
+                                        "Please ensure you are using the same 'model' parameter throughout the entire ChatSession."
+                                    )
+                                case ErrorCode.MODEL_HEADER_INVALID:
+                                    raise ModelInvalid(
+                                        f"The model '{model.model_name}' is currently unavailable or the request structure is outdated. "
+                                        "Please update 'gemini_webapi' to the latest version or report this on GitHub if the problem persists."
+                                    )
+                                case ErrorCode.IP_TEMPORARILY_BLOCKED:
+                                    raise TemporarilyBlocked(
+                                        "Your IP address has been temporarily flagged or blocked by Google. "
+                                        "Please try using a proxy, a different network, or wait for a while before retrying."
+                                    )
+                                case _:
+                                    raise APIError(
+                                        f"Failed to generate contents (stream). Unknown API error code: {error_code}. "
+                                        "This might be a temporary Google service issue."
+                                    )
+
+                        # 2. Detect if model is busy analyzing data (Thinking state)
+                        if "data_analysis_tool" in str(part):
+                            is_busy = True
+                            if not has_candidates:
+                                logger.debug("Model is busy (thinking/analyzing)...")
+
+                        # 3. Check for queueing status
+                        status = get_nested_value(part, [5])
+                        if isinstance(status, list) and status:
+                            is_busy = True
+                            if not has_candidates:
+                                logger.debug(
+                                    "Model is in a waiting state (queueing)..."
+                                )

-                    if not
-
-                        "Failed to parse generated images. Please update gemini_webapi to the latest version. "
-                        "If the error persists and is caused by the package, please report it on GitHub."
-                    )
+                        if not inner_json_str:
+                            continue

-
-
-
+                        try:
+                            if part_json is None:
+                                part_json = json.loads(inner_json_str)

-
-
-
-                    text = re.sub(
-                        r"http://googleusercontent\.com/image_generation_content/\d+",
-                        "",
-                        finished_text,
-                    ).rstrip()
-
-                    for img_index, gen_img_data in enumerate(
-                        get_nested_value(img_candidate, [12, 7, 0], [])
-                    ):
-                        url = get_nested_value(gen_img_data, [0, 3, 3])
-                        if not url:
+                            # Extract data from candidates
+                            candidates_list = get_nested_value(part_json, [4], [])
+                            if not candidates_list:
                                 continue

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+                            output_candidates = []
+                            any_changed = False
+
+                            for candidate_data in candidates_list:
+                                rcid = get_nested_value(candidate_data, [0])
+                                if not rcid:
+                                    continue
+
+                                if isinstance(chat, ChatSession):
+                                    chat.rcid = rcid
+
+                                # Text output and thoughts
+                                text = get_nested_value(candidate_data, [1, 0], "")
+                                if re.match(
+                                    r"^http://googleusercontent\.com/card_content/\d+",
+                                    text,
+                                ):
+                                    text = (
+                                        get_nested_value(candidate_data, [22, 0])
+                                        or text
+                                    )
+
+                                # Cleanup googleusercontent artifacts
+                                text = re.sub(
+                                    r"http://googleusercontent\.com/\w+/\d+\n*",
+                                    "",
+                                    text,
+                                ).rstrip()
+
+                                thoughts = (
+                                    get_nested_value(candidate_data, [37, 0, 0]) or ""
                                 )
-                )
-
-                output_candidates.append(
-                    Candidate(
-                        rcid=rcid,
-                        text=text,
-                        thoughts=thoughts,
-                        web_images=web_images,
-                        generated_images=generated_images,
-                    )
-                )

-
-
-
-
+                                # Web images
+                                web_images = []
+                                for web_img_data in get_nested_value(
+                                    candidate_data, [12, 1], []
+                                ):
+                                    url = get_nested_value(web_img_data, [0, 0, 0])
+                                    if url:
+                                        web_images.append(
+                                            WebImage(
+                                                url=url,
+                                                title=get_nested_value(
+                                                    web_img_data, [7, 0], ""
+                                                ),
+                                                alt=get_nested_value(
+                                                    web_img_data, [0, 4], ""
+                                                ),
+                                                proxy=self.proxy,
+                                            )
+                                        )
+
+                                # Generated images
+                                generated_images = []
+                                for gen_img_data in get_nested_value(
+                                    candidate_data, [12, 7, 0], []
+                                ):
+                                    url = get_nested_value(gen_img_data, [0, 3, 3])
+                                    if url:
+                                        img_num = get_nested_value(gen_img_data, [3, 6])
+                                        alt_list = get_nested_value(
+                                            gen_img_data, [3, 5], []
+                                        )
+                                        generated_images.append(
+                                            GeneratedImage(
+                                                url=url,
+                                                title=(
+                                                    f"[Generated Image {img_num}]"
+                                                    if img_num
+                                                    else "[Generated Image]"
+                                                ),
+                                                alt=get_nested_value(alt_list, [0], ""),
+                                                proxy=self.proxy,
+                                                cookies=self.cookies,
+                                            )
+                                        )
+
+                                # Calculate Deltas for this specific candidate
+                                last_text = last_texts.get(rcid, "")
+                                last_thought = last_thoughts.get(rcid, "")
+
+                                text_delta = text
+                                if text.startswith(last_text):
+                                    text_delta = text[len(last_text) :]
+
+                                thoughts_delta = thoughts
+                                if thoughts.startswith(last_thought):
+                                    thoughts_delta = thoughts[len(last_thought) :]
+
+                                if (
+                                    text_delta
+                                    or thoughts_delta
+                                    or web_images
+                                    or generated_images
+                                ):
+                                    any_changed = True
+
+                                last_texts[rcid] = text
+                                last_thoughts[rcid] = thoughts
+
+                                output_candidates.append(
+                                    Candidate(
+                                        rcid=rcid,
+                                        text=text,
+                                        text_delta=text_delta,
+                                        thoughts=thoughts or None,
+                                        thoughts_delta=thoughts_delta,
+                                        web_images=web_images,
+                                        generated_images=generated_images,
+                                    )
+                                )

-
-
-
-
-
-
-
-
-            raise APIError(
-                "Failed to parse response body. Data structure is invalid."
-            )
+                            if any_changed:
+                                has_candidates = True
+                                yield ModelOutput(
+                                    metadata=get_nested_value(part_json, [1], []),
+                                    candidates=output_candidates,
+                                )
+                        except json.JSONDecodeError:
+                            continue

-
-
+                if is_busy and not has_candidates:
+                    raise APIError("Model is busy. Polling again...")

-
+        except ReadTimeout:
+            raise TimeoutError(
+                "The request timed out while waiting for Gemini to respond. This often happens with very long prompts "
+                "or complex file analysis. Try increasing the 'timeout' value when initializing GeminiClient."
+            )
+        except (GeminiError, APIError):
+            raise
+        except Exception as e:
+            logger.debug(
+                f"{type(e).__name__}: {e}; Unexpected response or parsing error. Response: {locals().get('response', 'N/A')}"
+            )
+            raise APIError(f"Failed to parse response body: {e}")

     def start_chat(self, **kwargs) -> "ChatSession":
         """
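The delta bookkeeping above is a snapshot-to-delta conversion: each stream frame carries the full candidate text so far, and the new characters are whatever extends the previously seen prefix. Distilled to its core:

    def compute_delta(snapshot: str, last: str) -> str:
        # If the server re-sent a diverging snapshot, fall back to the whole
        # snapshot instead of slicing at a bogus offset -- the same fallback
        # the startswith() guard provides in the code above.
        return snapshot[len(last):] if snapshot.startswith(last) else snapshot

    last = ""
    for snapshot in ["Hel", "Hello", "Hello, world"]:
        print(repr(compute_delta(snapshot, last)))  # 'Hel', 'lo', ', world'
        last = snapshot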
@@ -563,14 +832,15 @@ class GeminiClient(GemMixin):

         return ChatSession(geminiclient=self, **kwargs)

+    @running(retry=2)
     async def _batch_execute(self, payloads: list[RPCData], **kwargs) -> Response:
         """
         Execute a batch of requests to Gemini API.

         Parameters
         ----------
-        payloads: `list[
-            List of `gemini_webapi.types.
+        payloads: `list[RPCData]`
+            List of `gemini_webapi.types.RPCData` objects to be executed.
         kwargs: `dict`, optional
             Additional arguments which will be passed to the post request.
             Refer to `httpx.AsyncClient.request` for more information.
@@ -581,31 +851,47 @@ class GeminiClient(GemMixin):
         Response object containing the result of the batch execution.
         """

+        _reqid = self._reqid
+        self._reqid += 100000
+
         try:
+            params: dict[str, Any] = {
+                "rpcids": ",".join([p.rpcid.value for p in payloads]),
+                "_reqid": _reqid,
+                "rt": "c",
+                "source-path": "/app",
+            }
+            if self.build_label:
+                params["bl"] = self.build_label
+            if self.session_id:
+                params["f.sid"] = self.session_id
+
             response = await self.client.post(
                 Endpoint.BATCH_EXEC,
+                params=params,
                 data={
                     "at": self.access_token,
                     "f.req": json.dumps(
                         [[payload.serialize() for payload in payloads]]
-                    ).decode(),
+                    ).decode("utf-8"),
                 },
                 **kwargs,
             )
         except ReadTimeout:
             raise TimeoutError(
-                "
-                "
+                "The request timed out while waiting for Gemini to respond. This often happens with very long prompts "
+                "or complex file analysis. Try increasing the 'timeout' value when initializing GeminiClient."
             )

-        # ? Seems like batch execution will immediately invalidate the current access token,
-        # ? causing the next request to fail with 401 Unauthorized.
         if response.status_code != 200:
             await self.close()
             raise APIError(
                 f"Batch execution failed with status code {response.status_code}"
             )

+        if self.client:
+            self.cookies.update(self.client.cookies)
+
         return response

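A note on the repeated `.decode("utf-8")` calls in both `_generate` and `_batch_execute`: `orjson.dumps` returns `bytes`, while the `f.req` form field must be a `str`. In `_generate` the inner request is additionally embedded as a JSON string inside the outer JSON array, i.e. encoded twice:

    import orjson as json

    inner_req_list = ["hello", 0, None]
    f_req = json.dumps(
        [None, json.dumps(inner_req_list).decode("utf-8")]
    ).decode("utf-8")
    print(f_req)  # [null,"[\"hello\",0,null]"]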
@@ -652,7 +938,18 @@ class ChatSession:
         model: Model | str | dict = Model.UNSPECIFIED,
         gem: Gem | str | None = None,
     ):
-        self.__metadata: list[str | None] = [
+        self.__metadata: list[str | None] = [
+            "",
+            "",
+            "",
+            None,
+            None,
+            None,
+            None,
+            None,
+            None,
+            "",
+        ]
         self.geminiclient: GeminiClient = geminiclient
         self.last_output: ModelOutput | None = None
         self.model: Model | str | dict = model
@@ -727,6 +1024,43 @@ class ChatSession:
             **kwargs,
         )

+    async def send_message_stream(
+        self,
+        prompt: str,
+        files: list[str | Path] | None = None,
+        **kwargs,
+    ) -> AsyncGenerator[ModelOutput, None]:
+        """
+        Generates contents with prompt in streaming mode within this chat session.
+
+        This is a shortcut for `GeminiClient.generate_content_stream(prompt, files, self)`.
+        The session's metadata and conversation history are automatically managed.
+
+        Parameters
+        ----------
+        prompt: `str`
+            Prompt provided by user.
+        files: `list[str | Path]`, optional
+            List of file paths to be attached.
+        kwargs: `dict`, optional
+            Additional arguments passed to the streaming request.
+
+        Yields
+        ------
+        :class:`ModelOutput`
+            Partial output data containing text deltas.
+        """
+
+        async for output in self.geminiclient.generate_content_stream(
+            prompt=prompt,
+            files=files,
+            model=self.model,
+            gem=self.gem,
+            chat=self,
+            **kwargs,
+        ):
+            yield output
+
     def choose_candidate(self, index: int) -> ModelOutput:
         """
         Choose a candidate from the last `ModelOutput` to control the ongoing conversation flow.
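Session-level streaming then reads naturally; the sketch below assumes the same placeholder credentials as the earlier examples. Because `_generate` updates `chat.metadata` and `chat.rcid` while frames arrive, a follow-up message continues the same conversation.

    import asyncio

    from gemini_webapi import GeminiClient

    async def main():
        client = GeminiClient("<__Secure-1PSID>")
        await client.init()
        chat = client.start_chat()
        async for chunk in chat.send_message_stream("Tell me a story"):
            print(chunk.candidates[0].text_delta, end="", flush=True)
        print()
        await client.close()

    asyncio.run(main())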
@@ -765,9 +1099,13 @@ class ChatSession:

     @metadata.setter
     def metadata(self, value: list[str]):
-        if
-
-
+        if not isinstance(value, list):
+            return
+
+        # Update only non-None elements to preserve existing CID/RID/RCID/Context
+        for i, val in enumerate(value):
+            if i < 10 and val is not None:
+                self.__metadata[i] = val

     @property
     def cid(self):
@@ -777,14 +1115,6 @@ class ChatSession:
     def cid(self, value: str):
         self.__metadata[0] = value

-    @property
-    def rid(self):
-        return self.__metadata[1]
-
-    @rid.setter
-    def rid(self, value: str):
-        self.__metadata[1] = value
-
     @property
     def rcid(self):
         return self.__metadata[2]
@@ -792,3 +1122,11 @@ class ChatSession:
     @rcid.setter
     def rcid(self, value: str):
         self.__metadata[2] = value
+
+    @property
+    def rid(self):
+        return self.__metadata[1]
+
+    @rid.setter
+    def rid(self, value: str):
+        self.__metadata[1] = value