gemini-webapi 1.17.3 (py3-none-any.whl) → 1.18.1 (py3-none-any.whl)

This diff compares the contents of two publicly released versions of this package, as published to a supported public registry. It is provided for informational purposes only and reflects the changes exactly as they appear between the two published versions.
gemini_webapi/client.py CHANGED
@@ -1,19 +1,21 @@
  import asyncio
+ import codecs
+ import io
+ import random
  import re
  from asyncio import Task
  from pathlib import Path
- from typing import Any, Optional
+ from typing import Any, AsyncGenerator, Optional
 
  import orjson as json
- from httpx import AsyncClient, ReadTimeout, Response
+ from httpx import AsyncClient, Cookies, ReadTimeout, Response
 
  from .components import GemMixin
- from .constants import Endpoint, ErrorCode, Headers, Model
+ from .constants import Endpoint, ErrorCode, GRPC, Headers, Model
  from .exceptions import (
      APIError,
      AuthError,
      GeminiError,
-     ImageGenerationError,
      ModelInvalid,
      TemporarilyBlocked,
      TimeoutError,
@@ -28,13 +30,12 @@ from .types import (
      WebImage,
  )
  from .utils import (
-     extract_json_from_response,
      get_access_token,
      get_nested_value,
      logger,
      parse_file_name,
+     parse_stream_frames,
      rotate_1psidts,
-     rotate_tasks,
      running,
      upload_file,
  )
@@ -52,7 +53,7 @@ class GeminiClient(GemMixin):
      secure_1psid: `str`, optional
          __Secure-1PSID cookie value.
      secure_1psidts: `str`, optional
-         __Secure-1PSIDTS cookie value, some google accounts don't require this value, provide only if it's in the cookie list.
+         __Secure-1PSIDTS cookie value, some Google accounts don't require this value, provide only if it's in the cookie list.
      proxy: `str`, optional
          Proxy URL.
      kwargs: `dict`, optional
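Note: for readers unfamiliar with this client, the documented way to wire these cookies up is unchanged between the two versions. A minimal sketch (the cookie values are placeholders you must copy from your own browser session):

```python
import asyncio
from gemini_webapi import GeminiClient

# Placeholder values: copy the real cookies from your browser's DevTools
# (Application > Cookies > https://gemini.google.com).
Secure_1PSID = "g.a000..."
Secure_1PSIDTS = "sidts-..."

async def main():
    client = GeminiClient(Secure_1PSID, Secure_1PSIDTS, proxy=None)
    await client.init(timeout=30)

asyncio.run(main())
```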
@@ -71,12 +72,18 @@ class GeminiClient(GemMixin):
          "_running",
          "client",
          "access_token",
+         "build_label",
+         "session_id",
          "timeout",
          "auto_close",
          "close_delay",
          "close_task",
          "auto_refresh",
          "refresh_interval",
+         "refresh_task",
+         "verbose",
+         "_lock",
+         "_reqid",
          "_gems",  # From GemMixin
          "kwargs",
      ]
@@ -89,23 +96,31 @@ class GeminiClient(GemMixin):
          **kwargs,
      ):
          super().__init__()
-         self.cookies = {}
+         self.cookies = Cookies()
          self.proxy = proxy
          self._running: bool = False
          self.client: AsyncClient | None = None
          self.access_token: str | None = None
+         self.build_label: str | None = None
+         self.session_id: str | None = None
          self.timeout: float = 300
          self.auto_close: bool = False
          self.close_delay: float = 300
          self.close_task: Task | None = None
          self.auto_refresh: bool = True
          self.refresh_interval: float = 540
+         self.refresh_task: Task | None = None
+         self.verbose: bool = True
+         self._lock = asyncio.Lock()
+         self._reqid: int = random.randint(10000, 99999)
          self.kwargs = kwargs
 
          if secure_1psid:
-             self.cookies["__Secure-1PSID"] = secure_1psid
+             self.cookies.set("__Secure-1PSID", secure_1psid, domain=".google.com")
          if secure_1psidts:
-             self.cookies["__Secure-1PSIDTS"] = secure_1psidts
+             self.cookies.set(
+                 "__Secure-1PSIDTS", secure_1psidts, domain=".google.com"
+             )
 
      async def init(
          self,
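The switch from a plain dict to `httpx.Cookies` is what makes the `domain=".google.com"` scoping above possible. A small sketch of the difference, using only the public httpx API (the cookie value is a placeholder):

```python
from httpx import Cookies

jar = Cookies()
# Scoping the cookie to ".google.com" lets the same jar match
# gemini.google.com, accounts.google.com, and other subdomains.
jar.set("__Secure-1PSID", "placeholder-value", domain=".google.com")

# Unlike a dict, lookups can be qualified by domain and path:
assert jar.get("__Secure-1PSID", domain=".google.com") == "placeholder-value"
```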
@@ -129,50 +144,64 @@ class GeminiClient(GemMixin):
          close_delay: `float`, optional
              Time to wait before auto-closing the client in seconds. Effective only if `auto_close` is `True`.
          auto_refresh: `bool`, optional
-             If `True`, will schedule a task to automatically refresh cookies in the background.
+             If `True`, will schedule a task to automatically refresh cookies and access token in the background.
          refresh_interval: `float`, optional
-             Time interval for background cookie refresh in seconds. Effective only if `auto_refresh` is `True`.
+             Time interval for background cookie and access token refresh in seconds. Effective only if `auto_refresh` is `True`.
          verbose: `bool`, optional
              If `True`, will print more infomation in logs.
          """
 
-         try:
-             access_token, valid_cookies = await get_access_token(
-                 base_cookies=self.cookies, proxy=self.proxy, verbose=verbose
-             )
+         async with self._lock:
+             if self._running:
+                 return
 
-             self.client = AsyncClient(
-                 timeout=timeout,
-                 proxy=self.proxy,
-                 follow_redirects=True,
-                 headers=Headers.GEMINI.value,
-                 cookies=valid_cookies,
-                 **self.kwargs,
-             )
-             self.access_token = access_token
-             self.cookies = valid_cookies
-             self._running = True
-
-             self.timeout = timeout
-             self.auto_close = auto_close
-             self.close_delay = close_delay
-             if self.auto_close:
-                 await self.reset_close_task()
-
-             self.auto_refresh = auto_refresh
-             self.refresh_interval = refresh_interval
-             if task := rotate_tasks.get(self.cookies["__Secure-1PSID"]):
-                 task.cancel()
-             if self.auto_refresh:
-                 rotate_tasks[self.cookies["__Secure-1PSID"]] = asyncio.create_task(
-                     self.start_auto_refresh()
+             try:
+                 self.verbose = verbose
+                 access_token, build_label, session_id, valid_cookies = (
+                     await get_access_token(
+                         base_cookies=self.cookies,
+                         proxy=self.proxy,
+                         verbose=self.verbose,
+                         verify=self.kwargs.get("verify", True),
+                     )
                  )
 
-             if verbose:
-                 logger.success("Gemini client initialized successfully.")
-         except Exception:
-             await self.close()
-             raise
+                 self.client = AsyncClient(
+                     http2=True,
+                     timeout=timeout,
+                     proxy=self.proxy,
+                     follow_redirects=True,
+                     headers=Headers.GEMINI.value,
+                     cookies=valid_cookies,
+                     **self.kwargs,
+                 )
+                 self.access_token = access_token
+                 self.cookies = valid_cookies
+                 self.build_label = build_label
+                 self.session_id = session_id
+                 self._running = True
+
+                 self.timeout = timeout
+                 self.auto_close = auto_close
+                 self.close_delay = close_delay
+                 if self.auto_close:
+                     await self.reset_close_task()
+
+                 self.auto_refresh = auto_refresh
+                 self.refresh_interval = refresh_interval
+
+                 if self.refresh_task:
+                     self.refresh_task.cancel()
+                     self.refresh_task = None
+
+                 if self.auto_refresh:
+                     self.refresh_task = asyncio.create_task(self.start_auto_refresh())
+
+                 if self.verbose:
+                     logger.success("Gemini client initialized successfully.")
+             except Exception:
+                 await self.close()
+                 raise
 
      async def close(self, delay: float = 0) -> None:
          """
@@ -193,6 +222,10 @@ class GeminiClient(GemMixin):
              self.close_task.cancel()
              self.close_task = None
 
+         if self.refresh_task:
+             self.refresh_task.cancel()
+             self.refresh_task = None
+
          if self.client:
              await self.client.aclose()
 
@@ -211,34 +244,49 @@ class GeminiClient(GemMixin):
          """
          Start the background task to automatically refresh cookies.
          """
+         if self.refresh_interval < 60:
+             self.refresh_interval = 60
+
+         while self._running:
+             await asyncio.sleep(self.refresh_interval)
+
+             if not self._running:
+                 break
 
-         while True:
-             new_1psidts: str | None = None
              try:
-                 new_1psidts = await rotate_1psidts(self.cookies, self.proxy)
+                 async with self._lock:
+                     # Refresh all cookies in the background to keep the session alive.
+                     new_1psidts, rotated_cookies = await rotate_1psidts(
+                         self.cookies, self.proxy
+                     )
+                     if rotated_cookies:
+                         self.cookies.update(rotated_cookies)
+                         if self.client:
+                             self.client.cookies.update(rotated_cookies)
+
+                     if new_1psidts:
+                         if rotated_cookies:
+                             logger.debug("Cookies refreshed (network update).")
+                         else:
+                             logger.debug("Cookies are up to date (cached).")
+                     else:
+                         logger.warning(
+                             "Rotation response did not contain a new __Secure-1PSIDTS. "
+                             "Session might expire soon if this persists."
+                         )
+             except asyncio.CancelledError:
+                 raise
              except AuthError:
-                 if task := rotate_tasks.get(self.cookies.get("__Secure-1PSID", "")):
-                     task.cancel()
                  logger.warning(
-                     "AuthError: Failed to refresh cookies. Auto refresh task canceled."
+                     "AuthError: Failed to refresh cookies. Retrying in next interval."
                  )
-                 return
-             except Exception as exc:
-                 logger.warning(f"Unexpected error while refreshing cookies: {exc}")
-
-             if new_1psidts:
-                 self.cookies["__Secure-1PSIDTS"] = new_1psidts
-                 if self._running:
-                     self.client.cookies.set("__Secure-1PSIDTS", new_1psidts)
-                 logger.debug("Cookies refreshed. New __Secure-1PSIDTS applied.")
-
-             await asyncio.sleep(self.refresh_interval)
+             except Exception as e:
+                 logger.warning(f"Unexpected error while refreshing cookies: {e}")
 
-     @running(retry=2)
      async def generate_content(
          self,
          prompt: str,
-         files: list[str | Path] | None = None,
+         files: list[str | Path | bytes | io.BytesIO] | None = None,
          model: Model | str | dict = Model.UNSPECIFIED,
          gem: Gem | str | None = None,
          chat: Optional["ChatSession"] = None,
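The module-global `rotate_tasks` registry is gone: each client now owns its refresh task, cancels it in `close()`, and survives transient `AuthError`s instead of giving up. The general pattern, reduced to a standalone sketch independent of this library (the `_rotate` hook is hypothetical):

```python
import asyncio

class RefreshingClient:
    """Minimal owned-background-task pattern, mirroring the hunk above."""

    def __init__(self, interval: float = 540):
        self.interval = max(interval, 60)  # clamp, like the refresh loop
        self._running = False
        self._task: asyncio.Task | None = None

    def start(self) -> None:
        self._running = True
        self._task = asyncio.create_task(self._refresh_loop())

    async def _refresh_loop(self) -> None:
        while self._running:
            await asyncio.sleep(self.interval)
            try:
                await self._rotate()  # hypothetical credential-rotation hook
            except asyncio.CancelledError:
                raise  # let cancellation propagate instead of swallowing it
            except Exception:
                continue  # transient failure: retry on the next tick

    async def _rotate(self) -> None:
        """Hypothetical hook: rotate credentials here."""

    async def close(self) -> None:
        self._running = False
        if self._task:
            self._task.cancel()
            self._task = None
```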
@@ -285,6 +333,182 @@ class GeminiClient(GemMixin):
              - If response structure is invalid and failed to parse.
          """
 
+         if self.auto_close:
+             await self.reset_close_task()
+
+         if not (isinstance(chat, ChatSession) and chat.cid):
+             self._reqid = random.randint(10000, 99999)
+
+         file_data = None
+         if files:
+             await self._batch_execute(
+                 [
+                     RPCData(
+                         rpcid=GRPC.BARD_ACTIVITY,
+                         payload='[[["bard_activity_enabled"]]]',
+                     )
+                 ]
+             )
+
+             uploaded_urls = await asyncio.gather(
+                 *(upload_file(file, self.proxy) for file in files)
+             )
+             file_data = [
+                 [[url], parse_file_name(file)]
+                 for url, file in zip(uploaded_urls, files)
+             ]
+
+         try:
+             await self._batch_execute(
+                 [
+                     RPCData(
+                         rpcid=GRPC.BARD_ACTIVITY,
+                         payload='[[["bard_activity_enabled"]]]',
+                     )
+                 ]
+             )
+
+             output = None
+             async for output in self._generate(
+                 prompt=prompt,
+                 req_file_data=file_data,
+                 model=model,
+                 gem=gem,
+                 chat=chat,
+                 **kwargs,
+             ):
+                 pass
+
+             if output is None:
+                 raise GeminiError(
+                     "Failed to generate contents. No output data found in response."
+                 )
+
+             if isinstance(chat, ChatSession):
+                 chat.last_output = output
+
+             return output
+
+         finally:
+             if files:
+                 for file in files:
+                     if isinstance(file, io.BytesIO):
+                         file.close()
+
+     async def generate_content_stream(
+         self,
+         prompt: str,
+         files: list[str | Path | bytes | io.BytesIO] | None = None,
+         model: Model | str | dict = Model.UNSPECIFIED,
+         gem: Gem | str | None = None,
+         chat: Optional["ChatSession"] = None,
+         **kwargs,
+     ) -> AsyncGenerator[ModelOutput, None]:
+         """
+         Generates contents with prompt in streaming mode.
+
+         This method sends a request to Gemini and yields partial responses as they arrive.
+         It automatically calculates the text delta (new characters) to provide a smooth
+         streaming experience. It also continuously updates chat metadata and candidate IDs.
+
+         Parameters
+         ----------
+         prompt: `str`
+             Prompt provided by user.
+         files: `list[str | Path | bytes | io.BytesIO]`, optional
+             List of file paths or byte streams to be attached.
+         model: `Model | str | dict`, optional
+             Specify the model to use for generation.
+         gem: `Gem | str`, optional
+             Specify a gem to use as system prompt for the chat session.
+         chat: `ChatSession`, optional
+             Chat data to retrieve conversation history.
+         kwargs: `dict`, optional
+             Additional arguments passed to `httpx.AsyncClient.stream`.
+
+         Yields
+         ------
+         :class:`ModelOutput`
+             Partial output data. The `text` attribute contains only the NEW characters
+             received since the last yield.
+
+         Raises
+         ------
+         `gemini_webapi.APIError`
+             If the request fails or response structure is invalid.
+         `gemini_webapi.TimeoutError`
+             If the stream request times out.
+         """
+
+         if self.auto_close:
+             await self.reset_close_task()
+
+         if not (isinstance(chat, ChatSession) and chat.cid):
+             self._reqid = random.randint(10000, 99999)
+
+         file_data = None
+         if files:
+             await self._batch_execute(
+                 [
+                     RPCData(
+                         rpcid=GRPC.BARD_ACTIVITY,
+                         payload='[[["bard_activity_enabled"]]]',
+                     )
+                 ]
+             )
+
+             uploaded_urls = await asyncio.gather(
+                 *(upload_file(file, self.proxy) for file in files)
+             )
+             file_data = [
+                 [[url], parse_file_name(file)]
+                 for url, file in zip(uploaded_urls, files)
+             ]
+
+         try:
+             await self._batch_execute(
+                 [
+                     RPCData(
+                         rpcid=GRPC.BARD_ACTIVITY,
+                         payload='[[["bard_activity_enabled"]]]',
+                     )
+                 ]
+             )
+
+             output = None
+             async for output in self._generate(
+                 prompt=prompt,
+                 req_file_data=file_data,
+                 model=model,
+                 gem=gem,
+                 chat=chat,
+                 **kwargs,
+             ):
+                 yield output
+
+             if output and isinstance(chat, ChatSession):
+                 chat.last_output = output
+
+         finally:
+             if files:
+                 for file in files:
+                     if isinstance(file, io.BytesIO):
+                         file.close()
+
+     @running(retry=5)
+     async def _generate(
+         self,
+         prompt: str,
+         req_file_data: list[Any] | None = None,
+         model: Model | str | dict = Model.UNSPECIFIED,
+         gem: Gem | str | None = None,
+         chat: Optional["ChatSession"] = None,
+         **kwargs,
+     ) -> AsyncGenerator[ModelOutput, None]:
+         """
+         Internal method which actually sends content generation requests.
+         """
+
          assert prompt, "Prompt cannot be empty."
 
          if isinstance(model, str):
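Consuming the new public streaming method looks like this. Note that despite the docstring's wording above, each yielded `ModelOutput` carries the cumulative `text` per candidate, while the newly added `text_delta`/`thoughts_delta` fields hold only the fresh characters (prompt and printing are illustrative):

```python
async def stream_demo(client: GeminiClient):
    async for chunk in client.generate_content_stream("Write a haiku about autumn"):
        # text_delta holds only the characters added since the last yield,
        # so it can be printed incrementally without de-duplication.
        print(chunk.candidates[0].text_delta, end="", flush=True)
```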
@@ -297,253 +521,298 @@ class GeminiClient(GemMixin):
                  f"string, or dictionary; got `{type(model).__name__}`"
              )
 
-         if isinstance(gem, Gem):
-             gem_id = gem.id
-         else:
-             gem_id = gem
+         _reqid = self._reqid
+         self._reqid += 100000
 
-         if self.auto_close:
-             await self.reset_close_task()
+         gem_id = gem.id if isinstance(gem, Gem) else gem
 
          try:
-             response = await self.client.post(
-                 Endpoint.GENERATE.value,
+             message_content = [
+                 prompt,
+                 0,
+                 None,
+                 req_file_data,
+                 None,
+                 None,
+                 0,
+             ]
+
+             params: dict[str, Any] = {"_reqid": _reqid, "rt": "c"}
+             if self.build_label:
+                 params["bl"] = self.build_label
+             if self.session_id:
+                 params["f.sid"] = self.session_id
+
+             inner_req_list: list[Any] = [None] * 69
+             inner_req_list[0] = message_content
+             inner_req_list[2] = (
+                 chat.metadata
+                 if chat
+                 else ["", "", "", None, None, None, None, None, None, ""]
+             )
+             inner_req_list[7] = 1  # Enable Snapshot Streaming
+             if gem_id:
+                 inner_req_list[19] = gem_id
+
+             request_data = {
+                 "at": self.access_token,
+                 "f.req": json.dumps(
+                     [
+                         None,
+                         json.dumps(inner_req_list).decode("utf-8"),
+                     ]
+                 ).decode("utf-8"),
+             }
+
+             async with self.client.stream(
+                 "POST",
+                 Endpoint.GENERATE,
+                 params=params,
                  headers=model.model_header,
-                 data={
-                     "at": self.access_token,
-                     "f.req": json.dumps(
-                         [
-                             None,
-                             json.dumps(
-                                 [
-                                     files
-                                     and [
-                                         prompt,
-                                         0,
-                                         None,
-                                         [
-                                             [
-                                                 [await upload_file(file, self.proxy)],
-                                                 parse_file_name(file),
-                                             ]
-                                             for file in files
-                                         ],
-                                     ]
-                                     or [prompt],
-                                     None,
-                                     chat and chat.metadata,
-                                 ]
-                                 + (gem_id and [None] * 16 + [gem_id] or [])
-                             ).decode(),
-                         ]
-                     ).decode(),
-                 },
+                 data=request_data,
                  **kwargs,
-             )
-         except ReadTimeout:
-             raise TimeoutError(
-                 "Generate content request timed out, please try again. If the problem persists, "
-                 "consider setting a higher `timeout` value when initializing GeminiClient."
-             )
-
-         if response.status_code != 200:
-             await self.close()
-             raise APIError(
-                 f"Failed to generate contents. Request failed with status code {response.status_code}"
-             )
-         else:
-             response_json: list[Any] = []
-             body: list[Any] = []
-             body_index = 0
-
-             try:
-                 response_json = extract_json_from_response(response.text)
+             ) as response:
+                 if response.status_code != 200:
+                     await self.close()
+                     raise APIError(
+                         f"Failed to generate contents. Status: {response.status_code}"
+                     )
 
-                 for part_index, part in enumerate(response_json):
-                     try:
-                         part_body = get_nested_value(part, [2])
-                         if not part_body:
-                             continue
+                 if self.client:
+                     self.cookies.update(self.client.cookies)
 
-                         part_json = json.loads(part_body)
-                         if get_nested_value(part_json, [4]):
-                             body_index, body = part_index, part_json
-                             break
-                     except json.JSONDecodeError:
-                         continue
+                 buffer = ""
+                 decoder = codecs.getincrementaldecoder("utf-8")(errors="replace")
 
-                 if not body:
-                     raise Exception
-             except Exception:
-                 await self.close()
+                 # Track last seen content for each candidate by rcid
+                 last_texts: dict[str, str] = {}
+                 last_thoughts: dict[str, str] = {}
 
-                 try:
-                     error_code = get_nested_value(response_json, [0, 5, 2, 0, 1, 0], -1)
-                     match error_code:
-                         case ErrorCode.USAGE_LIMIT_EXCEEDED:
-                             raise UsageLimitExceeded(
-                                 f"Failed to generate contents. Usage limit of {model.model_name} model has exceeded. Please try switching to another model."
-                             )
-                         case ErrorCode.MODEL_INCONSISTENT:
-                             raise ModelInvalid(
-                                 "Failed to generate contents. The specified model is inconsistent with the chat history. Please make sure to pass the same "
-                                 "`model` parameter when starting a chat session with previous metadata."
-                             )
-                         case ErrorCode.MODEL_HEADER_INVALID:
-                             raise ModelInvalid(
-                                 "Failed to generate contents. The specified model is not available. Please update gemini_webapi to the latest version. "
-                                 "If the error persists and is caused by the package, please report it on GitHub."
-                             )
-                         case ErrorCode.IP_TEMPORARILY_BLOCKED:
-                             raise TemporarilyBlocked(
-                                 "Failed to generate contents. Your IP address is temporarily blocked by Google. Please try using a proxy or waiting for a while."
-                             )
-                         case _:
-                             raise Exception
-                 except GeminiError:
-                     raise
-                 except Exception:
-                     logger.debug(f"Invalid response: {response.text}")
-                     raise APIError(
-                         "Failed to generate contents. Invalid response data received. Client will try to re-initialize on next request."
-                     )
+                 is_busy = False
+                 has_candidates = False
 
-             try:
-                 candidate_list: list[Any] = get_nested_value(body, [4], [])
-                 output_candidates: list[Candidate] = []
-
-                 for candidate_index, candidate in enumerate(candidate_list):
-                     rcid = get_nested_value(candidate, [0])
-                     if not rcid:
-                         continue  # Skip candidate if it has no rcid
-
-                     # Text output and thoughts
-                     text = get_nested_value(candidate, [1, 0], "")
-                     if re.match(
-                         r"^http://googleusercontent\.com/card_content/\d+", text
-                     ):
-                         text = get_nested_value(candidate, [22, 0]) or text
-
-                     thoughts = get_nested_value(candidate, [37, 0, 0])
-
-                     # Web images
-                     web_images = []
-                     for web_img_data in get_nested_value(candidate, [12, 1], []):
-                         url = get_nested_value(web_img_data, [0, 0, 0])
-                         if not url:
-                             continue
+                 async for chunk in response.aiter_bytes():
+                     buffer += decoder.decode(chunk, final=False)
+                     if buffer.startswith(")]}'"):
+                         buffer = buffer[4:].lstrip()
 
-                         web_images.append(
-                             WebImage(
-                                 url=url,
-                                 title=get_nested_value(web_img_data, [7, 0], ""),
-                                 alt=get_nested_value(web_img_data, [0, 4], ""),
-                                 proxy=self.proxy,
-                             )
-                         )
+                     parsed_parts, buffer = parse_stream_frames(buffer)
 
-                     # Generated images
-                     generated_images = []
-                     if get_nested_value(candidate, [12, 7, 0]):
-                         img_body = None
-                         for img_part_index, part in enumerate(response_json):
-                             if img_part_index < body_index:
-                                 continue
+                     for part in parsed_parts:
+                         part_json = None
+                         # 0. Update chat metadata first whenever available to support follow-up polls
+                         inner_json_str = get_nested_value(part, [2])
+                         if inner_json_str:
                              try:
-                                 img_part_body = get_nested_value(part, [2])
-                                 if not img_part_body:
-                                     continue
-
-                                 img_part_json = json.loads(img_part_body)
-                                 if get_nested_value(
-                                     img_part_json, [4, candidate_index, 12, 7, 0]
+                                 part_json = json.loads(inner_json_str)
+                                 m_data = get_nested_value(part_json, [1])
+                                 if m_data and isinstance(chat, ChatSession):
+                                     chat.metadata = m_data
+
+                                 # Update context string from index 25 if available
+                                 context_str = get_nested_value(part_json, [25])
+                                 if isinstance(context_str, str) and isinstance(
+                                     chat, ChatSession
                                  ):
-                                     img_body = img_part_json
-                                     break
+                                     chat.metadata = [None] * 9 + [context_str]
                              except json.JSONDecodeError:
-                                 continue
+                                 pass
+
+                         # 1. Check for fatal error codes in any part
+                         error_code = get_nested_value(part, [5, 2, 0, 1, 0])
+                         if error_code:
+                             await self.close()
+                             match error_code:
+                                 case ErrorCode.USAGE_LIMIT_EXCEEDED:
+                                     raise UsageLimitExceeded(
+                                         f"Usage limit exceeded for model '{model.model_name}'. Please wait a few minutes, "
+                                         "switch to a different model (e.g., Gemini Flash), or check your account limits on gemini.google.com."
+                                     )
+                                 case ErrorCode.MODEL_INCONSISTENT:
+                                     raise ModelInvalid(
+                                         "The specified model is inconsistent with the conversation history. "
+                                         "Please ensure you are using the same 'model' parameter throughout the entire ChatSession."
+                                     )
+                                 case ErrorCode.MODEL_HEADER_INVALID:
+                                     raise ModelInvalid(
+                                         f"The model '{model.model_name}' is currently unavailable or the request structure is outdated. "
+                                         "Please update 'gemini_webapi' to the latest version or report this on GitHub if the problem persists."
+                                     )
+                                 case ErrorCode.IP_TEMPORARILY_BLOCKED:
+                                     raise TemporarilyBlocked(
+                                         "Your IP address has been temporarily flagged or blocked by Google. "
+                                         "Please try using a proxy, a different network, or wait for a while before retrying."
+                                     )
+                                 case _:
+                                     raise APIError(
+                                         f"Failed to generate contents (stream). Unknown API error code: {error_code}. "
+                                         "This might be a temporary Google service issue."
+                                     )
+
+                         # 2. Detect if model is busy analyzing data (Thinking state)
+                         if "data_analysis_tool" in str(part):
+                             is_busy = True
+                             if not has_candidates:
+                                 logger.debug("Model is busy (thinking/analyzing)...")
+
+                         # 3. Check for queueing status
+                         status = get_nested_value(part, [5])
+                         if isinstance(status, list) and status:
+                             is_busy = True
+                             if not has_candidates:
+                                 logger.debug(
+                                     "Model is in a waiting state (queueing)..."
+                                 )
 
-                         if not img_body:
-                             raise ImageGenerationError(
-                                 "Failed to parse generated images. Please update gemini_webapi to the latest version. "
-                                 "If the error persists and is caused by the package, please report it on GitHub."
-                             )
+                         if not inner_json_str:
+                             continue
 
-                         img_candidate = get_nested_value(
-                             img_body, [4, candidate_index], []
-                         )
+                         try:
+                             if part_json is None:
+                                 part_json = json.loads(inner_json_str)
 
-                         if finished_text := get_nested_value(
-                             img_candidate, [1, 0]
-                         ):  # Only overwrite if new text is returned after image generation
-                             text = re.sub(
-                                 r"http://googleusercontent\.com/image_generation_content/\d+",
-                                 "",
-                                 finished_text,
-                             ).rstrip()
-
-                         for img_index, gen_img_data in enumerate(
-                             get_nested_value(img_candidate, [12, 7, 0], [])
-                         ):
-                             url = get_nested_value(gen_img_data, [0, 3, 3])
-                             if not url:
+                             # Extract data from candidates
+                             candidates_list = get_nested_value(part_json, [4], [])
+                             if not candidates_list:
                                  continue
 
-                             img_num = get_nested_value(gen_img_data, [3, 6])
-                             title = (
-                                 f"[Generated Image {img_num}]"
-                                 if img_num
-                                 else "[Generated Image]"
-                             )
-
-                             alt_list = get_nested_value(gen_img_data, [3, 5], [])
-                             alt = (
-                                 get_nested_value(alt_list, [img_index])
-                                 or get_nested_value(alt_list, [0])
-                                 or ""
-                             )
-
-                             generated_images.append(
-                                 GeneratedImage(
-                                     url=url,
-                                     title=title,
-                                     alt=alt,
-                                     proxy=self.proxy,
-                                     cookies=self.cookies,
+                             output_candidates = []
+                             any_changed = False
+
+                             for candidate_data in candidates_list:
+                                 rcid = get_nested_value(candidate_data, [0])
+                                 if not rcid:
+                                     continue
+
+                                 if isinstance(chat, ChatSession):
+                                     chat.rcid = rcid
+
+                                 # Text output and thoughts
+                                 text = get_nested_value(candidate_data, [1, 0], "")
+                                 if re.match(
+                                     r"^http://googleusercontent\.com/card_content/\d+",
+                                     text,
+                                 ):
+                                     text = (
+                                         get_nested_value(candidate_data, [22, 0])
+                                         or text
+                                     )
+
+                                 # Cleanup googleusercontent artifacts
+                                 text = re.sub(
+                                     r"http://googleusercontent\.com/\w+/\d+\n*",
+                                     "",
+                                     text,
+                                 ).rstrip()
+
+                                 thoughts = (
+                                     get_nested_value(candidate_data, [37, 0, 0]) or ""
                                  )
-                             )
-
-                     output_candidates.append(
-                         Candidate(
-                             rcid=rcid,
-                             text=text,
-                             thoughts=thoughts,
-                             web_images=web_images,
-                             generated_images=generated_images,
-                         )
-                     )
 
-                 if not output_candidates:
-                     raise GeminiError(
-                         "Failed to generate contents. No output data found in response."
-                     )
+                                 # Web images
+                                 web_images = []
+                                 for web_img_data in get_nested_value(
+                                     candidate_data, [12, 1], []
+                                 ):
+                                     url = get_nested_value(web_img_data, [0, 0, 0])
+                                     if url:
+                                         web_images.append(
+                                             WebImage(
+                                                 url=url,
+                                                 title=get_nested_value(
+                                                     web_img_data, [7, 0], ""
+                                                 ),
+                                                 alt=get_nested_value(
+                                                     web_img_data, [0, 4], ""
+                                                 ),
+                                                 proxy=self.proxy,
+                                             )
+                                         )
+
+                                 # Generated images
+                                 generated_images = []
+                                 for gen_img_data in get_nested_value(
+                                     candidate_data, [12, 7, 0], []
+                                 ):
+                                     url = get_nested_value(gen_img_data, [0, 3, 3])
+                                     if url:
+                                         img_num = get_nested_value(gen_img_data, [3, 6])
+                                         alt_list = get_nested_value(
+                                             gen_img_data, [3, 5], []
+                                         )
+                                         generated_images.append(
+                                             GeneratedImage(
+                                                 url=url,
+                                                 title=(
+                                                     f"[Generated Image {img_num}]"
+                                                     if img_num
+                                                     else "[Generated Image]"
+                                                 ),
+                                                 alt=get_nested_value(alt_list, [0], ""),
+                                                 proxy=self.proxy,
+                                                 cookies=self.cookies,
+                                             )
+                                         )
+
+                                 # Calculate Deltas for this specific candidate
+                                 last_text = last_texts.get(rcid, "")
+                                 last_thought = last_thoughts.get(rcid, "")
+
+                                 text_delta = text
+                                 if text.startswith(last_text):
+                                     text_delta = text[len(last_text) :]
+
+                                 thoughts_delta = thoughts
+                                 if thoughts.startswith(last_thought):
+                                     thoughts_delta = thoughts[len(last_thought) :]
+
+                                 if (
+                                     text_delta
+                                     or thoughts_delta
+                                     or web_images
+                                     or generated_images
+                                 ):
+                                     any_changed = True
+
+                                 last_texts[rcid] = text
+                                 last_thoughts[rcid] = thoughts
+
+                                 output_candidates.append(
+                                     Candidate(
+                                         rcid=rcid,
+                                         text=text,
+                                         text_delta=text_delta,
+                                         thoughts=thoughts or None,
+                                         thoughts_delta=thoughts_delta,
+                                         web_images=web_images,
+                                         generated_images=generated_images,
+                                     )
+                                 )
 
-                 output = ModelOutput(
-                     metadata=get_nested_value(body, [1], []),
-                     candidates=output_candidates,
-                 )
-             except (TypeError, IndexError) as e:
-                 logger.debug(
-                     f"{type(e).__name__}: {e}; Invalid response structure: {response.text}"
-                 )
-                 raise APIError(
-                     "Failed to parse response body. Data structure is invalid."
-                 )
+                             if any_changed:
+                                 has_candidates = True
+                                 yield ModelOutput(
+                                     metadata=get_nested_value(part_json, [1], []),
+                                     candidates=output_candidates,
+                                 )
+                         except json.JSONDecodeError:
+                             continue
 
-         if isinstance(chat, ChatSession):
-             chat.last_output = output
+                 if is_busy and not has_candidates:
+                     raise APIError("Model is busy. Polling again...")
 
-         return output
+         except ReadTimeout:
+             raise TimeoutError(
+                 "The request timed out while waiting for Gemini to respond. This often happens with very long prompts "
+                 "or complex file analysis. Try increasing the 'timeout' value when initializing GeminiClient."
+             )
+         except (GeminiError, APIError):
+             raise
+         except Exception as e:
+             logger.debug(
+                 f"{type(e).__name__}: {e}; Unexpected response or parsing error. Response: {locals().get('response', 'N/A')}"
+             )
+             raise APIError(f"Failed to parse response body: {e}")
 
      def start_chat(self, **kwargs) -> "ChatSession":
          """
@@ -563,14 +832,34 @@ class GeminiClient(GemMixin):
 
          return ChatSession(geminiclient=self, **kwargs)
 
+     async def delete_chat(self, cid: str) -> None:
+         """
+         Delete a specific conversation by chat id.
+
+         Parameters
+         ----------
+         cid: `str`
+             The ID of the chat requiring deletion (e.g. "c_...").
+         """
+
+         await self._batch_execute(
+             [
+                 RPCData(
+                     rpcid=GRPC.DELETE_CHAT,
+                     payload=json.dumps([cid]),
+                 ),
+             ]
+         )
+
+     @running(retry=2)
      async def _batch_execute(self, payloads: list[RPCData], **kwargs) -> Response:
          """
          Execute a batch of requests to Gemini API.
 
          Parameters
          ----------
-         payloads: `list[GRPC]`
-             List of `gemini_webapi.types.GRPC` objects to be executed.
+         payloads: `list[RPCData]`
+             List of `gemini_webapi.types.RPCData` objects to be executed.
          kwargs: `dict`, optional
              Additional arguments which will be passed to the post request.
              Refer to `httpx.AsyncClient.request` for more information.
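Usage sketch for the new `delete_chat` (the prompt is illustrative; `chat.cid` is populated once the first response arrives):

```python
chat = client.start_chat()
await chat.send_message("Hello!")

# Remove the conversation from the account's chat history.
await client.delete_chat(chat.cid)
```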
@@ -581,31 +870,47 @@ class GeminiClient(GemMixin):
              Response object containing the result of the batch execution.
          """
 
+         _reqid = self._reqid
+         self._reqid += 100000
+
          try:
+             params: dict[str, Any] = {
+                 "rpcids": ",".join([p.rpcid for p in payloads]),
+                 "_reqid": _reqid,
+                 "rt": "c",
+                 "source-path": "/app",
+             }
+             if self.build_label:
+                 params["bl"] = self.build_label
+             if self.session_id:
+                 params["f.sid"] = self.session_id
+
              response = await self.client.post(
                  Endpoint.BATCH_EXEC,
+                 params=params,
                  data={
                      "at": self.access_token,
                      "f.req": json.dumps(
                          [[payload.serialize() for payload in payloads]]
-                     ).decode(),
+                     ).decode("utf-8"),
                  },
                  **kwargs,
              )
          except ReadTimeout:
              raise TimeoutError(
-                 "Batch execute request timed out, please try again. If the problem persists, "
-                 "consider setting a higher `timeout` value when initializing GeminiClient."
+                 "The request timed out while waiting for Gemini to respond. This often happens with very long prompts "
+                 "or complex file analysis. Try increasing the 'timeout' value when initializing GeminiClient."
              )
 
-         # ? Seems like batch execution will immediately invalidate the current access token,
-         # ? causing the next request to fail with 401 Unauthorized.
          if response.status_code != 200:
              await self.close()
              raise APIError(
                  f"Batch execution failed with status code {response.status_code}"
              )
 
+         if self.client:
+             self.cookies.update(self.client.cookies)
+
          return response
 
 
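For reference, the request envelope `_batch_execute` now emits looks roughly like this; this is reconstructed from the hunk above, not an official wire-format specification:

```python
params = {
    "rpcids": ",".join(p.rpcid for p in payloads),  # which RPCs the batch carries
    "_reqid": reqid,          # client-side counter, bumped by 100000 per call
    "rt": "c",                # request the chunked ")]}'" response framing
    "source-path": "/app",
    # plus "bl" (build label) and "f.sid" (session id) when known
}
data = {
    "at": access_token,       # anti-CSRF token scraped during init()
    "f.req": json.dumps([[p.serialize() for p in payloads]]).decode("utf-8"),
}
```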
@@ -652,7 +957,18 @@ class ChatSession:
          model: Model | str | dict = Model.UNSPECIFIED,
          gem: Gem | str | None = None,
      ):
-         self.__metadata: list[str | None] = [None, None, None]
+         self.__metadata: list[str | None] = [
+             "",
+             "",
+             "",
+             None,
+             None,
+             None,
+             None,
+             None,
+             None,
+             "",
+         ]
          self.geminiclient: GeminiClient = geminiclient
          self.last_output: ModelOutput | None = None
          self.model: Model | str | dict = model
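The metadata list grows from 3 slots to 10. Based on the accessors elsewhere in this file and the stream parser above, the layout appears to be:

```python
# Sketch of the 10-slot metadata list (only these indices are named in the code):
metadata = [
    "",    # [0] cid  - conversation id
    "",    # [1] rid  - reply id
    "",    # [2] rcid - reply candidate id
    None, None, None, None, None, None,  # [3..8] not referenced by the accessors
    "",    # [9] context string (filled from response index 25 while streaming)
]
```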
@@ -727,6 +1043,43 @@ class ChatSession:
              **kwargs,
          )
 
+     async def send_message_stream(
+         self,
+         prompt: str,
+         files: list[str | Path] | None = None,
+         **kwargs,
+     ) -> AsyncGenerator[ModelOutput, None]:
+         """
+         Generates contents with prompt in streaming mode within this chat session.
+
+         This is a shortcut for `GeminiClient.generate_content_stream(prompt, files, self)`.
+         The session's metadata and conversation history are automatically managed.
+
+         Parameters
+         ----------
+         prompt: `str`
+             Prompt provided by user.
+         files: `list[str | Path]`, optional
+             List of file paths to be attached.
+         kwargs: `dict`, optional
+             Additional arguments passed to the streaming request.
+
+         Yields
+         ------
+         :class:`ModelOutput`
+             Partial output data containing text deltas.
+         """
+
+         async for output in self.geminiclient.generate_content_stream(
+             prompt=prompt,
+             files=files,
+             model=self.model,
+             gem=self.gem,
+             chat=self,
+             **kwargs,
+         ):
+             yield output
+
      def choose_candidate(self, index: int) -> ModelOutput:
          """
          Choose a candidate from the last `ModelOutput` to control the ongoing conversation flow.
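A usage sketch for the session-level wrapper (prompt illustrative; `last_output.text` is the cumulative text of the chosen candidate):

```python
async def chat_stream_demo(client: GeminiClient):
    chat = client.start_chat()
    async for chunk in chat.send_message_stream("Summarize the plot of Hamlet"):
        print(chunk.candidates[0].text_delta, end="", flush=True)

    # After the stream ends, the full accumulated reply is still available:
    print("\n---\n" + chat.last_output.text)
```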
@@ -765,9 +1118,13 @@ class ChatSession:
 
      @metadata.setter
      def metadata(self, value: list[str]):
-         if len(value) > 3:
-             raise ValueError("metadata cannot exceed 3 elements")
-         self.__metadata[: len(value)] = value
+         if not isinstance(value, list):
+             return
+
+         # Update only non-None elements to preserve existing CID/RID/RCID/Context
+         for i, val in enumerate(value):
+             if i < 10 and val is not None:
+                 self.__metadata[i] = val
 
      @property
      def cid(self):
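The observable effect of the new merge-style setter, as a sketch (ids are made up):

```python
chat = client.start_chat()
chat.metadata = ["c_123", "r_456", "rc_789"]

# A later partial update with None gaps preserves earlier values:
chat.metadata = [None, "r_999"]
assert chat.cid == "c_123"  # index 0 untouched (None was skipped)
assert chat.rid == "r_999"  # index 1 updated
```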
@@ -777,14 +1134,6 @@ class ChatSession:
      def cid(self, value: str):
          self.__metadata[0] = value
 
-     @property
-     def rid(self):
-         return self.__metadata[1]
-
-     @rid.setter
-     def rid(self, value: str):
-         self.__metadata[1] = value
-
      @property
      def rcid(self):
          return self.__metadata[2]
@@ -792,3 +1141,11 @@ class ChatSession:
      @rcid.setter
      def rcid(self, value: str):
          self.__metadata[2] = value
+
+     @property
+     def rid(self):
+         return self.__metadata[1]
+
+     @rid.setter
+     def rid(self, value: str):
+         self.__metadata[1] = value