gemini-webapi 1.17.2__py3-none-any.whl → 1.18.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
gemini_webapi/client.py CHANGED
@@ -1,19 +1,21 @@
  import asyncio
+ import codecs
+ import io
+ import random
  import re
  from asyncio import Task
  from pathlib import Path
- from typing import Any, Optional
+ from typing import Any, AsyncGenerator, Optional

  import orjson as json
- from httpx import AsyncClient, ReadTimeout, Response
+ from httpx import AsyncClient, Cookies, ReadTimeout, Response

  from .components import GemMixin
- from .constants import Endpoint, ErrorCode, Headers, Model
+ from .constants import Endpoint, ErrorCode, GRPC, Headers, Model
  from .exceptions import (
      APIError,
      AuthError,
      GeminiError,
-     ImageGenerationError,
      ModelInvalid,
      TemporarilyBlocked,
      TimeoutError,
@@ -28,13 +30,12 @@ from .types import (
      WebImage,
  )
  from .utils import (
-     extract_json_from_response,
      get_access_token,
      get_nested_value,
      logger,
      parse_file_name,
+     parse_stream_frames,
      rotate_1psidts,
-     rotate_tasks,
      running,
      upload_file,
  )
@@ -52,7 +53,7 @@ class GeminiClient(GemMixin):
      secure_1psid: `str`, optional
          __Secure-1PSID cookie value.
      secure_1psidts: `str`, optional
-         __Secure-1PSIDTS cookie value, some google accounts don't require this value, provide only if it's in the cookie list.
+         __Secure-1PSIDTS cookie value, some Google accounts don't require this value, provide only if it's in the cookie list.
      proxy: `str`, optional
          Proxy URL.
      kwargs: `dict`, optional
@@ -71,12 +72,18 @@ class GeminiClient(GemMixin):
          "_running",
          "client",
          "access_token",
+         "build_label",
+         "session_id",
          "timeout",
          "auto_close",
          "close_delay",
          "close_task",
          "auto_refresh",
          "refresh_interval",
+         "refresh_task",
+         "verbose",
+         "_lock",
+         "_reqid",
          "_gems",  # From GemMixin
          "kwargs",
      ]
@@ -89,23 +96,31 @@ class GeminiClient(GemMixin):
          **kwargs,
      ):
          super().__init__()
-         self.cookies = {}
+         self.cookies = Cookies()
          self.proxy = proxy
          self._running: bool = False
          self.client: AsyncClient | None = None
          self.access_token: str | None = None
+         self.build_label: str | None = None
+         self.session_id: str | None = None
          self.timeout: float = 300
          self.auto_close: bool = False
          self.close_delay: float = 300
          self.close_task: Task | None = None
          self.auto_refresh: bool = True
          self.refresh_interval: float = 540
+         self.refresh_task: Task | None = None
+         self.verbose: bool = True
+         self._lock = asyncio.Lock()
+         self._reqid: int = random.randint(10000, 99999)
          self.kwargs = kwargs

          if secure_1psid:
-             self.cookies["__Secure-1PSID"] = secure_1psid
+             self.cookies.set("__Secure-1PSID", secure_1psid, domain=".google.com")
          if secure_1psidts:
-             self.cookies["__Secure-1PSIDTS"] = secure_1psidts
+             self.cookies.set(
+                 "__Secure-1PSIDTS", secure_1psidts, domain=".google.com"
+             )

      async def init(
          self,
@@ -129,51 +144,64 @@ class GeminiClient(GemMixin):
          close_delay: `float`, optional
              Time to wait before auto-closing the client in seconds. Effective only if `auto_close` is `True`.
          auto_refresh: `bool`, optional
-             If `True`, will schedule a task to automatically refresh cookies in the background.
+             If `True`, will schedule a task to automatically refresh cookies and access token in the background.
          refresh_interval: `float`, optional
-             Time interval for background cookie refresh in seconds. Effective only if `auto_refresh` is `True`.
+             Time interval for background cookie and access token refresh in seconds. Effective only if `auto_refresh` is `True`.
          verbose: `bool`, optional
              If `True`, will print more information in logs.
          """

-         try:
-             access_token, valid_cookies = await get_access_token(
-                 base_cookies=self.cookies, proxy=self.proxy, verbose=verbose
-             )
+         async with self._lock:
+             if self._running:
+                 return

-             self.client = AsyncClient(
-                 http2=True,
-                 timeout=timeout,
-                 proxy=self.proxy,
-                 follow_redirects=True,
-                 headers=Headers.GEMINI.value,
-                 cookies=valid_cookies,
-                 **self.kwargs,
-             )
-             self.access_token = access_token
-             self.cookies = valid_cookies
-             self._running = True
-
-             self.timeout = timeout
-             self.auto_close = auto_close
-             self.close_delay = close_delay
-             if self.auto_close:
-                 await self.reset_close_task()
-
-             self.auto_refresh = auto_refresh
-             self.refresh_interval = refresh_interval
-             if task := rotate_tasks.get(self.cookies["__Secure-1PSID"]):
-                 task.cancel()
-             if self.auto_refresh:
-                 rotate_tasks[self.cookies["__Secure-1PSID"]] = asyncio.create_task(
-                     self.start_auto_refresh()
+             try:
+                 self.verbose = verbose
+                 access_token, build_label, session_id, valid_cookies = (
+                     await get_access_token(
+                         base_cookies=self.cookies,
+                         proxy=self.proxy,
+                         verbose=self.verbose,
+                         verify=self.kwargs.get("verify", True),
+                     )
                  )

-             if verbose:
-                 logger.success("Gemini client initialized successfully.")
-         except Exception:
-             await self.close()
-             raise
+                 self.client = AsyncClient(
+                     http2=True,
+                     timeout=timeout,
+                     proxy=self.proxy,
+                     follow_redirects=True,
+                     headers=Headers.GEMINI.value,
+                     cookies=valid_cookies,
+                     **self.kwargs,
+                 )
+                 self.access_token = access_token
+                 self.cookies = valid_cookies
+                 self.build_label = build_label
+                 self.session_id = session_id
+                 self._running = True
+
+                 self.timeout = timeout
+                 self.auto_close = auto_close
+                 self.close_delay = close_delay
+                 if self.auto_close:
+                     await self.reset_close_task()
+
+                 self.auto_refresh = auto_refresh
+                 self.refresh_interval = refresh_interval
+
+                 if self.refresh_task:
+                     self.refresh_task.cancel()
+                     self.refresh_task = None
+
+                 if self.auto_refresh:
+                     self.refresh_task = asyncio.create_task(self.start_auto_refresh())
+
+                 if self.verbose:
+                     logger.success("Gemini client initialized successfully.")
+             except Exception:
+                 await self.close()
+                 raise

      async def close(self, delay: float = 0) -> None:
          """
@@ -194,6 +222,10 @@ class GeminiClient(GemMixin):
              self.close_task.cancel()
              self.close_task = None

+         if self.refresh_task:
+             self.refresh_task.cancel()
+             self.refresh_task = None
+
          if self.client:
              await self.client.aclose()

@@ -212,34 +244,49 @@ class GeminiClient(GemMixin):
          """
          Start the background task to automatically refresh cookies.
          """
+         if self.refresh_interval < 60:
+             self.refresh_interval = 60
+
+         while self._running:
+             await asyncio.sleep(self.refresh_interval)
+
+             if not self._running:
+                 break

-         while True:
-             new_1psidts: str | None = None
              try:
-                 new_1psidts = await rotate_1psidts(self.cookies, self.proxy)
+                 async with self._lock:
+                     # Refresh all cookies in the background to keep the session alive.
+                     new_1psidts, rotated_cookies = await rotate_1psidts(
+                         self.cookies, self.proxy
+                     )
+                     if rotated_cookies:
+                         self.cookies.update(rotated_cookies)
+                         if self.client:
+                             self.client.cookies.update(rotated_cookies)
+
+                     if new_1psidts:
+                         if rotated_cookies:
+                             logger.debug("Cookies refreshed (network update).")
+                         else:
+                             logger.debug("Cookies are up to date (cached).")
+                     else:
+                         logger.warning(
+                             "Rotation response did not contain a new __Secure-1PSIDTS. "
+                             "Session might expire soon if this persists."
+                         )
+             except asyncio.CancelledError:
+                 raise
              except AuthError:
-                 if task := rotate_tasks.get(self.cookies.get("__Secure-1PSID", "")):
-                     task.cancel()
                  logger.warning(
-                     "AuthError: Failed to refresh cookies. Auto refresh task canceled."
+                     "AuthError: Failed to refresh cookies. Retrying in next interval."
                  )
-                 return
-             except Exception as exc:
-                 logger.warning(f"Unexpected error while refreshing cookies: {exc}")
-
-             if new_1psidts:
-                 self.cookies["__Secure-1PSIDTS"] = new_1psidts
-                 if self._running:
-                     self.client.cookies.set("__Secure-1PSIDTS", new_1psidts)
-                 logger.debug("Cookies refreshed. New __Secure-1PSIDTS applied.")
+             except Exception as e:
+                 logger.warning(f"Unexpected error while refreshing cookies: {e}")

-             await asyncio.sleep(self.refresh_interval)
-
-     @running(retry=2)
      async def generate_content(
          self,
          prompt: str,
-         files: list[str | Path] | None = None,
+         files: list[str | Path | bytes | io.BytesIO] | None = None,
          model: Model | str | dict = Model.UNSPECIFIED,
          gem: Gem | str | None = None,
          chat: Optional["ChatSession"] = None,
@@ -286,6 +333,182 @@ class GeminiClient(GemMixin):
              - If response structure is invalid and failed to parse.
          """

+         if self.auto_close:
+             await self.reset_close_task()
+
+         if not (isinstance(chat, ChatSession) and chat.cid):
+             self._reqid = random.randint(10000, 99999)
+
+         file_data = None
+         if files:
+             await self._batch_execute(
+                 [
+                     RPCData(
+                         rpcid=GRPC.BARD_ACTIVITY,
+                         payload='[[["bard_activity_enabled"]]]',
+                     )
+                 ]
+             )
+
+             uploaded_urls = await asyncio.gather(
+                 *(upload_file(file, self.proxy) for file in files)
+             )
+             file_data = [
+                 [[url], parse_file_name(file)]
+                 for url, file in zip(uploaded_urls, files)
+             ]
+
+         try:
+             await self._batch_execute(
+                 [
+                     RPCData(
+                         rpcid=GRPC.BARD_ACTIVITY,
+                         payload='[[["bard_activity_enabled"]]]',
+                     )
+                 ]
+             )
+
+             output = None
+             async for output in self._generate(
+                 prompt=prompt,
+                 req_file_data=file_data,
+                 model=model,
+                 gem=gem,
+                 chat=chat,
+                 **kwargs,
+             ):
+                 pass
+
+             if output is None:
+                 raise GeminiError(
+                     "Failed to generate contents. No output data found in response."
+                 )
+
+             if isinstance(chat, ChatSession):
+                 chat.last_output = output
+
+             return output
+
+         finally:
+             if files:
+                 for file in files:
+                     if isinstance(file, io.BytesIO):
+                         file.close()
+
+     async def generate_content_stream(
+         self,
+         prompt: str,
+         files: list[str | Path | bytes | io.BytesIO] | None = None,
+         model: Model | str | dict = Model.UNSPECIFIED,
+         gem: Gem | str | None = None,
+         chat: Optional["ChatSession"] = None,
+         **kwargs,
+     ) -> AsyncGenerator[ModelOutput, None]:
+         """
+         Generates contents with prompt in streaming mode.
+
+         This method sends a request to Gemini and yields partial responses as they arrive.
+         It automatically calculates the text delta (new characters) to provide a smooth
+         streaming experience. It also continuously updates chat metadata and candidate IDs.
+
+         Parameters
+         ----------
+         prompt: `str`
+             Prompt provided by user.
+         files: `list[str | Path | bytes | io.BytesIO]`, optional
+             List of file paths or byte streams to be attached.
+         model: `Model | str | dict`, optional
+             Specify the model to use for generation.
+         gem: `Gem | str`, optional
+             Specify a gem to use as system prompt for the chat session.
+         chat: `ChatSession`, optional
+             Chat data to retrieve conversation history.
+         kwargs: `dict`, optional
+             Additional arguments passed to `httpx.AsyncClient.stream`.
+
+         Yields
+         ------
+         :class:`ModelOutput`
+             Partial output data. The `text_delta` attribute of each candidate contains
+             only the NEW characters received since the last yield.
+
+         Raises
+         ------
+         `gemini_webapi.APIError`
+             If the request fails or response structure is invalid.
+         `gemini_webapi.TimeoutError`
+             If the stream request times out.
+         """
+
+         if self.auto_close:
+             await self.reset_close_task()
+
+         if not (isinstance(chat, ChatSession) and chat.cid):
+             self._reqid = random.randint(10000, 99999)
+
+         file_data = None
+         if files:
+             await self._batch_execute(
+                 [
+                     RPCData(
+                         rpcid=GRPC.BARD_ACTIVITY,
+                         payload='[[["bard_activity_enabled"]]]',
+                     )
+                 ]
+             )
+
+             uploaded_urls = await asyncio.gather(
+                 *(upload_file(file, self.proxy) for file in files)
+             )
+             file_data = [
+                 [[url], parse_file_name(file)]
+                 for url, file in zip(uploaded_urls, files)
+             ]
+
+         try:
+             await self._batch_execute(
+                 [
+                     RPCData(
+                         rpcid=GRPC.BARD_ACTIVITY,
+                         payload='[[["bard_activity_enabled"]]]',
+                     )
+                 ]
+             )
+
+             output = None
+             async for output in self._generate(
+                 prompt=prompt,
+                 req_file_data=file_data,
+                 model=model,
+                 gem=gem,
+                 chat=chat,
+                 **kwargs,
+             ):
+                 yield output
+
+             if output and isinstance(chat, ChatSession):
+                 chat.last_output = output
+
+         finally:
+             if files:
+                 for file in files:
+                     if isinstance(file, io.BytesIO):
+                         file.close()
+
+     @running(retry=5)
+     async def _generate(
+         self,
+         prompt: str,
+         req_file_data: list[Any] | None = None,
+         model: Model | str | dict = Model.UNSPECIFIED,
+         gem: Gem | str | None = None,
+         chat: Optional["ChatSession"] = None,
+         **kwargs,
+     ) -> AsyncGenerator[ModelOutput, None]:
+         """
+         Internal method which actually sends content generation requests.
+         """
+
          assert prompt, "Prompt cannot be empty."

          if isinstance(model, str):
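
Consuming the new streaming API might look like the sketch below. Per the docstring above, each yielded `ModelOutput` is a partial snapshot whose candidates carry `text_delta`/`thoughts_delta` with only the newly received characters (the prompt text is illustrative):

    async def stream_demo(client: GeminiClient):
        async for chunk in client.generate_content_stream("Write a short poem"):
            # Print only the incremental text of the first candidate.
            print(chunk.candidates[0].text_delta, end="", flush=True)
        print()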
@@ -298,253 +521,298 @@ class GeminiClient(GemMixin):
                  f"string, or dictionary; got `{type(model).__name__}`"
              )

-         if isinstance(gem, Gem):
-             gem_id = gem.id
-         else:
-             gem_id = gem
+         _reqid = self._reqid
+         self._reqid += 100000

-         if self.auto_close:
-             await self.reset_close_task()
+         gem_id = gem.id if isinstance(gem, Gem) else gem

          try:
-             response = await self.client.post(
-                 Endpoint.GENERATE.value,
+             message_content = [
+                 prompt,
+                 0,
+                 None,
+                 req_file_data,
+                 None,
+                 None,
+                 0,
+             ]
+
+             params: dict[str, Any] = {"_reqid": _reqid, "rt": "c"}
+             if self.build_label:
+                 params["bl"] = self.build_label
+             if self.session_id:
+                 params["f.sid"] = self.session_id
+
+             inner_req_list: list[Any] = [None] * 69
+             inner_req_list[0] = message_content
+             inner_req_list[2] = (
+                 chat.metadata
+                 if chat
+                 else ["", "", "", None, None, None, None, None, None, ""]
+             )
+             inner_req_list[7] = 1  # Enable Snapshot Streaming
+             if gem_id:
+                 inner_req_list[19] = gem_id
+
+             request_data = {
+                 "at": self.access_token,
+                 "f.req": json.dumps(
+                     [
+                         None,
+                         json.dumps(inner_req_list).decode("utf-8"),
+                     ]
+                 ).decode("utf-8"),
+             }
+
+             async with self.client.stream(
+                 "POST",
+                 Endpoint.GENERATE,
+                 params=params,
                  headers=model.model_header,
-                 data={
-                     "at": self.access_token,
-                     "f.req": json.dumps(
-                         [
-                             None,
-                             json.dumps(
-                                 [
-                                     files
-                                     and [
-                                         prompt,
-                                         0,
-                                         None,
-                                         [
-                                             [
-                                                 [await upload_file(file, self.proxy)],
-                                                 parse_file_name(file),
-                                             ]
-                                             for file in files
-                                         ],
-                                     ]
-                                     or [prompt],
-                                     None,
-                                     chat and chat.metadata,
-                                 ]
-                                 + (gem_id and [None] * 16 + [gem_id] or [])
-                             ).decode(),
-                         ]
-                     ).decode(),
-                 },
+                 data=request_data,
                  **kwargs,
-             )
-         except ReadTimeout:
-             raise TimeoutError(
-                 "Generate content request timed out, please try again. If the problem persists, "
-                 "consider setting a higher `timeout` value when initializing GeminiClient."
-             )
-
-         if response.status_code != 200:
-             await self.close()
-             raise APIError(
-                 f"Failed to generate contents. Request failed with status code {response.status_code}"
-             )
-         else:
-             response_json: list[Any] = []
-             body: list[Any] = []
-             body_index = 0
+             ) as response:
+                 if response.status_code != 200:
+                     await self.close()
+                     raise APIError(
+                         f"Failed to generate contents. Status: {response.status_code}"
+                     )

-             try:
-                 response_json = extract_json_from_response(response.text)
+                 if self.client:
+                     self.cookies.update(self.client.cookies)

-                 for part_index, part in enumerate(response_json):
-                     try:
-                         part_body = get_nested_value(part, [2])
-                         if not part_body:
-                             continue
+                 buffer = ""
+                 decoder = codecs.getincrementaldecoder("utf-8")(errors="replace")

-                         part_json = json.loads(part_body)
-                         if get_nested_value(part_json, [4]):
-                             body_index, body = part_index, part_json
-                             break
-                     except json.JSONDecodeError:
-                         continue
+                 # Track last seen content for each candidate by rcid
+                 last_texts: dict[str, str] = {}
+                 last_thoughts: dict[str, str] = {}

-                 if not body:
-                     raise Exception
-             except Exception:
-                 await self.close()
+                 is_busy = False
+                 has_candidates = False

-                 try:
-                     error_code = get_nested_value(response_json, [0, 5, 2, 0, 1, 0], -1)
-                     match error_code:
-                         case ErrorCode.USAGE_LIMIT_EXCEEDED:
-                             raise UsageLimitExceeded(
-                                 f"Failed to generate contents. Usage limit of {model.model_name} model has exceeded. Please try switching to another model."
-                             )
-                         case ErrorCode.MODEL_INCONSISTENT:
-                             raise ModelInvalid(
-                                 "Failed to generate contents. The specified model is inconsistent with the chat history. Please make sure to pass the same "
-                                 "`model` parameter when starting a chat session with previous metadata."
-                             )
-                         case ErrorCode.MODEL_HEADER_INVALID:
-                             raise ModelInvalid(
-                                 "Failed to generate contents. The specified model is not available. Please update gemini_webapi to the latest version. "
-                                 "If the error persists and is caused by the package, please report it on GitHub."
-                             )
-                         case ErrorCode.IP_TEMPORARILY_BLOCKED:
-                             raise TemporarilyBlocked(
-                                 "Failed to generate contents. Your IP address is temporarily blocked by Google. Please try using a proxy or waiting for a while."
-                             )
-                         case _:
-                             raise Exception
-                 except GeminiError:
-                     raise
-                 except Exception:
-                     logger.debug(f"Invalid response: {response.text}")
-                     raise APIError(
-                         "Failed to generate contents. Invalid response data received. Client will try to re-initialize on next request."
-                     )
-
-             try:
-                 candidate_list: list[Any] = get_nested_value(body, [4], [])
-                 output_candidates: list[Candidate] = []
-
-                 for candidate_index, candidate in enumerate(candidate_list):
-                     rcid = get_nested_value(candidate, [0])
-                     if not rcid:
-                         continue  # Skip candidate if it has no rcid
-
-                     # Text output and thoughts
-                     text = get_nested_value(candidate, [1, 0], "")
-                     if re.match(
-                         r"^http://googleusercontent\.com/card_content/\d+", text
-                     ):
-                         text = get_nested_value(candidate, [22, 0]) or text
-
-                     thoughts = get_nested_value(candidate, [37, 0, 0])
-
-                     # Web images
-                     web_images = []
-                     for web_img_data in get_nested_value(candidate, [12, 1], []):
-                         url = get_nested_value(web_img_data, [0, 0, 0])
-                         if not url:
-                             continue
+                 async for chunk in response.aiter_bytes():
+                     buffer += decoder.decode(chunk, final=False)
+                     if buffer.startswith(")]}'"):
+                         buffer = buffer[4:].lstrip()

-                         web_images.append(
-                             WebImage(
-                                 url=url,
-                                 title=get_nested_value(web_img_data, [7, 0], ""),
-                                 alt=get_nested_value(web_img_data, [0, 4], ""),
-                                 proxy=self.proxy,
-                             )
-                         )
+                     parsed_parts, buffer = parse_stream_frames(buffer)

-                     # Generated images
-                     generated_images = []
-                     if get_nested_value(candidate, [12, 7, 0]):
-                         img_body = None
-                         for img_part_index, part in enumerate(response_json):
-                             if img_part_index < body_index:
-                                 continue
+                     for part in parsed_parts:
+                         part_json = None
+                         # 0. Update chat metadata first whenever available to support follow-up polls
+                         inner_json_str = get_nested_value(part, [2])
+                         if inner_json_str:
                              try:
-                                 img_part_body = get_nested_value(part, [2])
-                                 if not img_part_body:
-                                     continue
-
-                                 img_part_json = json.loads(img_part_body)
-                                 if get_nested_value(
-                                     img_part_json, [4, candidate_index, 12, 7, 0]
+                                 part_json = json.loads(inner_json_str)
+                                 m_data = get_nested_value(part_json, [1])
+                                 if m_data and isinstance(chat, ChatSession):
+                                     chat.metadata = m_data
+
+                                 # Update context string from index 25 if available
+                                 context_str = get_nested_value(part_json, [25])
+                                 if isinstance(context_str, str) and isinstance(
+                                     chat, ChatSession
                                  ):
-                                     img_body = img_part_json
-                                     break
+                                     chat.metadata = [None] * 9 + [context_str]
                              except json.JSONDecodeError:
-                                 continue
+                                 pass
+
+                         # 1. Check for fatal error codes in any part
+                         error_code = get_nested_value(part, [5, 2, 0, 1, 0])
+                         if error_code:
+                             await self.close()
+                             match error_code:
+                                 case ErrorCode.USAGE_LIMIT_EXCEEDED:
+                                     raise UsageLimitExceeded(
+                                         f"Usage limit exceeded for model '{model.model_name}'. Please wait a few minutes, "
+                                         "switch to a different model (e.g., Gemini Flash), or check your account limits on gemini.google.com."
+                                     )
+                                 case ErrorCode.MODEL_INCONSISTENT:
+                                     raise ModelInvalid(
+                                         "The specified model is inconsistent with the conversation history. "
+                                         "Please ensure you are using the same 'model' parameter throughout the entire ChatSession."
+                                     )
+                                 case ErrorCode.MODEL_HEADER_INVALID:
+                                     raise ModelInvalid(
+                                         f"The model '{model.model_name}' is currently unavailable or the request structure is outdated. "
+                                         "Please update 'gemini_webapi' to the latest version or report this on GitHub if the problem persists."
+                                     )
+                                 case ErrorCode.IP_TEMPORARILY_BLOCKED:
+                                     raise TemporarilyBlocked(
+                                         "Your IP address has been temporarily flagged or blocked by Google. "
+                                         "Please try using a proxy, a different network, or wait for a while before retrying."
+                                     )
+                                 case _:
+                                     raise APIError(
+                                         f"Failed to generate contents (stream). Unknown API error code: {error_code}. "
+                                         "This might be a temporary Google service issue."
+                                     )
+
+                         # 2. Detect if model is busy analyzing data (Thinking state)
+                         if "data_analysis_tool" in str(part):
+                             is_busy = True
+                             if not has_candidates:
+                                 logger.debug("Model is busy (thinking/analyzing)...")
+
+                         # 3. Check for queueing status
+                         status = get_nested_value(part, [5])
+                         if isinstance(status, list) and status:
+                             is_busy = True
+                             if not has_candidates:
+                                 logger.debug(
+                                     "Model is in a waiting state (queueing)..."
+                                 )

-                         if not img_body:
-                             raise ImageGenerationError(
-                                 "Failed to parse generated images. Please update gemini_webapi to the latest version. "
-                                 "If the error persists and is caused by the package, please report it on GitHub."
-                             )
+                         if not inner_json_str:
+                             continue

-                         img_candidate = get_nested_value(
-                             img_body, [4, candidate_index], []
-                         )
+                         try:
+                             if part_json is None:
+                                 part_json = json.loads(inner_json_str)

-                         if finished_text := get_nested_value(
-                             img_candidate, [1, 0]
-                         ):  # Only overwrite if new text is returned after image generation
-                             text = re.sub(
-                                 r"http://googleusercontent\.com/image_generation_content/\d+",
-                                 "",
-                                 finished_text,
-                             ).rstrip()
-
-                         for img_index, gen_img_data in enumerate(
-                             get_nested_value(img_candidate, [12, 7, 0], [])
-                         ):
-                             url = get_nested_value(gen_img_data, [0, 3, 3])
-                             if not url:
+                             # Extract data from candidates
+                             candidates_list = get_nested_value(part_json, [4], [])
+                             if not candidates_list:
                                  continue

-                             img_num = get_nested_value(gen_img_data, [3, 6])
-                             title = (
-                                 f"[Generated Image {img_num}]"
-                                 if img_num
-                                 else "[Generated Image]"
-                             )
-
-                             alt_list = get_nested_value(gen_img_data, [3, 5], [])
-                             alt = (
-                                 get_nested_value(alt_list, [img_index])
-                                 or get_nested_value(alt_list, [0])
-                                 or ""
-                             )
-
-                             generated_images.append(
-                                 GeneratedImage(
-                                     url=url,
-                                     title=title,
-                                     alt=alt,
-                                     proxy=self.proxy,
-                                     cookies=self.cookies,
+                             output_candidates = []
+                             any_changed = False
+
+                             for candidate_data in candidates_list:
+                                 rcid = get_nested_value(candidate_data, [0])
+                                 if not rcid:
+                                     continue
+
+                                 if isinstance(chat, ChatSession):
+                                     chat.rcid = rcid
+
+                                 # Text output and thoughts
+                                 text = get_nested_value(candidate_data, [1, 0], "")
+                                 if re.match(
+                                     r"^http://googleusercontent\.com/card_content/\d+",
+                                     text,
+                                 ):
+                                     text = (
+                                         get_nested_value(candidate_data, [22, 0])
+                                         or text
+                                     )
+
+                                 # Cleanup googleusercontent artifacts
+                                 text = re.sub(
+                                     r"http://googleusercontent\.com/\w+/\d+\n*",
+                                     "",
+                                     text,
+                                 ).rstrip()
+
+                                 thoughts = (
+                                     get_nested_value(candidate_data, [37, 0, 0]) or ""
                                  )
-                             )
-
-                     output_candidates.append(
-                         Candidate(
-                             rcid=rcid,
-                             text=text,
-                             thoughts=thoughts,
-                             web_images=web_images,
-                             generated_images=generated_images,
-                         )
-                     )

-                 if not output_candidates:
-                     raise GeminiError(
-                         "Failed to generate contents. No output data found in response."
-                     )
+                                 # Web images
+                                 web_images = []
+                                 for web_img_data in get_nested_value(
+                                     candidate_data, [12, 1], []
+                                 ):
+                                     url = get_nested_value(web_img_data, [0, 0, 0])
+                                     if url:
+                                         web_images.append(
+                                             WebImage(
+                                                 url=url,
+                                                 title=get_nested_value(
+                                                     web_img_data, [7, 0], ""
+                                                 ),
+                                                 alt=get_nested_value(
+                                                     web_img_data, [0, 4], ""
+                                                 ),
+                                                 proxy=self.proxy,
+                                             )
+                                         )
+
+                                 # Generated images
+                                 generated_images = []
+                                 for gen_img_data in get_nested_value(
+                                     candidate_data, [12, 7, 0], []
+                                 ):
+                                     url = get_nested_value(gen_img_data, [0, 3, 3])
+                                     if url:
+                                         img_num = get_nested_value(gen_img_data, [3, 6])
+                                         alt_list = get_nested_value(
+                                             gen_img_data, [3, 5], []
+                                         )
+                                         generated_images.append(
+                                             GeneratedImage(
+                                                 url=url,
+                                                 title=(
+                                                     f"[Generated Image {img_num}]"
+                                                     if img_num
+                                                     else "[Generated Image]"
+                                                 ),
+                                                 alt=get_nested_value(alt_list, [0], ""),
+                                                 proxy=self.proxy,
+                                                 cookies=self.cookies,
+                                             )
+                                         )
+
+                                 # Calculate Deltas for this specific candidate
+                                 last_text = last_texts.get(rcid, "")
+                                 last_thought = last_thoughts.get(rcid, "")
+
+                                 text_delta = text
+                                 if text.startswith(last_text):
+                                     text_delta = text[len(last_text) :]
+
+                                 thoughts_delta = thoughts
+                                 if thoughts.startswith(last_thought):
+                                     thoughts_delta = thoughts[len(last_thought) :]
+
+                                 if (
+                                     text_delta
+                                     or thoughts_delta
+                                     or web_images
+                                     or generated_images
+                                 ):
+                                     any_changed = True
+
+                                 last_texts[rcid] = text
+                                 last_thoughts[rcid] = thoughts
+
+                                 output_candidates.append(
+                                     Candidate(
+                                         rcid=rcid,
+                                         text=text,
+                                         text_delta=text_delta,
+                                         thoughts=thoughts or None,
+                                         thoughts_delta=thoughts_delta,
+                                         web_images=web_images,
+                                         generated_images=generated_images,
+                                     )
+                                 )

-                 output = ModelOutput(
-                     metadata=get_nested_value(body, [1], []),
-                     candidates=output_candidates,
-                 )
-             except (TypeError, IndexError) as e:
-                 logger.debug(
-                     f"{type(e).__name__}: {e}; Invalid response structure: {response.text}"
-                 )
-                 raise APIError(
-                     "Failed to parse response body. Data structure is invalid."
-                 )
+                             if any_changed:
+                                 has_candidates = True
+                                 yield ModelOutput(
+                                     metadata=get_nested_value(part_json, [1], []),
+                                     candidates=output_candidates,
+                                 )
+                         except json.JSONDecodeError:
+                             continue

-         if isinstance(chat, ChatSession):
-             chat.last_output = output
+             if is_busy and not has_candidates:
+                 raise APIError("Model is busy. Polling again...")

-         return output
+         except ReadTimeout:
+             raise TimeoutError(
+                 "The request timed out while waiting for Gemini to respond. This often happens with very long prompts "
+                 "or complex file analysis. Try increasing the 'timeout' value when initializing GeminiClient."
+             )
+         except (GeminiError, APIError):
+             raise
+         except Exception as e:
+             logger.debug(
+                 f"{type(e).__name__}: {e}; Unexpected response or parsing error. Response: {locals().get('response', 'N/A')}"
+             )
+             raise APIError(f"Failed to parse response body: {e}")

      def start_chat(self, **kwargs) -> "ChatSession":
          """
@@ -564,14 +832,15 @@ class GeminiClient(GemMixin):

          return ChatSession(geminiclient=self, **kwargs)

+     @running(retry=2)
      async def _batch_execute(self, payloads: list[RPCData], **kwargs) -> Response:
          """
          Execute a batch of requests to Gemini API.

          Parameters
          ----------
-         payloads: `list[GRPC]`
-             List of `gemini_webapi.types.GRPC` objects to be executed.
+         payloads: `list[RPCData]`
+             List of `gemini_webapi.types.RPCData` objects to be executed.
          kwargs: `dict`, optional
              Additional arguments which will be passed to the post request.
              Refer to `httpx.AsyncClient.request` for more information.
@@ -582,31 +851,47 @@ class GeminiClient(GemMixin):
              Response object containing the result of the batch execution.
          """

+         _reqid = self._reqid
+         self._reqid += 100000
+
          try:
+             params: dict[str, Any] = {
+                 "rpcids": ",".join([p.rpcid.value for p in payloads]),
+                 "_reqid": _reqid,
+                 "rt": "c",
+                 "source-path": "/app",
+             }
+             if self.build_label:
+                 params["bl"] = self.build_label
+             if self.session_id:
+                 params["f.sid"] = self.session_id
+
              response = await self.client.post(
                  Endpoint.BATCH_EXEC,
+                 params=params,
                  data={
                      "at": self.access_token,
                      "f.req": json.dumps(
                          [[payload.serialize() for payload in payloads]]
-                     ).decode(),
+                     ).decode("utf-8"),
                  },
                  **kwargs,
              )
          except ReadTimeout:
              raise TimeoutError(
-                 "Batch execute request timed out, please try again. If the problem persists, "
-                 "consider setting a higher `timeout` value when initializing GeminiClient."
+                 "The request timed out while waiting for Gemini to respond. This often happens with very long prompts "
+                 "or complex file analysis. Try increasing the 'timeout' value when initializing GeminiClient."
              )

-         # ? Seems like batch execution will immediately invalidate the current access token,
-         # ? causing the next request to fail with 401 Unauthorized.
          if response.status_code != 200:
              await self.close()
              raise APIError(
                  f"Batch execution failed with status code {response.status_code}"
              )

+         if self.client:
+             self.cookies.update(self.client.cookies)
+
          return response

@@ -653,7 +938,18 @@ class ChatSession:
          model: Model | str | dict = Model.UNSPECIFIED,
          gem: Gem | str | None = None,
      ):
-         self.__metadata: list[str | None] = [None, None, None]
+         self.__metadata: list[str | None] = [
+             "",
+             "",
+             "",
+             None,
+             None,
+             None,
+             None,
+             None,
+             None,
+             "",
+         ]
          self.geminiclient: GeminiClient = geminiclient
          self.last_output: ModelOutput | None = None
          self.model: Model | str | dict = model
@@ -728,6 +1024,43 @@ class ChatSession:
              **kwargs,
          )

+     async def send_message_stream(
+         self,
+         prompt: str,
+         files: list[str | Path] | None = None,
+         **kwargs,
+     ) -> AsyncGenerator[ModelOutput, None]:
+         """
+         Generates contents with prompt in streaming mode within this chat session.
+
+         This is a shortcut for `GeminiClient.generate_content_stream(prompt, files, self)`.
+         The session's metadata and conversation history are automatically managed.
+
+         Parameters
+         ----------
+         prompt: `str`
+             Prompt provided by user.
+         files: `list[str | Path]`, optional
+             List of file paths to be attached.
+         kwargs: `dict`, optional
+             Additional arguments passed to the streaming request.
+
+         Yields
+         ------
+         :class:`ModelOutput`
+             Partial output data containing text deltas.
+         """
+
+         async for output in self.geminiclient.generate_content_stream(
+             prompt=prompt,
+             files=files,
+             model=self.model,
+             gem=self.gem,
+             chat=self,
+             **kwargs,
+         ):
+             yield output
+
      def choose_candidate(self, index: int) -> ModelOutput:
          """
          Choose a candidate from the last `ModelOutput` to control the ongoing conversation flow.
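
A session-level sketch of the same streaming flow (prompts are illustrative); metadata such as `cid`/`rcid` captured while streaming is reused by later calls on the same `ChatSession`:

    async def chat_demo(client: GeminiClient):
        chat = client.start_chat()
        async for chunk in chat.send_message_stream("Hi, who are you?"):
            print(chunk.candidates[0].text_delta, end="", flush=True)
        print()
        # Follow-up requests reuse the metadata captured during streaming.
        followup = await chat.send_message("Summarize that in one line.")
        print(followup.text)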
@@ -766,9 +1099,13 @@ class ChatSession:

      @metadata.setter
      def metadata(self, value: list[str]):
-         if len(value) > 3:
-             raise ValueError("metadata cannot exceed 3 elements")
-         self.__metadata[: len(value)] = value
+         if not isinstance(value, list):
+             return
+
+         # Update only non-None elements to preserve existing CID/RID/RCID/Context
+         for i, val in enumerate(value):
+             if i < 10 and val is not None:
+                 self.__metadata[i] = val

      @property
      def cid(self):
@@ -778,14 +1115,6 @@ class ChatSession:
      def cid(self, value: str):
          self.__metadata[0] = value

-     @property
-     def rid(self):
-         return self.__metadata[1]
-
-     @rid.setter
-     def rid(self, value: str):
-         self.__metadata[1] = value
-
      @property
      def rcid(self):
          return self.__metadata[2]
@@ -793,3 +1122,11 @@ class ChatSession:
      @rcid.setter
      def rcid(self, value: str):
          self.__metadata[2] = value
+
+     @property
+     def rid(self):
+         return self.__metadata[1]
+
+     @rid.setter
+     def rid(self, value: str):
+         self.__metadata[1] = value