gemini-webapi 1.13.0__py3-none-any.whl → 1.14.1__py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.
gemini_webapi/__init__.py CHANGED
@@ -1,4 +1,6 @@
- from .client import GeminiClient, ChatSession # noqa: F401
- from .exceptions import * # noqa: F401, F403
- from .types import * # noqa: F401, F403
- from .utils import set_log_level, logger # noqa: F401
+ # flake8: noqa
+
+ from .client import GeminiClient, ChatSession
+ from .exceptions import *
+ from .types import *
+ from .utils import set_log_level, logger
gemini_webapi/client.py CHANGED
@@ -1,11 +1,12 @@
  import asyncio
  import functools
- import json
+ import itertools
  import re
  from asyncio import Task
  from pathlib import Path
  from typing import Any, Optional

+ import orjson as json
  from httpx import AsyncClient, ReadTimeout

  from .constants import Endpoint, ErrorCode, Headers, Model
@@ -19,7 +20,7 @@ from .exceptions import (
  ModelInvalid,
  TemporarilyBlocked,
  )
- from .types import WebImage, GeneratedImage, Candidate, ModelOutput
+ from .types import WebImage, GeneratedImage, Candidate, ModelOutput, Gem, GemJar
  from .utils import (
  upload_file,
  parse_file_name,
@@ -116,6 +117,7 @@ class GeminiClient:
  "close_task",
  "auto_refresh",
  "refresh_interval",
+ "_gems",
  "kwargs",
  ]

@@ -131,12 +133,13 @@ class GeminiClient:
  self.running: bool = False
  self.client: AsyncClient | None = None
  self.access_token: str | None = None
- self.timeout: float = 30
+ self.timeout: float = 300
  self.auto_close: bool = False
  self.close_delay: float = 300
  self.close_task: Task | None = None
  self.auto_refresh: bool = True
  self.refresh_interval: float = 540
+ self._gems: GemJar | None = None
  self.kwargs = kwargs

  # Validate cookies
@@ -274,12 +277,131 @@ class GeminiClient:
  self.cookies["__Secure-1PSIDTS"] = new_1psidts
  await asyncio.sleep(self.refresh_interval)

+ @property
+ def gems(self) -> GemJar:
+ """
+ Returns a `GemJar` object containing cached gems.
+ Only available after calling `GeminiClient.fetch_gems()`.
+
+ Returns
+ -------
+ :class:`GemJar`
+ Refer to `gemini_webapi.types.GemJar`.
+
+ Raises
+ ------
+ `RuntimeError`
+ If `GeminiClient.fetch_gems()` has not been called before accessing this property.
+ """
+
+ if self._gems is None:
+ raise RuntimeError(
+ "Gems not fetched yet. Call `GeminiClient.fetch_gems()` method to fetch gems from gemini.google.com."
+ )
+
+ return self._gems
+
+ @running(retry=2)
+ async def fetch_gems(self, **kwargs) -> GemJar:
+ """
+ Get a list of available gems from Gemini, including system predefined gems and user-created custom gems.
+
+ Note that a network request will be sent every time this method is called.
+ Once the gems are fetched, they will be cached and accessible via `GeminiClient.gems` property.
+
+ Returns
+ -------
+ :class:`GemJar`
+ Refer to `gemini_webapi.types.GemJar`.
+ """
+
+ try:
+ response = await self.client.post(
+ Endpoint.BATCH_EXEC,
+ data={
+ "at": self.access_token,
+ "f.req": json.dumps(
+ [
+ [
+ ["CNgdBe", '[2,["en"],0]', None, "custom"],
+ ["CNgdBe", '[3,["en"],0]', None, "system"],
+ ]
+ ]
+ ).decode(),
+ },
+ **kwargs,
+ )
+ except ReadTimeout:
+ raise TimeoutError(
+ "Fetch gems request timed out, please try again. If the problem persists, "
+ "consider setting a higher `timeout` value when initializing GeminiClient."
+ )
+
+ if response.status_code != 200:
+ raise APIError(
+ f"Failed to fetch gems. Request failed with status code {response.status_code}"
+ )
+ else:
+ try:
+ response_json = json.loads(response.text.split("\n")[2])
+
+ predefined_gems, custom_gems = [], []
+
+ for part in response_json:
+ if part[-1] == "system":
+ predefined_gems = json.loads(part[2])[2]
+ elif part[-1] == "custom":
+ if custom_gems_container := json.loads(part[2]):
+ custom_gems = custom_gems_container[2]
+
+ if not predefined_gems and not custom_gems:
+ raise Exception
+ except Exception:
+ logger.debug(f"Invalid response: {response.text}")
+ raise APIError(
+ "Failed to fetch gems. Invalid response data received. Client will try to re-initialize on next request."
+ )
+
+ self._gems = GemJar(
+ itertools.chain(
+ (
+ (
+ gem[0],
+ Gem(
+ id=gem[0],
+ name=gem[1][0],
+ description=gem[1][1],
+ prompt=gem[2] and gem[2][0] or None,
+ predefined=True,
+ ),
+ )
+ for gem in predefined_gems
+ ),
+ (
+ (
+ gem[0],
+ Gem(
+ id=gem[0],
+ name=gem[1][0],
+ description=gem[1][1],
+ prompt=gem[2] and gem[2][0] or None,
+ predefined=False,
+ ),
+ )
+ for gem in custom_gems
+ ),
+ )
+ )
+
+ return self._gems
+
  @running(retry=2)
  async def generate_content(
  self,
  prompt: str,
  files: list[str | Path] | None = None,
  model: Model | str = Model.UNSPECIFIED,
+ gem: Gem | str | None = None,
  chat: Optional["ChatSession"] = None,
  **kwargs,
  ) -> ModelOutput:
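A rough usage sketch for the `fetch_gems()` / `gems` pair added in the hunk above (not part of the package; it assumes an authenticated client, and the cookie values are placeholders):

```python
import asyncio

from gemini_webapi import GeminiClient


async def main():
    # Placeholder cookies; real values come from a logged-in gemini.google.com session.
    client = GeminiClient("<__Secure-1PSID>", "<__Secure-1PSIDTS>")
    await client.init()

    # fetch_gems() sends one batchexecute request and caches the parsed result,
    # so later reads can go through the `gems` property without another request.
    await client.fetch_gems()
    for gem in client.gems.filter(predefined=True):
        print(gem.id, gem.name)


asyncio.run(main())
```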
@@ -295,6 +417,9 @@ class GeminiClient:
  model: `Model` | `str`, optional
  Specify the model to use for generation.
  Pass either a `gemini_webapi.constants.Model` enum or a model name string.
+ gem: `Gem | str`, optional
+ Specify a gem to use as system prompt for the chat session.
+ Pass either a `gemini_webapi.types.Gem` object or a gem id string.
  chat: `ChatSession`, optional
  Chat data to retrieve conversation history. If None, will automatically generate a new chat id when sending post request.
  kwargs: `dict`, optional
@@ -325,6 +450,9 @@ class GeminiClient:
  if not isinstance(model, Model):
  model = Model.from_name(model)

+ if isinstance(gem, Gem):
+ gem = gem.id
+
  if self.auto_close:
  await self.reset_close_task()

@@ -356,15 +484,17 @@ class GeminiClient:
  None,
  chat and chat.metadata,
  ]
- ),
+ + (gem and [None] * 16 + [gem] or [])
+ ).decode(),
  ]
- ),
+ ).decode(),
  },
  **kwargs,
  )
  except ReadTimeout:
  raise TimeoutError(
- "Request timed out, please try again. If the problem persists, consider setting a higher `timeout` value when initializing GeminiClient."
+ "Generate content request timed out, please try again. If the problem persists, "
+ "consider setting a higher `timeout` value when initializing GeminiClient."
  )

  if response.status_code != 200:
@@ -554,6 +684,9 @@ class ChatSession:
  model: `Model` | `str`, optional
  Specify the model to use for generation.
  Pass either a `gemini_webapi.constants.Model` enum or a model name string.
+ gem: `Gem | str`, optional
+ Specify a gem to use as system prompt for the chat session.
+ Pass either a `gemini_webapi.types.Gem` object or a gem id string.
  """

  __slots__ = [
@@ -561,6 +694,7 @@ class ChatSession:
  "geminiclient",
  "last_output",
  "model",
+ "gem",
  ]

  def __init__(
@@ -571,11 +705,13 @@ class ChatSession:
  rid: str | None = None, # reply id
  rcid: str | None = None, # reply candidate id
  model: Model | str = Model.UNSPECIFIED,
+ gem: Gem | str | None = None,
  ):
  self.__metadata: list[str | None] = [None, None, None]
  self.geminiclient: GeminiClient = geminiclient
  self.last_output: ModelOutput | None = None
- self.model = model
+ self.model: Model | str = model
+ self.gem: Gem | str | None = gem

  if metadata:
  self.metadata = metadata
@@ -638,7 +774,12 @@ class ChatSession:
  """

  return await self.geminiclient.generate_content(
- prompt=prompt, files=files, model=self.model, chat=self, **kwargs
+ prompt=prompt,
+ files=files,
+ model=self.model,
+ gem=self.gem,
+ chat=self,
+ **kwargs,
  )

  def choose_candidate(self, index: int) -> ModelOutput:
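A companion sketch for the new `gem` parameter threaded through `generate_content` and `ChatSession` above (again a sketch, not package code; the "coding-partner" id is the predefined gem used in the README example further down):

```python
import asyncio

from gemini_webapi import GeminiClient


async def main():
    client = GeminiClient("<__Secure-1PSID>", "<__Secure-1PSIDTS>")  # placeholder cookies
    await client.init()
    await client.fetch_gems()

    # Either a Gem object or its raw id string is accepted.
    coding_partner = client.gems.get(id="coding-partner")

    # One-shot generation with the gem acting as the system prompt...
    response = await client.generate_content("Review this function for me.", gem=coding_partner)
    print(response.text)

    # ...or a whole chat session pinned to the same gem.
    chat = client.start_chat(gem=coding_partner.id)
    reply = await chat.send_message("And what about error handling?")
    print(reply.text)


asyncio.run(main())
```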
gemini_webapi/constants.py CHANGED
@@ -1,11 +1,13 @@
- from enum import Enum
+ from enum import Enum, IntEnum, StrEnum


- class Endpoint(Enum):
+ class Endpoint(StrEnum):
+ GOOGLE = "https://www.google.com"
  INIT = "https://gemini.google.com/app"
  GENERATE = "https://gemini.google.com/_/BardChatUi/data/assistant.lamda.BardFrontendService/StreamGenerate"
  ROTATE_COOKIES = "https://accounts.google.com/RotateCookies"
  UPLOAD = "https://content-push.googleapis.com/upload"
+ BATCH_EXEC = "https://gemini.google.com/_/BardChatUi/data/batchexecute"


  class Headers(Enum):
@@ -45,16 +47,6 @@ class Model(Enum):
  {"x-goog-ext-525001261-jspb": '[null,null,null,null,"7ca48d02d802f20a"]'},
  False,
  ) # Deprecated
- G_2_0_EXP_ADVANCED = (
- "gemini-2.0-exp-advanced",
- {"x-goog-ext-525001261-jspb": '[null,null,null,null,"b1e46a6037e6aa9f"]'},
- True,
- ) # Deprecated
- G_2_5_EXP_ADVANCED = (
- "gemini-2.5-exp-advanced",
- {"x-goog-ext-525001261-jspb": '[null,null,null,null,"203e6bb81620bcfe"]'},
- True,
- ) # Deprecated

  def __init__(self, name, header, advanced_only):
  self.model_name = name
@@ -71,7 +63,7 @@ class Model(Enum):
  )


- class ErrorCode(Enum):
+ class ErrorCode(IntEnum):
  """
  Known error codes returned from server.
  """
gemini_webapi/types/__init__.py CHANGED
@@ -1,3 +1,6 @@
- from .image import Image, WebImage, GeneratedImage # noqa: F401
- from .candidate import Candidate # noqa: F401
- from .modeloutput import ModelOutput # noqa: F401
+ # flake8: noqa
+
+ from .candidate import Candidate
+ from .gem import Gem, GemJar
+ from .image import Image, WebImage, GeneratedImage
+ from .modeloutput import ModelOutput
gemini_webapi/types/gem.py ADDED
@@ -0,0 +1,132 @@
+ from pydantic import BaseModel
+
+
+ class Gem(BaseModel):
+ """
+ Reusable Gemini Gem object working as a system prompt, providing additional context to the model.
+ Gemini provides a set of predefined gems, and users can create custom gems as well.
+
+ Parameters
+ ----------
+ id: `str`
+ Unique identifier for the gem.
+ name: `str`
+ User-friendly name of the gem.
+ description: `str`, optional
+ Brief description of the gem's purpose or content.
+ prompt: `str`, optional
+ The system prompt text that the gem provides to the model.
+ predefined: `bool`
+ Indicates whether the gem is predefined by Gemini or created by the user.
+ """
+
+ id: str
+ name: str
+ description: str | None = None
+ prompt: str | None = None
+ predefined: bool
+
+ def __str__(self) -> str:
+ return (
+ f"Gem(id='{self.id}', name='{self.name}', description='{self.description}', "
+ f"prompt='{self.prompt}', predefined={self.predefined})"
+ )
+
+
+ class GemJar(dict[str, Gem]):
+ """
+ Helper class for handling a collection of `Gem` objects, stored by their ID.
+ This class extends `dict` to allow retrieving gems with extra filtering options.
+ """
+
+ def __iter__(self):
+ """
+ Iterate over the gems in the jar.
+ """
+
+ return self.values().__iter__()
+
+ def get(
+ self, id: str | None = None, name: str | None = None, default: Gem | None = None
+ ) -> Gem | None:
+ """
+ Retrieves a gem by its id and/or name.
+ If both id and name are provided, returns the gem that matches both id and name.
+ If only id is provided, it's a direct lookup.
+ If only name is provided, it searches through the gems.
+
+ Parameters
+ ----------
+ id: `str`, optional
+ The unique identifier of the gem to retrieve.
+ name: `str`, optional
+ The user-friendly name of the gem to retrieve.
+ default: `Gem`, optional
+ The default value to return if no matching gem is found.
+
+ Returns
+ -------
+ `Gem` | None
+ The matching gem if found, otherwise return the default value.
+
+ Raises
+ ------
+ `AssertionError`
+ If neither id nor name is provided.
+ """
+
+ assert not (
+ id is None and name is None
+ ), "At least one of gem id or name must be provided."
+
+ if id is not None:
+ gem_candidate = super().get(id)
+ if gem_candidate:
+ if name is not None:
+ if gem_candidate.name == name:
+ return gem_candidate
+ else:
+ return default
+ else:
+ return gem_candidate
+ else:
+ return default
+ elif name is not None:
+ for gem_obj in self.values():
+ if gem_obj.name == name:
+ return gem_obj
+ return default
+
+ # Should be unreachable due to the assertion.
+ return default
+
+ def filter(
+ self, predefined: bool | None = None, name: str | None = None
+ ) -> "GemJar":
+ """
+ Returns a new `GemJar` containing gems that match the given filters.
+
+ Parameters
+ ----------
+ predefined: `bool`, optional
+ If provided, filters gems by whether they are predefined (True) or user-created (False).
+ name: `str`, optional
+ If provided, filters gems by their name (exact match).
+
+ Returns
+ -------
+ `GemJar`
+ A new `GemJar` containing the filtered gems. Can be empty if no gems match the criteria.
+ """
+
+ filtered_gems = GemJar()
+
+ for gem_id, gem in self.items():
+ if predefined is not None and gem.predefined != predefined:
+ continue
+ if name is not None and gem.name != name:
+ continue
+
+ filtered_gems[gem_id] = gem
+
+ return GemJar(filtered_gems)
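Since `GemJar` is new in this release, a small self-contained sketch of the lookup semantics defined above may help (the gems constructed here are made up for illustration):

```python
from gemini_webapi.types import Gem, GemJar

# Hypothetical gems, keyed by id exactly as GeminiClient.fetch_gems() stores them.
jar = GemJar(
    {
        "coding-partner": Gem(id="coding-partner", name="Coding partner", predefined=True),
        "abc123": Gem(id="abc123", name="My reviewer", prompt="You review PRs.", predefined=False),
    }
)

print(jar.get(id="abc123"))               # direct dict lookup by id
print(jar.get(name="Coding partner"))     # linear search by name
print(jar.get(id="abc123", name="Nope"))  # id matches but name does not -> default (None)
print(jar.filter(predefined=False))       # new GemJar holding only user-created gems
```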
gemini_webapi/types/image.py CHANGED
@@ -33,7 +33,7 @@ class Image(BaseModel):
  return f"{self.title}({self.url}) - {self.alt}"

  def __repr__(self):
- return f"""Image(title='{self.title}', url='{len(self.url) <= 20 and self.url or self.url[:8] + '...' + self.url[-12:]}', alt='{self.alt}')"""
+ return f"Image(title='{self.title}', url='{len(self.url) <= 20 and self.url or self.url[:8] + '...' + self.url[-12:]}', alt='{self.alt}')"

  async def save(
  self,
gemini_webapi/utils/__init__.py CHANGED
@@ -1,10 +1,12 @@
+ # flake8: noqa
+
  from asyncio import Task

- from .upload_file import upload_file, parse_file_name # noqa: F401
- from .rotate_1psidts import rotate_1psidts # noqa: F401
- from .get_access_token import get_access_token # noqa: F401
- from .load_browser_cookies import load_browser_cookies # noqa: F401
- from .logger import logger, set_log_level # noqa: F401
+ from .upload_file import upload_file, parse_file_name
+ from .rotate_1psidts import rotate_1psidts
+ from .get_access_token import get_access_token
+ from .load_browser_cookies import load_browser_cookies
+ from .logger import logger, set_log_level


  rotate_tasks: dict[str, Task] = {}
gemini_webapi/utils/get_access_token.py CHANGED
@@ -11,6 +11,26 @@ from .load_browser_cookies import load_browser_cookies
  from .logger import logger


+ async def send_request(
+ cookies: dict, proxy: str | None = None
+ ) -> tuple[Response | None, dict]:
+ """
+ Send http request with provided cookies.
+ """
+
+ async with AsyncClient(
+ http2=True,
+ proxy=proxy,
+ headers=Headers.GEMINI.value,
+ cookies=cookies,
+ follow_redirects=True,
+ verify=False,
+ ) as client:
+ response = await client.get(Endpoint.INIT.value)
+ response.raise_for_status()
+ return response, cookies
+
+
  async def get_access_token(
  base_cookies: dict, proxy: str | None = None, verbose: bool = False
  ) -> tuple[str, dict]:
@@ -45,23 +65,23 @@ async def get_access_token(
  If all requests failed.
  """

- async def send_request(cookies: dict) -> tuple[Response | None, dict]:
- async with AsyncClient(
- http2=True,
- proxy=proxy,
- headers=Headers.GEMINI.value,
- cookies=cookies,
- follow_redirects=True,
- ) as client:
- response = await client.get(Endpoint.INIT.value)
- response.raise_for_status()
- return response, cookies
+ async with AsyncClient(
+ http2=True,
+ proxy=proxy,
+ follow_redirects=True,
+ verify=False,
+ ) as client:
+ response = await client.get(Endpoint.GOOGLE.value)
+
+ extra_cookies = {}
+ if response.status_code == 200:
+ extra_cookies = response.cookies

  tasks = []

  # Base cookies passed directly on initializing client
  if "__Secure-1PSID" in base_cookies and "__Secure-1PSIDTS" in base_cookies:
- tasks.append(Task(send_request(base_cookies)))
+ tasks.append(Task(send_request({**extra_cookies, **base_cookies}, proxy=proxy)))
  elif verbose:
  logger.debug(
  "Skipping loading base cookies. Either __Secure-1PSID or __Secure-1PSIDTS is not provided."
@@ -75,8 +95,12 @@ async def get_access_token(
  if cache_file.is_file():
  cached_1psidts = cache_file.read_text()
  if cached_1psidts:
- cached_cookies = {**base_cookies, "__Secure-1PSIDTS": cached_1psidts}
- tasks.append(Task(send_request(cached_cookies)))
+ cached_cookies = {
+ **extra_cookies,
+ **base_cookies,
+ "__Secure-1PSIDTS": cached_1psidts,
+ }
+ tasks.append(Task(send_request(cached_cookies, proxy=proxy)))
  elif verbose:
  logger.debug("Skipping loading cached cookies. Cache file is empty.")
  elif verbose:
@@ -88,10 +112,11 @@ async def get_access_token(
  cached_1psidts = cache_file.read_text()
  if cached_1psidts:
  cached_cookies = {
+ **extra_cookies,
  "__Secure-1PSID": cache_file.stem[16:],
  "__Secure-1PSIDTS": cached_1psidts,
  }
- tasks.append(Task(send_request(cached_cookies)))
+ tasks.append(Task(send_request(cached_cookies, proxy=proxy)))
  valid_caches += 1

  if valid_caches == 0 and verbose:
@@ -108,7 +133,9 @@ async def get_access_token(
  local_cookies = {"__Secure-1PSID": secure_1psid}
  if secure_1psidts := browser_cookies.get("__Secure-1PSIDTS"):
  local_cookies["__Secure-1PSIDTS"] = secure_1psidts
- tasks.append(Task(send_request(local_cookies)))
+ if nid := browser_cookies.get("NID"):
+ local_cookies["NID"] = nid
+ tasks.append(Task(send_request(local_cookies, proxy=proxy)))
  elif verbose:
  logger.debug(
  "Skipping loading local browser cookies. Login to gemini.google.com in your browser first."
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: gemini-webapi
- Version: 1.13.0
+ Version: 1.14.1
  Summary: ✨ An elegant async Python wrapper for Google Gemini web app
  Author: UZQueen
  License: GNU AFFERO GENERAL PUBLIC LICENSE
@@ -678,7 +678,8 @@ Description-Content-Type: text/markdown
  License-File: LICENSE
  Requires-Dist: httpx[http2]~=0.28.1
  Requires-Dist: loguru~=0.7.3
- Requires-Dist: pydantic~=2.11.3
+ Requires-Dist: orjson~=3.10.18
+ Requires-Dist: pydantic~=2.11.5
  Dynamic: license-file

  <p align="center">
@@ -713,8 +714,9 @@ A reverse-engineered asynchronous python wrapper for [Google Gemini](https://gem
  - **Persistent Cookies** - Automatically refreshes cookies in background. Optimized for always-on services.
  - **Image Generation** - Natively supports generating and modifying images with natural language.
+ - **System Prompt** - Supports customizing model's system prompt with [Gemini Gems](https://gemini.google.com/gems/view).
  - **Extension Support** - Supports generating contents with [Gemini extensions](https://gemini.google.com/extensions) on, like YouTube and Gmail.
- - **Classified Outputs** - Automatically categorizes texts, web images and AI generated images in the response.
+ - **Classified Outputs** - Categorizes texts, thoughts, web images and AI generated images in the response.
  - **Official Flavor** - Provides a simple and elegant interface inspired by [Google Generative AI](https://ai.google.dev/tutorials/python_quickstart)'s official API.
  - **Asynchronous** - Utilizes `asyncio` to run generating tasks and return outputs efficiently.

@@ -726,14 +728,15 @@ A reverse-engineered asynchronous python wrapper for [Google Gemini](https://gem
  - [Authentication](#authentication)
  - [Usage](#usage)
  - [Initialization](#initialization)
- - [Select language model](#select-language-model)
- - [Generate contents from text](#generate-contents-from-text)
+ - [Generate contents](#generate-contents)
  - [Generate contents with files](#generate-contents-with-files)
  - [Conversations across multiple turns](#conversations-across-multiple-turns)
  - [Continue previous conversations](#continue-previous-conversations)
+ - [Select language model](#select-language-model)
+ - [Apply system prompt with Gemini Gems](#apply-system-prompt-with-gemini-gems)
  - [Retrieve model's thought process](#retrieve-models-thought-process)
  - [Retrieve images in response](#retrieve-images-in-response)
- - [Generate images with Imagen3](#generate-images-with-imagen3)
+ - [Generate images with Imagen4](#generate-images-with-imagen4)
  - [Generate contents with Gemini extensions](#generate-contents-with-gemini-extensions)
  - [Check and switch to other reply candidates](#check-and-switch-to-other-reply-candidates)
  - [Logging Configuration](#logging-configuration)
@@ -748,13 +751,13 @@ A reverse-engineered asynchronous python wrapper for [Google Gemini](https://gem

  Install/update the package with pip.

- ```bash
+ ```sh
  pip install -U gemini_webapi
  ```

  Optionally, package offers a way to automatically import cookies from your local browser. To enable this feature, install `browser-cookie3` as well. Supported platforms and browsers can be found [here](https://github.com/borisbabic/browser_cookie3?tab=readme-ov-file#contribute).

- ```bash
+ ```sh
  pip install -U browser-cookie3
  ```

@@ -816,43 +819,9 @@ asyncio.run(main())
  >
  > `auto_close` and `close_delay` are optional arguments for automatically closing the client after a certain period of inactivity. This feature is disabled by default. In an always-on service like chatbot, it's recommended to set `auto_close` to `True` combined with reasonable seconds of `close_delay` for better resource management.

- ### Select language model
-
- You can specify which language model to use by passing `model` argument to `GeminiClient.generate_content` or `GeminiClient.start_chat`. The default value is `unspecified`.
+ ### Generate contents

- Currently available models (as of Feb 5, 2025):
-
- - `unspecified` - Default model
- - `gemini-2.0-flash` - Gemini 2.0 Flash
- - `gemini-2.0-flash-thinking` - Gemini 2.0 Flash Thinking Experimental
- - `gemini-2.5-flash` - Gemini 2.5 Flash
- - `gemini-2.5-pro` - Gemini 2.5 Pro (daily usage limit imposed)
-
- Models pending update (may not work as expected):
-
- - `gemini-2.5-exp-advanced` - Gemini 2.5 Experimental Advanced **(requires Gemini Advanced account)**
- - `gemini-2.0-exp-advanced` - Gemini 2.0 Experimental Advanced **(requires Gemini Advanced account)**
-
- ```python
- from gemini_webapi.constants import Model
-
- async def main():
- response1 = await client.generate_content(
- "What's you language model version? Reply version number only.",
- model=Model.G_2_0_FLASH,
- )
- print(f"Model version ({Model.G_2_0_FLASH.model_name}): {response1.text}")
-
- chat = client.start_chat(model="gemini-2.0-flash-thinking")
- response2 = await chat.send_message("What's you language model version? Reply version number only.")
- print(f"Model version (gemini-2.0-flash-thinking): {response2.text}")
-
- asyncio.run(main())
- ```
-
- ### Generate contents from text
-
- Ask a one-turn quick question by calling `GeminiClient.generate_content`.
+ Ask a single-turn question by calling `GeminiClient.generate_content`, which returns a `gemini_webapi.ModelOutput` object containing the generated text, images, thoughts, and conversation metadata.

  ```python
  async def main():
@@ -883,7 +852,7 @@

  ### Conversations across multiple turns

- If you want to keep conversation continuous, please use `GeminiClient.start_chat` to create a `ChatSession` object and send messages through it. The conversation history will be automatically handled and get updated after each turn.
+ If you want to keep conversation continuous, please use `GeminiClient.start_chat` to create a `gemini_webapi.ChatSession` object and send messages through it. The conversation history will be automatically handled and get updated after each turn.

  ```python
  async def main():
@@ -926,6 +895,70 @@ async def main():
  asyncio.run(main())
  ```

+ ### Select language model
+
+ You can specify which language model to use by passing `model` argument to `GeminiClient.generate_content` or `GeminiClient.start_chat`. The default value is `unspecified`.
+
+ Currently available models (as of June 12, 2025):
+
+ - `unspecified` - Default model
+ - `gemini-2.5-flash` - Gemini 2.5 Flash
+ - `gemini-2.5-pro` - Gemini 2.5 Pro (daily usage limit imposed)
+
+ Deprecated models (yet still working):
+
+ - `gemini-2.0-flash` - Gemini 2.0 Flash
+ - `gemini-2.0-flash-thinking` - Gemini 2.0 Flash Thinking
+
+ ```python
+ from gemini_webapi.constants import Model
+
+ async def main():
+ response1 = await client.generate_content(
+ "What's your language model version? Reply version number only.",
+ model=Model.G_2_5_FLASH,
+ )
+ print(f"Model version ({Model.G_2_5_FLASH.model_name}): {response1.text}")
+
+ chat = client.start_chat(model="gemini-2.5-pro")
+ response2 = await chat.send_message("What's your language model version? Reply version number only.")
+ print(f"Model version (gemini-2.5-pro): {response2.text}")
+
+ asyncio.run(main())
+ ```
+
+ ### Apply system prompt with Gemini Gems
+
+ System prompt can be applied to conversations via [Gemini Gems](https://gemini.google.com/gems/view). To use a gem, you can pass `gem` argument to `GeminiClient.generate_content` or `GeminiClient.start_chat`. `gem` can be either a string of gem id or a `gemini_webapi.Gem` object. Only one gem can be applied to a single conversation.
+
+ ```python
+ async def main():
+ # Fetch all gems for the current account, including both predefined and user-created ones
+ await client.fetch_gems()
+
+ # Once fetched, gems will be cached in `GeminiClient.gems`
+ gems = client.gems
+
+ # Get the gem you want to use
+ system_gems = gems.filter(predefined=True)
+ coding_partner = system_gems.get(id="coding-partner")
+
+ response1 = await client.generate_content(
+ "what's your system prompt?",
+ model=Model.G_2_5_FLASH,
+ gem=coding_partner,
+ )
+ print(response1.text)
+
+ # Another example with a user-created custom gem
+ # Gem ids are consistent strings. Store them somewhere to avoid fetching gems every time
+ your_gem = gems.get(name="Your Gem Name")
+ your_gem_id = your_gem.id
+ chat = client.start_chat(gem=your_gem_id)
+ response2 = await chat.send_message("what's your system prompt?")
+ print(response2)
+ ```
+
  ### Retrieve model's thought process

  When using models with thinking capabilities, the model's thought process will be populated in `ModelOutput.thoughts`.
@@ -933,7 +966,7 @@ When using models with thinking capabilities, the model's thought process will b
  ```python
  async def main():
  response = await client.generate_content(
- "What's 1+1?", model="gemini-2.0-flash-thinking"
+ "What's 1+1?", model="gemini-2.5-pro"
  )
  print(response.thoughts)
  print(response.text)
@@ -943,7 +976,7 @@ asyncio.run(main())

  ### Retrieve images in response

- Images in the API's output are stored as a list of `Image` objects. You can access the image title, URL, and description by calling `image.title`, `image.url` and `image.alt` respectively.
+ Images in the API's output are stored as a list of `gemini_webapi.Image` objects. You can access the image title, URL, and description by calling `Image.title`, `Image.url` and `Image.alt` respectively.

  ```python
  async def main():
@@ -954,9 +987,9 @@ async def main():
  asyncio.run(main())
  ```

- ### Generate images with Imagen3
+ ### Generate images with Imagen4

- You can ask Gemini to generate and modify images with Imagen3, Google's latest AI image generator, simply by natural language.
+ You can ask Gemini to generate and modify images with Imagen4, Google's latest AI image generator, simply by natural language.

  > [!IMPORTANT]
  >
@@ -1015,7 +1048,7 @@ asyncio.run(main())

  ### Check and switch to other reply candidates

- A response from Gemini usually contains multiple reply candidates with different generated contents. You can check all candidates and choose one to continue the conversation. By default, the first candidate will be chosen automatically.
+ A response from Gemini sometimes contains multiple reply candidates with different generated contents. You can check all candidates and choose one to continue the conversation. By default, the first candidate will be chosen.

  ```python
  async def main():
@@ -0,0 +1,20 @@
+ gemini_webapi/__init__.py,sha256=7ELCiUoI10ea3daeJxnv0UwqLVKpM7rxsgOZsPMstO8,150
+ gemini_webapi/client.py,sha256=9t_ytH4ClkjJWUwvLJ6KyArSRbNeGxd2MJVyPUmBIUA,30333
+ gemini_webapi/constants.py,sha256=rkugkck9qXW_f-9dc0XXEjVIzJ9Ja4gHBVYDP71BESg,2352
+ gemini_webapi/exceptions.py,sha256=qkXrIpr0L7LtGbq3VcTO8D1xZ50pJtt0dDRp5I3uDSg,1038
+ gemini_webapi/types/__init__.py,sha256=Xap_FGOKOOC9mU3bmp_VT_1pQgFMuqbys_fOcyvTnuE,166
+ gemini_webapi/types/candidate.py,sha256=67BhY75toE5mVuB21cmHcTFtw332V_KmCjr3-9VTbJo,1477
+ gemini_webapi/types/gem.py,sha256=3Ppjq9V22Zp4Lb9a9ZnDviDKQpfSQf8UZxqOEjeEWd4,4070
+ gemini_webapi/types/image.py,sha256=MflOex2tAxBF5zQkYTGTR78CuiqH3Wa6KxMKKXr5Yvo,5233
+ gemini_webapi/types/modeloutput.py,sha256=h07kQOkL5r-oPLvZ59uVtO1eP4FGy5ZpzuYQzAeQdr8,1196
+ gemini_webapi/utils/__init__.py,sha256=cJ9HQYxr8l0CsY61TFlho-5DdPxCaEOpRfjxPX-eils,320
+ gemini_webapi/utils/get_access_token.py,sha256=eNn1omFO41wWXco1eM-KXR2CEi0Tb-chlph7H-PCNjg,6137
+ gemini_webapi/utils/load_browser_cookies.py,sha256=A5n_VsB7Rm8ck5lpy856UNJEhv30l3dvQ3j0g3ln1fE,1535
+ gemini_webapi/utils/logger.py,sha256=0VcxhVLhHBRDQutNCpapP1y_MhPoQ2ud1uIFLqxC3Z8,958
+ gemini_webapi/utils/rotate_1psidts.py,sha256=NyQ9OYPLBOcvpc8bodvEYDIVFrsYN0kdfc831lPEctM,1680
+ gemini_webapi/utils/upload_file.py,sha256=SJOMr6kryK_ClrKmqI96fqZBNFOMPsyAvFINAGAU3rk,1468
+ gemini_webapi-1.14.1.dist-info/licenses/LICENSE,sha256=hIahDEOTzuHCU5J2nd07LWwkLW7Hko4UFO__ffsvB-8,34523
+ gemini_webapi-1.14.1.dist-info/METADATA,sha256=Qlym2uwFz03bgKIed0UtBmLMGXfpJwJ8j-2qMraLgJw,58925
+ gemini_webapi-1.14.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ gemini_webapi-1.14.1.dist-info/top_level.txt,sha256=dtWtug_ZrmnUqCYuu8NmGzTgWglHeNzhHU_hXmqZGWE,14
+ gemini_webapi-1.14.1.dist-info/RECORD,,
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (80.8.0)
+ Generator: setuptools (80.9.0)
  Root-Is-Purelib: true
  Tag: py3-none-any

@@ -1,19 +0,0 @@
- gemini_webapi/__init__.py,sha256=UBrwmqT-7nWrEr90xz78KqWqMIdapIdjkUyYEZsIktc,202
- gemini_webapi/client.py,sha256=wyLDWl-CXLg4YN13HqWj8TKX-IzAzArDP8Jwofppj18,25396
- gemini_webapi/constants.py,sha256=2KKPqz0r3DKYuxQhwCQpTBQmp9sqFMrH88n3wG41V7g,2572
- gemini_webapi/exceptions.py,sha256=qkXrIpr0L7LtGbq3VcTO8D1xZ50pJtt0dDRp5I3uDSg,1038
- gemini_webapi/types/__init__.py,sha256=d2kvXnE004s2E2KDmPPLi5N-BQ59FgDSlrGrO3Wphww,163
- gemini_webapi/types/candidate.py,sha256=67BhY75toE5mVuB21cmHcTFtw332V_KmCjr3-9VTbJo,1477
- gemini_webapi/types/image.py,sha256=4BC8hxAWJrYFwzA60CivF1di4RZkzPKjcaSPPFKmRdY,5237
- gemini_webapi/types/modeloutput.py,sha256=h07kQOkL5r-oPLvZ59uVtO1eP4FGy5ZpzuYQzAeQdr8,1196
- gemini_webapi/utils/__init__.py,sha256=tO6Sx-3fcPeyITZcvUmcFKBFlR5XW87xUFbNrIh3_mE,374
- gemini_webapi/utils/get_access_token.py,sha256=uyb6tuzPr3mHttCjiM86M29ykrnHqsUClYdf5sVkyEQ,5465
- gemini_webapi/utils/load_browser_cookies.py,sha256=A5n_VsB7Rm8ck5lpy856UNJEhv30l3dvQ3j0g3ln1fE,1535
- gemini_webapi/utils/logger.py,sha256=0VcxhVLhHBRDQutNCpapP1y_MhPoQ2ud1uIFLqxC3Z8,958
- gemini_webapi/utils/rotate_1psidts.py,sha256=NyQ9OYPLBOcvpc8bodvEYDIVFrsYN0kdfc831lPEctM,1680
- gemini_webapi/utils/upload_file.py,sha256=SJOMr6kryK_ClrKmqI96fqZBNFOMPsyAvFINAGAU3rk,1468
- gemini_webapi-1.13.0.dist-info/licenses/LICENSE,sha256=hIahDEOTzuHCU5J2nd07LWwkLW7Hko4UFO__ffsvB-8,34523
- gemini_webapi-1.13.0.dist-info/METADATA,sha256=4JhYUbv5-iqYquPepYL_wE46lut6y6dFw5DFdBP89vg,57546
- gemini_webapi-1.13.0.dist-info/WHEEL,sha256=zaaOINJESkSfm_4HQVc5ssNzHCPXhJm0kEUakpsEHaU,91
- gemini_webapi-1.13.0.dist-info/top_level.txt,sha256=dtWtug_ZrmnUqCYuu8NmGzTgWglHeNzhHU_hXmqZGWE,14
- gemini_webapi-1.13.0.dist-info/RECORD,,