gemini-webapi 1.12.3__tar.gz → 1.14.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43) hide show
  1. {gemini_webapi-1.12.3 → gemini_webapi-1.14.1}/PKG-INFO +90 -53
  2. {gemini_webapi-1.12.3 → gemini_webapi-1.14.1}/README.md +87 -51
  3. {gemini_webapi-1.12.3 → gemini_webapi-1.14.1}/pyproject.toml +2 -1
  4. gemini_webapi-1.14.1/src/gemini_webapi/__init__.py +6 -0
  5. {gemini_webapi-1.12.3 → gemini_webapi-1.14.1}/src/gemini_webapi/client.py +151 -10
  6. {gemini_webapi-1.12.3 → gemini_webapi-1.14.1}/src/gemini_webapi/constants.py +5 -13
  7. gemini_webapi-1.14.1/src/gemini_webapi/types/__init__.py +6 -0
  8. gemini_webapi-1.14.1/src/gemini_webapi/types/gem.py +132 -0
  9. {gemini_webapi-1.12.3 → gemini_webapi-1.14.1}/src/gemini_webapi/types/image.py +1 -1
  10. gemini_webapi-1.14.1/src/gemini_webapi/utils/__init__.py +12 -0
  11. {gemini_webapi-1.12.3 → gemini_webapi-1.14.1}/src/gemini_webapi/utils/get_access_token.py +43 -16
  12. gemini_webapi-1.14.1/src/gemini_webapi/utils/logger.py +37 -0
  13. {gemini_webapi-1.12.3 → gemini_webapi-1.14.1}/src/gemini_webapi.egg-info/PKG-INFO +90 -53
  14. {gemini_webapi-1.12.3 → gemini_webapi-1.14.1}/src/gemini_webapi.egg-info/SOURCES.txt +1 -1
  15. {gemini_webapi-1.12.3 → gemini_webapi-1.14.1}/src/gemini_webapi.egg-info/requires.txt +2 -1
  16. {gemini_webapi-1.12.3 → gemini_webapi-1.14.1}/tests/test_client_features.py +52 -37
  17. {gemini_webapi-1.12.3 → gemini_webapi-1.14.1}/tests/test_rotate_cookies.py +1 -3
  18. {gemini_webapi-1.12.3 → gemini_webapi-1.14.1}/tests/test_save_image.py +2 -3
  19. gemini_webapi-1.12.3/src/gemini_webapi/__init__.py +0 -4
  20. gemini_webapi-1.12.3/src/gemini_webapi/types/__init__.py +0 -3
  21. gemini_webapi-1.12.3/src/gemini_webapi/utils/__init__.py +0 -10
  22. gemini_webapi-1.12.3/src/gemini_webapi/utils/logger.py +0 -39
  23. gemini_webapi-1.12.3/tests/test_html_entity_decode.py +0 -48
  24. {gemini_webapi-1.12.3 → gemini_webapi-1.14.1}/.github/dependabot.yml +0 -0
  25. {gemini_webapi-1.12.3 → gemini_webapi-1.14.1}/.github/workflows/github-release.yml +0 -0
  26. {gemini_webapi-1.12.3 → gemini_webapi-1.14.1}/.github/workflows/pypi-publish.yml +0 -0
  27. {gemini_webapi-1.12.3 → gemini_webapi-1.14.1}/.gitignore +0 -0
  28. {gemini_webapi-1.12.3 → gemini_webapi-1.14.1}/.vscode/launch.json +0 -0
  29. {gemini_webapi-1.12.3 → gemini_webapi-1.14.1}/.vscode/settings.json +0 -0
  30. {gemini_webapi-1.12.3 → gemini_webapi-1.14.1}/LICENSE +0 -0
  31. {gemini_webapi-1.12.3 → gemini_webapi-1.14.1}/assets/banner.png +0 -0
  32. {gemini_webapi-1.12.3 → gemini_webapi-1.14.1}/assets/favicon.png +0 -0
  33. {gemini_webapi-1.12.3 → gemini_webapi-1.14.1}/assets/logo.svg +0 -0
  34. {gemini_webapi-1.12.3 → gemini_webapi-1.14.1}/assets/sample.pdf +0 -0
  35. {gemini_webapi-1.12.3 → gemini_webapi-1.14.1}/setup.cfg +0 -0
  36. {gemini_webapi-1.12.3 → gemini_webapi-1.14.1}/src/gemini_webapi/exceptions.py +0 -0
  37. {gemini_webapi-1.12.3 → gemini_webapi-1.14.1}/src/gemini_webapi/types/candidate.py +0 -0
  38. {gemini_webapi-1.12.3 → gemini_webapi-1.14.1}/src/gemini_webapi/types/modeloutput.py +0 -0
  39. {gemini_webapi-1.12.3 → gemini_webapi-1.14.1}/src/gemini_webapi/utils/load_browser_cookies.py +0 -0
  40. {gemini_webapi-1.12.3 → gemini_webapi-1.14.1}/src/gemini_webapi/utils/rotate_1psidts.py +0 -0
  41. {gemini_webapi-1.12.3 → gemini_webapi-1.14.1}/src/gemini_webapi/utils/upload_file.py +0 -0
  42. {gemini_webapi-1.12.3 → gemini_webapi-1.14.1}/src/gemini_webapi.egg-info/dependency_links.txt +0 -0
  43. {gemini_webapi-1.12.3 → gemini_webapi-1.14.1}/src/gemini_webapi.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: gemini-webapi
3
- Version: 1.12.3
3
+ Version: 1.14.1
4
4
  Summary: ✨ An elegant async Python wrapper for Google Gemini web app
5
5
  Author: UZQueen
6
6
  License: GNU AFFERO GENERAL PUBLIC LICENSE
@@ -678,7 +678,8 @@ Description-Content-Type: text/markdown
678
678
  License-File: LICENSE
679
679
  Requires-Dist: httpx[http2]~=0.28.1
680
680
  Requires-Dist: loguru~=0.7.3
681
- Requires-Dist: pydantic~=2.11.3
681
+ Requires-Dist: orjson~=3.10.18
682
+ Requires-Dist: pydantic~=2.11.5
682
683
  Dynamic: license-file
683
684
 
684
685
  <p align="center">
@@ -713,8 +714,9 @@ A reverse-engineered asynchronous python wrapper for [Google Gemini](https://gem
713
714
 
714
715
  - **Persistent Cookies** - Automatically refreshes cookies in background. Optimized for always-on services.
715
716
  - **Image Generation** - Natively supports generating and modifying images with natural language.
717
+ - **System Prompt** - Supports customizing model's system prompt with [Gemini Gems](https://gemini.google.com/gems/view).
716
718
  - **Extension Support** - Supports generating contents with [Gemini extensions](https://gemini.google.com/extensions) on, like YouTube and Gmail.
717
- - **Classified Outputs** - Automatically categorizes texts, web images and AI generated images in the response.
719
+ - **Classified Outputs** - Categorizes texts, thoughts, web images and AI generated images in the response.
718
720
  - **Official Flavor** - Provides a simple and elegant interface inspired by [Google Generative AI](https://ai.google.dev/tutorials/python_quickstart)'s official API.
719
721
  - **Asynchronous** - Utilizes `asyncio` to run generating tasks and return outputs efficiently.
720
722
 
@@ -726,17 +728,18 @@ A reverse-engineered asynchronous python wrapper for [Google Gemini](https://gem
726
728
  - [Authentication](#authentication)
727
729
  - [Usage](#usage)
728
730
  - [Initialization](#initialization)
729
- - [Select language model](#select-language-model)
730
- - [Generate contents from text](#generate-contents-from-text)
731
+ - [Generate contents](#generate-contents)
731
732
  - [Generate contents with files](#generate-contents-with-files)
732
733
  - [Conversations across multiple turns](#conversations-across-multiple-turns)
733
734
  - [Continue previous conversations](#continue-previous-conversations)
735
+ - [Select language model](#select-language-model)
736
+ - [Apply system prompt with Gemini Gems](#apply-system-prompt-with-gemini-gems)
734
737
  - [Retrieve model's thought process](#retrieve-models-thought-process)
735
738
  - [Retrieve images in response](#retrieve-images-in-response)
736
- - [Generate images with Imagen3](#generate-images-with-imagen3)
739
+ - [Generate images with Imagen4](#generate-images-with-imagen4)
737
740
  - [Generate contents with Gemini extensions](#generate-contents-with-gemini-extensions)
738
741
  - [Check and switch to other reply candidates](#check-and-switch-to-other-reply-candidates)
739
- - [Control log level](#control-log-level)
742
+ - [Logging Configuration](#logging-configuration)
740
743
  - [References](#references)
741
744
  - [Stargazers](#stargazers)
742
745
 
@@ -748,13 +751,13 @@ A reverse-engineered asynchronous python wrapper for [Google Gemini](https://gem
748
751
 
749
752
  Install/update the package with pip.
750
753
 
751
- ```bash
754
+ ```sh
752
755
  pip install -U gemini_webapi
753
756
  ```
754
757
 
755
758
  Optionally, package offers a way to automatically import cookies from your local browser. To enable this feature, install `browser-cookie3` as well. Supported platforms and browsers can be found [here](https://github.com/borisbabic/browser_cookie3?tab=readme-ov-file#contribute).
756
759
 
757
- ```bash
760
+ ```sh
758
761
  pip install -U browser-cookie3
759
762
  ```
760
763
 
@@ -816,43 +819,9 @@ asyncio.run(main())
816
819
  >
817
820
  > `auto_close` and `close_delay` are optional arguments for automatically closing the client after a certain period of inactivity. This feature is disabled by default. In an always-on service like chatbot, it's recommended to set `auto_close` to `True` combined with reasonable seconds of `close_delay` for better resource management.
818
821
 
819
- ### Select language model
820
-
821
- You can specify which language model to use by passing `model` argument to `GeminiClient.generate_content` or `GeminiClient.start_chat`. The default value is `unspecified`.
822
-
823
- Currently available models (as of Feb 5, 2025):
824
-
825
- - `unspecified` - Default model
826
- - `gemini-2.0-flash` - Gemini 2.0 Flash
827
- - `gemini-2.0-flash-thinking` - Gemini 2.0 Flash Thinking Experimental
828
- - `gemini-2.5-flash` - Gemini 2.5 Flash
829
- - `gemini-2.5-pro` - Gemini 2.5 Pro (daily usage limit imposed)
830
-
831
- Models pending update (may not work as expected):
832
-
833
- - `gemini-2.5-exp-advanced` - Gemini 2.5 Experimental Advanced **(requires Gemini Advanced account)**
834
- - `gemini-2.0-exp-advanced` - Gemini 2.0 Experimental Advanced **(requires Gemini Advanced account)**
835
-
836
- ```python
837
- from gemini_webapi.constants import Model
838
-
839
- async def main():
840
- response1 = await client.generate_content(
841
- "What's you language model version? Reply version number only.",
842
- model=Model.G_2_0_FLASH,
843
- )
844
- print(f"Model version ({Model.G_2_0_FLASH.model_name}): {response1.text}")
845
-
846
- chat = client.start_chat(model="gemini-2.0-flash-thinking")
847
- response2 = await chat.send_message("What's you language model version? Reply version number only.")
848
- print(f"Model version (gemini-2.0-flash-thinking): {response2.text}")
849
-
850
- asyncio.run(main())
851
- ```
822
+ ### Generate contents
852
823
 
853
- ### Generate contents from text
854
-
855
- Ask a one-turn quick question by calling `GeminiClient.generate_content`.
824
+ Ask a single-turn question by calling `GeminiClient.generate_content`, which returns a `gemini_webapi.ModelOutput` object containing the generated text, images, thoughts, and conversation metadata.
856
825
 
857
826
  ```python
858
827
  async def main():
@@ -883,7 +852,7 @@ asyncio.run(main())
883
852
 
884
853
  ### Conversations across multiple turns
885
854
 
886
- If you want to keep conversation continuous, please use `GeminiClient.start_chat` to create a `ChatSession` object and send messages through it. The conversation history will be automatically handled and get updated after each turn.
855
+ If you want to keep conversation continuous, please use `GeminiClient.start_chat` to create a `gemini_webapi.ChatSession` object and send messages through it. The conversation history will be automatically handled and get updated after each turn.
887
856
 
888
857
  ```python
889
858
  async def main():
@@ -926,6 +895,70 @@ async def main():
926
895
  asyncio.run(main())
927
896
  ```
928
897
 
898
+ ### Select language model
899
+
900
+ You can specify which language model to use by passing `model` argument to `GeminiClient.generate_content` or `GeminiClient.start_chat`. The default value is `unspecified`.
901
+
902
+ Currently available models (as of June 12, 2025):
903
+
904
+ - `unspecified` - Default model
905
+ - `gemini-2.5-flash` - Gemini 2.5 Flash
906
+ - `gemini-2.5-pro` - Gemini 2.5 Pro (daily usage limit imposed)
907
+
908
+ Deprecated models (yet still working):
909
+
910
+ - `gemini-2.0-flash` - Gemini 2.0 Flash
911
+ - `gemini-2.0-flash-thinking` - Gemini 2.0 Flash Thinking
912
+
913
+ ```python
914
+ from gemini_webapi.constants import Model
915
+
916
+ async def main():
917
+ response1 = await client.generate_content(
918
+ "What's you language model version? Reply version number only.",
919
+ model=Model.G_2_5_FLASH,
920
+ )
921
+ print(f"Model version ({Model.G_2_5_FLASH.model_name}): {response1.text}")
922
+
923
+ chat = client.start_chat(model="gemini-2.5-pro")
924
+ response2 = await chat.send_message("What's your language model version? Reply version number only.")
925
+ print(f"Model version (gemini-2.5-pro): {response2.text}")
926
+
927
+ asyncio.run(main())
928
+ ```
929
+
930
+ ### Apply system prompt with Gemini Gems
931
+
932
+ System prompt can be applied to conversations via [Gemini Gems](https://gemini.google.com/gems/view). To use a gem, you can pass `gem` argument to `GeminiClient.generate_content` or `GeminiClient.start_chat`. `gem` can be either a string of gem id or a `gemini_webapi.Gem` object. Only one gem can be applied to a single conversation.
933
+
934
+ ```python
935
+ async def main():
936
+ # Fetch all gems for the current account, including both predefined and user-created ones
937
+ await client.fetch_gems()
938
+
939
+ # Once fetched, gems will be cached in `GeminiClient.gems`
940
+ gems = client.gems
941
+
942
+ # Get the gem you want to use
943
+ system_gems = gems.filter(predefined=True)
944
+ coding_partner = system_gems.get(id="coding-partner")
945
+
946
+ response1 = await client.generate_content(
947
+ "what's your system prompt?",
948
+ model=Model.G_2_5_FLASH,
949
+ gem=coding_partner,
950
+ )
951
+ print(response1.text)
952
+
953
+ # Another example with a user-created custom gem
954
+ # Gem ids are consistent strings. Store them somewhere to avoid fetching gems every time
955
+ your_gem = gems.get(name="Your Gem Name")
956
+ your_gem_id = your_gem.id
957
+ chat = client.start_chat(gem=your_gem_id)
958
+ response2 = await chat.send_message("what's your system prompt?")
959
+ print(response2)
960
+ ```
961
+
929
962
  ### Retrieve model's thought process
930
963
 
931
964
  When using models with thinking capabilities, the model's thought process will be populated in `ModelOutput.thoughts`.
@@ -933,7 +966,7 @@ When using models with thinking capabilities, the model's thought process will b
933
966
  ```python
934
967
  async def main():
935
968
  response = await client.generate_content(
936
- "What's 1+1?", model="gemini-2.0-flash-thinking"
969
+ "What's 1+1?", model="gemini-2.5-pro"
937
970
  )
938
971
  print(response.thoughts)
939
972
  print(response.text)
@@ -943,7 +976,7 @@ asyncio.run(main())
943
976
 
944
977
  ### Retrieve images in response
945
978
 
946
- Images in the API's output are stored as a list of `Image` objects. You can access the image title, URL, and description by calling `image.title`, `image.url` and `image.alt` respectively.
979
+ Images in the API's output are stored as a list of `gemini_webapi.Image` objects. You can access the image title, URL, and description by calling `Image.title`, `Image.url` and `Image.alt` respectively.
947
980
 
948
981
  ```python
949
982
  async def main():
@@ -954,9 +987,9 @@ async def main():
954
987
  asyncio.run(main())
955
988
  ```
956
989
 
957
- ### Generate images with Imagen3
990
+ ### Generate images with Imagen4
958
991
 
959
- You can ask Gemini to generate and modify images with Imagen3, Google's latest AI image generator, simply by natural language.
992
+ You can ask Gemini to generate and modify images with Imagen4, Google's latest AI image generator, simply by natural language.
960
993
 
961
994
  > [!IMPORTANT]
962
995
  >
@@ -1015,7 +1048,7 @@ asyncio.run(main())
1015
1048
 
1016
1049
  ### Check and switch to other reply candidates
1017
1050
 
1018
- A response from Gemini usually contains multiple reply candidates with different generated contents. You can check all candidates and choose one to continue the conversation. By default, the first candidate will be chosen automatically.
1051
+ A response from Gemini sometimes contains multiple reply candidates with different generated contents. You can check all candidates and choose one to continue the conversation. By default, the first candidate will be chosen.
1019
1052
 
1020
1053
  ```python
1021
1054
  async def main():
@@ -1036,9 +1069,9 @@ async def main():
1036
1069
  asyncio.run(main())
1037
1070
  ```
1038
1071
 
1039
- ### Control log level
1072
+ ### Logging Configuration
1040
1073
 
1041
- You can set the log level of the package to one of the following values: `DEBUG`, `INFO`, `WARNING`, `ERROR` and `CRITICAL`. The default value is `INFO`.
1074
+ This package uses [loguru](https://loguru.readthedocs.io/en/stable/) for logging, and exposes a function `set_log_level` to control log level. You can set log level to one of the following values: `DEBUG`, `INFO`, `WARNING`, `ERROR` and `CRITICAL`. The default value is `INFO`.
1042
1075
 
1043
1076
  ```python
1044
1077
  from gemini_webapi import set_log_level
@@ -1046,6 +1079,10 @@ from gemini_webapi import set_log_level
1046
1079
  set_log_level("DEBUG")
1047
1080
  ```
1048
1081
 
1082
+ > [!NOTE]
1083
+ >
1084
+ > Calling `set_log_level` for the first time will **globally** remove all existing loguru handlers. You may want to configure logging directly with loguru to avoid this issue and have more advanced control over logging behaviors.
1085
+
1049
1086
  ## References
1050
1087
 
1051
1088
  [Google AI Studio](https://ai.google.dev/tutorials/ai-studio_quickstart)
@@ -30,8 +30,9 @@ A reverse-engineered asynchronous python wrapper for [Google Gemini](https://gem
30
30
 
31
31
  - **Persistent Cookies** - Automatically refreshes cookies in background. Optimized for always-on services.
32
32
  - **Image Generation** - Natively supports generating and modifying images with natural language.
33
+ - **System Prompt** - Supports customizing model's system prompt with [Gemini Gems](https://gemini.google.com/gems/view).
33
34
  - **Extension Support** - Supports generating contents with [Gemini extensions](https://gemini.google.com/extensions) on, like YouTube and Gmail.
34
- - **Classified Outputs** - Automatically categorizes texts, web images and AI generated images in the response.
35
+ - **Classified Outputs** - Categorizes texts, thoughts, web images and AI generated images in the response.
35
36
  - **Official Flavor** - Provides a simple and elegant interface inspired by [Google Generative AI](https://ai.google.dev/tutorials/python_quickstart)'s official API.
36
37
  - **Asynchronous** - Utilizes `asyncio` to run generating tasks and return outputs efficiently.
37
38
 
@@ -43,17 +44,18 @@ A reverse-engineered asynchronous python wrapper for [Google Gemini](https://gem
43
44
  - [Authentication](#authentication)
44
45
  - [Usage](#usage)
45
46
  - [Initialization](#initialization)
46
- - [Select language model](#select-language-model)
47
- - [Generate contents from text](#generate-contents-from-text)
47
+ - [Generate contents](#generate-contents)
48
48
  - [Generate contents with files](#generate-contents-with-files)
49
49
  - [Conversations across multiple turns](#conversations-across-multiple-turns)
50
50
  - [Continue previous conversations](#continue-previous-conversations)
51
+ - [Select language model](#select-language-model)
52
+ - [Apply system prompt with Gemini Gems](#apply-system-prompt-with-gemini-gems)
51
53
  - [Retrieve model's thought process](#retrieve-models-thought-process)
52
54
  - [Retrieve images in response](#retrieve-images-in-response)
53
- - [Generate images with Imagen3](#generate-images-with-imagen3)
55
+ - [Generate images with Imagen4](#generate-images-with-imagen4)
54
56
  - [Generate contents with Gemini extensions](#generate-contents-with-gemini-extensions)
55
57
  - [Check and switch to other reply candidates](#check-and-switch-to-other-reply-candidates)
56
- - [Control log level](#control-log-level)
58
+ - [Logging Configuration](#logging-configuration)
57
59
  - [References](#references)
58
60
  - [Stargazers](#stargazers)
59
61
 
@@ -65,13 +67,13 @@ A reverse-engineered asynchronous python wrapper for [Google Gemini](https://gem
65
67
 
66
68
  Install/update the package with pip.
67
69
 
68
- ```bash
70
+ ```sh
69
71
  pip install -U gemini_webapi
70
72
  ```
71
73
 
72
74
  Optionally, package offers a way to automatically import cookies from your local browser. To enable this feature, install `browser-cookie3` as well. Supported platforms and browsers can be found [here](https://github.com/borisbabic/browser_cookie3?tab=readme-ov-file#contribute).
73
75
 
74
- ```bash
76
+ ```sh
75
77
  pip install -U browser-cookie3
76
78
  ```
77
79
 
@@ -133,43 +135,9 @@ asyncio.run(main())
133
135
  >
134
136
  > `auto_close` and `close_delay` are optional arguments for automatically closing the client after a certain period of inactivity. This feature is disabled by default. In an always-on service like chatbot, it's recommended to set `auto_close` to `True` combined with reasonable seconds of `close_delay` for better resource management.
135
137
 
136
- ### Select language model
137
-
138
- You can specify which language model to use by passing `model` argument to `GeminiClient.generate_content` or `GeminiClient.start_chat`. The default value is `unspecified`.
139
-
140
- Currently available models (as of Feb 5, 2025):
141
-
142
- - `unspecified` - Default model
143
- - `gemini-2.0-flash` - Gemini 2.0 Flash
144
- - `gemini-2.0-flash-thinking` - Gemini 2.0 Flash Thinking Experimental
145
- - `gemini-2.5-flash` - Gemini 2.5 Flash
146
- - `gemini-2.5-pro` - Gemini 2.5 Pro (daily usage limit imposed)
147
-
148
- Models pending update (may not work as expected):
149
-
150
- - `gemini-2.5-exp-advanced` - Gemini 2.5 Experimental Advanced **(requires Gemini Advanced account)**
151
- - `gemini-2.0-exp-advanced` - Gemini 2.0 Experimental Advanced **(requires Gemini Advanced account)**
152
-
153
- ```python
154
- from gemini_webapi.constants import Model
155
-
156
- async def main():
157
- response1 = await client.generate_content(
158
- "What's you language model version? Reply version number only.",
159
- model=Model.G_2_0_FLASH,
160
- )
161
- print(f"Model version ({Model.G_2_0_FLASH.model_name}): {response1.text}")
162
-
163
- chat = client.start_chat(model="gemini-2.0-flash-thinking")
164
- response2 = await chat.send_message("What's you language model version? Reply version number only.")
165
- print(f"Model version (gemini-2.0-flash-thinking): {response2.text}")
166
-
167
- asyncio.run(main())
168
- ```
138
+ ### Generate contents
169
139
 
170
- ### Generate contents from text
171
-
172
- Ask a one-turn quick question by calling `GeminiClient.generate_content`.
140
+ Ask a single-turn question by calling `GeminiClient.generate_content`, which returns a `gemini_webapi.ModelOutput` object containing the generated text, images, thoughts, and conversation metadata.
173
141
 
174
142
  ```python
175
143
  async def main():
@@ -200,7 +168,7 @@ asyncio.run(main())
200
168
 
201
169
  ### Conversations across multiple turns
202
170
 
203
- If you want to keep conversation continuous, please use `GeminiClient.start_chat` to create a `ChatSession` object and send messages through it. The conversation history will be automatically handled and get updated after each turn.
171
+ If you want to keep conversation continuous, please use `GeminiClient.start_chat` to create a `gemini_webapi.ChatSession` object and send messages through it. The conversation history will be automatically handled and get updated after each turn.
204
172
 
205
173
  ```python
206
174
  async def main():
@@ -243,6 +211,70 @@ async def main():
243
211
  asyncio.run(main())
244
212
  ```
245
213
 
214
+ ### Select language model
215
+
216
+ You can specify which language model to use by passing `model` argument to `GeminiClient.generate_content` or `GeminiClient.start_chat`. The default value is `unspecified`.
217
+
218
+ Currently available models (as of June 12, 2025):
219
+
220
+ - `unspecified` - Default model
221
+ - `gemini-2.5-flash` - Gemini 2.5 Flash
222
+ - `gemini-2.5-pro` - Gemini 2.5 Pro (daily usage limit imposed)
223
+
224
+ Deprecated models (yet still working):
225
+
226
+ - `gemini-2.0-flash` - Gemini 2.0 Flash
227
+ - `gemini-2.0-flash-thinking` - Gemini 2.0 Flash Thinking
228
+
229
+ ```python
230
+ from gemini_webapi.constants import Model
231
+
232
+ async def main():
233
+ response1 = await client.generate_content(
234
+ "What's you language model version? Reply version number only.",
235
+ model=Model.G_2_5_FLASH,
236
+ )
237
+ print(f"Model version ({Model.G_2_5_FLASH.model_name}): {response1.text}")
238
+
239
+ chat = client.start_chat(model="gemini-2.5-pro")
240
+ response2 = await chat.send_message("What's your language model version? Reply version number only.")
241
+ print(f"Model version (gemini-2.5-pro): {response2.text}")
242
+
243
+ asyncio.run(main())
244
+ ```
245
+
246
+ ### Apply system prompt with Gemini Gems
247
+
248
+ System prompt can be applied to conversations via [Gemini Gems](https://gemini.google.com/gems/view). To use a gem, you can pass `gem` argument to `GeminiClient.generate_content` or `GeminiClient.start_chat`. `gem` can be either a string of gem id or a `gemini_webapi.Gem` object. Only one gem can be applied to a single conversation.
249
+
250
+ ```python
251
+ async def main():
252
+ # Fetch all gems for the current account, including both predefined and user-created ones
253
+ await client.fetch_gems()
254
+
255
+ # Once fetched, gems will be cached in `GeminiClient.gems`
256
+ gems = client.gems
257
+
258
+ # Get the gem you want to use
259
+ system_gems = gems.filter(predefined=True)
260
+ coding_partner = system_gems.get(id="coding-partner")
261
+
262
+ response1 = await client.generate_content(
263
+ "what's your system prompt?",
264
+ model=Model.G_2_5_FLASH,
265
+ gem=coding_partner,
266
+ )
267
+ print(response1.text)
268
+
269
+ # Another example with a user-created custom gem
270
+ # Gem ids are consistent strings. Store them somewhere to avoid fetching gems every time
271
+ your_gem = gems.get(name="Your Gem Name")
272
+ your_gem_id = your_gem.id
273
+ chat = client.start_chat(gem=your_gem_id)
274
+ response2 = await chat.send_message("what's your system prompt?")
275
+ print(response2)
276
+ ```
277
+
246
278
  ### Retrieve model's thought process
247
279
 
248
280
  When using models with thinking capabilities, the model's thought process will be populated in `ModelOutput.thoughts`.
@@ -250,7 +282,7 @@ When using models with thinking capabilities, the model's thought process will b
250
282
  ```python
251
283
  async def main():
252
284
  response = await client.generate_content(
253
- "What's 1+1?", model="gemini-2.0-flash-thinking"
285
+ "What's 1+1?", model="gemini-2.5-pro"
254
286
  )
255
287
  print(response.thoughts)
256
288
  print(response.text)
@@ -260,7 +292,7 @@ asyncio.run(main())
260
292
 
261
293
  ### Retrieve images in response
262
294
 
263
- Images in the API's output are stored as a list of `Image` objects. You can access the image title, URL, and description by calling `image.title`, `image.url` and `image.alt` respectively.
295
+ Images in the API's output are stored as a list of `gemini_webapi.Image` objects. You can access the image title, URL, and description by calling `Image.title`, `Image.url` and `Image.alt` respectively.
264
296
 
265
297
  ```python
266
298
  async def main():
@@ -271,9 +303,9 @@ async def main():
271
303
  asyncio.run(main())
272
304
  ```
273
305
 
274
- ### Generate images with Imagen3
306
+ ### Generate images with Imagen4
275
307
 
276
- You can ask Gemini to generate and modify images with Imagen3, Google's latest AI image generator, simply by natural language.
308
+ You can ask Gemini to generate and modify images with Imagen4, Google's latest AI image generator, simply by natural language.
277
309
 
278
310
  > [!IMPORTANT]
279
311
  >
@@ -332,7 +364,7 @@ asyncio.run(main())
332
364
 
333
365
  ### Check and switch to other reply candidates
334
366
 
335
- A response from Gemini usually contains multiple reply candidates with different generated contents. You can check all candidates and choose one to continue the conversation. By default, the first candidate will be chosen automatically.
367
+ A response from Gemini sometimes contains multiple reply candidates with different generated contents. You can check all candidates and choose one to continue the conversation. By default, the first candidate will be chosen.
336
368
 
337
369
  ```python
338
370
  async def main():
@@ -353,9 +385,9 @@ async def main():
353
385
  asyncio.run(main())
354
386
  ```
355
387
 
356
- ### Control log level
388
+ ### Logging Configuration
357
389
 
358
- You can set the log level of the package to one of the following values: `DEBUG`, `INFO`, `WARNING`, `ERROR` and `CRITICAL`. The default value is `INFO`.
390
+ This package uses [loguru](https://loguru.readthedocs.io/en/stable/) for logging, and exposes a function `set_log_level` to control log level. You can set log level to one of the following values: `DEBUG`, `INFO`, `WARNING`, `ERROR` and `CRITICAL`. The default value is `INFO`.
359
391
 
360
392
  ```python
361
393
  from gemini_webapi import set_log_level
@@ -363,6 +395,10 @@ from gemini_webapi import set_log_level
363
395
  set_log_level("DEBUG")
364
396
  ```
365
397
 
398
+ > [!NOTE]
399
+ >
400
+ > Calling `set_log_level` for the first time will **globally** remove all existing loguru handlers. You may want to configure logging directly with loguru to avoid this issue and have more advanced control over logging behaviors.
401
+
366
402
  ## References
367
403
 
368
404
  [Google AI Studio](https://ai.google.dev/tutorials/ai-studio_quickstart)
@@ -22,7 +22,8 @@ requires-python = ">=3.10"
22
22
  dependencies = [
23
23
  "httpx[http2]~=0.28.1",
24
24
  "loguru~=0.7.3",
25
- "pydantic~=2.11.3",
25
+ "orjson~=3.10.18",
26
+ "pydantic~=2.11.5",
26
27
  ]
27
28
  dynamic = ["version"]
28
29
 
@@ -0,0 +1,6 @@
1
+ # flake8: noqa
2
+
3
+ from .client import GeminiClient, ChatSession
4
+ from .exceptions import *
5
+ from .types import *
6
+ from .utils import set_log_level, logger