gemini-webapi 1.17.2__tar.gz → 1.18.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {gemini_webapi-1.17.2 → gemini_webapi-1.18.0}/.github/workflows/pypi-publish.yml +2 -2
- {gemini_webapi-1.17.2 → gemini_webapi-1.18.0}/PKG-INFO +32 -8
- {gemini_webapi-1.17.2 → gemini_webapi-1.18.0}/README.md +29 -5
- {gemini_webapi-1.17.2 → gemini_webapi-1.18.0}/pyproject.toml +2 -2
- gemini_webapi-1.18.0/src/gemini_webapi/client.py +1132 -0
- {gemini_webapi-1.17.2 → gemini_webapi-1.18.0}/src/gemini_webapi/components/gem_mixin.py +35 -20
- {gemini_webapi-1.17.2 → gemini_webapi-1.18.0}/src/gemini_webapi/constants.py +12 -8
- {gemini_webapi-1.17.2 → gemini_webapi-1.18.0}/src/gemini_webapi/types/candidate.py +2 -0
- {gemini_webapi-1.17.2 → gemini_webapi-1.18.0}/src/gemini_webapi/types/image.py +7 -6
- {gemini_webapi-1.17.2 → gemini_webapi-1.18.0}/src/gemini_webapi/types/modeloutput.py +8 -0
- {gemini_webapi-1.17.2 → gemini_webapi-1.18.0}/src/gemini_webapi/utils/__init__.py +1 -6
- gemini_webapi-1.18.0/src/gemini_webapi/utils/decorators.py +98 -0
- {gemini_webapi-1.17.2 → gemini_webapi-1.18.0}/src/gemini_webapi/utils/get_access_token.py +52 -37
- gemini_webapi-1.18.0/src/gemini_webapi/utils/parsing.py +250 -0
- gemini_webapi-1.18.0/src/gemini_webapi/utils/rotate_1psidts.py +78 -0
- gemini_webapi-1.18.0/src/gemini_webapi/utils/upload_file.py +99 -0
- {gemini_webapi-1.17.2 → gemini_webapi-1.18.0}/src/gemini_webapi.egg-info/PKG-INFO +32 -8
- {gemini_webapi-1.17.2 → gemini_webapi-1.18.0}/src/gemini_webapi.egg-info/requires.txt +2 -2
- {gemini_webapi-1.17.2 → gemini_webapi-1.18.0}/tests/test_client_features.py +18 -3
- gemini_webapi-1.17.2/src/gemini_webapi/client.py +0 -795
- gemini_webapi-1.17.2/src/gemini_webapi/utils/decorators.py +0 -53
- gemini_webapi-1.17.2/src/gemini_webapi/utils/parsing.py +0 -79
- gemini_webapi-1.17.2/src/gemini_webapi/utils/rotate_1psidts.py +0 -59
- gemini_webapi-1.17.2/src/gemini_webapi/utils/upload_file.py +0 -66
- {gemini_webapi-1.17.2 → gemini_webapi-1.18.0}/.github/dependabot.yml +0 -0
- {gemini_webapi-1.17.2 → gemini_webapi-1.18.0}/.github/workflows/github-release.yml +0 -0
- {gemini_webapi-1.17.2 → gemini_webapi-1.18.0}/.gitignore +0 -0
- {gemini_webapi-1.17.2 → gemini_webapi-1.18.0}/.vscode/launch.json +0 -0
- {gemini_webapi-1.17.2 → gemini_webapi-1.18.0}/.vscode/settings.json +0 -0
- {gemini_webapi-1.17.2 → gemini_webapi-1.18.0}/LICENSE +0 -0
- {gemini_webapi-1.17.2 → gemini_webapi-1.18.0}/assets/banner.png +0 -0
- {gemini_webapi-1.17.2 → gemini_webapi-1.18.0}/assets/favicon.png +0 -0
- {gemini_webapi-1.17.2 → gemini_webapi-1.18.0}/assets/logo.svg +0 -0
- {gemini_webapi-1.17.2 → gemini_webapi-1.18.0}/assets/sample.pdf +0 -0
- {gemini_webapi-1.17.2 → gemini_webapi-1.18.0}/setup.cfg +0 -0
- {gemini_webapi-1.17.2 → gemini_webapi-1.18.0}/src/gemini_webapi/__init__.py +0 -0
- {gemini_webapi-1.17.2 → gemini_webapi-1.18.0}/src/gemini_webapi/components/__init__.py +0 -0
- {gemini_webapi-1.17.2 → gemini_webapi-1.18.0}/src/gemini_webapi/exceptions.py +0 -0
- {gemini_webapi-1.17.2 → gemini_webapi-1.18.0}/src/gemini_webapi/types/__init__.py +0 -0
- {gemini_webapi-1.17.2 → gemini_webapi-1.18.0}/src/gemini_webapi/types/gem.py +0 -0
- {gemini_webapi-1.17.2 → gemini_webapi-1.18.0}/src/gemini_webapi/types/grpc.py +0 -0
- {gemini_webapi-1.17.2 → gemini_webapi-1.18.0}/src/gemini_webapi/utils/load_browser_cookies.py +0 -0
- {gemini_webapi-1.17.2 → gemini_webapi-1.18.0}/src/gemini_webapi/utils/logger.py +0 -0
- {gemini_webapi-1.17.2 → gemini_webapi-1.18.0}/src/gemini_webapi.egg-info/SOURCES.txt +0 -0
- {gemini_webapi-1.17.2 → gemini_webapi-1.18.0}/src/gemini_webapi.egg-info/dependency_links.txt +0 -0
- {gemini_webapi-1.17.2 → gemini_webapi-1.18.0}/src/gemini_webapi.egg-info/top_level.txt +0 -0
- {gemini_webapi-1.17.2 → gemini_webapi-1.18.0}/tests/test_gem_mixin.py +0 -0
- {gemini_webapi-1.17.2 → gemini_webapi-1.18.0}/tests/test_save_image.py +0 -0
|
@@ -36,7 +36,7 @@ jobs:
|
|
|
36
36
|
- name: Build package
|
|
37
37
|
run: python -m build
|
|
38
38
|
- name: Archive production artifacts
|
|
39
|
-
uses: actions/upload-artifact@
|
|
39
|
+
uses: actions/upload-artifact@v6.0.0
|
|
40
40
|
with:
|
|
41
41
|
name: dist
|
|
42
42
|
path: dist
|
|
@@ -52,7 +52,7 @@ jobs:
|
|
|
52
52
|
id-token: write # IMPORTANT: this permission is mandatory for trusted publishing
|
|
53
53
|
steps:
|
|
54
54
|
- name: Retrieve built artifacts
|
|
55
|
-
uses: actions/download-artifact@
|
|
55
|
+
uses: actions/download-artifact@v7.0.0
|
|
56
56
|
with:
|
|
57
57
|
name: dist
|
|
58
58
|
path: dist
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: gemini-webapi
|
|
3
|
-
Version: 1.
|
|
3
|
+
Version: 1.18.0
|
|
4
4
|
Summary: ✨ An elegant async Python wrapper for Google Gemini web app
|
|
5
5
|
Author: UZQueen
|
|
6
6
|
License: GNU AFFERO GENERAL PUBLIC LICENSE
|
|
@@ -678,8 +678,8 @@ Description-Content-Type: text/markdown
|
|
|
678
678
|
License-File: LICENSE
|
|
679
679
|
Requires-Dist: httpx[http2]~=0.28.1
|
|
680
680
|
Requires-Dist: loguru~=0.7.3
|
|
681
|
-
Requires-Dist: orjson~=3.11.
|
|
682
|
-
Requires-Dist: pydantic~=2.12.
|
|
681
|
+
Requires-Dist: orjson~=3.11.7
|
|
682
|
+
Requires-Dist: pydantic~=2.12.5
|
|
683
683
|
Dynamic: license-file
|
|
684
684
|
|
|
685
685
|
<p align="center">
|
|
@@ -717,6 +717,7 @@ A reverse-engineered asynchronous python wrapper for [Google Gemini](https://gem
|
|
|
717
717
|
- **System Prompt** - Supports customizing model's system prompt with [Gemini Gems](https://gemini.google.com/gems/view).
|
|
718
718
|
- **Extension Support** - Supports generating contents with [Gemini extensions](https://gemini.google.com/extensions) on, like YouTube and Gmail.
|
|
719
719
|
- **Classified Outputs** - Categorizes texts, thoughts, web images and AI generated images in the response.
|
|
720
|
+
- **Streaming Mode** - Supports stream generation, yielding partial outputs as they are generated.
|
|
720
721
|
- **Official Flavor** - Provides a simple and elegant interface inspired by [Google Generative AI](https://ai.google.dev/tutorials/python_quickstart)'s official API.
|
|
721
722
|
- **Asynchronous** - Utilizes `asyncio` to run generating tasks and return outputs efficiently.
|
|
722
723
|
|
|
@@ -732,6 +733,7 @@ A reverse-engineered asynchronous python wrapper for [Google Gemini](https://gem
|
|
|
732
733
|
- [Generate contents with files](#generate-contents-with-files)
|
|
733
734
|
- [Conversations across multiple turns](#conversations-across-multiple-turns)
|
|
734
735
|
- [Continue previous conversations](#continue-previous-conversations)
|
|
736
|
+
- [Streaming mode](#streaming-mode)
|
|
735
737
|
- [Select language model](#select-language-model)
|
|
736
738
|
- [Apply system prompt with Gemini Gems](#apply-system-prompt-with-gemini-gems)
|
|
737
739
|
- [Manage Custom Gems](#manage-custom-gems)
|
|
@@ -901,6 +903,28 @@ async def main():
|
|
|
901
903
|
asyncio.run(main())
|
|
902
904
|
```
|
|
903
905
|
|
|
906
|
+
### Streaming mode
|
|
907
|
+
|
|
908
|
+
For longer responses, you can use streaming mode to receive partial outputs as they are generated. This provides a more responsive user experience, especially for real-time applications like chatbots.
|
|
909
|
+
|
|
910
|
+
The `generate_content_stream` method yields `ModelOutput` objects where the `text_delta` attribute contains only the **new characters** received since the last yield, making it easy to display incremental updates.
|
|
911
|
+
|
|
912
|
+
```python
|
|
913
|
+
async def main():
|
|
914
|
+
async for chunk in client.generate_content_stream(
|
|
915
|
+
"What's the difference between 'await' and 'async for'?"
|
|
916
|
+
):
|
|
917
|
+
print(chunk.text_delta, end="", flush=True)
|
|
918
|
+
|
|
919
|
+
print()
|
|
920
|
+
|
|
921
|
+
asyncio.run(main())
|
|
922
|
+
```
|
|
923
|
+
|
|
924
|
+
> [!TIP]
|
|
925
|
+
>
|
|
926
|
+
> You can also use streaming mode in multi-turn conversations with `ChatSession.send_message_stream`.
|
|
927
|
+
|
|
904
928
|
### Select language model
|
|
905
929
|
|
|
906
930
|
You can specify which language model to use by passing `model` argument to `GeminiClient.generate_content` or `GeminiClient.start_chat`. The default value is `unspecified`.
|
|
@@ -909,8 +933,8 @@ Currently available models (as of November 20, 2025):
|
|
|
909
933
|
|
|
910
934
|
- `unspecified` - Default model
|
|
911
935
|
- `gemini-3.0-pro` - Gemini 3.0 Pro
|
|
912
|
-
- `gemini-
|
|
913
|
-
- `gemini-
|
|
936
|
+
- `gemini-3.0-flash` - Gemini 3.0 Flash
|
|
937
|
+
- `gemini-3.0-flash-thinking` - Gemini 3.0 Flash Thinking
|
|
914
938
|
|
|
915
939
|
```python
|
|
916
940
|
from gemini_webapi.constants import Model
|
|
@@ -918,9 +942,9 @@ from gemini_webapi.constants import Model
|
|
|
918
942
|
async def main():
|
|
919
943
|
response1 = await client.generate_content(
|
|
920
944
|
"What's you language model version? Reply version number only.",
|
|
921
|
-
model=Model.
|
|
945
|
+
model=Model.G_3_0_FLASH,
|
|
922
946
|
)
|
|
923
|
-
print(f"Model version ({Model.
|
|
947
|
+
print(f"Model version ({Model.G_3_0_FLASH.model_name}): {response1.text}")
|
|
924
948
|
|
|
925
949
|
chat = client.start_chat(model="gemini-2.5-pro")
|
|
926
950
|
response2 = await chat.send_message("What's your language model version? Reply version number only.")
|
|
@@ -968,7 +992,7 @@ async def main():
|
|
|
968
992
|
|
|
969
993
|
response1 = await client.generate_content(
|
|
970
994
|
"what's your system prompt?",
|
|
971
|
-
model=Model.
|
|
995
|
+
model=Model.G_3_0_FLASH,
|
|
972
996
|
gem=coding_partner,
|
|
973
997
|
)
|
|
974
998
|
print(response1.text)
|
|
@@ -33,6 +33,7 @@ A reverse-engineered asynchronous python wrapper for [Google Gemini](https://gem
|
|
|
33
33
|
- **System Prompt** - Supports customizing model's system prompt with [Gemini Gems](https://gemini.google.com/gems/view).
|
|
34
34
|
- **Extension Support** - Supports generating contents with [Gemini extensions](https://gemini.google.com/extensions) on, like YouTube and Gmail.
|
|
35
35
|
- **Classified Outputs** - Categorizes texts, thoughts, web images and AI generated images in the response.
|
|
36
|
+
- **Streaming Mode** - Supports stream generation, yielding partial outputs as they are generated.
|
|
36
37
|
- **Official Flavor** - Provides a simple and elegant interface inspired by [Google Generative AI](https://ai.google.dev/tutorials/python_quickstart)'s official API.
|
|
37
38
|
- **Asynchronous** - Utilizes `asyncio` to run generating tasks and return outputs efficiently.
|
|
38
39
|
|
|
@@ -48,6 +49,7 @@ A reverse-engineered asynchronous python wrapper for [Google Gemini](https://gem
|
|
|
48
49
|
- [Generate contents with files](#generate-contents-with-files)
|
|
49
50
|
- [Conversations across multiple turns](#conversations-across-multiple-turns)
|
|
50
51
|
- [Continue previous conversations](#continue-previous-conversations)
|
|
52
|
+
- [Streaming mode](#streaming-mode)
|
|
51
53
|
- [Select language model](#select-language-model)
|
|
52
54
|
- [Apply system prompt with Gemini Gems](#apply-system-prompt-with-gemini-gems)
|
|
53
55
|
- [Manage Custom Gems](#manage-custom-gems)
|
|
@@ -217,6 +219,28 @@ async def main():
|
|
|
217
219
|
asyncio.run(main())
|
|
218
220
|
```
|
|
219
221
|
|
|
222
|
+
### Streaming mode
|
|
223
|
+
|
|
224
|
+
For longer responses, you can use streaming mode to receive partial outputs as they are generated. This provides a more responsive user experience, especially for real-time applications like chatbots.
|
|
225
|
+
|
|
226
|
+
The `generate_content_stream` method yields `ModelOutput` objects where the `text_delta` attribute contains only the **new characters** received since the last yield, making it easy to display incremental updates.
|
|
227
|
+
|
|
228
|
+
```python
|
|
229
|
+
async def main():
|
|
230
|
+
async for chunk in client.generate_content_stream(
|
|
231
|
+
"What's the difference between 'await' and 'async for'?"
|
|
232
|
+
):
|
|
233
|
+
print(chunk.text_delta, end="", flush=True)
|
|
234
|
+
|
|
235
|
+
print()
|
|
236
|
+
|
|
237
|
+
asyncio.run(main())
|
|
238
|
+
```
|
|
239
|
+
|
|
240
|
+
> [!TIP]
|
|
241
|
+
>
|
|
242
|
+
> You can also use streaming mode in multi-turn conversations with `ChatSession.send_message_stream`.
|
|
243
|
+
|
|
220
244
|
### Select language model
|
|
221
245
|
|
|
222
246
|
You can specify which language model to use by passing `model` argument to `GeminiClient.generate_content` or `GeminiClient.start_chat`. The default value is `unspecified`.
|
|
@@ -225,8 +249,8 @@ Currently available models (as of November 20, 2025):
|
|
|
225
249
|
|
|
226
250
|
- `unspecified` - Default model
|
|
227
251
|
- `gemini-3.0-pro` - Gemini 3.0 Pro
|
|
228
|
-
- `gemini-
|
|
229
|
-
- `gemini-
|
|
252
|
+
- `gemini-3.0-flash` - Gemini 3.0 Flash
|
|
253
|
+
- `gemini-3.0-flash-thinking` - Gemini 3.0 Flash Thinking
|
|
230
254
|
|
|
231
255
|
```python
|
|
232
256
|
from gemini_webapi.constants import Model
|
|
@@ -234,9 +258,9 @@ from gemini_webapi.constants import Model
|
|
|
234
258
|
async def main():
|
|
235
259
|
response1 = await client.generate_content(
|
|
236
260
|
"What's you language model version? Reply version number only.",
|
|
237
|
-
model=Model.
|
|
261
|
+
model=Model.G_3_0_FLASH,
|
|
238
262
|
)
|
|
239
|
-
print(f"Model version ({Model.
|
|
263
|
+
print(f"Model version ({Model.G_3_0_FLASH.model_name}): {response1.text}")
|
|
240
264
|
|
|
241
265
|
chat = client.start_chat(model="gemini-2.5-pro")
|
|
242
266
|
response2 = await chat.send_message("What's your language model version? Reply version number only.")
|
|
@@ -284,7 +308,7 @@ async def main():
|
|
|
284
308
|
|
|
285
309
|
response1 = await client.generate_content(
|
|
286
310
|
"what's your system prompt?",
|
|
287
|
-
model=Model.
|
|
311
|
+
model=Model.G_3_0_FLASH,
|
|
288
312
|
gem=coding_partner,
|
|
289
313
|
)
|
|
290
314
|
print(response1.text)
|