gemini-webapi 1.7.0__tar.gz → 1.8.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {gemini_webapi-1.7.0 → gemini_webapi-1.8.0}/.github/workflows/pypi-publish.yml +1 -1
- {gemini_webapi-1.7.0 → gemini_webapi-1.8.0}/PKG-INFO +32 -4
- {gemini_webapi-1.7.0 → gemini_webapi-1.8.0}/README.md +31 -3
- {gemini_webapi-1.7.0 → gemini_webapi-1.8.0}/src/gemini_webapi/client.py +42 -11
- {gemini_webapi-1.7.0 → gemini_webapi-1.8.0}/src/gemini_webapi/constants.py +25 -0
- {gemini_webapi-1.7.0 → gemini_webapi-1.8.0}/src/gemini_webapi.egg-info/PKG-INFO +32 -4
- {gemini_webapi-1.7.0 → gemini_webapi-1.8.0}/tests/test_client_features.py +13 -4
- {gemini_webapi-1.7.0 → gemini_webapi-1.8.0}/.github/dependabot.yml +0 -0
- {gemini_webapi-1.7.0 → gemini_webapi-1.8.0}/.github/workflows/github-release.yml +0 -0
- {gemini_webapi-1.7.0 → gemini_webapi-1.8.0}/.gitignore +0 -0
- {gemini_webapi-1.7.0 → gemini_webapi-1.8.0}/.vscode/launch.json +0 -0
- {gemini_webapi-1.7.0 → gemini_webapi-1.8.0}/.vscode/settings.json +0 -0
- {gemini_webapi-1.7.0 → gemini_webapi-1.8.0}/LICENSE +0 -0
- {gemini_webapi-1.7.0 → gemini_webapi-1.8.0}/assets/banner.png +0 -0
- {gemini_webapi-1.7.0 → gemini_webapi-1.8.0}/assets/favicon.png +0 -0
- {gemini_webapi-1.7.0 → gemini_webapi-1.8.0}/assets/logo.svg +0 -0
- {gemini_webapi-1.7.0 → gemini_webapi-1.8.0}/pyproject.toml +0 -0
- {gemini_webapi-1.7.0 → gemini_webapi-1.8.0}/setup.cfg +0 -0
- {gemini_webapi-1.7.0 → gemini_webapi-1.8.0}/src/gemini_webapi/__init__.py +0 -0
- {gemini_webapi-1.7.0 → gemini_webapi-1.8.0}/src/gemini_webapi/exceptions.py +0 -0
- {gemini_webapi-1.7.0 → gemini_webapi-1.8.0}/src/gemini_webapi/types/__init__.py +0 -0
- {gemini_webapi-1.7.0 → gemini_webapi-1.8.0}/src/gemini_webapi/types/candidate.py +0 -0
- {gemini_webapi-1.7.0 → gemini_webapi-1.8.0}/src/gemini_webapi/types/image.py +0 -0
- {gemini_webapi-1.7.0 → gemini_webapi-1.8.0}/src/gemini_webapi/types/modeloutput.py +0 -0
- {gemini_webapi-1.7.0 → gemini_webapi-1.8.0}/src/gemini_webapi/utils/__init__.py +0 -0
- {gemini_webapi-1.7.0 → gemini_webapi-1.8.0}/src/gemini_webapi/utils/get_access_token.py +0 -0
- {gemini_webapi-1.7.0 → gemini_webapi-1.8.0}/src/gemini_webapi/utils/load_browser_cookies.py +0 -0
- {gemini_webapi-1.7.0 → gemini_webapi-1.8.0}/src/gemini_webapi/utils/logger.py +0 -0
- {gemini_webapi-1.7.0 → gemini_webapi-1.8.0}/src/gemini_webapi/utils/rotate_1psidts.py +0 -0
- {gemini_webapi-1.7.0 → gemini_webapi-1.8.0}/src/gemini_webapi/utils/upload_file.py +0 -0
- {gemini_webapi-1.7.0 → gemini_webapi-1.8.0}/src/gemini_webapi.egg-info/SOURCES.txt +0 -0
- {gemini_webapi-1.7.0 → gemini_webapi-1.8.0}/src/gemini_webapi.egg-info/dependency_links.txt +0 -0
- {gemini_webapi-1.7.0 → gemini_webapi-1.8.0}/src/gemini_webapi.egg-info/requires.txt +0 -0
- {gemini_webapi-1.7.0 → gemini_webapi-1.8.0}/src/gemini_webapi.egg-info/top_level.txt +0 -0
- {gemini_webapi-1.7.0 → gemini_webapi-1.8.0}/tests/test_rotate_cookies.py +0 -0
- {gemini_webapi-1.7.0 → gemini_webapi-1.8.0}/tests/test_save_image.py +0 -0
PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: gemini-webapi
-Version: 1.7.0
+Version: 1.8.0
 Summary: ✨ An elegant async Python wrapper for Google Gemini web app
 Author: UZQueen
 License: GNU AFFERO GENERAL PUBLIC LICENSE

@@ -732,6 +732,7 @@ A reverse-engineered asynchronous python wrapper for [Google Gemini](https://gem
 - [Retrieve images in response](#retrieve-images-in-response)
 - [Generate images with ImageFx](#generate-images-with-imagefx)
 - [Save images to local files](#save-images-to-local-files)
+- [Specify language model version](#specify-language-model-version)
 - [Generate contents with Gemini extensions](#generate-contents-with-gemini-extensions)
 - [Check and switch to other reply candidates](#check-and-switch-to-other-reply-candidates)
 - [Control log level](#control-log-level)

@@ -774,9 +775,9 @@ pip install -U browser-cookie3
 
 ```yaml
 services:
-
-
-
+    main:
+        volumes:
+            - ./gemini_cookies:/usr/local/lib/python3.12/site-packages/gemini_webapi/utils/temp
 ```
 
 > [!NOTE]

@@ -937,6 +938,33 @@ async def main():
 asyncio.run(main())
 ```
 
+### Specify language model version
+
+You can choose a specified language model version by passing `model` argument to `GeminiClient.generate_content` or `GeminiClient.start_chat`. The default value is `unspecified`.
+
+Currently available models (as of Dec 21, 2024):
+
+- `unspecified` - Default model (Gemini 1.5 Flash)
+- `gemini-1.5-flash` - Gemini 1.5 Flash
+- `gemini-2.0-flash-exp` - Gemini 2.0 Flash Experimental
+
+```python
+from gemini_webapi.constants import Model
+
+async def main():
+    response1 = await client.generate_content(
+        "What's you language model version? Reply version number only.",
+        model="gemini-1.5-flash",
+    )
+    print(f"Model version (gemini-1.5-flash): {response1.text}")
+
+    chat = client.start_chat(model=Model.G_2_0_FLASH_EXP)
+    response2 = await chat.send_message("What's you language model version? Reply version number only.")
+    print(f"Model version ({Model.G_2_0_FLASH_EXP.model_name}): {response2.text}")
+
+asyncio.run(main())
+```
+
 ### Generate contents with Gemini extensions
 
 > [!IMPORTANT]
README.md

@@ -50,6 +50,7 @@ A reverse-engineered asynchronous python wrapper for [Google Gemini](https://gem
 - [Retrieve images in response](#retrieve-images-in-response)
 - [Generate images with ImageFx](#generate-images-with-imagefx)
 - [Save images to local files](#save-images-to-local-files)
+- [Specify language model version](#specify-language-model-version)
 - [Generate contents with Gemini extensions](#generate-contents-with-gemini-extensions)
 - [Check and switch to other reply candidates](#check-and-switch-to-other-reply-candidates)
 - [Control log level](#control-log-level)

@@ -92,9 +93,9 @@ pip install -U browser-cookie3
 
 ```yaml
 services:
-
-
-
+    main:
+        volumes:
+            - ./gemini_cookies:/usr/local/lib/python3.12/site-packages/gemini_webapi/utils/temp
 ```
 
 > [!NOTE]

@@ -255,6 +256,33 @@ async def main():
 asyncio.run(main())
 ```
 
+### Specify language model version
+
+You can choose a specified language model version by passing `model` argument to `GeminiClient.generate_content` or `GeminiClient.start_chat`. The default value is `unspecified`.
+
+Currently available models (as of Dec 21, 2024):
+
+- `unspecified` - Default model (Gemini 1.5 Flash)
+- `gemini-1.5-flash` - Gemini 1.5 Flash
+- `gemini-2.0-flash-exp` - Gemini 2.0 Flash Experimental
+
+```python
+from gemini_webapi.constants import Model
+
+async def main():
+    response1 = await client.generate_content(
+        "What's you language model version? Reply version number only.",
+        model="gemini-1.5-flash",
+    )
+    print(f"Model version (gemini-1.5-flash): {response1.text}")
+
+    chat = client.start_chat(model=Model.G_2_0_FLASH_EXP)
+    response2 = await chat.send_message("What's you language model version? Reply version number only.")
+    print(f"Model version ({Model.G_2_0_FLASH_EXP.model_name}): {response2.text}")
+
+asyncio.run(main())
+```
+
 ### Generate contents with Gemini extensions
 
 > [!IMPORTANT]
src/gemini_webapi/client.py

@@ -8,7 +8,7 @@ from typing import Any, Optional
 
 from httpx import AsyncClient, ReadTimeout
 
-from .constants import Endpoint, Headers
+from .constants import Endpoint, Headers, Model
 from .exceptions import AuthError, APIError, TimeoutError, GeminiError
 from .types import WebImage, GeneratedImage, Candidate, ModelOutput
 from .utils import (

@@ -79,6 +79,9 @@ class GeminiClient:
         __Secure-1PSIDTS cookie value, some google accounts don't require this value, provide only if it's in the cookie list.
     proxy: `str`, optional
         Proxy URL.
+    kwargs: `dict`, optional
+        Additional arguments which will be passed to the http client.
+        Refer to `httpx.AsyncClient` for more information.
 
     Raises
     ------

@@ -98,6 +101,7 @@ class GeminiClient:
         "close_task",
         "auto_refresh",
         "refresh_interval",
+        "kwargs",
     ]
 
     def __init__(

@@ -105,6 +109,7 @@ class GeminiClient:
         secure_1psid: str | None = None,
         secure_1psidts: str | None = None,
         proxy: str | None = None,
+        **kwargs,
     ):
         self.cookies = {}
         self.proxy = proxy

@@ -117,6 +122,7 @@ class GeminiClient:
         self.close_task: Task | None = None
         self.auto_refresh: bool = True
         self.refresh_interval: float = 540
+        self.kwargs = kwargs
 
         # Validate cookies
         if secure_1psid:

@@ -173,6 +179,7 @@ class GeminiClient:
             follow_redirects=True,
             headers=Headers.GEMINI.value,
             cookies=valid_cookies,
+            **self.kwargs,
         )
         self.access_token = access_token
         self.cookies = valid_cookies

@@ -256,7 +263,9 @@ class GeminiClient:
         self,
         prompt: str,
         images: list[bytes | str | Path] | None = None,
+        model: Model | str = Model.UNSPECIFIED,
         chat: Optional["ChatSession"] = None,
+        **kwargs,
     ) -> ModelOutput:
         """
         Generates contents with prompt.

@@ -267,8 +276,14 @@ class GeminiClient:
             Prompt provided by user.
         images: `list[bytes | str | Path]`, optional
             List of image file paths or file data in bytes.
+        model: `Model` | `str`, optional
+            Specify the model to use for generation.
+            Pass either a `gemini_webapi.constants.Model` enum or a model name string.
         chat: `ChatSession`, optional
             Chat data to retrieve conversation history. If None, will automatically generate a new chat id when sending post request.
+        kwargs: `dict`, optional
+            Additional arguments which will be passed to the post request.
+            Refer to `httpx.AsyncClient.request` for more information.
 
         Returns
         -------

@@ -291,12 +306,16 @@ class GeminiClient:
 
         assert prompt, "Prompt cannot be empty."
 
+        if not isinstance(model, Model):
+            model = Model.from_name(model)
+
         if self.auto_close:
             await self.reset_close_task()
 
         try:
             response = await self.client.post(
                 Endpoint.GENERATE.value,
+                headers=model.model_header,
                 data={
                     "at": self.access_token,
                     "f.req": json.dumps(

@@ -311,12 +330,8 @@ class GeminiClient:
                             None,
                             [
                                 [
-                                    [
-                                        await upload_file(
-                                            image, self.proxy
-                                        ),
-                                        1,
-                                    ]
+                                    [await upload_file(image, self.proxy)],
+                                    "filename.jpg",
                                 ]
                                 for image in images
                             ],

@@ -329,6 +344,7 @@ class GeminiClient:
                         ]
                     ),
                 },
+                **kwargs,
             )
         except ReadTimeout:
             raise TimeoutError(

@@ -435,12 +451,13 @@ class GeminiClient:
         Parameters
         ----------
         kwargs: `dict`, optional
-
+            Additional arguments which will be passed to the chat session.
+            Refer to `gemini_webapi.ChatSession` for more information.
 
         Returns
         -------
         :class:`ChatSession`
-            Empty chat object for retrieving conversation history.
+            Empty chat session object for retrieving conversation history.
         """
 
         return ChatSession(geminiclient=self, **kwargs)

@@ -462,9 +479,17 @@ class ChatSession:
         Reply id, if provided together with metadata, will override the second value in it.
     rcid: `str`, optional
         Reply candidate id, if provided together with metadata, will override the third value in it.
+    model: `Model` | `str`, optional
+        Specify the model to use for generation.
+        Pass either a `gemini_webapi.constants.Model` enum or a model name string.
     """
 
-    __slots__ = [
+    __slots__ = [
+        "__metadata",
+        "geminiclient",
+        "last_output",
+        "model",
+    ]
 
     def __init__(
         self,

@@ -473,10 +498,12 @@ class ChatSession:
         cid: str | None = None,  # chat id
         rid: str | None = None,  # reply id
         rcid: str | None = None,  # reply candidate id
+        model: Model | str = Model.UNSPECIFIED,
     ):
         self.__metadata: list[str | None] = [None, None, None]
         self.geminiclient: GeminiClient = geminiclient
         self.last_output: ModelOutput | None = None
+        self.model = model
 
         if metadata:
             self.metadata = metadata

@@ -503,6 +530,7 @@ class ChatSession:
         self,
         prompt: str,
         images: list[bytes | str | Path] | None = None,
+        **kwargs,
     ) -> ModelOutput:
         """
         Generates contents with prompt.

@@ -514,6 +542,9 @@ class ChatSession:
             Prompt provided by user.
         images: `list[bytes | str | Path]`, optional
             List of image file paths or file data in bytes.
+        kwargs: `dict`, optional
+            Additional arguments which will be passed to the post request.
+            Refer to `httpx.AsyncClient.request` for more information.
 
         Returns
         -------

@@ -535,7 +566,7 @@ class ChatSession:
         """
 
         return await self.geminiclient.generate_content(
-            prompt=prompt, images=images, chat=self
+            prompt=prompt, images=images, model=self.model, chat=self, **kwargs
         )
 
     def choose_candidate(self, index: int) -> ModelOutput:
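Taken together, the client.py changes let a caller pick a model per request or per chat session and forward extra options to the underlying httpx client. A minimal usage sketch, assuming placeholder cookie values and an illustrative `timeout` keyword (neither appears in the diff above):

```python
import asyncio

from gemini_webapi import GeminiClient
from gemini_webapi.constants import Model


async def main():
    # Constructor **kwargs are forwarded to httpx.AsyncClient; timeout=30 is illustrative.
    client = GeminiClient("<__Secure-1PSID>", "<__Secure-1PSIDTS>", timeout=30)
    await client.init()

    # Per request: accept either a Model enum member or its name string.
    response = await client.generate_content("Hello!", model=Model.G_2_0_FLASH_EXP)
    print(response.text)

    # Per session: ChatSession stores the model and reuses it for every send_message call.
    chat = client.start_chat(model="gemini-1.5-flash")
    reply = await chat.send_message("Hello again!")
    print(reply.text)


asyncio.run(main())
```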
src/gemini_webapi/constants.py

@@ -21,3 +21,28 @@ class Headers(Enum):
         "Content-Type": "application/json",
     }
     UPLOAD = {"Push-ID": "feeds/mcudyrk2a4khkz"}
+
+
+class Model(Enum):
+    UNSPECIFIED = ("unspecified", {})
+    G_1_5_FLASH = (
+        "gemini-1.5-flash",
+        {"x-goog-ext-525001261-jspb": '[null,null,null,null,"7daceb7ef88130f5"]'},
+    )
+    G_2_0_FLASH_EXP = (
+        "gemini-2.0-flash-exp",
+        {"x-goog-ext-525001261-jspb": '[null,null,null,null,"948b866104ccf484"]'},
+    )
+
+    def __init__(self, name, header):
+        self.model_name = name
+        self.model_header = header
+
+    @classmethod
+    def from_name(cls, name: str):
+        for model in cls:
+            if model.model_name == name:
+                return model
+        raise ValueError(
+            f"Unknown model name: {name}. Available models: {', '.join([model.model_name for model in cls])}"
+        )
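The new `Model` enum binds each public model name to the extra header sent with the generation request (see the `headers=model.model_header` call in client.py above), and `Model.from_name` resolves the string form back to an enum member. A short sketch of that lookup, based only on the members defined in the diff:

```python
from gemini_webapi.constants import Model

# Resolve a member from its public name string.
model = Model.from_name("gemini-2.0-flash-exp")
assert model is Model.G_2_0_FLASH_EXP

# Each member carries the name plus the header added to the generate request.
print(model.model_name)    # gemini-2.0-flash-exp
print(model.model_header)  # {'x-goog-ext-525001261-jspb': '[null,null,null,null,"948b866104ccf484"]'}

# The default member sends no extra header.
print(Model.UNSPECIFIED.model_header)  # {}

# Unknown names raise ValueError listing the available models.
try:
    Model.from_name("not-a-model")
except ValueError as exc:
    print(exc)
```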
src/gemini_webapi.egg-info/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: gemini-webapi
-Version: 1.7.0
+Version: 1.8.0
 Summary: ✨ An elegant async Python wrapper for Google Gemini web app
 Author: UZQueen
 License: GNU AFFERO GENERAL PUBLIC LICENSE

@@ -732,6 +732,7 @@ A reverse-engineered asynchronous python wrapper for [Google Gemini](https://gem
 - [Retrieve images in response](#retrieve-images-in-response)
 - [Generate images with ImageFx](#generate-images-with-imagefx)
 - [Save images to local files](#save-images-to-local-files)
+- [Specify language model version](#specify-language-model-version)
 - [Generate contents with Gemini extensions](#generate-contents-with-gemini-extensions)
 - [Check and switch to other reply candidates](#check-and-switch-to-other-reply-candidates)
 - [Control log level](#control-log-level)

@@ -774,9 +775,9 @@ pip install -U browser-cookie3
 
 ```yaml
 services:
-
-
-
+    main:
+        volumes:
+            - ./gemini_cookies:/usr/local/lib/python3.12/site-packages/gemini_webapi/utils/temp
 ```
 
 > [!NOTE]

@@ -937,6 +938,33 @@ async def main():
 asyncio.run(main())
 ```
 
+### Specify language model version
+
+You can choose a specified language model version by passing `model` argument to `GeminiClient.generate_content` or `GeminiClient.start_chat`. The default value is `unspecified`.
+
+Currently available models (as of Dec 21, 2024):
+
+- `unspecified` - Default model (Gemini 1.5 Flash)
+- `gemini-1.5-flash` - Gemini 1.5 Flash
+- `gemini-2.0-flash-exp` - Gemini 2.0 Flash Experimental
+
+```python
+from gemini_webapi.constants import Model
+
+async def main():
+    response1 = await client.generate_content(
+        "What's you language model version? Reply version number only.",
+        model="gemini-1.5-flash",
+    )
+    print(f"Model version (gemini-1.5-flash): {response1.text}")
+
+    chat = client.start_chat(model=Model.G_2_0_FLASH_EXP)
+    response2 = await chat.send_message("What's you language model version? Reply version number only.")
+    print(f"Model version ({Model.G_2_0_FLASH_EXP.model_name}): {response2.text}")
+
+asyncio.run(main())
+```
+
 ### Generate contents with Gemini extensions
 
 > [!IMPORTANT]
tests/test_client_features.py

@@ -6,6 +6,7 @@ from pathlib import Path
 from loguru import logger
 
 from gemini_webapi import GeminiClient, AuthError, set_log_level
+from gemini_webapi.constants import Model
 
 logging.getLogger("asyncio").setLevel(logging.ERROR)
 set_log_level("DEBUG")

@@ -27,10 +28,20 @@ class TestGeminiClient(unittest.IsolatedAsyncioTestCase):
         response = await self.geminiclient.generate_content("Hello World!")
         self.assertTrue(response.text)
 
+    @logger.catch(reraise=True)
+    async def test_switch_model(self):
+        for model in Model:
+            response = await self.geminiclient.generate_content(
+                "What's you language model version? Reply version number only.",
+                model=model,
+            )
+            logger.debug(f"Model version ({model.model_name}): {response.text}")
+
     @logger.catch(reraise=True)
     async def test_upload_image(self):
         response = await self.geminiclient.generate_content(
-            "Describe these images",
+            "Describe these images",
+            images=[Path("assets/banner.png"), "assets/favicon.png"],
         )
         logger.debug(response.text)
 

@@ -86,9 +97,7 @@ class TestGeminiClient(unittest.IsolatedAsyncioTestCase):
 
     @logger.catch(reraise=True)
     async def test_card_content(self):
-        response = await self.geminiclient.generate_content(
-            "How is today's weather?"
-        )
+        response = await self.geminiclient.generate_content("How is today's weather?")
         logger.debug(response.text)
 
     @logger.catch(reraise=True)