llm-gemini 0.4.1__tar.gz → 0.5__tar.gz
This diff shows the changes between two publicly released versions of the package, as published to its public registry. It is provided for informational purposes only.
- {llm_gemini-0.4.1 → llm_gemini-0.5}/PKG-INFO +17 -3
- {llm_gemini-0.4.1 → llm_gemini-0.5}/README.md +15 -1
- {llm_gemini-0.4.1 → llm_gemini-0.5}/llm_gemini.egg-info/PKG-INFO +17 -3
- {llm_gemini-0.4.1 → llm_gemini-0.5}/llm_gemini.egg-info/requires.txt +1 -1
- {llm_gemini-0.4.1 → llm_gemini-0.5}/llm_gemini.py +16 -0
- {llm_gemini-0.4.1 → llm_gemini-0.5}/pyproject.toml +2 -2
- {llm_gemini-0.4.1 → llm_gemini-0.5}/tests/test_gemini.py +6 -1
- {llm_gemini-0.4.1 → llm_gemini-0.5}/LICENSE +0 -0
- {llm_gemini-0.4.1 → llm_gemini-0.5}/llm_gemini.egg-info/SOURCES.txt +0 -0
- {llm_gemini-0.4.1 → llm_gemini-0.5}/llm_gemini.egg-info/dependency_links.txt +0 -0
- {llm_gemini-0.4.1 → llm_gemini-0.5}/llm_gemini.egg-info/entry_points.txt +0 -0
- {llm_gemini-0.4.1 → llm_gemini-0.5}/llm_gemini.egg-info/top_level.txt +0 -0
- {llm_gemini-0.4.1 → llm_gemini-0.5}/setup.cfg +0 -0
{llm_gemini-0.4.1 → llm_gemini-0.5}/PKG-INFO:

````diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: llm-gemini
-Version: 0.4.1
+Version: 0.5
 Summary: LLM plugin to access Google's Gemini family of models
 Author: Simon Willison
 License: Apache-2.0
@@ -11,7 +11,7 @@ Project-URL: CI, https://github.com/simonw/llm-gemini/actions
 Classifier: License :: OSI Approved :: Apache Software License
 Description-Content-Type: text/markdown
 License-File: LICENSE
-Requires-Dist: llm>=0.
+Requires-Dist: llm>=0.19
 Requires-Dist: httpx
 Requires-Dist: ijson
 Provides-Extra: test
@@ -60,7 +60,8 @@ Other models are:
 
 - `gemini-1.5-flash-latest`
 - `gemini-1.5-flash-8b-latest` - the least expensive
-- `gemini-exp-1114` - recent experimental
+- `gemini-exp-1114` - recent experimental #1
+- `gemini-exp-1121` - recent experimental #2
 
 ### Images, audio and video
 
@@ -157,3 +158,16 @@ To run the tests:
 ```bash
 pytest
 ```
+
+This project uses [pytest-recording](https://github.com/kiwicom/pytest-recording) to record Gemini API responses for the tests.
+
+If you add a new test that calls the API you can capture the API response like this:
+```bash
+PYTEST_GEMINI_API_KEY="$(llm keys get gemini)" pytest --record-mode once
+```
+You will need to have stored a valid Gemini API key using this command first:
+```bash
+llm keys set gemini
+# Paste key here
+```
+
````
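The testing instructions added above describe the capture workflow. For context, a minimal sketch of what a test recorded that way tends to look like, assuming pytest-recording's `vcr` marker and the `PYTEST_GEMINI_API_KEY` fallback pattern visible in the tests/test_gemini.py diff further down; the test body and placeholder key are illustrative, not the project's actual test:

```python
import os

import llm
import pytest

# Placeholder fallback so replayed cassettes work without a real key
GEMINI_API_KEY = os.environ.get("PYTEST_GEMINI_API_KEY", None) or "gm-..."


@pytest.mark.vcr  # pytest-recording replays the stored Gemini API response
def test_prompt_sketch():
    model = llm.get_model("gemini-1.5-flash-latest")
    model.key = model.key or GEMINI_API_KEY
    response = model.prompt("Name a pet pelican")
    assert response.text()
```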
{llm_gemini-0.4.1 → llm_gemini-0.5}/README.md:

````diff
@@ -40,7 +40,8 @@ Other models are:
 
 - `gemini-1.5-flash-latest`
 - `gemini-1.5-flash-8b-latest` - the least expensive
-- `gemini-exp-1114` - recent experimental
+- `gemini-exp-1114` - recent experimental #1
+- `gemini-exp-1121` - recent experimental #2
 
 ### Images, audio and video
 
@@ -137,3 +138,16 @@ To run the tests:
 ```bash
 pytest
 ```
+
+This project uses [pytest-recording](https://github.com/kiwicom/pytest-recording) to record Gemini API responses for the tests.
+
+If you add a new test that calls the API you can capture the API response like this:
+```bash
+PYTEST_GEMINI_API_KEY="$(llm keys get gemini)" pytest --record-mode once
+```
+You will need to have stored a valid Gemini API key using this command first:
+```bash
+llm keys set gemini
+# Paste key here
+```
+
````
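With `gemini-exp-1121` registered (see the llm_gemini.py diff below), it becomes addressable like any other model ID. A minimal sketch of calling it from Python, assuming the plugin is installed, a key is stored, and the experimental model is still being served by Google:

```python
import llm

# `gemini-exp-1121` is the model ID registered in 0.5; availability of
# experimental models is up to Google and may change.
model = llm.get_model("gemini-exp-1121")
response = model.prompt("Say hello in one short sentence")
print(response.text())
```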
{llm_gemini-0.4.1 → llm_gemini-0.5}/llm_gemini.egg-info/PKG-INFO:

Identical to the PKG-INFO diff above; the egg-info copy mirrors the package metadata: version bumped to 0.5, dependency raised to `llm>=0.19`, the `gemini-exp-1121` model line added, and the pytest-recording test instructions appended.
{llm_gemini-0.4.1 → llm_gemini-0.5}/llm_gemini.py:

````diff
@@ -38,6 +38,7 @@ def register_models(register):
         "gemini-1.5-flash-8b-latest",
         "gemini-1.5-flash-8b-001",
         "gemini-exp-1114",
+        "gemini-exp-1121",
     ]:
         register(GeminiPro(model_id), AsyncGeminiPro(model_id))
 
@@ -210,6 +211,19 @@ class _SharedGemini:
             return f'```\n{part["codeExecutionResult"]["output"].strip()}\n```\n'
         return ""
 
+    def set_usage(self, response):
+        try:
+            usage = response.response_json[-1].pop("usageMetadata")
+            input_tokens = usage.pop("promptTokenCount", None)
+            output_tokens = usage.pop("candidatesTokenCount", None)
+            usage.pop("totalTokenCount", None)
+            if input_tokens is not None:
+                response.set_usage(
+                    input=input_tokens, output=output_tokens, details=usage or None
+                )
+        except (IndexError, KeyError):
+            pass
+
 
 class GeminiPro(_SharedGemini, llm.Model):
     def execute(self, prompt, stream, response, conversation):
@@ -241,6 +255,7 @@ class GeminiPro(_SharedGemini, llm.Model):
                 gathered.append(event)
                 events.clear()
         response.response_json = gathered
+        self.set_usage(response)
 
 
 class AsyncGeminiPro(_SharedGemini, llm.AsyncModel):
@@ -274,6 +289,7 @@ class AsyncGeminiPro(_SharedGemini, llm.AsyncModel):
                 gathered.append(event)
                 events.clear()
         response.response_json = gathered
+        self.set_usage(response)
 
 
 @llm.hookimpl
````
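The new `set_usage()` method pulls the `usageMetadata` block off the final streamed event, maps `promptTokenCount` to input tokens and `candidatesTokenCount` to output tokens, drops the redundant `totalTokenCount`, and forwards any leftover keys as details; the `except (IndexError, KeyError)` means a response with no events or no usage block simply records nothing rather than raising. A standalone sketch of that parsing against a hand-built payload (the `candidatesTokenCount` value here is hypothetical, for illustration):

```python
# Standalone approximation of the parsing inside set_usage(), not the
# plugin method itself. The last streamed event carries usageMetadata.
events = [
    {
        "candidates": [{"content": {"parts": [{"text": "Percy"}], "role": "model"}}],
        "usageMetadata": {
            "promptTokenCount": 10,     # becomes the input token count
            "candidatesTokenCount": 2,  # becomes the output count (hypothetical)
            "totalTokenCount": 12,      # dropped: derivable from the other two
        },
    }
]
usage = events[-1].pop("usageMetadata")
input_tokens = usage.pop("promptTokenCount", None)
output_tokens = usage.pop("candidatesTokenCount", None)
usage.pop("totalTokenCount", None)
print(input_tokens, output_tokens, usage or None)  # -> 10 2 None
```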
{llm_gemini-0.4.1 → llm_gemini-0.5}/pyproject.toml:

````diff
@@ -1,6 +1,6 @@
 [project]
 name = "llm-gemini"
-version = "0.4.1"
+version = "0.5"
 description = "LLM plugin to access Google's Gemini family of models"
 readme = "README.md"
 authors = [{name = "Simon Willison"}]
@@ -9,7 +9,7 @@ classifiers = [
     "License :: OSI Approved :: Apache Software License"
 ]
 dependencies = [
-    "llm>=0.
+    "llm>=0.19",
     "httpx",
     "ijson"
 ]
````
{llm_gemini-0.4.1 → llm_gemini-0.5}/tests/test_gemini.py:

````diff
@@ -17,10 +17,15 @@ async def test_prompt():
             "candidates": [
                 {"content": {"parts": [{"text": "Percy"}], "role": "model"}}
             ],
-            "usageMetadata": {"promptTokenCount": 10, "totalTokenCount": 10},
             "modelVersion": "gemini-1.5-flash-002",
         }
     ]
+    assert response.token_details is None
+    assert response.input_tokens == 10
+    # Not sure why our pytest-recording setup doesn't report output tokens
+    # https://github.com/simonw/llm-gemini/issues/25#issuecomment-2487464339
+    assert response.output_tokens is None
+
     # And try it async too
     async_model = llm.get_async_model("gemini-1.5-flash-latest")
    async_model.key = async_model.key or GEMINI_API_KEY
````
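The new assertions exercise the token accounting end to end via `response.input_tokens`, `response.output_tokens` and `response.token_details`. The same attributes can be inspected on any response once it has been consumed; a minimal sketch, assuming the plugin is installed and a key is stored:

```python
import llm

model = llm.get_model("gemini-1.5-flash-latest")
response = model.prompt("Name a good name for a pet pelican")
print(response.text())  # consuming the response populates the usage fields
print("input tokens:", response.input_tokens)
print("output tokens:", response.output_tokens)
print("details:", response.token_details)
```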