llm-gemini 0.10__tar.gz → 0.11__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {llm_gemini-0.10 → llm_gemini-0.11}/PKG-INFO +3 -2
- {llm_gemini-0.10 → llm_gemini-0.11}/llm_gemini.egg-info/PKG-INFO +3 -2
- {llm_gemini-0.10 → llm_gemini-0.11}/llm_gemini.egg-info/requires.txt +2 -1
- {llm_gemini-0.10 → llm_gemini-0.11}/llm_gemini.py +8 -9
- {llm_gemini-0.10 → llm_gemini-0.11}/pyproject.toml +3 -3
- {llm_gemini-0.10 → llm_gemini-0.11}/tests/test_gemini.py +8 -5
- {llm_gemini-0.10 → llm_gemini-0.11}/LICENSE +0 -0
- {llm_gemini-0.10 → llm_gemini-0.11}/README.md +0 -0
- {llm_gemini-0.10 → llm_gemini-0.11}/llm_gemini.egg-info/SOURCES.txt +0 -0
- {llm_gemini-0.10 → llm_gemini-0.11}/llm_gemini.egg-info/dependency_links.txt +0 -0
- {llm_gemini-0.10 → llm_gemini-0.11}/llm_gemini.egg-info/entry_points.txt +0 -0
- {llm_gemini-0.10 → llm_gemini-0.11}/llm_gemini.egg-info/top_level.txt +0 -0
- {llm_gemini-0.10 → llm_gemini-0.11}/setup.cfg +0 -0
{llm_gemini-0.10 → llm_gemini-0.11}/PKG-INFO

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: llm-gemini
-Version: 0.10
+Version: 0.11
 Summary: LLM plugin to access Google's Gemini family of models
 Author: Simon Willison
 License: Apache-2.0
@@ -11,12 +11,13 @@ Project-URL: CI, https://github.com/simonw/llm-gemini/actions
 Classifier: License :: OSI Approved :: Apache Software License
 Description-Content-Type: text/markdown
 License-File: LICENSE
-Requires-Dist: llm>=0.
+Requires-Dist: llm>=0.22
 Requires-Dist: httpx
 Requires-Dist: ijson
 Provides-Extra: test
 Requires-Dist: pytest; extra == "test"
 Requires-Dist: pytest-recording; extra == "test"
+Requires-Dist: nest-asyncio; extra == "test"
 
 # llm-gemini
 
```
{llm_gemini-0.10 → llm_gemini-0.11}/llm_gemini.egg-info/PKG-INFO

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: llm-gemini
-Version: 0.10
+Version: 0.11
 Summary: LLM plugin to access Google's Gemini family of models
 Author: Simon Willison
 License: Apache-2.0
@@ -11,12 +11,13 @@ Project-URL: CI, https://github.com/simonw/llm-gemini/actions
 Classifier: License :: OSI Approved :: Apache Software License
 Description-Content-Type: text/markdown
 License-File: LICENSE
-Requires-Dist: llm>=0.
+Requires-Dist: llm>=0.22
 Requires-Dist: httpx
 Requires-Dist: ijson
 Provides-Extra: test
 Requires-Dist: pytest; extra == "test"
 Requires-Dist: pytest-recording; extra == "test"
+Requires-Dist: nest-asyncio; extra == "test"
 
 # llm-gemini
 
```
{llm_gemini-0.10 → llm_gemini-0.11}/llm_gemini.py

```diff
@@ -32,6 +32,7 @@ GOOGLE_SEARCH_MODELS = {
     "gemini-1.5-pro-002",
     "gemini-1.5-flash-002",
     "gemini-2.0-flash-exp",
+    "gemini-2.0-flash",
 }
 
 
@@ -220,7 +221,7 @@ class _SharedGemini:
         if prompt.options and prompt.options.code_execution:
             body["tools"] = [{"codeExecution": {}}]
         if prompt.options and self.can_google_search and prompt.options.google_search:
-            body["tools"] = [{"
+            body["tools"] = [{"google_search": {}}]
         if prompt.system:
             body["systemInstruction"] = {"parts": [{"text": prompt.system}]}
 
@@ -268,9 +269,8 @@ class _SharedGemini:
         pass
 
 
-class GeminiPro(_SharedGemini, llm.Model):
-    def execute(self, prompt, stream, response, conversation):
-        key = self.get_key()
+class GeminiPro(_SharedGemini, llm.KeyModel):
+    def execute(self, prompt, stream, response, conversation, key):
         url = f"https://generativelanguage.googleapis.com/v1beta/models/{self.model_id}:streamGenerateContent"
         gathered = []
         body = self.build_request_body(prompt, conversation)
@@ -279,7 +279,7 @@ class GeminiPro(_SharedGemini, llm.Model):
             "POST",
             url,
             timeout=None,
-            headers={"x-goog-api-key": key},
+            headers={"x-goog-api-key": self.get_key(key)},
             json=body,
         ) as http_response:
             events = ijson.sendable_list()
@@ -301,9 +301,8 @@ class GeminiPro(_SharedGemini, llm.Model):
         self.set_usage(response)
 
 
-class AsyncGeminiPro(_SharedGemini, llm.AsyncModel):
-    async def execute(self, prompt, stream, response, conversation):
-        key = self.get_key()
+class AsyncGeminiPro(_SharedGemini, llm.AsyncKeyModel):
+    async def execute(self, prompt, stream, response, conversation, key):
         url = f"https://generativelanguage.googleapis.com/v1beta/models/{self.model_id}:streamGenerateContent"
         gathered = []
         body = self.build_request_body(prompt, conversation)
@@ -313,7 +312,7 @@ class AsyncGeminiPro(_SharedGemini, llm.AsyncModel):
             "POST",
             url,
             timeout=None,
-            headers={"x-goog-api-key": key},
+            headers={"x-goog-api-key": self.get_key(key)},
             json=body,
         ) as http_response:
             events = ijson.sendable_list()
```
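Taken together, these hunks move `GeminiPro` and `AsyncGeminiPro` from `llm.Model`/`llm.AsyncModel` to `llm.KeyModel`/`llm.AsyncKeyModel` (hence the new `llm>=0.22` floor): `execute()` now receives the key as an argument and resolves it with `self.get_key(key)` instead of calling `self.get_key()` itself. A minimal sketch of the resulting shape, condensed from the lines above; the class name and request body here are illustrative placeholders, not the plugin's full implementation:

```python
import httpx
import llm


class KeyModelSketch(llm.KeyModel):
    """Illustrative sketch of the 0.11 execute() signature, not the real plugin class."""

    model_id = "gemini-1.5-flash-latest"

    def execute(self, prompt, stream, response, conversation, key):
        url = (
            "https://generativelanguage.googleapis.com/v1beta/models/"
            f"{self.model_id}:streamGenerateContent"
        )
        # Simplified body; the plugin assembles this via build_request_body().
        body = {"contents": [{"role": "user", "parts": [{"text": prompt.prompt}]}]}
        with httpx.stream(
            "POST",
            url,
            timeout=None,
            # get_key(key) prefers the key handed in by llm, falling back to
            # whatever key the user has configured, matching the diff above.
            headers={"x-goog-api-key": self.get_key(key)},
            json=body,
        ) as http_response:
            for chunk in http_response.iter_bytes():
                yield ""  # streaming/parsing elided; the plugin uses ijson here
```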
{llm_gemini-0.10 → llm_gemini-0.11}/pyproject.toml

```diff
@@ -1,6 +1,6 @@
 [project]
 name = "llm-gemini"
-version = "0.10"
+version = "0.11"
 description = "LLM plugin to access Google's Gemini family of models"
 readme = "README.md"
 authors = [{name = "Simon Willison"}]
@@ -9,7 +9,7 @@ classifiers = [
     "License :: OSI Approved :: Apache Software License"
 ]
 dependencies = [
-    "llm>=0.
+    "llm>=0.22",
     "httpx",
     "ijson"
 ]
@@ -24,4 +24,4 @@ CI = "https://github.com/simonw/llm-gemini/actions"
 gemini = "llm_gemini"
 
 [project.optional-dependencies]
-test = ["pytest", "pytest-recording"]
+test = ["pytest", "pytest-recording", "nest-asyncio"]
```
{llm_gemini-0.10 → llm_gemini-0.11}/tests/test_gemini.py

```diff
@@ -1,7 +1,10 @@
 import llm
+import nest_asyncio
 import os
 import pytest
 
+nest_asyncio.apply()
+
 GEMINI_API_KEY = os.environ.get("PYTEST_GEMINI_API_KEY", None) or "gm-..."
 
 
@@ -9,15 +12,14 @@ GEMINI_API_KEY = os.environ.get("PYTEST_GEMINI_API_KEY", None) or "gm-..."
 @pytest.mark.asyncio
 async def test_prompt():
     model = llm.get_model("gemini-1.5-flash-latest")
-
-    response = model.prompt("Name for a pet pelican, just the name")
+    response = model.prompt("Name for a pet pelican, just the name", key=GEMINI_API_KEY)
     assert str(response) == "Percy"
     assert response.response_json == [
         {
             "candidates": [
                 {"content": {"parts": [{"text": "Percy"}], "role": "model"}}
             ],
-            "modelVersion": "gemini-1.5-flash-
+            "modelVersion": "gemini-1.5-flash-latest",
         }
     ]
     assert response.token_details is None
@@ -28,7 +30,8 @@ async def test_prompt():
 
     # And try it async too
     async_model = llm.get_async_model("gemini-1.5-flash-latest")
-
-
+    response = await async_model.prompt(
+        "Name for a pet pelican, just the name", key=GEMINI_API_KEY
+    )
     text = await response.text()
     assert text == "Percy"
```
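The updated test passes the key explicitly via `prompt(..., key=...)`, which the `KeyModel` migration makes possible, and applies `nest_asyncio` so the async model can be awaited inside the same recorded test run. The same keyword works for ordinary callers of the plugin; a small usage sketch mirroring the test, with a placeholder key (`"gm-..."` is a stand-in, as in the test file):

```python
import asyncio

import llm

# Sync model, passing the API key explicitly instead of relying on stored keys.
model = llm.get_model("gemini-1.5-flash-latest")
response = model.prompt("Name for a pet pelican, just the name", key="gm-...")
print(str(response))


# Async model, mirroring the second half of test_prompt().
async def main():
    async_model = llm.get_async_model("gemini-1.5-flash-latest")
    async_response = await async_model.prompt(
        "Name for a pet pelican, just the name", key="gm-..."
    )
    print(await async_response.text())


asyncio.run(main())
```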