llm-gemini 0.13a0.tar.gz → 0.13.1.tar.gz
This diff compares the contents of two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
- {llm_gemini-0.13a0 → llm_gemini-0.13.1}/PKG-INFO +2 -2
- {llm_gemini-0.13a0 → llm_gemini-0.13.1}/llm_gemini.egg-info/PKG-INFO +2 -2
- {llm_gemini-0.13a0 → llm_gemini-0.13.1}/llm_gemini.egg-info/requires.txt +1 -1
- {llm_gemini-0.13a0 → llm_gemini-0.13.1}/llm_gemini.py +43 -25
- {llm_gemini-0.13a0 → llm_gemini-0.13.1}/pyproject.toml +2 -2
- llm_gemini-0.13.1/tests/test_gemini.py +104 -0
- llm_gemini-0.13a0/tests/test_gemini.py +0 -37
- {llm_gemini-0.13a0 → llm_gemini-0.13.1}/LICENSE +0 -0
- {llm_gemini-0.13a0 → llm_gemini-0.13.1}/README.md +0 -0
- {llm_gemini-0.13a0 → llm_gemini-0.13.1}/llm_gemini.egg-info/SOURCES.txt +0 -0
- {llm_gemini-0.13a0 → llm_gemini-0.13.1}/llm_gemini.egg-info/dependency_links.txt +0 -0
- {llm_gemini-0.13a0 → llm_gemini-0.13.1}/llm_gemini.egg-info/entry_points.txt +0 -0
- {llm_gemini-0.13a0 → llm_gemini-0.13.1}/llm_gemini.egg-info/top_level.txt +0 -0
- {llm_gemini-0.13a0 → llm_gemini-0.13.1}/setup.cfg +0 -0
{llm_gemini-0.13a0 → llm_gemini-0.13.1}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: llm-gemini
-Version: 0.13a0
+Version: 0.13.1
 Summary: LLM plugin to access Google's Gemini family of models
 Author: Simon Willison
 License: Apache-2.0
@@ -11,7 +11,7 @@ Project-URL: CI, https://github.com/simonw/llm-gemini/actions
 Classifier: License :: OSI Approved :: Apache Software License
 Description-Content-Type: text/markdown
 License-File: LICENSE
-Requires-Dist: llm>=0.
+Requires-Dist: llm>=0.23
 Requires-Dist: httpx
 Requires-Dist: ijson
 Provides-Extra: test
{llm_gemini-0.13a0 → llm_gemini-0.13.1}/llm_gemini.egg-info/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: llm-gemini
-Version: 0.13a0
+Version: 0.13.1
 Summary: LLM plugin to access Google's Gemini family of models
 Author: Simon Willison
 License: Apache-2.0
@@ -11,7 +11,7 @@ Project-URL: CI, https://github.com/simonw/llm-gemini/actions
 Classifier: License :: OSI Approved :: Apache Software License
 Description-Content-Type: text/markdown
 License-File: LICENSE
-Requires-Dist: llm>=0.
+Requires-Dist: llm>=0.23
 Requires-Dist: httpx
 Requires-Dist: ijson
 Provides-Extra: test
{llm_gemini-0.13a0 → llm_gemini-0.13.1}/llm_gemini.py

@@ -65,8 +65,16 @@ def register_models(register):
     ]:
         can_google_search = model_id in GOOGLE_SEARCH_MODELS
         register(
-            GeminiPro(model_id, can_google_search=can_google_search),
-            AsyncGeminiPro(model_id, can_google_search=can_google_search),
+            GeminiPro(
+                model_id,
+                can_google_search=can_google_search,
+                can_schema="flash-thinking" not in model_id,
+            ),
+            AsyncGeminiPro(
+                model_id,
+                can_google_search=can_google_search,
+                can_schema="flash-thinking" not in model_id,
+            ),
         )
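The net effect: each Gemini model is still registered in sync and async form, but now with schema support switched on for everything except the "flash-thinking" variants, which did not accept response schemas. A minimal sketch of how the new flag surfaces, assuming the two model IDs below are among those in the (elided) registration list:

```python
import llm

# Sketch only — these model IDs are assumptions; the real list is the one
# elided above inside register_models().
for model_id in ("gemini-2.0-flash", "gemini-2.0-flash-thinking-exp-01-21"):
    model = llm.get_model(model_id)
    # can_schema is stored as supports_schema (see the __init__ hunk below)
    print(model_id, model.supports_schema)
# Expected: True for the plain model, False for the "flash-thinking" one
```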
@@ -82,7 +90,7 @@ def resolve_type(attachment):

 def cleanup_schema(schema):
     "Gemini supports only a subset of JSON schema"
-    keys_to_remove = ("$schema", "additionalProperties")
+    keys_to_remove = ("$schema", "additionalProperties", "title")
     # Recursively remove them
     if isinstance(schema, dict):
         for key in keys_to_remove:
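Adding "title" to the removal list matters because schemas generated by Pydantic attach `title` keys, which the Gemini API would otherwise reject. The hunk only shows the top of the function; here is a minimal sketch of the recursive walk it implies (the dict/list recursion below is assumed, not shown in the diff):

```python
def cleanup_schema(schema):
    "Gemini supports only a subset of JSON schema"
    keys_to_remove = ("$schema", "additionalProperties", "title")
    # Recursively remove them
    if isinstance(schema, dict):
        for key in keys_to_remove:
            schema.pop(key, None)  # assumed: drop the key wherever it appears
        for value in schema.values():
            cleanup_schema(value)  # assumed: recurse into nested schemas
    elif isinstance(schema, list):
        for item in schema:
            cleanup_schema(item)   # assumed: recurse into list items
    return schema

# A Pydantic-style schema before and after:
print(cleanup_schema({
    "$schema": "http://json-schema.org/draft-07/schema#",
    "title": "Dog",
    "type": "object",
    "properties": {"name": {"title": "Name", "type": "string"}},
    "additionalProperties": False,
}))
# {'type': 'object', 'properties': {'name': {'type': 'string'}}}
```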
@@ -186,9 +194,10 @@ class _SharedGemini:
         default=None,
     )

-    def __init__(self, model_id, can_google_search=False):
+    def __init__(self, model_id, can_google_search=False, can_schema=False):
         self.model_id = model_id
         self.can_google_search = can_google_search
+        self.supports_schema = can_schema
         if can_google_search:
             self.Options = self.OptionsWithGoogleSearch
@@ -279,9 +288,17 @@ class _SharedGemini:
             return f'```\n{part["codeExecutionResult"]["output"].strip()}\n```\n'
         return ""

+    def process_candidates(self, candidates):
+        # We only use the first candidate
+        for part in candidates[0]["content"]["parts"]:
+            yield self.process_part(part)
+
     def set_usage(self, response):
         try:
-            usage = response.response_json[-1].pop("usageMetadata")
+            # Don't record the "content" key from that last candidate
+            for candidate in response.response_json["candidates"]:
+                candidate.pop("content", None)
+            usage = response.response_json.pop("usageMetadata")
             input_tokens = usage.pop("promptTokenCount", None)
             output_tokens = usage.pop("candidatesTokenCount", None)
             usage.pop("totalTokenCount", None)
@@ -311,17 +328,16 @@ class GeminiPro(_SharedGemini, llm.KeyModel):
            for chunk in http_response.iter_bytes():
                coro.send(chunk)
                if events:
-                    event = events[0]
-                    if isinstance(event, dict) and "error" in event:
-                        raise llm.ModelError(event["error"]["message"])
-                    try:
-                        part = event["candidates"][0]["content"]["parts"][0]
-                        yield part["text"]
-                    except KeyError:
-                        yield ""
-                    gathered.append(event)
+                    for event in events:
+                        if isinstance(event, dict) and "error" in event:
+                            raise llm.ModelError(event["error"]["message"])
+                        try:
+                            yield from self.process_candidates(event["candidates"])
+                        except KeyError:
+                            yield ""
+                        gathered.append(event)
                    events.clear()
-            response.response_json = gathered
+            response.response_json = gathered[-1]
            self.set_usage(response)
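Two behavior changes here: every buffered event is now consumed (0.13a0 evidently looked only at events[0], dropping the rest of a batch), and response_json keeps just the final event, which is the one carrying usageMetadata and the finishReason. A small sketch of why gathered[-1] suffices, using fabricated minimal events:

```python
# Fabricated stand-ins for parsed SSE events from the Gemini streaming API:
gathered = [
    {"candidates": [{"content": {"parts": [{"text": "Per"}]}}]},
    {
        "candidates": [{"content": {"parts": [{"text": "cy\n"}]},
                        "finishReason": "STOP"}],
        "usageMetadata": {"promptTokenCount": 9, "candidatesTokenCount": 2},
    },
]
response_json = gathered[-1]  # 0.13a0 stored the entire list instead
assert "usageMetadata" in response_json
# set_usage() then pops usageMetadata and strips each candidate's "content",
# so only compact metadata is recorded, however long the stream was.
```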
@@ -344,17 +360,19 @@ class AsyncGeminiPro(_SharedGemini, llm.AsyncKeyModel):
                async for chunk in http_response.aiter_bytes():
                    coro.send(chunk)
                    if events:
-                        event = events[0]
-                        if isinstance(event, dict) and "error" in event:
-                            raise llm.ModelError(event["error"]["message"])
-                        try:
-                            part = event["candidates"][0]["content"]["parts"][0]
-                            yield part["text"]
-                        except KeyError:
-                            yield ""
-                        gathered.append(event)
+                        for event in events:
+                            if isinstance(event, dict) and "error" in event:
+                                raise llm.ModelError(event["error"]["message"])
+                            try:
+                                for chunk in self.process_candidates(
+                                    event["candidates"]
+                                ):
+                                    yield chunk
+                            except KeyError:
+                                yield ""
+                            gathered.append(event)
                        events.clear()
-                response.response_json = gathered
+                response.response_json = gathered[-1]
                self.set_usage(response)
{llm_gemini-0.13a0 → llm_gemini-0.13.1}/pyproject.toml

@@ -1,6 +1,6 @@
 [project]
 name = "llm-gemini"
-version = "0.13a0"
+version = "0.13.1"
 description = "LLM plugin to access Google's Gemini family of models"
 readme = "README.md"
 authors = [{name = "Simon Willison"}]
@@ -9,7 +9,7 @@ classifiers = [
     "License :: OSI Approved :: Apache Software License"
 ]
 dependencies = [
-    "llm>=0.
+    "llm>=0.23",
     "httpx",
     "ijson"
 ]
llm_gemini-0.13.1/tests/test_gemini.py

@@ -0,0 +1,104 @@
+import llm
+import nest_asyncio
+import json
+import os
+import pytest
+import pydantic
+
+nest_asyncio.apply()
+
+GEMINI_API_KEY = os.environ.get("PYTEST_GEMINI_API_KEY", None) or "gm-..."
+
+
+@pytest.mark.vcr
+@pytest.mark.asyncio
+async def test_prompt():
+    model = llm.get_model("gemini-1.5-flash-latest")
+    response = model.prompt("Name for a pet pelican, just the name", key=GEMINI_API_KEY)
+    assert str(response) == "Percy\n"
+    assert response.response_json == {
+        "candidates": [
+            {
+                "finishReason": "STOP",
+                "safetyRatings": [
+                    {
+                        "category": "HARM_CATEGORY_HATE_SPEECH",
+                        "probability": "NEGLIGIBLE",
+                    },
+                    {
+                        "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
+                        "probability": "NEGLIGIBLE",
+                    },
+                    {
+                        "category": "HARM_CATEGORY_HARASSMENT",
+                        "probability": "NEGLIGIBLE",
+                    },
+                    {
+                        "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+                        "probability": "NEGLIGIBLE",
+                    },
+                ],
+            }
+        ],
+        "modelVersion": "gemini-1.5-flash-latest",
+    }
+    assert response.token_details == {
+        "promptTokensDetails": [{"modality": "TEXT", "tokenCount": 9}],
+        "candidatesTokensDetails": [{"modality": "TEXT", "tokenCount": 2}],
+    }
+    assert response.input_tokens == 9
+    assert response.output_tokens == 2
+
+    # And try it async too
+    async_model = llm.get_async_model("gemini-1.5-flash-latest")
+    response = await async_model.prompt(
+        "Name for a pet pelican, just the name", key=GEMINI_API_KEY
+    )
+    text = await response.text()
+    assert text == "Percy\n"
+
+
+@pytest.mark.vcr
+@pytest.mark.asyncio
+async def test_prompt_with_pydantic_schema():
+    class Dog(pydantic.BaseModel):
+        name: str
+        age: int
+        bio: str
+
+    model = llm.get_model("gemini-1.5-flash-latest")
+    response = model.prompt(
+        "Invent a cool dog", key=GEMINI_API_KEY, schema=Dog, stream=False
+    )
+    assert json.loads(response.text()) == {
+        "age": 3,
+        "bio": "A fluffy Samoyed with exceptional intelligence and a love for belly rubs. He's mastered several tricks, including fetching the newspaper and opening doors.",
+        "name": "Cloud",
+    }
+    assert response.response_json == {
+        "candidates": [
+            {
+                "finishReason": "STOP",
+                "safetyRatings": [
+                    {
+                        "category": "HARM_CATEGORY_HATE_SPEECH",
+                        "probability": "NEGLIGIBLE",
+                    },
+                    {
+                        "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
+                        "probability": "NEGLIGIBLE",
+                    },
+                    {
+                        "category": "HARM_CATEGORY_HARASSMENT",
+                        "probability": "NEGLIGIBLE",
+                    },
+                    {
+                        "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+                        "probability": "NEGLIGIBLE",
+                    },
+                ],
+            }
+        ],
+        "modelVersion": "gemini-1.5-flash-latest",
+    }
+    assert response.input_tokens == 10
llm_gemini-0.13a0/tests/test_gemini.py

@@ -1,37 +0,0 @@
-import llm
-import nest_asyncio
-import os
-import pytest
-
-nest_asyncio.apply()
-
-GEMINI_API_KEY = os.environ.get("PYTEST_GEMINI_API_KEY", None) or "gm-..."
-
-
-@pytest.mark.vcr
-@pytest.mark.asyncio
-async def test_prompt():
-    model = llm.get_model("gemini-1.5-flash-latest")
-    response = model.prompt("Name for a pet pelican, just the name", key=GEMINI_API_KEY)
-    assert str(response) == "Percy"
-    assert response.response_json == [
-        {
-            "candidates": [
-                {"content": {"parts": [{"text": "Percy"}], "role": "model"}}
-            ],
-            "modelVersion": "gemini-1.5-flash-latest",
-        }
-    ]
-    assert response.token_details is None
-    assert response.input_tokens == 10
-    # Not sure why our pytest-recording setup doesn't report output tokens
-    # https://github.com/simonw/llm-gemini/issues/25#issuecomment-2487464339
-    assert response.output_tokens is None
-
-    # And try it async too
-    async_model = llm.get_async_model("gemini-1.5-flash-latest")
-    response = await async_model.prompt(
-        "Name for a pet pelican, just the name", key=GEMINI_API_KEY
-    )
-    text = await response.text()
-    assert text == "Percy"