llm-gemini 0.13__tar.gz → 0.13.1__tar.gz
This diff shows the changes between two publicly released versions of this package, as they appear in its public registry. It is provided for informational purposes only.
- {llm_gemini-0.13 → llm_gemini-0.13.1}/PKG-INFO +1 -1
- {llm_gemini-0.13 → llm_gemini-0.13.1}/llm_gemini.egg-info/PKG-INFO +1 -1
- {llm_gemini-0.13 → llm_gemini-0.13.1}/llm_gemini.py +31 -22
- {llm_gemini-0.13 → llm_gemini-0.13.1}/pyproject.toml +1 -1
- llm_gemini-0.13.1/tests/test_gemini.py +104 -0
- llm_gemini-0.13/tests/test_gemini.py +0 -37
- {llm_gemini-0.13 → llm_gemini-0.13.1}/LICENSE +0 -0
- {llm_gemini-0.13 → llm_gemini-0.13.1}/README.md +0 -0
- {llm_gemini-0.13 → llm_gemini-0.13.1}/llm_gemini.egg-info/SOURCES.txt +0 -0
- {llm_gemini-0.13 → llm_gemini-0.13.1}/llm_gemini.egg-info/dependency_links.txt +0 -0
- {llm_gemini-0.13 → llm_gemini-0.13.1}/llm_gemini.egg-info/entry_points.txt +0 -0
- {llm_gemini-0.13 → llm_gemini-0.13.1}/llm_gemini.egg-info/requires.txt +0 -0
- {llm_gemini-0.13 → llm_gemini-0.13.1}/llm_gemini.egg-info/top_level.txt +0 -0
- {llm_gemini-0.13 → llm_gemini-0.13.1}/setup.cfg +0 -0
{llm_gemini-0.13 → llm_gemini-0.13.1}/llm_gemini.py

@@ -90,7 +90,7 @@ def resolve_type(attachment):
 
 def cleanup_schema(schema):
     "Gemini supports only a subset of JSON schema"
-    keys_to_remove = ("$schema", "additionalProperties")
+    keys_to_remove = ("$schema", "additionalProperties", "title")
     # Recursively remove them
     if isinstance(schema, dict):
         for key in keys_to_remove:
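The functional change in this hunk is that `"title"` keys are now stripped along with `"$schema"` and `"additionalProperties"` before a schema is sent to Gemini, presumably because Pydantic adds `"title"` throughout its generated schemas. A minimal standalone sketch of that recursive cleanup, approximating (not copying) the packaged helper:

```python
import pydantic


def cleanup_schema(schema):
    "Gemini supports only a subset of JSON schema"
    # 0.13.1 also strips "title", which Pydantic adds to every generated schema
    keys_to_remove = ("$schema", "additionalProperties", "title")
    if isinstance(schema, dict):
        for key in keys_to_remove:
            schema.pop(key, None)
        # Recurse into nested property definitions
        for value in schema.values():
            cleanup_schema(value)
    elif isinstance(schema, list):
        for value in schema:
            cleanup_schema(value)
    return schema


# Example: a Pydantic model schema is full of "title" keys before cleanup
class Dog(pydantic.BaseModel):
    name: str
    age: int


print(cleanup_schema(Dog.model_json_schema()))
```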
@@ -288,9 +288,17 @@ class _SharedGemini:
             return f'```\n{part["codeExecutionResult"]["output"].strip()}\n```\n'
         return ""
 
+    def process_candidates(self, candidates):
+        # We only use the first candidate
+        for part in candidates[0]["content"]["parts"]:
+            yield self.process_part(part)
+
     def set_usage(self, response):
         try:
-
+            # Don't record the "content" key from that last candidate
+            for candidate in response.response_json["candidates"]:
+                candidate.pop("content", None)
+            usage = response.response_json.pop("usageMetadata")
             input_tokens = usage.pop("promptTokenCount", None)
             output_tokens = usage.pop("candidatesTokenCount", None)
             usage.pop("totalTokenCount", None)
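Taken together, the new `process_candidates()` helper centralises how each streamed part becomes output text, while `set_usage()` now drops the bulky `"content"` key from each candidate before the final event is logged and then pulls token counts out of `usageMetadata`. A rough sketch of that flow on a single response event; the event dict mirrors the shapes in the new test file below, and the free functions are stand-ins for the real methods on `_SharedGemini`:

```python
# Hypothetical trimmed event in the shape Gemini's streamGenerateContent returns
event = {
    "candidates": [
        {
            "content": {"parts": [{"text": "Percy\n"}], "role": "model"},
            "finishReason": "STOP",
        }
    ],
    "usageMetadata": {
        "promptTokenCount": 9,
        "candidatesTokenCount": 2,
        "totalTokenCount": 11,
    },
    "modelVersion": "gemini-1.5-flash-latest",
}


def process_part(part):
    # Simplified: the real method also handles executableCode / codeExecutionResult
    return part.get("text", "")


def process_candidates(candidates):
    # Only the first candidate is used, but every part in it is yielded
    for part in candidates[0]["content"]["parts"]:
        yield process_part(part)


print("".join(process_candidates(event["candidates"])))  # -> "Percy\n"

# set_usage-style cleanup: drop "content" so the logged response_json stays small,
# then pop the token counts out of usageMetadata
for candidate in event["candidates"]:
    candidate.pop("content", None)
usage = event.pop("usageMetadata")
input_tokens = usage.pop("promptTokenCount", None)  # 9
output_tokens = usage.pop("candidatesTokenCount", None)  # 2
usage.pop("totalTokenCount", None)
print(event, input_tokens, output_tokens)
```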
@@ -320,17 +328,16 @@ class GeminiPro(_SharedGemini, llm.KeyModel):
             for chunk in http_response.iter_bytes():
                 coro.send(chunk)
                 if events:
-                    event
-
-
-
-
-
-
-
-                    gathered.append(event)
+                    for event in events:
+                        if isinstance(event, dict) and "error" in event:
+                            raise llm.ModelError(event["error"]["message"])
+                        try:
+                            yield from self.process_candidates(event["candidates"])
+                        except KeyError:
+                            yield ""
+                        gathered.append(event)
                     events.clear()
-            response.response_json = gathered
+            response.response_json = gathered[-1]
             self.set_usage(response)
 
 
@@ -353,17 +360,19 @@ class AsyncGeminiPro(_SharedGemini, llm.AsyncKeyModel):
             async for chunk in http_response.aiter_bytes():
                 coro.send(chunk)
                 if events:
-                    event
-
-
-
-
-
-
-
-
+                    for event in events:
+                        if isinstance(event, dict) and "error" in event:
+                            raise llm.ModelError(event["error"]["message"])
+                        try:
+                            for chunk in self.process_candidates(
+                                event["candidates"]
+                            ):
+                                yield chunk
+                        except KeyError:
+                            yield ""
+                        gathered.append(event)
                     events.clear()
-            response.response_json = gathered
+            response.response_json = gathered[-1]
             self.set_usage(response)
 
 
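These two hunks apply the same fix to the synchronous and asynchronous models: every event that ijson has buffered since the last chunk is now processed (the removed code appears to have looked only at the first buffered event), API errors are raised as `llm.ModelError`, and `response_json` is set to the final event rather than the whole list of gathered events. A self-contained sketch of the new loop's behaviour, with plain lists standing in for ijson's `sendable_list` and `RuntimeError` standing in for `llm.ModelError`:

```python
def stream_text(chunks_of_events):
    # Mirrors the 0.13.1 loop: walk *every* buffered event, not just the first
    gathered = []  # the real code keeps gathered[-1] as response.response_json
    for events in chunks_of_events:  # one entry per received HTTP chunk
        if events:
            for event in events:
                if isinstance(event, dict) and "error" in event:
                    raise RuntimeError(event["error"]["message"])
                try:
                    for part in event["candidates"][0]["content"]["parts"]:
                        yield part.get("text", "")
                except KeyError:
                    yield ""
                gathered.append(event)
            events.clear()


# Two events arriving in the same chunk: processing only events[0] would drop "world"
buffered = [
    [
        {"candidates": [{"content": {"parts": [{"text": "hello "}]}}]},
        {"candidates": [{"content": {"parts": [{"text": "world"}]}}]},
    ]
]
print("".join(stream_text(buffered)))  # -> "hello world"
```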
llm_gemini-0.13.1/tests/test_gemini.py

@@ -0,0 +1,104 @@
+import llm
+import nest_asyncio
+import json
+import os
+import pytest
+import pydantic
+
+nest_asyncio.apply()
+
+GEMINI_API_KEY = os.environ.get("PYTEST_GEMINI_API_KEY", None) or "gm-..."
+
+
+@pytest.mark.vcr
+@pytest.mark.asyncio
+async def test_prompt():
+    model = llm.get_model("gemini-1.5-flash-latest")
+    response = model.prompt("Name for a pet pelican, just the name", key=GEMINI_API_KEY)
+    assert str(response) == "Percy\n"
+    assert response.response_json == {
+        "candidates": [
+            {
+                "finishReason": "STOP",
+                "safetyRatings": [
+                    {
+                        "category": "HARM_CATEGORY_HATE_SPEECH",
+                        "probability": "NEGLIGIBLE",
+                    },
+                    {
+                        "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
+                        "probability": "NEGLIGIBLE",
+                    },
+                    {
+                        "category": "HARM_CATEGORY_HARASSMENT",
+                        "probability": "NEGLIGIBLE",
+                    },
+                    {
+                        "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+                        "probability": "NEGLIGIBLE",
+                    },
+                ],
+            }
+        ],
+        "modelVersion": "gemini-1.5-flash-latest",
+    }
+    assert response.token_details == {
+        "promptTokensDetails": [{"modality": "TEXT", "tokenCount": 9}],
+        "candidatesTokensDetails": [{"modality": "TEXT", "tokenCount": 2}],
+    }
+    assert response.input_tokens == 9
+    assert response.output_tokens == 2
+
+    # And try it async too
+    async_model = llm.get_async_model("gemini-1.5-flash-latest")
+    response = await async_model.prompt(
+        "Name for a pet pelican, just the name", key=GEMINI_API_KEY
+    )
+    text = await response.text()
+    assert text == "Percy\n"
+
+
+@pytest.mark.vcr
+@pytest.mark.asyncio
+async def test_prompt_with_pydantic_schema():
+    class Dog(pydantic.BaseModel):
+        name: str
+        age: int
+        bio: str
+
+    model = llm.get_model("gemini-1.5-flash-latest")
+    response = model.prompt(
+        "Invent a cool dog", key=GEMINI_API_KEY, schema=Dog, stream=False
+    )
+    assert json.loads(response.text()) == {
+        "age": 3,
+        "bio": "A fluffy Samoyed with exceptional intelligence and a love for belly rubs. He's mastered several tricks, including fetching the newspaper and opening doors.",
+        "name": "Cloud",
+    }
+    assert response.response_json == {
+        "candidates": [
+            {
+                "finishReason": "STOP",
+                "safetyRatings": [
+                    {
+                        "category": "HARM_CATEGORY_HATE_SPEECH",
+                        "probability": "NEGLIGIBLE",
+                    },
+                    {
+                        "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
+                        "probability": "NEGLIGIBLE",
+                    },
+                    {
+                        "category": "HARM_CATEGORY_HARASSMENT",
+                        "probability": "NEGLIGIBLE",
+                    },
+                    {
+                        "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+                        "probability": "NEGLIGIBLE",
+                    },
+                ],
+            }
+        ],
+        "modelVersion": "gemini-1.5-flash-latest",
+    }
+    assert response.input_tokens == 10
@@ -1,37 +0,0 @@
|
|
1
|
-
import llm
|
2
|
-
import nest_asyncio
|
3
|
-
import os
|
4
|
-
import pytest
|
5
|
-
|
6
|
-
nest_asyncio.apply()
|
7
|
-
|
8
|
-
GEMINI_API_KEY = os.environ.get("PYTEST_GEMINI_API_KEY", None) or "gm-..."
|
9
|
-
|
10
|
-
|
11
|
-
@pytest.mark.vcr
|
12
|
-
@pytest.mark.asyncio
|
13
|
-
async def test_prompt():
|
14
|
-
model = llm.get_model("gemini-1.5-flash-latest")
|
15
|
-
response = model.prompt("Name for a pet pelican, just the name", key=GEMINI_API_KEY)
|
16
|
-
assert str(response) == "Percy"
|
17
|
-
assert response.response_json == [
|
18
|
-
{
|
19
|
-
"candidates": [
|
20
|
-
{"content": {"parts": [{"text": "Percy"}], "role": "model"}}
|
21
|
-
],
|
22
|
-
"modelVersion": "gemini-1.5-flash-latest",
|
23
|
-
}
|
24
|
-
]
|
25
|
-
assert response.token_details is None
|
26
|
-
assert response.input_tokens == 10
|
27
|
-
# Not sure why our pytest-recording setup doesn't report output tokens
|
28
|
-
# https://github.com/simonw/llm-gemini/issues/25#issuecomment-2487464339
|
29
|
-
assert response.output_tokens is None
|
30
|
-
|
31
|
-
# And try it async too
|
32
|
-
async_model = llm.get_async_model("gemini-1.5-flash-latest")
|
33
|
-
response = await async_model.prompt(
|
34
|
-
"Name for a pet pelican, just the name", key=GEMINI_API_KEY
|
35
|
-
)
|
36
|
-
text = await response.text()
|
37
|
-
assert text == "Percy"
|