llm-gemini 0.18.tar.gz → 0.18.1.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {llm_gemini-0.18 → llm_gemini-0.18.1}/PKG-INFO +1 -1
- {llm_gemini-0.18 → llm_gemini-0.18.1}/llm_gemini.egg-info/PKG-INFO +1 -1
- {llm_gemini-0.18 → llm_gemini-0.18.1}/llm_gemini.py +26 -4
- {llm_gemini-0.18 → llm_gemini-0.18.1}/pyproject.toml +1 -1
- {llm_gemini-0.18 → llm_gemini-0.18.1}/tests/test_gemini.py +21 -0
- {llm_gemini-0.18 → llm_gemini-0.18.1}/LICENSE +0 -0
- {llm_gemini-0.18 → llm_gemini-0.18.1}/README.md +0 -0
- {llm_gemini-0.18 → llm_gemini-0.18.1}/llm_gemini.egg-info/SOURCES.txt +0 -0
- {llm_gemini-0.18 → llm_gemini-0.18.1}/llm_gemini.egg-info/dependency_links.txt +0 -0
- {llm_gemini-0.18 → llm_gemini-0.18.1}/llm_gemini.egg-info/entry_points.txt +0 -0
- {llm_gemini-0.18 → llm_gemini-0.18.1}/llm_gemini.egg-info/requires.txt +0 -0
- {llm_gemini-0.18 → llm_gemini-0.18.1}/llm_gemini.egg-info/top_level.txt +0 -0
- {llm_gemini-0.18 → llm_gemini-0.18.1}/setup.cfg +0 -0
llm_gemini.py

@@ -36,7 +36,21 @@ GOOGLE_SEARCH_MODELS = {
     "gemini-1.5-flash-002",
     "gemini-2.0-flash-exp",
     "gemini-2.0-flash",
+    "gemini-2.5-pro-preview-03-25",
+    "gemini-2.5-pro-exp-03-25",
 }
+
+# Older Google models used google_search_retrieval instead of google_search
+GOOGLE_SEARCH_MODELS_USING_SEARCH_RETRIEVAL = {
+    "gemini-1.5-pro-latest",
+    "gemini-1.5-flash-latest",
+    "gemini-1.5-pro-001",
+    "gemini-1.5-flash-001",
+    "gemini-1.5-pro-002",
+    "gemini-1.5-flash-002",
+    "gemini-2.0-flash-exp",
+}
+
 THINKING_BUDGET_MODELS = {
     "gemini-2.5-flash-preview-04-17",
 }

@@ -285,7 +299,12 @@ class _SharedGemini:
         if prompt.options and prompt.options.code_execution:
            body["tools"] = [{"codeExecution": {}}]
         if prompt.options and self.can_google_search and prompt.options.google_search:
-            body["tools"] = [{"google_search_retrieval": {}}]
+            tool_name = (
+                "google_search_retrieval"
+                if self.model_id in GOOGLE_SEARCH_MODELS_USING_SEARCH_RETRIEVAL
+                else "google_search"
+            )
+            body["tools"] = [{tool_name: {}}]
         if prompt.system:
             body["systemInstruction"] = {"parts": [{"text": prompt.system}]}
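To make the change above concrete, here is a small self-contained sketch of the same tool selection. The helper name search_tool_for and the trimmed-down model set are illustrative only, not part of the plugin:

# Sketch of the tool selection introduced in the hunk above; the set is abbreviated.
GOOGLE_SEARCH_MODELS_USING_SEARCH_RETRIEVAL = {
    "gemini-1.5-pro-latest",
    "gemini-2.0-flash-exp",
}

def search_tool_for(model_id):
    # Older models only accept the legacy google_search_retrieval tool;
    # newer models expect google_search instead.
    tool_name = (
        "google_search_retrieval"
        if model_id in GOOGLE_SEARCH_MODELS_USING_SEARCH_RETRIEVAL
        else "google_search"
    )
    return [{tool_name: {}}]

print(search_tool_for("gemini-2.0-flash-exp"))      # [{'google_search_retrieval': {}}]
print(search_tool_for("gemini-2.5-pro-exp-03-25"))  # [{'google_search': {}}]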
|
@@ -490,9 +509,12 @@ def register_commands(cli):
     def models(key):
         "List of Gemini models pulled from their API"
         key = llm.get_key(key, "gemini", "LLM_GEMINI_KEY")
-        response = httpx.get(
-            f"https://generativelanguage.googleapis.com/v1beta/models?key={key}"
-        )
+        if not key:
+            raise click.ClickException(
+                "You must set the LLM_GEMINI_KEY environment variable or use --key"
+            )
+        url = f"https://generativelanguage.googleapis.com/v1beta/models"
+        response = httpx.get(url, headers={"x-goog-api-key": key})
         response.raise_for_status()
         click.echo(json.dumps(response.json()["models"], indent=2))
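As a usage sketch (not part of the package), the request made by the updated models command can be reproduced directly with httpx; the only assumption is that an API key is available in the LLM_GEMINI_KEY environment variable:

import os

import httpx

# Same endpoint and authentication header used by the updated command above.
key = os.environ["LLM_GEMINI_KEY"]
response = httpx.get(
    "https://generativelanguage.googleapis.com/v1beta/models",
    headers={"x-goog-api-key": key},
)
response.raise_for_status()
print([model["name"] for model in response.json()["models"]][:3])

Sending the key in the x-goog-api-key header rather than in the URL keeps it out of request logs, and the new if-not-key guard turns a missing key into a clear ClickException instead of a confusing HTTP error from the API.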
|
tests/test_gemini.py

@@ -1,4 +1,6 @@
+from click.testing import CliRunner
 import llm
+from llm.cli import cli
 import nest_asyncio
 import json
 import os

@@ -210,3 +212,22 @@ def test_cleanup_schema(schema, expected):
     # Use a deep copy so the original test data remains unchanged.
     result = cleanup_schema(schema)
     assert result == expected
+
+
+@pytest.mark.vcr
+def test_cli_gemini_models(tmpdir, monkeypatch):
+    user_dir = tmpdir / "llm.datasette.io"
+    user_dir.mkdir()
+    monkeypatch.setenv("LLM_USER_PATH", str(user_dir))
+    # With no key set should error nicely
+    runner = CliRunner()
+    result = runner.invoke(cli, ["gemini", "models"])
+    assert result.exit_code == 1
+    assert (
+        "Error: You must set the LLM_GEMINI_KEY environment variable or use --key\n"
+        == result.output
+    )
+    # Try again with --key
+    result2 = runner.invoke(cli, ["gemini", "models", "--key", GEMINI_API_KEY])
+    assert result2.exit_code == 0
+    assert "gemini-1.5-flash-latest" in result2.output
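For context, the new test isolates llm's configuration by pointing LLM_USER_PATH at a scratch directory before invoking the CLI in-process via Click's CliRunner. A minimal sketch of the same pattern outside pytest, assuming the plugin is installed and no Gemini key is configured:

import os
import tempfile

from click.testing import CliRunner
from llm.cli import cli

with tempfile.TemporaryDirectory() as user_dir:
    # Point llm at an empty user directory so no stored keys are picked up.
    os.environ["LLM_USER_PATH"] = user_dir
    result = CliRunner().invoke(cli, ["gemini", "models"])
    print(result.exit_code)  # 1 when no Gemini key is available
    print(result.output)     # the "You must set the LLM_GEMINI_KEY..." error
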
All other files (LICENSE, README.md, setup.cfg and the egg-info metadata) are unchanged between the two versions.