llm-gemini 0.14.1__tar.gz → 0.16__tar.gz

This diff shows the changes between two publicly released versions of this package, as they appear in their respective public registries, and is provided for informational purposes only.
@@ -1,6 +1,6 @@
- Metadata-Version: 2.2
+ Metadata-Version: 2.4
  Name: llm-gemini
- Version: 0.14.1
+ Version: 0.16
  Summary: LLM plugin to access Google's Gemini family of models
  Author: Simon Willison
  License: Apache-2.0
@@ -19,6 +19,7 @@ Requires-Dist: pytest; extra == "test"
  Requires-Dist: pytest-recording; extra == "test"
  Requires-Dist: pytest-asyncio; extra == "test"
  Requires-Dist: nest-asyncio; extra == "test"
+ Dynamic: license-file

  # llm-gemini

@@ -46,32 +47,32 @@ llm keys set gemini
  ```
  You can also set the API key by assigning it to the environment variable `LLM_GEMINI_KEY`.

- Now run the model using `-m gemini-1.5-pro-latest`, for example:
+ Now run the model using `-m gemini-2.0-flash`, for example:

  ```bash
- llm -m gemini-1.5-pro-latest "A joke about a pelican and a walrus"
+ llm -m gemini-2.0-flash "A short joke about a pelican and a walrus"
  ```

- > A pelican walks into a seafood restaurant with a huge fish hanging out of its beak. The walrus, sitting at the bar, eyes it enviously.
+ > A pelican and a walrus are sitting at a bar. The pelican orders a fishbowl cocktail, and the walrus orders a plate of clams. The bartender asks, "So, what brings you two together?"
  >
- > "Hey," the walrus says, "That looks delicious! What kind of fish is that?"
- >
- > The pelican taps its beak thoughtfully. "I believe," it says, "it's a billfish."
+ > The walrus sighs and says, "It's a long story. Let's just say we met through a mutual friend... of the fin."

  Other models are:

- - `gemini-1.5-flash-latest`
- - `gemini-1.5-flash-8b-latest` - the least expensive
- - `gemini-exp-1114` - recent experimental #1
- - `gemini-exp-1121` - recent experimental #2
- - `gemini-exp-1206` - recent experimental #3
- - `gemini-2.0-flash-exp` - [Gemini 2.0 Flash](https://blog.google/technology/google-deepmind/google-gemini-ai-update-december-2024/#gemini-2-0-flash)
- - `learnlm-1.5-pro-experimental` - "an experimental task-specific model that has been trained to align with learning science principles" - [more details here](https://ai.google.dev/gemini-api/docs/learnlm).
- - `gemini-2.0-flash-thinking-exp-1219` - experimental "thinking" model from December 2024
- - `gemini-2.0-flash-thinking-exp-01-21` - experimental "thinking" model from January 2025
- - `gemini-2.0-flash` - Gemini 2.0 Flash
- - `gemini-2.0-flash-lite` - Gemini 2.0 Flash-Lite
+ - `gemini-2.5-pro-exp-03-25` - experimental release of Gemini 2.5 Pro
+ - `gemma-3-27b-it` - [Gemma 3](https://blog.google/technology/developers/gemma-3/) 27B
  - `gemini-2.0-pro-exp-02-05` - experimental release of Gemini 2.0 Pro
+ - `gemini-2.0-flash-lite` - Gemini 2.0 Flash-Lite
+ - `gemini-2.0-flash` - Gemini 2.0 Flash
+ - `gemini-2.0-flash-thinking-exp-01-21` - experimental "thinking" model from January 2025
+ - `gemini-2.0-flash-thinking-exp-1219` - experimental "thinking" model from December 2024
+ - `learnlm-1.5-pro-experimental` - "an experimental task-specific model that has been trained to align with learning science principles" - [more details here](https://ai.google.dev/gemini-api/docs/learnlm).
+ - `gemini-2.0-flash-exp` - [Gemini 2.0 Flash](https://blog.google/technology/google-deepmind/google-gemini-ai-update-december-2024/#gemini-2-0-flash)
+ - `gemini-exp-1206` - recent experimental #3
+ - `gemini-exp-1121` - recent experimental #2
+ - `gemini-exp-1114` - recent experimental #1
+ - `gemini-1.5-flash-8b-latest` - the least expensive
+ - `gemini-1.5-flash-latest`

  ### Images, audio and video

@@ -24,32 +24,32 @@ llm keys set gemini
  ```
  You can also set the API key by assigning it to the environment variable `LLM_GEMINI_KEY`.

- Now run the model using `-m gemini-1.5-pro-latest`, for example:
+ Now run the model using `-m gemini-2.0-flash`, for example:

  ```bash
- llm -m gemini-1.5-pro-latest "A joke about a pelican and a walrus"
+ llm -m gemini-2.0-flash "A short joke about a pelican and a walrus"
  ```

- > A pelican walks into a seafood restaurant with a huge fish hanging out of its beak. The walrus, sitting at the bar, eyes it enviously.
+ > A pelican and a walrus are sitting at a bar. The pelican orders a fishbowl cocktail, and the walrus orders a plate of clams. The bartender asks, "So, what brings you two together?"
  >
- > "Hey," the walrus says, "That looks delicious! What kind of fish is that?"
- >
- > The pelican taps its beak thoughtfully. "I believe," it says, "it's a billfish."
+ > The walrus sighs and says, "It's a long story. Let's just say we met through a mutual friend... of the fin."

  Other models are:

- - `gemini-1.5-flash-latest`
- - `gemini-1.5-flash-8b-latest` - the least expensive
- - `gemini-exp-1114` - recent experimental #1
- - `gemini-exp-1121` - recent experimental #2
- - `gemini-exp-1206` - recent experimental #3
- - `gemini-2.0-flash-exp` - [Gemini 2.0 Flash](https://blog.google/technology/google-deepmind/google-gemini-ai-update-december-2024/#gemini-2-0-flash)
- - `learnlm-1.5-pro-experimental` - "an experimental task-specific model that has been trained to align with learning science principles" - [more details here](https://ai.google.dev/gemini-api/docs/learnlm).
- - `gemini-2.0-flash-thinking-exp-1219` - experimental "thinking" model from December 2024
- - `gemini-2.0-flash-thinking-exp-01-21` - experimental "thinking" model from January 2025
- - `gemini-2.0-flash` - Gemini 2.0 Flash
- - `gemini-2.0-flash-lite` - Gemini 2.0 Flash-Lite
+ - `gemini-2.5-pro-exp-03-25` - experimental release of Gemini 2.5 Pro
+ - `gemma-3-27b-it` - [Gemma 3](https://blog.google/technology/developers/gemma-3/) 27B
  - `gemini-2.0-pro-exp-02-05` - experimental release of Gemini 2.0 Pro
+ - `gemini-2.0-flash-lite` - Gemini 2.0 Flash-Lite
+ - `gemini-2.0-flash` - Gemini 2.0 Flash
+ - `gemini-2.0-flash-thinking-exp-01-21` - experimental "thinking" model from January 2025
+ - `gemini-2.0-flash-thinking-exp-1219` - experimental "thinking" model from December 2024
+ - `learnlm-1.5-pro-experimental` - "an experimental task-specific model that has been trained to align with learning science principles" - [more details here](https://ai.google.dev/gemini-api/docs/learnlm).
+ - `gemini-2.0-flash-exp` - [Gemini 2.0 Flash](https://blog.google/technology/google-deepmind/google-gemini-ai-update-december-2024/#gemini-2-0-flash)
+ - `gemini-exp-1206` - recent experimental #3
+ - `gemini-exp-1121` - recent experimental #2
+ - `gemini-exp-1114` - recent experimental #1
+ - `gemini-1.5-flash-8b-latest` - the least expensive
+ - `gemini-1.5-flash-latest`

  ### Images, audio and video

@@ -1,6 +1,8 @@
+ import click
  import copy
  import httpx
  import ijson
+ import json
  import llm
  from pydantic import Field
  from typing import Optional
@@ -62,6 +64,10 @@ def register_models(register):
          "gemini-2.0-pro-exp-02-05",
          # Released 25th Feb 2025:
          "gemini-2.0-flash-lite",
+         # Released 12th March 2025:
+         "gemma-3-27b-it",
+         # 25th March 2025:
+         "gemini-2.5-pro-exp-03-25",
      ]:
          can_google_search = model_id in GOOGLE_SEARCH_MODELS
          register(
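The hunk above registers two additional model IDs, `gemma-3-27b-it` and `gemini-2.5-pro-exp-03-25`. Once 0.16 is installed they should be usable through the same CLI pattern the README documents; the prompts below are illustrative only, a sketch rather than output from the package itself:

```bash
# Hedged sketch: invoke the newly registered models the same way the README invokes gemini-2.0-flash
llm -m gemini-2.5-pro-exp-03-25 "A short joke about a pelican and a walrus"
llm -m gemma-3-27b-it "A short joke about a pelican and a walrus"
```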
@@ -437,3 +443,32 @@ class GeminiEmbeddingModel(llm.EmbeddingModel):
          if self.truncate:
              values = [value[: self.truncate] for value in values]
          return values
+
+
+ @llm.hookimpl
+ def register_commands(cli):
+     @cli.group()
+     def gemini():
+         "Commands relating to the llm-gemini plugin"
+
+     @gemini.command()
+     @click.option("--key", help="API key to use")
+     def models(key):
+         "List of Gemini models pulled from their API"
+         key = llm.get_key(key, "gemini", "LLM_GEMINI_KEY")
+         response = httpx.get(
+             f"https://generativelanguage.googleapis.com/v1beta/models?key={key}",
+         )
+         response.raise_for_status()
+         click.echo(json.dumps(response.json()["models"], indent=2))
+
+     @gemini.command()
+     @click.option("--key", help="API key to use")
+     def files(key):
+         "List of files uploaded to the Gemini API"
+         key = llm.get_key(key, "gemini", "LLM_GEMINI_KEY")
+         response = httpx.get(
+             f"https://generativelanguage.googleapis.com/v1beta/files?key={key}",
+         )
+         response.raise_for_status()
+         click.echo(json.dumps(response.json()["files"], indent=2))
@@ -1,6 +1,6 @@
  [project]
  name = "llm-gemini"
- version = "0.14.1"
+ version = "0.16"
  description = "LLM plugin to access Google's Gemini family of models"
  readme = "README.md"
  authors = [{name = "Simon Willison"}]
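The `pyproject.toml` hunk bumps the package version to 0.16. One way to pick up the release, assuming `llm install` supports an upgrade flag (recent versions of LLM forward this to pip):

```bash
# Hedged sketch: upgrade the plugin to the new release via LLM's plugin installer
llm install -U llm-gemini
```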