llm-gemini 0.16__py3-none-any.whl → 0.18__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {llm_gemini-0.16.dist-info → llm_gemini-0.18.dist-info}/METADATA +19 -11
- llm_gemini-0.18.dist-info/RECORD +7 -0
- {llm_gemini-0.16.dist-info → llm_gemini-0.18.dist-info}/WHEEL +1 -1
- llm_gemini.py +40 -6
- llm_gemini-0.16.dist-info/RECORD +0 -7
- {llm_gemini-0.16.dist-info → llm_gemini-0.18.dist-info}/entry_points.txt +0 -0
- {llm_gemini-0.16.dist-info → llm_gemini-0.18.dist-info}/licenses/LICENSE +0 -0
- {llm_gemini-0.16.dist-info → llm_gemini-0.18.dist-info}/top_level.txt +0 -0
@@ -1,6 +1,6 @@
|
|
1
1
|
Metadata-Version: 2.4
|
2
2
|
Name: llm-gemini
|
3
|
-
Version: 0.16
|
3
|
+
Version: 0.18
|
4
4
|
Summary: LLM plugin to access Google's Gemini family of models
|
5
5
|
Author: Simon Willison
|
6
6
|
License: Apache-2.0
|
@@ -57,9 +57,18 @@ llm -m gemini-2.0-flash "A short joke about a pelican and a walrus"
|
|
57
57
|
>
|
58
58
|
> The walrus sighs and says, "It's a long story. Let's just say we met through a mutual friend... of the fin."
|
59
59
|
|
60
|
+
You can set the [default model](https://llm.datasette.io/en/stable/setup.html#setting-a-custom-default-model) to avoid the extra `-m` option:
|
61
|
+
|
62
|
+
```bash
|
63
|
+
llm models default gemini-2.0-flash
|
64
|
+
llm "A joke about a pelican and a walrus"
|
65
|
+
```
|
66
|
+
|
60
67
|
Other models are:
|
61
68
|
|
62
|
-
- `gemini-2.5-
|
69
|
+
- `gemini-2.5-flash-preview-04-17` - Gemini 2.5 Flash preview
|
70
|
+
- `gemini-2.5-pro-exp-03-25` - free experimental release of Gemini 2.5 Pro
|
71
|
+
- `gemini-2.5-pro-preview-03-25` - paid preview of Gemini 2.5 Pro
|
63
72
|
- `gemma-3-27b-it` - [Gemma 3](https://blog.google/technology/developers/gemma-3/) 27B
|
64
73
|
- `gemini-2.0-pro-exp-02-05` - experimental release of Gemini 2.0 Pro
|
65
74
|
- `gemini-2.0-flash-lite` - Gemini 2.0 Flash-Lite
|
@@ -79,23 +88,23 @@ Other models are:
|
|
79
88
|
Gemini models are multi-modal. You can provide images, audio or video files as input like this:
|
80
89
|
|
81
90
|
```bash
|
82
|
-
llm -m gemini-
|
91
|
+
llm -m gemini-2.0-flash 'extract text' -a image.jpg
|
83
92
|
```
|
84
93
|
Or with a URL:
|
85
94
|
```bash
|
86
|
-
llm -m gemini-
|
95
|
+
llm -m gemini-2.0-flash-lite 'describe image' \
|
87
96
|
-a https://static.simonwillison.net/static/2024/pelicans.jpg
|
88
97
|
```
|
89
98
|
Audio works too:
|
90
99
|
|
91
100
|
```bash
|
92
|
-
llm -m gemini-
|
101
|
+
llm -m gemini-2.0-flash 'transcribe audio' -a audio.mp3
|
93
102
|
```
|
94
103
|
|
95
104
|
And video:
|
96
105
|
|
97
106
|
```bash
|
98
|
-
llm -m gemini-
|
107
|
+
llm -m gemini-2.0-flash 'describe what happens' -a video.mp4
|
99
108
|
```
|
100
109
|
The Gemini prompting guide includes [extensive advice](https://ai.google.dev/gemini-api/docs/file-prompting-strategies) on multi-modal prompting.
|
101
110
|
|
@@ -104,7 +113,7 @@ The Gemini prompting guide includes [extensive advice](https://ai.google.dev/gem
|
|
104
113
|
Use `-o json_object 1` to force the output to be JSON:
|
105
114
|
|
106
115
|
```bash
|
107
|
-
llm -m gemini-
|
116
|
+
llm -m gemini-2.0-flash -o json_object 1 \
|
108
117
|
'3 largest cities in California, list of {"name": "..."}'
|
109
118
|
```
|
110
119
|
Outputs:
|
@@ -119,7 +128,7 @@ Gemini models can [write and execute code](https://ai.google.dev/gemini-api/docs
|
|
119
128
|
To enable this feature, use `-o code_execution 1`:
|
120
129
|
|
121
130
|
```bash
|
122
|
-
llm -m gemini-
|
131
|
+
llm -m gemini-2.0-flash -o code_execution 1 \
|
123
132
|
'use python to calculate (factorial of 13) * 3'
|
124
133
|
```
|
125
134
|
### Google search
|
@@ -131,7 +140,7 @@ Using this feature may incur additional requirements in terms of how you use the
|
|
131
140
|
To run a prompt with Google search enabled, use `-o google_search 1`:
|
132
141
|
|
133
142
|
```bash
|
134
|
-
llm -m gemini-
|
143
|
+
llm -m gemini-2.0-flash -o google_search 1 \
|
135
144
|
'What happened in Ireland today?'
|
136
145
|
```
|
137
146
|
|
@@ -142,7 +151,7 @@ Use `llm logs -c --json` after running a prompt to see the full JSON response, w
|
|
142
151
|
To chat interactively with the model, run `llm chat`:
|
143
152
|
|
144
153
|
```bash
|
145
|
-
llm chat -m gemini-
|
154
|
+
llm chat -m gemini-2.0-flash
|
146
155
|
```
|
147
156
|
|
148
157
|
## Embeddings
|
@@ -205,4 +214,3 @@ You will need to have stored a valid Gemini API key using this command first:
|
|
205
214
|
llm keys set gemini
|
206
215
|
# Paste key here
|
207
216
|
```
|
208
|
-
|
@@ -0,0 +1,7 @@
|
|
1
|
+
llm_gemini.py,sha256=eoMPxKnWgEN3Li1HDsKIgKnXT5AL4stDhWc1MU6uIRE,17558
|
2
|
+
llm_gemini-0.18.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
|
3
|
+
llm_gemini-0.18.dist-info/METADATA,sha256=X3NSwwbxo8TIAgyf3PA13WUzSWKTxXc38EhpD2VK_ds,8047
|
4
|
+
llm_gemini-0.18.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
|
5
|
+
llm_gemini-0.18.dist-info/entry_points.txt,sha256=n544bpgUPIBc5l_cnwsTxPc3gMGJHPtAyqBNp-CkMWk,26
|
6
|
+
llm_gemini-0.18.dist-info/top_level.txt,sha256=WUQmG6_2QKbT_8W4HH93qyKl_0SUteL4Ra6_PhyNGKU,11
|
7
|
+
llm_gemini-0.18.dist-info/RECORD,,
|
llm_gemini.py
CHANGED
@@ -37,6 +37,9 @@ GOOGLE_SEARCH_MODELS = {
|
|
37
37
|
"gemini-2.0-flash-exp",
|
38
38
|
"gemini-2.0-flash",
|
39
39
|
}
|
40
|
+
THINKING_BUDGET_MODELS = {
|
41
|
+
"gemini-2.5-flash-preview-04-17",
|
42
|
+
}
|
40
43
|
|
41
44
|
|
42
45
|
@llm.hookimpl
|
@@ -68,17 +71,24 @@ def register_models(register):
|
|
68
71
|
"gemma-3-27b-it",
|
69
72
|
# 25th March 2025:
|
70
73
|
"gemini-2.5-pro-exp-03-25",
|
74
|
+
# 4th April 2025 (paid):
|
75
|
+
"gemini-2.5-pro-preview-03-25",
|
76
|
+
# 17th April 2025:
|
77
|
+
"gemini-2.5-flash-preview-04-17",
|
71
78
|
]:
|
72
79
|
can_google_search = model_id in GOOGLE_SEARCH_MODELS
|
80
|
+
can_thinking_budget = model_id in THINKING_BUDGET_MODELS
|
73
81
|
register(
|
74
82
|
GeminiPro(
|
75
83
|
model_id,
|
76
84
|
can_google_search=can_google_search,
|
85
|
+
can_thinking_budget=can_thinking_budget,
|
77
86
|
can_schema="flash-thinking" not in model_id,
|
78
87
|
),
|
79
88
|
AsyncGeminiPro(
|
80
89
|
model_id,
|
81
90
|
can_google_search=can_google_search,
|
91
|
+
can_thinking_budget=can_thinking_budget,
|
82
92
|
can_schema="flash-thinking" not in model_id,
|
83
93
|
),
|
84
94
|
)
|
@@ -206,12 +216,27 @@ class _SharedGemini:
|
|
206
216
|
default=None,
|
207
217
|
)
|
208
218
|
|
209
|
-
|
219
|
+
class OptionsWithThinkingBudget(OptionsWithGoogleSearch):
|
220
|
+
thinking_budget: Optional[int] = Field(
|
221
|
+
description="Indicates the thinking budget in tokens. Set to 0 to disable.",
|
222
|
+
default=None,
|
223
|
+
)
|
224
|
+
|
225
|
+
def __init__(
|
226
|
+
self,
|
227
|
+
model_id,
|
228
|
+
can_google_search=False,
|
229
|
+
can_thinking_budget=False,
|
230
|
+
can_schema=False,
|
231
|
+
):
|
210
232
|
self.model_id = model_id
|
211
233
|
self.can_google_search = can_google_search
|
212
234
|
self.supports_schema = can_schema
|
213
235
|
if can_google_search:
|
214
236
|
self.Options = self.OptionsWithGoogleSearch
|
237
|
+
self.can_thinking_budget = can_thinking_budget
|
238
|
+
if can_thinking_budget:
|
239
|
+
self.Options = self.OptionsWithThinkingBudget
|
215
240
|
|
216
241
|
def build_messages(self, prompt, conversation):
|
217
242
|
messages = []
|
@@ -264,10 +289,18 @@ class _SharedGemini:
|
|
264
289
|
if prompt.system:
|
265
290
|
body["systemInstruction"] = {"parts": [{"text": prompt.system}]}
|
266
291
|
|
292
|
+
generation_config = {}
|
293
|
+
|
267
294
|
if prompt.schema:
|
268
|
-
|
269
|
-
|
270
|
-
|
295
|
+
generation_config.update(
|
296
|
+
{
|
297
|
+
"response_mime_type": "application/json",
|
298
|
+
"response_schema": cleanup_schema(copy.deepcopy(prompt.schema)),
|
299
|
+
}
|
300
|
+
)
|
301
|
+
if self.can_thinking_budget and prompt.options.thinking_budget is not None:
|
302
|
+
generation_config["thinking_config"] = {
|
303
|
+
"thinking_budget": prompt.options.thinking_budget
|
271
304
|
}
|
272
305
|
|
273
306
|
config_map = {
|
@@ -277,16 +310,17 @@ class _SharedGemini:
|
|
277
310
|
"top_k": "topK",
|
278
311
|
}
|
279
312
|
if prompt.options and prompt.options.json_object:
|
280
|
-
|
313
|
+
generation_config["response_mime_type"] = "application/json"
|
281
314
|
|
282
315
|
if any(
|
283
316
|
getattr(prompt.options, key, None) is not None for key in config_map.keys()
|
284
317
|
):
|
285
|
-
generation_config = {}
|
286
318
|
for key, other_key in config_map.items():
|
287
319
|
config_value = getattr(prompt.options, key, None)
|
288
320
|
if config_value is not None:
|
289
321
|
generation_config[other_key] = config_value
|
322
|
+
|
323
|
+
if generation_config:
|
290
324
|
body["generationConfig"] = generation_config
|
291
325
|
|
292
326
|
return body
|
llm_gemini-0.16.dist-info/RECORD
DELETED
@@ -1,7 +0,0 @@
|
|
1
|
-
llm_gemini.py,sha256=GqrUhLIM3PxxrC3K6XNHy1cVcOCQfw44oL_36Cv7wug,16438
|
2
|
-
llm_gemini-0.16.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
|
3
|
-
llm_gemini-0.16.dist-info/METADATA,sha256=MVM2mmBLB1QhBVYClxvhnLKP1URrVFmk-9EyzoaEo1c,7725
|
4
|
-
llm_gemini-0.16.dist-info/WHEEL,sha256=DK49LOLCYiurdXXOXwGJm6U4DkHkg4lcxjhqwRa0CP4,91
|
5
|
-
llm_gemini-0.16.dist-info/entry_points.txt,sha256=n544bpgUPIBc5l_cnwsTxPc3gMGJHPtAyqBNp-CkMWk,26
|
6
|
-
llm_gemini-0.16.dist-info/top_level.txt,sha256=WUQmG6_2QKbT_8W4HH93qyKl_0SUteL4Ra6_PhyNGKU,11
|
7
|
-
llm_gemini-0.16.dist-info/RECORD,,
|
File without changes
|
File without changes
|
File without changes
|