llm-gemini 0.24__py3-none-any.whl → 0.26__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: llm-gemini
3
- Version: 0.24
3
+ Version: 0.26
4
4
  Summary: LLM plugin to access Google's Gemini family of models
5
5
  Author: Simon Willison
6
6
  License-Expression: Apache-2.0
@@ -10,7 +10,7 @@ Project-URL: Issues, https://github.com/simonw/llm-gemini/issues
10
10
  Project-URL: CI, https://github.com/simonw/llm-gemini/actions
11
11
  Description-Content-Type: text/markdown
12
12
  License-File: LICENSE
13
- Requires-Dist: llm>=0.26
13
+ Requires-Dist: llm>=0.27
14
14
  Requires-Dist: httpx
15
15
  Requires-Dist: ijson
16
16
  Provides-Extra: test
@@ -75,6 +75,9 @@ result = runner.invoke(cli.cli, ["models", "-q", "gemini/"])
75
75
  lines = reversed(result.output.strip().split("\n"))
76
76
  to_output = []
77
77
  NOTES = {
78
+ "gemini/gemini-flash-latest": "Latest Gemini Flash",
79
+ "gemini/gemini-flash-lite-latest": "Latest Gemini Flash Lite",
80
+ "gemini/gemini-2.5-flash": "Gemini 2.5 Flash",
78
81
  "gemini/gemini-2.5-pro": "Gemini 2.5 Pro",
79
82
  "gemini/gemini-2.5-flash": "Gemini 2.5 Flash",
80
83
  "gemini/gemini-2.5-flash-lite": "Gemini 2.5 Flash Lite",
@@ -93,6 +96,10 @@ for line in lines:
93
96
  )
94
97
  cog.out("\n".join(to_output))
95
98
  ]]] -->
99
+ - `gemini/gemini-2.5-flash-lite-preview-09-2025`
100
+ - `gemini/gemini-2.5-flash-preview-09-2025`
101
+ - `gemini/gemini-flash-lite-latest`: Latest Gemini Flash Lite
102
+ - `gemini/gemini-flash-latest`: Latest Gemini Flash
96
103
  - `gemini/gemini-2.5-flash-lite`: Gemini 2.5 Flash Lite
97
104
  - `gemini/gemini-2.5-pro`: Gemini 2.5 Pro
98
105
  - `gemini/gemini-2.5-flash`: Gemini 2.5 Flash
@@ -197,6 +204,27 @@ llm -m gemini-2.0-flash -o google_search 1 \
197
204
 
198
205
  Use `llm logs -c --json` after running a prompt to see the full JSON response, which includes [additional information](https://github.com/simonw/llm-gemini/pull/29#issuecomment-2606201877) about grounded results.
199
206
 
207
+ ### URL context
208
+
209
+ Gemini models support a [URL context](https://ai.google.dev/gemini-api/docs/url-context) tool which, when enabled, allows the models to fetch additional content from URLs as part of their execution.
210
+
211
+ You can enable that with the `-o url_context 1` option - for example:
212
+
213
+ ```bash
214
+ llm -m gemini-2.5-flash -o url_context 1 'Latest headline on simonwillison.net'
215
+ ```
216
+ Extra tokens introduced by this tool will be charged as input tokens. Use `--usage` to see details of those:
217
+ ```bash
218
+ llm -m gemini-2.5-flash -o url_context 1 --usage \
219
+ 'Latest headline on simonwillison.net'
220
+ ```
221
+ Outputs:
222
+ ```
223
+ The latest headline on simonwillison.net as of August 17, 2025, is "TIL: Running a gpt-oss eval suite against LM Studio on a Mac.".
224
+ Token usage: 9,613 input, 87 output, {"candidatesTokenCount": 57, "promptTokensDetails": [{"modality": "TEXT", "tokenCount": 10}], "toolUsePromptTokenCount": 9603, "toolUsePromptTokensDetails": [{"modality": "TEXT", "tokenCount": 9603}], "thoughtsTokenCount": 30}
225
+ ```
226
+ The `"toolUsePromptTokenCount"` key shows how many tokens were used for that URL context.
227
+
200
228
  ### Chat
201
229
 
202
230
  To chat interactively with the model, run `llm chat`:
@@ -0,0 +1,7 @@
1
+ llm_gemini.py,sha256=KQX_Yb-YG5_-SK1Aq5wdvDHkKg8Mdz42JbtI5RFO9wA,24779
2
+ llm_gemini-0.26.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
3
+ llm_gemini-0.26.dist-info/METADATA,sha256=UcYMTtjFQhHeBhWyRW2RRAJMkcy_H1rz0tkXZqp4sCM,11947
4
+ llm_gemini-0.26.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
5
+ llm_gemini-0.26.dist-info/entry_points.txt,sha256=n544bpgUPIBc5l_cnwsTxPc3gMGJHPtAyqBNp-CkMWk,26
6
+ llm_gemini-0.26.dist-info/top_level.txt,sha256=WUQmG6_2QKbT_8W4HH93qyKl_0SUteL4Ra6_PhyNGKU,11
7
+ llm_gemini-0.26.dist-info/RECORD,,
llm_gemini.py CHANGED
@@ -45,6 +45,10 @@ GOOGLE_SEARCH_MODELS = {
45
45
  "gemini-2.5-pro",
46
46
  "gemini-2.5-flash",
47
47
  "gemini-2.5-flash-lite",
48
+ "gemini-flash-latest",
49
+ "gemini-flash-lite-latest",
50
+ "gemini-2.5-flash-preview-09-2025",
51
+ "gemini-2.5-flash-lite-preview-09-2025",
48
52
  }
49
53
 
50
54
  # Older Google models used google_search_retrieval instead of google_search
@@ -70,6 +74,10 @@ THINKING_BUDGET_MODELS = {
70
74
  "gemini-2.5-pro",
71
75
  "gemini-2.5-flash",
72
76
  "gemini-2.5-flash-lite",
77
+ "gemini-flash-latest",
78
+ "gemini-flash-lite-latest",
79
+ "gemini-2.5-flash-preview-09-2025",
80
+ "gemini-2.5-flash-lite-preview-09-2025",
73
81
  }
74
82
 
75
83
  NO_VISION_MODELS = {"gemma-3-1b-it", "gemma-3n-e4b-it"}
@@ -156,6 +164,11 @@ def register_models(register):
156
164
  "gemini-2.5-pro",
157
165
  # 22nd July 2025:
158
166
  "gemini-2.5-flash-lite",
167
+ # 25th September 2025:
168
+ "gemini-flash-latest",
169
+ "gemini-flash-lite-latest",
170
+ "gemini-2.5-flash-preview-09-2025",
171
+ "gemini-2.5-flash-lite-preview-09-2025",
159
172
  ):
160
173
  can_google_search = model_id in GOOGLE_SEARCH_MODELS
161
174
  can_thinking_budget = model_id in THINKING_BUDGET_MODELS
@@ -272,6 +285,13 @@ class _SharedGemini:
272
285
  ),
273
286
  default=None,
274
287
  )
288
+ url_context: Optional[bool] = Field(
289
+ description=(
290
+ "Enable the URL context tool so the model can fetch content "
291
+ "from URLs mentioned in the prompt"
292
+ ),
293
+ default=None,
294
+ )
275
295
 
276
296
  class OptionsWithGoogleSearch(Options):
277
297
  google_search: Optional[bool] = Field(
@@ -404,6 +424,8 @@ class _SharedGemini:
404
424
  else "google_search"
405
425
  )
406
426
  tools.append({tool_name: {}})
427
+ if prompt.options and prompt.options.url_context:
428
+ tools.append({"url_context": {}})
407
429
  if prompt.tools:
408
430
  tools.append(
409
431
  {
@@ -489,6 +511,12 @@ class _SharedGemini:
489
511
  candidates_token_count = usage.get("candidatesTokenCount") or 0
490
512
  thoughts_token_count = usage.get("thoughtsTokenCount") or 0
491
513
  output_tokens = candidates_token_count + thoughts_token_count
514
+ tool_token_count = usage.get("toolUsePromptTokenCount") or 0
515
+ if tool_token_count:
516
+ if input_tokens is None:
517
+ input_tokens = tool_token_count
518
+ else:
519
+ input_tokens += tool_token_count
492
520
  usage.pop("totalTokenCount", None)
493
521
  if input_tokens is not None:
494
522
  response.set_usage(
@@ -528,6 +556,8 @@ class GeminiPro(_SharedGemini, llm.KeyModel):
528
556
  gathered.append(event)
529
557
  events.clear()
530
558
  response.response_json = gathered[-1]
559
+ resolved_model = gathered[-1]["modelVersion"]
560
+ response.set_resolved_model(resolved_model)
531
561
  self.set_usage(response)
532
562
 
533
563
 
@@ -1,7 +0,0 @@
1
- llm_gemini.py,sha256=plGri2fQasL1iWZlEeWsuZZwlGxNyp0WpK_msVqo0AQ,23564
2
- llm_gemini-0.24.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
3
- llm_gemini-0.24.dist-info/METADATA,sha256=ehpIeJRRZAnlVb8T8cHVwfx6mpvy9Sb0e4eot6AcOqg,10468
4
- llm_gemini-0.24.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
5
- llm_gemini-0.24.dist-info/entry_points.txt,sha256=n544bpgUPIBc5l_cnwsTxPc3gMGJHPtAyqBNp-CkMWk,26
6
- llm_gemini-0.24.dist-info/top_level.txt,sha256=WUQmG6_2QKbT_8W4HH93qyKl_0SUteL4Ra6_PhyNGKU,11
7
- llm_gemini-0.24.dist-info/RECORD,,