llm-gemini 0.24-py3-none-any.whl → 0.25-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {llm_gemini-0.24.dist-info → llm_gemini-0.25.dist-info}/METADATA +22 -1
- llm_gemini-0.25.dist-info/RECORD +7 -0
- llm_gemini.py +15 -0
- llm_gemini-0.24.dist-info/RECORD +0 -7
- {llm_gemini-0.24.dist-info → llm_gemini-0.25.dist-info}/WHEEL +0 -0
- {llm_gemini-0.24.dist-info → llm_gemini-0.25.dist-info}/entry_points.txt +0 -0
- {llm_gemini-0.24.dist-info → llm_gemini-0.25.dist-info}/licenses/LICENSE +0 -0
- {llm_gemini-0.24.dist-info → llm_gemini-0.25.dist-info}/top_level.txt +0 -0
{llm_gemini-0.24.dist-info → llm_gemini-0.25.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: llm-gemini
-Version: 0.24
+Version: 0.25
 Summary: LLM plugin to access Google's Gemini family of models
 Author: Simon Willison
 License-Expression: Apache-2.0
@@ -197,6 +197,27 @@ llm -m gemini-2.0-flash -o google_search 1 \
 
 Use `llm logs -c --json` after running a prompt to see the full JSON response, which includes [additional information](https://github.com/simonw/llm-gemini/pull/29#issuecomment-2606201877) about grounded results.
 
+### URL context
+
+Gemini models support a [URL context](https://ai.google.dev/gemini-api/docs/url-context) tool which, when enabled, allows the models to fetch additional content from URLs as part of their execution.
+
+You can enable that with the `-o url_context 1` option - for example:
+
+```bash
+llm -m gemini-2.5-flash -o url_context 1 'Latest headline on simonwillison.net'
+```
+Extra tokens introduced by this tool will be charged as input tokens. Use `--usage` to see details of those:
+```bash
+llm -m gemini-2.5-flash -o url_context 1 --usage \
+  'Latest headline on simonwillison.net'
+```
+Outputs:
+```
+The latest headline on simonwillison.net as of August 17, 2025, is "TIL: Running a gpt-oss eval suite against LM Studio on a Mac.".
+Token usage: 9,613 input, 87 output, {"candidatesTokenCount": 57, "promptTokensDetails": [{"modality": "TEXT", "tokenCount": 10}], "toolUsePromptTokenCount": 9603, "toolUsePromptTokensDetails": [{"modality": "TEXT", "tokenCount": 9603}], "thoughtsTokenCount": 30}
+```
+The `"toolUsePromptTokenCount"` key shows how many tokens were used for that URL context.
+
 ### Chat
 
 To chat interactively with the model, run `llm chat`:
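The README addition above covers the CLI route; the same option can also be set from llm's Python API, where `-o` options become keyword arguments to `prompt()`. A minimal sketch, assuming llm with llm-gemini 0.25 installed and a Gemini API key already configured (e.g. via `llm keys set gemini`):

```python
import llm

# Assumes llm-gemini 0.25 is installed and a Gemini API key is configured.
model = llm.get_model("gemini-2.5-flash")

# CLI options like -o url_context 1 become keyword arguments here.
response = model.prompt(
    "Latest headline on simonwillison.net",
    url_context=True,
)
print(response.text())

# Token accounting, including the extra URL-fetch tokens counted as input.
print(response.usage())
```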
llm_gemini-0.25.dist-info/RECORD
ADDED
@@ -0,0 +1,7 @@
+llm_gemini.py,sha256=uItceU020Z6RMurfvvBCnQ25dfxfr-V_dW1h0YsLBOM,24194
+llm_gemini-0.25.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+llm_gemini-0.25.dist-info/METADATA,sha256=sFpTVF87sHt8ldPlmTteoZ-Yg1KDIDmVgztjAshWKGA,11565
+llm_gemini-0.25.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+llm_gemini-0.25.dist-info/entry_points.txt,sha256=n544bpgUPIBc5l_cnwsTxPc3gMGJHPtAyqBNp-CkMWk,26
+llm_gemini-0.25.dist-info/top_level.txt,sha256=WUQmG6_2QKbT_8W4HH93qyKl_0SUteL4Ra6_PhyNGKU,11
+llm_gemini-0.25.dist-info/RECORD,,
llm_gemini.py
CHANGED
@@ -272,6 +272,13 @@ class _SharedGemini:
             ),
             default=None,
         )
+        url_context: Optional[bool] = Field(
+            description=(
+                "Enable the URL context tool so the model can fetch content "
+                "from URLs mentioned in the prompt"
+            ),
+            default=None,
+        )
 
     class OptionsWithGoogleSearch(Options):
         google_search: Optional[bool] = Field(
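Like the plugin's other Boolean options, the new field is a tri-state `Optional[bool]`: `None` means "not set", so it can be distinguished from an explicit `False`. A minimal standalone sketch of that pattern using plain pydantic (hypothetical `DemoOptions` class, not the plugin's actual `Options`):

```python
from typing import Optional

from pydantic import BaseModel, Field


class DemoOptions(BaseModel):
    # Same shape as the url_context option added above: a tri-state Boolean
    # defaulting to None, so "unset" and "explicitly off" are distinct.
    url_context: Optional[bool] = Field(
        description="Enable the URL context tool",
        default=None,
    )


print(DemoOptions().url_context)                  # None  (option not set)
print(DemoOptions(url_context=True).url_context)  # True  (tool will be added)
```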
@@ -404,6 +411,8 @@ class _SharedGemini:
                 else "google_search"
             )
             tools.append({tool_name: {}})
+        if prompt.options and prompt.options.url_context:
+            tools.append({"url_context": {}})
         if prompt.tools:
             tools.append(
                 {
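The effect of the two `tools.append()` calls is one entry per enabled tool in the `tools` array of the request sent to the Gemini API. A rough sketch of that fragment with both search grounding and URL context switched on (abbreviated; the real body also carries generation config and any function-calling tools):

```python
# Rough shape of the request body fragment built above when both
# -o google_search 1 and -o url_context 1 are set (abbreviated).
body = {
    "contents": [
        {"role": "user", "parts": [{"text": "Latest headline on simonwillison.net"}]}
    ],
    "tools": [
        {"google_search": {}},  # appended for the google_search option
        {"url_context": {}},    # appended for the new url_context option
    ],
}
```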
@@ -489,6 +498,12 @@ class _SharedGemini:
         candidates_token_count = usage.get("candidatesTokenCount") or 0
         thoughts_token_count = usage.get("thoughtsTokenCount") or 0
         output_tokens = candidates_token_count + thoughts_token_count
+        tool_token_count = usage.get("toolUsePromptTokenCount") or 0
+        if tool_token_count:
+            if input_tokens is None:
+                input_tokens = tool_token_count
+            else:
+                input_tokens += tool_token_count
         usage.pop("totalTokenCount", None)
         if input_tokens is not None:
             response.set_usage(
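Plugging in the numbers from the `--usage` example in the README section above shows how these counts combine (assuming `promptTokenCount` was 10, matching the `promptTokensDetails` entry in that output):

```python
# Figures taken from the --usage example above.
prompt_tokens = 10             # promptTokenCount (the prompt itself)
tool_use_prompt_tokens = 9603  # toolUsePromptTokenCount (URL context fetches)
candidates_tokens = 57         # candidatesTokenCount
thoughts_tokens = 30           # thoughtsTokenCount

input_tokens = prompt_tokens + tool_use_prompt_tokens  # 9613 -> "9,613 input"
output_tokens = candidates_tokens + thoughts_tokens    # 87   -> "87 output"
print(input_tokens, output_tokens)
```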
llm_gemini-0.24.dist-info/RECORD
DELETED
@@ -1,7 +0,0 @@
|
|
1
|
-
llm_gemini.py,sha256=plGri2fQasL1iWZlEeWsuZZwlGxNyp0WpK_msVqo0AQ,23564
|
2
|
-
llm_gemini-0.24.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
|
3
|
-
llm_gemini-0.24.dist-info/METADATA,sha256=ehpIeJRRZAnlVb8T8cHVwfx6mpvy9Sb0e4eot6AcOqg,10468
|
4
|
-
llm_gemini-0.24.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
|
5
|
-
llm_gemini-0.24.dist-info/entry_points.txt,sha256=n544bpgUPIBc5l_cnwsTxPc3gMGJHPtAyqBNp-CkMWk,26
|
6
|
-
llm_gemini-0.24.dist-info/top_level.txt,sha256=WUQmG6_2QKbT_8W4HH93qyKl_0SUteL4Ra6_PhyNGKU,11
|
7
|
-
llm_gemini-0.24.dist-info/RECORD,,
|
{llm_gemini-0.24.dist-info → llm_gemini-0.25.dist-info}/WHEEL
File without changes
{llm_gemini-0.24.dist-info → llm_gemini-0.25.dist-info}/entry_points.txt
File without changes
{llm_gemini-0.24.dist-info → llm_gemini-0.25.dist-info}/licenses/LICENSE
File without changes
{llm_gemini-0.24.dist-info → llm_gemini-0.25.dist-info}/top_level.txt
File without changes