llm-gemini 0.19.tar.gz → 0.20.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: llm-gemini
-Version: 0.19
+Version: 0.20
 Summary: LLM plugin to access Google's Gemini family of models
 Author: Simon Willison
 License: Apache-2.0
@@ -67,7 +67,8 @@ llm "A joke about a pelican and a walrus"
 Other models are:
 
 - `gemini-2.5-pro-preview-05-06` - latest paid Gemini 2.5 Pro preview
-- `gemini-2.5-flash-preview-04-17` - Gemini 2.5 Flash preview
+- `gemini-2.5-flash-preview-05-20` - Gemini 2.5 Flash preview
+- `gemini-2.5-flash-preview-04-17` - Earlier Gemini 2.5 Flash preview
 - `gemini-2.5-pro-exp-03-25` - free experimental release of Gemini 2.5 Pro
 - `gemini-2.5-pro-preview-03-25` - paid preview of Gemini 2.5 Pro
 - `gemma-3-27b-it` - [Gemma 3](https://blog.google/technology/developers/gemma-3/) 27B
@@ -44,7 +44,8 @@ llm "A joke about a pelican and a walrus"
 Other models are:
 
 - `gemini-2.5-pro-preview-05-06` - latest paid Gemini 2.5 Pro preview
-- `gemini-2.5-flash-preview-04-17` - Gemini 2.5 Flash preview
+- `gemini-2.5-flash-preview-05-20` - Gemini 2.5 Flash preview
+- `gemini-2.5-flash-preview-04-17` - Earlier Gemini 2.5 Flash preview
 - `gemini-2.5-pro-exp-03-25` - free experimental release of Gemini 2.5 Pro
 - `gemini-2.5-pro-preview-03-25` - paid preview of Gemini 2.5 Pro
 - `gemma-3-27b-it` - [Gemma 3](https://blog.google/technology/developers/gemma-3/) 27B
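
The newly listed `gemini-2.5-flash-preview-05-20` model can be called like any other model the plugin registers. A minimal sketch using the `llm` Python API, assuming a Gemini API key has already been configured (for example with `llm keys set gemini`); the prompt text mirrors the README example referenced in the hunk header above:

    import llm

    # Illustrative only: exercise the newly documented model id.
    model = llm.get_model("gemini-2.5-flash-preview-05-20")
    response = model.prompt("A joke about a pelican and a walrus")
    print(response.text())
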
@@ -40,6 +40,7 @@ GOOGLE_SEARCH_MODELS = {
     "gemini-2.5-pro-exp-03-25",
     "gemini-2.5-flash-preview-04-17",
     "gemini-2.5-pro-preview-05-06",
+    "gemini-2.5-flash-preview-05-20",
 }
 
 # Older Google models used google_search_retrieval instead of google_search
@@ -55,6 +56,7 @@ GOOGLE_SEARCH_MODELS_USING_SEARCH_RETRIEVAL = {
 
 THINKING_BUDGET_MODELS = {
     "gemini-2.5-flash-preview-04-17",
+    "gemini-2.5-flash-preview-05-20",
 }
 
 
@@ -93,6 +95,8 @@ def register_models(register):
         "gemini-2.5-flash-preview-04-17",
         # 6th May 2025:
         "gemini-2.5-pro-preview-05-06",
+        # 20th May 2025:
+        "gemini-2.5-flash-preview-05-20",
     ]:
         can_google_search = model_id in GOOGLE_SEARCH_MODELS
         can_thinking_budget = model_id in THINKING_BUDGET_MODELS
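
The registration loop above gates optional capabilities on set membership, so adding the new model id to both sets is how the plugin decides to enable Google search grounding and thinking-budget support for it. A standalone sketch of that check, with the sets trimmed to the entries relevant here:

    # Trimmed copies of the two feature sets as they stand after this change.
    GOOGLE_SEARCH_MODELS = {
        "gemini-2.5-flash-preview-04-17",
        "gemini-2.5-pro-preview-05-06",
        "gemini-2.5-flash-preview-05-20",
    }
    THINKING_BUDGET_MODELS = {
        "gemini-2.5-flash-preview-04-17",
        "gemini-2.5-flash-preview-05-20",
    }

    model_id = "gemini-2.5-flash-preview-05-20"
    can_google_search = model_id in GOOGLE_SEARCH_MODELS  # True
    can_thinking_budget = model_id in THINKING_BUDGET_MODELS  # True
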
@@ -369,7 +373,10 @@ class _SharedGemini:
                 candidate.pop("content", None)
             usage = response.response_json.pop("usageMetadata")
             input_tokens = usage.pop("promptTokenCount", None)
-            output_tokens = usage.pop("candidatesTokenCount", None)
+            # See https://github.com/simonw/llm-gemini/issues/75#issuecomment-2861827509
+            candidates_token_count = usage.get("candidatesTokenCount") or 0
+            thoughts_token_count = usage.get("thoughtsTokenCount") or 0
+            output_tokens = candidates_token_count + thoughts_token_count
             usage.pop("totalTokenCount", None)
             if input_tokens is not None:
                 response.set_usage(
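
With this change, thinking tokens are counted as output tokens instead of being dropped. A small sketch of the new arithmetic; the sample counts are hypothetical (the prompt and candidate counts echo the test fixture further down, the thoughts count is made up):

    # Hypothetical usageMetadata payload from the Gemini API.
    usage = {
        "promptTokenCount": 9,
        "candidatesTokenCount": 2,
        "thoughtsTokenCount": 5,
        "totalTokenCount": 16,
    }
    candidates_token_count = usage.get("candidatesTokenCount") or 0
    thoughts_token_count = usage.get("thoughtsTokenCount") or 0
    output_tokens = candidates_token_count + thoughts_token_count
    print(output_tokens)  # 7 -- 0.19 would have reported only the 2 candidate tokens
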
@@ -1,6 +1,6 @@
 [project]
 name = "llm-gemini"
-version = "0.19"
+version = "0.20"
 description = "LLM plugin to access Google's Gemini family of models"
 readme = "README.md"
 authors = [{name = "Simon Willison"}]
@@ -46,6 +46,7 @@ async def test_prompt():
         "modelVersion": "gemini-1.5-flash-latest",
     }
     assert response.token_details == {
+        "candidatesTokenCount": 2,
         "promptTokensDetails": [{"modality": "TEXT", "tokenCount": 9}],
         "candidatesTokensDetails": [{"modality": "TEXT", "tokenCount": 2}],
     }
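
The extra expected key follows from the usage handling change above: `candidatesTokenCount` is now read with `.get()` instead of removed with `.pop()`, so it remains in the leftover details dict that backs `response.token_details`. A tiny illustration of the difference:

    usage = {
        "candidatesTokenCount": 2,
        "promptTokensDetails": [{"modality": "TEXT", "tokenCount": 9}],
    }

    # 0.19 behaviour: pop() strips the key before the dict is recorded as details.
    usage_old = dict(usage)
    usage_old.pop("candidatesTokenCount", None)
    assert "candidatesTokenCount" not in usage_old

    # 0.20 behaviour: get() leaves the key in place, so it shows up in token_details.
    usage_new = dict(usage)
    output = usage_new.get("candidatesTokenCount") or 0
    assert "candidatesTokenCount" in usage_new and output == 2
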