llm-gemini 0.11.tar.gz → 0.13.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: llm-gemini
-Version: 0.11
+Version: 0.13
 Summary: LLM plugin to access Google's Gemini family of models
 Author: Simon Willison
 License: Apache-2.0
@@ -11,7 +11,7 @@ Project-URL: CI, https://github.com/simonw/llm-gemini/actions
 Classifier: License :: OSI Approved :: Apache Software License
 Description-Content-Type: text/markdown
 License-File: LICENSE
-Requires-Dist: llm>=0.22
+Requires-Dist: llm>=0.23
 Requires-Dist: httpx
 Requires-Dist: ijson
 Provides-Extra: test
@@ -69,7 +69,7 @@ Other models are:
 - `gemini-2.0-flash-thinking-exp-1219` - experimental "thinking" model from December 2024
 - `gemini-2.0-flash-thinking-exp-01-21` - experimental "thinking" model from January 2025
 - `gemini-2.0-flash` - Gemini 2.0 Flash
-- `gemini-2.0-flash-lite-preview-02-05` - Gemini 2.0 Flash-Lite
+- `gemini-2.0-flash-lite` - Gemini 2.0 Flash-Lite
 - `gemini-2.0-pro-exp-02-05` - experimental release of Gemini 2.0 Pro
 
 ### Images, audio and video
@@ -48,7 +48,7 @@ Other models are:
 - `gemini-2.0-flash-thinking-exp-1219` - experimental "thinking" model from December 2024
 - `gemini-2.0-flash-thinking-exp-01-21` - experimental "thinking" model from January 2025
 - `gemini-2.0-flash` - Gemini 2.0 Flash
-- `gemini-2.0-flash-lite-preview-02-05` - Gemini 2.0 Flash-Lite
+- `gemini-2.0-flash-lite` - Gemini 2.0 Flash-Lite
 - `gemini-2.0-pro-exp-02-05` - experimental release of Gemini 2.0 Pro
 
 ### Images, audio and video
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: llm-gemini
-Version: 0.11
+Version: 0.13
 Summary: LLM plugin to access Google's Gemini family of models
 Author: Simon Willison
 License: Apache-2.0
@@ -11,7 +11,7 @@ Project-URL: CI, https://github.com/simonw/llm-gemini/actions
 Classifier: License :: OSI Approved :: Apache Software License
 Description-Content-Type: text/markdown
 License-File: LICENSE
-Requires-Dist: llm>=0.22
+Requires-Dist: llm>=0.23
 Requires-Dist: httpx
 Requires-Dist: ijson
 Provides-Extra: test
@@ -69,7 +69,7 @@ Other models are:
 - `gemini-2.0-flash-thinking-exp-1219` - experimental "thinking" model from December 2024
 - `gemini-2.0-flash-thinking-exp-01-21` - experimental "thinking" model from January 2025
 - `gemini-2.0-flash` - Gemini 2.0 Flash
-- `gemini-2.0-flash-lite-preview-02-05` - Gemini 2.0 Flash-Lite
+- `gemini-2.0-flash-lite` - Gemini 2.0 Flash-Lite
 - `gemini-2.0-pro-exp-02-05` - experimental release of Gemini 2.0 Pro
 
 ### Images, audio and video
@@ -1,4 +1,4 @@
-llm>=0.22
+llm>=0.23
 httpx
 ijson
 
@@ -1,3 +1,4 @@
+import copy
 import httpx
 import ijson
 import llm
@@ -58,13 +59,22 @@ def register_models(register):
         "gemini-2.0-flash-thinking-exp-01-21",
         # Released 5th Feb 2025:
         "gemini-2.0-flash",
-        "gemini-2.0-flash-lite-preview-02-05",
         "gemini-2.0-pro-exp-02-05",
+        # Released 25th Feb 2025:
+        "gemini-2.0-flash-lite",
     ]:
         can_google_search = model_id in GOOGLE_SEARCH_MODELS
         register(
-            GeminiPro(model_id, can_google_search=can_google_search),
-            AsyncGeminiPro(model_id, can_google_search=can_google_search),
+            GeminiPro(
+                model_id,
+                can_google_search=can_google_search,
+                can_schema="flash-thinking" not in model_id,
+            ),
+            AsyncGeminiPro(
+                model_id,
+                can_google_search=can_google_search,
+                can_schema="flash-thinking" not in model_id,
+            ),
         )
 
 
@@ -78,10 +88,26 @@ def resolve_type(attachment):
     return mime_type
 
 
+def cleanup_schema(schema):
+    "Gemini supports only a subset of JSON schema"
+    keys_to_remove = ("$schema", "additionalProperties")
+    # Recursively remove them
+    if isinstance(schema, dict):
+        for key in keys_to_remove:
+            schema.pop(key, None)
+        for value in schema.values():
+            cleanup_schema(value)
+    elif isinstance(schema, list):
+        for value in schema:
+            cleanup_schema(value)
+    return schema
+
+
 class _SharedGemini:
     needs_key = "gemini"
     key_env_var = "LLM_GEMINI_KEY"
     can_stream = True
+    supports_schema = True
 
     attachment_types = (
         # Text
@@ -168,9 +194,10 @@ class _SharedGemini:
         default=None,
     )
 
-    def __init__(self, model_id, can_google_search=False):
+    def __init__(self, model_id, can_google_search=False, can_schema=False):
         self.model_id = model_id
         self.can_google_search = can_google_search
+        self.supports_schema = can_schema
         if can_google_search:
             self.Options = self.OptionsWithGoogleSearch
 
@@ -225,6 +252,12 @@ class _SharedGemini:
         if prompt.system:
             body["systemInstruction"] = {"parts": [{"text": prompt.system}]}
 
+        if prompt.schema:
+            body["generationConfig"] = {
+                "response_mime_type": "application/json",
+                "response_schema": cleanup_schema(copy.deepcopy(prompt.schema)),
+            }
+
         config_map = {
             "temperature": "temperature",
             "max_output_tokens": "maxOutputTokens",
@@ -1,6 +1,6 @@
 [project]
 name = "llm-gemini"
-version = "0.11"
+version = "0.13"
 description = "LLM plugin to access Google's Gemini family of models"
 readme = "README.md"
 authors = [{name = "Simon Willison"}]
@@ -9,7 +9,7 @@ classifiers = [
     "License :: OSI Approved :: Apache Software License"
 ]
 dependencies = [
-    "llm>=0.22",
+    "llm>=0.23",
     "httpx",
     "ijson"
 ]
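Beyond the model-list updates, the main functional change in 0.13 is structured-output support: `supports_schema`/`can_schema` on the model classes, a `generationConfig` with `response_schema` in the request body, and the new `cleanup_schema()` helper shown in the diff above. The following is a minimal sketch of what that helper does, assuming llm-gemini 0.13 is installed; the example schema itself is invented for illustration.

```python
# Illustrative only (not part of the diff): effect of the cleanup_schema()
# helper introduced in llm-gemini 0.13.
from llm_gemini import cleanup_schema

# A typical JSON schema; the example is made up for demonstration.
schema = {
    "$schema": "https://json-schema.org/draft/2020-12/schema",
    "type": "object",
    "additionalProperties": False,
    "properties": {
        "name": {"type": "string"},
        "age": {"type": "integer"},
    },
}

# Gemini accepts only a subset of JSON schema, so "$schema" and
# "additionalProperties" are removed recursively before the schema is sent
# as generationConfig.response_schema.
cleaned = cleanup_schema(schema)
print(cleaned)
# {'type': 'object', 'properties': {'name': {'type': 'string'}, 'age': {'type': 'integer'}}}
```

Note that `cleanup_schema()` mutates its argument in place; in the plugin the prompt's schema is deep-copied first (`copy.deepcopy(prompt.schema)`), so the caller's original schema object is left untouched.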