llm-gemini 0.9-py3-none-any.whl → 0.11-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: llm-gemini
-Version: 0.9
+Version: 0.11
 Summary: LLM plugin to access Google's Gemini family of models
 Author: Simon Willison
 License: Apache-2.0
@@ -11,12 +11,13 @@ Project-URL: CI, https://github.com/simonw/llm-gemini/actions
 Classifier: License :: OSI Approved :: Apache Software License
 Description-Content-Type: text/markdown
 License-File: LICENSE
-Requires-Dist: llm>=0.19
+Requires-Dist: llm>=0.22
 Requires-Dist: httpx
 Requires-Dist: ijson
 Provides-Extra: test
 Requires-Dist: pytest; extra == "test"
 Requires-Dist: pytest-recording; extra == "test"
+Requires-Dist: nest-asyncio; extra == "test"
 
 # llm-gemini
 
@@ -67,6 +68,9 @@ Other models are:
 - `learnlm-1.5-pro-experimental` - "an experimental task-specific model that has been trained to align with learning science principles" - [more details here](https://ai.google.dev/gemini-api/docs/learnlm).
 - `gemini-2.0-flash-thinking-exp-1219` - experimental "thinking" model from December 2024
 - `gemini-2.0-flash-thinking-exp-01-21` - experimental "thinking" model from January 2025
+- `gemini-2.0-flash` - Gemini 2.0 Flash
+- `gemini-2.0-flash-lite-preview-02-05` - Gemini 2.0 Flash-Lite
+- `gemini-2.0-pro-exp-02-05` - experimental release of Gemini 2.0 Pro
 
 ### Images, audio and video
 
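The three model IDs added above become available through the standard `llm` interfaces once the plugin is upgraded. A minimal sketch, assuming the plugin is installed and a Gemini API key is already configured (for example via `llm keys set gemini`), of calling one of them from the `llm` Python API:

```python
import llm

# Sketch only: uses the documented llm Python API; the model ID is one of
# the entries added to the README list in this release.
model = llm.get_model("gemini-2.0-flash")
response = model.prompt("Summarise what Gemini 2.0 Flash is in one sentence.")
print(response.text())
```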
@@ -0,0 +1,7 @@
+llm_gemini.py,sha256=tILVMtCj52Tt8VHK1YsTD0n55CpzzW3nMceavTdC9P8,12896
+llm_gemini-0.11.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+llm_gemini-0.11.dist-info/METADATA,sha256=L_7OzL0ToVpD-pCTCFIOwZuCNU60CdWbx_9psWgTdlM,7028
+llm_gemini-0.11.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+llm_gemini-0.11.dist-info/entry_points.txt,sha256=n544bpgUPIBc5l_cnwsTxPc3gMGJHPtAyqBNp-CkMWk,26
+llm_gemini-0.11.dist-info/top_level.txt,sha256=WUQmG6_2QKbT_8W4HH93qyKl_0SUteL4Ra6_PhyNGKU,11
+llm_gemini-0.11.dist-info/RECORD,,
llm_gemini.py CHANGED
@@ -32,6 +32,7 @@ GOOGLE_SEARCH_MODELS = {
     "gemini-1.5-pro-002",
     "gemini-1.5-flash-002",
     "gemini-2.0-flash-exp",
+    "gemini-2.0-flash",
 }
 
 
@@ -55,6 +56,10 @@ def register_models(register):
         "learnlm-1.5-pro-experimental",
         "gemini-2.0-flash-thinking-exp-1219",
         "gemini-2.0-flash-thinking-exp-01-21",
+        # Released 5th Feb 2025:
+        "gemini-2.0-flash",
+        "gemini-2.0-flash-lite-preview-02-05",
+        "gemini-2.0-pro-exp-02-05",
     ]:
         can_google_search = model_id in GOOGLE_SEARCH_MODELS
         register(
@@ -216,7 +221,7 @@ class _SharedGemini:
         if prompt.options and prompt.options.code_execution:
             body["tools"] = [{"codeExecution": {}}]
         if prompt.options and self.can_google_search and prompt.options.google_search:
-            body["tools"] = [{"google_search_retrieval": {}}]
+            body["tools"] = [{"google_search": {}}]
         if prompt.system:
             body["systemInstruction"] = {"parts": [{"text": prompt.system}]}
 
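This hunk renames the grounding tool in the request body: Gemini 2.0 models expect `google_search`, where the 1.5-era models used `google_search_retrieval`. As an illustration only (the surrounding fields are assumptions, not taken from this diff), the body sent to `streamGenerateContent` with the search option enabled now looks roughly like this:

```python
# Illustrative request body for a search-grounded prompt; the "contents"
# value is a made-up example, only the "tools" entry mirrors the change above.
body = {
    "contents": [
        {"role": "user", "parts": [{"text": "What changed in Gemini 2.0 Flash?"}]}
    ],
    # Renamed in this release: was [{"google_search_retrieval": {}}]
    "tools": [{"google_search": {}}],
}
```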
@@ -264,9 +269,8 @@ class _SharedGemini:
             pass
 
 
-class GeminiPro(_SharedGemini, llm.Model):
-    def execute(self, prompt, stream, response, conversation):
-        key = self.get_key()
+class GeminiPro(_SharedGemini, llm.KeyModel):
+    def execute(self, prompt, stream, response, conversation, key):
         url = f"https://generativelanguage.googleapis.com/v1beta/models/{self.model_id}:streamGenerateContent"
         gathered = []
         body = self.build_request_body(prompt, conversation)
@@ -275,7 +279,7 @@ class GeminiPro(_SharedGemini, llm.Model):
             "POST",
             url,
             timeout=None,
-            headers={"x-goog-api-key": key},
+            headers={"x-goog-api-key": self.get_key(key)},
             json=body,
         ) as http_response:
             events = ijson.sendable_list()
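The two hunks above are the synchronous half of the migration from `llm.Model` to `llm.KeyModel`, added in llm 0.22 (which is why the dependency floor moves to `llm>=0.22` earlier in this diff): the API key is no longer fetched inside `execute()` with a no-argument `self.get_key()`, it arrives as a `key` argument and is resolved with `self.get_key(key)`. A minimal sketch of that pattern, using a hypothetical toy model whose names are invented for illustration:

```python
import llm


class EchoKeyModel(llm.KeyModel):
    # Hypothetical model illustrating the llm>=0.22 KeyModel contract used above.
    model_id = "echo-key"
    needs_key = "echo"            # name under which `llm keys set` stores the key
    key_env_var = "ECHO_API_KEY"  # environment variable fallback

    def execute(self, prompt, stream, response, conversation, key):
        # self.get_key(key) resolves an explicitly passed key, a stored key,
        # or the environment variable.
        api_key = self.get_key(key)
        yield f"prompt={prompt.prompt!r}, key configured: {bool(api_key)}"
```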
@@ -297,9 +301,8 @@ class GeminiPro(_SharedGemini, llm.Model):
         self.set_usage(response)
 
 
-class AsyncGeminiPro(_SharedGemini, llm.AsyncModel):
-    async def execute(self, prompt, stream, response, conversation):
-        key = self.get_key()
+class AsyncGeminiPro(_SharedGemini, llm.AsyncKeyModel):
+    async def execute(self, prompt, stream, response, conversation, key):
         url = f"https://generativelanguage.googleapis.com/v1beta/models/{self.model_id}:streamGenerateContent"
         gathered = []
         body = self.build_request_body(prompt, conversation)
@@ -309,7 +312,7 @@ class AsyncGeminiPro(_SharedGemini, llm.AsyncModel):
             "POST",
             url,
             timeout=None,
-            headers={"x-goog-api-key": key},
+            headers={"x-goog-api-key": self.get_key(key)},
             json=body,
         ) as http_response:
             events = ijson.sendable_list()
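The async class gets the same treatment via `llm.AsyncKeyModel`. For completeness, a hedged sketch of the async counterpart of the toy model above (again, all names are invented):

```python
import llm


class AsyncEchoKeyModel(llm.AsyncKeyModel):
    # Hypothetical async twin of the sketch above: same key-passing contract,
    # but execute() is an async generator.
    model_id = "echo-key"
    needs_key = "echo"
    key_env_var = "ECHO_API_KEY"

    async def execute(self, prompt, stream, response, conversation, key):
        api_key = self.get_key(key)
        yield f"prompt={prompt.prompt!r}, key configured: {bool(api_key)}"
```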
@@ -1,7 +0,0 @@
-llm_gemini.py,sha256=sCouoSbzOe4GoTsskAKJZjhDTxRYqSxgNuODwi2O1z0,12752
-llm_gemini-0.9.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-llm_gemini-0.9.dist-info/METADATA,sha256=UEr_dRMMSev9YY9U34QMQHnhuyRPM7E7sHT1o8uA0qg,6808
-llm_gemini-0.9.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
-llm_gemini-0.9.dist-info/entry_points.txt,sha256=n544bpgUPIBc5l_cnwsTxPc3gMGJHPtAyqBNp-CkMWk,26
-llm_gemini-0.9.dist-info/top_level.txt,sha256=WUQmG6_2QKbT_8W4HH93qyKl_0SUteL4Ra6_PhyNGKU,11
-llm_gemini-0.9.dist-info/RECORD,,