llm-gemini 0.6-py3-none-any.whl → 0.8-py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as they appear in their public registry, and is provided for informational purposes only.
- {llm_gemini-0.6.dist-info → llm_gemini-0.8.dist-info}/METADATA +2 -1
- llm_gemini-0.8.dist-info/RECORD +7 -0
- llm_gemini.py +4 -2
- llm_gemini-0.6.dist-info/RECORD +0 -7
- {llm_gemini-0.6.dist-info → llm_gemini-0.8.dist-info}/LICENSE +0 -0
- {llm_gemini-0.6.dist-info → llm_gemini-0.8.dist-info}/WHEEL +0 -0
- {llm_gemini-0.6.dist-info → llm_gemini-0.8.dist-info}/entry_points.txt +0 -0
- {llm_gemini-0.6.dist-info → llm_gemini-0.8.dist-info}/top_level.txt +0 -0
{llm_gemini-0.6.dist-info → llm_gemini-0.8.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: llm-gemini
-Version: 0.6
+Version: 0.8
 Summary: LLM plugin to access Google's Gemini family of models
 Author: Simon Willison
 License: Apache-2.0
@@ -63,6 +63,7 @@ Other models are:
 - `gemini-exp-1114` - recent experimental #1
 - `gemini-exp-1121` - recent experimental #2
 - `gemini-exp-1206` - recent experimental #3
+- `gemini-2.0-flash-exp` - [Gemini 2.0 Flash](https://blog.google/technology/google-deepmind/google-gemini-ai-update-december-2024/#gemini-2-0-flash)
 
 ### Images, audio and video
 
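The `gemini-2.0-flash-exp` entry added above documents a model ID that the plugin now registers (see the `llm_gemini.py` hunk below). As a minimal usage sketch, assuming `llm` and llm-gemini 0.8 are installed and a Gemini API key has been configured (for example with `llm keys set gemini`); the prompt string is just a placeholder:

```python
import llm

# Look up the newly documented model by the ID registered in llm-gemini 0.8.
model = llm.get_model("gemini-2.0-flash-exp")

# Run a one-off prompt; .text() waits for and returns the completed response text.
response = model.prompt("Three short names for a pet pelican")
print(response.text())
```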
llm_gemini-0.8.dist-info/RECORD
ADDED
@@ -0,0 +1,7 @@
+llm_gemini.py,sha256=6xRF1uP64O-nYAGgKytFh1Wj0N-cRny4bs69GmTwJLU,11408
+llm_gemini-0.8.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+llm_gemini-0.8.dist-info/METADATA,sha256=cCkBf00ebzzwl2wyUxOTuwTWfG_peiB7mtD-8bzHBkc,5530
+llm_gemini-0.8.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
+llm_gemini-0.8.dist-info/entry_points.txt,sha256=n544bpgUPIBc5l_cnwsTxPc3gMGJHPtAyqBNp-CkMWk,26
+llm_gemini-0.8.dist-info/top_level.txt,sha256=WUQmG6_2QKbT_8W4HH93qyKl_0SUteL4Ra6_PhyNGKU,11
+llm_gemini-0.8.dist-info/RECORD,,
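For readers less familiar with wheel internals, each RECORD entry above is a CSV row of installed path, `sha256=`-prefixed digest, and file size in bytes (the RECORD file itself is listed with the last two fields empty). A small parsing sketch using one line copied from the hunk above:

```python
import csv
import io

# One entry from the new RECORD: path, sha256 digest, size in bytes.
record_line = "llm_gemini.py,sha256=6xRF1uP64O-nYAGgKytFh1Wj0N-cRny4bs69GmTwJLU,11408\n"

path, digest, size = next(csv.reader(io.StringIO(record_line)))
print(path)                     # llm_gemini.py
print(digest.split("=", 1)[1])  # base64-encoded SHA-256 hash of the file
print(int(size))                # 11408 bytes
```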
llm_gemini.py
CHANGED
@@ -40,6 +40,8 @@ def register_models(register):
         "gemini-exp-1114",
         "gemini-exp-1121",
         "gemini-exp-1206",
+        "gemini-2.0-flash-exp",
+        "gemini-2.0-flash-thinking-exp-1219",
     ]:
         register(GeminiPro(model_id), AsyncGeminiPro(model_id))
 
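The hunk above adds two new model IDs to the list that the plugin's `register_models` hook loops over, registering a sync and an async model object for each. A quick way to confirm the new IDs resolve after upgrading (a sketch, assuming `llm` and llm-gemini 0.8 are installed):

```python
import llm

# Both IDs added in 0.8 should now resolve through llm's model registry.
for model_id in ("gemini-2.0-flash-exp", "gemini-2.0-flash-thinking-exp-1219"):
    model = llm.get_model(model_id)
    print(model.model_id, type(model).__name__)
```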
@@ -153,7 +155,7 @@ class _SharedGemini:
                 if response.prompt.prompt:
                     parts.append({"text": response.prompt.prompt})
                 messages.append({"role": "user", "parts": parts})
-                messages.append({"role": "model", "parts": [{"text": response.text()}]})
+                messages.append({"role": "model", "parts": [{"text": response.text_or_raise()}]})
 
         parts = []
         if prompt.prompt:
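This hunk is in the `_SharedGemini` code that replays prior conversation turns into the Gemini API's request format, switching from `response.text()` to `response.text_or_raise()`, presumably to also handle responses produced through the async model path. A standalone sketch of the message structure that loop builds, with placeholder strings rather than real conversation data:

```python
# Shape of the conversation history the modified lines build: alternating
# "user" / "model" turns, each with a list of text parts.
history = [
    ("What is the capital of France?", "Paris."),
    ("And of Italy?", "Rome."),
]

messages = []
for user_text, model_text in history:
    messages.append({"role": "user", "parts": [{"text": user_text}]})
    messages.append({"role": "model", "parts": [{"text": model_text}]})

print(messages)
```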
@@ -333,4 +335,4 @@ class GeminiEmbeddingModel(llm.EmbeddingModel):
         )
 
         response.raise_for_status()
-        return [item["values"] for item in response.json()["embeddings"]]
+        return [item["values"] for item in response.json()["embeddings"]]
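The final hunk rewrites the embedding model's return line without changing its text, which usually indicates a trailing-newline difference. For reference, the comprehension expects a Gemini batch-embedding response shaped roughly like the sketch below (the numbers are made-up placeholders):

```python
# Illustrative response body for a batch embedding call: one "values" list
# (the embedding vector) per input item, in request order.
response_json = {
    "embeddings": [
        {"values": [0.0123, -0.0456, 0.0789]},
        {"values": [0.0321, 0.0654, -0.0987]},
    ]
}

vectors = [item["values"] for item in response_json["embeddings"]]
print(len(vectors), len(vectors[0]))  # 2 vectors, 3 dimensions each
```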
llm_gemini-0.6.dist-info/RECORD
DELETED
@@ -1,7 +0,0 @@
-llm_gemini.py,sha256=Ft2-5cyIF7Vy-f0EVilG_H3W06B9oxYUn-QdDpRkxXg,11322
-llm_gemini-0.6.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-llm_gemini-0.6.dist-info/METADATA,sha256=d8dW-QN_PvRxbjCDABrnEfSzTeLksI7WWB36kl2FH68,5380
-llm_gemini-0.6.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
-llm_gemini-0.6.dist-info/entry_points.txt,sha256=n544bpgUPIBc5l_cnwsTxPc3gMGJHPtAyqBNp-CkMWk,26
-llm_gemini-0.6.dist-info/top_level.txt,sha256=WUQmG6_2QKbT_8W4HH93qyKl_0SUteL4Ra6_PhyNGKU,11
-llm_gemini-0.6.dist-info/RECORD,,
{llm_gemini-0.6.dist-info → llm_gemini-0.8.dist-info}/LICENSE
File without changes

{llm_gemini-0.6.dist-info → llm_gemini-0.8.dist-info}/WHEEL
File without changes

{llm_gemini-0.6.dist-info → llm_gemini-0.8.dist-info}/entry_points.txt
File without changes

{llm_gemini-0.6.dist-info → llm_gemini-0.8.dist-info}/top_level.txt
File without changes