llm-gemini 0.8-py3-none-any.whl → 0.9-py3-none-any.whl
- {llm_gemini-0.8.dist-info → llm_gemini-0.9.dist-info}/METADATA +19 -2
- llm_gemini-0.9.dist-info/RECORD +7 -0
- {llm_gemini-0.8.dist-info → llm_gemini-0.9.dist-info}/WHEEL +1 -1
- llm_gemini.py +40 -4
- llm_gemini-0.8.dist-info/RECORD +0 -7
- {llm_gemini-0.8.dist-info → llm_gemini-0.9.dist-info}/LICENSE +0 -0
- {llm_gemini-0.8.dist-info → llm_gemini-0.9.dist-info}/entry_points.txt +0 -0
- {llm_gemini-0.8.dist-info → llm_gemini-0.9.dist-info}/top_level.txt +0 -0
{llm_gemini-0.8.dist-info → llm_gemini-0.9.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
-Metadata-Version: 2.1
+Metadata-Version: 2.2
 Name: llm-gemini
-Version: 0.8
+Version: 0.9
 Summary: LLM plugin to access Google's Gemini family of models
 Author: Simon Willison
 License: Apache-2.0
@@ -64,6 +64,9 @@ Other models are:
 - `gemini-exp-1121` - recent experimental #2
 - `gemini-exp-1206` - recent experimental #3
 - `gemini-2.0-flash-exp` - [Gemini 2.0 Flash](https://blog.google/technology/google-deepmind/google-gemini-ai-update-december-2024/#gemini-2-0-flash)
+- `learnlm-1.5-pro-experimental` - "an experimental task-specific model that has been trained to align with learning science principles" - [more details here](https://ai.google.dev/gemini-api/docs/learnlm).
+- `gemini-2.0-flash-thinking-exp-1219` - experimental "thinking" model from December 2024
+- `gemini-2.0-flash-thinking-exp-01-21` - experimental "thinking" model from January 2025
 
 ### Images, audio and video
 
@@ -113,6 +116,20 @@ To enable this feature, use `-o code_execution 1`:
 llm -m gemini-1.5-pro-latest -o code_execution 1 \
 'use python to calculate (factorial of 13) * 3'
 ```
+### Google search
+
+Some Gemini models support [Grounding with Google Search](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/ground-gemini#web-ground-gemini), where the model can run a Google search and use the results as part of answering a prompt.
+
+Using this feature may incur additional requirements in terms of how you use the results. Consult [Google's documentation](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/ground-gemini#web-ground-gemini) for more details.
+
+To run a prompt with Google search enabled, use `-o google_search 1`:
+
+```bash
+llm -m gemini-1.5-pro-latest -o google_search 1 \
+'What happened in Ireland today?'
+```
+
+Use `llm logs -c --json` after running a prompt to see the full JSON response, which includes [additional information](https://github.com/simonw/llm-gemini/pull/29#issuecomment-2606201877) about grounded results.
 
 ### Chat
 
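As an aside, the same Google search prompt should also be possible through llm's Python API, since plugin options can be passed to `.prompt()` as keyword arguments. A minimal sketch (only the `google_search` option name comes from this release; the rest is standard llm usage, and it assumes the plugin is installed and a Gemini API key is configured):

```python
import llm

# Pick one of the models listed in GOOGLE_SEARCH_MODELS further down in this diff.
model = llm.get_model("gemini-1.5-pro-latest")

# google_search=True mirrors the CLI flag `-o google_search 1`.
response = model.prompt("What happened in Ireland today?", google_search=True)
print(response.text())
```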
llm_gemini-0.9.dist-info/RECORD
ADDED
@@ -0,0 +1,7 @@
+llm_gemini.py,sha256=sCouoSbzOe4GoTsskAKJZjhDTxRYqSxgNuODwi2O1z0,12752
+llm_gemini-0.9.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+llm_gemini-0.9.dist-info/METADATA,sha256=UEr_dRMMSev9YY9U34QMQHnhuyRPM7E7sHT1o8uA0qg,6808
+llm_gemini-0.9.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+llm_gemini-0.9.dist-info/entry_points.txt,sha256=n544bpgUPIBc5l_cnwsTxPc3gMGJHPtAyqBNp-CkMWk,26
+llm_gemini-0.9.dist-info/top_level.txt,sha256=WUQmG6_2QKbT_8W4HH93qyKl_0SUteL4Ra6_PhyNGKU,11
+llm_gemini-0.9.dist-info/RECORD,,
llm_gemini.py
CHANGED
@@ -23,6 +23,17 @@ SAFETY_SETTINGS = [
     },
 ]
 
+# https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/ground-gemini#supported_models_2
+GOOGLE_SEARCH_MODELS = {
+    "gemini-1.5-pro-latest",
+    "gemini-1.5-flash-latest",
+    "gemini-1.5-pro-001",
+    "gemini-1.5-flash-001",
+    "gemini-1.5-pro-002",
+    "gemini-1.5-flash-002",
+    "gemini-2.0-flash-exp",
+}
+
 
 @llm.hookimpl
 def register_models(register):
@@ -41,9 +52,15 @@ def register_models(register):
         "gemini-exp-1121",
         "gemini-exp-1206",
         "gemini-2.0-flash-exp",
+        "learnlm-1.5-pro-experimental",
         "gemini-2.0-flash-thinking-exp-1219",
+        "gemini-2.0-flash-thinking-exp-01-21",
     ]:
-        register(GeminiPro(model_id), AsyncGeminiPro(model_id))
+        can_google_search = model_id in GOOGLE_SEARCH_MODELS
+        register(
+            GeminiPro(model_id, can_google_search=can_google_search),
+            AsyncGeminiPro(model_id, can_google_search=can_google_search),
+        )
 
 
 def resolve_type(attachment):
@@ -51,6 +68,8 @@ def resolve_type(attachment):
     # https://github.com/simonw/llm/issues/587#issuecomment-2439785140
     if mime_type == "audio/mpeg":
         mime_type = "audio/mp3"
+    if mime_type == "application/ogg":
+        mime_type = "audio/ogg"
     return mime_type
 
 
@@ -60,6 +79,9 @@ class _SharedGemini:
     can_stream = True
 
     attachment_types = (
+        # Text
+        "text/plain",
+        "text/csv",
         # PDF
         "application/pdf",
         # Images
@@ -74,6 +96,7 @@ class _SharedGemini:
         "audio/aiff",
         "audio/aac",
         "audio/ogg",
+        "application/ogg",
         "audio/flac",
         "audio/mpeg",  # Treated as audio/mp3
         # Video
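Since `text/plain` and `text/csv` are now listed in `attachment_types`, CSV files should be attachable directly. A minimal sketch through llm's Python API (the file name `data.csv` is hypothetical, used only to illustrate the new type):

```python
import llm

model = llm.get_model("gemini-1.5-flash-latest")
response = model.prompt(
    "Summarize the columns in this file",
    # A local CSV attachment; resolves to the newly supported text/csv type.
    attachments=[llm.Attachment(path="data.csv")],
)
print(response.text())
```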
@@ -134,8 +157,17 @@ class _SharedGemini:
             default=None,
         )
 
-    def __init__(self, model_id):
+    class OptionsWithGoogleSearch(Options):
+        google_search: Optional[bool] = Field(
+            description="Enables the model to use Google Search to improve the accuracy and recency of responses from the model",
+            default=None,
+        )
+
+    def __init__(self, model_id, can_google_search=False):
         self.model_id = model_id
+        self.can_google_search = can_google_search
+        if can_google_search:
+            self.Options = self.OptionsWithGoogleSearch
 
     def build_messages(self, prompt, conversation):
         messages = []
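Swapping `self.Options` per instance means only the models in `GOOGLE_SEARCH_MODELS` expose the new option. A quick sketch of how that surfaces, assuming pydantic v2's `model_fields` and using model ids from the hunks above:

```python
import llm

# gemini-1.5-pro-latest is in GOOGLE_SEARCH_MODELS, so __init__ swaps in
# OptionsWithGoogleSearch and the google_search field is available.
searchable = llm.get_model("gemini-1.5-pro-latest")
print("google_search" in searchable.Options.model_fields)  # expected: True

# gemini-exp-1206 is not in GOOGLE_SEARCH_MODELS, so it keeps the base Options.
plain = llm.get_model("gemini-exp-1206")
print("google_search" in plain.Options.model_fields)  # expected: False
```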
@@ -155,7 +187,9 @@ class _SharedGemini:
             if response.prompt.prompt:
                 parts.append({"text": response.prompt.prompt})
             messages.append({"role": "user", "parts": parts})
-            messages.append({"role": "model", "parts": [{"text": response.text()}]})
+            messages.append(
+                {"role": "model", "parts": [{"text": response.text_or_raise()}]}
+            )
 
         parts = []
         if prompt.prompt:
@@ -181,6 +215,8 @@ class _SharedGemini:
         }
         if prompt.options and prompt.options.code_execution:
             body["tools"] = [{"codeExecution": {}}]
+        if prompt.options and self.can_google_search and prompt.options.google_search:
+            body["tools"] = [{"google_search_retrieval": {}}]
         if prompt.system:
             body["systemInstruction"] = {"parts": [{"text": prompt.system}]}
 
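To make the effect of that branch concrete, the request body should end up carrying a tools entry like the fragment sketched below. This is hand-written for illustration, not output captured from the plugin; only the keys visible in the surrounding code are shown:

```python
# Sketch: body fragment when google_search is set on a supported model.
body = {
    # ... message contents and other keys built earlier in the method ...
    "tools": [{"google_search_retrieval": {}}],
    # Present only when a system prompt was supplied:
    "systemInstruction": {"parts": [{"text": "Answer concisely."}]},
}
```

Note that both branches assign `body["tools"]` outright, so enabling `google_search` together with `code_execution` would leave only the search tool in the request.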
@@ -335,4 +371,4 @@ class GeminiEmbeddingModel(llm.EmbeddingModel):
         )
 
         response.raise_for_status()
-        return [item["values"] for item in response.json()["embeddings"]]
+        return [item["values"] for item in response.json()["embeddings"]]
llm_gemini-0.8.dist-info/RECORD
DELETED
@@ -1,7 +0,0 @@
-llm_gemini.py,sha256=6xRF1uP64O-nYAGgKytFh1Wj0N-cRny4bs69GmTwJLU,11408
-llm_gemini-0.8.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-llm_gemini-0.8.dist-info/METADATA,sha256=cCkBf00ebzzwl2wyUxOTuwTWfG_peiB7mtD-8bzHBkc,5530
-llm_gemini-0.8.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
-llm_gemini-0.8.dist-info/entry_points.txt,sha256=n544bpgUPIBc5l_cnwsTxPc3gMGJHPtAyqBNp-CkMWk,26
-llm_gemini-0.8.dist-info/top_level.txt,sha256=WUQmG6_2QKbT_8W4HH93qyKl_0SUteL4Ra6_PhyNGKU,11
-llm_gemini-0.8.dist-info/RECORD,,
{llm_gemini-0.8.dist-info → llm_gemini-0.9.dist-info}/LICENSE
File without changes

{llm_gemini-0.8.dist-info → llm_gemini-0.9.dist-info}/entry_points.txt
File without changes

{llm_gemini-0.8.dist-info → llm_gemini-0.9.dist-info}/top_level.txt
File without changes