llm-gemini 0.2__py3-none-any.whl → 0.3a0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {llm_gemini-0.2.dist-info → llm_gemini-0.3a0.dist-info}/METADATA +24 -8
- llm_gemini-0.3a0.dist-info/RECORD +7 -0
- {llm_gemini-0.2.dist-info → llm_gemini-0.3a0.dist-info}/WHEEL +1 -1
- llm_gemini.py +66 -7
- llm_gemini-0.2.dist-info/RECORD +0 -7
- {llm_gemini-0.2.dist-info → llm_gemini-0.3a0.dist-info}/LICENSE +0 -0
- {llm_gemini-0.2.dist-info → llm_gemini-0.3a0.dist-info}/entry_points.txt +0 -0
- {llm_gemini-0.2.dist-info → llm_gemini-0.3a0.dist-info}/top_level.txt +0 -0
@@ -1,6 +1,6 @@
|
|
1
1
|
Metadata-Version: 2.1
|
2
2
|
Name: llm-gemini
|
3
|
-
Version: 0.
|
3
|
+
Version: 0.3a0
|
4
4
|
Summary: LLM plugin to access Google's Gemini family of models
|
5
5
|
Author: Simon Willison
|
6
6
|
License: Apache-2.0
|
@@ -11,7 +11,7 @@ Project-URL: CI, https://github.com/simonw/llm-gemini/actions
|
|
11
11
|
Classifier: License :: OSI Approved :: Apache Software License
|
12
12
|
Description-Content-Type: text/markdown
|
13
13
|
License-File: LICENSE
|
14
|
-
Requires-Dist: llm
|
14
|
+
Requires-Dist: llm >=0.17a0
|
15
15
|
Requires-Dist: httpx
|
16
16
|
Requires-Dist: ijson
|
17
17
|
Provides-Extra: test
|
@@ -43,23 +43,39 @@ llm keys set gemini
|
|
43
43
|
<paste key here>
|
44
44
|
```
|
45
45
|
|
46
|
-
Now run the model using `-m gemini-pro`, for example:
|
46
|
+
Now run the model using `-m gemini-1.5-pro-latest`, for example:
|
47
47
|
|
48
48
|
```bash
|
49
|
-
llm -m gemini-pro "A joke about a pelican and a walrus"
|
49
|
+
llm -m gemini-1.5-pro-latest "A joke about a pelican and a walrus"
|
50
50
|
```
|
51
51
|
|
52
|
-
>
|
52
|
+
> A pelican walks into a seafood restaurant with a huge fish hanging out of its beak. The walrus, sitting at the bar, eyes it enviously.
|
53
53
|
>
|
54
|
-
>
|
54
|
+
> "Hey," the walrus says, "That looks delicious! What kind of fish is that?"
|
55
|
+
>
|
56
|
+
> The pelican taps its beak thoughtfully. "I believe," it says, "it's a billfish."
|
55
57
|
|
56
58
|
To chat interactively with the model, run `llm chat`:
|
57
59
|
|
58
60
|
```bash
|
59
|
-
llm chat -m gemini-pro
|
61
|
+
llm chat -m gemini-1.5-pro-latest
|
60
62
|
```
|
61
63
|
|
62
|
-
|
64
|
+
Other models are:
|
65
|
+
|
66
|
+
- `gemini-1.5-flash-latest`
|
67
|
+
- `gemini-1.5-flash-8b-latest` - the least expensive
|
68
|
+
|
69
|
+
Gemini models are multi-modal. You can provide images, audio or video files as input like this:
|
70
|
+
|
71
|
+
```bash
|
72
|
+
llm -m gemini-1.5-flash-latest 'extract text' -a image.jpg
|
73
|
+
```
|
74
|
+
Or with a URL:
|
75
|
+
```bash
|
76
|
+
llm -m gemini-1.5-flash-8b-latest 'describe image' \
|
77
|
+
-a https://static.simonwillison.net/static/2024/pelicans.jpg
|
78
|
+
```
|
63
79
|
|
64
80
|
### Embeddings
|
65
81
|
|
@@ -0,0 +1,7 @@
|
|
1
|
+
llm_gemini.py,sha256=nmatZLQyVUUwoaiUloPycKKDbLMzGo4mcXeJwjzAENA,5881
|
2
|
+
llm_gemini-0.3a0.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
|
3
|
+
llm_gemini-0.3a0.dist-info/METADATA,sha256=pIxt4pF2XgDJSVi2RNuaOc4fgU2pRYWnasijmry8d_E,3618
|
4
|
+
llm_gemini-0.3a0.dist-info/WHEEL,sha256=OVMc5UfuAQiSplgO0_WdW7vXVGAt9Hdd6qtN4HotdyA,91
|
5
|
+
llm_gemini-0.3a0.dist-info/entry_points.txt,sha256=n544bpgUPIBc5l_cnwsTxPc3gMGJHPtAyqBNp-CkMWk,26
|
6
|
+
llm_gemini-0.3a0.dist-info/top_level.txt,sha256=WUQmG6_2QKbT_8W4HH93qyKl_0SUteL4Ra6_PhyNGKU,11
|
7
|
+
llm_gemini-0.3a0.dist-info/RECORD,,
|
llm_gemini.py
CHANGED
@@ -37,22 +37,81 @@ def register_models(register):
|
|
37
37
|
register(GeminiPro("gemini-1.5-flash-8b-001"))
|
38
38
|
|
39
39
|
|
40
|
+
def resolve_type(attachment):
|
41
|
+
mime_type = attachment.resolve_type()
|
42
|
+
# https://github.com/simonw/llm/issues/587#issuecomment-2439785140
|
43
|
+
if mime_type == "audio/mpeg":
|
44
|
+
mime_type = "audio/mp3"
|
45
|
+
return mime_type
|
46
|
+
|
47
|
+
|
40
48
|
class GeminiPro(llm.Model):
|
41
49
|
can_stream = True
|
42
50
|
|
51
|
+
attachment_types = (
|
52
|
+
# PDF
|
53
|
+
"application/pdf",
|
54
|
+
# Images
|
55
|
+
"image/png",
|
56
|
+
"image/jpeg",
|
57
|
+
"image/webp",
|
58
|
+
"image/heic",
|
59
|
+
"image/heif",
|
60
|
+
# Audio
|
61
|
+
"audio/wav",
|
62
|
+
"audio/mp3",
|
63
|
+
"audio/aiff",
|
64
|
+
"audio/aac",
|
65
|
+
"audio/ogg",
|
66
|
+
"audio/flac",
|
67
|
+
"audio/mpeg", # Treated as audio/mp3
|
68
|
+
# Video
|
69
|
+
"video/mp4",
|
70
|
+
"video/mpeg",
|
71
|
+
"video/mov",
|
72
|
+
"video/avi",
|
73
|
+
"video/x-flv",
|
74
|
+
"video/mpg",
|
75
|
+
"video/webm",
|
76
|
+
"video/wmv",
|
77
|
+
"video/3gpp",
|
78
|
+
)
|
79
|
+
|
43
80
|
def __init__(self, model_id):
|
44
81
|
self.model_id = model_id
|
45
82
|
|
46
83
|
def build_messages(self, prompt, conversation):
|
47
|
-
if not conversation:
|
48
|
-
return [{"role": "user", "parts": [{"text": prompt.prompt}]}]
|
49
84
|
messages = []
|
50
|
-
|
51
|
-
|
52
|
-
|
85
|
+
if conversation:
|
86
|
+
for response in conversation.responses:
|
87
|
+
parts = []
|
88
|
+
for attachment in response.attachments:
|
89
|
+
mime_type = resolve_type(attachment)
|
90
|
+
parts.append(
|
91
|
+
{
|
92
|
+
"inlineData": {
|
93
|
+
"data": attachment.base64_content(),
|
94
|
+
"mimeType": mime_type,
|
95
|
+
}
|
96
|
+
}
|
97
|
+
)
|
98
|
+
parts.append({"text": response.prompt.prompt})
|
99
|
+
messages.append({"role": "user", "parts": parts})
|
100
|
+
messages.append({"role": "model", "parts": [{"text": response.text()}]})
|
101
|
+
|
102
|
+
parts = [{"text": prompt.prompt}]
|
103
|
+
for attachment in prompt.attachments:
|
104
|
+
mime_type = resolve_type(attachment)
|
105
|
+
parts.append(
|
106
|
+
{
|
107
|
+
"inlineData": {
|
108
|
+
"data": attachment.base64_content(),
|
109
|
+
"mimeType": mime_type,
|
110
|
+
}
|
111
|
+
}
|
53
112
|
)
|
54
|
-
|
55
|
-
messages.append({"role": "user", "parts":
|
113
|
+
|
114
|
+
messages.append({"role": "user", "parts": parts})
|
56
115
|
return messages
|
57
116
|
|
58
117
|
def execute(self, prompt, stream, response, conversation):
|
llm_gemini-0.2.dist-info/RECORD
DELETED
@@ -1,7 +0,0 @@
|
|
1
|
-
llm_gemini.py,sha256=38ONnvzgDWJIE17ODeQd87UWsgvJSeTsDyHpLBTp9og,4305
|
2
|
-
llm_gemini-0.2.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
|
3
|
-
llm_gemini-0.2.dist-info/METADATA,sha256=rVokMpbsBeOsCR59GzpyXcBlj99KFo3g4pO767oyi_k,3059
|
4
|
-
llm_gemini-0.2.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
|
5
|
-
llm_gemini-0.2.dist-info/entry_points.txt,sha256=n544bpgUPIBc5l_cnwsTxPc3gMGJHPtAyqBNp-CkMWk,26
|
6
|
-
llm_gemini-0.2.dist-info/top_level.txt,sha256=WUQmG6_2QKbT_8W4HH93qyKl_0SUteL4Ra6_PhyNGKU,11
|
7
|
-
llm_gemini-0.2.dist-info/RECORD,,
|
File without changes
|
File without changes
|
File without changes
|