llm-gemini 0.1a2__py3-none-any.whl → 0.1a3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {llm_gemini-0.1a2.dist-info → llm_gemini-0.1a3.dist-info}/METADATA +23 -1
- llm_gemini-0.1a3.dist-info/RECORD +7 -0
- llm_gemini.py +42 -0
- llm_gemini-0.1a2.dist-info/RECORD +0 -7
- {llm_gemini-0.1a2.dist-info → llm_gemini-0.1a3.dist-info}/LICENSE +0 -0
- {llm_gemini-0.1a2.dist-info → llm_gemini-0.1a3.dist-info}/WHEEL +0 -0
- {llm_gemini-0.1a2.dist-info → llm_gemini-0.1a3.dist-info}/entry_points.txt +0 -0
- {llm_gemini-0.1a2.dist-info → llm_gemini-0.1a3.dist-info}/top_level.txt +0 -0
{llm_gemini-0.1a2.dist-info → llm_gemini-0.1a3.dist-info}/METADATA
CHANGED

````diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: llm-gemini
-Version: 0.1a2
+Version: 0.1a3
 Summary: LLM plugin to access Google's Gemini family of models
 Author: Simon Willison
 License: Apache-2.0
@@ -61,6 +61,28 @@ llm chat -m gemini-pro
 
 If you have access to the Gemini 1.5 Pro preview you can use `-m gemini-1.5-pro-latest` to work with that model.
 
+### Embeddings
+
+The plugin also adds support for the `text-embedding-004` embedding model.
+
+Run that against a single string like this:
+```bash
+llm embed -m text-embedding-004 -c 'hello world'
+```
+This returns a JSON array of 768 numbers.
+
+This command will embed every `README.md` file in child directories of the current directory and store the results in a SQLite database called `embed.db` in a collection called `readmes`:
+
+```bash
+llm embed-multi readmes --files . '*/README.md' -d embed.db -m text-embedding-004
+```
+You can then run similarity searches against that collection like this:
+```bash
+llm similar readmes -c 'upload csvs to stuff' -d embed.db
+```
+
+See the [LLM embeddings documentation](https://llm.datasette.io/en/stable/embeddings/cli.html) for further details.
+
 ## Development
 
 To set up this plugin locally, first checkout the code. Then create a new virtual environment:
````
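The new README section covers the CLI only; the same embedding model is also reachable from Python through `llm`'s embedding API. A minimal sketch, not part of this release's files, assuming llm and the 0.1a3 plugin are installed and a key is available via `LLM_GEMINI_KEY` (the environment variable declared in `llm_gemini.py` below):

```python
import llm

# Assumption: llm plus llm-gemini 0.1a3 are installed and LLM_GEMINI_KEY is set.
model = llm.get_embedding_model("text-embedding-004")

# Mirrors the CLI example `llm embed -m text-embedding-004 -c 'hello world'`
vector = model.embed("hello world")
print(len(vector))  # 768, matching the "JSON array of 768 numbers" noted above
```

Both the CLI commands and this API route through the `GeminiEmbeddingModel.embed_batch()` method added to `llm_gemini.py` in this release.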
llm_gemini-0.1a3.dist-info/RECORD
ADDED

```diff
@@ -0,0 +1,7 @@
+llm_gemini.py,sha256=uwvFPki6KrMLPqHJsfLi4eKk_4kj7HJCWSnWIjRJzgM,3961
+llm_gemini-0.1a3.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+llm_gemini-0.1a3.dist-info/METADATA,sha256=SF6uTnGVgucK_UrAghpFrXC_rQ5UBOPPtV2Tvz6Lp9E,3045
+llm_gemini-0.1a3.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
+llm_gemini-0.1a3.dist-info/entry_points.txt,sha256=n544bpgUPIBc5l_cnwsTxPc3gMGJHPtAyqBNp-CkMWk,26
+llm_gemini-0.1a3.dist-info/top_level.txt,sha256=WUQmG6_2QKbT_8W4HH93qyKl_0SUteL4Ra6_PhyNGKU,11
+llm_gemini-0.1a3.dist-info/RECORD,,
```
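Each RECORD line above has the form `path,sha256=<digest>,size`, where the digest is the URL-safe base64 encoding of the file's SHA-256 hash with the trailing `=` padding stripped, as specified for wheels. A small illustrative sketch, not shipped in the package, for recomputing an entry from an unpacked wheel:

```python
import base64
import hashlib
from pathlib import Path

def record_entry(path):
    """Return (digest, size) formatted like a wheel RECORD line."""
    data = Path(path).read_bytes()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=").decode()
    return f"sha256={digest}", len(data)

# For example, record_entry("llm_gemini.py") on the 0.1a3 wheel contents
# should reproduce the first line above: sha256=uwvFPki6... with size 3961.
```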
llm_gemini.py
CHANGED
```diff
@@ -83,3 +83,45 @@ class GeminiPro(llm.Model):
                     gathered.append(event)
                     events.clear()
         response.response_json = gathered
+
+
+@llm.hookimpl
+def register_embedding_models(register):
+    register(
+        GeminiEmbeddingModel("text-embedding-004", "text-embedding-004"),
+    )
+
+
+class GeminiEmbeddingModel(llm.EmbeddingModel):
+    needs_key = "gemini"
+    key_env_var = "LLM_GEMINI_KEY"
+    batch_size = 20
+
+    def __init__(self, model_id, gemini_model_id):
+        self.model_id = model_id
+        self.gemini_model_id = gemini_model_id
+
+    def embed_batch(self, items):
+        headers = {
+            "Content-Type": "application/json",
+        }
+        data = {
+            "requests": [
+                {
+                    "model": "models/" + self.gemini_model_id,
+                    "content": {"parts": [{"text": item}]},
+                }
+                for item in items
+            ]
+        }
+
+        with httpx.Client() as client:
+            response = client.post(
+                f"https://generativelanguage.googleapis.com/v1beta/models/{self.gemini_model_id}:batchEmbedContents?key={self.get_key()}",
+                headers=headers,
+                json=data,
+                timeout=None,
+            )
+
+        response.raise_for_status()
+        return [item["values"] for item in response.json()["embeddings"]]
```
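The new `embed_batch()` is a thin wrapper over Gemini's `batchEmbedContents` endpoint. As a rough standalone sketch of the same request, where the endpoint, payload shape, and the `embeddings`/`values` response keys come from the diff above, while the sample strings and the direct use of `LLM_GEMINI_KEY` are illustrative assumptions:

```python
import os

import httpx

model = "text-embedding-004"
key = os.environ["LLM_GEMINI_KEY"]  # the same env var the plugin reads

# One request object per input string, matching what embed_batch() builds
payload = {
    "requests": [
        {"model": f"models/{model}", "content": {"parts": [{"text": text}]}}
        for text in ["hello world", "goodbye world"]
    ]
}

response = httpx.post(
    f"https://generativelanguage.googleapis.com/v1beta/models/{model}:batchEmbedContents?key={key}",
    json=payload,
    timeout=None,
)
response.raise_for_status()

# The response carries one embedding per request, each a list of floats under "values"
vectors = [item["values"] for item in response.json()["embeddings"]]
print(len(vectors), len(vectors[0]))  # 2 vectors, 768 dimensions each
```

The `batch_size = 20` class attribute caps each batch, so `llm embed-multi` should never send more than 20 strings in a single request.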
llm_gemini-0.1a2.dist-info/RECORD
DELETED

```diff
@@ -1,7 +0,0 @@
-llm_gemini.py,sha256=D5EeLRGOTiIiDnChXVaJQcOL1AQNZNpvBYDGXjulMFA,2745
-llm_gemini-0.1a2.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-llm_gemini-0.1a2.dist-info/METADATA,sha256=ptgaTC5N3g0z1NDbzNJypnfy4V44W-jteo9bcQJsjeU,2262
-llm_gemini-0.1a2.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
-llm_gemini-0.1a2.dist-info/entry_points.txt,sha256=n544bpgUPIBc5l_cnwsTxPc3gMGJHPtAyqBNp-CkMWk,26
-llm_gemini-0.1a2.dist-info/top_level.txt,sha256=WUQmG6_2QKbT_8W4HH93qyKl_0SUteL4Ra6_PhyNGKU,11
-llm_gemini-0.1a2.dist-info/RECORD,,
```
{llm_gemini-0.1a2.dist-info → llm_gemini-0.1a3.dist-info}/LICENSE
File without changes

{llm_gemini-0.1a2.dist-info → llm_gemini-0.1a3.dist-info}/WHEEL
File without changes

{llm_gemini-0.1a2.dist-info → llm_gemini-0.1a3.dist-info}/entry_points.txt
File without changes

{llm_gemini-0.1a2.dist-info → llm_gemini-0.1a3.dist-info}/top_level.txt
File without changes