llm-gemini 0.2.tar.gz → 0.3.tar.gz

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: llm-gemini
-Version: 0.2
+Version: 0.3
 Summary: LLM plugin to access Google's Gemini family of models
 Author: Simon Willison
 License: Apache-2.0
@@ -11,7 +11,7 @@ Project-URL: CI, https://github.com/simonw/llm-gemini/actions
 Classifier: License :: OSI Approved :: Apache Software License
 Description-Content-Type: text/markdown
 License-File: LICENSE
-Requires-Dist: llm
+Requires-Dist: llm>=0.17
 Requires-Dist: httpx
 Requires-Dist: ijson
 Provides-Extra: test
@@ -43,23 +43,66 @@ llm keys set gemini
 <paste key here>
 ```
 
-Now run the model using `-m gemini-pro`, for example:
+Now run the model using `-m gemini-1.5-pro-latest`, for example:
 
 ```bash
-llm -m gemini-pro "A joke about a pelican and a walrus"
+llm -m gemini-1.5-pro-latest "A joke about a pelican and a walrus"
 ```
 
-> Why did the pelican get mad at the walrus?
+> A pelican walks into a seafood restaurant with a huge fish hanging out of its beak. The walrus, sitting at the bar, eyes it enviously.
 >
-> Because he called him a hippo-crit.
+> "Hey," the walrus says, "That looks delicious! What kind of fish is that?"
+>
+> The pelican taps its beak thoughtfully. "I believe," it says, "it's a billfish."
+
+### Images, audio and video
+
+Gemini models are multi-modal. You can provide images, audio or video files as input like this:
+
+```bash
+llm -m gemini-1.5-flash-latest 'extract text' -a image.jpg
+```
+Or with a URL:
+```bash
+llm -m gemini-1.5-flash-8b-latest 'describe image' \
+  -a https://static.simonwillison.net/static/2024/pelicans.jpg
+```
+Audio works too:
+
+```bash
+llm -m gemini-1.5-pro-latest 'transcribe audio' -a audio.mp3
+```
+
+And video:
+
+```bash
+llm -m gemini-1.5-pro-latest 'describe what happens' -a video.mp4
+```
+
+### Code execution
+
+Gemini models can [write and execute code](https://ai.google.dev/gemini-api/docs/code-execution) - they can decide to write Python code, execute it in a secure sandbox and use the result as part of their response.
+
+To enable this feature, use `-o code_execution 1`:
+
+```bash
+llm -m gemini-1.5-pro-latest -o code_execution 1 \
+  'use python to calculate (factorial of 13) * 3'
+```
+
+### Chat
 
 To chat interactively with the model, run `llm chat`:
 
 ```bash
-llm chat -m gemini-pro
+llm chat -m gemini-1.5-pro-latest
 ```
 
-If you have access to the Gemini 1.5 Pro preview you can use `-m gemini-1.5-pro-latest` to work with that model.
+Other models are:
+
+- `gemini-1.5-flash-latest`
+- `gemini-1.5-flash-8b-latest` - the least expensive
+
 
 ### Embeddings
 
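The `llm>=0.17` pin in the metadata above lines up with the new attachment support: 0.17 is the llm release that introduced attachments. As a minimal sketch, assuming llm's documented `Attachment`/`prompt(attachments=...)` API, the same multi-modal call can be driven from Python rather than the CLI (the file name is illustrative):

```python
# Minimal sketch, assuming the llm>=0.17 attachment API.
# Assumes the Gemini key is already configured via `llm keys set gemini`.
import llm

model = llm.get_model("gemini-1.5-flash-latest")
response = model.prompt(
    "extract text",
    attachments=[llm.Attachment(path="image.jpg")],  # illustrative file
)
print(response.text())
```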
@@ -24,23 +24,66 @@ llm keys set gemini
 <paste key here>
 ```
 
-Now run the model using `-m gemini-pro`, for example:
+Now run the model using `-m gemini-1.5-pro-latest`, for example:
 
 ```bash
-llm -m gemini-pro "A joke about a pelican and a walrus"
+llm -m gemini-1.5-pro-latest "A joke about a pelican and a walrus"
 ```
 
-> Why did the pelican get mad at the walrus?
+> A pelican walks into a seafood restaurant with a huge fish hanging out of its beak. The walrus, sitting at the bar, eyes it enviously.
 >
-> Because he called him a hippo-crit.
+> "Hey," the walrus says, "That looks delicious! What kind of fish is that?"
+>
+> The pelican taps its beak thoughtfully. "I believe," it says, "it's a billfish."
+
+### Images, audio and video
+
+Gemini models are multi-modal. You can provide images, audio or video files as input like this:
+
+```bash
+llm -m gemini-1.5-flash-latest 'extract text' -a image.jpg
+```
+Or with a URL:
+```bash
+llm -m gemini-1.5-flash-8b-latest 'describe image' \
+  -a https://static.simonwillison.net/static/2024/pelicans.jpg
+```
+Audio works too:
+
+```bash
+llm -m gemini-1.5-pro-latest 'transcribe audio' -a audio.mp3
+```
+
+And video:
+
+```bash
+llm -m gemini-1.5-pro-latest 'describe what happens' -a video.mp4
+```
+
+### Code execution
+
+Gemini models can [write and execute code](https://ai.google.dev/gemini-api/docs/code-execution) - they can decide to write Python code, execute it in a secure sandbox and use the result as part of their response.
+
+To enable this feature, use `-o code_execution 1`:
+
+```bash
+llm -m gemini-1.5-pro-latest -o code_execution 1 \
+  'use python to calculate (factorial of 13) * 3'
+```
+
+### Chat
 
 To chat interactively with the model, run `llm chat`:
 
 ```bash
-llm chat -m gemini-pro
+llm chat -m gemini-1.5-pro-latest
 ```
 
-If you have access to the Gemini 1.5 Pro preview you can use `-m gemini-1.5-pro-latest` to work with that model.
+Other models are:
+
+- `gemini-1.5-flash-latest`
+- `gemini-1.5-flash-8b-latest` - the least expensive
+
 
 ### Embeddings
 
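The code execution option documented above surfaces in the plugin's streaming handler (see the new `llm_gemini.py` below): Gemini can return `executableCode` and `codeExecutionResult` parts alongside plain `text` parts, and the plugin renders each as a fenced code block. A standalone sketch of that rendering logic follows; the sample part payloads are made up for illustration:

```python
# Mirrors the executableCode / codeExecutionResult handling in
# llm_gemini.py's execute(); the sample payloads are illustrative.
FENCE = "`" * 3  # build the markdown fence without literal backtick runs

def render_part(part):
    if "text" in part:
        return part["text"]
    if "executableCode" in part:
        code = part["executableCode"]
        return f'{FENCE}{code["language"].lower()}\n{code["code"].strip()}\n{FENCE}\n'
    if "codeExecutionResult" in part:
        return f'{FENCE}\n{part["codeExecutionResult"]["output"].strip()}\n{FENCE}\n'
    return ""

print(render_part({"executableCode": {"language": "PYTHON",
                                      "code": "import math\nprint(math.factorial(13) * 3)"}}))
print(render_part({"codeExecutionResult": {"output": "18681062400\n"}}))
```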
@@ -1,4 +1,4 @@
-llm
+llm>=0.17
 httpx
 ijson
 
@@ -0,0 +1,253 @@
+import httpx
+import ijson
+import llm
+from pydantic import Field
+from typing import Optional
+
+import urllib.parse
+
+# We disable all of these to avoid random unexpected errors
+SAFETY_SETTINGS = [
+    {
+        "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
+        "threshold": "BLOCK_NONE",
+    },
+    {
+        "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+        "threshold": "BLOCK_NONE",
+    },
+    {
+        "category": "HARM_CATEGORY_HATE_SPEECH",
+        "threshold": "BLOCK_NONE",
+    },
+    {
+        "category": "HARM_CATEGORY_HARASSMENT",
+        "threshold": "BLOCK_NONE",
+    },
+]
+
+
+@llm.hookimpl
+def register_models(register):
+    register(GeminiPro("gemini-pro"))
+    register(GeminiPro("gemini-1.5-pro-latest"))
+    register(GeminiPro("gemini-1.5-flash-latest"))
+    register(GeminiPro("gemini-1.5-pro-001"))
+    register(GeminiPro("gemini-1.5-flash-001"))
+    register(GeminiPro("gemini-1.5-pro-002"))
+    register(GeminiPro("gemini-1.5-flash-002"))
+    register(GeminiPro("gemini-1.5-flash-8b-latest"))
+    register(GeminiPro("gemini-1.5-flash-8b-001"))
+
+
+def resolve_type(attachment):
+    mime_type = attachment.resolve_type()
+    # https://github.com/simonw/llm/issues/587#issuecomment-2439785140
+    if mime_type == "audio/mpeg":
+        mime_type = "audio/mp3"
+    return mime_type
+
+
+class GeminiPro(llm.Model):
+    needs_key = "gemini"
+    key_env_var = "LLM_GEMINI_KEY"
+    can_stream = True
+
+    attachment_types = (
+        # PDF
+        "application/pdf",
+        # Images
+        "image/png",
+        "image/jpeg",
+        "image/webp",
+        "image/heic",
+        "image/heif",
+        # Audio
+        "audio/wav",
+        "audio/mp3",
+        "audio/aiff",
+        "audio/aac",
+        "audio/ogg",
+        "audio/flac",
+        "audio/mpeg",  # Treated as audio/mp3
+        # Video
+        "video/mp4",
+        "video/mpeg",
+        "video/mov",
+        "video/avi",
+        "video/x-flv",
+        "video/mpg",
+        "video/webm",
+        "video/wmv",
+        "video/3gpp",
+    )
+
+    class Options(llm.Options):
+        code_execution: Optional[bool] = Field(
+            description="Enables the model to generate and run Python code",
+            default=None,
+        )
+        temperature: Optional[float] = Field(
+            description="Controls the randomness of the output. Use higher values for more creative responses, and lower values for more deterministic responses.",
+            default=None,
+            ge=0.0,
+            le=2.0,
+        )
+        max_output_tokens: Optional[int] = Field(
+            description="Sets the maximum number of tokens to include in a candidate.",
+            default=None,
+        )
+        top_p: Optional[float] = Field(
+            description="Changes how the model selects tokens for output. Tokens are selected from the most to least probable until the sum of their probabilities equals the topP value.",
+            default=None,
+            ge=0.0,
+            le=1.0,
+        )
+        top_k: Optional[int] = Field(
+            description="Changes how the model selects tokens for output. A topK of 1 means the selected token is the most probable among all the tokens in the model's vocabulary, while a topK of 3 means that the next token is selected from among the 3 most probable using the temperature.",
+            default=None,
+            ge=1,
+        )
+
+    def __init__(self, model_id):
+        self.model_id = model_id
+
+    def build_messages(self, prompt, conversation):
+        messages = []
+        if conversation:
+            for response in conversation.responses:
+                parts = []
+                for attachment in response.attachments:
+                    mime_type = resolve_type(attachment)
+                    parts.append(
+                        {
+                            "inlineData": {
+                                "data": attachment.base64_content(),
+                                "mimeType": mime_type,
+                            }
+                        }
+                    )
+                parts.append({"text": response.prompt.prompt})
+                messages.append({"role": "user", "parts": parts})
+                messages.append({"role": "model", "parts": [{"text": response.text()}]})
+
+        parts = [{"text": prompt.prompt}]
+        for attachment in prompt.attachments:
+            mime_type = resolve_type(attachment)
+            parts.append(
+                {
+                    "inlineData": {
+                        "data": attachment.base64_content(),
+                        "mimeType": mime_type,
+                    }
+                }
+            )
+
+        messages.append({"role": "user", "parts": parts})
+        return messages
+
+    def execute(self, prompt, stream, response, conversation):
+        key = self.get_key()
+        url = "https://generativelanguage.googleapis.com/v1beta/models/{}:streamGenerateContent?".format(
+            self.model_id
+        ) + urllib.parse.urlencode(
+            {"key": key}
+        )
+        gathered = []
+        body = {
+            "contents": self.build_messages(prompt, conversation),
+            "safetySettings": SAFETY_SETTINGS,
+        }
+        if prompt.options and prompt.options.code_execution:
+            body["tools"] = [{"codeExecution": {}}]
+        if prompt.system:
+            body["systemInstruction"] = {"parts": [{"text": prompt.system}]}
+
+        config_map = {
+            "temperature": "temperature",
+            "max_output_tokens": "maxOutputTokens",
+            "top_p": "topP",
+            "top_k": "topK",
+        }
+        # If any of those are set in prompt.options...
+        if any(
+            getattr(prompt.options, key, None) is not None for key in config_map.keys()
+        ):
+            generation_config = {}
+            for key, other_key in config_map.items():
+                config_value = getattr(prompt.options, key, None)
+                if config_value is not None:
+                    generation_config[other_key] = config_value
+            body["generationConfig"] = generation_config
+
+        with httpx.stream(
+            "POST",
+            url,
+            timeout=None,
+            json=body,
+        ) as http_response:
+            events = ijson.sendable_list()
+            coro = ijson.items_coro(events, "item")
+            for chunk in http_response.iter_bytes():
+                coro.send(chunk)
+                if events:
+                    event = events[0]
+                    if isinstance(event, dict) and "error" in event:
+                        raise llm.ModelError(event["error"]["message"])
+                    try:
+                        part = event["candidates"][0]["content"]["parts"][0]
+                        if "text" in part:
+                            yield part["text"]
+                        elif "executableCode" in part:
+                            # For code_execution
+                            yield f'```{part["executableCode"]["language"].lower()}\n{part["executableCode"]["code"].strip()}\n```\n'
+                        elif "codeExecutionResult" in part:
+                            # For code_execution
+                            yield f'```\n{part["codeExecutionResult"]["output"].strip()}\n```\n'
+                    except KeyError:
+                        yield ""
+                    gathered.append(event)
+                    events.clear()
+        response.response_json = gathered
+
+
+@llm.hookimpl
+def register_embedding_models(register):
+    register(
+        GeminiEmbeddingModel("text-embedding-004", "text-embedding-004"),
+    )
+
+
+class GeminiEmbeddingModel(llm.EmbeddingModel):
+    needs_key = "gemini"
+    key_env_var = "LLM_GEMINI_KEY"
+    batch_size = 20
+
+    def __init__(self, model_id, gemini_model_id):
+        self.model_id = model_id
+        self.gemini_model_id = gemini_model_id
+
+    def embed_batch(self, items):
+        headers = {
+            "Content-Type": "application/json",
+        }
+        data = {
+            "requests": [
+                {
+                    "model": "models/" + self.gemini_model_id,
+                    "content": {"parts": [{"text": item}]},
+                }
+                for item in items
+            ]
+        }
+
+        with httpx.Client() as client:
+            response = client.post(
+                f"https://generativelanguage.googleapis.com/v1beta/models/{self.gemini_model_id}:batchEmbedContents?key={self.get_key()}",
+                headers=headers,
+                json=data,
+                timeout=None,
+            )
+
+        response.raise_for_status()
+        return [item["values"] for item in response.json()["embeddings"]]
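The streaming loop in `execute()` is the subtle part of this file: Gemini's `streamGenerateContent` endpoint returns a single JSON array that arrives incrementally, so the plugin feeds raw bytes into an ijson coroutine and pulls out each top-level array element as soon as it is complete. A self-contained sketch of that pattern, with made-up byte chunks standing in for the HTTP stream:

```python
# Self-contained sketch of the ijson coroutine pattern used in execute();
# the chunked byte stream below is illustrative.
import ijson

events = ijson.sendable_list()           # completed items accumulate here
coro = ijson.items_coro(events, "item")  # "item" = each top-level array element

for chunk in [b'[{"n": 1}, {"n"', b': 2}', b', {"n": 3}]']:
    coro.send(chunk)
    while events:
        # Each item is available as soon as its closing brace is parsed
        # (ijson yields numbers as Decimal by default).
        print("parsed:", events.pop(0))
```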
@@ -1,6 +1,6 @@
 [project]
 name = "llm-gemini"
-version = "0.2"
+version = "0.3"
 description = "LLM plugin to access Google's Gemini family of models"
 readme = "README.md"
 authors = [{name = "Simon Willison"}]
@@ -9,7 +9,7 @@ classifiers = [
     "License :: OSI Approved :: Apache Software License"
 ]
 dependencies = [
-    "llm",
+    "llm>=0.17",
     "httpx",
     "ijson"
 ]
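Another 0.3 addition worth calling out: the new `Options` class accepts snake_case option names on the CLI (e.g. `-o temperature 0.5`), and `execute()` translates the ones that are set into the camelCase keys Gemini's `generationConfig` expects. A small sketch of that translation, with illustrative option values:

```python
# Reproduces the snake_case -> camelCase option translation from
# execute(); the chosen values are illustrative.
config_map = {
    "temperature": "temperature",
    "max_output_tokens": "maxOutputTokens",
    "top_p": "topP",
    "top_k": "topK",
}
options = {"temperature": 0.5, "max_output_tokens": None, "top_p": 0.9, "top_k": None}

generation_config = {
    camel: options[snake]
    for snake, camel in config_map.items()
    if options[snake] is not None
}
print(generation_config)  # {'temperature': 0.5, 'topP': 0.9}
```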
@@ -1,134 +0,0 @@
-import httpx
-import ijson
-import llm
-import urllib.parse
-
-# We disable all of these to avoid random unexpected errors
-SAFETY_SETTINGS = [
-    {
-        "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
-        "threshold": "BLOCK_NONE",
-    },
-    {
-        "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
-        "threshold": "BLOCK_NONE",
-    },
-    {
-        "category": "HARM_CATEGORY_HATE_SPEECH",
-        "threshold": "BLOCK_NONE",
-    },
-    {
-        "category": "HARM_CATEGORY_HARASSMENT",
-        "threshold": "BLOCK_NONE",
-    },
-]
-
-
-@llm.hookimpl
-def register_models(register):
-    register(GeminiPro("gemini-pro"))
-    register(GeminiPro("gemini-1.5-pro-latest"))
-    register(GeminiPro("gemini-1.5-flash-latest"))
-    register(GeminiPro("gemini-1.5-pro-001"))
-    register(GeminiPro("gemini-1.5-flash-001"))
-    register(GeminiPro("gemini-1.5-pro-002"))
-    register(GeminiPro("gemini-1.5-flash-002"))
-    register(GeminiPro("gemini-1.5-flash-8b-latest"))
-    register(GeminiPro("gemini-1.5-flash-8b-001"))
-
-
-class GeminiPro(llm.Model):
-    can_stream = True
-
-    def __init__(self, model_id):
-        self.model_id = model_id
-
-    def build_messages(self, prompt, conversation):
-        if not conversation:
-            return [{"role": "user", "parts": [{"text": prompt.prompt}]}]
-        messages = []
-        for response in conversation.responses:
-            messages.append(
-                {"role": "user", "parts": [{"text": response.prompt.prompt}]}
-            )
-            messages.append({"role": "model", "parts": [{"text": response.text()}]})
-        messages.append({"role": "user", "parts": [{"text": prompt.prompt}]})
-        return messages
-
-    def execute(self, prompt, stream, response, conversation):
-        key = llm.get_key("", "gemini", "LLM_GEMINI_KEY")
-        url = "https://generativelanguage.googleapis.com/v1beta/models/{}:streamGenerateContent?".format(
-            self.model_id
-        ) + urllib.parse.urlencode(
-            {"key": key}
-        )
-        gathered = []
-        body = {
-            "contents": self.build_messages(prompt, conversation),
-            "safetySettings": SAFETY_SETTINGS,
-        }
-        if prompt.system:
-            body["systemInstruction"] = {"parts": [{"text": prompt.system}]}
-        with httpx.stream(
-            "POST",
-            url,
-            timeout=None,
-            json=body,
-        ) as http_response:
-            events = ijson.sendable_list()
-            coro = ijson.items_coro(events, "item")
-            for chunk in http_response.iter_bytes():
-                coro.send(chunk)
-                if events:
-                    event = events[0]
-                    if isinstance(event, dict) and "error" in event:
-                        raise llm.ModelError(event["error"]["message"])
-                    try:
-                        yield event["candidates"][0]["content"]["parts"][0]["text"]
-                    except KeyError:
-                        yield ""
-                    gathered.append(event)
-                    events.clear()
-        response.response_json = gathered
-
-
-@llm.hookimpl
-def register_embedding_models(register):
-    register(
-        GeminiEmbeddingModel("text-embedding-004", "text-embedding-004"),
-    )
-
-
-class GeminiEmbeddingModel(llm.EmbeddingModel):
-    needs_key = "gemini"
-    key_env_var = "LLM_GEMINI_KEY"
-    batch_size = 20
-
-    def __init__(self, model_id, gemini_model_id):
-        self.model_id = model_id
-        self.gemini_model_id = gemini_model_id
-
-    def embed_batch(self, items):
-        headers = {
-            "Content-Type": "application/json",
-        }
-        data = {
-            "requests": [
-                {
-                    "model": "models/" + self.gemini_model_id,
-                    "content": {"parts": [{"text": item}]},
-                }
-                for item in items
-            ]
-        }
-
-        with httpx.Client() as client:
-            response = client.post(
-                f"https://generativelanguage.googleapis.com/v1beta/models/{self.gemini_model_id}:batchEmbedContents?key={self.get_key()}",
-                headers=headers,
-                json=data,
-                timeout=None,
-            )
-
-        response.raise_for_status()
-        return [item["values"] for item in response.json()["embeddings"]]
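The embedding model itself is unchanged between 0.2 and 0.3: `embed_batch()` still posts up to `batch_size = 20` texts per `batchEmbedContents` request. For completeness, a hedged sketch of exercising it through llm's standard embedding API (this assumes llm's documented `get_embedding_model` interface, nothing plugin-specific beyond the model ID):

```python
# Sketch assuming llm's documented embedding API; input strings are
# illustrative, and the Gemini key must already be configured.
import llm

model = llm.get_embedding_model("text-embedding-004")
vector = model.embed("hello world")            # one string -> list of floats
vectors = list(model.embed_multi(["a", "b"]))  # batched through embed_batch()
print(len(vector), len(vectors))
```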