symbolicai 0.20.2__py3-none-any.whl → 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- symai/__init__.py +96 -64
- symai/backend/base.py +93 -80
- symai/backend/engines/drawing/engine_bfl.py +12 -11
- symai/backend/engines/drawing/engine_gpt_image.py +108 -87
- symai/backend/engines/embedding/engine_llama_cpp.py +25 -28
- symai/backend/engines/embedding/engine_openai.py +3 -5
- symai/backend/engines/execute/engine_python.py +6 -5
- symai/backend/engines/files/engine_io.py +74 -67
- symai/backend/engines/imagecaptioning/engine_blip2.py +3 -3
- symai/backend/engines/imagecaptioning/engine_llavacpp_client.py +54 -38
- symai/backend/engines/index/engine_pinecone.py +23 -24
- symai/backend/engines/index/engine_vectordb.py +16 -14
- symai/backend/engines/lean/engine_lean4.py +38 -34
- symai/backend/engines/neurosymbolic/__init__.py +41 -13
- symai/backend/engines/neurosymbolic/engine_anthropic_claudeX_chat.py +262 -182
- symai/backend/engines/neurosymbolic/engine_anthropic_claudeX_reasoning.py +263 -191
- symai/backend/engines/neurosymbolic/engine_deepseekX_reasoning.py +53 -49
- symai/backend/engines/neurosymbolic/engine_google_geminiX_reasoning.py +212 -211
- symai/backend/engines/neurosymbolic/engine_groq.py +87 -63
- symai/backend/engines/neurosymbolic/engine_huggingface.py +21 -24
- symai/backend/engines/neurosymbolic/engine_llama_cpp.py +117 -48
- symai/backend/engines/neurosymbolic/engine_openai_gptX_chat.py +256 -229
- symai/backend/engines/neurosymbolic/engine_openai_gptX_reasoning.py +270 -150
- symai/backend/engines/ocr/engine_apilayer.py +6 -8
- symai/backend/engines/output/engine_stdout.py +1 -4
- symai/backend/engines/search/engine_openai.py +7 -7
- symai/backend/engines/search/engine_perplexity.py +5 -5
- symai/backend/engines/search/engine_serpapi.py +12 -14
- symai/backend/engines/speech_to_text/engine_local_whisper.py +20 -27
- symai/backend/engines/symbolic/engine_wolframalpha.py +3 -3
- symai/backend/engines/text_to_speech/engine_openai.py +5 -7
- symai/backend/engines/text_vision/engine_clip.py +7 -11
- symai/backend/engines/userinput/engine_console.py +3 -3
- symai/backend/engines/webscraping/engine_requests.py +81 -48
- symai/backend/mixin/__init__.py +13 -0
- symai/backend/mixin/anthropic.py +4 -2
- symai/backend/mixin/deepseek.py +2 -0
- symai/backend/mixin/google.py +2 -0
- symai/backend/mixin/openai.py +11 -3
- symai/backend/settings.py +83 -16
- symai/chat.py +101 -78
- symai/collect/__init__.py +7 -1
- symai/collect/dynamic.py +77 -69
- symai/collect/pipeline.py +35 -27
- symai/collect/stats.py +75 -63
- symai/components.py +198 -169
- symai/constraints.py +15 -12
- symai/core.py +698 -359
- symai/core_ext.py +32 -34
- symai/endpoints/api.py +80 -73
- symai/extended/.DS_Store +0 -0
- symai/extended/__init__.py +46 -12
- symai/extended/api_builder.py +11 -8
- symai/extended/arxiv_pdf_parser.py +13 -12
- symai/extended/bibtex_parser.py +2 -3
- symai/extended/conversation.py +101 -90
- symai/extended/document.py +17 -10
- symai/extended/file_merger.py +18 -13
- symai/extended/graph.py +18 -13
- symai/extended/html_style_template.py +2 -4
- symai/extended/interfaces/blip_2.py +1 -2
- symai/extended/interfaces/clip.py +1 -2
- symai/extended/interfaces/console.py +7 -1
- symai/extended/interfaces/dall_e.py +1 -1
- symai/extended/interfaces/flux.py +1 -1
- symai/extended/interfaces/gpt_image.py +1 -1
- symai/extended/interfaces/input.py +1 -1
- symai/extended/interfaces/llava.py +0 -1
- symai/extended/interfaces/naive_vectordb.py +7 -8
- symai/extended/interfaces/naive_webscraping.py +1 -1
- symai/extended/interfaces/ocr.py +1 -1
- symai/extended/interfaces/pinecone.py +6 -5
- symai/extended/interfaces/serpapi.py +1 -1
- symai/extended/interfaces/terminal.py +2 -3
- symai/extended/interfaces/tts.py +1 -1
- symai/extended/interfaces/whisper.py +1 -1
- symai/extended/interfaces/wolframalpha.py +1 -1
- symai/extended/metrics/__init__.py +11 -1
- symai/extended/metrics/similarity.py +11 -13
- symai/extended/os_command.py +17 -16
- symai/extended/packages/__init__.py +29 -3
- symai/extended/packages/symdev.py +19 -16
- symai/extended/packages/sympkg.py +12 -9
- symai/extended/packages/symrun.py +21 -19
- symai/extended/repo_cloner.py +11 -10
- symai/extended/seo_query_optimizer.py +1 -2
- symai/extended/solver.py +20 -23
- symai/extended/summarizer.py +4 -3
- symai/extended/taypan_interpreter.py +10 -12
- symai/extended/vectordb.py +99 -82
- symai/formatter/__init__.py +9 -1
- symai/formatter/formatter.py +12 -16
- symai/formatter/regex.py +62 -63
- symai/functional.py +176 -122
- symai/imports.py +136 -127
- symai/interfaces.py +56 -27
- symai/memory.py +14 -13
- symai/misc/console.py +49 -39
- symai/misc/loader.py +5 -3
- symai/models/__init__.py +17 -1
- symai/models/base.py +269 -181
- symai/models/errors.py +0 -1
- symai/ops/__init__.py +32 -22
- symai/ops/measures.py +11 -15
- symai/ops/primitives.py +348 -228
- symai/post_processors.py +32 -28
- symai/pre_processors.py +39 -41
- symai/processor.py +6 -4
- symai/prompts.py +59 -45
- symai/server/huggingface_server.py +23 -20
- symai/server/llama_cpp_server.py +7 -5
- symai/shell.py +3 -4
- symai/shellsv.py +499 -375
- symai/strategy.py +517 -287
- symai/symbol.py +111 -116
- symai/utils.py +42 -36
- {symbolicai-0.20.2.dist-info → symbolicai-1.0.0.dist-info}/METADATA +4 -2
- symbolicai-1.0.0.dist-info/RECORD +163 -0
- symbolicai-0.20.2.dist-info/RECORD +0 -162
- {symbolicai-0.20.2.dist-info → symbolicai-1.0.0.dist-info}/WHEEL +0 -0
- {symbolicai-0.20.2.dist-info → symbolicai-1.0.0.dist-info}/entry_points.txt +0 -0
- {symbolicai-0.20.2.dist-info → symbolicai-1.0.0.dist-info}/licenses/LICENSE +0 -0
- {symbolicai-0.20.2.dist-info → symbolicai-1.0.0.dist-info}/top_level.txt +0 -0
symai/backend/engines/drawing/engine_gpt_image.py

@@ -1,13 +1,14 @@
 import base64
+import contextlib
 import logging
 import tempfile
 from pathlib import Path
-from typing import Optional

 import openai
 import requests

 from ....symbol import Result
+from ....utils import UserMessage
 from ...base import Engine
 from ...settings import SYMAI_CONFIG

@@ -31,15 +32,16 @@ class GPTImageResult(Result):
         for item in value.data:
             has_url = hasattr(item, "url")
             has_b64 = hasattr(item, "b64_json")
-
+            with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp_file:
+                path = tmp_file.name
             if has_url and item.url is not None:
                 request = requests.get(item.url, allow_redirects=True)
                 request.raise_for_status()
-                with open(path, "wb") as f:
+                with Path(path).open("wb") as f:
                     f.write(request.content)
             elif has_b64 and item.b64_json is not None:
                 raw = base64.b64decode(item.b64_json)
-                with open(path, "wb") as f:
+                with Path(path).open("wb") as f:
                     f.write(raw)
             imgs.append(path)
         self._value = imgs
@@ -47,14 +49,14 @@ class GPTImageResult(Result):

 class GPTImageEngine(Engine):
     """
-    A drop in engine for OpenAI's unified Images API,
+    A drop-in engine for OpenAI's unified Images API,
     supporting gpt-image-1, dall-e-2, dall-e-3,
     with all the extra parameters (background, moderation, etc).
     """
     def __init__(
         self,
-        api_key: Optional[str] = None,
-        model: Optional[str] = None,
+        api_key: str | None = None,
+        model: str | None = None,
     ):
         super().__init__()
         self.config = SYMAI_CONFIG
@@ -83,7 +85,7 @@ class GPTImageEngine(Engine):

     def command(self, *args, **kwargs):
         """
-        Allow hot swapping API key or model at runtime.
+        Allow hot-swapping API key or model at runtime.
         """
         super().command(*args, **kwargs)
         if "DRAWING_ENGINE_API_KEY" in kwargs:
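In the `GPTImageResult` hunk above, 1.0.0 reserves the output path through `tempfile.NamedTemporaryFile(delete=False)`, whose file outlives the `with` block, and swaps bare `open()` calls for the pathlib-idiomatic `Path.open()`. A minimal standalone sketch of that download-to-temp-file pattern; the function name and URL handling are illustrative, not part of the package:

```python
import tempfile
from pathlib import Path

import requests


def download_to_tmp(url: str) -> str:
    """Fetch a remote image and return a persistent temp-file path."""
    # delete=False keeps the file on disk after the context manager exits,
    # so the reserved path can be written to and handed to the caller.
    with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp_file:
        path = tmp_file.name
    response = requests.get(url, allow_redirects=True)
    response.raise_for_status()
    # Path.open() replaces the builtin open() used in 0.20.2.
    with Path(path).open("wb") as f:
        f.write(response.content)
    return path
```

The same file's `forward` method is restructured in the next hunk.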
@@ -105,98 +107,117 @@ class GPTImageEngine(Engine):
         operation = kwargs.get("operation")

         if operation is None:
-            raise ValueError("Operation not specified!")
+            UserMessage("Operation not specified!", raise_with=ValueError)

         n = kwargs.get("n", 1)

-
-        if isinstance(kwargs["size"], int):
-            s = kwargs["size"]
-            kwargs["size"] = f"{s}x{s}"
+        self._normalize_size(kwargs)

         except_remedy = kwargs.get("except_remedy", None)

         callback = None
         try:
-            if operation == "create":
-                create_kwargs = {
-                    "model": model,
-                    "prompt": prompt,
-                    "n": n,
-                    "size": kwargs.get("size"),
-                }
-
-                if model == "dall-e-3":
-                    create_kwargs["response_format"] = kwargs.get("response_format", "url")
-                    create_kwargs["quality"] = kwargs.get("quality", "standard")
-                    create_kwargs["style"] = kwargs.get("style", "vivid")
-
-                if model.startswith("gpt-image-"):
-                    create_kwargs["quality"] = kwargs.get("quality", "medium")
-                    create_kwargs["moderation"] = kwargs.get("moderation", "auto")
-                    create_kwargs["background"] = kwargs.get("background", "auto")
-                    create_kwargs["output_format"] = kwargs.get("output_compression", "png")
-                    if create_kwargs["output_format"] == "jpeg" or create_kwargs["output_format"] == "webp":
-                        create_kwargs["output_compression"] = kwargs.get("output_compression", "100")
-
-                callback = openai.images.generate
-                res = openai.images.generate(**create_kwargs)
-
-            elif operation == "variation":
-                assert "image_path" in kwargs, "image_path required for variation"
-                callback = openai.images.create_variation
-                with open(kwargs["image_path"], "rb") as img:
-                    res = openai.images.create_variation(
-                        model=model,
-                        image=img,
-                        n=n,
-                        size=kwargs.get("size"),
-                        response_format=kwargs.get("response_format", "url"),
-                    )
-
-            elif operation == "edit":
-                assert "image_path" in kwargs, "image_path required for edit"
-                # allow either a single path or a list of paths
-                img_paths = kwargs["image_path"]
-                if not isinstance(img_paths, (list, tuple)):
-                    img_paths = [img_paths]
-                # open all images
-                image_files = [open(p, "rb") for p in img_paths]
-                # optional mask (only for the first image)
-                mask_file = None
-                if "mask_path" in kwargs and kwargs["mask_path"] is not None:
-                    mask_file = open(kwargs["mask_path"], "rb")
-                # construct API args
-                edit_kwargs = {
-                    "model": model,
-                    "image": image_files if len(image_files) > 1 else image_files[0],
-                    "prompt": prompt,
-                    "n": n,
-                    "size": kwargs.get("size"),
-                }
-
-                if model.startswith("gpt-image-"):
-                    edit_kwargs["quality"] = kwargs.get("quality", "auto")
-
-                if mask_file:
-                    edit_kwargs["mask"] = mask_file
-                callback = openai.images.edit
-
-                res = openai.images.edit(**edit_kwargs)
-                # clean up file handles
-                for f in image_files:
-                    f.close()
-                if mask_file:
-                    mask_file.close()
-            else:
-                raise ValueError(f"Unknown image operation: {operation}")
-
+            callback = self._resolve_callback(operation)
+            callback, res = self._dispatch_operation(
+                operation=operation,
+                prompt=prompt,
+                model=model,
+                n=n,
+                kwargs=kwargs,
+            )
         except Exception as e:
             if except_remedy is None:
                 raise
             res = except_remedy(self, e, callback, argument)

-        # wrap it up
         metadata = {}
         result = GPTImageResult(res)
         return [result], metadata
+
+    def _normalize_size(self, kwargs):
+        if "size" in kwargs and isinstance(kwargs["size"], int):
+            s = kwargs["size"]
+            kwargs["size"] = f"{s}x{s}"
+
+    def _resolve_callback(self, operation):
+        if operation == "create":
+            return openai.images.generate
+        if operation == "variation":
+            return openai.images.create_variation
+        if operation == "edit":
+            return openai.images.edit
+        UserMessage(f"Unknown image operation: {operation}", raise_with=ValueError)
+        return openai.images.generate
+
+    def _dispatch_operation(self, operation, prompt, model, n, kwargs):
+        if operation == "create":
+            return self._execute_create(prompt, model, n, kwargs)
+        if operation == "variation":
+            return self._execute_variation(model, n, kwargs)
+        if operation == "edit":
+            return self._execute_edit(prompt, model, n, kwargs)
+        return UserMessage(f"Unknown image operation: {operation}", raise_with=ValueError)
+
+    def _execute_create(self, prompt, model, n, kwargs):
+        create_kwargs = {
+            "model": model,
+            "prompt": prompt,
+            "n": n,
+            "size": kwargs.get("size"),
+        }
+
+        if model == "dall-e-3":
+            create_kwargs["response_format"] = kwargs.get("response_format", "url")
+            create_kwargs["quality"] = kwargs.get("quality", "standard")
+            create_kwargs["style"] = kwargs.get("style", "vivid")
+
+        if model.startswith("gpt-image-"):
+            create_kwargs["quality"] = kwargs.get("quality", "medium")
+            create_kwargs["moderation"] = kwargs.get("moderation", "auto")
+            create_kwargs["background"] = kwargs.get("background", "auto")
+            create_kwargs["output_format"] = kwargs.get("output_compression", "png")
+            if create_kwargs["output_format"] == "jpeg" or create_kwargs["output_format"] == "webp":
+                create_kwargs["output_compression"] = kwargs.get("output_compression", "100")
+
+        callback = openai.images.generate
+        return callback, callback(**create_kwargs)
+
+    def _execute_variation(self, model, n, kwargs):
+        assert "image_path" in kwargs, "image_path required for variation"
+        callback = openai.images.create_variation
+        with Path(kwargs["image_path"]).open("rb") as img:
+            result = callback(
+                model=model,
+                image=img,
+                n=n,
+                size=kwargs.get("size"),
+                response_format=kwargs.get("response_format", "url"),
+            )
+        return callback, result
+
+    def _execute_edit(self, prompt, model, n, kwargs):
+        assert "image_path" in kwargs, "image_path required for edit"
+        img_paths = kwargs["image_path"]
+        if not isinstance(img_paths, (list, tuple)):
+            img_paths = [img_paths]
+        with contextlib.ExitStack() as stack:
+            image_files = [stack.enter_context(Path(p).open("rb")) for p in img_paths]
+            mask_file = None
+            if "mask_path" in kwargs and kwargs["mask_path"] is not None:
+                mask_file = stack.enter_context(Path(kwargs["mask_path"]).open("rb"))
+            edit_kwargs = {
+                "model": model,
+                "image": image_files if len(image_files) > 1 else image_files[0],
+                "prompt": prompt,
+                "n": n,
+                "size": kwargs.get("size"),
+            }

+            if model.startswith("gpt-image-"):
+                edit_kwargs["quality"] = kwargs.get("quality", "auto")
+
+            if mask_file:
+                edit_kwargs["mask"] = mask_file
+            callback = openai.images.edit
+            result = callback(**edit_kwargs)
+        return callback, result
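The `_execute_edit` helper above is the substantive part of this refactor: the 0.20.2 code opened a variable number of image handles with bare `open()` and closed them by hand after the API call, leaking every handle if the call raised. `contextlib.ExitStack` registers each handle as it is opened and unwinds all of them, in reverse order, whether the block exits normally or by exception. A minimal sketch of the pattern with placeholder file names:

```python
import contextlib
from pathlib import Path


def read_headers(paths: list[str]) -> list[bytes]:
    """Open any number of files and close all of them on exit."""
    with contextlib.ExitStack() as stack:
        # enter_context() pushes each file onto the stack; the stack closes
        # them in reverse order even if a later open() or read() raises.
        handles = [stack.enter_context(Path(p).open("rb")) for p in paths]
        return [h.read(8) for h in handles]  # e.g. magic bytes


# read_headers(["a.png", "b.png"])  # placeholder paths
```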
symai/backend/engines/embedding/engine_llama_cpp.py

@@ -1,14 +1,12 @@
 import asyncio
 import logging
-from
-from typing import Optional
+from typing import Any, ClassVar

 import aiohttp
 import nest_asyncio
-import numpy as np

 from ....core_ext import retry
-from ....utils import
+from ....utils import UserMessage
 from ...base import Engine
 from ...settings import SYMAI_CONFIG, SYMSERVER_CONFIG

@@ -18,7 +16,7 @@ logging.getLogger("httpx").setLevel(logging.ERROR)
 logging.getLogger("httpcore").setLevel(logging.ERROR)

 class LlamaCppEmbeddingEngine(Engine):
-    _retry_params = {
+    _retry_params: ClassVar[dict[str, Any]] = {
         'tries': 5,
         'delay': 2,
         'max_delay': 60,
@@ -26,7 +24,7 @@ class LlamaCppEmbeddingEngine(Engine):
         'jitter': (1, 5),
         'graceful': True
     }
-    _timeout_params = {
+    _timeout_params: ClassVar[dict[str, Any]] = {
         'read': None,
         'connect': None,
     }
@@ -41,7 +39,7 @@ class LlamaCppEmbeddingEngine(Engine):
         if self.id() != 'embedding':
             return
         if not SYMSERVER_CONFIG.get('online'):
-            raise ValueError('You are using the llama.cpp embedding engine, but the server endpoint is not started. Please start the server with `symserver [--args]`.')
+            UserMessage('You are using the llama.cpp embedding engine, but the server endpoint is not started. Please start the server with `symserver [--args]`.', raise_with=ValueError)

         self.server_endpoint = f"http://{SYMSERVER_CONFIG.get('--host')}:{SYMSERVER_CONFIG.get('--port')}"
         self.timeout_params = self._validate_timeout_params(timeout_params)
@@ -60,13 +58,13 @@ class LlamaCppEmbeddingEngine(Engine):

     def _validate_timeout_params(self, timeout_params):
         if not isinstance(timeout_params, dict):
-            raise ValueError("timeout_params must be a dictionary")
+            UserMessage("timeout_params must be a dictionary", raise_with=ValueError)
         assert all(key in timeout_params for key in ['read', 'connect']), "Available keys: ['read', 'connect']"
         return timeout_params

     def _validate_retry_params(self, retry_params):
         if not isinstance(retry_params, dict):
-            raise ValueError("retry_params must be a dictionary")
+            UserMessage("retry_params must be a dictionary", raise_with=ValueError)
         assert all(key in retry_params for key in ['tries', 'delay', 'max_delay', 'backoff', 'jitter', 'graceful']), \
             "Available keys: ['tries', 'delay', 'max_delay', 'backoff', 'jitter', 'graceful']"
         return retry_params
@@ -77,14 +75,16 @@ class LlamaCppEmbeddingEngine(Engine):
         try:
             current_loop = asyncio.get_event_loop()
             if current_loop.is_closed():
-                raise RuntimeError("Event loop is closed.")
+                msg = "Event loop is closed."
+                UserMessage(msg)
+                raise RuntimeError(msg)
             return current_loop
         except RuntimeError:
             new_loop = asyncio.new_event_loop()
             asyncio.set_event_loop(new_loop)
             return new_loop

-    async def _arequest(self, text: str) -> dict:
+    async def _arequest(self, text: str, embd_normalize: str) -> dict:
         """Makes an async HTTP request to the llama.cpp server."""
         @retry(**self.retry_params)
         async def _make_request():
@@ -92,14 +92,13 @@ class LlamaCppEmbeddingEngine(Engine):
                 sock_connect=self.timeout_params['connect'],
                 sock_read=self.timeout_params['read']
             )
-            async with aiohttp.ClientSession(timeout=timeout) as session:
-                async with session.post(
-                    f"{self.server_endpoint}/v1/embeddings",
-                    json={"content": text}
-                ) as res:
-                    if res.status != 200:
-                        raise ValueError(f"Request failed with status code: {res.status}")
-                    return await res.json()
+            async with aiohttp.ClientSession(timeout=timeout) as session, session.post(
+                f"{self.server_endpoint}/v1/embeddings",
+                json={"content": text, "embd_normalize": embd_normalize}
+            ) as res:
+                if res.status != 200:
+                    UserMessage(f"Request failed with status code: {res.status}", raise_with=ValueError)
+                return await res.json()

         return await _make_request()

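The request body above also collapses the nested `async with aiohttp.ClientSession(...)` / `async with session.post(...)` pair into a single statement with two context managers, removing an indentation level without changing cleanup order: the response still closes before the session. A runnable sketch of the same shape; the endpoint and payload are placeholders, and it needs a server listening:

```python
import asyncio

import aiohttp


async def fetch_embedding(endpoint: str, text: str) -> dict:
    timeout = aiohttp.ClientTimeout(sock_connect=None, sock_read=None)
    # One async-with, two context managers: the response context closes
    # first, then the session, exactly as in the nested form.
    async with aiohttp.ClientSession(timeout=timeout) as session, session.post(
        f"{endpoint}/v1/embeddings",
        json={"content": text},
    ) as res:
        res.raise_for_status()
        return await res.json()


# asyncio.run(fetch_embedding("http://localhost:8080", "hello"))
```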
@@ -108,23 +107,21 @@ class LlamaCppEmbeddingEngine(Engine):
         kwargs = argument.kwargs

         inp = prepared_input if isinstance(prepared_input, list) else [prepared_input]
+        embd_normalize = kwargs.get('embd_normalize', -1) # -1 = no normalization
+
         new_dim = kwargs.get('new_dim')
+        if new_dim:
+            UserMessage("new_dim is not yet supported", raise_with=NotImplementedError)

         nest_asyncio.apply()
         loop = self._get_event_loop()

         try:
-            res = loop.run_until_complete(self._arequest(inp))
+            res = loop.run_until_complete(self._arequest(inp, embd_normalize))
         except Exception as e:
-            raise ValueError(f"Request failed with error: {str(e)}")
-
-            if new_dim:
-                raise NotImplementedError("new_dim is not yet supported")
+            UserMessage(f"Request failed with error: {e!s}", raise_with=ValueError)

-        if res is not None:
-            output = [r["embedding"] for r in res["data"]]
-        else:
-            output = None
+        output = [r["embedding"] for r in res] if res is not None else None # B x 1 x D
         metadata = {'raw_output': res}

         return [output], metadata
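A pattern that repeats across every engine in this release: bare `raise X(msg)` statements become `UserMessage(msg, raise_with=X)` imported from `symai.utils`, and informational messages drop the `raise_with` argument. The diff never shows the helper's body; judging only from its call sites, it reports the message to the user and raises the given exception type when one is passed. A hypothetical sketch of that contract, an assumption rather than the library's actual implementation:

```python
# Hypothetical reconstruction of the call-site contract seen in this diff;
# the real helper lives in symai/utils.py and may differ.
def UserMessage(message: str, raise_with: type[Exception] | None = None) -> None:
    print(f"[symai] {message}")    # assumed: surface the message to the user
    if raise_with is not None:
        raise raise_with(message)  # assumed: escalate when a type is given


# UserMessage("Operation not specified!", raise_with=ValueError)  # -> ValueError
```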
symai/backend/engines/embedding/engine_openai.py

@@ -1,5 +1,4 @@
 import logging
-from typing import Optional

 import numpy as np
 import openai
@@ -16,7 +15,7 @@ logging.getLogger("httpcore").setLevel(logging.ERROR)


 class EmbeddingEngine(Engine, OpenAIMixin):
-    def __init__(self, api_key: Optional[str] = None, model: Optional[str] = None):
+    def __init__(self, api_key: str | None = None, model: str | None = None):
         super().__init__()
         logger = logging.getLogger('openai')
         logger.setLevel(logging.WARNING)
@@ -79,6 +78,5 @@ class EmbeddingEngine(Engine, OpenAIMixin):
             if norm == 0:
                 return x.tolist()
             return (x / norm).tolist()
-
-        norm = np.linalg.norm(x, 2, axis=1)
-        return np.where(norm == 0, x, x / norm).tolist()
+        norm = np.linalg.norm(x, 2, axis=1, keepdims=True)
+        return np.where(norm == 0, x, x / norm).tolist()
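The only functional change in the batched branch above is `keepdims=True`. Without it, `norm` has shape `(B,)` while `x` has shape `(B, D)`, so `x / norm` only broadcasts when `D == B`; with it, `norm` keeps shape `(B, 1)` and divides row-wise. A quick check:

```python
import numpy as np

x = np.array([[3.0, 4.0],
              [0.0, 0.0],
              [1.0, 0.0]])                            # shape (3, 2)

norm = np.linalg.norm(x, 2, axis=1, keepdims=True)    # shape (3, 1)
with np.errstate(invalid="ignore"):                   # zero rows divide by 0
    out = np.where(norm == 0, x, x / norm)            # row-wise broadcast

print(out)
# [[0.6 0.8]
#  [0.  0. ]   <- zero row passed through; np.where masks the NaNs
#  [1.  0. ]]

# Without keepdims, norm has shape (3,) and x / norm raises a broadcast
# error for any (B, D) with D != B.
```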
symai/backend/engines/execute/engine_python.py

@@ -1,10 +1,11 @@
+import sys
+import traceback
+
 from ....symbol import Result
 from ...base import Engine


 def full_stack():
-    import sys
-    import traceback
     exc = sys.exc_info()[0]
     stack = traceback.extract_stack()[-10:-1]  # last one would be full_stack()
     if exc is not None:  # i.e. an exception is present
@@ -68,9 +69,9 @@ class PythonEngine(Engine):
     def forward(self, argument):
         code = argument.prop.prepared_input
         kwargs = argument.kwargs
-        globals_ = kwargs['globals'] if 'globals' in kwargs else {}
-        locals_ = kwargs['locals'] if 'locals' in kwargs else {}
-        input_handler = kwargs['input_handler'] if 'input_handler' in kwargs else None
+        globals_ = kwargs.get('globals', {})
+        locals_ = kwargs.get('locals', {})
+        input_handler = kwargs.get('input_handler')
         if input_handler:
             input_handler((code,))

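The `forward` change above replaces conditional key lookups with `dict.get` defaults: `dict.get` returns the default instead of raising `KeyError` when the key is absent, so `globals_` and `locals_` can fall back to `{}` in a single expression. A small illustration of the equivalence:

```python
kwargs = {"globals": {"x": 1}}

# Old shape: test, then index (two dictionary lookups).
globals_old = kwargs["globals"] if "globals" in kwargs else {}

# New shape: dict.get with a default (one lookup, one expression).
globals_new = kwargs.get("globals", {})
input_handler = kwargs.get("input_handler")  # defaults to None when absent

assert globals_old == globals_new == {"x": 1}
assert input_handler is None
```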