symbolicai 1.0.0__py3-none-any.whl → 1.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- symai/__init__.py +198 -134
- symai/backend/base.py +51 -51
- symai/backend/engines/drawing/engine_bfl.py +33 -33
- symai/backend/engines/drawing/engine_gpt_image.py +4 -10
- symai/backend/engines/embedding/engine_llama_cpp.py +50 -35
- symai/backend/engines/embedding/engine_openai.py +22 -16
- symai/backend/engines/execute/engine_python.py +16 -16
- symai/backend/engines/files/engine_io.py +51 -49
- symai/backend/engines/imagecaptioning/engine_blip2.py +27 -23
- symai/backend/engines/imagecaptioning/engine_llavacpp_client.py +53 -46
- symai/backend/engines/index/engine_pinecone.py +116 -88
- symai/backend/engines/index/engine_qdrant.py +1011 -0
- symai/backend/engines/index/engine_vectordb.py +78 -52
- symai/backend/engines/lean/engine_lean4.py +65 -25
- symai/backend/engines/neurosymbolic/__init__.py +28 -28
- symai/backend/engines/neurosymbolic/engine_anthropic_claudeX_chat.py +137 -135
- symai/backend/engines/neurosymbolic/engine_anthropic_claudeX_reasoning.py +145 -152
- symai/backend/engines/neurosymbolic/engine_cerebras.py +328 -0
- symai/backend/engines/neurosymbolic/engine_deepseekX_reasoning.py +75 -49
- symai/backend/engines/neurosymbolic/engine_google_geminiX_reasoning.py +199 -155
- symai/backend/engines/neurosymbolic/engine_groq.py +106 -72
- symai/backend/engines/neurosymbolic/engine_huggingface.py +100 -67
- symai/backend/engines/neurosymbolic/engine_llama_cpp.py +121 -93
- symai/backend/engines/neurosymbolic/engine_openai_gptX_chat.py +213 -132
- symai/backend/engines/neurosymbolic/engine_openai_gptX_reasoning.py +180 -137
- symai/backend/engines/ocr/engine_apilayer.py +18 -20
- symai/backend/engines/output/engine_stdout.py +9 -9
- symai/backend/engines/{webscraping → scrape}/engine_requests.py +25 -11
- symai/backend/engines/search/engine_openai.py +95 -83
- symai/backend/engines/search/engine_parallel.py +665 -0
- symai/backend/engines/search/engine_perplexity.py +40 -41
- symai/backend/engines/search/engine_serpapi.py +33 -28
- symai/backend/engines/speech_to_text/engine_local_whisper.py +37 -27
- symai/backend/engines/symbolic/engine_wolframalpha.py +14 -8
- symai/backend/engines/text_to_speech/engine_openai.py +15 -19
- symai/backend/engines/text_vision/engine_clip.py +34 -28
- symai/backend/engines/userinput/engine_console.py +3 -4
- symai/backend/mixin/anthropic.py +48 -40
- symai/backend/mixin/deepseek.py +4 -5
- symai/backend/mixin/google.py +5 -4
- symai/backend/mixin/groq.py +2 -4
- symai/backend/mixin/openai.py +132 -110
- symai/backend/settings.py +14 -14
- symai/chat.py +164 -94
- symai/collect/dynamic.py +13 -11
- symai/collect/pipeline.py +39 -31
- symai/collect/stats.py +109 -69
- symai/components.py +556 -238
- symai/constraints.py +14 -5
- symai/core.py +1495 -1210
- symai/core_ext.py +55 -50
- symai/endpoints/api.py +113 -58
- symai/extended/api_builder.py +22 -17
- symai/extended/arxiv_pdf_parser.py +13 -5
- symai/extended/bibtex_parser.py +8 -4
- symai/extended/conversation.py +88 -69
- symai/extended/document.py +40 -27
- symai/extended/file_merger.py +45 -7
- symai/extended/graph.py +38 -24
- symai/extended/html_style_template.py +17 -11
- symai/extended/interfaces/blip_2.py +1 -1
- symai/extended/interfaces/clip.py +4 -2
- symai/extended/interfaces/console.py +5 -3
- symai/extended/interfaces/dall_e.py +3 -1
- symai/extended/interfaces/file.py +2 -0
- symai/extended/interfaces/flux.py +3 -1
- symai/extended/interfaces/gpt_image.py +15 -6
- symai/extended/interfaces/input.py +2 -1
- symai/extended/interfaces/llava.py +1 -1
- symai/extended/interfaces/{naive_webscraping.py → naive_scrape.py} +3 -2
- symai/extended/interfaces/naive_vectordb.py +2 -2
- symai/extended/interfaces/ocr.py +4 -2
- symai/extended/interfaces/openai_search.py +2 -0
- symai/extended/interfaces/parallel.py +30 -0
- symai/extended/interfaces/perplexity.py +2 -0
- symai/extended/interfaces/pinecone.py +6 -4
- symai/extended/interfaces/python.py +2 -0
- symai/extended/interfaces/serpapi.py +2 -0
- symai/extended/interfaces/terminal.py +0 -1
- symai/extended/interfaces/tts.py +2 -1
- symai/extended/interfaces/whisper.py +2 -1
- symai/extended/interfaces/wolframalpha.py +1 -0
- symai/extended/metrics/__init__.py +1 -1
- symai/extended/metrics/similarity.py +5 -2
- symai/extended/os_command.py +31 -22
- symai/extended/packages/symdev.py +39 -34
- symai/extended/packages/sympkg.py +30 -27
- symai/extended/packages/symrun.py +46 -35
- symai/extended/repo_cloner.py +10 -9
- symai/extended/seo_query_optimizer.py +15 -12
- symai/extended/solver.py +104 -76
- symai/extended/summarizer.py +8 -7
- symai/extended/taypan_interpreter.py +10 -9
- symai/extended/vectordb.py +28 -15
- symai/formatter/formatter.py +39 -31
- symai/formatter/regex.py +46 -44
- symai/functional.py +184 -86
- symai/imports.py +85 -51
- symai/interfaces.py +1 -1
- symai/memory.py +33 -24
- symai/menu/screen.py +28 -19
- symai/misc/console.py +27 -27
- symai/misc/loader.py +4 -3
- symai/models/base.py +147 -76
- symai/models/errors.py +1 -1
- symai/ops/__init__.py +1 -1
- symai/ops/measures.py +17 -14
- symai/ops/primitives.py +933 -635
- symai/post_processors.py +28 -24
- symai/pre_processors.py +58 -52
- symai/processor.py +15 -9
- symai/prompts.py +714 -649
- symai/server/huggingface_server.py +115 -32
- symai/server/llama_cpp_server.py +14 -6
- symai/server/qdrant_server.py +206 -0
- symai/shell.py +98 -39
- symai/shellsv.py +307 -223
- symai/strategy.py +135 -81
- symai/symbol.py +276 -225
- symai/utils.py +62 -46
- {symbolicai-1.0.0.dist-info → symbolicai-1.1.0.dist-info}/METADATA +19 -9
- symbolicai-1.1.0.dist-info/RECORD +168 -0
- symbolicai-1.0.0.dist-info/RECORD +0 -163
- {symbolicai-1.0.0.dist-info → symbolicai-1.1.0.dist-info}/WHEEL +0 -0
- {symbolicai-1.0.0.dist-info → symbolicai-1.1.0.dist-info}/entry_points.txt +0 -0
- {symbolicai-1.0.0.dist-info → symbolicai-1.1.0.dist-info}/licenses/LICENSE +0 -0
- {symbolicai-1.0.0.dist-info → symbolicai-1.1.0.dist-info}/top_level.txt +0 -0

symai/backend/engines/index/engine_vectordb.py

@@ -32,72 +32,76 @@ class VectorDBResult(Result):
         try:
             res = self._to_symbol(res).ast()
         except Exception as e:
-            message = [ …
+            message = [
+                "Sorry, failed to interact with index. Please check index name and try again later:",
+                str(e),
+            ]
             # package the message for the IndexResult class
-            res = { …
-            return [v[ …
+            res = {"matches": [{"metadata": {"text": "\n".join(message)}}]}
+            return [v["metadata"]["text"] for v in res]
 
     def _unpack_matches(self):
         if not self.value:
             return
         for i, match in enumerate(self.value):
             match_value = match.strip()
-            if match_value.startswith( …
-                m = match_value.split( …
-                splits = m.split( …
-                assert len(splits) >= 2, f …
+            if match_value.startswith("# ----[FILE_START]") and "# ----[FILE_END]" in match_value:
+                m = match_value.split("[FILE_CONTENT]:")[-1].strip()
+                splits = m.split("# ----[FILE_END]")
+                assert len(splits) >= 2, f"Invalid file format: {splits}"
                 content = splits[0]
-                file_name = …
+                file_name = ",".join(splits[1:])  # TODO: check why there are multiple file names
                 yield file_name.strip(), content.strip()
             else:
-                yield i+1, match_value
+                yield i + 1, match_value
 
     def __str__(self):
-        str_view = …
+        str_view = ""
        for filename, content in self._unpack_matches():
             # indent each line of the content
-            content_view = …
-            str_view += f …
-        return f …
+            content_view = "\n".join([" " + line for line in content.split("\n")])
+            str_view += f"* {filename}\n{content_view}\n\n"
+        return f"""
 [RESULT]
-{ …
+{"-=-" * 13}
 
 Query: {self._query}
 
-{ …
+{"-=-" * 13}
 
 Matches:
 
 {str_view}
-{ …
- …
+{"-=-" * 13}
+"""
 
     def _repr_html_(self) -> str:
         # return a nicely styled HTML list results based on retrieved documents
-        doc_str = …
+        doc_str = ""
         for filename, content in self._unpack_matches():
             doc_str += f'<li><a href="{filename}"><b>{filename}</a></b><br>{content}</li>\n'
-        return f …
+        return f"<ul>{doc_str}</ul>"
 
 
 class VectorDBIndexEngine(Engine):
     # Updated default values to be congruent with VectorDB's defaults
-    _default_index_name = …
+    _default_index_name = "dataindex"
     _default_index_dims = 768
     _default_index_top_k = 5
-    _default_index_metric = …
+    _default_index_metric = "cosine"
     _index_dict: ClassVar[dict[str, object]] = {}
     _index_storage_file: ClassVar[str | None] = None
+
     def __init__(
-        …
-        …
-        …
-        …
-        …
-        …
-        …
-        …
-        …
+        self,
+        index_name=_default_index_name,
+        index_dims=_default_index_dims,
+        index_top_k=_default_index_top_k,
+        index_metric=_default_index_metric,
+        index_dict=_index_dict,
+        index_storage_file=_index_storage_file,
+        **_kwargs,
+    ):
         super().__init__()
         self.config = deepcopy(SYMAI_CONFIG)
         self.index_name = index_name
@@ -112,9 +116,12 @@ class VectorDBIndexEngine(Engine):
         self.name = self.__class__.__name__
 
     def id(self) -> str:
-        if …
-        …
-        …
+        if (
+            not self.config["INDEXING_ENGINE_API_KEY"]
+            or self.config["INDEXING_ENGINE_API_KEY"] == ""
+        ):
+            return "index"
+        return super().id()  # default to unregistered
 
     def forward(self, argument):
         query = argument.prop.prepared_input
@@ -130,34 +137,51 @@ class VectorDBIndexEngine(Engine):
 
         self._init(index_name, top_k, index_dims, metric)
 
-        if operation == …
+        if operation == "search":
             if isinstance(query, list) and len(query) > 1:
-                UserMessage( …
+                UserMessage(
+                    "VectorDB indexing engine does not support multiple queries. Pass a single string query instead.",
+                    raise_with=ValueError,
+                )
             query_vector = self.index[index_name].embedding_function([query])[0]
-            results = self.index[index_name]( …
-            …
-            …
-            …
+            results = self.index[index_name](
+                vector=query_vector, top_k=top_k, return_similarities=similarities
+            )
+            rsp = [{"metadata": {"text": result}} for result in results]
+        elif operation == "add":
+            assert isinstance(query, list), (
+                "VectorDB indexing requires a list of queries at insertion, even if there is only one query."
+            )
             documents = []
             vectors = []
             for q in query:
                 vectors.append(self.index[index_name].embedding_function([q])[0])
                 documents.append(q)
             self.index[index_name].add(documents=documents, vectors=vectors)
-        elif operation == …
-            assert kwargs, …
-            …
-            …
-            …
+        elif operation == "config":
+            assert kwargs, (
+                "Please provide a configuration by passing the appropriate kwargs. Currently, only `load`, `save`, `purge`."
+            )
+            maybe_as_prompt = kwargs.get("prompt")
+            if kwargs.get("load", maybe_as_prompt == "load"):
+                assert storage_file, (
+                    "Please provide a `storage_file` path to load the pre-computed index."
+                )
                 self.load(index_name, storage_file, index_dims, top_k, metric)
-            elif kwargs.get( …
+            elif kwargs.get("save", maybe_as_prompt == "save"):
                 self.save(index_name, storage_file)
-            elif kwargs.get( …
+            elif kwargs.get("purge", maybe_as_prompt == "purge"):
                 self.purge(index_name)
             else:
-                UserMessage( …
+                UserMessage(
+                    'Invalid configuration; please use either "load", "save", or "purge".',
+                    raise_with=ValueError,
+                )
         else:
-            UserMessage( …
+            UserMessage(
+                'Invalid operation; please use either "search", "add", or "config".',
+                raise_with=ValueError,
+            )
 
         metadata = {}
         rsp = VectorDBResult(rsp, query[0], None)
@@ -171,11 +195,13 @@ class VectorDBIndexEngine(Engine):
             index_dims=index_dims,
             top_k=top_k,
             similarity_metric=metric,
-            embedding_model=embedding_model
+            embedding_model=embedding_model,  # @NOTE: the VectorDBIndexEngine class uses precomputed embeddings so the model is not needed in the VectorDB class
         )
 
     def prepare(self, argument):
-        assert not argument.prop.processed_input, …
+        assert not argument.prop.processed_input, (
+            "VectorDB indexing engine does not support processed_input."
+        )
         argument.prop.prepared_input = argument.prop.prompt
         argument.prop.limit = 1
 
@@ -188,11 +214,11 @@ class VectorDBIndexEngine(Engine):
             index_name=index_name,
         )
 
-    def save(self, index_name …
+    def save(self, index_name=None, storage_file=None):
         index_name = index_name or self.index_name
         storage_file = storage_file or self._index_storage_file
         self.index[index_name].save(storage_file)
 
-    def purge(self, index_name …
+    def purge(self, index_name=None):
         index_name = index_name or self.index_name
         self.index[index_name].purge(index_name)
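
A note on the new `config` branch above: the operation keyword can double as the prompt, because `kwargs.get("load", maybe_as_prompt == "load")` falls back to comparing the `prompt` kwarg only when no explicit boolean flag was passed. A minimal standalone sketch of that fallback pattern (the function name and return values here are illustrative, not part of the symai API):

    def dispatch_config(storage_file=None, **kwargs):
        # An explicit boolean kwarg (load=True) wins; otherwise
        # prompt="load"/"save"/"purge" selects the action.
        maybe_as_prompt = kwargs.get("prompt")
        if kwargs.get("load", maybe_as_prompt == "load"):
            assert storage_file, "a storage_file path is required to load"
            return ("load", storage_file)
        if kwargs.get("save", maybe_as_prompt == "save"):
            return ("save", storage_file)
        if kwargs.get("purge", maybe_as_prompt == "purge"):
            return ("purge", None)
        raise ValueError('Invalid configuration; use "load", "save", or "purge".')

    # Both spellings select the same action:
    assert dispatch_config(load=True, storage_file="index.bin") == ("load", "index.bin")
    assert dispatch_config(prompt="load", storage_file="index.bin") == ("load", "index.bin")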

symai/backend/engines/lean/engine_lean4.py

@@ -18,6 +18,7 @@ class LeanResult(Result):
     Attributes:
         _value (Dict[str, str]): A dictionary containing the output of the Lean execution.
     """
+
     def __init__(self, value: dict[str, str]) -> None:
         """
         Initializes a new LeanResult instance.
@@ -28,6 +29,7 @@ class LeanResult(Result):
         super().__init__(value)
         self._value = value
 
+
 class LeanEngine(Engine):
     """
     Engine for executing Lean code within a Docker container, providing SSH access for execution.
@@ -43,10 +45,10 @@ class LeanEngine(Engine):
 
     def __init__(
         self,
-        ssh_host: str = …
+        ssh_host: str = "localhost",
         ssh_port: int = 2222,
-        ssh_user: str = …
-        ssh_key_path: str = …
+        ssh_user: str = "root",
+        ssh_key_path: str = "~/.ssh/id_rsa",
     ) -> None:
         """
         Initializes the LeanEngine with SSH and Docker configurations.
@@ -74,7 +76,7 @@ class LeanEngine(Engine):
         Returns:
             str: The identifier of the LeanEngine, 'lean4'.
         """
-        return …
+        return "lean4"
 
     def _ensure_container(self) -> docker.models.containers.Container:
         """
@@ -86,10 +88,14 @@ class LeanEngine(Engine):
         container_name: str = "lean-container"
 
         try:
-            existing_container: docker.models.containers.Container = …
+            existing_container: docker.models.containers.Container = (
+                self.docker_client.containers.get(container_name)
+            )
             existing_container.remove(force=True)
         except docker.errors.NotFound:
-            UserMessage( …
+            UserMessage(
+                f"No existing container named '{container_name}' found. Proceeding to create a new one."
+            )
 
         dockerfile: str = """
 FROM buildpack-deps:buster
@@ -117,14 +123,15 @@ class LeanEngine(Engine):
             dockerfile_path = Path(temp_dockerfile.name)
 
             image: docker.models.images.Image
-            image, _ = self.docker_client.images.build( …
+            image, _ = self.docker_client.images.build(
+                path=str(dockerfile_path.parent),
+                dockerfile=str(dockerfile_path),
+                tag="lean4-container-image",
+            )
             dockerfile_path.unlink()
 
         container: docker.models.containers.Container = self.docker_client.containers.run(
-            image.id,
-            detach=True,
-            name=container_name,
-            ports={'22/tcp': self.ssh_port}
+            image.id, detach=True, name=container_name, ports={"22/tcp": self.ssh_port}
         )
         return container
 
@@ -134,13 +141,39 @@ class LeanEngine(Engine):
         and configuring the container to accept SSH connections using the generated key.
         """
         if not self.ssh_key_path.exists():
-            subprocess.run( …
+            subprocess.run(
+                ["ssh-keygen", "-t", "rsa", "-b", "2048", "-f", str(self.ssh_key_path), "-N", ""],
+                check=True,
+            )
 
-        subprocess.run( …
-        …
-        …
-        …
-        subprocess.run( …
+        subprocess.run(
+            ["docker", "exec", self.container.id, "mkdir", "-p", "/root/.ssh"], check=True
+        )
+        public_key_path = self.ssh_key_path.parent / f"{self.ssh_key_path.name}.pub"
+        subprocess.run(
+            [
+                "docker",
+                "cp",
+                str(public_key_path),
+                f"{self.container.id}:/root/.ssh/authorized_keys",
+            ],
+            check=True,
+        )
+        subprocess.run(
+            ["docker", "exec", self.container.id, "chmod", "600", "/root/.ssh/authorized_keys"],
+            check=True,
+        )
+        subprocess.run(
+            [
+                "docker",
+                "exec",
+                self.container.id,
+                "chown",
+                "root:root",
+                "/root/.ssh/authorized_keys",
+            ],
+            check=True,
+        )
 
     def forward(self, argument: Any) -> tuple[list[LeanResult], dict]:
         """
@@ -167,12 +200,12 @@ class LeanEngine(Engine):
             metadata.update(exec_metadata)
 
             if output:
-                rsp = LeanResult({ …
+                rsp = LeanResult({"output": output})
             else:
-                metadata[ …
+                metadata["status"] = "no_output"
         except Exception as e:
             err = str(e)
-            metadata.update({ …
+            metadata.update({"status": "error", "message": err})
             UserMessage(f"Error during Lean execution: {err}")
         finally:
             if tmpfile_path and tmpfile_path.exists():
@@ -196,12 +229,19 @@ class LeanEngine(Engine):
         try:
             ssh = paramiko.SSHClient()
             ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
-            ssh.connect( …
+            ssh.connect(
+                self.ssh_host,
+                port=self.ssh_port,
+                username=self.ssh_user,
+                key_filename=str(self.ssh_key_path),
+            )
 
             elan_path: str = "/usr/local/elan/bin/elan"
             lean_path: str = "/usr/local/elan/bin/lean"
 
-            _stdin, stdout, stderr = ssh.exec_command( …
+            _stdin, stdout, stderr = ssh.exec_command(
+                f"{elan_path} default stable && {lean_path} --version"
+            )
             output: str = stdout.read().decode()
             error: str = stderr.read().decode()
             UserMessage(f"SSH Command Output: {output}")
@@ -220,10 +260,10 @@ class LeanEngine(Engine):
             ssh.close()
 
             if "error" in output.lower() or "error" in error.lower():
-                return output, { …
+                return output, {"status": "failure"}
             if not output and not error:
-                return "Lean program halted successfully with no output.", { …
-            return output, { …
+                return "Lean program halted successfully with no output.", {"status": "success"}
+            return output, {"status": "success"}
 
         except Exception as e:
             UserMessage(f"SSH command execution failed: {e!s}", raise_with=RuntimeError)
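
The reformatted `ssh.connect(...)` and `ssh.exec_command(...)` calls above follow the standard paramiko connect/exec/read sequence. A self-contained sketch of that sequence (the defaults mirror the engine's constructor; the helper name is illustrative):

    import os
    import paramiko

    def run_remote(cmd, host="localhost", port=2222, user="root", key_path="~/.ssh/id_rsa"):
        # Connect with a private key, run one command, return (stdout, stderr).
        ssh = paramiko.SSHClient()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh.connect(host, port=port, username=user,
                    key_filename=os.path.expanduser(key_path))
        try:
            _stdin, stdout, stderr = ssh.exec_command(cmd)
            return stdout.read().decode(), stderr.read().decode()
        finally:
            ssh.close()

    # e.g. the version probe the engine runs:
    # out, err = run_remote("/usr/local/elan/bin/lean --version")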

symai/backend/engines/neurosymbolic/__init__.py

@@ -1,14 +1,14 @@
 from ...mixin import (
-    …
-    …
-    …
-    …
-    …
-    …
-    …
-    …
-    …
-    …
+    ANTHROPIC_CHAT_MODELS,
+    ANTHROPIC_REASONING_MODELS,
+    DEEPSEEK_CHAT_MODELS,
+    DEEPSEEK_REASONING_MODELS,
+    GOOGLE_CHAT_MODELS,
+    GOOGLE_REASONING_MODELS,
+    GROQ_CHAT_MODELS,
+    GROQ_REASONING_MODELS,
+    OPENAI_CHAT_MODELS,
+    OPENAI_REASONING_MODELS,
 )
 from .engine_anthropic_claudeX_chat import ClaudeXChatEngine
 from .engine_anthropic_claudeX_reasoning import ClaudeXReasoningEngine
@@ -31,22 +31,22 @@ ENGINE_MAPPING = {
 }
 
 __all__ = [
-    …
-    …
-    …
-    …
-    …
-    …
-    …
-    …
-    …
-    …
-    …
-    …
-    …
-    …
-    …
-    …
-    …
-    …
+    "ANTHROPIC_CHAT_MODELS",
+    "ANTHROPIC_REASONING_MODELS",
+    "DEEPSEEK_CHAT_MODELS",
+    "DEEPSEEK_REASONING_MODELS",
+    "ENGINE_MAPPING",
+    "GOOGLE_CHAT_MODELS",
+    "GOOGLE_REASONING_MODELS",
+    "GROQ_CHAT_MODELS",
+    "GROQ_REASONING_MODELS",
+    "OPENAI_CHAT_MODELS",
+    "OPENAI_REASONING_MODELS",
+    "ClaudeXChatEngine",
+    "ClaudeXReasoningEngine",
+    "DeepSeekXReasoningEngine",
+    "GPTXChatEngine",
+    "GPTXReasoningEngine",
+    "GeminiXReasoningEngine",
+    "GroqEngine",
 ]