symbolicai 0.20.1__py3-none-any.whl → 0.21.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- symai/__init__.py +1 -1
- symai/backend/engines/embedding/engine_llama_cpp.py +8 -7
- symai/backend/engines/neurosymbolic/engine_llama_cpp.py +78 -7
- symai/backend/engines/search/engine_openai.py +5 -0
- symai/functional.py +5 -2
- {symbolicai-0.20.1.dist-info → symbolicai-0.21.0.dist-info}/METADATA +1 -1
- {symbolicai-0.20.1.dist-info → symbolicai-0.21.0.dist-info}/RECORD +11 -11
- {symbolicai-0.20.1.dist-info → symbolicai-0.21.0.dist-info}/WHEEL +0 -0
- {symbolicai-0.20.1.dist-info → symbolicai-0.21.0.dist-info}/entry_points.txt +0 -0
- {symbolicai-0.20.1.dist-info → symbolicai-0.21.0.dist-info}/licenses/LICENSE +0 -0
- {symbolicai-0.20.1.dist-info → symbolicai-0.21.0.dist-info}/top_level.txt +0 -0
symai/__init__.py
CHANGED

symai/backend/engines/embedding/engine_llama_cpp.py
CHANGED

@@ -84,7 +84,7 @@ class LlamaCppEmbeddingEngine(Engine):
            asyncio.set_event_loop(new_loop)
            return new_loop

-    async def _arequest(self, text: str) -> dict:
+    async def _arequest(self, text: str, embd_normalize: str) -> dict:
        """Makes an async HTTP request to the llama.cpp server."""
        @retry(**self.retry_params)
        async def _make_request():
@@ -95,7 +95,7 @@ class LlamaCppEmbeddingEngine(Engine):
            async with aiohttp.ClientSession(timeout=timeout) as session:
                async with session.post(
                    f"{self.server_endpoint}/v1/embeddings",
-                    json={"content": text}
+                    json={"content": text, "embd_normalize": embd_normalize}
                ) as res:
                    if res.status != 200:
                        raise ValueError(f"Request failed with status code: {res.status}")
@@ -108,21 +108,22 @@ class LlamaCppEmbeddingEngine(Engine):
        kwargs = argument.kwargs

        inp = prepared_input if isinstance(prepared_input, list) else [prepared_input]
+        embd_normalize = kwargs.get('embd_normalize', -1)  # -1 = no normalization
+
        new_dim = kwargs.get('new_dim')
+        if new_dim:
+            raise NotImplementedError("new_dim is not yet supported")

        nest_asyncio.apply()
        loop = self._get_event_loop()

        try:
-            res = loop.run_until_complete(self._arequest(inp))
+            res = loop.run_until_complete(self._arequest(inp, embd_normalize))
        except Exception as e:
            raise ValueError(f"Request failed with error: {str(e)}")

-        if new_dim:
-            raise NotImplementedError("new_dim is not yet supported")
-
        if res is not None:
-            output = [r["embedding"] for r in res]
+            output = [r["embedding"] for r in res]  # B x 1 x D
        else:
            output = None
        metadata = {'raw_output': res}
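The new embd_normalize kwarg is forwarded verbatim to llama.cpp, where -1 leaves vectors unnormalized (as the engine's default comment notes) and 2 requests Euclidean (L2) normalization. A minimal sketch of the request shape the engine now sends, assuming a local llama.cpp server on port 8080:

    import asyncio
    import aiohttp

    async def embed(texts: list[str], embd_normalize: int = -1) -> list:
        """Mirror of the engine's request: POST the texts plus the normalization flag."""
        async with aiohttp.ClientSession() as session:
            async with session.post(
                "http://localhost:8080/v1/embeddings",  # assumed server endpoint
                json={"content": texts, "embd_normalize": embd_normalize},
            ) as res:
                if res.status != 200:
                    raise ValueError(f"Request failed with status code: {res.status}")
                data = await res.json()
        return [r["embedding"] for r in data]  # B x 1 x D, as in the engine

    vectors = asyncio.run(embed(["hello world"], embd_normalize=2))  # L2-normalized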
symai/backend/engines/neurosymbolic/engine_llama_cpp.py
CHANGED

@@ -26,8 +26,8 @@ class LlamaCppTokenizer:
    @staticmethod
    async def _encode(text: str) -> list[int]:
        async with aiohttp.ClientSession() as session:
-            async with session.post(f"{LlamaCppTokenizer._server_endpoint}/
-                "
+            async with session.post(f"{LlamaCppTokenizer._server_endpoint}/tokenize", json={
+                "content": text,
            }) as res:
                if res.status != 200:
                    CustomUserWarning(f"Request failed with status code: {res.status}", raise_with=ValueError)
@@ -46,13 +46,13 @@ class LlamaCppTokenizer:
    @staticmethod
    async def _decode(tokens: list[int]) -> str:
        async with aiohttp.ClientSession() as session:
-            async with session.post(f"{LlamaCppTokenizer._server_endpoint}/
+            async with session.post(f"{LlamaCppTokenizer._server_endpoint}/detokenize", json={
                "tokens": tokens,
            }) as res:
                if res.status != 200:
                    CustomUserWarning(f"Request failed with status code: {res.status}", raise_with=ValueError)
                res = await res.json()
-                return res['
+                return res['content']

    @staticmethod
    def decode(tokens: list[int]) -> str:
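Both helpers now target llama.cpp's native /tokenize and /detokenize routes with fixed payload keys. A standalone sketch of the same round trip, assuming a server at localhost:8080:

    import asyncio
    import aiohttp

    SERVER = "http://localhost:8080"  # assumed llama.cpp server address

    async def roundtrip(text: str) -> str:
        async with aiohttp.ClientSession() as session:
            # /tokenize answers {"tokens": [...]} for a {"content": ...} payload.
            async with session.post(f"{SERVER}/tokenize", json={"content": text}) as res:
                tokens = (await res.json())["tokens"]
            # /detokenize reverses it and answers {"content": "..."}.
            async with session.post(f"{SERVER}/detokenize", json={"tokens": tokens}) as res:
                return (await res.json())["content"]

    print(asyncio.run(roundtrip("hello world")))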
@@ -148,7 +148,7 @@ class LlamaCppEngine(Engine):
    def _prepare_request_payload(self, argument: Argument) -> dict:
        """Prepares the request payload from the argument."""
        kwargs = argument.kwargs
-        return {
+        payload = {
            "messages": argument.prop.prepared_input,
            "temperature": kwargs.get('temperature', 0.6),
            "frequency_penalty": kwargs.get('frequency_penalty', 0),
@@ -162,12 +162,28 @@ class LlamaCppEngine(Engine):
            "repeat_penalty": kwargs.get('repeat_penalty', 1),
            "logits_bias": kwargs.get('logits_bias'),
            "logprobs": kwargs.get('logprobs', False),
-            "functions": kwargs.get('functions'),
-            "function_call": kwargs.get('function_call'),
            "grammar": kwargs.get('grammar'),
            "response_format": kwargs.get('response_format'),
        }

+        model = SYMSERVER_CONFIG.get('-m') or SYMSERVER_CONFIG.get('--model')
+        if model:
+            payload["model"] = model
+
+        tools = kwargs.get('tools')
+        if tools:
+            payload["tools"] = tools
+
+        tool_choice = kwargs.get('tool_choice')
+        if tool_choice is not None:
+            payload["tool_choice"] = tool_choice
+
+        extra_body = kwargs.get('extra_body')
+        if isinstance(extra_body, dict):
+            payload.update(extra_body)
+
+        return payload
+
    async def _arequest(self, payload: dict) -> dict:
        """Makes an async HTTP request to the llama.cpp server."""
        @retry(**self.retry_params)
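The payload thus drops the deprecated functions/function_call keys in favor of OpenAI-style tools/tool_choice, and any extra_body dict is merged into the payload verbatim. A sketch of the caller-side kwargs, using a hypothetical get_weather tool and llama.cpp's cache_prompt switch as an extra_body passenger:

    kwargs = {
        "tools": [{
            "type": "function",
            "function": {
                "name": "get_weather",  # hypothetical tool for illustration
                "description": "Look up the current weather for a city.",
                "parameters": {
                    "type": "object",
                    "properties": {"city": {"type": "string"}},
                    "required": ["city"],
                },
            },
        }],
        "tool_choice": "auto",
        "extra_body": {"cache_prompt": True},  # copied into the payload as-is
    }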
@@ -187,6 +203,19 @@ class LlamaCppEngine(Engine):

        return await _make_request()

+    @classmethod
+    def _extract_thinking(cls, response):
+        """Extract reasoning traces from llama.cpp responses."""
+        if not isinstance(response, dict):
+            return None
+        choices = response.get('choices', [])
+        if not isinstance(choices, list) or not choices:
+            return None
+        for choice in choices:
+            if isinstance(choice, dict) and isinstance(choice.get('message'), dict):
+                return choice['message'].get('reasoning_content')
+        return None
+
    def forward(self, argument):
        payload = self._prepare_request_payload(argument)

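_extract_thinking returns the first reasoning_content it finds, the field llama.cpp fills when a reasoning model's trace is kept separate from the final answer. The response shape it expects, with an illustrative trace:

    response = {
        "choices": [{
            "message": {
                "content": "The answer is 4.",
                "reasoning_content": "2 + 2: add the units digits, no carry.",  # illustrative
            }
        }]
    }
    assert LlamaCppEngine._extract_thinking(response) == "2 + 2: add the units digits, no carry."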
@@ -200,11 +229,53 @@ class LlamaCppEngine(Engine):

        metadata = {'raw_output': res}

+        if payload.get('tools'):
+            metadata = self._process_tool_calls(res, metadata)
+
+        thinking = self._extract_thinking(res)
+        if thinking:
+            metadata['thinking'] = thinking
+
        output = [r['message']['content'] for r in res['choices']]
        output = output if isinstance(argument.prop.prepared_input, list) else output[0]

        return output, metadata

+    @staticmethod
+    def _process_tool_calls(res, metadata):
+        choices = res.get('choices') if isinstance(res, dict) else None
+        if not choices:
+            return metadata
+        hit = False
+        for choice in choices:
+            if not isinstance(choice, dict):
+                continue
+            message = choice.get('message') or {}
+            tool_calls = message.get('tool_calls') or []
+            if not tool_calls:
+                continue
+            for tool_call in tool_calls:
+                if not isinstance(tool_call, dict):
+                    continue
+                function = tool_call.get('function') or {}
+                if hit:
+                    CustomUserWarning("Multiple function calls detected in the response but only the first one will be processed.")
+                    return metadata
+                arguments = function.get('arguments')
+                try:
+                    args_dict = json.loads(arguments) if isinstance(arguments, str) else arguments or {}
+                except json.JSONDecodeError:
+                    args_dict = {}
+                metadata['function_call'] = {
+                    'name': function.get('name'),
+                    'arguments': args_dict or {}
+                }
+                hit = True
+                break
+            if hit:
+                break
+        return metadata
+
    def _prepare_raw_input(self, argument):
        if not argument.prop.processed_input:
            CustomUserWarning('Need to provide a prompt instruction to the engine if raw_input is enabled.', raise_with=ValueError)
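Only the first tool call is honored, and it still surfaces under the legacy function_call metadata key with its JSON arguments decoded. A sketch of the reduction, assuming LlamaCppEngine is importable:

    res = {
        "choices": [{
            "message": {
                "tool_calls": [{
                    "type": "function",
                    "function": {
                        "name": "get_weather",  # hypothetical tool
                        "arguments": '{"city": "Vienna"}',
                    },
                }]
            }
        }]
    }
    metadata = LlamaCppEngine._process_tool_calls(res, {"raw_output": res})
    # metadata["function_call"] == {"name": "get_weather", "arguments": {"city": "Vienna"}}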
symai/backend/engines/search/engine_openai.py
CHANGED

@@ -334,6 +334,7 @@ class GPTXSearchEngine(Engine):
        }

        self.model = kwargs.get('model', self.model)  # Important for MetadataTracker to work correctly
+
        payload = {
            "model": self.model,
            "input": messages,
@@ -341,6 +342,10 @@ class GPTXSearchEngine(Engine):
            "tool_choice": {"type": "web_search"} if self.model not in OPENAI_REASONING_MODELS else "auto"  # force the use of web search tool for non-reasoning models
        }

+        if self.model in OPENAI_REASONING_MODELS:
+            reasoning = kwargs.get('reasoning', {"effort": "low", "summary": "auto"})
+            payload['reasoning'] = reasoning
+
        try:
            res = self.client.responses.create(**payload)
            res = SearchResult(res.dict())
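For models in OPENAI_REASONING_MODELS the engine now attaches a reasoning block, defaulting to low effort with automatic summaries and overridable through the reasoning kwarg. A standalone sketch of the resulting Responses API call, with an assumed model name:

    from openai import OpenAI

    client = OpenAI()
    res = client.responses.create(
        model="o4-mini",  # assumed reasoning-capable model
        input="What changed in symbolicai 0.21.0?",
        tools=[{"type": "web_search"}],
        tool_choice="auto",  # reasoning models decide when to search
        reasoning={"effort": "low", "summary": "auto"},  # the engine's new default
    )
    print(res.output_text)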
symai/functional.py
CHANGED

@@ -171,7 +171,10 @@ def _execute_query_fallback(func, instance, argument, error=None, stack_trace=No
    This matches the fallback logic used in _process_query by handling errors consistently,
    providing error context to the fallback function, and maintaining the same return format.
    """
-    rsp = func(instance, error=error, stack_trace=stack_trace, *argument.args, **argument.signature_kwargs)
+    try:
+        rsp = func(instance, error=error, stack_trace=stack_trace, *argument.args, **argument.signature_kwargs)
+    except Exception:
+        raise error  # re-raise the original error
    if rsp is not None:
        # fallback was implemented
        rsp = dict(data=rsp, error=error, stack_trace=stack_trace)
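The fallback is now guarded: if the user-supplied fallback itself raises, the original engine error propagates rather than the fallback's own failure. A minimal mirror of that semantics, with hypothetical names:

    def run_with_fallback(primary, fallback):
        """If the fallback also fails, re-raise the original error."""
        try:
            return primary()
        except Exception as error:
            try:
                rsp = fallback(error=error)
            except Exception:
                raise error  # surface the original failure, not the fallback's
            return dict(data=rsp, error=error)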
@@ -242,7 +245,7 @@ def _process_query(
    func: Callable,
    constraints: List[Callable] = [],
    default: Optional[object] = None,
-    limit: int =
+    limit: int | None = None,
    trials: int = 1,
    pre_processors: Optional[List[PreProcessor]] = None,
    post_processors: Optional[List[PostProcessor]] = None,
{symbolicai-0.20.1.dist-info → symbolicai-0.21.0.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: symbolicai
-Version: 0.20.1
+Version: 0.21.0
 Summary: A Neurosymbolic Perspective on Large Language Models
 Author-email: Marius-Constantin Dinu <marius@extensity.ai>, Leoveanu-Condrei Claudiu <leo@extensity.ai>
 License: BSD 3-Clause License
{symbolicai-0.20.1.dist-info → symbolicai-0.21.0.dist-info}/RECORD
CHANGED

@@ -1,5 +1,5 @@
 symai/TERMS_OF_SERVICE.md,sha256=HN42UXVI_wAVDHjMShzy_k7xAsbjXaATNeMKcIte_eg,91409
-symai/__init__.py,sha256=
+symai/__init__.py,sha256=QrAr-FX1eF1p2a0kDDY-JUcpZgoJpvdGmpdvKJD7WQA,16464
 symai/chat.py,sha256=vqEe7NqSWdzr9ixkko_094SR1LIbgPLcZxQ8W7782N4,12775
 symai/components.py,sha256=vgIq-cC8rqZG9PAPUB52Y5RGFEKrxFUCWzqzrPzLNvw,52232
 symai/constraints.py,sha256=S1ywLB8nFQy4-beDoJz6IvLTiZHGR8Fu5RNTY4v5zG0,1641
@@ -7,7 +7,7 @@ symai/context.py,sha256=4M69MJOeWSdPTr2Y9teoNTs-nEvpzcAcr7900UgORXA,189
 symai/core.py,sha256=1g45AjJ5wkz1cNTbtoDbd8QlOUc-v-3sWNmDTxaeqY0,69041
 symai/core_ext.py,sha256=binru2AjB8K-arbNLiu1wnNodtFxgqk26b-iLVhPoSU,9322
 symai/exceptions.py,sha256=BxpxI8q3-7Uh_Kg9Xi2PhF6RR6CofxV1h8R07j4v47U,165
-symai/functional.py,sha256=
+symai/functional.py,sha256=0N3f5aQ8TjZ82k7GRl0X6sXVpfplQUNU3RFky3EgKho,18963
 symai/imports.py,sha256=npVGz9portPb3enIUtLYiwtdhSPVz6ctHLPreS6Jtvo,16354
 symai/interfaces.py,sha256=MwnRvd-0QrMc2t_MLv05ZZASeWEIgbWxadiVOGbPbOQ,2898
 symai/memory.py,sha256=Svie0ozSMOElMzcyAGnShc3VOQYpkiCEN4ZLoc2ofHM,3674
@@ -30,7 +30,7 @@ symai/backend/engines/crawler/engine_selenium.py,sha256=daCEXYh7WSlwbJypM-51rrOm
 symai/backend/engines/drawing/engine_bfl.py,sha256=dnw-V9DVrulYlhzwPF2Tu-2wfLBEJVvYt9CGq3SQnqA,4478
 symai/backend/engines/drawing/engine_dall_e.py,sha256=CKcMfy69kXSs2I92PqNfgRoz5LZYH-1nMhEVIRZKLFk,4896
 symai/backend/engines/drawing/engine_gpt_image.py,sha256=0rooYI-NrbpUeCRMzYMHPNzOCpr_-j7ShWzJ7_dZ9MU,7502
-symai/backend/engines/embedding/engine_llama_cpp.py,sha256=
+symai/backend/engines/embedding/engine_llama_cpp.py,sha256=H9rL-ZErvVU5W7nJTXrCSpudjhgQRONoQUNbjC-sA_Y,5182
 symai/backend/engines/embedding/engine_openai.py,sha256=0Tw45QvRbaH7jpOimx_e3IcvvJ50bv0Au8lxMCj5Hvo,3322
 symai/backend/engines/embedding/engine_plugin_embeddings.py,sha256=TLOaYAw_1AWMwqcHydP41aWIYSGhguOJgI0rMu1Fl00,623
 symai/backend/engines/execute/engine_python.py,sha256=3rPMBcrCrjggVm7JtGQjAywBZ6C_3TXXNwmKBjGFEqE,3752
@@ -50,13 +50,13 @@ symai/backend/engines/neurosymbolic/engine_deepseekX_reasoning.py,sha256=ZaZvAVp
 symai/backend/engines/neurosymbolic/engine_google_geminiX_reasoning.py,sha256=kxGWYsxnQkpsm40HB1MUGggWmrWz8avCY3jvNrohaKw,25622
 symai/backend/engines/neurosymbolic/engine_groq.py,sha256=CsPd2TOHgd-gAIRJvFbTTy_otUvMdDxJPH7DsfZAooM,12176
 symai/backend/engines/neurosymbolic/engine_huggingface.py,sha256=XIu9BnQo-J2flXFCCKwCJJmVozU9WDNkPndmpi-DlzE,7920
-symai/backend/engines/neurosymbolic/engine_llama_cpp.py,sha256=
+symai/backend/engines/neurosymbolic/engine_llama_cpp.py,sha256=DnF_f3nB5TYpHPAlwVJwi-Ax2L9JR978QGHCXv8ts7k,13833
 symai/backend/engines/neurosymbolic/engine_openai_gptX_chat.py,sha256=Y-auxUFC4W9dfRzzgI3_rbWbPiOx4xfvKS4sM0KxP40,25250
 symai/backend/engines/neurosymbolic/engine_openai_gptX_completion.py,sha256=YgxRoitmDz2de_W7rkhVXYEkDqTJQlgxK4f8tWlt88Q,13840
 symai/backend/engines/neurosymbolic/engine_openai_gptX_reasoning.py,sha256=QVbyZybUPSAQHiA66V6we2W2dAsk52g1kJ7kMdGqb9I,22951
 symai/backend/engines/ocr/engine_apilayer.py,sha256=hZo4lk0ECRIzaGEpmCSNjR5Xrh8mwkKMD2ddpdgioVU,2399
 symai/backend/engines/output/engine_stdout.py,sha256=2hhyhMHFJTfjVRaODYd_5XPnV9pT03URcpYbeMY_USU,951
-symai/backend/engines/search/engine_openai.py,sha256=
+symai/backend/engines/search/engine_openai.py,sha256=ps_BNwiC1K1NeBJ6oaoyRIDiYksOqi53J7gGKaobue8,14136
 symai/backend/engines/search/engine_perplexity.py,sha256=yxuhGaA38d1FRbLv6piLll0QDxCCyBVK6eeomjYNryM,4157
 symai/backend/engines/search/engine_serpapi.py,sha256=UqvGHs1J9BOv05C0FJUQjbz29_VuWncIkeDwlRPUilU,3698
 symai/backend/engines/speech_to_text/engine_local_whisper.py,sha256=LRsXliCpHDFPFaE-vPky3-DLkmYwmwe2mxfF0Brz4Wg,8220
@@ -154,9 +154,9 @@ symai/ops/primitives.py,sha256=EaB2Ekx9yGNDaQa3aKS5KpuEr5awAUbO3OcBbufI-l4,11072
 symai/server/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
 symai/server/huggingface_server.py,sha256=UpSBflnQaenDjY1AAn5LUYeg5J4gJLWiMuC5DcoIV3E,8743
 symai/server/llama_cpp_server.py,sha256=qVCldTdcQhK2YCu7sDNSYziu1p2AQieqMFfY028-yOc,2049
-symbolicai-0.20.1.dist-info/licenses/LICENSE,sha256=
-symbolicai-0.20.1.dist-info/METADATA,sha256=
-symbolicai-0.20.1.dist-info/WHEEL,sha256=
-symbolicai-0.20.1.dist-info/entry_points.txt,sha256=
-symbolicai-0.20.1.dist-info/top_level.txt,sha256=
-symbolicai-0.20.1.dist-info/RECORD,,
+symbolicai-0.21.0.dist-info/licenses/LICENSE,sha256=9vRFudlJ1ghVfra5lcCUIYQCqnZSYcBLjLHbGRsrQCs,1505
+symbolicai-0.21.0.dist-info/METADATA,sha256=DvZzrNpr8abyJkWHr2GBW1oyM6WgZafUVoUfJzZBX28,23122
+symbolicai-0.21.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+symbolicai-0.21.0.dist-info/entry_points.txt,sha256=JV5sdydIfUZdDF6QBEQHiZHod6XNPjCjpWQrXh7gTAw,261
+symbolicai-0.21.0.dist-info/top_level.txt,sha256=bOoIDfpDIvCQtQgXcwVKJvxAKwsxpxo2IL4z92rNJjw,6
+symbolicai-0.21.0.dist-info/RECORD,,
{symbolicai-0.20.1.dist-info → symbolicai-0.21.0.dist-info}/WHEEL
File without changes

{symbolicai-0.20.1.dist-info → symbolicai-0.21.0.dist-info}/entry_points.txt
File without changes

{symbolicai-0.20.1.dist-info → symbolicai-0.21.0.dist-info}/licenses/LICENSE
File without changes

{symbolicai-0.20.1.dist-info → symbolicai-0.21.0.dist-info}/top_level.txt
File without changes