symbolicai 0.20.2__py3-none-any.whl → 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- symai/__init__.py +96 -64
- symai/backend/base.py +93 -80
- symai/backend/engines/drawing/engine_bfl.py +12 -11
- symai/backend/engines/drawing/engine_gpt_image.py +108 -87
- symai/backend/engines/embedding/engine_llama_cpp.py +25 -28
- symai/backend/engines/embedding/engine_openai.py +3 -5
- symai/backend/engines/execute/engine_python.py +6 -5
- symai/backend/engines/files/engine_io.py +74 -67
- symai/backend/engines/imagecaptioning/engine_blip2.py +3 -3
- symai/backend/engines/imagecaptioning/engine_llavacpp_client.py +54 -38
- symai/backend/engines/index/engine_pinecone.py +23 -24
- symai/backend/engines/index/engine_vectordb.py +16 -14
- symai/backend/engines/lean/engine_lean4.py +38 -34
- symai/backend/engines/neurosymbolic/__init__.py +41 -13
- symai/backend/engines/neurosymbolic/engine_anthropic_claudeX_chat.py +262 -182
- symai/backend/engines/neurosymbolic/engine_anthropic_claudeX_reasoning.py +263 -191
- symai/backend/engines/neurosymbolic/engine_deepseekX_reasoning.py +53 -49
- symai/backend/engines/neurosymbolic/engine_google_geminiX_reasoning.py +212 -211
- symai/backend/engines/neurosymbolic/engine_groq.py +87 -63
- symai/backend/engines/neurosymbolic/engine_huggingface.py +21 -24
- symai/backend/engines/neurosymbolic/engine_llama_cpp.py +117 -48
- symai/backend/engines/neurosymbolic/engine_openai_gptX_chat.py +256 -229
- symai/backend/engines/neurosymbolic/engine_openai_gptX_reasoning.py +270 -150
- symai/backend/engines/ocr/engine_apilayer.py +6 -8
- symai/backend/engines/output/engine_stdout.py +1 -4
- symai/backend/engines/search/engine_openai.py +7 -7
- symai/backend/engines/search/engine_perplexity.py +5 -5
- symai/backend/engines/search/engine_serpapi.py +12 -14
- symai/backend/engines/speech_to_text/engine_local_whisper.py +20 -27
- symai/backend/engines/symbolic/engine_wolframalpha.py +3 -3
- symai/backend/engines/text_to_speech/engine_openai.py +5 -7
- symai/backend/engines/text_vision/engine_clip.py +7 -11
- symai/backend/engines/userinput/engine_console.py +3 -3
- symai/backend/engines/webscraping/engine_requests.py +81 -48
- symai/backend/mixin/__init__.py +13 -0
- symai/backend/mixin/anthropic.py +4 -2
- symai/backend/mixin/deepseek.py +2 -0
- symai/backend/mixin/google.py +2 -0
- symai/backend/mixin/openai.py +11 -3
- symai/backend/settings.py +83 -16
- symai/chat.py +101 -78
- symai/collect/__init__.py +7 -1
- symai/collect/dynamic.py +77 -69
- symai/collect/pipeline.py +35 -27
- symai/collect/stats.py +75 -63
- symai/components.py +198 -169
- symai/constraints.py +15 -12
- symai/core.py +698 -359
- symai/core_ext.py +32 -34
- symai/endpoints/api.py +80 -73
- symai/extended/.DS_Store +0 -0
- symai/extended/__init__.py +46 -12
- symai/extended/api_builder.py +11 -8
- symai/extended/arxiv_pdf_parser.py +13 -12
- symai/extended/bibtex_parser.py +2 -3
- symai/extended/conversation.py +101 -90
- symai/extended/document.py +17 -10
- symai/extended/file_merger.py +18 -13
- symai/extended/graph.py +18 -13
- symai/extended/html_style_template.py +2 -4
- symai/extended/interfaces/blip_2.py +1 -2
- symai/extended/interfaces/clip.py +1 -2
- symai/extended/interfaces/console.py +7 -1
- symai/extended/interfaces/dall_e.py +1 -1
- symai/extended/interfaces/flux.py +1 -1
- symai/extended/interfaces/gpt_image.py +1 -1
- symai/extended/interfaces/input.py +1 -1
- symai/extended/interfaces/llava.py +0 -1
- symai/extended/interfaces/naive_vectordb.py +7 -8
- symai/extended/interfaces/naive_webscraping.py +1 -1
- symai/extended/interfaces/ocr.py +1 -1
- symai/extended/interfaces/pinecone.py +6 -5
- symai/extended/interfaces/serpapi.py +1 -1
- symai/extended/interfaces/terminal.py +2 -3
- symai/extended/interfaces/tts.py +1 -1
- symai/extended/interfaces/whisper.py +1 -1
- symai/extended/interfaces/wolframalpha.py +1 -1
- symai/extended/metrics/__init__.py +11 -1
- symai/extended/metrics/similarity.py +11 -13
- symai/extended/os_command.py +17 -16
- symai/extended/packages/__init__.py +29 -3
- symai/extended/packages/symdev.py +19 -16
- symai/extended/packages/sympkg.py +12 -9
- symai/extended/packages/symrun.py +21 -19
- symai/extended/repo_cloner.py +11 -10
- symai/extended/seo_query_optimizer.py +1 -2
- symai/extended/solver.py +20 -23
- symai/extended/summarizer.py +4 -3
- symai/extended/taypan_interpreter.py +10 -12
- symai/extended/vectordb.py +99 -82
- symai/formatter/__init__.py +9 -1
- symai/formatter/formatter.py +12 -16
- symai/formatter/regex.py +62 -63
- symai/functional.py +176 -122
- symai/imports.py +136 -127
- symai/interfaces.py +56 -27
- symai/memory.py +14 -13
- symai/misc/console.py +49 -39
- symai/misc/loader.py +5 -3
- symai/models/__init__.py +17 -1
- symai/models/base.py +269 -181
- symai/models/errors.py +0 -1
- symai/ops/__init__.py +32 -22
- symai/ops/measures.py +11 -15
- symai/ops/primitives.py +348 -228
- symai/post_processors.py +32 -28
- symai/pre_processors.py +39 -41
- symai/processor.py +6 -4
- symai/prompts.py +59 -45
- symai/server/huggingface_server.py +23 -20
- symai/server/llama_cpp_server.py +7 -5
- symai/shell.py +3 -4
- symai/shellsv.py +499 -375
- symai/strategy.py +517 -287
- symai/symbol.py +111 -116
- symai/utils.py +42 -36
- {symbolicai-0.20.2.dist-info → symbolicai-1.0.0.dist-info}/METADATA +4 -2
- symbolicai-1.0.0.dist-info/RECORD +163 -0
- symbolicai-0.20.2.dist-info/RECORD +0 -162
- {symbolicai-0.20.2.dist-info → symbolicai-1.0.0.dist-info}/WHEEL +0 -0
- {symbolicai-0.20.2.dist-info → symbolicai-1.0.0.dist-info}/entry_points.txt +0 -0
- {symbolicai-0.20.2.dist-info → symbolicai-1.0.0.dist-info}/licenses/LICENSE +0 -0
- {symbolicai-0.20.2.dist-info → symbolicai-1.0.0.dist-info}/top_level.txt +0 -0
symai/backend/engines/neurosymbolic/engine_deepseekX_reasoning.py

@@ -1,15 +1,10 @@
 import logging
-import re
 from copy import deepcopy
-from typing import List, Optional
 
-from annotated_types import Not
 from openai import OpenAI
 
 from ....components import SelfPrompt
-from ....
-from ....symbol import Symbol
-from ....utils import CustomUserWarning, encode_media_frames
+from ....utils import UserMessage
 from ...base import Engine
 from ...mixin.deepseek import DeepSeekMixin
 from ...settings import SYMAI_CONFIG
@@ -22,7 +17,7 @@ logging.getLogger("httpcore").setLevel(logging.ERROR)
 
 
 class DeepSeekXReasoningEngine(Engine, DeepSeekMixin):
-    def __init__(self, api_key:
+    def __init__(self, api_key: str | None = None, model: str | None = None):
         super().__init__()
         self.config = deepcopy(SYMAI_CONFIG)
         # In case we use EngineRepository.register to inject the api_key and model => dynamically change the engine at runtime
@@ -42,7 +37,7 @@ class DeepSeekXReasoningEngine(Engine, DeepSeekMixin):
         try:
             self.client = OpenAI(api_key=self.api_key, base_url="https://api.deepseek.com")
         except Exception as e:
-
+            UserMessage(f'Failed to initialize the DeepSeek client. Please check your library version. Caused by: {e}', raise_with=RuntimeError)
 
     def id(self) -> str:
         if self.config.get('NEUROSYMBOLIC_ENGINE_MODEL') and \
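Judging from the call sites in this file, the new UserMessage helper replaces the old CustomUserWarning import: it surfaces a user-facing message and, when raise_with is supplied, raises the message as that exception type. A minimal sketch of a helper with that shape, assuming only what the call sites show (the name user_message and its body are illustrative, not symai's actual implementation):

    import logging

    logger = logging.getLogger(__name__)

    def user_message(message: str, raise_with: type[Exception] | None = None) -> None:
        # Surface the message to the user; with raise_with, escalate it
        # into an exception of that type (ValueError, RuntimeError, ...).
        logger.warning(message)
        if raise_with is not None:
            raise raise_with(message)

This single shape covers all three usages visible in the diff: a plain warning (UserMessage(msg)), a hard failure (raise_with=ValueError), and not-implemented stubs (raise_with=NotImplementedError).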
@@ -59,14 +54,14 @@ class DeepSeekXReasoningEngine(Engine, DeepSeekMixin):
         if 'seed' in kwargs:
             self.seed = kwargs['seed']
 
-    def compute_required_tokens(self,
-
+    def compute_required_tokens(self, _messages):
+        UserMessage('Method "compute_required_tokens" not implemented for DeepSeekXReasoningEngine.', raise_with=NotImplementedError)
 
-    def compute_remaining_tokens(self,
-
+    def compute_remaining_tokens(self, _prompts: list) -> int:
+        UserMessage('Method "compute_remaining_tokens" not implemented for DeepSeekXReasoningEngine.', raise_with=NotImplementedError)
 
-    def truncate(self,
-
+    def truncate(self, _prompts: list[dict], _truncation_percentage: float | None, _truncation_type: str) -> list[dict]:
+        UserMessage('Method "truncate" not implemented for DeepSeekXReasoningEngine.', raise_with=NotImplementedError)
 
     def forward(self, argument):
         kwargs = argument.kwargs
@@ -80,18 +75,18 @@ class DeepSeekXReasoningEngine(Engine, DeepSeekMixin):
         except Exception as e:
             if self.api_key is None or self.api_key == '':
                 msg = 'DeepSeek API key is not set. Please set it in the config file or pass it as an argument to the command method.'
-
+                UserMessage(msg)
                 if self.config['NEUROSYMBOLIC_ENGINE_API_KEY'] is None or self.config['NEUROSYMBOLIC_ENGINE_API_KEY'] == '':
-
+                    UserMessage(msg, raise_with=ValueError)
                 self.api_key = self.config['NEUROSYMBOLIC_ENGINE_API_KEY']
 
             callback = self.client.chat.completions.create
-            kwargs['model'] = kwargs
+            kwargs['model'] = kwargs.get('model', self.model)
 
             if except_remedy is not None:
                 res = except_remedy(self, e, callback, argument)
             else:
-
+                UserMessage(f'Error during generation. Caused by: {e}', raise_with=ValueError)
 
         reasoning_content = res.choices[0].message.reasoning_content
         content = res.choices[0].message.content
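The except_remedy hook shown above receives the engine, the caught exception, the original completion callback, and the argument object, and is expected to return a completions response. A sketch of one plausible remedy under those assumptions (the retry policy and keyword usage are illustrative, not part of symai):

    import time

    def retry_remedy(engine, error, callback, argument, attempts: int = 3):
        # Retry the original chat.completions.create callback with
        # exponential backoff before giving up.
        last = error
        for i in range(attempts):
            try:
                return callback(model=engine.model, messages=argument.prop.prepared_input)
            except Exception as e:  # remedy of last resort
                last = e
                time.sleep(2 ** i)
        raise last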
@@ -101,36 +96,32 @@ class DeepSeekXReasoningEngine(Engine, DeepSeekMixin):
 
     def _prepare_raw_input(self, argument):
         if not argument.prop.processed_input:
-
+            UserMessage('A prompt instruction is required for DeepSeekXReasoningEngine when raw_input is enabled.', raise_with=ValueError)
         value = argument.prop.processed_input
         # convert to dict if not already
-        if
-            if
+        if not isinstance(value, list):
+            if not isinstance(value, dict):
                 value = {'role': 'user', 'content': str(value)}
             value = [value]
         return value
 
-    def
-        if argument.prop.raw_input:
-            argument.prop.prepared_input = self._prepare_raw_input(argument)
-            return
-
+    def _build_system_prompt(self, argument):
         _non_verbose_output = """<META_INSTRUCTION/>\nYou do not output anything else, like verbose preambles or post explanation, such as "Sure, let me...", "Hope that was helpful...", "Yes, I can help you with that...", etc. Consider well formatted output, e.g. for sentences use punctuation, spaces etc. or for code use indentation, etc. Never add meta instructions information to your output!\n\n"""
-        user: str = ""
         system: str = ""
+        prop = argument.prop
 
-        if
+        if prop.suppress_verbose_output:
             system += _non_verbose_output
         system = f'{system}\n' if system and len(system) > 0 else ''
 
-        if
-            _rsp_fmt =
+        if prop.response_format:
+            _rsp_fmt = prop.response_format
             if not (_rsp_fmt.get('type') is not None):
-
+                UserMessage('Response format type is required! Expected format `{"type": "json_object"}` or other supported types.', raise_with=AssertionError)
             system += _non_verbose_output
             system += f'<RESPONSE_FORMAT/>\n{_rsp_fmt["type"]}\n\n'
 
-        ref =
+        ref = prop.instance
         static_ctxt, dyn_ctxt = ref.global_context
         if len(static_ctxt) > 0:
             system += f"<STATIC CONTEXT/>\n{static_ctxt}\n\n"
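The raw-input path above normalizes whatever processed_input holds into a list of chat messages: a plain value becomes a single user message, a lone dict is wrapped in a list, and a list passes through untouched. A standalone equivalent of that normalization, for illustration:

    def normalize_messages(value):
        # Mirrors _prepare_raw_input: wrap non-dict values as a user
        # message, then ensure the result is a list of message dicts.
        if not isinstance(value, list):
            if not isinstance(value, dict):
                value = {'role': 'user', 'content': str(value)}
            value = [value]
        return value

    assert normalize_messages('hi') == [{'role': 'user', 'content': 'hi'}]
    assert normalize_messages({'role': 'system', 'content': 's'}) == [{'role': 'system', 'content': 's'}]
    assert normalize_messages([{'role': 'user', 'content': 'x'}]) == [{'role': 'user', 'content': 'x'}]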
@@ -138,36 +129,49 @@ class DeepSeekXReasoningEngine(Engine, DeepSeekMixin):
         if len(dyn_ctxt) > 0:
             system += f"<DYNAMIC CONTEXT/>\n{dyn_ctxt}\n\n"
 
-        payload =
-        if
-            system += f"<ADDITIONAL CONTEXT/>\n{
+        payload = prop.payload
+        if prop.payload:
+            system += f"<ADDITIONAL CONTEXT/>\n{payload!s}\n\n"
 
-        examples:
+        examples: list[str] = prop.examples
         if examples and len(examples) > 0:
-            system += f"<EXAMPLES/>\n{
+            system += f"<EXAMPLES/>\n{examples!s}\n\n"
 
-        if
-            val = str(
+        if prop.prompt is not None and len(prop.prompt) > 0:
+            val = str(prop.prompt)
             system += f"<INSTRUCTION/>\n{val}\n\n"
 
-
+        if prop.template_suffix:
+            system += f' You will only generate content for the placeholder `{prop.template_suffix!s}` following the instructions and the provided context information.\n\n'
 
-
-            system += f' You will only generate content for the placeholder `{str(argument.prop.template_suffix)}` following the instructions and the provided context information.\n\n'
+        return system
 
-
+    def _build_user_prompt(self, argument):
+        return {"role": "user", "content": f"{argument.prop.processed_input!s}"}
 
-
-
+    def _apply_self_prompt(self, argument, system, user_prompt):
+        prop = argument.prop
+        if prop.instance._kwargs.get('self_prompt', False) or prop.self_prompt:
             self_prompter = SelfPrompt()
 
-            res = self_prompter({'user':
+            res = self_prompter({'user': user_prompt['content'], 'system': system})
             if res is None:
-
+                UserMessage("Self-prompting failed for DeepSeekXReasoningEngine.", raise_with=ValueError)
 
             user_prompt = { "role": "user", "content": res['user'] }
             system = res['system']
 
+        return system, user_prompt
+
+    def prepare(self, argument):
+        if argument.prop.raw_input:
+            argument.prop.prepared_input = self._prepare_raw_input(argument)
+            return
+
+        system = self._build_system_prompt(argument)
+        user_prompt = self._build_user_prompt(argument)
+        system, user_prompt = self._apply_self_prompt(argument, system, user_prompt)
+
         argument.prop.prepared_input = [
             { "role": "system", "content": system },
             user_prompt,
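After this refactor, prepare composes the tagged system sections built by _build_system_prompt and the user message from _build_user_prompt into the standard two-message chat payload. Assuming a plain instruction and no self-prompting, the prepared input takes this shape (contents illustrative):

    prepared_input = [
        {"role": "system", "content": "<STATIC CONTEXT/>\n...\n\n<INSTRUCTION/>\nSummarize the text.\n\n"},
        {"role": "user", "content": "The text to summarize ..."},
    ]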
@@ -177,8 +181,8 @@ class DeepSeekXReasoningEngine(Engine, DeepSeekMixin):
         """Prepares the request payload from the argument."""
         kwargs = argument.kwargs
         # 16/03/2025
-        # Not Supported Features
-        # Not Supported Parameters
+        # Not Supported Features: Function Call、Json Output、FIM (Beta)
+        # Not Supported Parameters: temperature、top_p、presence_penalty、frequency_penalty、logprobs、top_logprobs
         return {
             "model": kwargs.get('model', self.model),
             "seed": kwargs.get('seed', self.seed),