symbolicai 0.17.5__py3-none-any.whl → 0.18.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- symai/__init__.py +1 -1
- symai/backend/engines/neurosymbolic/__init__.py +4 -0
- symai/backend/engines/neurosymbolic/engine_groq.py +10 -25
- symai/backend/engines/search/engine_openai.py +185 -35
- symai/backend/mixin/__init__.py +2 -0
- symai/backend/mixin/groq.py +10 -0
- symai/components.py +18 -11
- symai/misc/console.py +5 -5
- symai/shellsv.py +1 -0
- symai/utils.py +3 -4
- {symbolicai-0.17.5.dist-info → symbolicai-0.18.0.dist-info}/METADATA +1 -1
- {symbolicai-0.17.5.dist-info → symbolicai-0.18.0.dist-info}/RECORD +15 -14
- {symbolicai-0.17.5.dist-info → symbolicai-0.18.0.dist-info}/WHEEL +0 -0
- {symbolicai-0.17.5.dist-info → symbolicai-0.18.0.dist-info}/entry_points.txt +0 -0
- {symbolicai-0.17.5.dist-info → symbolicai-0.18.0.dist-info}/top_level.txt +0 -0
symai/backend/engines/neurosymbolic/__init__.py
CHANGED

@@ -1,11 +1,13 @@
 from ...mixin import (ANTHROPIC_CHAT_MODELS, ANTHROPIC_REASONING_MODELS,
                       DEEPSEEK_CHAT_MODELS, DEEPSEEK_REASONING_MODELS,
                       GOOGLE_CHAT_MODELS, GOOGLE_REASONING_MODELS,
+                      GROQ_CHAT_MODELS, GROQ_REASONING_MODELS,
                       OPENAI_CHAT_MODELS, OPENAI_REASONING_MODELS)
 from .engine_anthropic_claudeX_chat import ClaudeXChatEngine
 from .engine_anthropic_claudeX_reasoning import ClaudeXReasoningEngine
 from .engine_deepseekX_reasoning import DeepSeekXReasoningEngine
 from .engine_google_geminiX_reasoning import GeminiXReasoningEngine
+from .engine_groq import GroqEngine
 from .engine_openai_gptX_chat import GPTXChatEngine
 from .engine_openai_gptX_reasoning import GPTXReasoningEngine

@@ -17,4 +19,6 @@ ENGINE_MAPPING = {
     **{model_name: GeminiXReasoningEngine for model_name in GOOGLE_REASONING_MODELS},
     **{model_name: GPTXChatEngine for model_name in OPENAI_CHAT_MODELS},
     **{model_name: GPTXReasoningEngine for model_name in OPENAI_REASONING_MODELS},
+    **{model_name: GroqEngine for model_name in GROQ_CHAT_MODELS},
+    **{model_name: GroqEngine for model_name in GROQ_REASONING_MODELS},
 }
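The two new mapping entries route every Groq model name to the same engine class. A minimal, self-contained sketch of this registration pattern; the model names below are placeholders, not taken from this diff:

    # Dict comprehensions unpacked with ** merge each provider's model list
    # into a single model-name -> engine-class lookup table.
    class GPTXChatEngine: ...
    class GroqEngine: ...

    OPENAI_CHAT_MODELS = ["gpt-4.1"]                     # placeholder entries
    GROQ_CHAT_MODELS = ["groq:llama-3.3-70b-versatile"]  # placeholder entries

    ENGINE_MAPPING = {
        **{model_name: GPTXChatEngine for model_name in OPENAI_CHAT_MODELS},
        **{model_name: GroqEngine for model_name in GROQ_CHAT_MODELS},
    }
    assert ENGINE_MAPPING["groq:llama-3.3-70b-versatile"] is GroqEngine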
symai/backend/engines/neurosymbolic/engine_groq.py
CHANGED

@@ -29,7 +29,7 @@ class GroqEngine(Engine):
         if self.id() != 'neurosymbolic':
             return # do not initialize if not neurosymbolic; avoids conflict with llama.cpp check in EngineRepository.register_from_package
         openai.api_key = self.config['NEUROSYMBOLIC_ENGINE_API_KEY']
-        self.model = self.config['NEUROSYMBOLIC_ENGINE_MODEL']
+        self.model = self.config['NEUROSYMBOLIC_ENGINE_MODEL'] # Keep the original config name to avoid confusion in downstream tasks
         self.seed = None
         self.name = self.__class__.__name__

@@ -49,7 +49,7 @@ class GroqEngine(Engine):
         if 'NEUROSYMBOLIC_ENGINE_API_KEY' in kwargs:
             openai.api_key = kwargs['NEUROSYMBOLIC_ENGINE_API_KEY']
         if 'NEUROSYMBOLIC_ENGINE_MODEL' in kwargs:
-            self.model = kwargs['NEUROSYMBOLIC_ENGINE_MODEL']
+            self.model = kwargs['NEUROSYMBOLIC_ENGINE_MODEL']
         if 'seed' in kwargs:
             self.seed = kwargs['seed']

@@ -59,18 +59,9 @@ class GroqEngine(Engine):
     def compute_remaining_tokens(self, prompts: list) -> int:
         raise NotImplementedError("Token counting not implemented for this engine.")

-    def
-        """Handle
-
-        pattern = r'<<vision:(.*?):>>'
-        return re.findall(pattern, text)
-        raise NotImplementedError("Image content handling not implemented for this engine.")
-
-
-    def _remove_vision_pattern(self, text: str) -> str:
-        """Remove vision patterns from text."""
-        pattern = r'<<vision:(.*?):>>'
-        return re.sub(pattern, '', text)
+    def _handle_prefix(self, model_name: str) -> str:
+        """Handle prefix for model name."""
+        return model_name.replace('groq:', '')

     def _extract_thinking_content(self, output: list[str]) -> tuple[str | None, list[str]]:
         """Extract thinking content from model output if present and return cleaned output."""

@@ -115,7 +106,7 @@ class GroqEngine(Engine):
             openai.api_key = self.config['NEUROSYMBOLIC_ENGINE_API_KEY']

         callback = self.client.chat.completions.create
-        kwargs['model'] = kwargs['model'] if 'model' in kwargs else self.model
+        kwargs['model'] = self._handle_prefix(kwargs['model']) if 'model' in kwargs else self._handle_prefix(self.model)

         if except_remedy is not None:
             res = except_remedy(self, e, callback, argument)

@@ -160,12 +151,6 @@ class GroqEngine(Engine):
         if argument.prop.response_format:
             _rsp_fmt = argument.prop.response_format
             assert _rsp_fmt.get('type') is not None, 'Expected format `{ "type": "json_object" }`! We are using the OpenAI compatible API for Groq. See more here: https://console.groq.com/docs/tool-use'
-            if _rsp_fmt["type"] == "json_object":
-                # OpenAI docs:
-                # "Important: when using JSON mode, you must also instruct the model
-                # to produce JSON yourself via a system or user message"
-                # Assuming this stays true even for this engine
-                system += f'<RESPONSE_FORMAT/>\nYou are a helpful assistant designed to output JSON.\n\n'

         ref = argument.prop.instance
         static_ctxt, dyn_ctxt = ref.global_context

@@ -256,11 +241,11 @@ class GroqEngine(Engine):

         payload = {
             "messages": messages,
-            "model": kwargs.get('model', self.model),
+            "model": self._handle_prefix(kwargs.get('model', self.model)),
             "seed": kwargs.get('seed', self.seed),
             "max_completion_tokens": kwargs.get('max_completion_tokens'),
             "stop": kwargs.get('stop'),
-            "temperature": kwargs.get('temperature',
+            "temperature": kwargs.get('temperature', 1), # Default temperature for gpt-oss-120b
             "frequency_penalty": kwargs.get('frequency_penalty', 0),
             "presence_penalty": kwargs.get('presence_penalty', 0),
             "reasoning_effort": kwargs.get('reasoning_effort'), # Field available only for qwen3 models

@@ -268,11 +253,11 @@ class GroqEngine(Engine):
             "top_p": kwargs.get('top_p', 1),
             "n": n,
             "tools": kwargs.get('tools'),
-            "tool_choice": kwargs.get('tool_choice', 'none'),
+            "tool_choice": kwargs.get('tool_choice', 'auto' if kwargs.get('tools') else 'none'),
             "response_format": kwargs.get('response_format'),
         }

-        if not self.model.startswith('qwen'):
+        if not self._handle_prefix(self.model).startswith('qwen'):
             del payload['reasoning_effort']

         return payload
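The recurring `self._handle_prefix(...)` calls above implement one rule: the config keeps the `groq:`-prefixed model name for engine routing, but the bare name is what gets sent to the API. A standalone sketch of that rule (the model name is illustrative):

    def handle_prefix(model_name: str) -> str:
        """Strip the engine-routing prefix before calling the Groq API."""
        return model_name.replace('groq:', '')

    assert handle_prefix('groq:qwen/qwen3-32b') == 'qwen/qwen3-32b'
    assert handle_prefix('qwen/qwen3-32b') == 'qwen/qwen3-32b'  # no-op without prefix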
symai/backend/engines/search/engine_openai.py
CHANGED

@@ -1,14 +1,18 @@
+import hashlib
 import json
 import logging
-import
+import re
 from copy import deepcopy
 from dataclasses import dataclass
+from urllib.parse import parse_qsl, urlencode, urlsplit, urlunsplit
+
+from openai import OpenAI

 from ....symbol import Result
 from ....utils import CustomUserWarning
 from ...base import Engine
-from ...settings import SYMAI_CONFIG
 from ...mixin import OPENAI_CHAT_MODELS, OPENAI_REASONING_MODELS
+from ...settings import SYMAI_CONFIG

 logging.getLogger("requests").setLevel(logging.ERROR)
 logging.getLogger("urllib3").setLevel(logging.ERROR)
@@ -16,9 +20,13 @@ logging.getLogger("httpx").setLevel(logging.ERROR)
 logging.getLogger("httpcore").setLevel(logging.ERROR)


+TRACKING_KEYS = {
+    "utm_source" # so far I've only seen this one
+}
+
 @dataclass
 class Citation:
-    id:
+    id: int
     title: str
     url: str
     start: int
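With `id` now typed as `int`, citations are numbered in order of first appearance and rendered inline as `[id] (title)` markers (see `_insert_citation_markers` below). A hypothetical instance, assuming the dataclass also carries the `end` field used by its constructor later in this diff:

    from dataclasses import dataclass

    @dataclass
    class Citation:
        id: int      # sequential index, assigned in order of first appearance
        title: str
        url: str
        start: int   # span of the inline marker in the cleaned output text
        end: int

    c = Citation(id=1, title="Example Domain", url="https://example.com/", start=120, end=141)
    print(f"[{c.id}] ({c.title})")  # the marker format inserted into the text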
@@ -34,42 +42,187 @@ class SearchResult(Result):
         if value.get('error'):
             CustomUserWarning(value['error'], raise_with=ValueError)
         try:
-
-
-
-
-
-
-
-
-
-
-
-                        url=annotation.get('url', ''),
-                    )
-                    if citation not in citations:
-                        citations.append(citation)
-            self._value = output['content'][0]['text']
-            delta = 0
-            for citation in citations:
-                self._value = self._value[:citation.start - delta] + citation.id + self._value[citation.end - delta:]
-                delta += (citation.end - citation.start) - len(citation.id)
-            self._citations = citations
+            text, annotations = self._extract_text_and_annotations(value)
+            if text is None:
+                self._value = None
+                self._citations = []
+                return
+            replaced_text, ordered, starts_ends = self._insert_citation_markers(text, annotations)
+            self._value = replaced_text
+            self._citations = [
+                Citation(id=cid, title=title, url=url, start=starts_ends[cid][0], end=starts_ends[cid][1])
+                for cid, title, url in ordered
+            ]

         except Exception as e:
             self._value = None
             CustomUserWarning(f"Failed to parse response: {e}", raise_with=ValueError)

+    def _extract_text(self, value) -> str | None:
+        if isinstance(value.get('output_text'), str) and value.get('output_text'):
+            return value.get('output_text')
+        text = None
+        for output in value.get('output', []):
+            if output.get('type') == 'message' and output.get('content'):
+                content0 = output['content'][0]
+                if 'text' in content0 and content0['text']:
+                    text = content0['text']
+        return text
+
+    def _extract_text_and_annotations(self, value):
+        segments = []
+        global_annotations = []
+        pos = 0
+        for output in value.get('output', []) or []:
+            if output.get('type') != 'message' or not output.get('content'):
+                continue
+            for content in output.get('content', []) or []:
+                seg_text = content.get('text') or ''
+                if not isinstance(seg_text, str):
+                    continue
+                for ann in (content.get('annotations') or []):
+                    if ann.get('type') == 'url_citation' and ann.get('url'):
+                        start = ann.get('start_index', 0)
+                        end = ann.get('end_index', 0)
+                        global_annotations.append({
+                            'type': 'url_citation',
+                            'url': ann.get('url'),
+                            'title': (ann.get('title') or '').strip(),
+                            'start_index': pos + int(start),
+                            'end_index': pos + int(end),
+                        })
+                segments.append(seg_text)
+                pos += len(seg_text)
+
+        built_text = ''.join(segments) if segments else None
+        # Prefer top-level output_text if present AND segments are empty (no way to compute indices)
+        if not built_text and isinstance(value.get('output_text'), str):
+            return value.get('output_text'), []
+        return built_text, global_annotations
+
+    def _normalize_url(self, u: str) -> str:
+        parts = urlsplit(u)
+        scheme = parts.scheme.lower()
+        netloc = parts.netloc.lower()
+        path = parts.path.rstrip('/') or '/'
+        q = []
+        for k, v in parse_qsl(parts.query, keep_blank_values=True):
+            kl = k.lower()
+            if kl in TRACKING_KEYS or kl.startswith('utm_'):
+                continue
+            q.append((k, v))
+        query = urlencode(q, doseq=True)
+        fragment = ''
+        return urlunsplit((scheme, netloc, path, query, fragment))
+
+    def _make_title_map(self, annotations):
+        m = {}
+        for a in annotations or []:
+            url = a.get('url')
+            if not url:
+                continue
+            nu = self._normalize_url(url)
+            title = (a.get('title') or '').strip()
+            if nu not in m and title:
+                m[nu] = title
+        return m
+
+    def _hostname(self, u: str) -> str:
+        return urlsplit(u).netloc
+
+    def _short_hash_id(self, nu: str, length=6) -> str:
+        return hashlib.sha1(nu.encode('utf-8')).hexdigest()[:length]
+
+    def _insert_citation_markers(self, text: str, annotations):
+        title_map = self._make_title_map(annotations)
+        id_map: dict[str, int] = {}
+        first_span: dict[int, tuple[int, int]] = {}
+        ordered: list[tuple[int, str, str]] = [] # (id, title, normalized_url)
+        next_id = 1
+
+        url_anns = [a for a in annotations or [] if a.get('type') == 'url_citation' and a.get('url')]
+        url_anns.sort(key=lambda a: int(a.get('start_index', 0)))
+
+        pieces: list[str] = []
+        cursor = 0
+        out_len = 0 # length of output built so far (after cleaning and prior markers)
+
+        def _get_id(nu: str) -> int:
+            nonlocal next_id
+            if nu not in id_map:
+                cid = next_id
+                id_map[nu] = cid
+                title = title_map.get(nu) or self._hostname(nu)
+                ordered.append((cid, title, nu))
+                next_id += 1
+            return id_map[nu]
+
+        for ann in url_anns:
+            start = int(ann.get('start_index', 0))
+            end = int(ann.get('end_index', 0))
+            if end <= cursor:
+                continue # skip overlapping or backwards spans
+            url = ann.get('url')
+            nu = self._normalize_url(url)
+            cid = _get_id(nu)
+            title = title_map.get(nu) or self._hostname(nu)
+
+            prefix = text[cursor:start]
+            prefix_clean = self._strip_markdown_links(prefix)
+            pieces.append(prefix_clean)
+            out_len += len(prefix_clean)
+
+            span_text = text[start:end]
+            span_clean = self._strip_markdown_links(span_text)
+            span_end_out = out_len + len(span_clean)
+            pieces.append(span_clean)
+            out_len = span_end_out
+
+            marker = f"[{cid}] ({title})\n"
+            marker_start_out = out_len
+            marker_end_out = out_len + len(marker)
+            if cid not in first_span:
+                first_span[cid] = (marker_start_out, marker_end_out)
+            pieces.append(marker)
+            out_len = marker_end_out
+            cursor = end
+
+        tail_clean = self._strip_markdown_links(text[cursor:])
+        pieces.append(tail_clean)
+        replaced = ''.join(pieces)
+
+        starts_ends = {cid: first_span.get(cid, (0, 0)) for cid, _, _ in ordered}
+        return replaced, ordered, starts_ends
+
+    def _strip_markdown_links(self, text: str) -> str:
+        # Remove ([text](http...)) including surrounding parentheses
+        pattern_paren = re.compile(r"\(\s*\[[^\]]+\]\(https?://[^)]+\)\s*\)")
+        text = pattern_paren.sub('', text)
+        # Remove bare [text](http...)
+        pattern_bare = re.compile(r"\[[^\]]+\]\(https?://[^)]+\)")
+        text = pattern_bare.sub('', text)
+        # Remove parentheses that became empty or contain only commas/whitespace like (, , )
+        pattern_empty_paren = re.compile(r"\(\s*\)")
+        text = pattern_empty_paren.sub('', text)
+        pattern_commas_only = re.compile(r"\(\s*(,\s*)+\)")
+        text = pattern_commas_only.sub('', text)
+        # Collapse potential double spaces resulting from removals
+        return re.sub(r"\s{2,}", " ", text).strip()
+
     def __str__(self) -> str:
+        if isinstance(self._value, str) and self._value:
+            return self._value
         try:
             return json.dumps(self.raw, indent=2)
         except TypeError:
             return str(self.raw)

     def _repr_html_(self) -> str:
+        if isinstance(self._value, str) and self._value:
+            return f"<pre>{self._value}</pre>"
         try:
             return f"<pre>{json.dumps(self.raw, indent=2)}</pre>"
-        except Exception
+        except Exception:
             return f"<pre>{str(self.raw)}</pre>"

     def get_citations(self) -> list[Citation]:
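The normalization rule above is what lets two links to the same page collapse into one citation id: case-fold the scheme and host, trim the trailing slash, drop `utm_*` tracking parameters and the fragment. A self-contained sketch of the same logic:

    from urllib.parse import parse_qsl, urlencode, urlsplit, urlunsplit

    TRACKING_KEYS = {"utm_source"}

    def normalize_url(u: str) -> str:
        parts = urlsplit(u)
        # Keep only query parameters that are not tracking keys
        q = [(k, v) for k, v in parse_qsl(parts.query, keep_blank_values=True)
             if k.lower() not in TRACKING_KEYS and not k.lower().startswith('utm_')]
        return urlunsplit((parts.scheme.lower(), parts.netloc.lower(),
                           parts.path.rstrip('/') or '/', urlencode(q, doseq=True), ''))

    assert normalize_url('https://Example.com/a/?utm_source=x&id=1#frag') == 'https://example.com/a?id=1'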
@@ -86,6 +239,10 @@ class GPTXSearchEngine(Engine):
         self.api_key = self.config.get('SEARCH_ENGINE_API_KEY')
         self.model = self.config.get('SEARCH_ENGINE_MODEL', 'gpt-4.1') # Default to gpt-4.1 as per docs
         self.name = self.__class__.__name__
+        try:
+            self.client = OpenAI(api_key=self.api_key)
+        except Exception as e:
+            CustomUserWarning(f"Failed to initialize OpenAI client: {e}", raise_with=ValueError)

     def id(self) -> str:
         if self.config.get('SEARCH_ENGINE_API_KEY') and \
@@ -117,19 +274,12 @@ class GPTXSearchEngine(Engine):
             "model": self.model,
             "input": messages,
             "tools": [tool_definition],
-            "tool_choice": {"type": "web_search_preview"} # force the use of web search tool
-        }
-
-        headers = {
-            "Authorization": f"Bearer {self.api_key}",
-            "Content-Type": "application/json",
-            "OpenAI-Beta": "assistants=v1" # Required for some beta features, might be useful
+            "tool_choice": {"type": "web_search_preview"} if self.model not in OPENAI_REASONING_MODELS else "auto" # force the use of web search tool for non-reasoning models
         }
-        api_url = "https://api.openai.com/v1/responses"

         try:
-            res =
-            res = SearchResult(res.
+            res = self.client.responses.create(**payload)
+            res = SearchResult(res.dict())
         except Exception as e:
             CustomUserWarning(f"Failed to make request: {e}", raise_with=ValueError)

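The request construction above replaces the hand-rolled HTTP call to `https://api.openai.com/v1/responses` with the official client, and only forces the web-search tool for non-reasoning models. A minimal sketch of that selection rule; the model names stand in for `OPENAI_REASONING_MODELS`:

    REASONING_MODELS = {"o3", "o4-mini"}  # assumption: stand-in for OPENAI_REASONING_MODELS

    def pick_tool_choice(model: str):
        # Reasoning models decide on their own ('auto'); others are forced to search
        return {"type": "web_search_preview"} if model not in REASONING_MODELS else "auto"

    assert pick_tool_choice("gpt-4.1") == {"type": "web_search_preview"}
    assert pick_tool_choice("o3") == "auto"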
symai/backend/mixin/__init__.py
CHANGED

@@ -4,5 +4,7 @@ from .deepseek import SUPPORTED_CHAT_MODELS as DEEPSEEK_CHAT_MODELS
 from .deepseek import SUPPORTED_REASONING_MODELS as DEEPSEEK_REASONING_MODELS
 from .google import SUPPORTED_CHAT_MODELS as GOOGLE_CHAT_MODELS
 from .google import SUPPORTED_REASONING_MODELS as GOOGLE_REASONING_MODELS
+from .groq import SUPPORTED_CHAT_MODELS as GROQ_CHAT_MODELS
+from .groq import SUPPORTED_REASONING_MODELS as GROQ_REASONING_MODELS
 from .openai import SUPPORTED_CHAT_MODELS as OPENAI_CHAT_MODELS
 from .openai import SUPPORTED_REASONING_MODELS as OPENAI_REASONING_MODELS
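The new `symai/backend/mixin/groq.py` itself (+10 lines) is not expanded in this diff; by analogy with the other mixin modules, it must export the two lists imported here. A hypothetical sketch (the entries are placeholders, not from the diff):

    # Hypothetical contents of symai/backend/mixin/groq.py (not shown in the diff)
    SUPPORTED_CHAT_MODELS = [
        "groq:llama-3.3-70b-versatile",  # placeholder
    ]
    SUPPORTED_REASONING_MODELS = [
        "groq:qwen/qwen3-32b",           # placeholder
    ]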
symai/components.py
CHANGED

@@ -1147,8 +1147,17 @@ class MetadataTracker(Expression):
         # Note on try/except:
         # The unpacking shouldn't fail; if it fails, it's likely the API response format has changed and we need to know that ASAP
         for (_, engine_name, model_name), metadata in self._metadata.items():
-
-
+            try:
+                if engine_name == "GroqEngine":
+                    usage = metadata["raw_output"].usage
+                    token_details[(engine_name, model_name)]["usage"]["completion_tokens"] += usage.completion_tokens
+                    token_details[(engine_name, model_name)]["usage"]["prompt_tokens"] += usage.prompt_tokens
+                    token_details[(engine_name, model_name)]["usage"]["total_tokens"] += usage.total_tokens
+                    token_details[(engine_name, model_name)]["usage"]["total_calls"] += 1
+                    #!: Backward compatibility for components like `RuntimeInfo`
+                    token_details[(engine_name, model_name)]["prompt_breakdown"]["cached_tokens"] += 0 # Assignment not allowed with defaultdict
+                    token_details[(engine_name, model_name)]["completion_breakdown"]["reasoning_tokens"] += 0
+                elif engine_name in ("GPTXChatEngine", "GPTXReasoningEngine"):
                     usage = metadata["raw_output"].usage
                     token_details[(engine_name, model_name)]["usage"]["completion_tokens"] += usage.completion_tokens
                     token_details[(engine_name, model_name)]["usage"]["prompt_tokens"] += usage.prompt_tokens

@@ -1160,10 +1169,7 @@ class MetadataTracker(Expression):
                     token_details[(engine_name, model_name)]["completion_breakdown"]["reasoning_tokens"] += usage.completion_tokens_details.reasoning_tokens
                     token_details[(engine_name, model_name)]["prompt_breakdown"]["audio_tokens"] += usage.prompt_tokens_details.audio_tokens
                     token_details[(engine_name, model_name)]["prompt_breakdown"]["cached_tokens"] += usage.prompt_tokens_details.cached_tokens
-
-                CustomUserWarning(f"Failed to parse metadata for {engine_name}: {e}", raise_with=AttributeError)
-            elif engine_name == "GPTXSearchEngine":
-                try:
+                elif engine_name == "GPTXSearchEngine":
                     usage = metadata["raw_output"].usage
                     token_details[(engine_name, model_name)]["usage"]["prompt_tokens"] += usage.input_tokens
                     token_details[(engine_name, model_name)]["usage"]["completion_tokens"] += usage.output_tokens

@@ -1171,11 +1177,11 @@ class MetadataTracker(Expression):
                     token_details[(engine_name, model_name)]["usage"]["total_calls"] += 1
                     token_details[(engine_name, model_name)]["prompt_breakdown"]["cached_tokens"] += usage.input_tokens_details.cached_tokens
                     token_details[(engine_name, model_name)]["completion_breakdown"]["reasoning_tokens"] += usage.output_tokens_details.reasoning_tokens
-
-
-
-
-
+                else:
+                    logger.warning(f"Tracking {engine_name} is not supported.")
+                    continue
+            except Exception as e:
+                CustomUserWarning(f"Failed to parse metadata for {engine_name}: {e}", raise_with=AttributeError)

         # Convert to normal dict
         return {**token_details}

@@ -1193,6 +1199,7 @@ class MetadataTracker(Expression):
         # Skip first entry
         for (_, engine_name), metadata in list(self._metadata.items())[1:]:
             if engine_name not in ("GPTXChatEngine", "GPTXReasoningEngine", "GPTXSearchEngine"):
+                logger.warning(f"Metadata accumulation for {engine_name} is not supported. Try `.usage` instead for now.")
                 continue

             # Accumulate time if it exists
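The Groq branch above mirrors the OpenAI branch but reports no cached or reasoning token details, so it adds `+= 0` just to materialize the keys that downstream readers such as `RuntimeInfo` expect. A sketch of that defaultdict pattern:

    from collections import defaultdict

    token_details = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
    key = ("GroqEngine", "llama-3.3-70b-versatile")  # hypothetical model name
    token_details[key]["usage"]["prompt_tokens"] += 120
    token_details[key]["usage"]["total_calls"] += 1
    # `+= 0` touches the key so it exists with a zero value for later readers
    token_details[key]["prompt_breakdown"]["cached_tokens"] += 0
    assert token_details[key]["prompt_breakdown"]["cached_tokens"] == 0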
symai/misc/console.py
CHANGED

@@ -2,6 +2,7 @@ import re
 import pygments
 import logging

+#@TODO: refactor to use rich instead of prompt_toolkit
 from html import escape as escape_html
 from pygments.lexers.python import PythonLexer
 from pygments.lexers.javascript import JavascriptLexer

@@ -49,8 +50,8 @@ class ConsoleStyle(object):
         message = str(message)
         if self.logging:
             logger.debug(message)
-
-
+        # Prepare safe content for HTML printing without mutating the original
+        content_for_html = escape_html(message) if escape else message
         style = self.style_types.get(self.style_type, self.style_types['default'])

         if style == self.style_types['code']:

@@ -80,7 +81,6 @@ class ConsoleStyle(object):
         elif style == self.style_types['default']:
             print(message)
         elif style == self.style_types['custom']:
-            print(HTML(f'<style fg="{self.color}">{
+            print(HTML(f'<style fg="{self.color}">{content_for_html}</style>'))
         else:
-            print(HTML(f'<style fg="{style}">{
-
+            print(HTML(f'<style fg="{style}">{content_for_html}</style>'))
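The fix stores the escaped copy in a separate `content_for_html` variable instead of mutating `message`, so logging and plain printing keep the original text. A minimal sketch, assuming prompt_toolkit is installed:

    from html import escape as escape_html
    from prompt_toolkit import HTML, print_formatted_text

    message = 'unsafe <tag> & text'
    content_for_html = escape_html(message)  # '<' -> '&lt;' etc., original untouched
    print_formatted_text(HTML(f'<style fg="ansigreen">{content_for_html}</style>'))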
symai/shellsv.py
CHANGED

@@ -13,6 +13,7 @@ import traceback
 from pathlib import Path
 from typing import Iterable, Tuple

+#@TODO: refactor to use rich instead of prompt_toolkit
 from prompt_toolkit import HTML, PromptSession, print_formatted_text
 from prompt_toolkit.completion import Completer, Completion, WordCompleter
 from prompt_toolkit.history import History
symai/utils.py
CHANGED

@@ -224,9 +224,7 @@ class RuntimeInfo:
         try:
             return RuntimeInfo.from_usage_stats(tracker.usage, total_elapsed_time)
         except Exception as e:
-
-            CustomUserWarning(f"Failed to parse metadata; returning empty RuntimeInfo: {e}")
-            return RuntimeInfo(0, 0, 0, 0, 0, 0, 0, 0)
+            CustomUserWarning(f"Failed to parse metadata: {e}", raise_with=ValueError)
         return RuntimeInfo(0, 0, 0, 0, 0, 0, 0, 0)

     @staticmethod

@@ -234,12 +232,13 @@ class RuntimeInfo:
         if usage_stats is not None:
             usage_per_engine = {}
             for (engine_name, model_name), data in usage_stats.items():
+                #!: This object interacts with `MetadataTracker`; its fields are mandatory and handled there
                 data = Box(data)
                 usage_per_engine[(engine_name, model_name)] = RuntimeInfo(
                     total_elapsed_time=total_elapsed_time,
                     prompt_tokens=data.usage.prompt_tokens,
                     completion_tokens=data.usage.completion_tokens,
-                    reasoning_tokens=
+                    reasoning_tokens=data.completion_breakdown.reasoning_tokens,
                     cached_tokens=data.prompt_breakdown.cached_tokens,
                     total_calls=data.usage.total_calls,
                     total_tokens=data.usage.total_tokens,
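As the new note says, `RuntimeInfo` reads these fields blindly, which is why every `MetadataTracker` branch must populate them. A sketch of the expected shape, assuming the `python-box` package that provides `Box`:

    from box import Box  # attribute-style access over nested dicts

    data = Box({
        "usage": {"prompt_tokens": 120, "completion_tokens": 42,
                  "total_tokens": 162, "total_calls": 1},
        "prompt_breakdown": {"cached_tokens": 0},
        "completion_breakdown": {"reasoning_tokens": 0},
    })
    assert data.completion_breakdown.reasoning_tokens == 0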
{symbolicai-0.17.5.dist-info → symbolicai-0.18.0.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: symbolicai
-Version: 0.17.5
+Version: 0.18.0
 Summary: A Neurosymbolic Perspective on Large Language Models
 Author-email: Marius-Constantin Dinu <marius@extensity.ai>, Leoveanu-Condrei Claudiu <leo@extensity.ai>
 Project-URL: Homepage, https://extensity.ai
{symbolicai-0.17.5.dist-info → symbolicai-0.18.0.dist-info}/RECORD
CHANGED

@@ -1,7 +1,7 @@
 symai/TERMS_OF_SERVICE.md,sha256=HN42UXVI_wAVDHjMShzy_k7xAsbjXaATNeMKcIte_eg,91409
-symai/__init__.py,sha256=
+symai/__init__.py,sha256=xXbdl7oTmx1KKaCLI3R4_2oktrKmNTy1ommzZ5ZsW34,16464
 symai/chat.py,sha256=vqEe7NqSWdzr9ixkko_094SR1LIbgPLcZxQ8W7782N4,12775
-symai/components.py,sha256=
+symai/components.py,sha256=YazuyQasU7P3dvUHn-h5A54D-lf6wD_Hqc8Y6Gnd11g,51440
 symai/constraints.py,sha256=S1ywLB8nFQy4-beDoJz6IvLTiZHGR8Fu5RNTY4v5zG0,1641
 symai/core.py,sha256=1g45AjJ5wkz1cNTbtoDbd8QlOUc-v-3sWNmDTxaeqY0,69041
 symai/core_ext.py,sha256=binru2AjB8K-arbNLiu1wnNodtFxgqk26b-iLVhPoSU,9322

@@ -15,11 +15,11 @@ symai/pre_processors.py,sha256=08C1FfjwI5bhxk8Xt6aB5wPizB5o3DEFkTh4S8PkS5Q,16886
 symai/processor.py,sha256=ZV6uQwybUhCJCTdvBwVxMmleuX0EUVOQHZSvsm5F8pw,1586
 symai/prompts.py,sha256=OZWW4_S6yf2mpwuiHWlcZz82ITKkYEq4-DIzBHalJnI,89831
 symai/shell.py,sha256=fzWvnEovQartpO3UhjLcH9TcydgxXzf3YyfuLlU_GDI,6237
-symai/shellsv.py,sha256=
+symai/shellsv.py,sha256=Y552dkQrKrnMhwDyK9wJSA1bnJF_bh60iMBom-eAbsM,37277
 symai/strategy.py,sha256=D2DD5mTgp2aDzIXNdKU1FwoSdnIWcKYL2Yv7Tue-Sy8,37885
 symai/symbol.py,sha256=unRyZj_2KepkV6xTLYLWzYUkBZh2J5UB-OHyPhFn7uE,40637
 symai/symsh.md,sha256=QwY_-fX0Ge7Aazul0xde2DuF2FZLw_elxrkXR3kuKDQ,1245
-symai/utils.py,sha256=
+symai/utils.py,sha256=chZwVcjHnDizYFlmdK6MMPAa7W40kqpya9zctAwtYTI,9813
 symai/backend/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 symai/backend/base.py,sha256=7zXaZtByUXfOqxqEvbdmB_E0GYvvpwfj3FPsf3EZP0Y,7523
 symai/backend/settings.py,sha256=YCt0AJgWM2egRg3hLz4S1vEypVHm654oXxgzNvLSLj8,3213

@@ -42,12 +42,12 @@ symai/backend/engines/imagecaptioning/engine_llavacpp_client.py,sha256=ZbW6sxFE9
 symai/backend/engines/index/engine_pinecone.py,sha256=Horf9lzw2QLMdYvvBLeAu3MOyiTbNo30en28Ifm4AVA,8956
 symai/backend/engines/index/engine_vectordb.py,sha256=q0jUvmAh1AEPIE5fpayjSmSg-U9W1OffDXkx4tkFvNI,8048
 symai/backend/engines/lean/engine_lean4.py,sha256=1ZZOzw1kDAJH6hMCyDtK50LAYSsoU8pApWVPI_9Eul0,9440
-symai/backend/engines/neurosymbolic/__init__.py,sha256=
+symai/backend/engines/neurosymbolic/__init__.py,sha256=mZpsX-UVc86nYjn1hxyJM1AvVq5BvZK8sr5PU-QgwVU,1438
 symai/backend/engines/neurosymbolic/engine_anthropic_claudeX_chat.py,sha256=01BdmObww3NzbYfb8OscLnwrxoPW48JkLWQOFeopURM,18728
 symai/backend/engines/neurosymbolic/engine_anthropic_claudeX_reasoning.py,sha256=ZPSksJjm3dnJBZKFDFp9HJpD5Nd2F2HxIwoZzo1Ders,19772
 symai/backend/engines/neurosymbolic/engine_deepseekX_reasoning.py,sha256=ZaZvAVpgv5GYjE2yZmYhP5zUnEKak5I1mndRfiGfr6U,8995
 symai/backend/engines/neurosymbolic/engine_google_geminiX_reasoning.py,sha256=kxGWYsxnQkpsm40HB1MUGggWmrWz8avCY3jvNrohaKw,25622
-symai/backend/engines/neurosymbolic/engine_groq.py,sha256=
+symai/backend/engines/neurosymbolic/engine_groq.py,sha256=g_Xnl8UezHF2dksFc4MYw3DCQqh8nQFUhVIkGCXegtc,11748
 symai/backend/engines/neurosymbolic/engine_huggingface.py,sha256=XIu9BnQo-J2flXFCCKwCJJmVozU9WDNkPndmpi-DlzE,7920
 symai/backend/engines/neurosymbolic/engine_llama_cpp.py,sha256=ZbHaRYOb7QSD0OrFURp4PT0KJgQk3gdJMD_Itxn7QcU,11323
 symai/backend/engines/neurosymbolic/engine_openai_gptX_chat.py,sha256=Y-auxUFC4W9dfRzzgI3_rbWbPiOx4xfvKS4sM0KxP40,25250

@@ -55,7 +55,7 @@ symai/backend/engines/neurosymbolic/engine_openai_gptX_completion.py,sha256=YgxR
 symai/backend/engines/neurosymbolic/engine_openai_gptX_reasoning.py,sha256=QVbyZybUPSAQHiA66V6we2W2dAsk52g1kJ7kMdGqb9I,22951
 symai/backend/engines/ocr/engine_apilayer.py,sha256=hZo4lk0ECRIzaGEpmCSNjR5Xrh8mwkKMD2ddpdgioVU,2399
 symai/backend/engines/output/engine_stdout.py,sha256=2hhyhMHFJTfjVRaODYd_5XPnV9pT03URcpYbeMY_USU,951
-symai/backend/engines/search/engine_openai.py,sha256=
+symai/backend/engines/search/engine_openai.py,sha256=QeWCu5ofJFZwIWSolIYAlVN1KjFngM8GpSUhxPP4bps,11688
 symai/backend/engines/search/engine_perplexity.py,sha256=yxuhGaA38d1FRbLv6piLll0QDxCCyBVK6eeomjYNryM,4157
 symai/backend/engines/search/engine_serpapi.py,sha256=UqvGHs1J9BOv05C0FJUQjbz29_VuWncIkeDwlRPUilU,3698
 symai/backend/engines/speech_to_text/engine_local_whisper.py,sha256=LRsXliCpHDFPFaE-vPky3-DLkmYwmwe2mxfF0Brz4Wg,8220

@@ -64,10 +64,11 @@ symai/backend/engines/text_to_speech/engine_openai.py,sha256=rq34pTr4bRU-HeA84Av
 symai/backend/engines/text_vision/engine_clip.py,sha256=EUwlom2e7m_efCK2zuPbe1TzyT9CPRlY0mkFTCmXp0U,3740
 symai/backend/engines/userinput/engine_console.py,sha256=FwOakooxCc4oaQv6nYd-uIG2SxJRUI3n64cIs3B82FY,770
 symai/backend/engines/webscraping/engine_requests.py,sha256=qsEAiEZJWLFXqhFfBCsQbSvFWgjA-rXDKMxu8Ezdl_8,4914
-symai/backend/mixin/__init__.py,sha256=
+symai/backend/mixin/__init__.py,sha256=ischewsMtIFanU30N32ac2Eb8u4hjWxuEb6mrniUv6Y,702
 symai/backend/mixin/anthropic.py,sha256=k_7gTvbKrucMW54MR7q8S9RbaQ39AaV2uvgil5F-dnM,1936
 symai/backend/mixin/deepseek.py,sha256=U-xtUjR9dFTkmiJPAF5_tyuTxpnUxv5gki9WjTfrVL4,379
 symai/backend/mixin/google.py,sha256=aCQDxo_F0_mQGb8h2iYhQmOlo7NuF2IhY85CYro-m4k,453
+symai/backend/mixin/groq.py,sha256=ZXbJcAMR6mHiA5FfnpWivb0l71cgNR-5pYRtWNe8Nmc,232
 symai/backend/mixin/openai.py,sha256=ZzteR8wIJoo4sLtNtea-Bb1IAGD0WLJ3cqGG5GGAJN4,4638
 symai/collect/__init__.py,sha256=eaLjpARuQCa_ChZjyogYoh-w9-xXuNxRAFND2liLvZk,104
 symai/collect/dynamic.py,sha256=3VDMZ-EJKE-GsqhyuPrwbfoLD9LFec1QfuVr7nOKMfE,3928

@@ -141,7 +142,7 @@ symai/formatter/regex.py,sha256=POf4anhw2FovCQinq3yFWGNcWXf3diIV8yVrp9adieA,9924
 symai/menu/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 symai/menu/screen.py,sha256=YmCfKEt76GGv4aQ1ikeC3C7xEw5HJn18jhfZuYAoaCs,1758
 symai/misc/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-symai/misc/console.py,sha256=
+symai/misc/console.py,sha256=hwAET_vwX7bwxmrQ6lgUbZhv3ME5_4vMtOWeCAHg5no,3052
 symai/misc/loader.py,sha256=7lyIMIvU6Ywo_xt-TM8Xqhc6W4tY67U5XzxSkuAYZi8,1635
 symai/models/__init__.py,sha256=QCYmMOhHk3t6HhTApBMxIeS3dX4_bKfHNr6a9LzRa8s,163
 symai/models/base.py,sha256=lnkcCwJfv_Yg5kiLRazbVq9jIRKhNR75W8_S9NBVmMo,40881

@@ -152,8 +153,8 @@ symai/ops/primitives.py,sha256=EaB2Ekx9yGNDaQa3aKS5KpuEr5awAUbO3OcBbufI-l4,11072
 symai/server/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
 symai/server/huggingface_server.py,sha256=UpSBflnQaenDjY1AAn5LUYeg5J4gJLWiMuC5DcoIV3E,8743
 symai/server/llama_cpp_server.py,sha256=qVCldTdcQhK2YCu7sDNSYziu1p2AQieqMFfY028-yOc,2049
-symbolicai-0.
-symbolicai-0.
-symbolicai-0.
-symbolicai-0.
-symbolicai-0.
+symbolicai-0.18.0.dist-info/METADATA,sha256=EpPFn8hwc36E-UwtnlQlnAX8qR4Te-rqxfBemyTfiK8,21327
+symbolicai-0.18.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+symbolicai-0.18.0.dist-info/entry_points.txt,sha256=JV5sdydIfUZdDF6QBEQHiZHod6XNPjCjpWQrXh7gTAw,261
+symbolicai-0.18.0.dist-info/top_level.txt,sha256=bOoIDfpDIvCQtQgXcwVKJvxAKwsxpxo2IL4z92rNJjw,6
+symbolicai-0.18.0.dist-info/RECORD,,
{symbolicai-0.17.5.dist-info → symbolicai-0.18.0.dist-info}/WHEEL
File without changes

{symbolicai-0.17.5.dist-info → symbolicai-0.18.0.dist-info}/entry_points.txt
File without changes

{symbolicai-0.17.5.dist-info → symbolicai-0.18.0.dist-info}/top_level.txt
File without changes