syntaxmatrix 2.5.5.1__tar.gz → 2.5.5.2__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/PKG-INFO +1 -1
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/SyntaxMatrix.egg-info/PKG-INFO +1 -1
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/setup.py +1 -1
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/__init__.py +1 -1
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/agentic/agents.py +86 -120
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/core.py +2 -2
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/routes.py +2 -2
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/templates/dashboard.html +12 -19
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/LICENSE.txt +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/README.md +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/SyntaxMatrix.egg-info/SOURCES.txt +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/SyntaxMatrix.egg-info/dependency_links.txt +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/SyntaxMatrix.egg-info/requires.txt +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/SyntaxMatrix.egg-info/top_level.txt +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/pyproject.toml +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/setup.cfg +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/agentic/agent_tools.py +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/agentic/code_tools_registry.py +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/agentic/model_templates.py +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/auth.py +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/bootstrap.py +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/commentary.py +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/dataset_preprocessing.py +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/db.py +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/display.py +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/emailer.py +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/file_processor.py +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/generate_page.py +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/gpt_models_latest.py +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/history_store.py +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/kernel_manager.py +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/llm_store.py +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/models.py +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/plottings.py +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/profiles.py +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/project_root.py +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/session.py +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/settings/__init__.py +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/settings/default.yaml +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/settings/logging.py +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/settings/model_map.py +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/settings/prompts.py +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/settings/string_navbar.py +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/smiv.py +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/smpv.py +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/static/css/style.css +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/static/docs.md +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/static/icons/favicon.png +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/static/icons/hero_bg.jpg +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/static/icons/logo.png +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/static/icons/svg_497526.svg +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/static/icons/svg_497528.svg +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/static/js/chat.js +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/static/js/sidebar.js +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/static/js/widgets.js +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/templates/code_cell.html +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/templates/docs.html +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/templates/error.html +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/templates/login.html +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/templates/register.html +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/themes.py +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/ui_modes.py +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/utils.py +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/vector_db.py +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/vectordb/__init__.py +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/vectordb/adapters/__init__.py +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/vectordb/adapters/milvus_adapter.py +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/vectordb/adapters/pgvector_adapter.py +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/vectordb/adapters/sqlite_adapter.py +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/vectordb/base.py +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/vectordb/registry.py +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/vectorizer.py +0 -0
- {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/workspace_db.py +0 -0
{syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/setup.py

@@ -8,7 +8,7 @@ with open(os.path.join(this_directory, "README.md"), encoding="utf-8") as f:
 
 setup(
     name="syntaxmatrix",
-    version="2.5.5.1",
+    version="2.5.5.2",
     author="Bob Nti",
     author_email="bob.nti@syntaxmatrix.net",
     description="SyntaxMUI: A customizable framework for Python AI Assistant Projects.",
{syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/__init__.py

@@ -25,7 +25,7 @@ warning = _app_instance.warning
 
 set_user_icon = _app_instance.set_user_icon
 set_bot_icon = _app_instance.set_bot_icon
-
+set_favicon = _app_instance.set_favicon
 set_project_name = _app_instance.set_project_name
 set_site_title = _app_instance.set_site_title
 set_site_logo = _app_instance.set_site_logo
{syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/agentic/agents.py

@@ -11,6 +11,7 @@ from .. import profiles as _prof
 from ..gpt_models_latest import set_args as _set_args, extract_output_text as _out
 from google.genai import types
 import tiktoken
+from google.genai.errors import APIError
 
 
 def token_calculator(total_input_content, llm_profile):
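The new APIError import gives agents.py a typed way to catch Gemini failures instead of a bare Exception. A minimal sketch of that pattern, assuming a configured google-genai client and an illustrative model name:

```python
# Sketch only: assumes GEMINI_API_KEY is set in the environment;
# the model name is illustrative, not taken from this package.
from google import genai
from google.genai.errors import APIError

client = genai.Client()
try:
    resp = client.models.generate_content(
        model="gemini-2.0-flash",
        contents=["Say hi"],
    )
    print(resp.text)
except APIError as e:
    # Typed Gemini failure (auth, quota, bad request, ...)
    print(f"Gemini API Error: {e}")
```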
@@ -39,8 +40,7 @@ def token_calculator(total_input_content, llm_profile):
     else:
         enc = tiktoken.encoding_for_model(_model)
         input_prompt_tokens = len(enc.encode(total_input_content))
-        return input_prompt_tokens
-
+    return input_prompt_tokens
 
 def mlearning_agent(user_prompt, system_prompt, coding_profile, temperature=0.1, max_tokens=4096):
     """
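The change above moves the return out of the else branch so the count is returned on every path. For reference, the tiktoken counting idiom token_calculator wraps, as a minimal sketch; the model name and the cl100k_base fallback are assumptions, not the package's profile logic:

```python
# Minimal sketch of tiktoken-based counting; model name and the
# cl100k_base fallback are illustrative assumptions.
import tiktoken

def count_tokens(text: str, model: str = "gpt-4o") -> int:
    try:
        enc = tiktoken.encoding_for_model(model)   # model-specific encoding
    except KeyError:
        enc = tiktoken.get_encoding("cl100k_base")  # generic fallback
    return len(enc.encode(text))

print(count_tokens("The quick brown fox"))
```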
@@ -94,83 +94,82 @@ def mlearning_agent(user_prompt, system_prompt, coding_profile, temperature=0.1,
 
     # Google
     def google_generate_code():
+        nonlocal usage
+        """
+        Generates content using the Gemini API and calculates token usage
+        including Context Overhead for consistency.
+        """
+
         try:
+            # 1. Client Initialization
             config = types.GenerateContentConfig(
                 system_instruction=system_prompt,
-                temperature=temperature,
+                temperature=temperature,
                 max_output_tokens=max_tokens,
-                response_mime_type="text/plain",
             )
-
+
+            # 2. API Call
             resp = _client.models.generate_content(
                 model=_model,
-                contents=user_prompt,
-                config=config
+                contents=[user_prompt],
+                config=config,
             )
-
-            um = getattr(resp, "usage_metadata", None)
+
+            print("\n888888888888888888888888888888\n")
+            print("RESPONSE:\n", resp)
+            print("\n888888888888888888888888888888\n")
+
+            print("\n888888888888888888888888888888\n")
+            print("USAGEMETADATA:\n", resp.usage)
+            print("\n888888888888888888888888888888\n")
+
+            # 3. Token Usage Capture and Context Overhead Calculation
+            um = getattr(resp, "usage_metadata", None)
             if um is not None:
-
-                prompt = _get_usage_val(um, [
-                    "prompt_token_count",
-                    "promptTokenCount",
-                    "input_tokens",
-                ])
-                candidates = _get_usage_val(um, [
-                    "candidates_token_count",
-                    "candidatesTokenCount",
-                    "output_tokens",
-                ])
-                total = _get_usage_val(um, [
-                    "total_token_count",
-                    "totalTokenCount",
-                    "total_tokens",
-                ])
-
-                usage["input_tokens"] = prompt
-                usage["output_tokens"] = candidates
-                usage["total_tokens"] = total
-
-                # Fallback: if output not provided but prompt + total exist,
-                # derive it as total - prompt
-                if usage["output_tokens"] is None and prompt is not None and total is not None:
-                    try:
-                        usage["output_tokens"] = int(total) - int(prompt)
-                    except Exception:
-                        # if it somehow fails, just leave it as None
-                        pass
-
-            # 1) Fast path: SDK convenience property
+                usage["input_tokens"] = _get_usage_val(um, ["prompt_token_count", "promptTokenCount", "input_tokens"])
+                usage["output_tokens"] = _get_usage_val(um, ["candidates_token_count", "candidatesTokenCount", "output_tokens"])
+                usage["total_tokens"] = _get_usage_val(um, ["total_token_count", "totalTokenCount", "total_tokens"])
+
+            # 4. Response Extraction (same robust logic as before)
             text = getattr(resp, "text", None)
             if isinstance(text, str) and text.strip():
                 return text.strip()
+
             chunks = []
             candidates = getattr(resp, "candidates", None) or []
             for cand in candidates:
                 content = getattr(cand, "content", None)
-                parts = getattr(content, "parts", None) or []
-                for part in parts:
-                    t = getattr(part, "text", None)
-                    if t:
-                        chunks.append(str(t))
+                if content:
+                    parts = getattr(content, "parts", None) or []
+                    for part in parts:
+                        t = getattr(part, "text", None)
+                        if t:
+                            chunks.append(str(t))
+
             text = "\n".join(chunks).strip()
             if text:
                 return text
 
-            #
+            # 5. Handle blocked response
             fb = getattr(resp, "prompt_feedback", None)
             block_reason = getattr(fb, "block_reason", None) if fb else None
-            if block_reason:
-                raise RuntimeError(f"{_model}
-            raise RuntimeError(f"{_model} to
+            if block_reason and block_reason != types.BlockedReason.REASON_UNSPECIFIED:
+                raise RuntimeError(f"{_model} blocked the response. Reason: {block_reason.name}")
+            raise RuntimeError(f"{_model} failed to return content due to insufficient data.")
 
+        except APIError as e:
+            error_msg = f"Gemini API Error: {e}"
+
         except Exception as e:
-            msg = f"I smxAI have instructed {e}"
-            return (
-                f"# {msg}\n"
-                "from syntaxmatrix.display import show\n"
-                f"show({msg!r})\n"
-            )
+            error_msg = f"An unexpected error occurred during API call or processing: {e}"
+
+            # --- Return the error message wrapped in the required output code structure ---
+            msg = f"I smxAI have instructed {error_msg}\n"
+            return (
+                f"# {msg}\n"
+                "from syntaxmatrix.display import show\n"
+                f"show({msg!r})\n"
+            )
 
     # OpenAI Responses API
     def gpt_models_latest_generate_code():
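The rewrite collapses the per-key extraction into one-liners built on _get_usage_val, whose definition is not part of this diff. A hypothetical reconstruction of what such a helper typically does: probe several attribute or key spellings and return the first one present.

```python
# Hypothetical stand-in for _get_usage_val (its real definition is not shown
# in this diff): try snake_case, camelCase, and SDK-style names on either an
# object or a mapping, returning the first non-None value.
def get_usage_val(um, names):
    for name in names:
        val = getattr(um, name, None)
        if val is None and isinstance(um, dict):
            val = um.get(name)
        if val is not None:
            return val
    return None

class FakeUsage:
    prompt_token_count = 42

print(get_usage_val(FakeUsage(), ["prompt_token_count", "promptTokenCount", "input_tokens"]))  # 42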
@@ -196,34 +195,11 @@ def mlearning_agent(user_prompt, system_prompt, coding_profile, temperature=0.1,
                 verbosity=reasoning_and_verbosity()[1],
             )
             resp = _client.responses.create(**args)
-
-            # --- Capture token usage (prompt, completion, and total tokens) ---
-            u = getattr(resp, "usage", None)
-            # If usage is not None, extract tokens directly from the response
-            if u is not None:
-                usage["input_tokens"] = getattr(u, "prompt_tokens", None) or getattr(u, "promptTokenCount", None)
-                usage["output_tokens"] = getattr(u, "completion_tokens", None) or getattr(u, "completionTokenCount", None)
-                usage["total_tokens"] = getattr(u, "total_tokens", None) or getattr(u, "totalTokenCount", None)
 
-
-
-
-
-            output_text = _out(resp).strip()
-
-            # Calculate input tokens based on the prompt
-            encoding = tiktoken.get_encoding("cl100k_base") # Use GPT-5's encoding
-            usage["input_tokens"] = len(encoding.encode(prompt_text))
-
-            # Calculate output tokens based on the model's response
-            usage["output_tokens"] = len(encoding.encode(output_text))
-
-            # Total tokens is the sum of input + output
-            usage["total_tokens"] = usage["input_tokens"] + usage["output_tokens"]
-
-            # If tokens are still missing, log the issue
-            if usage["input_tokens"] is None or usage["output_tokens"] is None:
-                raise RuntimeError(f"Missing token data in OpenAI response for model {_model}. Tokens: {usage}")
+            um = resp.usage
+            usage["input_tokens"] = um.input_tokens
+            usage["output_tokens"] = um.output_tokens
+            usage["total_tokens"] = um.total_tokens
 
             code = _out(resp).strip()
             if code:
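The old path re-estimated tokens with tiktoken and overwrote the counts the API had already reported; the new path trusts resp.usage directly. A sketch of those fields, assuming an initialised client and an illustrative model name:

```python
# Sketch: the OpenAI Responses API reports usage directly.
# OPENAI_API_KEY is assumed; the model name is illustrative.
from openai import OpenAI

client = OpenAI()
resp = client.responses.create(model="gpt-4o-mini", input="Say hi")
um = resp.usage
print(um.input_tokens, um.output_tokens, um.total_tokens)
```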
@@ -231,17 +207,25 @@ def mlearning_agent(user_prompt, system_prompt, coding_profile, temperature=0.1,
 
             # Try to surface any block reason (safety / policy / etc.)
             block_reason = None
-            output =
+            output = resp.get("output")
             for item in output:
                 fr = getattr(item, "finish_reason", None)
                 if fr and fr != "stop":
                     block_reason = fr
                     break
-
             if block_reason:
                 raise RuntimeError(f"{_model} stopped with reason: {block_reason}")
             raise RuntimeError(f"{_model} returned an empty response in this section due to insufficient data.")
 
+        except APIError as e:
+            # IMPORTANT: return VALID PYTHON so the dashboard can show the error
+            msg = f"I smxAI have instructed {e}"
+            return (
+                f"# {msg}\n"
+                "from syntaxmatrix.display import show\n"
+                f"show({msg!r})\n"
+            )
+
         except Exception as e:
             # IMPORTANT: return VALID PYTHON so the dashboard can show the error
             msg = f"I smxAI have instructed {e}"
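Both except branches return Python source rather than raising, so the dashboard's kernel can execute the string and render the failure for the user. The pattern in isolation, with an illustrative message:

```python
# The "return valid Python" error pattern used above, in isolation:
# the error is wrapped in code the dashboard can exec and display.
# The message text here is illustrative.
msg = "I smxAI have instructed Example error"
generated = (
    f"# {msg}\n"
    "from syntaxmatrix.display import show\n"
    f"show({msg!r})\n"
)
print(generated)  # exec()-ing this string would call show(...) with the message
```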
@@ -252,7 +236,8 @@ def mlearning_agent(user_prompt, system_prompt, coding_profile, temperature=0.1,
             )
 
     # Anthropic
-    def anthropic_generate_code():
+    def anthropic_generate_code():
+        nonlocal usage
         try:
             resp = _client.messages.create(
                 model=_model,
@@ -263,36 +248,11 @@ def mlearning_agent(user_prompt, system_prompt, coding_profile, temperature=0.1,
                     {"role": "user", "content": user_prompt}
                 ]
             )
-
-            um =
-
-
-            prompt = _get_usage_val(um, [
-                "prompt_token_count",
-                "promptTokenCount",
-                "input_tokens",
-            ])
-            candidates = _get_usage_val(um, [
-                "candidates_token_count",
-                "candidatesTokenCount",
-                "output_tokens",
-            ])
-            total = _get_usage_val(um, [
-                "total_token_count",
-                "totalTokenCount",
-                "total_tokens",
-            ])
-
-            usage["input_tokens"] = prompt
-            usage["output_tokens"] = candidates
-            usage["total_tokens"] = total
-
-            if usage["output_tokens"] is None and prompt is not None and total is not None:
-                try:
-                    usage["output_tokens"] = int(total) - int(prompt)
-                except Exception:
-                    # if it somehow fails, just leave it as None
-                    pass
+
+            um = resp.usage
+            usage["input_tokens"] = um.input_tokens
+            usage["output_tokens"] = um.output_tokens
+            usage["total_tokens"] = um.input_tokens + um.output_tokens
 
             # Extract plain text from Claude-style content blocks
             text_blocks = []
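Anthropic's Messages API reports input and output counts but no total, which is why the new code sums the two. A sketch, assuming an initialised client and an illustrative model name:

```python
# Sketch: Anthropic usage has no total_tokens field, so the total is derived.
# ANTHROPIC_API_KEY is assumed; the model name is illustrative.
import anthropic

client = anthropic.Anthropic()
resp = client.messages.create(
    model="claude-3-5-sonnet-latest",
    max_tokens=256,
    messages=[{"role": "user", "content": "Say hi"}],
)
total = resp.usage.input_tokens + resp.usage.output_tokens
print(resp.usage.input_tokens, resp.usage.output_tokens, total)
```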
@@ -323,6 +283,7 @@ def mlearning_agent(user_prompt, system_prompt, coding_profile, temperature=0.1,
 
     # OpenAI Chat Completions
     def openai_sdk_generate_code():
+        nonlocal usage
         try:
             resp = _client.chat.completions.create(
                 model=_model,
@@ -333,10 +294,13 @@ def mlearning_agent(user_prompt, system_prompt, coding_profile, temperature=0.1,
                 temperature=temperature,
                 max_tokens=max_tokens,
             )
-
-
-
-
+
+
+
+            um = resp.usage
+            usage["input_tokens"] = um.prompt_tokens
+            usage["output_tokens"] = um.completion_tokens
+            usage["total_tokens"] = um.total_tokens
 
             text = resp.choices[0].message.content
             if text:
|
|
@@ -364,7 +328,9 @@ def mlearning_agent(user_prompt, system_prompt, coding_profile, temperature=0.1,
|
|
|
364
328
|
"from syntaxmatrix.display import show\n"
|
|
365
329
|
f"show({msg!r})\n"
|
|
366
330
|
)
|
|
367
|
-
|
|
331
|
+
|
|
332
|
+
# print("TTOOKKEENN: ", token_calculator(system_prompt + user_prompt, coding_profile))
|
|
333
|
+
|
|
368
334
|
if _provider == "google":
|
|
369
335
|
code = google_generate_code()
|
|
370
336
|
elif _provider == "openai" and _model in GPT_MODELS_LATEST:
|
|
{syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/core.py

@@ -326,8 +326,8 @@ class SyntaxMUI:
     def set_project_name(self, project_name):
         self.project_name = project_name
 
-
-
+    def set_favicon(self, icon):
+        self.favicon = icon
 
     def set_site_logo(self, logo):
         self.site_logo = logo
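Together with the new module-level alias in syntaxmatrix/__init__.py, the setter becomes callable from user projects. A hypothetical caller-side sketch; the icon path is illustrative:

```python
# Hypothetical usage of the new setter via the module-level alias added in
# syntaxmatrix/__init__.py; the icon path is illustrative.
import syntaxmatrix as smx

smx.set_favicon("static/icons/favicon.png")
```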
{syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/routes.py

@@ -205,7 +205,6 @@ def setup_routes(smx):
         )
         return resp
 
-
     def head_html():
         # Determine a contrasting mobile text color based on the sidebar background.
         mobile_text_color = smx.theme["nav_text"]
@@ -497,8 +496,9 @@
         padding: 2px 8px;
         color:cyan;
     }}
-    </style>
 
+    </style>
+
     <!-- Add MathJax -->
     <script src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js"></script>
     <script src="https://cdn.plot.ly/plotly-latest.min.js"></script>
{syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.2}/syntaxmatrix/templates/dashboard.html

@@ -680,29 +680,23 @@
     <br><br>
     <div class="refined-qblock">
       <span class="refined-q-label"><b>My Thought Process:</b></span><br>
-      <span class="refined-q">{{ refined_question }}</span>
+      <span class="refined-q">{{ refined_question|safe }}</span>
     </div><br>
     <div class="refined-qblock">
-
-
-
-
+      <b>Tasks Performed: </b><br>
+      {% for task in tasks %}
+        <span class="refined-q">- {{ task.replace('_', ' ').capitalize() + ', ' }}</span><br>
+      {% endfor %}
     </div><br>
     {% if llm_usage %}
     <div class="refined-qblock">
-      <
-
-      <
-
-
-
-
-        Total: {{ llm_usage.total_tokens }}
-      </span>
-    {% else %}
-      <span class="refined-q"><em>No usage information available.</em></span>
-    {% endif %}
-    </div><br>
+      <b>LLM: </b><span>{{ llm_usage.provider.capitalize() }} | {{ llm_usage.model }}</span><br>
+      <b>Token Usage: </b>
+        <li>- Input Tokens: {{ llm_usage.input_tokens }}</li>
+        <li>- Output Tokens: {{ llm_usage.output_tokens }}</li>
+        <li>- Total Tokens: {{ llm_usage.total_tokens }}</li>
+      </ul>
+    </div>
     {% endif %}
     {% endif %}
     {% if ai_outputs %}
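The reworked block renders the provider, model, and the three token counts captured in agents.py. A minimal sketch of the llm_usage mapping the template appears to expect; the keys are taken from the template above, the values are illustrative:

```python
# Minimal sketch of the llm_usage mapping the template reads; keys come from
# the template, values are illustrative (Jinja resolves dict keys via dot access).
llm_usage = {
    "provider": "openai",
    "model": "gpt-4o-mini",
    "input_tokens": 512,
    "output_tokens": 128,
    "total_tokens": 640,
}
```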
@@ -715,7 +709,6 @@
     </div>
     {% endfor %}
     {% endif %}
-
     {% if ai_code %}
     <div>
       <a href="#" onclick="toggleCode();return false;" id="toggle-link">Show Code</a>