syntaxmatrix 2.5.5.1__tar.gz → 2.5.5.3__tar.gz

This diff compares the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their respective public registries.
Files changed (73)
  1. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/PKG-INFO +1 -1
  2. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/SyntaxMatrix.egg-info/PKG-INFO +1 -1
  3. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/setup.py +1 -1
  4. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/__init__.py +1 -1
  5. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/agentic/agents.py +78 -121
  6. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/core.py +2 -2
  7. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/routes.py +2 -2
  8. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/templates/dashboard.html +12 -19
  9. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/LICENSE.txt +0 -0
  10. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/README.md +0 -0
  11. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/SyntaxMatrix.egg-info/SOURCES.txt +0 -0
  12. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/SyntaxMatrix.egg-info/dependency_links.txt +0 -0
  13. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/SyntaxMatrix.egg-info/requires.txt +0 -0
  14. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/SyntaxMatrix.egg-info/top_level.txt +0 -0
  15. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/pyproject.toml +0 -0
  16. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/setup.cfg +0 -0
  17. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/agentic/agent_tools.py +0 -0
  18. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/agentic/code_tools_registry.py +0 -0
  19. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/agentic/model_templates.py +0 -0
  20. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/auth.py +0 -0
  21. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/bootstrap.py +0 -0
  22. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/commentary.py +0 -0
  23. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/dataset_preprocessing.py +0 -0
  24. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/db.py +0 -0
  25. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/display.py +0 -0
  26. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/emailer.py +0 -0
  27. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/file_processor.py +0 -0
  28. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/generate_page.py +0 -0
  29. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/gpt_models_latest.py +0 -0
  30. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/history_store.py +0 -0
  31. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/kernel_manager.py +0 -0
  32. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/llm_store.py +0 -0
  33. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/models.py +0 -0
  34. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/plottings.py +0 -0
  35. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/profiles.py +0 -0
  36. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/project_root.py +0 -0
  37. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/session.py +0 -0
  38. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/settings/__init__.py +0 -0
  39. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/settings/default.yaml +0 -0
  40. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/settings/logging.py +0 -0
  41. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/settings/model_map.py +0 -0
  42. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/settings/prompts.py +0 -0
  43. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/settings/string_navbar.py +0 -0
  44. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/smiv.py +0 -0
  45. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/smpv.py +0 -0
  46. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/static/css/style.css +0 -0
  47. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/static/docs.md +0 -0
  48. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/static/icons/favicon.png +0 -0
  49. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/static/icons/hero_bg.jpg +0 -0
  50. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/static/icons/logo.png +0 -0
  51. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/static/icons/svg_497526.svg +0 -0
  52. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/static/icons/svg_497528.svg +0 -0
  53. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/static/js/chat.js +0 -0
  54. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/static/js/sidebar.js +0 -0
  55. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/static/js/widgets.js +0 -0
  56. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/templates/code_cell.html +0 -0
  57. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/templates/docs.html +0 -0
  58. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/templates/error.html +0 -0
  59. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/templates/login.html +0 -0
  60. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/templates/register.html +0 -0
  61. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/themes.py +0 -0
  62. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/ui_modes.py +0 -0
  63. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/utils.py +0 -0
  64. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/vector_db.py +0 -0
  65. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/vectordb/__init__.py +0 -0
  66. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/vectordb/adapters/__init__.py +0 -0
  67. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/vectordb/adapters/milvus_adapter.py +0 -0
  68. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/vectordb/adapters/pgvector_adapter.py +0 -0
  69. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/vectordb/adapters/sqlite_adapter.py +0 -0
  70. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/vectordb/base.py +0 -0
  71. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/vectordb/registry.py +0 -0
  72. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/vectorizer.py +0 -0
  73. {syntaxmatrix-2.5.5.1 → syntaxmatrix-2.5.5.3}/syntaxmatrix/workspace_db.py +0 -0
--- syntaxmatrix-2.5.5.1/PKG-INFO
+++ syntaxmatrix-2.5.5.3/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: syntaxmatrix
-Version: 2.5.5.1
+Version: 2.5.5.3
 Summary: SyntaxMUI: A customizable framework for Python AI Assistant Projects.
 Author: Bob Nti
 Author-email: bob.nti@syntaxmatrix.net
--- syntaxmatrix-2.5.5.1/SyntaxMatrix.egg-info/PKG-INFO
+++ syntaxmatrix-2.5.5.3/SyntaxMatrix.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: syntaxmatrix
-Version: 2.5.5.1
+Version: 2.5.5.3
 Summary: SyntaxMUI: A customizable framework for Python AI Assistant Projects.
 Author: Bob Nti
 Author-email: bob.nti@syntaxmatrix.net
--- syntaxmatrix-2.5.5.1/setup.py
+++ syntaxmatrix-2.5.5.3/setup.py
@@ -8,7 +8,7 @@ with open(os.path.join(this_directory, "README.md"), encoding="utf-8") as f:
 
 setup(
     name="syntaxmatrix",
-    version="2.5.5.1",
+    version="2.5.5.3",
     author="Bob Nti",
     author_email="bob.nti@syntaxmatrix.net",
     description="SyntaxMUI: A customizable framework for Python AI Assistant Projects.",
--- syntaxmatrix-2.5.5.1/syntaxmatrix/__init__.py
+++ syntaxmatrix-2.5.5.3/syntaxmatrix/__init__.py
@@ -25,7 +25,7 @@ warning = _app_instance.warning
 
 set_user_icon = _app_instance.set_user_icon
 set_bot_icon = _app_instance.set_bot_icon
-# set_favicon = _app_instance.set_favicon
+set_favicon = _app_instance.set_favicon
 set_project_name = _app_instance.set_project_name
 set_site_title = _app_instance.set_site_title
 set_site_logo = _app_instance.set_site_logo
--- syntaxmatrix-2.5.5.1/syntaxmatrix/agentic/agents.py
+++ syntaxmatrix-2.5.5.3/syntaxmatrix/agentic/agents.py
@@ -11,6 +11,7 @@ from .. import profiles as _prof
 from ..gpt_models_latest import set_args as _set_args, extract_output_text as _out
 from google.genai import types
 import tiktoken
+from google.genai.errors import APIError
 
 
 def token_calculator(total_input_content, llm_profile):
@@ -39,8 +40,7 @@ def token_calculator(total_input_content, llm_profile):
     else:
         enc = tiktoken.encoding_for_model(_model)
         input_prompt_tokens = len(enc.encode(total_input_content))
-        return input_prompt_tokens
-
+    return input_prompt_tokens
 
 def mlearning_agent(user_prompt, system_prompt, coding_profile, temperature=0.1, max_tokens=4096):
     """
@@ -94,83 +94,73 @@ def mlearning_agent(user_prompt, system_prompt, coding_profile, temperature=0.1,
 
     # Google
     def google_generate_code():
+        nonlocal usage
+        """
+        Generates content using the Gemini API and calculates token usage
+        including Context Overhead for consistency.
+        """
+
         try:
+            # 1. Client Initialization
             config = types.GenerateContentConfig(
                 system_instruction=system_prompt,
-                temperature=temperature,
+                temperature=temperature,
                 max_output_tokens=max_tokens,
-                response_mime_type="text/plain",
             )
-
+
+            # 2. API Call
             resp = _client.models.generate_content(
                 model=_model,
-                contents=user_prompt,
-                config=config
+                contents=[user_prompt],
+                config=config,
             )
-            # --- capture usage for Gemini / python-genai ---
-            um = getattr(resp, "usage_metadata", None) or getattr(resp, "usageMetadata", None)
-            if um is not None:
-                # Try all known names, then fall back to arithmetic
-                prompt = _get_usage_val(um, [
-                    "prompt_token_count",
-                    "promptTokenCount",
-                    "input_tokens",
-                ])
-                candidates = _get_usage_val(um, [
-                    "candidates_token_count",
-                    "candidatesTokenCount",
-                    "output_tokens",
-                ])
-                total = _get_usage_val(um, [
-                    "total_token_count",
-                    "totalTokenCount",
-                    "total_tokens",
-                ])
-
-                usage["input_tokens"] = prompt
-                usage["output_tokens"] = candidates
-                usage["total_tokens"] = total
-
-                # Fallback: if output not provided but prompt + total exist,
-                # derive it as total - prompt
-                if usage["output_tokens"] is None and prompt is not None and total is not None:
-                    try:
-                        usage["output_tokens"] = int(total) - int(prompt)
-                    except Exception:
-                        # if it somehow fails, just leave it as None
-                        pass
-
-            # 1) Fast path: SDK convenience property
+
+            # 3. Token Usage Capture and Context Overhead Calculation
+            um = resp.usage_metadata
+            usage["input_tokens"] = um.prompt_token_count
+            usage["output_tokens"] = um.thoughts_token_count
+            usage["total_tokens"] = um.total_token_count
+
+            # 4. Response Extraction (same robust logic as before)
             text = getattr(resp, "text", None)
             if isinstance(text, str) and text.strip():
                 return text.strip()
+
             chunks = []
             candidates = getattr(resp, "candidates", None) or []
             for cand in candidates:
                 content = getattr(cand, "content", None)
-                parts = getattr(content, "parts", None) or []
-                for part in parts:
-                    t = getattr(part, "text", None)
-                    if t:
-                        chunks.append(str(t))
+                if content:
+                    parts = getattr(content, "parts", None) or []
+                    for part in parts:
+                        t = getattr(part, "text", None)
+                        if t:
+                            chunks.append(str(t))
+
             text = "\n".join(chunks).strip()
             if text:
                 return text
 
-            # Try to surface any block reason (safety / policy / etc.)
+            # 5. Handle blocked response
             fb = getattr(resp, "prompt_feedback", None)
             block_reason = getattr(fb, "block_reason", None) if fb else None
-            if block_reason:
-                raise RuntimeError(f"{_model} to blocked the response. Reason: {block_reason}")
-            raise RuntimeError(f"{_model} to say nothing in this section due to insufficient data.")
+            if block_reason and block_reason != types.BlockedReason.REASON_UNSPECIFIED:
+                raise RuntimeError(f"{_model} blocked the response. Reason: {block_reason.name}")
+            raise RuntimeError(f"{_model} failed to return content due to insufficient data.")
 
+        except APIError as e:
+            error_msg = f"Gemini API Error: {e}"
+
         except Exception as e:
-            msg = f"I smxAI have instructed {e}\n"
-            return (
-                f"# {msg}\n"
-                "from syntaxmatrix.display import show\n"
-                f"show({msg!r})\n"
-            )
+            error_msg = f"An unexpected error occurred during API call or processing: {e}"
+
+        # --- Return the error message wrapped in the required output code structure ---
+        msg = f"I smxAI have instructed {error_msg}\n"
+        return (
+            f"# {msg}\n"
+            "from syntaxmatrix.display import show\n"
+            f"show({msg!r})\n"
+        )
 
     # OpenAI Responses API
     def gpt_models_latest_generate_code():
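In the google-genai SDK, `response.usage_metadata` carries separate counters: `prompt_token_count` for input, `candidates_token_count` for generated output, `thoughts_token_count` for internal reasoning tokens on thinking-capable models, and `total_token_count`. The new code above records `thoughts_token_count` as `output_tokens`. A minimal sketch of reading the fields, assuming an API key in the environment (e.g. GEMINI_API_KEY); the model name and prompt are illustrative:

    from google import genai

    client = genai.Client()  # picks up the API key from the environment
    resp = client.models.generate_content(
        model="gemini-2.0-flash",
        contents=["Write a haiku about tokenizers."],
    )

    um = resp.usage_metadata
    print(um.prompt_token_count)      # input tokens
    print(um.candidates_token_count)  # generated (output) tokens
    print(um.thoughts_token_count)    # reasoning tokens; None when the model emits none
    print(um.total_token_count)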
@@ -196,34 +186,11 @@ def mlearning_agent(user_prompt, system_prompt, coding_profile, temperature=0.1,
                 verbosity=reasoning_and_verbosity()[1],
             )
             resp = _client.responses.create(**args)
-
-            # --- Capture token usage (prompt, completion, and total tokens) ---
-            u = getattr(resp, "usage", None)
-            # If usage is not None, extract tokens directly from the response
-            if u is not None:
-                usage["input_tokens"] = getattr(u, "prompt_tokens", None) or getattr(u, "promptTokenCount", None)
-                usage["output_tokens"] = getattr(u, "completion_tokens", None) or getattr(u, "completionTokenCount", None)
-                usage["total_tokens"] = getattr(u, "total_tokens", None) or getattr(u, "totalTokenCount", None)
 
-            # --- If missing input/output tokens, fallback logic ---
-            if usage["input_tokens"] is None or usage["output_tokens"] is None:
-                # Use the raw response and fallback to manually calculate tokens
-                prompt_text = user_prompt
-                output_text = _out(resp).strip()
-
-                # Calculate input tokens based on the prompt
-                encoding = tiktoken.get_encoding("cl100k_base")  # Use GPT-5's encoding
-                usage["input_tokens"] = len(encoding.encode(prompt_text))
-
-                # Calculate output tokens based on the model's response
-                usage["output_tokens"] = len(encoding.encode(output_text))
-
-                # Total tokens is the sum of input + output
-                usage["total_tokens"] = usage["input_tokens"] + usage["output_tokens"]
-
-                # If tokens are still missing, log the issue
-                if usage["input_tokens"] is None or usage["output_tokens"] is None:
-                    raise RuntimeError(f"Missing token data in OpenAI response for model {_model}. Tokens: {usage}")
+            um = resp.usage
+            usage["input_tokens"] = um.input_tokens
+            usage["output_tokens"] = um.output_tokens
+            usage["total_tokens"] = um.total_tokens
 
             code = _out(resp).strip()
             if code:
@@ -231,17 +198,25 @@ def mlearning_agent(user_prompt, system_prompt, coding_profile, temperature=0.1,
 
             # Try to surface any block reason (safety / policy / etc.)
             block_reason = None
-            output = getattr(resp, "output", None) or []
+            output = resp.get("output")
             for item in output:
                 fr = getattr(item, "finish_reason", None)
                 if fr and fr != "stop":
                     block_reason = fr
                     break
-
             if block_reason:
                 raise RuntimeError(f"{_model} stopped with reason: {block_reason}")
             raise RuntimeError(f"{_model} returned an empty response in this section due to insufficient data.")
 
+        except APIError as e:
+            # IMPORTANT: return VALID PYTHON so the dashboard can show the error
+            msg = f"I smxAI have instructed {e}"
+            return (
+                f"# {msg}\n"
+                "from syntaxmatrix.display import show\n"
+                f"show({msg!r})\n"
+            )
+
         except Exception as e:
             # IMPORTANT: return VALID PYTHON so the dashboard can show the error
             msg = f"I smxAI have instructed {e}"
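With the OpenAI Responses API, `response.usage` exposes `input_tokens`, `output_tokens`, and `total_tokens` directly, which is what the simplified capture above relies on. Note that on the SDK's typed response object, `output` is an attribute holding a list of output items rather than a dict key, so the `resp.get("output")` call added above assumes a dict-shaped payload. A minimal sketch, assuming OPENAI_API_KEY is set; the model name and prompt are illustrative:

    from openai import OpenAI

    client = OpenAI()
    resp = client.responses.create(
        model="gpt-4.1-mini",
        input="Reply with a single-line Python comment.",
    )

    u = resp.usage
    print(u.input_tokens, u.output_tokens, u.total_tokens)

    # The typed response exposes output items as an attribute:
    for item in resp.output:
        print(type(item).__name__)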
@@ -252,7 +227,8 @@ def mlearning_agent(user_prompt, system_prompt, coding_profile, temperature=0.1,
             )
 
     # Anthropic
-    def anthropic_generate_code():
+    def anthropic_generate_code():
+        nonlocal usage
         try:
             resp = _client.messages.create(
                 model=_model,
@@ -263,36 +239,11 @@ def mlearning_agent(user_prompt, system_prompt, coding_profile, temperature=0.1,
                     {"role": "user", "content": user_prompt}
                 ]
             )
-            # usage in Responses API is usually a dict-like object
-            um = getattr(resp, "usage", None)
-            if um is not None:
-                # Try all known names, then fall back to arithmetic
-                prompt = _get_usage_val(um, [
-                    "prompt_token_count",
-                    "promptTokenCount",
-                    "input_tokens",
-                ])
-                candidates = _get_usage_val(um, [
-                    "candidates_token_count",
-                    "candidatesTokenCount",
-                    "output_tokens",
-                ])
-                total = _get_usage_val(um, [
-                    "total_token_count",
-                    "totalTokenCount",
-                    "total_tokens",
-                ])
-
-                usage["input_tokens"] = prompt
-                usage["output_tokens"] = candidates
-                usage["total_tokens"] = total
-
-                if usage["output_tokens"] is None and prompt is not None and total is not None:
-                    try:
-                        usage["output_tokens"] = int(total) - int(prompt)
-                    except Exception:
-                        # if it somehow fails, just leave it as None
-                        pass
+
+            um = resp.usage
+            usage["input_tokens"] = um.input_tokens
+            usage["output_tokens"] = um.output_tokens
+            usage["total_tokens"] = um.input_tokens + um.output_tokens
 
             # Extract plain text from Claude-style content blocks
             text_blocks = []
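Anthropic's Messages API reports `usage.input_tokens` and `usage.output_tokens` but no total field, which is why the new code sums the two. A minimal sketch, assuming ANTHROPIC_API_KEY is set; the model name and messages are illustrative:

    import anthropic

    client = anthropic.Anthropic()
    resp = client.messages.create(
        model="claude-3-5-sonnet-latest",
        max_tokens=256,
        system="You write terse Python.",
        messages=[{"role": "user", "content": "Print hello."}],
    )

    um = resp.usage
    print(um.input_tokens, um.output_tokens, um.input_tokens + um.output_tokens)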
@@ -323,6 +274,7 @@ def mlearning_agent(user_prompt, system_prompt, coding_profile, temperature=0.1,
 
     # OpenAI Chat Completions
     def openai_sdk_generate_code():
+        nonlocal usage
         try:
             resp = _client.chat.completions.create(
                 model=_model,
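Each provider closure now declares `nonlocal usage` before writing token counts into the shared dict. Strictly, `nonlocal` is only needed to rebind the name; in-place item assignment already reaches the enclosing scope's dict, as this standalone sketch shows:

    def make_agent():
        usage = {"input_tokens": None, "output_tokens": None}

        def call_provider():
            # Item assignment mutates the enclosing dict without `nonlocal`;
            # the declaration would only matter for a `usage = ...` rebinding.
            usage["input_tokens"] = 42
            usage["output_tokens"] = 7

        call_provider()
        return usage

    print(make_agent())  # {'input_tokens': 42, 'output_tokens': 7}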
@@ -333,10 +285,13 @@ def mlearning_agent(user_prompt, system_prompt, coding_profile, temperature=0.1,
                 temperature=temperature,
                 max_tokens=max_tokens,
             )
-            um = getattr(resp, "usage", None)
-            usage["input_tokens"] = _get_usage_val(um, ["prompt_tokens", "input_tokens"])
-            usage["output_tokens"] = _get_usage_val(um, ["completion_tokens", "output_tokens"])
-            usage["total_tokens"] = _get_usage_val(um, ["total_tokens"])
+
+
+
+            um = resp.usage
+            usage["input_tokens"] = um.prompt_tokens
+            usage["output_tokens"] = um.completion_tokens
+            usage["total_tokens"] = um.total_tokens
 
             text = resp.choices[0].message.content
             if text:
@@ -364,7 +319,9 @@ def mlearning_agent(user_prompt, system_prompt, coding_profile, temperature=0.1,
                 "from syntaxmatrix.display import show\n"
                 f"show({msg!r})\n"
             )
-
+
+    # print("TTOOKKEENN: ", token_calculator(system_prompt + user_prompt, coding_profile))
+
     if _provider == "google":
         code = google_generate_code()
     elif _provider == "openai" and _model in GPT_MODELS_LATEST:
--- syntaxmatrix-2.5.5.1/syntaxmatrix/core.py
+++ syntaxmatrix-2.5.5.3/syntaxmatrix/core.py
@@ -326,8 +326,8 @@ class SyntaxMUI:
     def set_project_name(self, project_name):
         self.project_name = project_name
 
-    # def set_favicon(self, icon):
-    #     self.favicon = icon
+    def set_favicon(self, icon):
+        self.favicon = icon
 
     def set_site_logo(self, logo):
         self.site_logo = logo
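With the method uncommented here and re-exported from `syntaxmatrix/__init__.py` (see the earlier hunk), applications can set a favicon in one call. A hedged usage sketch; the icon path is hypothetical:

    import syntaxmatrix as smx

    smx.set_favicon("static/icons/favicon.png")  # hypothetical path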
--- syntaxmatrix-2.5.5.1/syntaxmatrix/routes.py
+++ syntaxmatrix-2.5.5.3/syntaxmatrix/routes.py
@@ -205,7 +205,6 @@ def setup_routes(smx):
         )
         return resp
 
-
     def head_html():
         # Determine a contrasting mobile text color based on the sidebar background.
         mobile_text_color = smx.theme["nav_text"]
@@ -497,8 +496,9 @@ def setup_routes(smx):
         padding: 2px 8px;
         color:cyan;
       }}
-    </style>
 
+    </style>
+
     <!-- Add MathJax -->
     <script src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js"></script>
     <script src="https://cdn.plot.ly/plotly-latest.min.js"></script>
--- syntaxmatrix-2.5.5.1/syntaxmatrix/templates/dashboard.html
+++ syntaxmatrix-2.5.5.3/syntaxmatrix/templates/dashboard.html
@@ -680,29 +680,23 @@
   <br><br>
   <div class="refined-qblock">
     <span class="refined-q-label"><b>My Thought Process:</b></span><br>
-    <span class="refined-q">{{ refined_question }}</span>
+    <span class="refined-q">{{ refined_question|safe }}</span>
   </div><br>
   <div class="refined-qblock">
-    <span class="refined-q-label"><b>Tasks Performed:</b></span><br>
-    {% for task in tasks %}
-      <span class="refined-q">{{ task.replace("_", " ").capitalize() + ", " }}</span>
-    {% endfor %}
+    <b>Tasks Performed: </b><br>
+    {% for task in tasks %}
+      <span class="refined-q">- {{ task.replace('_', ' ').capitalize() + ', ' }}</span><br>
+    {% endfor %}
   </div><br>
   {% if llm_usage %}
   <div class="refined-qblock">
-    <span class="refined-q-label"><b>LLM Tokens:</b></span><br>
-    {% if llm_usage %}
-      <span class="refined-q">
-        Provider: {{ llm_usage.provider }},
-        Model: {{ llm_usage.model }},
-        Input: {{ llm_usage.input_tokens }},
-        Output: {{ llm_usage.output_tokens }},
-        Total: {{ llm_usage.total_tokens }}
-      </span>
-    {% else %}
-      <span class="refined-q"><em>No usage information available.</em></span>
-    {% endif %}
-  </div><br>
+    <b>LLM: </b><span>{{ llm_usage.provider.capitalize() }} | {{ llm_usage.model }}</span><br>
+    <b>Token Usage: </b>
+    <li>- Input Tokens: {{ llm_usage.input_tokens }}</li>
+    <li>- Output Tokens: {{ llm_usage.output_tokens }}</li>
+    <li>- Total Tokens: {{ llm_usage.total_tokens }}</li>
+    </ul>
+  </div>
   {% endif %}
   {% endif %}
   {% if ai_outputs %}
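The task labels in the reworked loop come from plain Python string methods evaluated by Jinja; note that `capitalize()` also lowercases everything after the first character, and every entry, including the last, keeps its trailing comma. The example task name is illustrative:

    task = "feature_engineering"
    print(task.replace("_", " ").capitalize() + ", ")  # -> "Feature engineering, "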
@@ -715,7 +709,6 @@
     </div>
     {% endfor %}
   {% endif %}
-
   {% if ai_code %}
   <div>
     <a href="#" onclick="toggleCode();return false;" id="toggle-link">Show Code</a>