syntaxmatrix 2.5.8.2__py3-none-any.whl → 2.6.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. syntaxmatrix/agentic/agents.py +1149 -54
  2. syntaxmatrix/agentic/agents_orchestrer.py +326 -0
  3. syntaxmatrix/agentic/code_tools_registry.py +27 -32
  4. syntaxmatrix/commentary.py +16 -16
  5. syntaxmatrix/core.py +145 -75
  6. syntaxmatrix/db.py +416 -4
  7. syntaxmatrix/{display.py → display_html.py} +2 -6
  8. syntaxmatrix/gpt_models_latest.py +1 -1
  9. syntaxmatrix/media/__init__.py +0 -0
  10. syntaxmatrix/media/media_pixabay.py +277 -0
  11. syntaxmatrix/models.py +1 -1
  12. syntaxmatrix/page_builder_defaults.py +183 -0
  13. syntaxmatrix/page_builder_generation.py +1122 -0
  14. syntaxmatrix/page_layout_contract.py +644 -0
  15. syntaxmatrix/page_patch_publish.py +1471 -0
  16. syntaxmatrix/preface.py +128 -8
  17. syntaxmatrix/profiles.py +26 -13
  18. syntaxmatrix/routes.py +1475 -429
  19. syntaxmatrix/selftest_page_templates.py +360 -0
  20. syntaxmatrix/settings/client_items.py +28 -0
  21. syntaxmatrix/settings/model_map.py +1022 -208
  22. syntaxmatrix/settings/prompts.py +328 -130
  23. syntaxmatrix/static/assets/hero-default.svg +22 -0
  24. syntaxmatrix/static/icons/bot-icon.png +0 -0
  25. syntaxmatrix/static/icons/favicon.png +0 -0
  26. syntaxmatrix/static/icons/logo.png +0 -0
  27. syntaxmatrix/static/icons/logo3.png +0 -0
  28. syntaxmatrix/templates/admin_branding.html +104 -0
  29. syntaxmatrix/templates/admin_secretes.html +108 -0
  30. syntaxmatrix/templates/dashboard.html +116 -72
  31. syntaxmatrix/templates/edit_page.html +2535 -0
  32. syntaxmatrix/utils.py +2365 -2411
  33. {syntaxmatrix-2.5.8.2.dist-info → syntaxmatrix-2.6.1.dist-info}/METADATA +6 -2
  34. {syntaxmatrix-2.5.8.2.dist-info → syntaxmatrix-2.6.1.dist-info}/RECORD +37 -24
  35. syntaxmatrix/generate_page.py +0 -644
  36. syntaxmatrix/static/icons/hero_bg.jpg +0 -0
  37. {syntaxmatrix-2.5.8.2.dist-info → syntaxmatrix-2.6.1.dist-info}/WHEEL +0 -0
  38. {syntaxmatrix-2.5.8.2.dist-info → syntaxmatrix-2.6.1.dist-info}/licenses/LICENSE.txt +0 -0
  39. {syntaxmatrix-2.5.8.2.dist-info → syntaxmatrix-2.6.1.dist-info}/top_level.txt +0 -0
syntaxmatrix/core.py CHANGED
@@ -16,7 +16,6 @@ from .file_processor import process_admin_pdf_files
 from google.genai import types
 from .vector_db import query_embeddings
 from .vectorizer import embed_text
-from syntaxmatrix.settings.prompts import SMXAI_CHAT_ID, SMXAI_CHAT_INSTRUCTIONS, SMXAI_WEBSITE_DESCRIPTION
 from typing import List, Generator
 from .auth import init_auth_db
 from . import profiles as _prof
@@ -28,20 +27,26 @@ from html import unescape
 from .plottings import render_plotly, pyplot, describe_plotly, describe_matplotlib
 from threading import RLock
 from syntaxmatrix.settings.model_map import GPT_MODELS_LATEST
-
+from syntaxmatrix.settings.prompts import(
+    SMXAI_CHAT_IDENTITY,
+    SMXAI_CHAT_INSTRUCTIONS,
+    SMXAI_WEBSITE_DESCRIPTION,
+)
+from syntaxmatrix.settings.client_items import read_client_file, getenv_api_key
 
 # ──────── framework‐local storage paths ────────
 # this ensures the key & data always live under the package dir,
-# regardless of where the developer `cd` into before launching.
 _CLIENT_DIR = detect_project_root()
 _HISTORY_DIR = os.path.join(_CLIENT_DIR, "smx_history")
 os.makedirs(_HISTORY_DIR, exist_ok=True)
 
+_BRANDING_DIR = os.path.join(_CLIENT_DIR, "branding")
+os.makedirs(_BRANDING_DIR, exist_ok=True)
+
 _SECRET_PATH = os.path.join(_CLIENT_DIR, ".smx_secret_key")
 
-_CLIENT_DOTENV_PATH = os.path.join(str(_CLIENT_DIR.parent), ".env")
-if os.path.isfile(_CLIENT_DOTENV_PATH):
-    load_dotenv(_CLIENT_DOTENV_PATH, override=True)
+# OPENAI_API_KEY = getenv_api_key(_CLIENT_DIR, "OPENAI_API_KEY"))
+# dotenv_content = read_client_file(_CLIENT_DIR, ".env")
 
 _ICONS_PATH = os.path.join(_CLIENT_DIR, "static", "icons")
 os.makedirs(_ICONS_PATH, exist_ok=True)
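The old dotenv bootstrap is replaced by helpers from the new `syntaxmatrix/settings/client_items.py` module, which this diff lists but does not show. A minimal usage sketch, assuming only the call shapes visible in the commented-out lines above (the helpers' actual signatures and return values are not confirmed by this diff):

```python
from pathlib import Path

from syntaxmatrix.settings.client_items import getenv_api_key, read_client_file

client_dir = Path(".")  # stand-in for _CLIENT_DIR = detect_project_root()

# Assumed: resolve a provider API key for this client project (from its .env or the process environment).
openai_key = getenv_api_key(client_dir, "OPENAI_API_KEY")

# Assumed: return the text of a client-side file such as .env, if it exists.
dotenv_content = read_client_file(client_dir, ".env")
```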
@@ -53,9 +58,9 @@ class SyntaxMUI:
         host="127.0.0.1",
         port="5080",
         user_icon="👩🏿‍🦲",
-        bot_icon="<img src='/static/icons/favicon.png' width=20' alt='bot'/>",
+        bot_icon="<img src='/static/icons/bot-icon.png' width=20' alt='bot'/>",
         favicon="/static/icons/favicon.png",
-        site_logo="<img src='/static/icons/logo.png' width='30' alt='logo'/>",
+        site_logo="<img src='/static/icons/logo.png' width='45' alt='logo'/>",
         site_title="SyntaxMatrix",
         project_name="smxAI",
         theme_name="light",
@@ -70,13 +75,15 @@ class SyntaxMUI:
         self.bot_icon = bot_icon
         self.site_logo = site_logo
         self.favicon = favicon
+        self._default_site_logo = site_logo
+        self._default_favicon = favicon
         self.site_title = site_title
         self.project_name = project_name
         self.ui_mode = ui_mode
         self.theme_toggle_enabled = False
         self.user_files_enabled = False
         self.registration_enabled = False
-        self.smxai_identity = SMXAI_CHAT_ID
+        self.smxai_identity = SMXAI_CHAT_IDENTITY
         self.smxai_instructions = SMXAI_CHAT_INSTRUCTIONS
         self.website_description = SMXAI_WEBSITE_DESCRIPTION
         self._eda_output = {} # {chat_id: html}
@@ -97,11 +104,18 @@ class SyntaxMUI:
         self._last_llm_usage = None
         routes.setup_routes(self)
 
-        self._admin_profile = {}
-        self._chat_profile = {}
-        self._coding_profile = {}
-        self._classification_profile = {}
-        self._summarization_profile = {}
+        # Apply client branding overrides if present on disk
+        self._apply_branding_from_disk()
+
+        # LLM Profiles
+        self.admin_profile = {}
+        self.chat_profile = {}
+        self.classifier_profile = {}
+        self.summarizer_profile = {}
+        self.coder_profile = {}
+        self.imagetexter_profile = {}
+        self.textimager_profile = {}
+        self.imagereditor_profile = {}
 
         self._gpt_models_latest_prev_resp_ids = {}
         self.is_streaming = False
@@ -324,23 +338,49 @@ class SyntaxMUI:
         col_html += "</div>"
         return col_html
 
+    # Site Branding
     def set_site_title(self, title):
         self.site_title = title
-
     def set_project_name(self, project_name):
         self.project_name = project_name
-
     def set_favicon(self, icon):
         self.favicon = icon
-
     def set_site_logo(self, logo):
         self.site_logo = logo
-
     def set_user_icon(self, icon):
         self.user_icon = icon
-
     def set_bot_icon(self, icon):
         self.bot_icon = icon
+
+    def _apply_branding_from_disk(self):
+        """
+        If a client logo/favicon exists in syntaxmatrixdir/branding/,
+        use it; otherwise keep the framework defaults.
+        """
+        branding_dir = os.path.join(_CLIENT_DIR, "branding")
+
+        def _pick(basename: str):
+            for ext in (".png", ".jpg", ".jpeg"):
+                fn = f"{basename}{ext}"
+                p = os.path.join(branding_dir, fn)
+                if os.path.exists(p):
+                    return fn
+            return None
+
+        logo_fn = _pick("logo")
+        fav_fn = _pick("favicon")
+
+        if logo_fn:
+            # Use client-served endpoint (added in routes.py below)
+            self.site_logo = f"<img src='/branding/{logo_fn}' width='45' alt='logo'/>"
+        else:
+            self.site_logo = getattr(self, "_default_site_logo", self.site_logo)
+
+        if fav_fn:
+            self.favicon = f"/branding/{fav_fn}"
+        else:
+            self.favicon = getattr(self, "_default_favicon", self.favicon)
+
 
     def text_input(self, key, id, label, placeholder=""):
         if not placeholder:
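Taken together with the `_BRANDING_DIR` bootstrap above, branding becomes a drop-in override. A hedged usage sketch based only on the method body shown (the `/branding/<file>` endpoint it references is added in routes.py, which is not reproduced here):

```python
import os

from syntaxmatrix.core import SyntaxMUI  # module path as shown in this diff

# Assumed layout: the detected client project root contains a branding/ folder
# with optional logo.(png|jpg|jpeg) and favicon.(png|jpg|jpeg) files.
os.makedirs("branding", exist_ok=True)   # only equivalent to _BRANDING_DIR when the project root is the CWD

smx = SyntaxMUI()            # __init__ calls _apply_branding_from_disk()
print(smx.site_logo)         # "<img src='/branding/logo.png' ...>" when branding/logo.png exists
print(smx.favicon)           # "/branding/favicon.png" when present, else the packaged default
```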
@@ -596,18 +636,17 @@ class SyntaxMUI:
 
 
     def classify_query_intent(self, query: str) -> str:
-        from syntaxmatrix.gpt_models_latest import extract_output_text as _out, set_args
 
-        if not self._classification_profile:
-            classification_profile = _prof.get_profile('classification') or _prof.get_profile('admin')
-            if not classification_profile:
-                return {"Error": "Set a profile for Classification"}
-            self._classification_profile = classification_profile
-            self._classification_profile['client'] = _prof.get_client(classification_profile)
+        if not self.classifier_profile:
+            classifier_profile = _prof.get_profile('classifier') or _prof.get_profile('chat') or _prof.get_profile('summarizer') or _prof.get_profile('admin')
+            if not classifier_profile:
+                return "Error: Set a profile for Classification"
+            self.classifier_profile = classifier_profile
+            self.classifier_profile['client'] = _prof.get_client(classifier_profile)
 
-        _client = self._classification_profile['client']
-        _provider = self._classification_profile['provider']
-        _model = self._classification_profile['model']
+        _client = self.classifier_profile['client']
+        _provider = self.classifier_profile['provider']
+        _model = self.classifier_profile['model']
 
         # New instruction format with hybrid option
         _intent_profile = "You are an intent classifier. Respond ONLY with the intent name."
@@ -701,35 +740,36 @@ class SyntaxMUI:
 
     def generate_contextual_title(self, chat_history):
 
-        if not self._summarization_profile:
-            summarization_profile = _prof.get_profile('summarization') or _prof.get_profile('chat') or _prof.get_profile('admin')
-            if not summarization_profile:
-                return {"Error": "Chat profile not set yet."}
+        if not self.summarizer_profile:
+            summarizer_profile = _prof.get_profile('summarizer') or _prof.get_profile('classifier') or _prof.get_profile('chat') or _prof.get_profile('admin')
+            if not summarizer_profile:
+                return "<code style='color:red;'>Error: No Agent setup yet.</code>"
 
-            self._summarization_profile = summarization_profile
-            self._summarization_profile['client'] = _prof.get_client(summarization_profile)
+            self.summarizer_profile = summarizer_profile
+            self.summarizer_profile['client'] = _prof.get_client(summarizer_profile)
 
         conversation = "\n".join([f"{role}: {msg}" for role, msg in chat_history])
         _title_profile = "You are a title generator that creates concise and relevant titles for the given conversations."
+
         _instructions = f"""
             Generate a contextual title (5 short words max) from the given Conversation History
             The title should be concise - with no preamble, relevant, and capture the essence of this Conversation: \n{conversation}.\n\n
             return only the title.
         """
 
-        _client = self._summarization_profile['client']
-        _provider = self._summarization_profile['provider']
-        _model = self._summarization_profile['model']
+        _client = self.summarizer_profile['client']
+        _provider = self.summarizer_profile['provider']
+        _model = self.summarizer_profile['model']
 
         def google_generated_title():
             try:
                 response = _client.models.generate_content(
                     model=_model,
-                    contents=f"{_title_profile}\n{_instructions}"
+                    contents=f"{_title_profile}\n\n{_instructions}"
                 )
                 return response.text.strip()
             except Exception as e:
-                return f"Summary agent error!"
+                return f"Google Summary agent error!"
 
         def gpt_models_latest_generated_title():
             try:
@@ -744,7 +784,7 @@ class SyntaxMUI:
                 resp = _client.responses.create(**args)
                 return _out(resp).strip()
             except Exception as e:
-                return f"Summary agent error!"
+                return f"OpenAI 5s Summary agent error!"
 
         def anthropic_generated_title():
             try:
@@ -757,7 +797,7 @@ class SyntaxMUI:
                 )
                 return response.content[0].text.strip()
             except Exception as e:
-                return f"Summary agent error!"
+                return f"Anthropic Summary agent error!"
 
         def openai_sdk_generated_title():
             prompt = [
@@ -771,10 +811,14 @@ class SyntaxMUI:
                     temperature=0.3,
                     max_tokens=50
                 )
-                title = response.choices[0].message.content.strip().lower()
-                return title if title else ""
+                print("\nRESPONSE:\n", response)
+
+                title = response.choices[0].message.content
+
+                print("\nTITLE:\n", title)
+                return title
             except Exception as e:
-                return f"Summary agent error!"
+                return f"SDK Summary agent error!"
 
         if _provider == "google":
             title = google_generated_title()
@@ -796,18 +840,18 @@ class SyntaxMUI:
 
     def process_query_stream(self, query: str, context: str, history: list, stream=True) -> Generator[str, None, None]:
 
-        if not self._chat_profile:
+        if not self.chat_profile:
             chat_profile = _prof.get_profile("chat") or _prof.get_profile("admin")
             if not chat_profile:
                 yield """<p style='color:red;'>Error: Chat profile is not configured. Add a chat profile inside the admin panel or contact your administrator.</p>
                 """
                 return None
-            self._chat_profile = chat_profile
-            self._chat_profile['client'] = _prof.get_client(chat_profile)
+            self.chat_profile = chat_profile
+            self.chat_profile['client'] = _prof.get_client(chat_profile)
 
-        _provider = self._chat_profile['provider']
-        _client = self._chat_profile['client']
-        _model = self._chat_profile['model']
+        _provider = self.chat_profile['provider']
+        _client = self.chat_profile['client']
+        _model = self.chat_profile['model']
 
         _contents = f"""
             {self.smxai_instructions}\n\n
@@ -881,18 +925,18 @@ class SyntaxMUI:
 
     def process_query(self, query, context, history, stream=False):
 
-        if not self._chat_profile:
+        if not self.chat_profile:
             chat_profile = _prof.get_profile("chat") or _prof.get_profile("admin")
             if not chat_profile:
                 return """<p style='color:red;'>Error: Chat profile is not configured. Add a chat profile inside the admin panel or contact your administrator.</p>
                 """
                 return
 
-            self._chat_profile = chat_profile
-            self._chat_profile['client'] = _prof.get_client(chat_profile)
-        _provider = self._chat_profile['provider']
-        _client = self._chat_profile['client']
-        _model = self._chat_profile['model']
+            self.chat_profile = chat_profile
+            self.chat_profile['client'] = _prof.get_client(chat_profile)
+        _provider = self.chat_profile['provider']
+        _client = self.chat_profile['client']
+        _model = self.chat_profile['model']
         _contents = f"""
             {self.smxai_instructions}\n\n
             Question: {query}\n
@@ -1031,7 +1075,7 @@ class SyntaxMUI:
             return code
         _prompt = f"```python\n{code}\n```"
 
-        repair_profile = _prof.get_profile("vision2text") or _prof.get_profile("admin")
+        repair_profile = self.coder_profile
         if not repair_profile:
             return (
                 '<div class="smx-alert smx-alert-warn">'
@@ -1141,22 +1185,27 @@ class SyntaxMUI:
         tasks = [str(t).strip().lower() for t in tasks if str(t).strip()]
 
         ai_profile = """
-        - You are a Python expert specializing in Data Science (DS) and Machine Learning (ML).
-        - Your task is to generate a single, complete, production-quality, executable Python script for a Jupyter-like Python kernel, based on the given instructions.
+        - You are a Python expert specialising in Data Science (DS) and Machine Learning (ML).
+        - Your task is to generate a single, complete, production-ready Python script that can be executed in a Jupyter-like Python kernel, based on the given instructions.
         - The dataset is already loaded as a pandas DataFrame named `df` (no file I/O or file uploads).
-        - Make a copy of `df` and name it `df_copy`.
-        - Make sure `df_copy` is preprocessed and cleaned, and name it `df_cleaned`, if not already done so.
+        - Make a copy of `df` and name it `df_copy`.
+        - Make sure `df_copy` is preprocessed and cleaned, and name it `df_cleaned`, if not already done so.
         - Work only with `df_cleaned` to perform the ML tasks described in the given context.
-        - Select your features and targets, from `df_cleaned`, with care and name it `required_cols`
-        - Create your 'df_filtered by doing: df_filtered = df_cleaned[required_cols].
-        - Use the {TEMPLATE_CATALOGUE} below to educate yourself on which visualizations you will implement in the code, and ensure the implementations are in the code you generate.
-        - The final output MUST BE the complete, executable Python code only, enclosed in a single markdown code block (```python ... ```), and MUST BE able to fulfill the user's request: {tasks}.
+
+        - Always treat modelling as features `X` and target `y`:
+            * Choose ONE target column in `df_cleaned` (the value to be predicted) and refer to it as `target_col` or `y`.
+            * Build the feature matrix `X` from `df_cleaned` WITHOUT including the target column or any direct transformation of it.
+            * Examples of forbidden feature leakage: if predicting `sellingprice`, do NOT include `sellingprice`, `log_sellingprice`, `margin = sellingprice - mmr`, or any other direct function of `sellingprice` in `X`.
+            * You may create target-derived columns (margins, flags, percentage differences) for descriptive tables or plots, but NEVER use them as model inputs.
+
+        - When you need a modelling frame, define `required_cols = [target_col] + feature_cols` where `feature_cols` excludes the target and its transforms, and then create `df_filtered = df_cleaned[required_cols]`.
+
+        - Use the {TEMPLATE_CATALOGUE} below to educate yourself about available helper functions and reference code, and ensure the implementations are in the code you generate.
+        - The final output MUST BE the complete, executable Python code for the requested analysis, wrapped in a single fenced Python code block (```python ... ```), and MUST BE able to fulfil the user's request: {tasks}.
         - Do not include any explanatory text or markdown outside the code block.
         """
 
         TEMPLATE_CATALOGUE = """
-        ### Available SyntaxMatrix templates (use these instead of inventing new helpers)
-
         Visualisation templates:
         - viz_pie(df, category_col=None, top_k=8): pie/donut shares within a category.
         - viz_stacked_bar(df, x=None, hue=None, normalise=True): composition across groups.
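The new leakage rules in `ai_profile` reduce to a small mechanical pattern when the coder agent builds its modelling frame. A minimal illustrative sketch (toy data and column names invented here; this is not output of the agent):

```python
import pandas as pd

# Toy stand-in for df_cleaned; in the real flow it is derived from df_copy during preprocessing.
df_cleaned = pd.DataFrame({
    "sellingprice": [21000, 18500, 30250],  # target
    "mmr": [20500, 19000, 29800],
    "odometer": [32000, 54000, 12000],
    "year": [2018, 2016, 2021],
})

target_col = "sellingprice"
# Feature columns must exclude the target and any direct transform of it
# (e.g. log_sellingprice or margin = sellingprice - mmr).
feature_cols = [c for c in df_cleaned.columns if c != target_col]

required_cols = [target_col] + feature_cols
df_filtered = df_cleaned[required_cols]

X = df_filtered[feature_cols]  # model inputs
y = df_filtered[target_col]    # value to be predicted
```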
@@ -1316,9 +1365,9 @@ class SyntaxMUI:
            - And ends with at least 3 visible output (`show(...)` and/or `plt.show()`).
        """)
 
-        if not self._coding_profile:
-            coding_profile = _prof.get_profile("coding") # or _prof.get_profile("admin")
-            if not coding_profile:
+        if not self.coder_profile:
+            _coder_profile = _prof.get_profile("coder")
+            if not _coder_profile:
                 return (
                     '<div class="smx-alert smx-alert-warn">'
                     'No LLM profile configured for <code>coding</code> <br>'
@@ -1326,11 +1375,11 @@ class SyntaxMUI:
                     '</div>'
                 )
 
-            self._coding_profile = coding_profile
-            self._coding_profile['client'] = _prof.get_client(coding_profile)
+            self.coder_profile = _coder_profile
+            self.coder_profile['client'] = _prof.get_client(_coder_profile)
 
         # code = mlearning_agent(instructions, ai_profile, self._coding_profile)
-        code, usage = mlearning_agent(instructions, ai_profile, self._coding_profile)
+        code, usage = mlearning_agent(instructions, ai_profile, self.coder_profile)
         self._last_llm_usage = usage
 
         if code:
@@ -1378,7 +1427,22 @@ class SyntaxMUI:
                 return code.strip()
 
         return "Error: AI code generation failed."
+
 
+    def get_image_generator_profile(self):
+        if not self._fullvision_profile:
+            fullvision_profile = _prof.get_profile("fullvision")
+            if not fullvision_profile:
+                return (
+                    '<div class="smx-alert smx-alert-warn">'
+                    'No Full Vision profile configured for <code>coding</code> <br>'
+                    'Please, add it inside the admin panel or contact your Administrator.'
+                    '</div>'
+                )
+            self._fullvision_profile = fullvision_profile
+            self._fullvision_profile['client'] = _prof.get_client(fullvision_profile)
+
+        return self._fullvision_profile
 
     def sanitize_rough_to_markdown_task(self, rough: str) -> str:
         """
@@ -1414,7 +1478,13 @@ class SyntaxMUI:
 
         # Drop optional <rough> wrapper
         return out.replace("<rough>", "").replace("</rough>", "").strip()
-
+
+    def current_profile(self, agency):
+        current_profile = _prof.get_profile(agency) or _prof.get_profile('admin')
+        if not current_profile:
+            return "Error: Configure the valid LLM profile."
+        current_profile['client'] = _prof.get_client(current_profile)
+        return current_profile
 
     def run(self):
         url = f"http://{self.host}:{self.port}/"