syntaxmatrix 2.5.6__py3-none-any.whl → 2.6.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. syntaxmatrix/agentic/agents.py +1220 -169
  2. syntaxmatrix/agentic/agents_orchestrer.py +326 -0
  3. syntaxmatrix/agentic/code_tools_registry.py +27 -32
  4. syntaxmatrix/commentary.py +16 -16
  5. syntaxmatrix/core.py +185 -81
  6. syntaxmatrix/db.py +460 -4
  7. syntaxmatrix/{display.py → display_html.py} +2 -6
  8. syntaxmatrix/gpt_models_latest.py +1 -1
  9. syntaxmatrix/media/__init__.py +0 -0
  10. syntaxmatrix/media/media_pixabay.py +277 -0
  11. syntaxmatrix/models.py +1 -1
  12. syntaxmatrix/page_builder_defaults.py +183 -0
  13. syntaxmatrix/page_builder_generation.py +1122 -0
  14. syntaxmatrix/page_layout_contract.py +644 -0
  15. syntaxmatrix/page_patch_publish.py +1471 -0
  16. syntaxmatrix/preface.py +142 -21
  17. syntaxmatrix/profiles.py +28 -10
  18. syntaxmatrix/routes.py +1740 -453
  19. syntaxmatrix/selftest_page_templates.py +360 -0
  20. syntaxmatrix/settings/client_items.py +28 -0
  21. syntaxmatrix/settings/model_map.py +1022 -207
  22. syntaxmatrix/settings/prompts.py +328 -130
  23. syntaxmatrix/static/assets/hero-default.svg +22 -0
  24. syntaxmatrix/static/icons/bot-icon.png +0 -0
  25. syntaxmatrix/static/icons/favicon.png +0 -0
  26. syntaxmatrix/static/icons/logo.png +0 -0
  27. syntaxmatrix/static/icons/logo3.png +0 -0
  28. syntaxmatrix/templates/admin_branding.html +104 -0
  29. syntaxmatrix/templates/admin_features.html +63 -0
  30. syntaxmatrix/templates/admin_secretes.html +108 -0
  31. syntaxmatrix/templates/dashboard.html +296 -133
  32. syntaxmatrix/templates/dataset_resize.html +535 -0
  33. syntaxmatrix/templates/edit_page.html +2535 -0
  34. syntaxmatrix/utils.py +2431 -2383
  35. {syntaxmatrix-2.5.6.dist-info → syntaxmatrix-2.6.2.dist-info}/METADATA +6 -2
  36. {syntaxmatrix-2.5.6.dist-info → syntaxmatrix-2.6.2.dist-info}/RECORD +39 -24
  37. syntaxmatrix/generate_page.py +0 -644
  38. syntaxmatrix/static/icons/hero_bg.jpg +0 -0
  39. {syntaxmatrix-2.5.6.dist-info → syntaxmatrix-2.6.2.dist-info}/WHEEL +0 -0
  40. {syntaxmatrix-2.5.6.dist-info → syntaxmatrix-2.6.2.dist-info}/licenses/LICENSE.txt +0 -0
  41. {syntaxmatrix-2.5.6.dist-info → syntaxmatrix-2.6.2.dist-info}/top_level.txt +0 -0
syntaxmatrix/core.py CHANGED
@@ -16,7 +16,6 @@ from .file_processor import process_admin_pdf_files
  from google.genai import types
  from .vector_db import query_embeddings
  from .vectorizer import embed_text
- from syntaxmatrix.settings.prompts import SMXAI_CHAT_ID, SMXAI_CHAT_INSTRUCTIONS, SMXAI_WEBSITE_DESCRIPTION
  from typing import List, Generator
  from .auth import init_auth_db
  from . import profiles as _prof
@@ -28,20 +27,26 @@ from html import unescape
  from .plottings import render_plotly, pyplot, describe_plotly, describe_matplotlib
  from threading import RLock
  from syntaxmatrix.settings.model_map import GPT_MODELS_LATEST
-
+ from syntaxmatrix.settings.prompts import(
+ SMXAI_CHAT_IDENTITY,
+ SMXAI_CHAT_INSTRUCTIONS,
+ SMXAI_WEBSITE_DESCRIPTION,
+ )
+ from syntaxmatrix.settings.client_items import read_client_file, getenv_api_key

  # ──────── framework‐local storage paths ────────
  # this ensures the key & data always live under the package dir,
- # regardless of where the developer `cd` into before launching.
  _CLIENT_DIR = detect_project_root()
  _HISTORY_DIR = os.path.join(_CLIENT_DIR, "smx_history")
  os.makedirs(_HISTORY_DIR, exist_ok=True)

+ _BRANDING_DIR = os.path.join(_CLIENT_DIR, "branding")
+ os.makedirs(_BRANDING_DIR, exist_ok=True)
+
  _SECRET_PATH = os.path.join(_CLIENT_DIR, ".smx_secret_key")

- _CLIENT_DOTENV_PATH = os.path.join(str(_CLIENT_DIR.parent), ".env")
- if os.path.isfile(_CLIENT_DOTENV_PATH):
- load_dotenv(_CLIENT_DOTENV_PATH, override=True)
+ # OPENAI_API_KEY = getenv_api_key(_CLIENT_DIR, "OPENAI_API_KEY"))
+ # dotenv_content = read_client_file(_CLIENT_DIR, ".env")

  _ICONS_PATH = os.path.join(_CLIENT_DIR, "static", "icons")
  os.makedirs(_ICONS_PATH, exist_ok=True)
@@ -53,9 +58,9 @@ class SyntaxMUI:
  host="127.0.0.1",
  port="5080",
  user_icon="👩🏿‍🦲",
- bot_icon="<img src='/static/icons/favicon.png' width=20' alt='bot'/>",
- favicon="", # /static/icons/favicon.png",
- site_logo="<img src='/static/icons/logo.png' width='30' alt='logo'/>",
+ bot_icon="<img src='/static/icons/bot-icon.png' width=20' alt='bot'/>",
+ favicon="/static/icons/favicon.png",
+ site_logo="<img src='/static/icons/logo.png' width='45' alt='logo'/>",
  site_title="SyntaxMatrix",
  project_name="smxAI",
  theme_name="light",
@@ -70,13 +75,15 @@ class SyntaxMUI:
  self.bot_icon = bot_icon
  self.site_logo = site_logo
  self.favicon = favicon
+ self._default_site_logo = site_logo
+ self._default_favicon = favicon
  self.site_title = site_title
  self.project_name = project_name
  self.ui_mode = ui_mode
  self.theme_toggle_enabled = False
  self.user_files_enabled = False
  self.registration_enabled = False
- self.smxai_identity = SMXAI_CHAT_ID
+ self.smxai_identity = SMXAI_CHAT_IDENTITY
  self.smxai_instructions = SMXAI_CHAT_INSTRUCTIONS
  self.website_description = SMXAI_WEBSITE_DESCRIPTION
  self._eda_output = {} # {chat_id: html}
@@ -97,15 +104,23 @@ class SyntaxMUI:
  self._last_llm_usage = None
  routes.setup_routes(self)

- self._admin_profile = {}
- self._chat_profile = {}
- self._coding_profile = {}
- self._classification_profile = {}
- self._summarization_profile = {}
+ # Apply client branding overrides if present on disk
+ self._apply_branding_from_disk()
+
+ # LLM Profiles
+ self.admin_profile = {}
+ self.chat_profile = {}
+ self.classifier_profile = {}
+ self.summarizer_profile = {}
+ self.coder_profile = {}
+ self.imagetexter_profile = {}
+ self.textimager_profile = {}
+ self.imageeditor_profile = {}

  self._gpt_models_latest_prev_resp_ids = {}
  self.is_streaming = False
  self.stream_args = {}
+ self._apply_feature_flags_from_db()

  self._recent_visual_summaries = []

@@ -315,6 +330,24 @@ class SyntaxMUI:

  def enable_registration(self):
  self.registration_enabled = True
+
+ def _apply_feature_flags_from_db(self):
+ """
+ Pull persisted toggles from app_settings.
+ """
+ def _truthy(v):
+ return str(v or "").strip().lower() in ("1", "true", "yes", "on")
+
+ try:
+ stream_v = db.get_setting("feature.stream_mode", "0")
+ user_files_v = db.get_setting("feature.user_files", "0")
+
+ self.is_streaming = _truthy(stream_v)
+ self.user_files_enabled = _truthy(user_files_v)
+ except Exception:
+ # Keep defaults if DB isn't ready for any reason
+ pass
+

  @staticmethod
  def columns(components):
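The `_apply_feature_flags_from_db` helper added above restores the streaming and user-files toggles from persisted `app_settings` values, treating only `"1"`, `"true"`, `"yes"` and `"on"` as enabled. A minimal sketch of how a persisted toggle would be interpreted on startup; only `db.get_setting(key, default)` appears in this diff, so the import path and the `db.set_setting` counterpart below are assumptions for illustration:

```python
# Sketch: how _apply_feature_flags_from_db interprets persisted toggle values.
# The import path and db.set_setting(...) are assumed; only db.get_setting is in the diff.
from syntaxmatrix import db  # assumed import path

def _truthy(v):
    # Same parsing rule as the new helper: a small allow-list of string forms.
    return str(v or "").strip().lower() in ("1", "true", "yes", "on")

db.set_setting("feature.stream_mode", "true")   # hypothetical setter
db.set_setting("feature.user_files", "off")     # anything outside the allow-list reads as False

is_streaming = _truthy(db.get_setting("feature.stream_mode", "0"))        # True
user_files_enabled = _truthy(db.get_setting("feature.user_files", "0"))   # False
```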
@@ -324,23 +357,49 @@ class SyntaxMUI:
  col_html += "</div>"
  return col_html

+ # Site Branding
  def set_site_title(self, title):
  self.site_title = title
-
  def set_project_name(self, project_name):
  self.project_name = project_name
-
  def set_favicon(self, icon):
  self.favicon = icon
-
  def set_site_logo(self, logo):
  self.site_logo = logo
-
  def set_user_icon(self, icon):
  self.user_icon = icon
-
  def set_bot_icon(self, icon):
  self.bot_icon = icon
+
+ def _apply_branding_from_disk(self):
+ """
+ If a client logo/favicon exists in syntaxmatrixdir/branding/,
+ use it; otherwise keep the framework defaults.
+ """
+ branding_dir = os.path.join(_CLIENT_DIR, "branding")
+
+ def _pick(basename: str):
+ for ext in (".png", ".jpg", ".jpeg"):
+ fn = f"{basename}{ext}"
+ p = os.path.join(branding_dir, fn)
+ if os.path.exists(p):
+ return fn
+ return None
+
+ logo_fn = _pick("logo")
+ fav_fn = _pick("favicon")
+
+ if logo_fn:
+ # Use client-served endpoint (added in routes.py below)
+ self.site_logo = f"<img src='/branding/{logo_fn}' width='45' alt='logo'/>"
+ else:
+ self.site_logo = getattr(self, "_default_site_logo", self.site_logo)
+
+ if fav_fn:
+ self.favicon = f"/branding/{fav_fn}"
+ else:
+ self.favicon = getattr(self, "_default_favicon", self.favicon)
+

  def text_input(self, key, id, label, placeholder=""):
  if not placeholder:
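`_apply_branding_from_disk` looks for `logo.*` and `favicon.*` under `<project root>/branding/` and, when found, points the logo and favicon at a `/branding/<filename>` URL that routes.py is expected to serve. A sketch of what such an endpoint could look like, assuming a Flask app; the function and parameter names here are illustrative and the real route in routes.py is not shown in this hunk:

```python
# Illustrative Flask route for the /branding/<filename> endpoint referenced above;
# the actual implementation lives in syntaxmatrix/routes.py and may differ.
import os
from flask import abort, send_from_directory

def register_branding_route(app, client_dir):
    branding_dir = os.path.join(client_dir, "branding")

    @app.route("/branding/<path:filename>")
    def serve_branding(filename):
        # Only serve files that actually exist in the client's branding folder.
        if not os.path.isfile(os.path.join(branding_dir, filename)):
            abort(404)
        return send_from_directory(branding_dir, filename)
```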
@@ -596,18 +655,17 @@ class SyntaxMUI:


  def classify_query_intent(self, query: str) -> str:
- from syntaxmatrix.gpt_models_latest import extract_output_text as _out, set_args

- if not self._classification_profile:
- classification_profile = _prof.get_profile('classification') or _prof.get_profile('chat') or _prof.get_profile('admin')
- if not classification_profile:
- return {"Error": "Set a profile for Classification"}
- self._classification_profile = classification_profile
- self._classification_profile['client'] = _prof.get_client(classification_profile)
+ if not self.classifier_profile:
+ classifier_profile = _prof.get_profile('classifier') or _prof.get_profile('chat') or _prof.get_profile('summarizer') or _prof.get_profile('admin')
+ if not classifier_profile:
+ return "Error: Set a profile for Classification"
+ self.classifier_profile = classifier_profile
+ self.classifier_profile['client'] = _prof.get_client(classifier_profile)

- _client = self._classification_profile['client']
- _provider = self._classification_profile['provider']
- _model = self._classification_profile['model']
+ _client = self.classifier_profile['client']
+ _provider = self.classifier_profile['provider']
+ _model = self.classifier_profile['model']

  # New instruction format with hybrid option
  _intent_profile = "You are an intent classifier. Respond ONLY with the intent name."
@@ -701,35 +759,36 @@ class SyntaxMUI:

  def generate_contextual_title(self, chat_history):

- if not self._summarization_profile:
- summarization_profile = _prof.get_profile('summarization') or _prof.get_profile('chat') or _prof.get_profile('admin')
- if not summarization_profile:
- return {"Error": "Chat profile not set yet."}
+ if not self.summarizer_profile:
+ summarizer_profile = _prof.get_profile('summarizer') or _prof.get_profile('classifier') or _prof.get_profile('chat') or _prof.get_profile('admin')
+ if not summarizer_profile:
+ return "<code style='color:red;'>Error: No Agent setup yet.</code>"

- self._summarization_profile = summarization_profile
- self._summarization_profile['client'] = _prof.get_client(summarization_profile)
+ self.summarizer_profile = summarizer_profile
+ self.summarizer_profile['client'] = _prof.get_client(summarizer_profile)

  conversation = "\n".join([f"{role}: {msg}" for role, msg in chat_history])
  _title_profile = "You are a title generator that creates concise and relevant titles for the given conversations."
+
  _instructions = f"""
  Generate a contextual title (5 short words max) from the given Conversation History
  The title should be concise - with no preamble, relevant, and capture the essence of this Conversation: \n{conversation}.\n\n
  return only the title.
  """

- _client = self._summarization_profile['client']
- _provider = self._summarization_profile['provider']
- _model = self._summarization_profile['model']
+ _client = self.summarizer_profile['client']
+ _provider = self.summarizer_profile['provider']
+ _model = self.summarizer_profile['model']

  def google_generated_title():
  try:
  response = _client.models.generate_content(
  model=_model,
- contents=f"{_title_profile}\n{_instructions}"
+ contents=f"{_title_profile}\n\n{_instructions}"
  )
  return response.text.strip()
  except Exception as e:
- return f"Summary agent error!"
+ return f"Google Summary agent error!"

  def gpt_models_latest_generated_title():
  try:
@@ -744,7 +803,7 @@ class SyntaxMUI:
  resp = _client.responses.create(**args)
  return _out(resp).strip()
  except Exception as e:
- return f"Summary agent error!"
+ return f"OpenAI 5s Summary agent error!"

  def anthropic_generated_title():
  try:
@@ -757,7 +816,7 @@ class SyntaxMUI:
  )
  return response.content[0].text.strip()
  except Exception as e:
- return f"Summary agent error!"
+ return f"Anthropic Summary agent error!"

  def openai_sdk_generated_title():
  prompt = [
@@ -771,10 +830,14 @@ class SyntaxMUI:
  temperature=0.3,
  max_tokens=50
  )
- title = response.choices[0].message.content.strip().lower()
- return title if title else ""
+ print("\nRESPONSE:\n", response)
+
+ title = response.choices[0].message.content
+
+ print("\nTITLE:\n", title)
+ return title
  except Exception as e:
- return f"Summary agent error!"
+ return f"SDK Summary agent error!"

  if _provider == "google":
  title = google_generated_title()
@@ -796,18 +859,18 @@ class SyntaxMUI:

  def process_query_stream(self, query: str, context: str, history: list, stream=True) -> Generator[str, None, None]:

- if not self._chat_profile:
+ if not self.chat_profile:
  chat_profile = _prof.get_profile("chat") or _prof.get_profile("admin")
  if not chat_profile:
  yield """<p style='color:red;'>Error: Chat profile is not configured. Add a chat profile inside the admin panel or contact your administrator.</p>
  """
  return None
- self._chat_profile = chat_profile
- self._chat_profile['client'] = _prof.get_client(chat_profile)
+ self.chat_profile = chat_profile
+ self.chat_profile['client'] = _prof.get_client(chat_profile)

- _provider = self._chat_profile['provider']
- _client = self._chat_profile['client']
- _model = self._chat_profile['model']
+ _provider = self.chat_profile['provider']
+ _client = self.chat_profile['client']
+ _model = self.chat_profile['model']

  _contents = f"""
  {self.smxai_instructions}\n\n
@@ -881,18 +944,18 @@ class SyntaxMUI:

  def process_query(self, query, context, history, stream=False):

- if not self._chat_profile:
+ if not self.chat_profile:
  chat_profile = _prof.get_profile("chat") or _prof.get_profile("admin")
  if not chat_profile:
  return """<p style='color:red;'>Error: Chat profile is not configured. Add a chat profile inside the admin panel or contact your administrator.</p>
  """
  return

- self._chat_profile = chat_profile
- self._chat_profile['client'] = _prof.get_client(chat_profile)
- _provider = self._chat_profile['provider']
- _client = self._chat_profile['client']
- _model = self._chat_profile['model']
+ self.chat_profile = chat_profile
+ self.chat_profile['client'] = _prof.get_client(chat_profile)
+ _provider = self.chat_profile['provider']
+ _client = self.chat_profile['client']
+ _model = self.chat_profile['model']
  _contents = f"""
  {self.smxai_instructions}\n\n
  Question: {query}\n
@@ -1031,7 +1094,7 @@ class SyntaxMUI:
  return code
  _prompt = f"```python\n{code}\n```"

- repair_profile = _prof.get_profile("vision2text") or _prof.get_profile("admin")
+ repair_profile = self.coder_profile
  if not repair_profile:
  return (
  '<div class="smx-alert smx-alert-warn">'
@@ -1141,21 +1204,28 @@ class SyntaxMUI:
  tasks = [str(t).strip().lower() for t in tasks if str(t).strip()]

  ai_profile = """
- - You are a Python expert specializing in data science and machine learning.
- - Your task is to generate a single, complete, production-quality, executable Python script for a Jupyter-like Python kernel, based on the given instructions.
+ - You are a Python expert specialising in Data Science (DS) and Machine Learning (ML).
+ - Your task is to generate a single, complete, production-ready Python script that can be executed in a Jupyter-like Python kernel, based on the given instructions.
  - The dataset is already loaded as a pandas DataFrame named `df` (no file I/O or file uploads).
- - Make a copy of `df` and name it `df_copy`. Make sure `df_copy` is preprocessed and cleaned, named `df_cleaned`, if not already done so. Then use `df_cleaned` to perform the ML tasks described in the given context.
- - Select your features and target, from `df_cleaned`, with care and name it `required_cols`
- - Create your 'df_filtered by doing: df_filtered = df_cleaned[required_cols].
- - Use the {TEMPLATE_CATALOGUE} below to educate yourself on which visualizations you will implement in the code.
- - The final output MUST be the complete, executable Python code only, enclosed in a single markdown code block (```python ... ```), which is required to fulfill the user's request. See the {tasks} below.
+ - Make a copy of `df` and name it `df_copy`.
+ - Make sure `df_copy` is preprocessed and cleaned, and name it `df_cleaned`, if not already done so.
+ - Work only with `df_cleaned` to perform the ML tasks described in the given context.
+
+ - Always treat modelling as features `X` and target `y`:
+ * Choose ONE target column in `df_cleaned` (the value to be predicted) and refer to it as `target_col` or `y`.
+ * Build the feature matrix `X` from `df_cleaned` WITHOUT including the target column or any direct transformation of it.
+ * Examples of forbidden feature leakage: if predicting `sellingprice`, do NOT include `sellingprice`, `log_sellingprice`, `margin = sellingprice - mmr`, or any other direct function of `sellingprice` in `X`.
+ * You may create target-derived columns (margins, flags, percentage differences) for descriptive tables or plots, but NEVER use them as model inputs.
+
+ - When you need a modelling frame, define `required_cols = [target_col] + feature_cols` where `feature_cols` excludes the target and its transforms, and then create `df_filtered = df_cleaned[required_cols]`.
+
+ - Use the {TEMPLATE_CATALOGUE} below to educate yourself about available helper functions and reference code, and ensure the implementations are in the code you generate.
+ - The final output MUST BE the complete, executable Python code for the requested analysis, wrapped in a single fenced Python code block (```python ... ```), and MUST BE able to fulfil the user's request: {tasks}.
  - Do not include any explanatory text or markdown outside the code block.
  """

  TEMPLATE_CATALOGUE = """
- ### Available SyntaxMatrix templates (use these instead of inventing new helpers)
-
- Visualisation templates (dataset-agnostic):
+ Visualisation templates:
  - viz_pie(df, category_col=None, top_k=8): pie/donut shares within a category.
  - viz_stacked_bar(df, x=None, hue=None, normalise=True): composition across groups.
  - viz_count_bar(df, category_col=None, top_k=12): counts/denominators by category.
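The rewritten `ai_profile` prompt above pins generated scripts to an explicit `target_col` / `feature_cols` split so the model never leaks the target (or a transform of it) into `X`. A small pandas/scikit-learn sketch of the pattern the prompt asks for; the column names mirror the `sellingprice`/`mmr` example in the prompt and are purely illustrative:

```python
# Illustration of the required_cols / df_filtered pattern enforced by the prompt.
# Column names are illustrative; df_cleaned stands in for the preprocessed frame.
import pandas as pd
from sklearn.model_selection import train_test_split

df_cleaned = pd.DataFrame({
    "sellingprice": [21500, 13200, 30250, 27750, 10900],
    "mmr": [20500, 13100, 31900, 27500, 10800],
    "odometer": [16639, 9393, 1331, 14282, 38200],
    "condition": [5.0, 4.5, 4.1, 4.3, 3.8],
})

target_col = "sellingprice"
# Exclude the target and any direct transform of it (e.g. margin = sellingprice - mmr).
feature_cols = [c for c in df_cleaned.columns if c != target_col]

required_cols = [target_col] + feature_cols
df_filtered = df_cleaned[required_cols]

X = df_filtered[feature_cols]   # features only, no leakage
y = df_filtered[target_col]     # single prediction target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
```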
@@ -1195,9 +1265,9 @@ class SyntaxMUI:

  """
  ### Template rules
- - You MAY call a template if it matches the task.
+ - You MAY call 1 or more templates if they matche the task.
  - Do NOT invent template names.
- - If no template fits, write minimal direct pandas/sklearn/seaborn code instead.
+ - If no template fits, write minimal direct pandas/sklearn/seaborn code instead, for visualization.
  - Keep the solution short: avoid writing wrappers/utilities already handled by SyntaxMatrix hardener.

  #### Template selection hint examples:
@@ -1220,8 +1290,7 @@ class SyntaxMUI:
  set `random_state=42` where relevant.
  4) Be defensive, but avoid hard-failing on optional fields:
  - If the primary column, needed to answer the question, is missing, review your copy of the `df` again.
- Make sure that you selected the proper column.
- Never use a column/variable which isn't available or defined.
+ - Make sure that you selected the proper column. Never use a column/variable which isn't available or defined.
  - If a secondary/extra column is missing, show a warning with `show(...)` and continue using available fields.
  - Handle missing values sensibly (drop rows for simple EDA; use `ColumnTransformer` + `SimpleImputer` for modelling).
  - For categorical features in ML, use `OneHotEncoder(handle_unknown="ignore")`
@@ -1253,6 +1322,20 @@ class SyntaxMUI:
  11) You MUST NOT reference any column outside Available columns: {AVAILABLE_COLUMNS}.
  12) If asked to predict/classify, choose the target by matching the task text to Allowed columns
  and never invent a new name.
+ 13) Treat df as the primary dataset you must work with.
+ 14) The dataset is already loaded as df (no file I/O or file uploads).
+ 15) All outputs must be visible to the user via the provided show(...) helper.
+ 16) Never use print(...); use show(...) instead.
+ 17) You MUST NOT read from or write to local files, folders, or external storage.
+ - Do not call open(...), Path(...).write_text/write_bytes, or similar file APIs.
+ - Do not use df.to_csv(...), df.to_excel(...), df.to_parquet(...),
+ df.to_pickle(...), df.to_json(...), df.to_hdf(...), or any other
+ method that writes to disk.
+ - Do not call joblib.dump(...), pickle.dump(...), torch.save(...),
+ numpy.save(...), numpy.savetxt(...), or similar saving functions.
+ - Do not call plt.savefig(..., 'somefile.png') or any variant that
+ writes an image to a filename. Plots must be rendered in-memory only.
+ 18) Keep everything in memory and surface results via show(...) or plots.

  #### Cohort rules
  When you generate plots for cohorts or categories, you MUST obey these rules:
@@ -1301,21 +1384,21 @@ class SyntaxMUI:
  - And ends with at least 3 visible output (`show(...)` and/or `plt.show()`).
  """)

- if not self._coding_profile:
- coding_profile = _prof.get_profile("coding") or _prof.get_profile("admin")
- if not coding_profile:
+ if not self.coder_profile:
+ _coder_profile = _prof.get_profile("coder")
+ if not _coder_profile:
  return (
  '<div class="smx-alert smx-alert-warn">'
- 'No LLM profile configured for <code>coding</code> (or <code>admin</code>). <br>'
+ 'No LLM profile configured for <code>coding</code> <br>'
  'Please, add the LLM profile inside the admin panel or contact your Administrator.'
  '</div>'
  )

- self._coding_profile = coding_profile
- self._coding_profile['client'] = _prof.get_client(coding_profile)
+ self.coder_profile = _coder_profile
+ self.coder_profile['client'] = _prof.get_client(_coder_profile)

  # code = mlearning_agent(instructions, ai_profile, self._coding_profile)
- code, usage = mlearning_agent(instructions, ai_profile, self._coding_profile)
+ code, usage = mlearning_agent(instructions, ai_profile, self.coder_profile)
  self._last_llm_usage = usage

  if code:
@@ -1363,7 +1446,22 @@ class SyntaxMUI:
  return code.strip()

  return "Error: AI code generation failed."
+

+ def get_image_generator_profile(self):
+ if not self._fullvision_profile:
+ fullvision_profile = _prof.get_profile("fullvision")
+ if not fullvision_profile:
+ return (
+ '<div class="smx-alert smx-alert-warn">'
+ 'No Full Vision profile configured for <code>coding</code> <br>'
+ 'Please, add it inside the admin panel or contact your Administrator.'
+ '</div>'
+ )
+ self._fullvision_profile = fullvision_profile
+ self._fullvision_profile['client'] = _prof.get_client(fullvision_profile)
+
+ return self._fullvision_profile

  def sanitize_rough_to_markdown_task(self, rough: str) -> str:
  """
@@ -1399,7 +1497,13 @@ class SyntaxMUI:

  # Drop optional <rough> wrapper
  return out.replace("<rough>", "").replace("</rough>", "").strip()
-
+
+ def current_profile(self, agency):
+ current_profile = _prof.get_profile(agency) or _prof.get_profile('admin')
+ if not current_profile:
+ return "Error: Configure the valid LLM profile."
+ current_profile['client'] = _prof.get_client(current_profile)
+ return current_profile

  def run(self):
  url = f"http://{self.host}:{self.port}/"