syntaxmatrix 2.5.8.1__py3-none-any.whl → 2.6.0__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as published to a supported public registry. It is provided for informational purposes only.
- syntaxmatrix/agentic/agents.py +1150 -55
- syntaxmatrix/agentic/agents_orchestrer.py +326 -0
- syntaxmatrix/agentic/code_tools_registry.py +27 -32
- syntaxmatrix/commentary.py +16 -16
- syntaxmatrix/core.py +107 -70
- syntaxmatrix/db.py +416 -4
- syntaxmatrix/{display.py → display_html.py} +2 -6
- syntaxmatrix/gpt_models_latest.py +1 -1
- syntaxmatrix/media/__init__.py +0 -0
- syntaxmatrix/media/media_pixabay.py +277 -0
- syntaxmatrix/models.py +1 -1
- syntaxmatrix/page_builder_defaults.py +183 -0
- syntaxmatrix/page_builder_generation.py +1122 -0
- syntaxmatrix/page_layout_contract.py +644 -0
- syntaxmatrix/page_patch_publish.py +1471 -0
- syntaxmatrix/preface.py +128 -8
- syntaxmatrix/profiles.py +28 -10
- syntaxmatrix/routes.py +1347 -427
- syntaxmatrix/selftest_page_templates.py +360 -0
- syntaxmatrix/settings/client_items.py +28 -0
- syntaxmatrix/settings/model_map.py +1022 -208
- syntaxmatrix/settings/prompts.py +328 -130
- syntaxmatrix/static/assets/hero-default.svg +22 -0
- syntaxmatrix/static/icons/bot-icon.png +0 -0
- syntaxmatrix/static/icons/favicon.png +0 -0
- syntaxmatrix/static/icons/logo.png +0 -0
- syntaxmatrix/static/icons/logo2.png +0 -0
- syntaxmatrix/static/icons/logo3.png +0 -0
- syntaxmatrix/templates/admin_secretes.html +108 -0
- syntaxmatrix/templates/dashboard.html +116 -72
- syntaxmatrix/templates/edit_page.html +2535 -0
- syntaxmatrix/utils.py +2365 -2411
- {syntaxmatrix-2.5.8.1.dist-info → syntaxmatrix-2.6.0.dist-info}/METADATA +6 -2
- {syntaxmatrix-2.5.8.1.dist-info → syntaxmatrix-2.6.0.dist-info}/RECORD +37 -24
- syntaxmatrix/generate_page.py +0 -644
- syntaxmatrix/static/icons/hero_bg.jpg +0 -0
- {syntaxmatrix-2.5.8.1.dist-info → syntaxmatrix-2.6.0.dist-info}/WHEEL +0 -0
- {syntaxmatrix-2.5.8.1.dist-info → syntaxmatrix-2.6.0.dist-info}/licenses/LICENSE.txt +0 -0
- {syntaxmatrix-2.5.8.1.dist-info → syntaxmatrix-2.6.0.dist-info}/top_level.txt +0 -0
syntaxmatrix/core.py
CHANGED
@@ -16,7 +16,6 @@ from .file_processor import process_admin_pdf_files
 from google.genai import types
 from .vector_db import query_embeddings
 from .vectorizer import embed_text
-from syntaxmatrix.settings.prompts import SMXAI_CHAT_ID, SMXAI_CHAT_INSTRUCTIONS, SMXAI_WEBSITE_DESCRIPTION
 from typing import List, Generator
 from .auth import init_auth_db
 from . import profiles as _prof
@@ -28,20 +27,23 @@ from html import unescape
 from .plottings import render_plotly, pyplot, describe_plotly, describe_matplotlib
 from threading import RLock
 from syntaxmatrix.settings.model_map import GPT_MODELS_LATEST
-
+from syntaxmatrix.settings.prompts import(
+    SMXAI_CHAT_IDENTITY,
+    SMXAI_CHAT_INSTRUCTIONS,
+    SMXAI_WEBSITE_DESCRIPTION,
+)
+from syntaxmatrix.settings.client_items import read_client_file, getenv_api_key
 
 # ──────── framework‐local storage paths ────────
 # this ensures the key & data always live under the package dir,
-# regardless of where the developer `cd` into before launching.
 _CLIENT_DIR = detect_project_root()
 _HISTORY_DIR = os.path.join(_CLIENT_DIR, "smx_history")
 os.makedirs(_HISTORY_DIR, exist_ok=True)
 
 _SECRET_PATH = os.path.join(_CLIENT_DIR, ".smx_secret_key")
 
-
-
-load_dotenv(_CLIENT_DOTENV_PATH, override=True)
+# OPENAI_API_KEY = getenv_api_key(_CLIENT_DIR, "OPENAI_API_KEY"))
+# dotenv_content = read_client_file(_CLIENT_DIR, ".env")
 
 _ICONS_PATH = os.path.join(_CLIENT_DIR, "static", "icons")
 os.makedirs(_ICONS_PATH, exist_ok=True)
@@ -53,9 +55,9 @@ class SyntaxMUI:
         host="127.0.0.1",
         port="5080",
         user_icon="👩🏿🦲",
-        bot_icon="<img src='/static/icons/
+        bot_icon="<img src='/static/icons/bot-icon.png' width=20' alt='bot'/>",
         favicon="/static/icons/favicon.png",
-        site_logo="<img src='/static/icons/logo.png' width='
+        site_logo="<img src='/static/icons/logo.png' width='45' alt='logo'/>",
         site_title="SyntaxMatrix",
         project_name="smxAI",
         theme_name="light",
@@ -76,7 +78,7 @@ class SyntaxMUI:
         self.theme_toggle_enabled = False
         self.user_files_enabled = False
         self.registration_enabled = False
-        self.smxai_identity =
+        self.smxai_identity = SMXAI_CHAT_IDENTITY
         self.smxai_instructions = SMXAI_CHAT_INSTRUCTIONS
         self.website_description = SMXAI_WEBSITE_DESCRIPTION
         self._eda_output = {} # {chat_id: html}
@@ -97,11 +99,15 @@ class SyntaxMUI:
         self._last_llm_usage = None
         routes.setup_routes(self)
 
-
-        self.
-        self.
-        self.
-        self.
+        # LLM Profiles
+        self.admin_profile = {}
+        self.chat_profile = {}
+        self.classifier_profile = {}
+        self.summarizer_profile = {}
+        self.coder_profile = {}
+        self.imagetexter_profile = {}
+        self.textimager_profile = {}
+        self.imagereditor_profile = {}
 
         self._gpt_models_latest_prev_resp_ids = {}
         self.is_streaming = False
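The eight per-role profile attributes above replace the truncated `self.` assignments and start out as empty dicts. Later methods in this diff index them as `profile['provider']`, `profile['model']`, and `profile['client']`, so the expected shape and the lazy fill-in pattern look roughly like the sketch below. This is an illustration only: the `get_profile`/`get_client` stubs merely mimic `syntaxmatrix.profiles`, and the stored keys are inferred from this diff rather than documented API.

```python
# Sketch only: the dict shape is inferred from how the diff later reads
# profile['provider'], profile['model'], and profile['client'].
from typing import Optional

def get_profile(role: str) -> Optional[dict]:
    """Placeholder standing in for syntaxmatrix.profiles.get_profile()."""
    fake_store = {
        "chat": {"provider": "openai", "model": "gpt-4o-mini"},
    }
    return fake_store.get(role)

def get_client(profile: dict) -> object:
    """Placeholder for syntaxmatrix.profiles.get_client(); real code builds an SDK client."""
    return object()

class ProfileCache:
    def __init__(self) -> None:
        self.chat_profile: dict = {}  # stays empty until first use

    def resolve_chat_profile(self) -> dict:
        # Lazy resolution, mirroring the pattern used in process_query_stream().
        if not self.chat_profile:
            profile = get_profile("chat") or get_profile("admin")
            if not profile:
                raise RuntimeError("No chat profile configured")
            profile["client"] = get_client(profile)
            self.chat_profile = profile
        return self.chat_profile
```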
@@ -333,6 +339,7 @@ class SyntaxMUI:
     def set_favicon(self, icon):
         self.favicon = icon
 
+
     def set_site_logo(self, logo):
         self.site_logo = logo
 
@@ -596,18 +603,17 @@ class SyntaxMUI:
 
 
     def classify_query_intent(self, query: str) -> str:
-        from syntaxmatrix.gpt_models_latest import extract_output_text as _out, set_args
 
-        if not self.
-
-        if not
-            return
-        self.
-        self.
+        if not self.classifier_profile:
+            classifier_profile = _prof.get_profile('classifier') or _prof.get_profile('chat') or _prof.get_profile('summarizer') or _prof.get_profile('admin')
+            if not classifier_profile:
+                return "Error: Set a profile for Classification"
+            self.classifier_profile = classifier_profile
+            self.classifier_profile['client'] = _prof.get_client(classifier_profile)
 
-        _client = self.
-        _provider = self.
-        _model = self.
+        _client = self.classifier_profile['client']
+        _provider = self.classifier_profile['provider']
+        _model = self.classifier_profile['model']
 
         # New instruction format with hybrid option
         _intent_profile = "You are an intent classifier. Respond ONLY with the intent name."
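`classify_query_intent` now resolves its profile through a fallback chain (classifier → chat → summarizer → admin) and caches the result on `self.classifier_profile`. A minimal, hypothetical helper expressing that ordering is sketched below; it is not part of the package, and `get_profile` is passed in rather than imported.

```python
from typing import Callable, Optional

# Hypothetical helper illustrating the fallback order used in
# classify_query_intent() and generate_contextual_title().
FALLBACK_ORDER = {
    "classifier": ["classifier", "chat", "summarizer", "admin"],
    "summarizer": ["summarizer", "classifier", "chat", "admin"],
    "chat": ["chat", "admin"],
}

def resolve_with_fallback(role: str, get_profile: Callable[[str], Optional[dict]]) -> Optional[dict]:
    """Return the first configured profile in the role's fallback chain, else None."""
    for candidate in FALLBACK_ORDER.get(role, [role, "admin"]):
        profile = get_profile(candidate)
        if profile:
            return profile
    return None
```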
@@ -701,35 +707,36 @@ class SyntaxMUI:
 
     def generate_contextual_title(self, chat_history):
 
-        if not self.
-
-        if not
-            return
+        if not self.summarizer_profile:
+            summarizer_profile = _prof.get_profile('summarizer') or _prof.get_profile('classifier') or _prof.get_profile('chat') or _prof.get_profile('admin')
+            if not summarizer_profile:
+                return "<code style='color:red;'>Error: No Agent setup yet.</code>"
 
-        self.
-        self.
+            self.summarizer_profile = summarizer_profile
+            self.summarizer_profile['client'] = _prof.get_client(summarizer_profile)
 
         conversation = "\n".join([f"{role}: {msg}" for role, msg in chat_history])
         _title_profile = "You are a title generator that creates concise and relevant titles for the given conversations."
+
         _instructions = f"""
         Generate a contextual title (5 short words max) from the given Conversation History
         The title should be concise - with no preamble, relevant, and capture the essence of this Conversation: \n{conversation}.\n\n
         return only the title.
         """
 
-        _client = self.
-        _provider = self.
-        _model = self.
+        _client = self.summarizer_profile['client']
+        _provider = self.summarizer_profile['provider']
+        _model = self.summarizer_profile['model']
 
         def google_generated_title():
             try:
                 response = _client.models.generate_content(
                     model=_model,
-                    contents=f"{_title_profile}\n{_instructions}"
+                    contents=f"{_title_profile}\n\n{_instructions}"
                 )
                 return response.text.strip()
             except Exception as e:
-                return f"Summary agent error!"
+                return f"Google Summary agent error!"
 
         def gpt_models_latest_generated_title():
             try:
@@ -744,7 +751,7 @@ class SyntaxMUI:
                 resp = _client.responses.create(**args)
                 return _out(resp).strip()
             except Exception as e:
-                return f"Summary agent error!"
+                return f"OpenAI 5s Summary agent error!"
 
         def anthropic_generated_title():
             try:
@@ -757,7 +764,7 @@ class SyntaxMUI:
                 )
                 return response.content[0].text.strip()
             except Exception as e:
-                return f"Summary agent error!"
+                return f"Anthropic Summary agent error!"
 
         def openai_sdk_generated_title():
             prompt = [
@@ -771,10 +778,14 @@ class SyntaxMUI:
                     temperature=0.3,
                     max_tokens=50
                 )
-
-
+                print("\nRESPONSE:\n", response)
+
+                title = response.choices[0].message.content
+
+                print("\nTITLE:\n", title)
+                return title
             except Exception as e:
-                return f"Summary agent error!"
+                return f"SDK Summary agent error!"
 
         if _provider == "google":
             title = google_generated_title()
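The OpenAI-SDK title path now extracts the text from `response.choices[0].message.content`, and each provider branch returns its own labelled error string. For reference, a standalone sketch of that Chat Completions call shape is shown below; it assumes the `openai>=1.x` client, a placeholder model name, and illustrative prompt wording that is not the package's.

```python
# Standalone sketch of the Chat Completions title call; not the package's code.
# Assumes openai>=1.x and OPENAI_API_KEY set in the environment.
from openai import OpenAI

def sdk_title(conversation: str, model: str = "gpt-4o-mini") -> str:
    client = OpenAI()
    try:
        response = client.chat.completions.create(
            model=model,
            messages=[
                {"role": "system", "content": "You are a title generator."},
                {"role": "user", "content": f"Give a concise 5-word title for:\n{conversation}"},
            ],
            temperature=0.3,
            max_tokens=50,
        )
        # Same extraction path as the diff: choices[0].message.content
        return (response.choices[0].message.content or "").strip()
    except Exception:
        return "SDK Summary agent error!"
```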
@@ -796,18 +807,18 @@ class SyntaxMUI:
 
     def process_query_stream(self, query: str, context: str, history: list, stream=True) -> Generator[str, None, None]:
 
-        if not self.
+        if not self.chat_profile:
             chat_profile = _prof.get_profile("chat") or _prof.get_profile("admin")
             if not chat_profile:
                 yield """<p style='color:red;'>Error: Chat profile is not configured. Add a chat profile inside the admin panel or contact your administrator.</p>
                 """
                 return None
-            self.
-            self.
+            self.chat_profile = chat_profile
+            self.chat_profile['client'] = _prof.get_client(chat_profile)
 
-        _provider = self.
-        _client = self.
-        _model = self.
+        _provider = self.chat_profile['provider']
+        _client = self.chat_profile['client']
+        _model = self.chat_profile['model']
 
         _contents = f"""
         {self.smxai_instructions}\n\n
@@ -881,18 +892,18 @@ class SyntaxMUI:
 
     def process_query(self, query, context, history, stream=False):
 
-        if not self.
+        if not self.chat_profile:
             chat_profile = _prof.get_profile("chat") or _prof.get_profile("admin")
             if not chat_profile:
                 return """<p style='color:red;'>Error: Chat profile is not configured. Add a chat profile inside the admin panel or contact your administrator.</p>
                 """
                 return
 
-            self.
-            self.
-        _provider = self.
-        _client = self.
-        _model = self.
+            self.chat_profile = chat_profile
+            self.chat_profile['client'] = _prof.get_client(chat_profile)
+        _provider = self.chat_profile['provider']
+        _client = self.chat_profile['client']
+        _model = self.chat_profile['model']
         _contents = f"""
         {self.smxai_instructions}\n\n
         Question: {query}\n
@@ -1031,7 +1042,7 @@ class SyntaxMUI:
             return code
         _prompt = f"```python\n{code}\n```"
 
-        repair_profile =
+        repair_profile = self.coder_profile
         if not repair_profile:
             return (
                 '<div class="smx-alert smx-alert-warn">'
@@ -1141,22 +1152,27 @@ class SyntaxMUI:
         tasks = [str(t).strip().lower() for t in tasks if str(t).strip()]
 
         ai_profile = """
-        - You are a Python expert
-        - Your task is to generate a single, complete, production-
+        - You are a Python expert specialising in Data Science (DS) and Machine Learning (ML).
+        - Your task is to generate a single, complete, production-ready Python script that can be executed in a Jupyter-like Python kernel, based on the given instructions.
         - The dataset is already loaded as a pandas DataFrame named `df` (no file I/O or file uploads).
-        - Make a copy of `df` and name it `df_copy`.
-        - Make sure `df_copy` is preprocessed and cleaned, and name it `df_cleaned`, if not already done so.
+        - Make a copy of `df` and name it `df_copy`.
+        - Make sure `df_copy` is preprocessed and cleaned, and name it `df_cleaned`, if not already done so.
         - Work only with `df_cleaned` to perform the ML tasks described in the given context.
-
-        -
-
-
+
+        - Always treat modelling as features `X` and target `y`:
+          * Choose ONE target column in `df_cleaned` (the value to be predicted) and refer to it as `target_col` or `y`.
+          * Build the feature matrix `X` from `df_cleaned` WITHOUT including the target column or any direct transformation of it.
+          * Examples of forbidden feature leakage: if predicting `sellingprice`, do NOT include `sellingprice`, `log_sellingprice`, `margin = sellingprice - mmr`, or any other direct function of `sellingprice` in `X`.
+          * You may create target-derived columns (margins, flags, percentage differences) for descriptive tables or plots, but NEVER use them as model inputs.
+
+        - When you need a modelling frame, define `required_cols = [target_col] + feature_cols` where `feature_cols` excludes the target and its transforms, and then create `df_filtered = df_cleaned[required_cols]`.
+
+        - Use the {TEMPLATE_CATALOGUE} below to educate yourself about available helper functions and reference code, and ensure the implementations are in the code you generate.
+        - The final output MUST BE the complete, executable Python code for the requested analysis, wrapped in a single fenced Python code block (```python ... ```), and MUST BE able to fulfil the user's request: {tasks}.
         - Do not include any explanatory text or markdown outside the code block.
         """
 
         TEMPLATE_CATALOGUE = """
-        ### Available SyntaxMatrix templates (use these instead of inventing new helpers)
-
         Visualisation templates:
         - viz_pie(df, category_col=None, top_k=8): pie/donut shares within a category.
         - viz_stacked_bar(df, x=None, hue=None, normalise=True): composition across groups.
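The expanded coder prompt forbids target leakage and prescribes `required_cols = [target_col] + feature_cols` plus a `df_filtered` modelling frame. The pandas/scikit-learn sketch below shows the split a compliant generated script would perform; the column names (`sellingprice`, `mmr`, ...) and toy values are only the examples the prompt itself mentions, not real data.

```python
# Illustration of the leakage-safe split the prompt asks generated code to follow.
import pandas as pd
from sklearn.model_selection import train_test_split

# Tiny stand-in for the already-loaded, cleaned frame the prompt refers to.
df_cleaned = pd.DataFrame({
    "sellingprice": [21500, 18300, 30250, 9900],
    "mmr": [20800, 19000, 29500, 10400],
    "odometer": [16639, 9393, 1331, 14282],
    "year": [2015, 2015, 2014, 2015],
})

target_col = "sellingprice"
# Features must exclude the target and any direct transform of it
# (e.g. margin = sellingprice - mmr would leak the target).
feature_cols = [c for c in df_cleaned.columns if c != target_col]

required_cols = [target_col] + feature_cols
df_filtered = df_cleaned[required_cols]

X = df_filtered[feature_cols]
y = df_filtered[target_col]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42)
```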
@@ -1316,9 +1332,9 @@ class SyntaxMUI:
         - And ends with at least 3 visible output (`show(...)` and/or `plt.show()`).
         """)
 
-        if not self.
-
-        if not
+        if not self.coder_profile:
+            _coder_profile = _prof.get_profile("coder")
+            if not _coder_profile:
                 return (
                     '<div class="smx-alert smx-alert-warn">'
                     'No LLM profile configured for <code>coding</code> <br>'
@@ -1326,11 +1342,11 @@ class SyntaxMUI:
                     '</div>'
                 )
 
-            self.
-            self.
+            self.coder_profile = _coder_profile
+            self.coder_profile['client'] = _prof.get_client(_coder_profile)
 
         # code = mlearning_agent(instructions, ai_profile, self._coding_profile)
-        code, usage = mlearning_agent(instructions, ai_profile, self.
+        code, usage = mlearning_agent(instructions, ai_profile, self.coder_profile)
         self._last_llm_usage = usage
 
         if code:
@@ -1378,7 +1394,22 @@ class SyntaxMUI:
             return code.strip()
 
         return "Error: AI code generation failed."
+
+
+    def get_image_generator_profile(self):
+        if not self._fullvision_profile:
+            fullvision_profile = _prof.get_profile("fullvision")
+            if not fullvision_profile:
+                return (
+                    '<div class="smx-alert smx-alert-warn">'
+                    'No Full Vision profile configured for <code>coding</code> <br>'
+                    'Please, add it inside the admin panel or contact your Administrator.'
+                    '</div>'
+                )
+            self._fullvision_profile = fullvision_profile
+            self._fullvision_profile['client'] = _prof.get_client(fullvision_profile)
 
+        return self._fullvision_profile
 
     def sanitize_rough_to_markdown_task(self, rough: str) -> str:
         """
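The new `get_image_generator_profile` returns either the cached `fullvision` profile dict or an HTML warning string, so a caller presumably has to branch on the return type. A hypothetical caller-side check is sketched below; the `use_image_generator` helper is illustrative and not part of the package.

```python
def use_image_generator(smx):
    """Hypothetical caller; `smx` is assumed to be a SyntaxMUI instance."""
    profile_or_error = smx.get_image_generator_profile()
    if isinstance(profile_or_error, str):
        # No "fullvision" profile configured: the method returned its HTML warning string.
        return profile_or_error
    # Otherwise it is the cached profile dict with an SDK client attached.
    return profile_or_error["client"], profile_or_error["model"]
```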
@@ -1414,7 +1445,13 @@ class SyntaxMUI:
 
         # Drop optional <rough> wrapper
         return out.replace("<rough>", "").replace("</rough>", "").strip()
-
+
+    def current_profile(self, agency):
+        current_profile = _prof.get_profile(agency) or _prof.get_profile('admin')
+        if not current_profile:
+            return "Error: Configure the valid LLM profile."
+        current_profile['client'] = _prof.get_client(current_profile)
+        return current_profile
 
     def run(self):
         url = f"http://{self.host}:{self.port}/"
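The new `current_profile(agency)` helper generalises the per-role lookup-and-cache boilerplate seen earlier: it resolves the requested agency, falls back to `admin`, attaches a client, and returns an error string when nothing is configured. A hypothetical usage sketch follows; the `pick_model` wrapper is not part of the package.

```python
def pick_model(smx, agency: str = "summarizer"):
    """Hypothetical wrapper; `agency` values mirror those used elsewhere in this diff."""
    profile = smx.current_profile(agency)
    if isinstance(profile, str):
        # current_profile() returns an error string when no profile is configured.
        return profile
    return profile["provider"], profile["model"], profile["client"]
```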