syntaxmatrix 2.5.5.5__py3-none-any.whl → 2.6.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. syntaxmatrix/__init__.py +3 -2
  2. syntaxmatrix/agentic/agents.py +1220 -169
  3. syntaxmatrix/agentic/agents_orchestrer.py +326 -0
  4. syntaxmatrix/agentic/code_tools_registry.py +27 -32
  5. syntaxmatrix/auth.py +142 -5
  6. syntaxmatrix/commentary.py +16 -16
  7. syntaxmatrix/core.py +192 -84
  8. syntaxmatrix/db.py +460 -4
  9. syntaxmatrix/{display.py → display_html.py} +2 -6
  10. syntaxmatrix/gpt_models_latest.py +1 -1
  11. syntaxmatrix/media/__init__.py +0 -0
  12. syntaxmatrix/media/media_pixabay.py +277 -0
  13. syntaxmatrix/models.py +1 -1
  14. syntaxmatrix/page_builder_defaults.py +183 -0
  15. syntaxmatrix/page_builder_generation.py +1122 -0
  16. syntaxmatrix/page_layout_contract.py +644 -0
  17. syntaxmatrix/page_patch_publish.py +1471 -0
  18. syntaxmatrix/preface.py +670 -0
  19. syntaxmatrix/profiles.py +28 -10
  20. syntaxmatrix/routes.py +1941 -593
  21. syntaxmatrix/selftest_page_templates.py +360 -0
  22. syntaxmatrix/settings/client_items.py +28 -0
  23. syntaxmatrix/settings/model_map.py +1022 -207
  24. syntaxmatrix/settings/prompts.py +328 -130
  25. syntaxmatrix/static/assets/hero-default.svg +22 -0
  26. syntaxmatrix/static/icons/bot-icon.png +0 -0
  27. syntaxmatrix/static/icons/favicon.png +0 -0
  28. syntaxmatrix/static/icons/logo.png +0 -0
  29. syntaxmatrix/static/icons/logo3.png +0 -0
  30. syntaxmatrix/templates/admin_branding.html +104 -0
  31. syntaxmatrix/templates/admin_features.html +63 -0
  32. syntaxmatrix/templates/admin_secretes.html +108 -0
  33. syntaxmatrix/templates/change_password.html +124 -0
  34. syntaxmatrix/templates/dashboard.html +296 -131
  35. syntaxmatrix/templates/dataset_resize.html +535 -0
  36. syntaxmatrix/templates/edit_page.html +2535 -0
  37. syntaxmatrix/utils.py +2728 -2835
  38. {syntaxmatrix-2.5.5.5.dist-info → syntaxmatrix-2.6.2.dist-info}/METADATA +6 -2
  39. {syntaxmatrix-2.5.5.5.dist-info → syntaxmatrix-2.6.2.dist-info}/RECORD +42 -25
  40. syntaxmatrix/generate_page.py +0 -634
  41. syntaxmatrix/static/icons/hero_bg.jpg +0 -0
  42. {syntaxmatrix-2.5.5.5.dist-info → syntaxmatrix-2.6.2.dist-info}/WHEEL +0 -0
  43. {syntaxmatrix-2.5.5.5.dist-info → syntaxmatrix-2.6.2.dist-info}/licenses/LICENSE.txt +0 -0
  44. {syntaxmatrix-2.5.5.5.dist-info → syntaxmatrix-2.6.2.dist-info}/top_level.txt +0 -0
syntaxmatrix/agentic/agents_orchestrer.py ADDED
@@ -0,0 +1,326 @@
+ from typing import Any, Dict
+ import pandas as pd
+ import numpy as np
+ from openai import APIError, OpenAI
+ from google import genai
+ import io
+ import re
+ import tiktoken
+ from google.genai import types
+ from syntaxmatrix import profiles as _prof
+ from ..gpt_models_latest import set_args as _set_args, extract_output_text as _out
+
+ class OrchestrateMLSystem:
+     def __init__(self, user_question, dataset_path):
+         self.user_query = user_question
+         self.dataset_path = dataset_path
+         self.df = None
+         self.cot_history = {}  # To store chain-of-thought history for each agent
+
+     def _generate_ml_response(self, profile, sys_prompt, user_prompt, max_output_tokens=4000, reasoning_summary="auto"):
+         """Dispatch the prompt to the configured provider and return (thought, text, usage),
+         separating reasoning/thought parts from the output text."""
+
+         profile['client'] = _prof.get_client(profile)
+         client = profile["client"]
+         model = profile["model"]
+         provider = profile["provider"].lower()
+
+         usage = {
+             "provider": provider,
+             "model": model,
+             "input_tokens": None,
+             "output_tokens": None,
+             "total_tokens": None,
+         }
+
+         def openai_sdk_response():
+             response = client.chat.completions.create(
+                 model=model,
+                 messages=[
+                     {"role": "system", "content": sys_prompt},
+                     {"role": "user", "content": user_prompt}
+                 ],
+                 temperature=0.1,
+             )
+
+             full_text = response.choices[0].message.content
+
+             # Simple heuristic to split thought and text if formatted as such
+             thought_match = re.search(r'THOUGHT:(.*)TEXT:', full_text, re.DOTALL)
+             if thought_match:
+                 thought = thought_match.group(1).strip()
+                 text = full_text[thought_match.end():].strip()
+             else:
+                 thought = ""
+                 text = full_text.strip()
+
+             usage["input_tokens"] = response.usage.prompt_tokens
+             usage["output_tokens"] = response.usage.completion_tokens
+             usage["total_tokens"] = response.usage.total_tokens
+
+             return thought, text, usage
+
+         if provider == "google":
+             response = client.models.generate_content(
+                 model=model,
+                 contents=f"{sys_prompt}\n\n{user_prompt}",
+                 config=types.GenerateContentConfig(
+                     temperature=0.1
+                 )
+             )
+             # Gemini 3 returns multiple parts (thought + text).
+             # Join thought parts and plain text parts separately to prevent 'thought_signature' warnings.
+             parts = response.candidates[0].content.parts
+
+             thought = "".join([part.text for part in parts if getattr(part, 'thought', False) and part.text])
+             text = "".join([part.text for part in parts if not getattr(part, 'thought', False) and part.text is not None])
+
+             um = response.usage_metadata
+             usage["input_tokens"] = um.prompt_token_count
+             usage["output_tokens"] = um.candidates_token_count + (um.thoughts_token_count if um.thoughts_token_count is not None else 0)
+             usage["total_tokens"] = um.total_token_count
+
+             return thought, text, usage
+
+         if provider == "openai":
+             if int(model.split("gpt-")[1][0]) >= 5:
+
+                 def reasoning_and_verbosity():
+                     reasoning_effort, verbosity = "medium", "medium"
+                     if "mini" not in model:
+                         if model == "gpt-5-nano":
+                             reasoning_effort = "low"
+                         else:
+                             reasoning_effort, verbosity = "high", "high"
+                     return reasoning_effort, verbosity
+
+                 reasoning, verbosity = reasoning_and_verbosity()
+                 if reasoning in ["medium", "high"]:
+                     if (max_output_tokens or 0) < 6000:
+                         max_output_tokens = 6000
+
+                 # Build request kwargs for the Responses API
+                 req = dict(
+                     model=model,
+                     input=user_prompt,
+                     instructions=sys_prompt,
+                     max_output_tokens=max_output_tokens,
+                     reasoning={
+                         "effort": reasoning,
+                         "summary": "auto"
+                     },
+                     text={"verbosity": verbosity},
+                 )
+
+                 # Ask for a reasoning summary only if requested; some org/model combos may reject it,
+                 # so retry without reasoning if that happens.
+                 if reasoning_summary:
+                     req["reasoning"] = {"summary": reasoning_summary}
+
+                 try:
+                     response = client.responses.create(**req)
+                 except Exception as e:
+                     msg = str(e)
+                     # If summaries aren't supported in this setup, retry without reasoning altogether.
+                     if ("reasoning.summary" in msg) or ("reasoning" in msg and "Unsupported parameter" in msg):
+                         req.pop("reasoning", None)
+                         response = client.responses.create(**req)
+                     else:
+                         raise
+
+                 # --- Extract raw output text (don't .strip()) ---
+                 raw_text = getattr(response, "output_text", None) or ""
+                 if not raw_text:
+                     # Fallback: walk the output items (covers SDK edge cases)
+                     chunks = []
+                     for item in (getattr(response, "output", None) or []):
+                         if getattr(item, "type", None) == "message":
+                             for c in (getattr(item, "content", None) or []):
+                                 if getattr(c, "type", None) in ("output_text", "text"):
+                                     chunks.append(getattr(c, "text", "") or "")
+                     raw_text = "".join(chunks)
+
+                 # --- Reasoning summary (if present) ---
+                 reasoning = None
+                 for item in (getattr(response, "output", None) or []):
+                     if getattr(item, "type", None) == "reasoning":
+                         summ = getattr(item, "summary", None) or []
+                         if summ:
+                             reasoning = getattr(summ[0], "text", None)
+                         break
+
+                 # --- Return raw code as-is; if the model disobeys and wraps ```...```, unwrap once ---
+                 code = raw_text
+                 m = re.search(r"```(?:python)?\s*\n(.*?)```", raw_text, flags=re.DOTALL | re.IGNORECASE)
+                 if m:
+                     code = m.group(1)  # keep inner code exactly (no strip)
+
+                 # --- Usage ---
+                 if getattr(response, "usage", None):
+                     um = response.usage
+                     usage["input_tokens"] = getattr(um, "input_tokens", None)
+                     usage["output_tokens"] = getattr(um, "output_tokens", None)
+                     usage["total_tokens"] = getattr(um, "total_tokens", None)
+
+                 return reasoning, code, usage
+             else:
+                 return openai_sdk_response()
+
+         if provider == "anthropic":
+             msg = client.messages.create(
+                 model=model,
+                 max_tokens=max_output_tokens,
+                 system=sys_prompt,  # top-level system param (Messages API)
+                 messages=[{"role": "user", "content": user_prompt}],
+                 temperature=0.1,
+             )
+
+             # Claude returns content blocks; join only text blocks.
+             # IMPORTANT: return raw code exactly as provided (no .strip()).
+             code_parts = []
+             for block in (getattr(msg, "content", None) or []):
+                 btype = getattr(block, "type", None)
+                 if btype == "text":
+                     code_parts.append(getattr(block, "text", "") or "")
+             raw_code = "".join(code_parts)
+
+             # Usage
+             um = getattr(msg, "usage", None)
+             if um is not None:
+                 usage["input_tokens"] = getattr(um, "input_tokens", None)
+                 usage["output_tokens"] = getattr(um, "output_tokens", None)
+                 if usage["input_tokens"] is not None and usage["output_tokens"] is not None:
+                     usage["total_tokens"] = usage["input_tokens"] + usage["output_tokens"]
+
+             # No separate reasoning returned in this minimal mode
+             return None, raw_code, usage
+
+         return openai_sdk_response()
+
+     def operator_agent(self) -> Dict[str, Any]:
+         """The main entry point. Returns the refined specs, runnable code, and token usage."""
+         # 1. Initialize DataFrame
+         df = pd.read_csv(self.dataset_path)
+
+         # 2. Dynamic context generation
+         buffer = io.StringIO()
+         df.info(buf=buffer)
+         df_context = f"""
+         SCHEMA: {buffer.getvalue()}
+         NULLS: {df.isnull().sum().to_string()}
+         STATS: {df.describe(include='all').to_string()}
+         SAMPLE: {df.head(3).to_string()}
+         """
+
+         # 3. Refiner agent (generates task specs + CoT)
+         ref_thought, ref_tasks, ref_usage = self.refiner_agent(df_context, self.user_query)
+         self.cot_history["Refiner"] = {"thought": ref_thought, "tasks": ref_tasks, "usage": ref_usage}
+
+         # 4. Coder agent (generates runnable code + CoT)
+         thought, text, usage = self.coder_agent(df_context, ref_tasks)
+         self.cot_history["Coder"] = {"thought": thought, "code": text, "usage": usage}
+
+         # Return structured result
+         return {
+             "specs_cot": ref_tasks,
+             "python_code": text,
+             "token_usage": self.cot_history,
+         }
+
+     def refiner_agent(self, context, query):
+         """Categorizes the ML job and selects the specific Viz Template."""
+
+         # refined_profile = _prof.get_profile("imagetexter") or _prof.get_profile("admin")
+         # refined_profile["client"] = _prof.get_client(refined_profile)
+
+         sys_prompt = f"""
+         You are the 'Refiner Agent'. Your role is to transform a natural language query into a strict ML technical specification.
+         """
+         user_prompt = f"""
+         DATASET CONTEXT:
+         {context}
+
+         USER QUERY:
+         {query}
+
+         STEP 1: Identify the ML Job Category from this master list:
+         - Supervised: Classification, Regression.
+         - Unsupervised: Clustering, Dimensionality Reduction, Anomaly Detection, Density Estimation.
+         - Hybrid/Other: RL, Recommendation, Ranking, Time Series Forecasting, Seq2Seq, Generative Modeling, Similarity Learning.
+         - Advanced: Meta-Learning, Causal Inference, Survival Analysis, Multi-Task Learning, Imitation Learning.
+
+         STEP 2: Map the logic.
+         - Identify X (features) and y (target).
+         - If the user asks for a derived column (e.g., 'If A > B then 1'), provide the specific NumPy/Pandas logic.
+
+         STEP 3: Select the Visualization Template. Provide the coder with the specific plotting code structure for that category:
+         - CLASSIFICATION: Confusion Matrix Heatmap, ROC Curve, Feature Importance Bar.
+         - REGRESSION: Pred vs. Actual Scatter, Residuals Plot, Error Distribution.
+         - CLUSTERING: PCA-based Cluster Scatter, Elbow/Silhouette Plot.
+         - TIME SERIES: Line plot (Train/Test/Forecast), Seasonal Decomposition.
+         - ANOMALY: Scatter with Outlier Highlighting.
+         - RECSYS/RANKING: Precision@K or Recall@K Curves.
+
+         STEP 4: Chain-of-Thought (CoT) for Coder:
+         1. Mandatory Cleaning: Check for nulls. Impute numbers with median, strings with mode.
+         2. Encoding: Convert objects to categories/numbers.
+         3. Scaling: Apply StandardScaler for distance-based tasks.
+         4. Modeling: Select the best algorithm for the task (e.g., XGBoost, K-Means, ARIMA).
+         5. Evaluation: Calculate appropriate metrics (F1, RMSE, R2, etc.).
+
+         Pre-empt loopholes: If the query is vague, assume reasonable defaults based on the column types.
+         Return ONLY the refined technical specification.
+         """
+         refiner_profile = _prof.get_profile("classifier") or _prof.get_profile("chat") or _prof.get_profile("admin")
+         if not refiner_profile:
+             return None, "Error: Set an appropriate classifier profile", None
+
+         thought, text, usage = self._generate_ml_response(refiner_profile, sys_prompt, user_prompt)
+         return thought, text, usage
+
+     def coder_agent(self, context, refined_tasks):
+         """Generates the code using the spec. Task-agnostic."""
+
+         system_prompt = f"""
+         You are the 'Coder Agent'. Generate a robust, production-ready Python script.
+         """
+         user_prompt = f"""
+         DATASET CONTEXT:
+         {context}
+
+         REFINED ML TASKS & SPECS:
+         {refined_tasks}
+
+         CONSTRAINTS:
+         - Assume 'df' is already loaded in the namespace. Do not use pd.read_csv.
+         - Include all up-to-date imports (pandas, numpy, sklearn, matplotlib, seaborn, etc.).
+         - Avoid any deprecated methods, imports, or statements.
+         - FIRST: Implement the 'Cold Start' cleaning and preprocessing logic (check for nulls and impute based on column types in context).
+         - SECOND: Implement the ML modeling pipeline (Split, Scale, Train, Predict).
+         - THIRD: Implement the visualizations using the specific templates requested.
+         - Return ONLY the executable Python code. NO MARKDOWN (```), NO EXPLANATION.
+
+         PLOTS:
+         - Display every figure you plot using plt.show().
+
+         TABLES:
+         - Use HTML tables for tabular data.
+         """
+         coder_profile = _prof.get_profile("coder")
+         if not coder_profile:
+             return None, "Error: Set an appropriate coder profile", None
+
+         thought, raw_code, usage = self._generate_ml_response(coder_profile, system_prompt, user_prompt)
+
+         # Robustly strip any potential markdown formatting
+         code = re.sub(r'```python|```', '', raw_code).strip()
+         return thought, code, usage
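For orientation, the new orchestrator is self-contained: operator_agent() loads the CSV, builds a schema/nulls/stats context, runs the Refiner and Coder agents, and returns a dict of specs, code, and usage. A minimal driver sketch, assuming "classifier"/"coder" (or fallback "chat"/"admin") LLM profiles are configured; the question and dataset path below are hypothetical, while the module path and return keys come from the code above:

```python
# Minimal usage sketch for the new OrchestrateMLSystem (not part of the package).
from syntaxmatrix.agentic.agents_orchestrer import OrchestrateMLSystem

orch = OrchestrateMLSystem(
    user_question="Predict churn from the usage columns",  # hypothetical query
    dataset_path="data/customers.csv",                     # hypothetical path
)
result = orch.operator_agent()

print(result["specs_cot"])     # Refiner output: the ML technical specification
print(result["python_code"])   # Coder output: a script that expects `df` in scope
print(result["token_usage"])   # per-agent thought/text/usage history
```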
syntaxmatrix/agentic/code_tools_registry.py CHANGED
@@ -1,37 +1,32 @@
- # syntaxmatrix/code_tools_registry.py
- from typing import Dict, Any
- from .agent_tools import CodeTool
- from ..utils import (
-     strip_python_dotenv, fix_predict_calls_records_arg, fix_values_sum_numeric_only_bug,
-     fix_fstring_backslash_paths, ensure_os_import, fix_numeric_sum, ensure_os_import,
-     fix_numeric_sum, fix_concat_empty_list, fix_confusion_matrix_for_multilabel
- )
+ # # syntaxmatrix/code_tools_registry.py
+ # from typing import Dict, Any
+ # from .agent_tools import CodeTool
+ # # from ..utils import (
+ # #     strip_python_dotenv, fix_predict_calls_records_arg, fix_values_sum_numeric_only_bug,
+ # #     fix_fstring_backslash_paths, ensure_os_import, fix_numeric_sum, ensure_os_import,
+ # #     fix_numeric_sum, fix_concat_empty_list, fix_confusion_matrix_for_multilabel
+ # # )
 
- def _wrap(fn):
-     return lambda code, ctx: fn(code)
+ # def _wrap(fn):
+ #     return lambda code, ctx: fn(code)
 
- # 1) early sanitiser
- EARLY_SANITIZERS = [
-     CodeTool("strip_dotenv", "sanitize_early", _wrap(strip_python_dotenv), priority=0),
- ]
-
- # 2) Domain and Plotting patches
- DOMAIN_AND_PLOTTING_PATCHES = [
+ # # 2) Domain and Plotting patches
+ # DOMAIN_AND_PLOTTING_PATCHES = [
 
- ]
+ # ]
 
- # 3) syntax/data fixers
- SYNTAX_AND_REPAIR = [
-     CodeTool("fix_predict_records", "syntax_fixes", _wrap(fix_predict_calls_records_arg), priority=10),
-     CodeTool("fix_values_sum_bug", "syntax_fixes", _wrap(fix_values_sum_numeric_only_bug), priority=20),
-     CodeTool("fix_fstring_paths", "syntax_fixes", _wrap(fix_fstring_backslash_paths), priority=30),
-     CodeTool("ensure_os_import", "syntax_fixes", _wrap(ensure_os_import), priority=40),
-     CodeTool("fix_numeric_sum", "syntax_fixes", _wrap(fix_numeric_sum), priority=50),
-     CodeTool("fix_concat_empty_list", "syntax_fixes", _wrap(fix_concat_empty_list), priority=60),
-     CodeTool("fix_cm_multilabel", "syntax_fixes", _wrap(fix_confusion_matrix_for_multilabel), priority=65),
- ]
+ # # 3) syntax/data fixers
+ # SYNTAX_AND_REPAIR = [
+ #     CodeTool("fix_predict_records", "syntax_fixes", _wrap(fix_predict_calls_records_arg), priority=10),
+ #     CodeTool("fix_values_sum_bug", "syntax_fixes", _wrap(fix_values_sum_numeric_only_bug), priority=20),
+ #     CodeTool("fix_fstring_paths", "syntax_fixes", _wrap(fix_fstring_backslash_paths), priority=30),
+ #     CodeTool("ensure_os_import", "syntax_fixes", _wrap(ensure_os_import), priority=40),
+ #     CodeTool("fix_numeric_sum", "syntax_fixes", _wrap(fix_numeric_sum), priority=50),
+ #     CodeTool("fix_concat_empty_list", "syntax_fixes", _wrap(fix_concat_empty_list), priority=60),
+ #     CodeTool("fix_cm_multilabel", "syntax_fixes", _wrap(fix_confusion_matrix_for_multilabel), priority=65),
+ # ]
 
- # 4) final SANITIZERS catch-all
- FINAL_SANITIZERS = [
-     # CodeTool("repair_cell", "final_repair", _wrap(_smx_repair_python_cell), priority=999),
- ]
+ # # 4) final SANITIZERS catch-all
+ # FINAL_SANITIZERS = [
+ # #     CodeTool("repair_cell", "final_repair", _wrap(_smx_repair_python_cell), priority=999),
+ # ]
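This change comments out the entire tool pipeline rather than deleting it. The `_wrap` adapter it used is the interesting bit: it lifts a one-argument code fixer into the `(code, ctx)` signature that `CodeTool` expects. A standalone sketch; the fixer body below is a hypothetical stand-in, the real `strip_python_dotenv` lives in `syntaxmatrix.utils`:

```python
from typing import Any, Callable, Dict

def _wrap(fn: Callable[[str], str]) -> Callable[[str, Dict[str, Any]], str]:
    # Adapt a one-argument fixer to the (code, ctx) tool signature; ctx is ignored.
    return lambda code, ctx: fn(code)

def strip_python_dotenv(code: str) -> str:
    # Hypothetical stand-in for the real sanitiser in syntaxmatrix.utils.
    return "\n".join(line for line in code.splitlines() if "dotenv" not in line)

tool = _wrap(strip_python_dotenv)
cleaned = tool("from dotenv import load_dotenv\nload_dotenv()\nprint('ok')", {})
print(cleaned)  # -> print('ok')
```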
syntaxmatrix/auth.py CHANGED
@@ -41,6 +41,19 @@ def init_auth_db():
      );
      """)
 
+     # Ensure new must_reset_password flag exists for mandatory first-login reset
+     try:
+         cur = conn.execute("PRAGMA table_info(users)")
+         cols = [row[1] for row in cur.fetchall()]
+         if "must_reset_password" not in cols:
+             conn.execute(
+                 "ALTER TABLE users "
+                 "ADD COLUMN must_reset_password INTEGER NOT NULL DEFAULT 0"
+             )
+     except Exception:
+         # Best-effort migration; if this fails we still let the app start
+         pass
+
      # --- Roles table ---
      conn.execute("""
      CREATE TABLE IF NOT EXISTS roles (
@@ -302,6 +315,60 @@ def register_user(email:str, username:str, password:str, role:str = "user") -> b
      finally:
          conn.close()
 
+ def set_must_reset_by_email(email: str, must_reset: bool = True) -> None:
+     """
+     Mark a user account as requiring a password reset (or clear the flag) by email.
+     Used when an admin creates a user with a temporary password.
+     """
+     if not email:
+         return
+     conn = _get_conn()
+     try:
+         conn.execute(
+             "UPDATE users SET must_reset_password = ? WHERE email = ?",
+             (1 if must_reset else 0, email),
+         )
+         conn.commit()
+     finally:
+         conn.close()
+
+
+ def user_must_reset_password(user_id: int) -> bool:
+     """
+     Check whether this user is currently forced to change their password.
+     """
+     if not user_id:
+         return False
+     conn = _get_conn()
+     try:
+         cur = conn.execute(
+             "SELECT must_reset_password FROM users WHERE id = ?",
+             (user_id,),
+         )
+         row = cur.fetchone()
+     finally:
+         conn.close()
+     if not row:
+         return False
+     return bool(row[0])
+
+
+ def clear_must_reset(user_id: int) -> None:
+     """
+     Clear the mandatory-reset flag (called after the user has changed their password).
+     """
+     if not user_id:
+         return
+     conn = _get_conn()
+     try:
+         conn.execute(
+             "UPDATE users SET must_reset_password = 0 WHERE id = ?",
+             (user_id,),
+         )
+         conn.commit()
+     finally:
+         conn.close()
+
  def authenticate(email:str, password:str) -> Optional[Dict]:
      """Return user dict if creds match, else None."""
      conn = _get_conn()
@@ -315,15 +382,85 @@ def authenticate(email:str, password:str) -> Optional[Dict]:
          return {"id": row[0], "email":row[1], "username": row[2], "role": row[4]}
      return None
 
+ def verify_password(user_id: int, candidate: str) -> bool:
+     """
+     Check whether `candidate` matches the current password of the user.
+     Used by the change-password flow.
+     """
+     if not user_id or not candidate:
+         return False
+
+     conn = _get_conn()
+     try:
+         cur = conn.execute(
+             "SELECT password FROM users WHERE id = ?",
+             (user_id,),
+         )
+         row = cur.fetchone()
+     finally:
+         conn.close()
+
+     if not row:
+         return False
+     return check_password_hash(row[0], candidate)
+
+
+ def update_password(user_id: int, new_password: str) -> None:
+     """
+     Overwrite the user's password with a new hash.
+     """
+     if not user_id or not new_password:
+         return
+
+     hashed = generate_password_hash(new_password)
+     conn = _get_conn()
+     try:
+         conn.execute(
+             "UPDATE users SET password = ? WHERE id = ?",
+             (hashed, user_id),
+         )
+         conn.commit()
+     finally:
+         conn.close()
+
+ def update_password(user_id: int, new_password: str) -> bool:
+     """
+     Update the stored password hash for a given user id.
+     Returns True on success, False if something goes wrong.
+     """
+     hashed = generate_password_hash(new_password)
+     conn = _get_conn()
+     try:
+         conn.execute(
+             "UPDATE users SET password = ? WHERE id = ?",
+             (hashed, user_id),
+         )
+         conn.commit()
+         return True
+     except Exception:
+         # We do not raise inside auth; caller shows a friendly message instead.
+         return False
+     finally:
+         conn.close()
+
  def login_required(f):
      @wraps(f)
-     def decorated(*args, **kwargs):
-         if not session.get("user_id"):
-             flash("Please log in to access this page.")
+     def wrapper(*args, **kwargs):
+         if "user_id" not in session:
+             flash("Please log in.")
              return redirect(url_for("login", next=request.path))
-         return f(*args, **kwargs)
-     return decorated
 
+         # If the account is flagged for a mandatory reset, force the user
+         # to the change-password screen before allowing anything else.
+         if session.get("must_reset_password") and request.endpoint not in (
+             "change_password",
+             "logout",
+         ):
+             flash("Please set a new password before continuing.")
+             return redirect(url_for("change_password"))
+
+         return f(*args, **kwargs)
+     return wrapper
 
  def admin_required(view):
      @wraps(view)
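Taken together, the auth changes implement a forced first-login password reset: the migration adds a must_reset_password column, login_required redirects flagged sessions to change_password, and the new helpers read and clear the flag. A sketch of how a change_password route might wire them up; the app object, route, form fields, and "dashboard" endpoint are hypothetical, only the auth helpers come from this diff:

```python
from flask import Flask, flash, redirect, render_template, request, session, url_for
from syntaxmatrix import auth

app = Flask(__name__)  # hypothetical app; the package wires its own routes

@app.route("/change-password", methods=["GET", "POST"])
def change_password():
    uid = session.get("user_id")
    if request.method == "POST":
        current = request.form.get("current_password", "")
        new = request.form.get("new_password", "")
        if not auth.verify_password(uid, current):
            flash("Current password is incorrect.")
        elif auth.update_password(uid, new):
            auth.clear_must_reset(uid)                # lift the DB-side flag
            session.pop("must_reset_password", None)  # lift the session-side gate
            flash("Password updated.")
            return redirect(url_for("dashboard"))     # hypothetical endpoint
        else:
            flash("Could not update the password; please try again.")
    return render_template("change_password.html")
```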
syntaxmatrix/commentary.py CHANGED
@@ -159,18 +159,17 @@ def phrase_commentary_vision(context: Dict[str, Any], images_b64: List[str]) ->
      send figures + text; otherwise fall back to a text-only prompt grounded by labels.
      """
 
-     _SYSTEM_VISION = (
-         "You are a data analyst. Write a detailed analysis that explains what the "
-         "already-rendered visuals mean for the user's question. "
-         "You use information visible in the attached figures and the provided context strings (texs, field names, labels). "
-         "You interprete the output without preamble."
-     )
+     _SYSTEM_VISION = ("""
+     You are a data analyst for plots, graphs, and tables. You analyse and interpret in detail, in plain English, what the already-rendered plots and visuals mean as a response to the question. If the relevant information is available, first answer the question explicitly, then explain the plots and tables.
+     Use the information visible in the attached figures and the provided context strings (texts, tables, plot field names, labels).
+     Provide interpretations without prelude or preamble.
+     """)
 
      _USER_TMPL_VISION = """
      question:
      {q}
 
-     Visible context strings (titles, axes, legends, headers):
+     Visible context strings (tables, plots: titles, axes, legends, headers):
      {ctx}
 
      Write a comprehensive conclusion (~250-350 words) as follows:
@@ -181,7 +180,7 @@ def phrase_commentary_vision(context: Dict[str, Any], images_b64: List[str]) ->
      As you reference the visuals, you should interprete them in a way to show how they answer the question.
      - <b>Limitations</b>
      1 bullet; avoid quoting numbers unless present in context.
-     - <b>Next step</b>
+     - <b>Recommendations</b>
      1 bullet.
      """
 
@@ -191,18 +190,19 @@ def phrase_commentary_vision(context: Dict[str, Any], images_b64: List[str]) ->
          ctx=json.dumps(visible, ensure_ascii=False, indent=2)
      )
 
-     prof = _prof.get_profile("vision2text") or _prof.get_profile("admin")
-     if not prof:
+     commentary_profile = _prof.get_profile("imagetexter") or _prof.get_profile("admin")
+     if not commentary_profile:
          return (
              "<div class='smx-alert smx-alert-warn'>"
-             "No LLM profile is configured for Image2Text. Please, do that in the Admin panel or contact your Administrator."
+             "Error! Set an appropriate ImageTexter profile inside your Admin Panel. "
+             "If that persists, contact your Administrator."
              "</div>"
          )
 
-     prof['client'] = _prof.get_client(prof)
-     _client = prof["client"]
-     _provider = prof["provider"].lower()
-     _model = prof["model"]
+     commentary_profile['client'] = _prof.get_client(commentary_profile)
+     _client = commentary_profile["client"]
+     _provider = commentary_profile["provider"].lower()
+     _model = commentary_profile["model"]
 
      try:
          #1 Google
@@ -322,7 +322,7 @@ def phrase_commentary_vision(context: Dict[str, Any], images_b64: List[str]) ->
  def wrap_html(card_text: str) -> str:
      return f"""
      <div class="smx-commentary-card" style="margin-top:1rem;padding:1rem;border:1px solid #e5e7eb;border-radius:0.75rem;background:#fafafa">
-         <div style="font-weight:600;margin-bottom:0.5rem;">smxAI Feedback</div>
+         <div style="font-weight:600;margin-bottom:0.5rem;">smx-Orion Feedback</div>
          <div class="prose" style="white-space:pre-wrap;line-height:1.45">{card_text}</div>
      </div>
      """.strip()