syntaxmatrix 2.5.6__py3-none-any.whl → 2.5.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- syntaxmatrix/agentic/agents.py +80 -116
- syntaxmatrix/core.py +28 -13
- syntaxmatrix/preface.py +14 -13
- syntaxmatrix/routes.py +3 -4
- syntaxmatrix/settings/model_map.py +4 -2
- syntaxmatrix/templates/dashboard.html +63 -31
- syntaxmatrix/utils.py +62 -25
- {syntaxmatrix-2.5.6.dist-info → syntaxmatrix-2.5.7.dist-info}/METADATA +1 -1
- {syntaxmatrix-2.5.6.dist-info → syntaxmatrix-2.5.7.dist-info}/RECORD +12 -12
- {syntaxmatrix-2.5.6.dist-info → syntaxmatrix-2.5.7.dist-info}/WHEEL +0 -0
- {syntaxmatrix-2.5.6.dist-info → syntaxmatrix-2.5.7.dist-info}/licenses/LICENSE.txt +0 -0
- {syntaxmatrix-2.5.6.dist-info → syntaxmatrix-2.5.7.dist-info}/top_level.txt +0 -0
syntaxmatrix/agentic/agents.py
CHANGED

@@ -42,7 +42,7 @@ def token_calculator(total_input_content, llm_profile):
     input_prompt_tokens = len(enc.encode(total_input_content))
     return input_prompt_tokens

-def mlearning_agent(user_prompt, system_prompt, coding_profile…
+def mlearning_agent(user_prompt, system_prompt, coding_profile):
     """
     Returns:
         (text, usage_dict)
@@ -95,72 +95,41 @@ def mlearning_agent(user_prompt, system_prompt, coding_profile, temperature=0.1,
     # Google
     def google_generate_code():
         nonlocal usage
-        …
-        )
+        config = types.GenerateContentConfig(
+            system_instruction=system_prompt,
+            # Optional: Force the model to generate a Python code block as JSON
+            response_mime_type="application/json",
+            response_schema=types.Schema(
+                type=types.Type.OBJECT,
+                properties={
+                    "code": types.Schema(type=types.Type.STRING, description="The runnable Python code."),
+                    "explanation": types.Schema(type=types.Type.STRING, description="A brief explanation of the code."),
+                },
+                required=["code"]
+            ),
+        )

-        …
+        try:
+            response = _client.models.generate_content(
                 model=_model,
-                contents=…
+                contents=user_prompt,
                 config=config,
             )
+        except Exception as e:
+            return f"An error occurred during API call: {e}"

-        …
-        # 4. Response Extraction (same robust logic as before)
-        text = getattr(resp, "text", None)
-        if isinstance(text, str) and text.strip():
-            return text.strip()
-
-        chunks = []
-        candidates = getattr(resp, "candidates", None) or []
-        for cand in candidates:
-            content = getattr(cand, "content", None)
-            if content:
-                parts = getattr(content, "parts", None) or []
-                for part in parts:
-                    t = getattr(part, "text", None)
-                    if t:
-                        chunks.append(str(t))
-
-        text = "\n".join(chunks).strip()
-        if text:
-            return text
-
-        # 5. Handle blocked response
-        fb = getattr(resp, "prompt_feedback", None)
-        block_reason = getattr(fb, "block_reason", None) if fb else None
-        if block_reason and block_reason != types.BlockedReason.REASON_UNSPECIFIED:
-            raise RuntimeError(f"{_model} blocked the response. Reason: {block_reason.name}")
-        raise RuntimeError(f"{_model} failed to return content due to insufficient data.")
+        # 3. Token Usage Capture and Context Overhead Calculation
+        um = response.usage_metadata
+        usage["input_tokens"] = um.prompt_token_count
+        usage["output_tokens"] = um.candidates_token_count + um.thoughts_token_count
+        usage["total_tokens"] = um.total_token_count

-        …
+        try:
+            # The response text will be a JSON string due to the config.
+            response_json = json.loads(response.text)
+            return response_json.get("code", "Error: Code field not found in response.")
         except Exception as e:
-            …
-            # --- Return the error message wrapped in the required output code structure ---
-            msg = f"I smxAI have instructed {error_msg}\n"
-            return (
-                f"# {msg}\n"
-                "from syntaxmatrix.display import show\n"
-                f"show({msg!r})\n"
-            )
+            return f"Error parsing response as JSON: {e}\nRaw Response: {response.text}"

     # OpenAI Responses API
     def gpt_models_latest_generate_code():
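
The rewritten google_generate_code() switches from free-text scraping to structured output: response_mime_type plus response_schema force the model to return a JSON object with a required "code" field, so extraction reduces to json.loads(response.text). A minimal standalone sketch of the same pattern, assuming the google-genai SDK ("from google import genai") is installed; the model name and prompt below are placeholders, not values from the diff:

    import json
    from google import genai
    from google.genai import types

    client = genai.Client()  # reads the API key from the environment

    config = types.GenerateContentConfig(
        response_mime_type="application/json",
        response_schema=types.Schema(
            type=types.Type.OBJECT,
            properties={"code": types.Schema(type=types.Type.STRING)},
            required=["code"],
        ),
    )

    response = client.models.generate_content(
        model="gemini-2.5-flash",  # placeholder model name
        contents="Write a one-line pandas snippet that counts rows in df.",
        config=config,
    )

    # With the schema in force, response.text is a JSON string with a "code" key.
    print(json.loads(response.text)["code"])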

@@ -225,15 +194,14 @@ def mlearning_agent(user_prompt, system_prompt, coding_profile, temperature=0.1,
                 "from syntaxmatrix.display import show\n"
                 f"show({msg!r})\n"
             )
-
+
     # Anthropic
     def anthropic_generate_code():
         nonlocal usage
         try:
             resp = _client.messages.create(
                 model=_model,
-                …
-                temperature=temperature,
+                temperature=0,
                 system=system_prompt,
                 messages=[
                     {"role": "user", "content": user_prompt}

@@ -276,40 +244,44 @@ def mlearning_agent(user_prompt, system_prompt, coding_profile, temperature=0.1,
     def openai_sdk_generate_code():
         nonlocal usage
         try:
-            …
+            response = None
+            if _model == "deepseek-reasoner":
+                response = _client.chat.completions.create(
+                    model=_model,
+                    messages=[
+                        {"role": "system", "content": system_prompt},
+                        {"role": "user", "content": user_prompt},
+                    ],
+                    extra_body={"thinking": {"type": "enabled"}},
+                    temperature=0,
+                    stream=False
+                )
+            else:
+                response = _client.chat.completions.create(
                     model=_model,
                     messages=[
                         {"role": "system", "content": system_prompt},
                         {"role": "user", "content": user_prompt},
                     ],
-                    …
+                    extra_body={"thinking": {"type": "enabled"}},
+                    temperature=0,
+                    stream=False
                 )
+            content = response.choices[0].message.content

-            …
-            um = resp.usage
+            um = response.usage
             usage["input_tokens"] = um.prompt_tokens
             usage["output_tokens"] = um.completion_tokens
             usage["total_tokens"] = um.total_tokens

-            …
-            first = choices[0]
-            fr = getattr(first, "finish_reason", None)
-            if fr and fr != "stop":
-                block_reason = fr
-
-            if block_reason:
-                raise RuntimeError(f"{_model} stopped with reason: {block_reason}")
-            # Fallback: nothing useful came back
-            raise RuntimeError(f"{_model} returned nothing in this section due to insufficient data.")
+            code_match = re.search(r"```(?:python)?\n(.*?)```", content, re.DOTALL)
+
+            if code_match:
+                return code_match.group(1).strip()
+            else:
+                # If no markdown blocks are found, return the raw content
+                # (assuming the model obeyed instructions to output only code)
+                return content.strip()

         except Exception as e:
             # IMPORTANT: return VALID PYTHON so the dashboard can show the error
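
The new openai_sdk_generate_code() no longer raises on unexpected finish reasons; it pulls the first fenced code block out of the reply and falls back to the raw text. A quick standalone check of that regex, with made-up sample strings:

    import re

    def extract_code(content: str) -> str:
        # Same pattern as in the diff: grab the body of a ``` or ```python fence.
        match = re.search(r"```(?:python)?\n(.*?)```", content, re.DOTALL)
        return match.group(1).strip() if match else content.strip()

    fenced = "Here you go:\n```python\nprint('hi')\n```\nDone."
    bare = "print('hi')"
    assert extract_code(fenced) == "print('hi')"
    assert extract_code(bare) == "print('hi')"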

@@ -318,9 +290,7 @@ def mlearning_agent(user_prompt, system_prompt, coding_profile, temperature=0.1,
                 f"# {msg}\n"
                 "from syntaxmatrix.display import show\n"
                 f"show({msg!r})\n"
-            )
-
-    # print("TTOOKKEENN: ", token_calculator(system_prompt + user_prompt, coding_profile))
+            )

     if _provider == "google":
         code = google_generate_code()

@@ -425,11 +395,12 @@ def refine_question_agent(raw_question: str, dataset_context: str | None = None)

         return "Configure LLM Profiles or contact your administrator."

-    system_prompt = (
-    …
+    system_prompt = ("""
+        - You are a Machine Learning (ML) and Data Science (DS) expert.
+        - You rewrite user questions into clear ML job specifications to help AI assistant generate Python code that provides solution to the user question when it is run. Most user questions are vague. So, your goal is to ensure that your output guards the assistant agains making potential errors that you anticipated could arise due to the nature of the question.
+        - If a dataset summary is provided, use it to respect column and help you rewrite the question properly.
+        - DO NOT write andy prelude or preamble"
+        """)

     user_prompt = f"User question:\n{raw_question}\n\n"
     if dataset_context:

@@ -446,20 +417,7 @@ def refine_question_agent(raw_question: str, dataset_context: str | None = None)


 def classify_ml_job_agent(refined_question, dataset_profile):
-    …
-    Instructs an LLM (gemini-2.5-flash) to analyze a task description
-    and return a list of associated machine learning job/task types.
-    This version uses a highly extensive, generalized list of ML jobs
-    to ensure robustness across all domains (NLP, CV, RL, etc.).
-
-    Args:
-        task_description: The detailed description of the statistical/ML task.
-
-    Returns:
-        A list of strings identifying the relevant ML jobs. Returns an empty
-        list if the API call fails or the output cannot be parsed.
-    """
-
+    import ast
     def ml_response(user_prompt, system_prompt, profile):
         _profile = profile # _prof.get_profile["admin"]


@@ -571,10 +529,14 @@ def classify_ml_job_agent(refined_question, dataset_profile):

         return "Configure LLM Profiles or contact your administrator."

-    system_prompt = (
-    …
+    system_prompt = ("""
+        You are a strict machine learning task classifier for an ML workbench.
+        Your goal is to correctly label the user's task specifications with the most relevant tags from a fixed list.
+        You Must always have 'data_preprocessing' as the 1st tag. Then add up to 4 more, as needed, to make 5 max.
+        Your list should be 2-5 tags long. If no relevant tag, default to ["data_preprocessing"]
+        If tasks specs and `df` don't match (of different industries, return ['context mismatch']
+        You should return only your list of tags, no prelude or preamble.
+        """)

     # --- 1. Define the Master List of ML Tasks (Generalized) ---
     ml_task_list = [

@@ -592,7 +554,7 @@ def classify_ml_job_agent(refined_question, dataset_profile):
         "generative_modeling", "causal_inference", "risk_modeling", "graph_analysis",

         # Foundational/Pipeline Steps
-        "…
+        "data_preprocessing", "feature_engineering", "statistical_inference",
         "model_validation", "hyperparameter_tuning"
     ]


@@ -600,7 +562,7 @@ def classify_ml_job_agent(refined_question, dataset_profile):
     task_description = refined_question

     user_prompt = f"""
-    Analyze the following task description:
+    Analyze and classify the following task description:
     ---
     {task_description}
     ---

@@ -625,9 +587,11 @@ def classify_ml_job_agent(refined_question, dataset_profile):

     llm_profile['client'] = _prof.get_client(llm_profile)

-    # Extract raw content
     tasks = ml_response(user_prompt, system_prompt, llm_profile)
-
+    try:
+        return ast.literal_eval(tasks)
+    except Exception:
+        return tasks


 def text_formatter_agent(text):
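
classify_ml_job_agent() now parses the classifier's reply with ast.literal_eval so callers get a real Python list instead of its string form, falling back to the raw string when the reply is not a valid literal. A small illustration with made-up model replies:

    import ast

    def parse_tags(reply: str):
        try:
            # Safely evaluates list/str/number literals only; no arbitrary code.
            return ast.literal_eval(reply)
        except Exception:
            return reply  # e.g. a refusal or prose answer comes back unchanged

    print(parse_tags("['data_preprocessing', 'regression']"))  # a real list
    print(parse_tags("Sorry, I cannot classify this."))        # the raw string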
syntaxmatrix/core.py
CHANGED

@@ -599,7 +599,7 @@ class SyntaxMUI:
         from syntaxmatrix.gpt_models_latest import extract_output_text as _out, set_args

         if not self._classification_profile:
-            classification_profile = _prof.get_profile('classification') or _prof.get_profile('…
+            classification_profile = _prof.get_profile('classification') or _prof.get_profile('admin')
             if not classification_profile:
                 return {"Error": "Set a profile for Classification"}
             self._classification_profile = classification_profile

@@ -1141,21 +1141,23 @@ class SyntaxMUI:
         tasks = [str(t).strip().lower() for t in tasks if str(t).strip()]

         ai_profile = """
-        - You are a Python expert specializing in…
+        - You are a Python expert specializing in Data Science (DS) and Machine Learning (ML).
         - Your task is to generate a single, complete, production-quality, executable Python script for a Jupyter-like Python kernel, based on the given instructions.
         - The dataset is already loaded as a pandas DataFrame named `df` (no file I/O or file uploads).
-        - Make a copy of `df` and name it `df_copy`.
-        - …
+        - Make a copy of `df` and name it `df_copy`.
+        - Make sure `df_copy` is preprocessed and cleaned, and name it `df_cleaned`, if not already done so.
+        - Work only with `df_cleaned` to perform the ML tasks described in the given context.
+        - Select your features and targets, from `df_cleaned`, with care and name it `required_cols`
         - Create your 'df_filtered by doing: df_filtered = df_cleaned[required_cols].
-        - Use the {TEMPLATE_CATALOGUE} below to educate yourself on which visualizations you will implement in the code.
-        - The final output MUST…
+        - Use the {TEMPLATE_CATALOGUE} below to educate yourself on which visualizations you will implement in the code, and ensure the implementations are in the code you generate.
+        - The final output MUST BE the complete, executable Python code only, enclosed in a single markdown code block (```python ... ```), and MUST BE able to fulfill the user's request: {tasks}.
         - Do not include any explanatory text or markdown outside the code block.
         """

         TEMPLATE_CATALOGUE = """
         ### Available SyntaxMatrix templates (use these instead of inventing new helpers)

-        Visualisation templates
+        Visualisation templates:
         - viz_pie(df, category_col=None, top_k=8): pie/donut shares within a category.
         - viz_stacked_bar(df, x=None, hue=None, normalise=True): composition across groups.
         - viz_count_bar(df, category_col=None, top_k=12): counts/denominators by category.

@@ -1195,9 +1197,9 @@ class SyntaxMUI:

         """
         ### Template rules
-        - You MAY call…
+        - You MAY call 1 or more templates if they matche the task.
         - Do NOT invent template names.
-        - If no template fits, write minimal direct pandas/sklearn/seaborn code instead.
+        - If no template fits, write minimal direct pandas/sklearn/seaborn code instead, for visualization.
         - Keep the solution short: avoid writing wrappers/utilities already handled by SyntaxMatrix hardener.

         #### Template selection hint examples:

@@ -1220,8 +1222,7 @@ class SyntaxMUI:
            set `random_state=42` where relevant.
         4) Be defensive, but avoid hard-failing on optional fields:
            - If the primary column, needed to answer the question, is missing, review your copy of the `df` again.
-             Make sure that you selected the proper column.
-             Never use a column/variable which isn't available or defined.
+           - Make sure that you selected the proper column. Never use a column/variable which isn't available or defined.
            - If a secondary/extra column is missing, show a warning with `show(...)` and continue using available fields.
            - Handle missing values sensibly (drop rows for simple EDA; use `ColumnTransformer` + `SimpleImputer` for modelling).
            - For categorical features in ML, use `OneHotEncoder(handle_unknown="ignore")`

@@ -1253,6 +1254,20 @@ class SyntaxMUI:
         11) You MUST NOT reference any column outside Available columns: {AVAILABLE_COLUMNS}.
         12) If asked to predict/classify, choose the target by matching the task text to Allowed columns
             and never invent a new name.
+        13) Treat df as the primary dataset you must work with.
+        14) The dataset is already loaded as df (no file I/O or file uploads).
+        15) All outputs must be visible to the user via the provided show(...) helper.
+        16) Never use print(...); use show(...) instead.
+        17) You MUST NOT read from or write to local files, folders, or external storage.
+            - Do not call open(...), Path(...).write_text/write_bytes, or similar file APIs.
+            - Do not use df.to_csv(...), df.to_excel(...), df.to_parquet(...),
+              df.to_pickle(...), df.to_json(...), df.to_hdf(...), or any other
+              method that writes to disk.
+            - Do not call joblib.dump(...), pickle.dump(...), torch.save(...),
+              numpy.save(...), numpy.savetxt(...), or similar saving functions.
+            - Do not call plt.savefig(..., 'somefile.png') or any variant that
+              writes an image to a filename. Plots must be rendered in-memory only.
+        18) Keep everything in memory and surface results via show(...) or plots.

         #### Cohort rules
         When you generate plots for cohorts or categories, you MUST obey these rules:

@@ -1302,11 +1317,11 @@ class SyntaxMUI:
         """)

         if not self._coding_profile:
-            coding_profile = _prof.get_profile("coding") or _prof.get_profile("admin")
+            coding_profile = _prof.get_profile("coding") # or _prof.get_profile("admin")
             if not coding_profile:
                 return (
                     '<div class="smx-alert smx-alert-warn">'
-                    'No LLM profile configured for <code>coding</code>…
+                    'No LLM profile configured for <code>coding</code> <br>'
                     'Please, add the LLM profile inside the admin panel or contact your Administrator.'
                     '</div>'
                 )
syntaxmatrix/preface.py
CHANGED

@@ -453,23 +453,24 @@ def _safe_concat(objs, **kwargs):
     except Exception as e:
         smx_show(f'⚠ concat skipped: {e}')
         return _pd.DataFrame()
-
+

 def _SMX_OHE(**k):
     # normalise arg name across sklearn versions
-    if…
-    k[…
-    k.setdefault(…
-    k.setdefault(…
+    if "sparse" in k and "sparse_output" not in k:
+        k["sparse_output"] = k.pop("sparse")
+    k.setdefault("handle_unknown", "ignore")
+    k.setdefault("sparse_output", False)
     try:
-        …
+        if "sparse_output" not in inspect.signature(OneHotEncoder).parameters:
+            if "sparse_output" in k:
+                k["sparse"] = k.pop("sparse_output")
+        return OneHotEncoder(**k)
+    except TypeError:
+        if "sparse_output" in k:
+            k["sparse"] = k.pop("sparse_output")
+        return OneHotEncoder(**k)
+

 def _SMX_mm(a, b):
     try:
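
The restored _SMX_OHE body papers over sklearn's rename of OneHotEncoder's `sparse` keyword to `sparse_output` (the old name was deprecated in 1.2 and removed in 1.4). A condensed standalone sketch of the same compatibility shim; the helper name here is illustrative:

    import inspect
    from sklearn.preprocessing import OneHotEncoder

    def make_ohe(**k):
        # Accept the old kwarg and translate it forward...
        if "sparse" in k and "sparse_output" not in k:
            k["sparse_output"] = k.pop("sparse")
        k.setdefault("handle_unknown", "ignore")
        k.setdefault("sparse_output", False)
        # ...and translate it back when running on sklearn < 1.2.
        if "sparse_output" not in inspect.signature(OneHotEncoder).parameters:
            k["sparse"] = k.pop("sparse_output")
        return OneHotEncoder(**k)

    enc = make_ohe(sparse=False)  # old-style call works on any sklearn version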
syntaxmatrix/routes.py
CHANGED

@@ -3047,7 +3047,7 @@ def setup_routes(smx):
                 }) + "\n\n"

             except GeneratorExit:
-                …
+                return "Client aborted the stream."
             except Exception as e:
                 smx.error(f"Stream error: {e}")
                 yield "data: " + json.dumps({"event": "error", "error": str(e)}) + "\n\n"
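
Returning from the GeneratorExit handler (rather than yielding again or re-raising) is what lets the server close the SSE stream cleanly when a client disconnects. A tiny sketch of that control flow, independent of the route code:

    def stream():
        try:
            while True:
                yield "data: tick\n\n"
        except GeneratorExit:
            # Client went away: just return; yielding after this point
            # would make close() raise RuntimeError.
            return

    g = stream()
    next(g)    # stream is running
    g.close()  # simulates the disconnect; the generator finishes quietly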

@@ -6513,8 +6513,7 @@ def setup_routes(smx):
         cell["highlighted_code"] = Markup(_pygmentize(cell["code"]))

     highlighted_ai_code = _pygmentize(ai_code)
-
-
+
     return render_template(
         "dashboard.html",
         section=section,

@@ -6525,7 +6524,7 @@ def setup_routes(smx):
         highlighted_ai_code=highlighted_ai_code if ai_code else None,
         askai_question=smx.sanitize_rough_to_markdown_task(askai_question),
         refined_question=refined_question,
-        tasks=…
+        tasks=tags,
         data_cells=data_cells,
         session_id=session_id,
         llm_usage=llm_usage

syntaxmatrix/templates/dashboard.html
CHANGED

@@ -15,7 +15,7 @@
     position: fixed;
     top: 0; left: 0;
     width: 200px; height: 100vh;
-    background: #…
+    background: #a3a4a5ff;
     border-right: 1px solid #ccc;
     padding: 26px 10px 10px 14px;
     box-sizing: border-box;

@@ -56,7 +56,7 @@
     min-height: 100vh;
     box-sizing: border-box;
     overflow-x: auto;
-    background: #…
+    background: #dff0f5ff;
     font-size: clamp(0.98rem, 2vw, 1.07rem);
 }
 .dashboard-tabs {

@@ -85,7 +85,7 @@
     top: 2px;
 }
 .dashboard-content {
-    background: #…
+    background: #bbbbbd;
     width: 100%;
     padding: 10px;
     border-radius: 0 0 10px 10px;

@@ -95,6 +95,10 @@
     overflow-x: auto;
     margin-right: 1vw;
 }
+
+textarea#askai{
+    background: #e5e5e5cd;
+}

 .smx-table {
     padding: clamp(3px, 1vw, 9px) clamp(4px, 2vw, 13px);

@@ -590,6 +594,31 @@
     .eda-card h3{ color: #1f2937 !important; }
     .smx-stat h4{ color: #64748b !important; }
 </style>
+<style>
+    div.li > li {
+        margin-left: 35px;
+    }
+    /* 1. Style the arrow specifically */
+    .toggle-arrow {
+        display: inline-block;      /* CRITICAL: Allows the element to rotate */
+        transition: transform 0.2s; /* Makes the rotation smooth */
+        margin-right: 6px;          /* Spacing between arrow and text */
+    }
+
+    /* 2. Rotate and color when the menu is OPEN */
+    details[open] summary .toggle-arrow {
+        transform: rotate(90deg); /* Rotates the arrow downwards */
+        color: #007acc;           /* Changes color to blue */
+    }
+
+    /* 3. (Optional) Remove default browser markers to avoid double arrows */
+    details > summary {
+        list-style: none;
+    }
+    details > summary::-webkit-details-marker {
+        display: none;
+    }
+</style>

 <script src="https://cdn.plot.ly/plotly-latest.min.js"></script>
 </head>

@@ -611,8 +640,7 @@
 <div class="dashboard-main">
   <button id="sidebarToggle" class="sidebar-toggle" aria-label="Open menu"></button>
   <ul class="dashboard-tabs">
-    <li class="{{ 'active' if section == 'explore' else '' }}"><a href="/dashboard?section=explore">Explore</a></li>
-
+    <li class="{{ 'active' if section == 'explore' else '' }}"><a href="/dashboard?section=explore">Explore</a></li>
   </ul>
   <div class="dashboard-content">
     <div class="explore-card">

@@ -679,37 +707,41 @@
       -->
       <br><br>
       <div class="refined-qblock">
-        …
+        <details>
+          <summary class="refined-q-label" style="cursor: pointer; list-style: none;">
+            <span class="toggle-arrow">▶</span>
+            <b>Thought Process</b>
+          </summary>
+          <div class="li" style="margin-top: 10px; padding-left: 14px; border-left: 3px solid #e0e5ee;">
+            <span class="refined-q">{{ refined_question|safe }}</span>
+            <br><br>
+            {% if tasks %}
+              <b>Tasks Performed: </b>
+              {% for task in tasks %}
+                <li>{{ task.replace('_', ' ').capitalize() }}</li>
+              {% endfor %}
+            {% endif %}
+            <br><br>
+            {% if llm_usage %}
+              <b>LLM: </b><span>{{ llm_usage.provider }} | {{ llm_usage.model }}</span><br>
+              <b>Token Usage: </b>
+              <li>Input Tokens: {{ llm_usage.input_tokens }}</li>
+              <li>Output Tokens: {{ llm_usage.output_tokens }}</li>
+              <li>Total Tokens: {{ llm_usage.total_tokens }}</li>
+            </div>
+            {% endif %}
+          </div>
+        </details>
       {% endif %}
       {% if ai_outputs %}
       <div class="d-flex align-items-center justify-content-between" style="margin: 12px;">
         <h3 class="m-0">Result</h3>
+        {% for html_block in ai_outputs %}
+          <div class="ai-output" style="margin-bottom:18px;overflow-x:auto; max-width:100%;">
+            {{ html_block | safe }}
+          </div>
+        {% endfor %}
       </div>
-      {% for html_block in ai_outputs %}
-        <div class="ai-output" style="margin-bottom:18px;overflow-x:auto; max-width:100%;">
-          {{ html_block | safe }}
-        </div>
-      {% endfor %}
       {% endif %}
       {% if ai_code %}
       <div>
syntaxmatrix/utils.py
CHANGED

@@ -155,31 +155,6 @@ def harden_ai_code(code: str) -> str:
     # Remove any LLM-added try/except blocks (hardener adds its own)
     import re

-    def strip_placeholders(code: str) -> str:
-        code = re.sub(r"\bshow\(\s*\.\.\.\s*\)",
-                      "show('⚠ Block skipped due to an error.')",
-                      code)
-        code = re.sub(r"\breturn\s+\.\.\.", "return None", code)
-        return code
-
-    def _SMX_OHE(**k):
-        # normalise arg name across sklearn versions
-        if "sparse" in k and "sparse_output" not in k:
-            k["sparse_output"] = k.pop("sparse")
-        # default behaviour we want
-        k.setdefault("handle_unknown", "ignore")
-        k.setdefault("sparse_output", False)
-        try:
-            # if running on old sklearn without sparse_output, translate back
-            if "sparse_output" not in inspect.signature(OneHotEncoder).parameters:
-                if "sparse_output" in k:
-                    k["sparse"] = k.pop("sparse_output")
-            return OneHotEncoder(**k)
-        except TypeError:
-            # final fallback: try legacy name
-            if "sparse_output" in k:
-                k["sparse"] = k.pop("sparse_output")
-            return OneHotEncoder(**k)

     def _strip_stray_backrefs(code: str) -> str:
         code = re.sub(r'(?m)^\s*\\\d+\s*', '', code)

@@ -497,6 +472,67 @@ def harden_ai_code(code: str) -> str:
         """
     )

+    def _strip_file_io_ops(code: str) -> str:
+        """
+        Remove obvious local file I/O operations in LLM code
+        so nothing writes to the container filesystem.
+        """
+        # 1) Methods like df.to_csv(...), df.to_excel(...), etc.
+        FILE_WRITE_METHODS = (
+            "to_csv", "to_excel", "to_pickle", "to_parquet",
+            "to_json", "to_hdf",
+        )
+
+        for mname in FILE_WRITE_METHODS:
+            pat = re.compile(
+                rf"(?m)^(\s*)([A-Za-z_][A-Za-z0-9_\.]*)\s*\.\s*{mname}\s*\([^)]*\)\s*$"
+            )
+
+            def _repl(match):
+                indent = match.group(1)
+                expr = match.group(2)
+                return f"{indent}# [SMX] stripped file write: {expr}.{mname}(...)"
+
+            code = pat.sub(_repl, code)
+
+        # 2) plt.savefig(...) calls
+        pat_savefig = re.compile(r"(?m)^(\s*)(plt\.savefig\s*\([^)]*\)\s*)$")
+        code = pat_savefig.sub(
+            lambda m: f"{m.group(1)}# [SMX] stripped savefig: {m.group(2).strip()}",
+            code,
+        )
+
+        # 3) with open(..., 'w'/'wb') as f:
+        pat_with_open = re.compile(
+            r"(?m)^(\s*)with\s+open\([^)]*['\"]w[b]?['\"][^)]*\)\s+as\s+([A-Za-z_][A-Za-z0-9_]*)\s*:\s*$"
+        )
+
+        def _with_open_repl(match):
+            indent = match.group(1)
+            var = match.group(2)
+            return f"{indent}if False: # [SMX] file write stripped (was: with open(... as {var}))"
+
+        code = pat_with_open.sub(_with_open_repl, code)
+
+        # 4) joblib.dump(...), pickle.dump(...)
+        for mod in ("joblib", "pickle"):
+            pat = re.compile(rf"(?m)^(\s*){mod}\.dump\s*\([^)]*\)\s*$")
+            code = pat.sub(
+                lambda m: f"{m.group(1)}# [SMX] stripped {mod}.dump(...)",
+                code,
+            )
+
+        # 5) bare open(..., 'w'/'wb') calls
+        pat_open = re.compile(
+            r"(?m)^(\s*)open\([^)]*['\"]w[b]?['\"][^)]*\)\s*$"
+        )
+        code = pat_open.sub(
+            lambda m: f"{m.group(1)}# [SMX] stripped open(..., 'w'/'wb')",
+            code,
+        )
+
+        return code
+
     # Register and run patches once per execution
     for _patch in (
         _smx_patch_mean_squared_error_squared_kw,
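
The new _strip_file_io_ops() pass is line-oriented: each regex matches a statement that occupies a whole line and rewrites it into a `# [SMX] ...` comment, so multiline calls slip through by design. A quick standalone check of the to_csv and savefig patterns (regexes copied from the diff; the input snippet is made up):

    import re

    snippet = (
        "df_cleaned.to_csv('out.csv')\n"
        "plt.savefig('plot.png')\n"
        "show(df_cleaned.head())\n"
    )

    pat_to_csv = re.compile(r"(?m)^(\s*)([A-Za-z_][A-Za-z0-9_\.]*)\s*\.\s*to_csv\s*\([^)]*\)\s*$")
    pat_savefig = re.compile(r"(?m)^(\s*)(plt\.savefig\s*\([^)]*\)\s*)$")

    out = pat_to_csv.sub(lambda m: f"{m.group(1)}# [SMX] stripped file write: {m.group(2)}.to_csv(...)", snippet)
    out = pat_savefig.sub(lambda m: f"{m.group(1)}# [SMX] stripped savefig: {m.group(2).strip()}", out)
    print(out)
    # Both write lines come back as comments; the show(...) line is untouched.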

@@ -598,6 +634,7 @@ def harden_ai_code(code: str) -> str:
     fixed = _wrap_metric_calls(fixed)
     fixed = _fix_unexpected_indent(fixed)
     fixed = _patch_feature_coef_dataframe(fixed)
+    fixed = _strip_file_io_ops(fixed)

     # Import shared preface helpers once and wrap the LLM body safely
     header = "from syntaxmatrix.preface import *\n\n"

{syntaxmatrix-2.5.6.dist-info → syntaxmatrix-2.5.7.dist-info}/RECORD
CHANGED

@@ -2,7 +2,7 @@ syntaxmatrix/__init__.py,sha256=_LnTrYAW2tbYA37Y233Vv4OMOk8NUnoJi-1yzFyHxEI,2573
 syntaxmatrix/auth.py,sha256=SCD6uWojXjj9yjUTKzgV5kBYe6ZkXASEG2VopLFkEtM,18140
 syntaxmatrix/bootstrap.py,sha256=Y7ZNg-Z3ecrr1iYem5EMzPmGstXnEKmO9kqKVoOoljo,817
 syntaxmatrix/commentary.py,sha256=3uSlbaQ1zl-gYtEtEpFbv2M-IH-HSdFdMvhxa7UCNHk,12025
-syntaxmatrix/core.py,sha256=…
+syntaxmatrix/core.py,sha256=7o5givPq7Io9DCnJQjTy-izgYvJivE2POxvG45i3dBk,61338
 syntaxmatrix/dataset_preprocessing.py,sha256=wtV4MWzkyfOsBHTsS0H1gqHho77ZQHGDI9skJryyZWA,8732
 syntaxmatrix/db.py,sha256=xkCpyhFxnAwrnZCTd13NkJsahVze0i4egjMcbB7kPfs,5000
 syntaxmatrix/display.py,sha256=TgMrE5WW80VlLcL_XvEz936mekFccJgLTfzbCIozSc8,3728

@@ -15,28 +15,28 @@ syntaxmatrix/kernel_manager.py,sha256=sE9zwuqEZq10Q4ySpGn0ilx-ui7cmZw-LEK8GxK-Hh
 syntaxmatrix/llm_store.py,sha256=c22-ahR_PmZVWB5OAKPVr01YI9rWPWDd_aSEMujhAic,7500
 syntaxmatrix/models.py,sha256=-yGj4fALYqyQqxIiB0Eh6xWSlr9GfwuoDlzAWUikye8,533
 syntaxmatrix/plottings.py,sha256=MjHQ9T1_oC5oyr4_wkM2GJDrpjp0sbvudbs2lGaMyzk,6103
-syntaxmatrix/preface.py,sha256=…
+syntaxmatrix/preface.py,sha256=EOK3lflMJ-0B6SRJtVXhzZjhvu-bfXzw-sy1TbTYOVs,17009
 syntaxmatrix/profiles.py,sha256=0-lky7Wj-WQlP5CbvTyw1tI2M0FiqhhTkLZYLRhD5AU,2251
 syntaxmatrix/project_root.py,sha256=1ckvbFVV1szHtHsfSCoGcImHkRwbfszmPG1kGh9ZZlE,2227
-syntaxmatrix/routes.py,sha256=…
+syntaxmatrix/routes.py,sha256=kkLn6uOfOiD8-Kl2_JKs0qcxBHkuJd6cFih9v8KuTWk,302912
 syntaxmatrix/session.py,sha256=v0qgxnVM_LEaNvZQJSa-13Q2eiwc3RDnjd2SahNnHQk,599
 syntaxmatrix/smiv.py,sha256=1lSN3UYpXvYoVNd6VrkY5iZuF_nDxD6xxvLnTn9wcbQ,1405
 syntaxmatrix/smpv.py,sha256=rrCgYqfjBaK2n5qzfQyXK3bHFMvgNcCIqPaXquOLtDM,3600
 syntaxmatrix/themes.py,sha256=qa90vPZTuNNKB37loZhChQfu5QqkaJG4JxgI_4QgCxw,3576
 syntaxmatrix/ui_modes.py,sha256=5lfKK3AKAB-JQCWfi1GRYp4sQqg4Z0fC3RJ8G3VGCMw,152
-syntaxmatrix/utils.py,sha256=…
+syntaxmatrix/utils.py,sha256=0iTu9XbUN1HsZModWmyexYrXAzjox7gpHyYV7SmW-PM,123555
 syntaxmatrix/vector_db.py,sha256=ozvOcMHt52xFAvcp-vAqT69kECPq9BwL8Rzgq3AJaMs,5824
 syntaxmatrix/vectorizer.py,sha256=5w_UQiUIirm_W-Q9TcaEI8LTcTYIuDBdKfz79T1aZ8g,1366
 syntaxmatrix/workspace_db.py,sha256=Xu9OlW8wo3iaH5Y88ZMdLOf-fiZxF1NBb5rAw3KcbfY,4715
 syntaxmatrix/agentic/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 syntaxmatrix/agentic/agent_tools.py,sha256=yQwavONP23ziMxNQf3j2Y4TVo_LxEsiAWecKuBK8WDg,866
-syntaxmatrix/agentic/agents.py,sha256=…
+syntaxmatrix/agentic/agents.py,sha256=m4eqeCf4fVRodCa1Pgc3LwT4EPGip4E-7n1k1HxeKxA,29898
 syntaxmatrix/agentic/code_tools_registry.py,sha256=Wp4-KHtp0BUVciqSbmionBsQMVFOnvJPruBJeNiuwkk,1564
 syntaxmatrix/agentic/model_templates.py,sha256=A3ROE3BHkvnU9cxqSGjlCBIw9U15zRaTKgK-WxcZtUI,76033
 syntaxmatrix/settings/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 syntaxmatrix/settings/default.yaml,sha256=BznvF1D06VMPbT6UX3MQ4zUkXxTXLnAA53aUu8G4O38,569
 syntaxmatrix/settings/logging.py,sha256=U8iTDFv0H1ECdIzH9He2CtOVlK1x5KHCk126Zn5Vi7M,1362
-syntaxmatrix/settings/model_map.py,sha256=…
+syntaxmatrix/settings/model_map.py,sha256=M6EPTPw2m88RFLNlzxt6vQzEtdr66NcHz0t4zB7QfkU,12028
 syntaxmatrix/settings/prompts.py,sha256=dLNijnw9UHlAg5qxcSaLPhTmR7SdDDyOFcMKhlCA4eQ,21695
 syntaxmatrix/settings/string_navbar.py,sha256=NqgTzo3J9rRI4c278VG6kpoViFfmi2FKmL6sO0R-bus,83
 syntaxmatrix/static/docs.md,sha256=rWlKjNcpS2cs5DElGNYuaA-XXdGZnRGMXx62nACvDwE,11105

@@ -51,7 +51,7 @@ syntaxmatrix/static/js/sidebar.js,sha256=zHp4skKLY2Dlqx7aLPQ8_cR0iTRT17W0SC2TR38
 syntaxmatrix/static/js/widgets.js,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 syntaxmatrix/templates/change_password.html,sha256=YWEcnwJLccLyKGzQxIrc0xuP-p00BtEIwcYq4oFvJ-0,3332
 syntaxmatrix/templates/code_cell.html,sha256=LOr9VjvNQcOGKKJ1ecpcZh3C3qsUxBHueg2iQtpdxl8,638
-syntaxmatrix/templates/dashboard.html,sha256=…
+syntaxmatrix/templates/dashboard.html,sha256=lR0wUtozTh5bDHbPSiywJiypiT_nNfzvEJJLfWckE0w,32272
 syntaxmatrix/templates/docs.html,sha256=KVi5JrZD3gwOduiZhAz7hQrKY9SrQ_bsHOODj0Nj09s,3552
 syntaxmatrix/templates/error.html,sha256=Iu5ykHnhw8jrxVBNn6B95e90W5u9I2hySCiLtaoOJMs,3290
 syntaxmatrix/templates/login.html,sha256=V_bWHozS1xCeHPsvAAfaGG-_2lAE7K8d05IarQN1PS8,2677

@@ -63,8 +63,8 @@ syntaxmatrix/vectordb/adapters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5
 syntaxmatrix/vectordb/adapters/milvus_adapter.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 syntaxmatrix/vectordb/adapters/pgvector_adapter.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 syntaxmatrix/vectordb/adapters/sqlite_adapter.py,sha256=L8M2qHfwZRAFVxWeurUVdHaJXz6F5xTUSWh3uy6TSUs,6035
-syntaxmatrix-2.5.…
-syntaxmatrix-2.5.…
-syntaxmatrix-2.5.…
-syntaxmatrix-2.5.…
-syntaxmatrix-2.5.…
+syntaxmatrix-2.5.7.dist-info/licenses/LICENSE.txt,sha256=j1P8naTdy1JMxTC80XYQjbyAQnuOlpDusCUhncrvpy8,1083
+syntaxmatrix-2.5.7.dist-info/METADATA,sha256=2_xno8Sgx4iERRKmk5GfgluVQDY_94aPFdmx3NC85dw,18090
+syntaxmatrix-2.5.7.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+syntaxmatrix-2.5.7.dist-info/top_level.txt,sha256=HKP_zkl4V_nt7osC15DlacoBZktHrbZYOqf_pPkF3T8,13
+syntaxmatrix-2.5.7.dist-info/RECORD,,

{syntaxmatrix-2.5.6.dist-info → syntaxmatrix-2.5.7.dist-info}/WHEEL: File without changes
{syntaxmatrix-2.5.6.dist-info → syntaxmatrix-2.5.7.dist-info}/licenses/LICENSE.txt: File without changes
{syntaxmatrix-2.5.6.dist-info → syntaxmatrix-2.5.7.dist-info}/top_level.txt: File without changes