syntaxmatrix-2.3.5-py3-none-any.whl → syntaxmatrix-2.5.5.5-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- syntaxmatrix/agentic/__init__.py +0 -0
- syntaxmatrix/agentic/agent_tools.py +24 -0
- syntaxmatrix/agentic/agents.py +810 -0
- syntaxmatrix/agentic/code_tools_registry.py +37 -0
- syntaxmatrix/agentic/model_templates.py +1790 -0
- syntaxmatrix/commentary.py +134 -112
- syntaxmatrix/core.py +385 -245
- syntaxmatrix/dataset_preprocessing.py +218 -0
- syntaxmatrix/display.py +89 -37
- syntaxmatrix/gpt_models_latest.py +5 -4
- syntaxmatrix/profiles.py +19 -4
- syntaxmatrix/routes.py +947 -141
- syntaxmatrix/settings/model_map.py +38 -30
- syntaxmatrix/static/icons/hero_bg.jpg +0 -0
- syntaxmatrix/templates/dashboard.html +248 -54
- syntaxmatrix/utils.py +2254 -84
- {syntaxmatrix-2.3.5.dist-info → syntaxmatrix-2.5.5.5.dist-info}/METADATA +16 -17
- {syntaxmatrix-2.3.5.dist-info → syntaxmatrix-2.5.5.5.dist-info}/RECORD +21 -15
- syntaxmatrix/model_templates.py +0 -29
- {syntaxmatrix-2.3.5.dist-info → syntaxmatrix-2.5.5.5.dist-info}/WHEEL +0 -0
- {syntaxmatrix-2.3.5.dist-info → syntaxmatrix-2.5.5.5.dist-info}/licenses/LICENSE.txt +0 -0
- {syntaxmatrix-2.3.5.dist-info → syntaxmatrix-2.5.5.5.dist-info}/top_level.txt +0 -0
syntaxmatrix/commentary.py
CHANGED
@@ -160,24 +160,29 @@ def phrase_commentary_vision(context: Dict[str, Any], images_b64: List[str]) ->
     """
 
     _SYSTEM_VISION = (
-
-
-
-
+        "You are a data analyst. Write a detailed analysis that explains what the "
+        "already-rendered visuals mean for the user's question. "
+        "You use information visible in the attached figures and the provided context strings (texs, field names, labels). "
+        "You interprete the output without preamble."
     )
 
-    _USER_TMPL_VISION = """
+    _USER_TMPL_VISION = """
 question:
 {q}
 
 Visible context strings (titles, axes, legends, headers):
 {ctx}
 
-Write a
-- <
-
-- <
-
+Write a comprehensive conclusion (~250-350 words) as follows:
+- <b>Headline</b>
+2-3 sentence answering the question from an overview of all the output.
+- <b>Evidence</b>
+8-10 bullets referencing the (output-texts/tables/panels/axes/legend groups) seen in the output.
+As you reference the visuals, you should interprete them in a way to show how they answer the question.
+- <b>Limitations</b>
+1 bullet; avoid quoting numbers unless present in context.
+- <b>Next step</b>
+1 bullet.
     """
 
     visible = _context_strings(context)
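The rewritten prompt above is filled in by the format call that opens the next hunk. As a minimal sketch, assuming a hypothetical question and hypothetical context labels (real callers build ctx from _context_strings(context)), the rendered user message looks like this:

    import json

    _USER_TMPL_VISION = """
    question:
    {q}

    Visible context strings (titles, axes, legends, headers):
    {ctx}
    """  # bullet instructions trimmed; see the full template in the hunk above

    visible = ["Revenue by region", "x: month", "y: revenue (USD)"]  # hypothetical labels
    user = _USER_TMPL_VISION.format(
        q="Which region grew fastest in Q4?",  # hypothetical question
        ctx=json.dumps(visible, ensure_ascii=False, indent=2),
    )
    print(user)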
@@ -185,122 +190,139 @@
         q=context.get("question",""),
         ctx=json.dumps(visible, ensure_ascii=False, indent=2)
     )
-
-    prof = _prof.get_profile("
+
+    prof = _prof.get_profile("vision2text") or _prof.get_profile("admin")
     if not prof:
         return (
             "<div class='smx-alert smx-alert-warn'>"
             "No LLM profile is configured for Image2Text. Please, do that in the Admin panel or contact your Administrator."
             "</div>"
         )
-
-
-
+
+    prof['client'] = _prof.get_client(prof)
+    _client = prof["client"]
+    _provider = prof["provider"].lower()
+    _model = prof["model"]
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                            "data": b64
+    try:
+        #1 Google
+        if _provider == "google":
+            try:
+                input_contents = []
+
+                # Add text part first
+                text_part = {"text": user}
+                input_contents.append(text_part)
+
+                # Add image parts
+                for b64 in images_b64:
+                    if b64:
+                        image_part = {
+                            "inline_data": {
+                                "mime_type": "image/png",
+                                "data": b64
+                            }
                         }
-
-                contents.append(image_part)
-
-            # Correct Gemini API call
-            response = _client.models.generate_content(
-                model=_model,
-                contents=contents
-            )
-            txt = response.text.strip()
-            return txt.strip()
-        except Exception as e:
-            return f"Google Gemini error: {e}"
-
-    # Openai
-    elif _provider == "openai" and _model in GPT_MODELS_LATEST:
-        # Use the Responses API with multimodal input (text + up to 4 images)
-        try:
-            parts = [{"type": "input_text", "text": user}]
-            for b64 in (images_b64 or [])[:4]:
-                if b64:
-                    parts.append({"type": "input_image", "image_url": f"data:image/png;base64,{b64}"})
-
-            args = set_args(
-                model=_model,
-                instructions=_SYSTEM_VISION,
-                input=[{"role": "user", "content": parts}],
-                previous_id=None,
-                store=False,
-                reasoning_effort="minimal",
-                verbosity="low",
-            )
-            resp = _client.responses.create(**args)
-            txt = _out(resp) or ""
-            if txt.strip():
-                return txt.strip()
-        except Exception:
-            pass  # Fall through to the chat.completions fallback implemented below.
+                        input_contents.append(image_part)
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+                response = _client.models.generate_content(
+                    model=_model,
+                    contents=input_contents,
+                    config=types.GenerateContentConfig(
+                        system_instruction=_SYSTEM_VISION,
+                        temperature=0.7,
+                        max_output_tokens=1024,
+                    ),
+                )
+                txt = response.text.strip()
+                return txt.strip()
+            except Exception:
+                pass
+
+        #2 Openai
+        elif _provider == "openai" and _model in GPT_MODELS_LATEST:
+            try:
+                input_contents = []
+
+                text_part = {"type": "input_text", "text": user}
+                input_contents.append(text_part)
+                for b64 in images_b64:
+                    if b64:
+                        image_part = {
+                            "type": "input_image",
+                            "image_url": f"data:image/png;base64,{b64}"
+                        }
+                        input_contents.append(image_part)
+
+                args = set_args(
+                    model=_model,
+                    instructions=_SYSTEM_VISION,
+                    input=[{"role": "user", "content": input_contents}],
+                    previous_id=None,
+                    store=False,
+                    reasoning_effort="low",
+                    verbosity="medium",
+                )
+                resp = _client.responses.create(**args)
+                txt = _out(resp) or ""
+                if txt.strip():
+                    return txt.strip()
+            except Exception:
+                pass
+
+        # Anthropic
+        elif _provider == "anthropic":
+            try:
+                input_contents = []
+
+                text_part = {"type":"text","text": user}
+                input_contents.append(text_part)
 
-
-
+                for b64 in images_b64:
+                    if b64:
+                        image_part = {
+                            "type":"image_url",
+                            "image_url":{"url": f"data:image/png;base64,{b64}"}
+                        }
+                        input_contents.append(image_part)
+
+                response = _client.messages.create(
+                    model=_model,
+                    max_tokens=1024,
+                    system=_SYSTEM_VISION,
+                    messages=[{"role": "user", "content":input_contents}],
+                    stream=False,
+                )
+                return response.content[0].text.strip()
+            except Exception:
+                pass
+
+        # OpenAI SDK
+        else:
+            try:
+                input_contents = [{"type":"text","text": user}]
+                for b64 in images_b64:
+                    if b64:
+                        input_contents.append({"type":"image_url","image_url":{"url": f"data:image/png;base64,{b64}"}})
+                resp = _client.chat.completions.create(
+                    model=_model,
+                    temperature=1,
+                    messages=[
+                        {"role":"system","content":_SYSTEM_VISION},
+                        {"role":"user","content":input_contents},
+                    ],
+                    max_tokens=1024,
+                )
+                return (resp.choices[0].message.content or "").strip()
+            except Exception:
+                pass
+    except Exception:
+        return "Insufficient context to comment usefully."
 
 def wrap_html(card_text: str) -> str:
     return f"""
 <div class="smx-commentary-card" style="margin-top:1rem;padding:1rem;border:1px solid #e5e7eb;border-radius:0.75rem;background:#fafafa">
-  <div style="font-weight:600;margin-bottom:0.5rem;">
+  <div style="font-weight:600;margin-bottom:0.5rem;">smxAI Feedback</div>
   <div class="prose" style="white-space:pre-wrap;line-height:1.45">{card_text}</div>
 </div>
 """.strip()
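For orientation, here is a minimal usage sketch of the two functions touched above, assuming an LLM profile is configured in the Admin panel; the file name, question, and context keys are hypothetical:

    import base64
    from syntaxmatrix.commentary import phrase_commentary_vision, wrap_html

    # Hypothetical inputs: one rendered figure plus the user's question.
    with open("figure.png", "rb") as fh:
        b64 = base64.b64encode(fh.read()).decode("ascii")

    context = {"question": "Which region grew fastest in Q4?"}
    card = phrase_commentary_vision(context, [b64])  # dispatches on the profile's provider
    html = wrap_html(card)                           # wraps it in the "smxAI Feedback" card

Note that 2.5.5.5 swallows per-provider failures (each branch ends in "except Exception: pass", and the new outer handler returns a generic "Insufficient context to comment usefully."), so errors no longer surface as messages like "Google Gemini error: {e}" did in 2.3.5.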