syntaxmatrix-2.3.5-py3-none-any.whl → syntaxmatrix-2.5.5.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -160,24 +160,29 @@ def phrase_commentary_vision(context: Dict[str, Any], images_b64: List[str]) ->
  """
 
  _SYSTEM_VISION = (
- "You are a data analyst. Write a short, precise commentary in UK English that explains what the "
- "already-rendered visuals mean for the user's question. "
- "Use information visible in the attached figures and the provided context strings (field names, labels). "
- "Do not invent numbers. If the figures/context are insufficient, say: 'Insufficient context to comment usefully.'"
+ "You are a data analyst. Write a detailed analysis that explains what the "
+ "already-rendered visuals mean for the user's question. "
+ "You use information visible in the attached figures and the provided context strings (texts, field names, labels). "
+ "You interpret the output without preamble."
  )
 
- _USER_TMPL_VISION = """\
+ _USER_TMPL_VISION = """
  question:
  {q}
 
  Visible context strings (titles, axes, legends, headers):
  {ctx}
 
- Write a concise conclusion (~200-260 words) with:
- - <strong>Headline</strong> (one sentence answering the question).
- - <strong>Evidence</strong> (6-8 bullets referencing panels/axes/legend groups seen in the figures and explaining the plots/tables vis-a-vis the query). Explain all the oupupt comprehensively in details.
- - <strong>Limitations</strong> (1 bullets; avoid quoting numbers unless present in context).
- - <strong>Next step</strong> (1 bullet).
+ Write a comprehensive conclusion (~250-350 words) as follows:
+ - <b>Headline</b>
+ 2-3 sentences answering the question from an overview of all the output.
+ - <b>Evidence</b>
+ 8-10 bullets referencing the (output-texts/tables/panels/axes/legend groups) seen in the output.
+ As you reference the visuals, you should interpret them in a way that shows how they answer the question.
+ - <b>Limitations</b>
+ 1 bullet; avoid quoting numbers unless present in context.
+ - <b>Next step</b>
+ 1 bullet.
  """
 
  visible = _context_strings(context)
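For orientation, a minimal sketch of how the reworked _USER_TMPL_VISION above gets filled before it is sent to the model. The question text and context strings below are made-up placeholders, and the abbreviated template is a stand-in for the full string added in the hunk above; it mirrors the keyword arguments visible at the start of the next hunk:

    import json

    # Abbreviated stand-in for the _USER_TMPL_VISION string added above.
    _USER_TMPL_VISION = """
    question:
    {q}

    Visible context strings (titles, axes, legends, headers):
    {ctx}
    """

    # Hypothetical question and context strings, purely for illustration.
    question = "Which region drives the Q3 revenue increase?"
    visible = ["Revenue by region", "x: quarter", "legend: North, South"]

    # Fill the template the same way the surrounding code does.
    user = _USER_TMPL_VISION.format(
        q=question,
        ctx=json.dumps(visible, ensure_ascii=False, indent=2),
    )
    print(user)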
@@ -185,122 +190,139 @@ def phrase_commentary_vision(context: Dict[str, Any], images_b64: List[str]) ->
  q=context.get("question",""),
  ctx=json.dumps(visible, ensure_ascii=False, indent=2)
  )
-
- prof = _prof.get_profile("image2text") or _prof.get_profile("admin")
+
+ prof = _prof.get_profile("vision2text") or _prof.get_profile("admin")
  if not prof:
  return (
  "<div class='smx-alert smx-alert-warn'>"
  "No LLM profile is configured for Image2Text. Please, do that in the Admin panel or contact your Administrator."
  "</div>"
  )
- _client = _prof.get_client(prof)
- _provider = (prof.get("provider") or "").lower()
- _model = prof.get("model") or ""
+
+ prof['client'] = _prof.get_client(prof)
+ _client = prof["client"]
+ _provider = prof["provider"].lower()
+ _model = prof["model"]
 
- # Google
- if _provider == "google":
- try:
- # Gemini expects a different structure
- contents = []
-
- # Add text part first
- text_part = {
- "text": _SYSTEM_VISION + "\n\n" + user
- }
- contents.append(text_part)
-
- # Add image parts
- for b64 in images_b64[:4]:
- if b64:
- image_part = {
- "inline_data": {
- "mime_type": "image/png",
- "data": b64
+ try:
+ #1 Google
+ if _provider == "google":
+ try:
+ input_contents = []
+
+ # Add text part first
+ text_part = {"text": user}
+ input_contents.append(text_part)
+
+ # Add image parts
+ for b64 in images_b64:
+ if b64:
+ image_part = {
+ "inline_data": {
+ "mime_type": "image/png",
+ "data": b64
+ }
  }
- }
- contents.append(image_part)
-
- # Correct Gemini API call
- response = _client.models.generate_content(
- model=_model,
- contents=contents
- )
- txt = response.text.strip()
- return txt.strip()
- except Exception as e:
- return f"Google Gemini error: {e}"
-
- # Openai
- elif _provider == "openai" and _model in GPT_MODELS_LATEST:
- # Use the Responses API with multimodal input (text + up to 4 images)
- try:
- parts = [{"type": "input_text", "text": user}]
- for b64 in (images_b64 or [])[:4]:
- if b64:
- parts.append({"type": "input_image", "image_url": f"data:image/png;base64,{b64}"})
-
- args = set_args(
- model=_model,
- instructions=_SYSTEM_VISION,
- input=[{"role": "user", "content": parts}],
- previous_id=None,
- store=False,
- reasoning_effort="minimal",
- verbosity="low",
- )
- resp = _client.responses.create(**args)
- txt = _out(resp) or ""
- if txt.strip():
- return txt.strip()
- except Exception:
- pass # Fall through to the chat.completions fallback implemented below.
+ input_contents.append(image_part)
 
- # Anthropic
- elif _provider == "anthropic":
- try:
- parts = [{"type":"text","text": user}]
- for b64 in images_b64[:4]:
- if b64:
- parts.append({"type":"image_url","image_url":{"url": f"data:image/png;base64,{b64}"}})
-
- response = _client.messages.create(
- model=_model,
- max_tokens=1024,
- system=_SYSTEM_VISION,
- messages=[{"role": "user", "content":parts}],
- stream=False,
- )
- return response.content[0].text.strip()
- except Exception:
- pass # Fall through to the chat.completions fallback implemented below.
-
- # OpenAI SDK
- else: # provider in {"openai","xai","deepseek","moonshotai","alibaba"}:
- try:
- parts = [{"type":"text","text": user}]
- for b64 in images_b64[:4]:
- if b64:
- parts.append({"type":"image_url","image_url":{"url": f"data:image/png;base64,{b64}"}})
- resp = _client.chat.completions.create(
- model=_model,
- temperature=0.3,
- messages=[
- {"role":"system","content":_SYSTEM_VISION},
- {"role":"user","content":parts},
- ],
- max_tokens=600,
- )
- return (resp.choices[0].message.content or "").strip()
- except Exception:
- pass # Fall through to the chat.completions fallback implemented below.
+ response = _client.models.generate_content(
+ model=_model,
+ contents=input_contents,
+ config=types.GenerateContentConfig(
+ system_instruction=_SYSTEM_VISION,
+ temperature=0.7,
+ max_output_tokens=1024,
+ ),
+ )
+ txt = response.text.strip()
+ return txt.strip()
+ except Exception:
+ pass
+
+ #2 Openai
+ elif _provider == "openai" and _model in GPT_MODELS_LATEST:
+ try:
+ input_contents = []
+
+ text_part = {"type": "input_text", "text": user}
+ input_contents.append(text_part)
+ for b64 in images_b64:
+ if b64:
+ image_part = {
+ "type": "input_image",
+ "image_url": f"data:image/png;base64,{b64}"
+ }
+ input_contents.append(image_part)
+
+ args = set_args(
+ model=_model,
+ instructions=_SYSTEM_VISION,
+ input=[{"role": "user", "content": input_contents}],
+ previous_id=None,
+ store=False,
+ reasoning_effort="low",
+ verbosity="medium",
+ )
+ resp = _client.responses.create(**args)
+ txt = _out(resp) or ""
+ if txt.strip():
+ return txt.strip()
+ except Exception:
+ pass
+
+ # Anthropic
+ elif _provider == "anthropic":
+ try:
+ input_contents = []
+
+ text_part = {"type":"text","text": user}
+ input_contents.append(text_part)
 
- # Text-only fallback via Responses API
- return "Insufficient context to comment usefully."
+ for b64 in images_b64:
+ if b64:
+ image_part = {
+ "type":"image_url",
+ "image_url":{"url": f"data:image/png;base64,{b64}"}
+ }
+ input_contents.append(image_part)
+
+ response = _client.messages.create(
+ model=_model,
+ max_tokens=1024,
+ system=_SYSTEM_VISION,
+ messages=[{"role": "user", "content":input_contents}],
+ stream=False,
+ )
+ return response.content[0].text.strip()
+ except Exception:
+ pass
+
+ # OpenAI SDK
+ else:
+ try:
+ input_contents = [{"type":"text","text": user}]
+ for b64 in images_b64:
+ if b64:
+ input_contents.append({"type":"image_url","image_url":{"url": f"data:image/png;base64,{b64}"}})
+ resp = _client.chat.completions.create(
+ model=_model,
+ temperature=1,
+ messages=[
+ {"role":"system","content":_SYSTEM_VISION},
+ {"role":"user","content":input_contents},
+ ],
+ max_tokens=1024,
+ )
+ return (resp.choices[0].message.content or "").strip()
+ except Exception:
+ pass
+ except Exception:
+ return "Insufficient context to comment usefully."
 
  def wrap_html(card_text: str) -> str:
  return f"""
  <div class="smx-commentary-card" style="margin-top:1rem;padding:1rem;border:1px solid #e5e7eb;border-radius:0.75rem;background:#fafafa">
- <div style="font-weight:600;margin-bottom:0.5rem;">Commentary</div>
+ <div style="font-weight:600;margin-bottom:0.5rem;">smxAI Feedback</div>
  <div class="prose" style="white-space:pre-wrap;line-height:1.45">{card_text}</div>
  </div>
  """.strip()