syntaxmatrix-2.5.5.5-py3-none-any.whl → syntaxmatrix-2.6.2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- syntaxmatrix/__init__.py +3 -2
- syntaxmatrix/agentic/agents.py +1220 -169
- syntaxmatrix/agentic/agents_orchestrer.py +326 -0
- syntaxmatrix/agentic/code_tools_registry.py +27 -32
- syntaxmatrix/auth.py +142 -5
- syntaxmatrix/commentary.py +16 -16
- syntaxmatrix/core.py +192 -84
- syntaxmatrix/db.py +460 -4
- syntaxmatrix/{display.py → display_html.py} +2 -6
- syntaxmatrix/gpt_models_latest.py +1 -1
- syntaxmatrix/media/__init__.py +0 -0
- syntaxmatrix/media/media_pixabay.py +277 -0
- syntaxmatrix/models.py +1 -1
- syntaxmatrix/page_builder_defaults.py +183 -0
- syntaxmatrix/page_builder_generation.py +1122 -0
- syntaxmatrix/page_layout_contract.py +644 -0
- syntaxmatrix/page_patch_publish.py +1471 -0
- syntaxmatrix/preface.py +670 -0
- syntaxmatrix/profiles.py +28 -10
- syntaxmatrix/routes.py +1941 -593
- syntaxmatrix/selftest_page_templates.py +360 -0
- syntaxmatrix/settings/client_items.py +28 -0
- syntaxmatrix/settings/model_map.py +1022 -207
- syntaxmatrix/settings/prompts.py +328 -130
- syntaxmatrix/static/assets/hero-default.svg +22 -0
- syntaxmatrix/static/icons/bot-icon.png +0 -0
- syntaxmatrix/static/icons/favicon.png +0 -0
- syntaxmatrix/static/icons/logo.png +0 -0
- syntaxmatrix/static/icons/logo3.png +0 -0
- syntaxmatrix/templates/admin_branding.html +104 -0
- syntaxmatrix/templates/admin_features.html +63 -0
- syntaxmatrix/templates/admin_secretes.html +108 -0
- syntaxmatrix/templates/change_password.html +124 -0
- syntaxmatrix/templates/dashboard.html +296 -131
- syntaxmatrix/templates/dataset_resize.html +535 -0
- syntaxmatrix/templates/edit_page.html +2535 -0
- syntaxmatrix/utils.py +2728 -2835
- {syntaxmatrix-2.5.5.5.dist-info → syntaxmatrix-2.6.2.dist-info}/METADATA +6 -2
- {syntaxmatrix-2.5.5.5.dist-info → syntaxmatrix-2.6.2.dist-info}/RECORD +42 -25
- syntaxmatrix/generate_page.py +0 -634
- syntaxmatrix/static/icons/hero_bg.jpg +0 -0
- {syntaxmatrix-2.5.5.5.dist-info → syntaxmatrix-2.6.2.dist-info}/WHEEL +0 -0
- {syntaxmatrix-2.5.5.5.dist-info → syntaxmatrix-2.6.2.dist-info}/licenses/LICENSE.txt +0 -0
- {syntaxmatrix-2.5.5.5.dist-info → syntaxmatrix-2.6.2.dist-info}/top_level.txt +0 -0
syntaxmatrix/page_builder_generation.py (new file)

@@ -0,0 +1,1122 @@

from __future__ import annotations

import hashlib
import io
import os
import re
from typing import Any, Dict, List, Optional, Tuple

import requests
from PIL import Image
from bs4 import BeautifulSoup

PIXABAY_API_URL = "https://pixabay.com/api/"

# ─────────────────────────────────────────────────────────
# Icons (inline SVG)
# ─────────────────────────────────────────────────────────
_ICON_SVGS: Dict[str, str] = {
    "spark": '<svg viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2">'
             '<path d="M12 2l1.2 6.2L20 12l-6.8 3.8L12 22l-1.2-6.2L4 12l6.8-3.8L12 2z"/></svg>',
    "shield": '<svg viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2">'
              '<path d="M12 2l7 4v6c0 5-3.5 9-7 10-3.5-1-7-5-7-10V6l7-4z"/></svg>',
    "stack": '<svg viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2">'
             '<path d="M12 2l9 5-9 5-9-5 9-5z"/><path d="M3 12l9 5 9-5"/><path d="M3 17l9 5 9-5"/></svg>',
    "chart": '<svg viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2">'
             '<path d="M3 3v18h18"/><path d="M7 14v4"/><path d="M12 10v8"/><path d="M17 6v12"/></svg>',
    "rocket": '<svg viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2">'
              '<path d="M5 13l4 6 6-4c6-4 5-12 5-12S13 2 9 8l-4 5z"/><path d="M9 8l7 7"/>'
              '<path d="M5 13l-2 2"/><path d="M11 19l-2 2"/></svg>',
    "plug": '<svg viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2">'
            '<path d="M9 2v6"/><path d="M15 2v6"/><path d="M7 8h10"/>'
            '<path d="M12 8v7a4 4 0 0 1-4 4H7"/><path d="M12 8v7a4 4 0 0 0 4 4h1"/></svg>',
    "arrow": '<svg viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2">'
             '<path d="M5 12h12"/><path d="M13 6l6 6-6 6"/></svg>',
}

def _slug_title(slug: str) -> str:
    s = (slug or "").strip().replace("_", " ").replace("-", " ")
    s = re.sub(r"\s+", " ", s).strip()
    return (s[:1].upper() + s[1:]) if s else "New page"


def _short_site_desc(desc: str, limit: int = 220) -> str:
    d = (desc or "").strip()
    if not d:
        return ""
    d = re.sub(r"\s+", " ", d).strip()
    if len(d) <= limit:
        return d
    cut = d[:limit]
    # cut at end of sentence if possible
    m = re.search(r"[.!?]\s", cut)
    if m:
        return cut[:m.end()].strip()
    return cut.rstrip() + "…"


def _page_kind(slug: str) -> str:
    s = (slug or "").lower()
    if any(k in s for k in ["service", "services", "solutions", "what-we-do"]):
        return "services"
    if "about" in s or "company" in s:
        return "about"
    if "pricing" in s or "plans" in s:
        return "pricing"
    if "contact" in s or "get-in-touch" in s:
        return "contact"
    if "docs" in s or "documentation" in s:
        return "docs"
    return "generic"

def build_layout_for_page(page_slug: str, website_description: str) -> Dict[str, Any]:
    """
    Returns builder layout JSON with non-placeholder copy that fits the page title.
    Includes optional `imgQuery` fields which we can use to fetch Pixabay images.
    """
    slug = (page_slug or "page").strip().lower()
    title = _slug_title(slug)
    kind = _page_kind(slug)
    site_blurb = _short_site_desc(website_description)

    def sec(_id: str, _type: str, title_: str, text: str, cols: int, items: List[Dict[str, Any]]):
        return {"id": _id, "type": _type, "title": title_, "text": text, "cols": cols, "items": items or []}

    def item(_id: str, _type: str, title_: str, text: str, icon: str = "", img_query: str = ""):
        out = {"id": _id, "type": _type, "title": title_, "text": text, "imageUrl": ""}
        if icon:
            out["icon"] = icon
        if img_query:
            out["imgQuery"] = img_query
        return out

    # Some SyntaxMatrix-aware phrasing (works fine for other clients too)
    if kind == "services":
        hero_text = (
            "Practical AI engineering services: retrieval systems, data workflows, and deployable web UI components."
            if not site_blurb else site_blurb
        )
        return {
            "page": slug,
            "sections": [
                sec(
                    "sec_hero",
                    "hero",
                    title,
                    hero_text,
                    1,
                    [
                        item(
                            "item_hero_img",
                            "card",
                            "Build faster with confidence",
                            "From strategy to deployment, we ship production-grade features with clean, maintainable code.",
                            "rocket",
                            "ai dashboard software team",
                        ),
                    ],
                ),
                sec(
                    "sec_services",
                    "features",
                    "What we can deliver",
                    "Core capabilities tailored to your organisation and your users.",
                    3,
                    [
                        item("svc_1", "card", "RAG systems & search", "Chunking, embeddings, vector stores, and evaluation.", "stack", "vector database ai search"),
                        item("svc_2", "card", "AI assistants in your app", "Streaming chat, tools, history, and guardrails.", "spark", "chatbot interface ai assistant"),
                        item("svc_3", "card", "Admin panel & content ops", "Page management, media handling, audit trails.", "shield", "admin dashboard web app"),
                        item("svc_4", "card", "ML lab & analytics", "EDA, modelling, visualisations, downloadable results.", "chart", "data analytics dashboard charts"),
                        item("svc_5", "card", "Integrations", "SQL databases, storage buckets, PDFs, CSV, APIs.", "plug", "software integration api"),
                        item("svc_6", "card", "Deployment support", "Docker, Gunicorn, GCP Cloud Run patterns.", "rocket", "cloud deployment devops"),
                    ],
                ),
                sec(
                    "sec_process",
                    "features",
                    "How we work",
                    "A simple process that keeps delivery predictable.",
                    3,
                    [
                        item("step_1", "card", "Scope", "Clarify outcomes, constraints, and success checks.", "spark", ""),
                        item("step_2", "card", "Build", "Implement in small milestones with review points.", "stack", ""),
                        item("step_3", "card", "Ship", "Deploy and document so your team can operate it.", "rocket", ""),
                    ],
                ),
                sec(
                    "sec_gallery",
                    "gallery",
                    "In action",
                    "A few visuals that match the theme of this page.",
                    3,
                    [
                        item("gal_1", "card", "Product UI", "Example UI visual.", "", "modern web app interface"),
                        item("gal_2", "card", "Data work", "Example analytics visual.", "", "data visualisation charts"),
                        item("gal_3", "card", "Team", "Example team visual.", "", "software team working"),
                    ],
                ),
                sec(
                    "sec_faq",
                    "faq",
                    "FAQ",
                    "Common questions we get before starting.",
                    2,
                    [
                        item("faq_1", "faq", "Do you work with existing systems?", "Yes. We can integrate with your current stack and data sources.", "", ""),
                        item("faq_2", "faq", "Can we start small?", "Yes. We can begin with one page/module and scale from there.", "", ""),
                    ],
                ),
                sec(
                    "sec_cta",
                    "cta",
                    "Ready to start?",
                    "Tell us what you want this page or feature to achieve, and we'll propose the quickest path.",
                    2,
                    [
                        item("cta_1", "card", "Book a demo", "See a working flow end-to-end.", "arrow", ""),
                        item("cta_2", "card", "Contact us", "Share requirements and timelines.", "arrow", ""),
                    ],
                ),
            ],
        }

    # Generic page (still modern copy)
    hero_text = site_blurb or "A modern page generated from your website description and the page title."
    return {
        "page": slug,
        "sections": [
            sec("sec_hero", "hero", title, hero_text, 1, [
                item("item_hero_img", "card", "A clear headline that matches the page", "Add a short, action-focused summary here.", "spark", f"{title} hero background"),
            ]),
            sec("sec_features", "features", "Highlights", "Three to six key points for this page topic.", 3, [
                item("f1", "card", "Clear value", "Explain the benefit in one sentence.", "spark", f"{title} concept"),
                item("f2", "card", "Trust signals", "Show proof, experience, or credibility.", "shield", f"{title} professional"),
                item("f3", "card", "Next step", "Give people a simple action to take.", "arrow", f"{title} call to action"),
            ]),
            sec("sec_gallery", "gallery", "Gallery", "Relevant imagery for the topic.", 3, [
                item("g1", "card", "Image", "Drop or auto-fetch an image.", "", f"{title} abstract"),
                item("g2", "card", "Image", "Drop or auto-fetch an image.", "", f"{title} modern"),
                item("g3", "card", "Image", "Drop or auto-fetch an image.", "", f"{title} business"),
            ]),
            sec("sec_cta", "cta", "Continue", "A short call-to-action to guide the user.", 2, [
                item("c1", "card", "Get started", "Tell people what to do next.", "arrow", ""),
                item("c2", "card", "Learn more", "Point to documentation or contact.", "arrow", ""),
            ]),
        ],
    }

# ─────────────────────────────────────────────────────────
# Pixabay: search + download once + resize
# ─────────────────────────────────────────────────────────
def _is_pixabay_url(url: str) -> bool:
    u = (url or "").strip().lower()
    return u.startswith("https://") and ("pixabay.com" in u)


def _fetch_bytes(url: str, timeout: int = 20) -> bytes:
    if not _is_pixabay_url(url):
        raise ValueError("Only Pixabay URLs are allowed")
    r = requests.get(url, stream=True, timeout=timeout)
    r.raise_for_status()
    return r.content


def _save_image_bytes(img_bytes: bytes, out_path_no_ext: str, max_width: int = 1920) -> Tuple[str, int, int]:
    img = Image.open(io.BytesIO(img_bytes))
    img.load()

    if img.width > int(max_width or 1920):
        ratio = (int(max_width) / float(img.width))
        new_h = max(1, int(round(img.height * ratio)))
        img = img.resize((int(max_width), new_h), Image.LANCZOS)

    has_alpha = ("A" in img.getbands())
    ext = ".png" if has_alpha else ".jpg"
    out_path = out_path_no_ext + ext
    os.makedirs(os.path.dirname(out_path), exist_ok=True)

    if ext == ".jpg":
        rgb = img.convert("RGB") if img.mode != "RGB" else img
        rgb.save(out_path, "JPEG", quality=85, optimize=True, progressive=True)
    else:
        img.save(out_path, "PNG", optimize=True)

    return out_path, int(img.width), int(img.height)

def _pixabay_search(api_key: str, query: str, *, per_page: int = 12, timeout: int = 15) -> List[Dict[str, Any]]:
    if not api_key:
        return []
    q = (query or "").strip()
    q = re.sub(r"\s+", " ", q)[:100]
    if not q:
        return []

    params = {
        "key": api_key,
        "q": q,
        "image_type": "photo",
        "orientation": "horizontal",
        "safesearch": "true",
        "editors_choice": "true",
        "order": "popular",
        "per_page": max(3, min(200, int(per_page or 12))),
        "page": 1,
    }
    r = requests.get(PIXABAY_API_URL, params=params, timeout=timeout)
    r.raise_for_status()
    data = r.json() or {}
    return data.get("hits") or []

def fill_layout_images_from_pixabay(
    layout: Dict[str, Any],
    *,
    api_key: str,
    client_dir: str,
    max_width: int = 1920,
    max_downloads: int = 8,
) -> Dict[str, Any]:
    """
    Mutates/returns layout: fills `imageUrl` fields by downloading images into:
        uploads/media/images/imported/
    Uses `imgQuery` if present.
    """
    if not api_key or not layout:
        return layout

    imported_dir = os.path.join(client_dir, "uploads", "media", "images", "imported")
    os.makedirs(imported_dir, exist_ok=True)

    used_ids = set()
    downloads = 0

    sections = layout.get("sections") if isinstance(layout.get("sections"), list) else []
    for s in sections:
        if downloads >= max_downloads:
            break
        items = s.get("items") if isinstance(s.get("items"), list) else []
        for it in items:
            if downloads >= max_downloads:
                break
            if (it.get("imageUrl") or "").strip():
                continue

            q = (it.get("imgQuery") or "").strip()
            if not q:
                continue

            hits = _pixabay_search(api_key, q)
            if not hits:
                continue

            # choose first unused hit
            chosen = None
            for h in hits:
                pid = int(h.get("id") or 0)
                if pid and pid not in used_ids:
                    chosen = h
                    break
            if not chosen:
                continue

            pid = int(chosen.get("id") or 0)
            used_ids.add(pid)

            web_u = str(chosen.get("webformatURL") or "").strip()
            large_u = str(chosen.get("largeImageURL") or "").strip()

            if not web_u:
                continue

            base = os.path.join(imported_dir, f"pixabay-{pid}")
            # download-once
            existing = None
            for ext in (".jpg", ".png"):
                p = base + ext
                if os.path.exists(p):
                    existing = p
                    break

            if existing:
                rel = os.path.relpath(existing, os.path.join(client_dir, "uploads", "media")).replace("\\", "/")
                it["imageUrl"] = f"/uploads/media/{rel}"
                continue

            # fetch webformat first; if it’s small and large exists, use the larger one
            try:
                b1 = _fetch_bytes(web_u)
                img1 = Image.open(io.BytesIO(b1))
                img1.load()
                chosen_bytes = b1

                if large_u:
                    try:
                        b2 = _fetch_bytes(large_u)
                        img2 = Image.open(io.BytesIO(b2))
                        img2.load()
                        if img2.width > img1.width:
                            chosen_bytes = b2
                    except Exception:
                        pass

                saved_path, _, _ = _save_image_bytes(chosen_bytes, base, max_width=max_width)
                rel = os.path.relpath(saved_path, os.path.join(client_dir, "uploads", "media")).replace("\\", "/")
                it["imageUrl"] = f"/uploads/media/{rel}"
                downloads += 1
            except Exception:
                continue

    return layout


import re
from typing import Dict, Any, Optional

def _extract_hero_image_url_from_layout(layout: Dict[str, Any]) -> str:
    """Find hero image URL from the saved layout JSON (builder)."""
    sections = layout.get("sections") if isinstance(layout.get("sections"), list) else []
    for s in sections:
        if not isinstance(s, dict):
            continue
        if (s.get("type") or "").lower() != "hero":
            continue

        img_url = (s.get("imageUrl") or "").strip()
        if img_url:
            return img_url

        items = s.get("items") if isinstance(s.get("items"), list) else []
        if items and isinstance(items[0], dict):
            img_url = (items[0].get("imageUrl") or "").strip()
            if img_url:
                return img_url

    return ""

def patch_first_background_image(html: str, new_url: str) -> str:
    """
    Patch ONLY the first background-image/background url(...) found in the existing HTML.
    This avoids regenerating HTML/CSS (which is what is changing your fonts/colours).
    """
    if not html or not new_url:
        return html

    # 1) background-image: url(...)
    pat1 = re.compile(r'background-image\s*:\s*url\((["\']?)[^)]*\1\)', re.IGNORECASE)
    out, n = pat1.subn(f'background-image:url("{new_url}")', html, count=1)
    if n:
        return out

    # 2) background: ... url(...)
    pat2 = re.compile(r'(background\s*:\s*[^;]*url\((["\']?))([^)]+)(\2\))', re.IGNORECASE)
    def _repl(m: re.Match) -> str:
        return m.group(1) + new_url + m.group(4)

    out, n = pat2.subn(_repl, html, count=1)
    if n:
        return out

    # 3) If nothing matched, inject a tiny override (best-effort)
    inject = (
        f'<style id="smx-hero-bg-override">'
        f'.hero-bg{{background-image:url("{new_url}") !important;}}'
        f'.hero{{background-image:url("{new_url}") !important;}}'
        f'</style>'
    )
    if "</head>" in html:
        return html.replace("</head>", inject + "</head>", 1)
    return inject + html

def _set_text(node, new_text: str) -> bool:
    if not node:
        return False
    new_text = (new_text or "").strip()
    if not new_text:
        return False
    node.clear()
    node.append(new_text)
    return True

def _html_escape(s: str) -> str:
    s = s or ""
    return (
        s.replace("&", "&amp;")
        .replace("<", "&lt;")
        .replace(">", "&gt;")
        .replace('"', "&quot;")
        .replace("'", "&#39;")
    )

def patch_section_titles_and_intros(existing_html: str, layout: Dict[str, Any]) -> str:
    """
    Patch ONLY section <h2> titles + the first <p> intro under each <h2>,
    across the whole page, matching sections BY ORDER.
    Does NOT regenerate HTML/CSS.
    """
    if not existing_html or not isinstance(layout, dict):
        return existing_html

    sections = layout.get("sections") if isinstance(layout.get("sections"), list) else []
    layout_non_hero = [
        s for s in sections
        if isinstance(s, dict) and (s.get("type") or "").lower() != "hero"
    ]
    if not layout_non_hero:
        return existing_html

    # Split into <section> blocks (non-greedy)
    sec_pat = re.compile(r"(<section\b[^>]*>)(.*?)(</section>)", re.IGNORECASE | re.DOTALL)
    blocks = list(sec_pat.finditer(existing_html))
    if not blocks:
        return existing_html

    out_parts = []
    last_end = 0
    nonhero_index = 0

    for m in blocks:
        open_tag = m.group(1)
        inner = m.group(2)
        close_tag = m.group(3)

        # Keep everything before this section unchanged
        out_parts.append(existing_html[last_end:m.start()])
        last_end = m.end()

        # Skip hero sections (don’t count them against non-hero layout sections)
        if "hero" in open_tag.lower():
            out_parts.append(open_tag + inner + close_tag)
            continue

        if nonhero_index >= len(layout_non_hero):
            out_parts.append(open_tag + inner + close_tag)
            continue

        s = layout_non_hero[nonhero_index]
        nonhero_index += 1

        new_title = (s.get("title") or "").strip()
        new_text = (s.get("text") or "").strip()

        patched_inner = inner

        # 1) Patch first <h2> inside this section
        if new_title:
            h2_pat = re.compile(r"(<h2\b[^>]*>)(.*?)(</h2>)", re.IGNORECASE | re.DOTALL)
            patched_inner, n_h2 = h2_pat.subn(
                lambda mm: mm.group(1) + _html_escape(new_title) + mm.group(3),
                patched_inner,
                count=1
            )

        # 2) Patch the first <p> AFTER </h2> (section intro)
        if new_text:
            # Only look in the region after the first </h2>, so we don’t edit card paragraphs
            split = re.split(r"(</h2>)", patched_inner, maxsplit=1, flags=re.IGNORECASE)
            if len(split) == 3:
                before_h2 = split[0] + split[1]
                after_h2 = split[2]

                p_pat = re.compile(r"(<p\b[^>]*>)(.*?)(</p>)", re.IGNORECASE | re.DOTALL)
                after_h2, n_p = p_pat.subn(
                    lambda mm: mm.group(1) + _html_escape(new_text) + mm.group(3),
                    after_h2,
                    count=1
                )

                patched_inner = before_h2 + after_h2

        out_parts.append(open_tag + patched_inner + close_tag)

    # Append trailing HTML after the last section
    out_parts.append(existing_html[last_end:])
    return "".join(out_parts)

def patch_page_from_layout(existing_html: str, layout: Dict[str, Any]) -> str:
    """
    Patch ONLY text/image values in the existing HTML using the builder layout JSON.
    Does NOT regenerate HTML/CSS, so it won’t change fonts/palette/structure.
    """
    if not existing_html or not isinstance(layout, dict):
        return existing_html

    soup = BeautifulSoup(existing_html, "html.parser")

    sections = layout.get("sections") if isinstance(layout.get("sections"), list) else []

    # ---------- HERO ----------
    hero_layout = None
    for s in sections:
        if isinstance(s, dict) and (s.get("type") or "").lower() == "hero":
            hero_layout = s
            break

    if hero_layout:
        hero_title = (hero_layout.get("title") or "").strip()
        hero_text = (hero_layout.get("text") or "").strip()
        hero_img = (hero_layout.get("imageUrl") or "").strip()

        # Prefer patching hero background first (your existing logic)
        if hero_img:
            from syntaxmatrix.page_builder_generation import patch_first_background_image
            existing_html = patch_first_background_image(str(soup), hero_img)
            soup = BeautifulSoup(existing_html, "html.parser")

        # Patch hero headline + hero paragraph (without changing structure)
        h1 = soup.find("h1")
        if hero_title and h1:
            _set_text(h1, hero_title)

        # Find the first <p> after the <h1> within the same container
        if hero_text and h1:
            p = None
            for sib in h1.find_all_next(["p"], limit=6):
                # ignore very short “kicker” lines
                txt = sib.get_text(" ", strip=True)
                if len(txt) >= 25:
                    p = sib
                    break
            if p:
                _set_text(p, hero_text)

    # ---------- OTHER SECTIONS (titles + intro text only) ----------
    layout_non_hero = [s for s in sections if isinstance(s, dict) and (s.get("type") or "").lower() != "hero"]

    # Collect candidate section headings in the existing HTML
    headings = []
    for h in soup.find_all(["h2", "h3"]):
        if h.find_parent(["header", "nav", "footer"]) is not None:
            continue
        if not h.get_text(strip=True):
            continue
        headings.append(h)

    for i, s in enumerate(layout_non_hero):
        if i >= len(headings):
            break

        title = (s.get("title") or "").strip()
        text = (s.get("text") or "").strip()

        h = headings[i]
        if title:
            _set_text(h, title)

        if text:
            # patch the first <p> after this heading (within the same section container)
            p = None
            for sib in h.find_all_next(["p"], limit=8):
                if sib.find_parent(["header", "nav", "footer"]) is not None:
                    continue
                # stop if we hit the next heading before finding a paragraph
                if sib.find_previous(["h2", "h3"]) is not h:
                    break
                p = sib
                break
            if p:
                _set_text(p, text)

    return str(soup)

def _css_safe_hex(c: str) -> str:
    c = (c or "").strip()
    m = re.fullmatch(r"#([0-9a-fA-F]{3}|[0-9a-fA-F]{6})", c)
    if not m:
        return ""
    hx = m.group(0).lower()
    if len(hx) == 4:
        hx = "#" + "".join([ch * 2 for ch in hx[1:]])
    return hx


def _hex_to_rgba(hx: str, a: float) -> str:
    hx = _css_safe_hex(hx)
    if not hx:
        return ""
    r = int(hx[1:3], 16)
    g = int(hx[3:5], 16)
    b = int(hx[5:7], 16)
    a = float(a)
    if a < 0:
        a = 0.0
    if a > 1:
        a = 1.0
    return f"rgba({r},{g},{b},{a:.3f})"


def _css_safe_font(ff: str) -> str:
    ff = (ff or "").strip()
    if not ff:
        return ""
    bad = ["{", "}", ";", "<", ">", "\n", "\r"]
    if any(b in ff for b in bad):
        return ""
    return ff

def _theme_style_from_layout(layout: Dict[str, Any]) -> str:
    theme = layout.get("theme") if isinstance(layout.get("theme"), dict) else {}
    if not theme:
        return ""

    font_body = _css_safe_font(theme.get("fontBody") or theme.get("bodyFont") or theme.get("font_body") or "")
    font_head = _css_safe_font(theme.get("fontHeading") or theme.get("headingFont") or theme.get("font_heading") or "")

    accent = _css_safe_hex(theme.get("accent") or "")
    fg = _css_safe_hex(theme.get("fg") or "")
    mut = _css_safe_hex(theme.get("mut") or "")
    bg = _css_safe_hex(theme.get("bg") or "")

    if not any([font_body, font_head, accent, fg, mut, bg]):
        return ""

    lines = []
    lines.append(".smxp{")
    if fg:
        lines.append(f" --fg:{fg};")
        lines.append(" color:var(--fg);")
    if mut:
        lines.append(f" --mut:{mut};")
    if bg:
        lines.append(f" --bg:{bg};")
        lines.append(" background:var(--bg);")
    if font_body:
        lines.append(f" font-family:{font_body};")
    lines.append("}")

    if font_head:
        lines.append(f".smxp h1,.smxp h2,.smxp h3{{font-family:{font_head};}}")

    if accent:
        soft = _hex_to_rgba(accent, 0.12)
        if soft:
            lines.append(f".smxp .btn{{background:{soft};}}")
        lines.append(f".smxp a{{color:{accent};}}")

    css = "\n".join(lines)
    return f'<style id="smx-theme" data-smx="theme">\n{css}\n</style>'

# ─────────────────────────────────────────────────────────
# Compile layout JSON → modern HTML with animations
# ─────────────────────────────────────────────────────────
def compile_layout_to_html(layout: Dict[str, Any], *, page_slug: str) -> str:
    page_id = re.sub(r"[^a-z0-9\-]+", "-", (page_slug or "page").lower()).strip("-") or "page"

    css = """
<style>
.smxp{
  --r:18px;
  --bd: rgba(148,163,184,.25);
  --fg: #0f172a;
  --mut: #475569;   /* <- darker, readable */
  --card: rgba(255,255,255,.78);
  --bg: #f8fafc;
  font-family: system-ui, -apple-system, Segoe UI, Roboto, Arial, sans-serif;
  background: var(--bg);
  color: var(--fg);
  overflow-x: clip;
}
@media (prefers-color-scheme: dark){
  .smxp{
    --fg:#e2e8f0;
    --mut:#a7b3c6;
    --card:rgba(2,6,23,.45);
    --bg: radial-gradient(circle at 20% 10%, rgba(30,64,175,.25), rgba(2,6,23,.95) 55%);
    --bd: rgba(148,163,184,.18);
  }
}
.smxp a{color:inherit}
.smxp .wrap{max-width:1120px;margin:0 auto;padding:0 18px}
.smxp .sec{padding:56px 0}
.smxp .kicker{color:var(--mut);font-size:.95rem;margin:0 0 8px}
.smxp h1{font-size:clamp(2rem,3.4vw,3.2rem);line-height:1.08;margin:0 0 12px}
.smxp h2{font-size:clamp(1.4rem,2.2vw,2rem);margin:0 0 10px}
.smxp p{margin:0;color:var(--mut);line-height:1.6}
.smxp .hero{padding:72px 0 46px}
.smxp .heroGrid{display:grid;grid-template-columns:1.15fr .85fr;gap:18px;align-items:center}
@media (max-width: 860px){.smxp .heroGrid{grid-template-columns:1fr}}
.smxp .heroCard{border:1px solid var(--bd);border-radius:var(--r);background:var(--card);padding:14px}
.smxp .btnRow{display:flex;gap:10px;flex-wrap:wrap;margin-top:18px}
.smxp .btn{display:inline-flex;gap:8px;align-items:center;border-radius:999px;padding:10px 14px;
  border:1px solid var(--bd);text-decoration:none;background:rgba(99,102,241,.12)}
.smxp .btn:hover{transform:translateY(-1px)}
.smxp .grid{display:grid;gap:12px}
.smxp .card{border:1px solid var(--bd);border-radius:var(--r);background:var(--card);padding:14px;min-width:0}
.smxp .card h3{margin:10px 0 6px;font-size:1.05rem}
.smxp .icon{width:20px;height:20px;opacity:.9}
.smxp img{width:100%;height:auto;border-radius:calc(var(--r) - 6px);display:block}
.smxp .reveal{opacity:0;transform:translateY(14px);transition:opacity .55s ease, transform .55s ease}
.smxp .reveal.in{opacity:1;transform:none}

.smxp .hero{ padding:0; }
.smxp .hero-banner{
  position:relative;
  width:100%;
  min-height:clamp(380px, 60vh, 680px);
  display:flex;
  align-items:flex-end;
  overflow:hidden;
}
.smxp .hero-bg{
  position:absolute; inset:0;
  background-position:center;
  background-size:cover;
  background-repeat:no-repeat;
  transform:scale(1.02);
  filter:saturate(1.02);
}
.smxp .hero-overlay{
  position:absolute; inset:0;
  background:linear-gradient(90deg,
    rgba(2,6,23,.62) 0%,
    rgba(2,6,23,.40) 42%,
    rgba(2,6,23,.14) 72%,
    rgba(2,6,23,.02) 100%
  );
}
@media (max-width: 860px){
  .smxp .hero-overlay{
    background:linear-gradient(180deg,
      rgba(2,6,23,.16) 0%,
      rgba(2,6,23,.55) 70%,
      rgba(2,6,23,.70) 100%
    );
  }
}
.smxp .hero-content{ position:relative; width:100%; padding:72px 18px 48px; }
.smxp .hero-panel{
  max-width:760px;
  border:1px solid var(--bd);
  background:rgba(255,255,255,.80);
  border-radius:var(--r);
  padding:18px;
  backdrop-filter: blur(10px);
}
@media (prefers-color-scheme: dark){
  .smxp .hero-panel{ background:rgba(2,6,23,.58); }
}
.smxp .lead{ margin-top:10px; font-size:1.05rem; line-height:1.65; }

</style>
""".strip()
js = f"""
|
|
821
|
+
<script>
|
|
822
|
+
(function(){{
|
|
823
|
+
const root = document.getElementById("smxp-{page_id}");
|
|
824
|
+
if(!root) return;
|
|
825
|
+
const els = root.querySelectorAll(".reveal");
|
|
826
|
+
const io = new IntersectionObserver((entries)=>{{
|
|
827
|
+
entries.forEach(e=>{{ if(e.isIntersecting) e.target.classList.add("in"); }});
|
|
828
|
+
}}, {{ threshold: 0.12 }});
|
|
829
|
+
els.forEach(el=>io.observe(el));
|
|
830
|
+
}})();
|
|
831
|
+
</script>
|
|
832
|
+
""".strip()
|
|
833
|
+
|
|
834
|
+
def esc(s: str) -> str:
|
|
835
|
+
s = s or ""
|
|
836
|
+
s = s.replace("&", "&").replace("<", "<").replace(">", ">")
|
|
837
|
+
s = s.replace('"', """).replace("'", "'")
|
|
838
|
+
return s
|
|
839
|
+
|
|
840
|
+

    def icon_svg(name: str) -> str:
        svg = _ICON_SVGS.get((name or "").strip().lower())
        if not svg:
            return ""
        return f'<span class="icon">{svg}</span>'

    parts: List[str] = [f'<div class="smxp" id="smxp-{page_id}">', css]
    sections = layout.get("sections") if isinstance(layout.get("sections"), list) else []

    # Map first section id by type (used for default Hero CTA anchors)
    sec_id_by_type: Dict[str, str] = {}
    for _s in sections:
        if not isinstance(_s, dict):
            continue
        _t = str(_s.get("type") or "").lower().strip()
        if not _t or _t in sec_id_by_type:
            continue
        _sid = str(_s.get("id") or "").strip() or f"sec_{_t}"
        sec_id_by_type[_t] = _sid

    def safe_href(h: str) -> str:
        h = (h or "").strip()
        if not h:
            return ""
        low = h.lower()
        if low.startswith("javascript:") or low.startswith("data:"):
            return ""
        return h

    for s in sections:
        stype = (s.get("type") or "section").lower()
        title = esc(s.get("title") or "")
        text = esc(s.get("text") or "")
        items = s.get("items") if isinstance(s.get("items"), list) else []

        # Ensure every section has a stable DOM id so patch_page_publish can target it.
        sec_dom_id = (s.get("id") or "").strip()
        if not sec_dom_id:
            sec_dom_id = "sec_hero" if stype == "hero" else f"sec_{stype}"
        sec_id_attr = f' id="{esc(sec_dom_id)}"'

        # HERO
        if stype == "hero":
            img_url = (s.get("imageUrl") or "").strip()
            if not img_url and items and isinstance(items[0], dict):
                img_url = (items[0].get("imageUrl") or "").strip()

            bg_style = f' style="background-image:url(\'{esc(img_url)}\')"' if img_url else ""

            # ---------------------------
            # HERO CTA buttons (NO /admin links)
            # ---------------------------
            cta1_label = (s.get("heroCta1Label") or "").strip() or "Explore features"
            cta2_label = (s.get("heroCta2Label") or "").strip() or "Talk to us"

            # If the key exists and is blank => hide button
            if "heroCta1Href" in s:
                cta1_href_raw = str(s.get("heroCta1Href") or "")
            else:
                cta1_href_raw = "#" + sec_id_by_type.get("features", "sec_features")

            if "heroCta2Href" in s:
                cta2_href_raw = str(s.get("heroCta2Href") or "")
            else:
                cta2_href_raw = "#" + sec_id_by_type.get("cta", "sec_cta")

            cta1_href = safe_href(cta1_href_raw)
            cta2_href = safe_href(cta2_href_raw)

            btns = []
            if cta1_href:
                btns.append(
                    f'<a class="btn" data-smx="hero-cta" data-cta="1" href="{esc(cta1_href)}">'
                    f'<span class="icon">{_ICON_SVGS["arrow"]}</span>{esc(cta1_label)}</a>'
                )
            if cta2_href:
                btns.append(
                    f'<a class="btn" data-smx="hero-cta" data-cta="2" href="{esc(cta2_href)}">'
                    f'<span class="icon">{_ICON_SVGS["arrow"]}</span>{esc(cta2_label)}</a>'
                )

            btn_row_html = f'<div class="btnRow">{"".join(btns)}</div>' if btns else ""

            parts.append(
                f'''
<section id="{esc(sec_dom_id)}" class="hero hero-banner">
  <div class="hero-bg"{bg_style}></div>
  <div class="hero-overlay"></div>
  <div class="wrap hero-content">
    <div class="hero-panel reveal">
      <p class="kicker">Generated page</p>
      <h1>{title}</h1>
      <p class="lead">{text}</p>
      {btn_row_html}
    </div>
  </div>
</section>
'''.strip()
            )
            continue

        # Others
        try:
            cols = int(s.get("cols") or 3)
        except Exception:
            cols = 3
        cols = max(1, min(5, cols))

        cards: List[str] = []
        for it in items:
            if not isinstance(it, dict):
                continue
            it_title = esc(it.get("title") or "")
            it_text = esc(it.get("text") or "")
            img = (it.get("imageUrl") or "").strip()
            ic = icon_svg(it.get("icon") or "")

            img_html = f'<img loading="lazy" decoding="async" src="{esc(img)}" alt="{it_title}">' if img else ""
            cards.append(
                f'''
<div class="card reveal">
  {img_html}
  <div style="display:flex;gap:10px;align-items:center;margin-top:{'10px' if img_html else '0'};">
    {ic}
    <h3 style="margin:0">{it_title}</h3>
  </div>
  <p style="margin-top:8px">{it_text}</p>
</div>
'''.strip()
            )

        grid_html = (
            f'<div class="grid" style="grid-template-columns:repeat({cols}, minmax(0, 1fr));">'
            + "\n".join(cards) +
            "</div>"
        ) if cards else ""

        parts.append(
            f'''
<section id="{esc(sec_dom_id)}" class="sec">
  <div class="wrap">
    <h2 class="reveal">{title}</h2>
    {'<p class="reveal" style="margin-bottom:14px;">'+text+'</p>' if text else ''}
    {grid_html}
  </div>
</section>
'''.strip()
        )

    parts.append(js)
    parts.append("</div>")
    return "\n\n".join(parts)

from bs4 import BeautifulSoup
from typing import Dict, Any, List, Tuple

def _layout_non_hero_sections(layout: Dict[str, Any]) -> List[Dict[str, Any]]:
    sections = layout.get("sections") if isinstance(layout.get("sections"), list) else []
    out = []
    for s in sections:
        if not isinstance(s, dict):
            continue
        if (s.get("type") or "").lower() == "hero":
            continue
        out.append(s)
    return out


def _layout_hero_section(layout: Dict[str, Any]) -> Dict[str, Any] | None:
    sections = layout.get("sections") if isinstance(layout.get("sections"), list) else []
    for s in sections:
        if isinstance(s, dict) and (s.get("type") or "").lower() == "hero":
            return s
    return None

def patch_hero_bg_precise(existing_html: str, new_url: str) -> str:
    """
    Patch ONLY the hero background image. Never touches other page content/CSS.
    Looks for:
      1) <div class="hero-bg" style="background-image:...">
      2) <section class="hero ..."> style="background-image:..."
    """
    if not existing_html or not new_url:
        return existing_html

    soup = BeautifulSoup(existing_html, "html.parser")

    # Case 1: hero-bg div
    hero_bg = soup.select_one(".hero-bg")
    if hero_bg:
        style = hero_bg.get("style") or ""
        # remove any prior background-image declarations
        style_parts = [p.strip() for p in style.split(";") if p.strip() and not p.strip().lower().startswith("background-image")]
        style_parts.append(f'background-image:url("{new_url}")')
        hero_bg["style"] = "; ".join(style_parts) + ";"
        return str(soup)

    # Case 2: hero section itself
    hero_sec = None
    for sec in soup.find_all("section"):
        cls = " ".join(sec.get("class") or [])
        if "hero" in cls.split():
            hero_sec = sec
            break

    if hero_sec:
        style = hero_sec.get("style") or ""
        style_parts = [p.strip() for p in style.split(";") if p.strip() and not p.strip().lower().startswith("background-image")]
        style_parts.append(f'background-image:url("{new_url}")')
        hero_sec["style"] = "; ".join(style_parts) + ";"
        return str(soup)

    return existing_html

def patch_section_titles_intros_changed_only(
    existing_html: str,
    old_layout: Dict[str, Any],
    new_layout: Dict[str, Any],
) -> str:
    """
    Patch ONLY section <h2> title + the first intro <p> under it,
    and ONLY for sections whose title/text changed in the layout.
    Mapping is by non-hero section order.
    """
    if not existing_html:
        return existing_html

    old_secs = _layout_non_hero_sections(old_layout or {})
    new_secs = _layout_non_hero_sections(new_layout or {})
    if not new_secs:
        return existing_html

    soup = BeautifulSoup(existing_html, "html.parser")

    # HTML non-hero <section> blocks (skip obvious hero sections)
    html_secs = []
    for sec in soup.find_all("section"):
        cls = " ".join(sec.get("class") or [])
        if "hero" in cls.split():
            continue
        html_secs.append(sec)

    n = min(len(html_secs), len(new_secs))
    if n <= 0:
        return existing_html

    for i in range(n):
        old_s = old_secs[i] if i < len(old_secs) and isinstance(old_secs[i], dict) else {}
        new_s = new_secs[i] if isinstance(new_secs[i], dict) else {}

        old_title = (old_s.get("title") or "").strip()
        new_title = (new_s.get("title") or "").strip()

        old_text = (old_s.get("text") or "").strip()
        new_text = (new_s.get("text") or "").strip()

        sec_tag = html_secs[i]

        # Patch title only if changed and non-empty
        if new_title and new_title != old_title:
            h2 = sec_tag.find("h2")
            if h2:
                h2.clear()
                h2.append(new_title)

        # Patch intro text only if changed and non-empty
        if new_text and new_text != old_text:
            h2 = sec_tag.find("h2")
            if h2:
                p = h2.find_next("p")
                # ensure this <p> is still inside the same section
                if p and p.find_parent("section") is sec_tag:
                    p.clear()
                    p.append(new_text)

    return str(soup)
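
Finally, an editorial sketch (not part of the diff) of the incremental re-publish flow these last helpers support; the HTML and layouts below are truncated placeholders.

from syntaxmatrix.page_builder_generation import (
    patch_hero_bg_precise,
    patch_section_titles_intros_changed_only,
)

html = (
    '<section class="hero hero-banner"><div class="hero-bg"></div></section>'
    '<section class="sec"><div class="wrap"><h2>Highlights</h2><p>Old intro.</p></div></section>'
)
old_layout = {"sections": [{"type": "features", "title": "Highlights", "text": "Old intro."}]}
new_layout = {"sections": [{"type": "features", "title": "What we deliver", "text": "New intro."}]}

html = patch_hero_bg_precise(html, "/uploads/media/images/imported/pixabay-42.jpg")
html = patch_section_titles_intros_changed_only(html, old_layout, new_layout)
# Only the hero background, the changed <h2>, and its first intro <p> are rewritten;
# everything else in the published HTML (fonts, palette, structure) stays as it was.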