bmad-plus 0.2.0 → 0.3.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (28)
  1. package/CHANGELOG.md +100 -0
  2. package/README.md +1 -0
  3. package/oveanet-pack/seo-audit-360/README.md +59 -53
  4. package/oveanet-pack/seo-audit-360/SKILL.md +171 -0
  5. package/oveanet-pack/seo-audit-360/agent/seo-chief.md +275 -0
  6. package/oveanet-pack/seo-audit-360/agent/seo-judge.md +241 -0
  7. package/oveanet-pack/seo-audit-360/agent/seo-scout.md +171 -0
  8. package/oveanet-pack/seo-audit-360/agent.yaml +69 -70
  9. package/oveanet-pack/seo-audit-360/ref/cwv-thresholds.md +87 -0
  10. package/oveanet-pack/seo-audit-360/ref/eeat-criteria.md +123 -0
  11. package/oveanet-pack/seo-audit-360/ref/geo-signals.md +167 -0
  12. package/oveanet-pack/seo-audit-360/ref/quality-gates.md +133 -0
  13. package/oveanet-pack/seo-audit-360/ref/schema-catalog.md +91 -0
  14. package/oveanet-pack/seo-audit-360/ref/schema-templates.json +356 -0
  15. package/oveanet-pack/seo-audit-360/requirements.txt +14 -0
  16. package/oveanet-pack/seo-audit-360/scripts/install.ps1 +53 -0
  17. package/oveanet-pack/seo-audit-360/scripts/install.sh +48 -0
  18. package/oveanet-pack/seo-audit-360/scripts/seo_apis.py +464 -0
  19. package/oveanet-pack/seo-audit-360/scripts/seo_crawl.py +282 -0
  20. package/oveanet-pack/seo-audit-360/scripts/seo_fetch.py +231 -0
  21. package/oveanet-pack/seo-audit-360/scripts/seo_parse.py +255 -0
  22. package/oveanet-pack/seo-audit-360/scripts/seo_screenshot.py +202 -0
  23. package/oveanet-pack/seo-audit-360/templates/seo-audit-workflow.md +241 -0
  24. package/package.json +1 -1
  25. package/oveanet-pack/seo-audit-360/agent/seo-geo-360-auditor.md +0 -441
  26. package/oveanet-pack/seo-audit-360/templates/llms.txt +0 -73
  27. package/oveanet-pack/seo-audit-360/templates/robots.txt +0 -38
  28. package/oveanet-pack/seo-audit-360/templates/schema-templates.json +0 -116
package/oveanet-pack/seo-audit-360/scripts/seo_parse.py
@@ -0,0 +1,255 @@
+ #!/usr/bin/env python3
+ """
+ SEO Parse — HTML parser for SEO element extraction.
+
+ Extracts: title, meta tags, canonicals, headings, images, links (internal/external),
+ schema (JSON-LD), Open Graph, Twitter Cards, hreflang, word count, text/code ratio.
+
+ Author: Laurent Rochetta
+ License: MIT
+ """
+
+ import argparse
+ import json
+ import os
+ import re
+ import sys
+ from typing import Optional
+ from urllib.parse import urljoin, urlparse
+
+ try:
+     from bs4 import BeautifulSoup
+ except ImportError:
+     print("Error: beautifulsoup4 required. Install: pip install beautifulsoup4", file=sys.stderr)
+     sys.exit(1)
+
+ # Use lxml if available for speed, fallback to html.parser
+ try:
+     import lxml  # noqa: F401
+     HTML_PARSER = "lxml"
+ except ImportError:
+     HTML_PARSER = "html.parser"
+
+
+ def parse_html(html: str, base_url: Optional[str] = None) -> dict:
+     """
+     Parse HTML and extract all SEO-relevant elements.
+
+     Args:
+         html: Raw HTML content
+         base_url: Base URL for resolving relative links
+
+     Returns:
+         Comprehensive dictionary of SEO data
+     """
+     soup = BeautifulSoup(html, HTML_PARSER)
+
+     result = {
+         "title": None,
+         "title_length": 0,
+         "meta_description": None,
+         "meta_description_length": 0,
+         "meta_robots": None,
+         "meta_viewport": None,
+         "canonical": None,
+         "headings": {"h1": [], "h2": [], "h3": [], "h4": []},
+         "images": [],
+         "links": {"internal": [], "external": [], "broken_candidates": []},
+         "schema_blocks": [],
+         "open_graph": {},
+         "twitter_card": {},
+         "hreflang": [],
+         "word_count": 0,
+         "html_size_bytes": len(html.encode("utf-8")),
+         "text_ratio": 0.0,
+         "has_lang_attr": False,
+         "lang": None,
+         "scripts_count": 0,
+         "stylesheets_count": 0,
+         "dom_depth_estimate": 0,
+         "security_headers_hints": {},
+     }
+
+     # ── Title ──
+     title_tag = soup.find("title")
+     if title_tag:
+         result["title"] = title_tag.get_text(strip=True)
+         result["title_length"] = len(result["title"])
+
+     # ── Meta Tags ──
+     for meta in soup.find_all("meta"):
+         name = (meta.get("name") or "").lower()
+         property_attr = (meta.get("property") or "").lower()
+         content = meta.get("content", "")
+
+         if name == "description":
+             result["meta_description"] = content
+             result["meta_description_length"] = len(content)
+         elif name == "robots":
+             result["meta_robots"] = content
+         elif name == "viewport":
+             result["meta_viewport"] = content
+
+         # Open Graph
+         if property_attr.startswith("og:"):
+             result["open_graph"][property_attr] = content
+
+         # Twitter Card
+         if name.startswith("twitter:"):
+             result["twitter_card"][name] = content
+
+     # ── Language ──
+     html_tag = soup.find("html")
+     if html_tag and html_tag.get("lang"):
+         result["has_lang_attr"] = True
+         result["lang"] = html_tag.get("lang")
+
+     # ── Canonical ──
+     canonical = soup.find("link", rel="canonical")
+     if canonical:
+         result["canonical"] = canonical.get("href")
+
+     # ── Hreflang ──
+     for link in soup.find_all("link", rel="alternate"):
+         hreflang = link.get("hreflang")
+         if hreflang:
+             result["hreflang"].append({
+                 "lang": hreflang,
+                 "href": link.get("href"),
+             })
+
+     # ── Headings ──
+     for level in ["h1", "h2", "h3", "h4"]:
+         for tag in soup.find_all(level):
+             text = tag.get_text(strip=True)
+             if text:
+                 result["headings"][level].append(text)
+
+     # ── Images ──
+     for img in soup.find_all("img"):
+         src = img.get("src", "")
+         if base_url and src:
+             src = urljoin(base_url, src)
+
+         has_alt = img.get("alt") is not None
+         alt_text = img.get("alt", "")
+         has_dimensions = bool(img.get("width") and img.get("height"))
+
+         result["images"].append({
+             "src": src,
+             "alt": alt_text,
+             "has_alt": has_alt,
+             "alt_empty": has_alt and alt_text.strip() == "",
+             "width": img.get("width"),
+             "height": img.get("height"),
+             "has_dimensions": has_dimensions,
+             "loading": img.get("loading"),
+             "srcset": img.get("srcset") is not None,
+         })
+
+     # ── Links ──
+     if base_url:
+         base_domain = urlparse(base_url).netloc
+
+         for a in soup.find_all("a", href=True):
+             href = a.get("href", "")
+             if not href or href.startswith("#") or href.startswith("javascript:"):
+                 continue
+
+             full_url = urljoin(base_url, href)
+             parsed = urlparse(full_url)
+
+             link_data = {
+                 "href": full_url,
+                 "text": a.get_text(strip=True)[:100],
+                 "rel": a.get("rel", []),
+                 "is_nofollow": "nofollow" in (a.get("rel") or []),
+                 "target": a.get("target"),
+             }
+
+             if parsed.netloc == base_domain:
+                 result["links"]["internal"].append(link_data)
+             else:
+                 result["links"]["external"].append(link_data)
+
+     # ── Schema (JSON-LD) ──
+     for script in soup.find_all("script", type="application/ld+json"):
+         try:
+             schema_data = json.loads(script.string)
+             if isinstance(schema_data, dict):
+                 result["schema_blocks"].append({
+                     "type": schema_data.get("@type", "unknown"),
+                     "data": schema_data,
+                 })
+             elif isinstance(schema_data, list):
+                 for item in schema_data:
+                     if isinstance(item, dict):
+                         result["schema_blocks"].append({
+                             "type": item.get("@type", "unknown"),
+                             "data": item,
+                         })
+         except (json.JSONDecodeError, TypeError):
+             result["schema_blocks"].append({"type": "PARSE_ERROR", "data": None})
+
+     # ── Resource Counts ──
+     result["scripts_count"] = len(soup.find_all("script"))
+     result["stylesheets_count"] = len(soup.find_all("link", rel="stylesheet"))
+
+     # ── Word Count & Text Ratio ──
+     text_soup = BeautifulSoup(html, HTML_PARSER)
+     for element in text_soup(["script", "style", "nav", "footer", "header", "noscript"]):
+         element.decompose()
+
+     visible_text = text_soup.get_text(separator=" ", strip=True)
+     words = re.findall(r"\b\w+\b", visible_text)
+     result["word_count"] = len(words)
+
+     text_bytes = len(visible_text.encode("utf-8"))
+     if result["html_size_bytes"] > 0:
+         result["text_ratio"] = round(text_bytes / result["html_size_bytes"], 3)
+
+     return result
+
+
+ # ── CLI ────────────────────────────────────────────────────────────
+
+ def main():
+     parser = argparse.ArgumentParser(
+         description="SEO Parse — HTML parser for SEO analysis (BMAD+ SEO Engine)"
+     )
+     parser.add_argument("file", nargs="?", help="HTML file to parse")
+     parser.add_argument("--url", "-u", help="Base URL for resolving relative links")
+     parser.add_argument("--json", "-j", action="store_true", help="Output as JSON")
+
+     args = parser.parse_args()
+
+     if args.file:
+         real_path = os.path.realpath(args.file)
+         if not os.path.isfile(real_path):
+             print(f"Error: File not found: {args.file}", file=sys.stderr)
+             sys.exit(1)
+         with open(real_path, "r", encoding="utf-8") as f:
+             html = f.read()
+     else:
+         html = sys.stdin.read()
+
+     result = parse_html(html, args.url)
+
+     if args.json:
+         print(json.dumps(result, indent=2, ensure_ascii=False))
+     else:
+         print(f"Title: {result['title']} ({result['title_length']} chars)")
+         desc = result["meta_description"]
+         if desc and len(desc) > 80:
+             desc = desc[:80] + "..."
+         print(f"Meta Description: {desc}")
+         print(f"Canonical: {result['canonical']}")
+         print(f"Language: {result['lang']}")
+         print(f"H1: {len(result['headings']['h1'])} | H2: {len(result['headings']['h2'])} | H3: {len(result['headings']['h3'])}")
+         print(f"Images: {len(result['images'])} (missing alt: {sum(1 for i in result['images'] if not i['has_alt'])})")
+         print(f"Internal Links: {len(result['links']['internal'])} | External: {len(result['links']['external'])}")
+         # @type can be a string or a list in valid JSON-LD; coerce for display
+         print(f"Schema Blocks: {len(result['schema_blocks'])} ({', '.join(str(s['type']) for s in result['schema_blocks'])})")
+         print(f"Word Count: {result['word_count']:,}")
+         print(f"Text/HTML Ratio: {result['text_ratio']:.1%}")
+         print(f"Scripts: {result['scripts_count']} | Stylesheets: {result['stylesheets_count']}")
+
+
+ if __name__ == "__main__":
+     main()
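
Typical invocations of the parser above, either from a saved file or piped straight from a fetch (both paths are supported by its CLI):

```bash
# Parse a saved page, resolving relative links against the live URL
python scripts/seo_parse.py page.html --url https://example.com --json

# Or stream HTML in via stdin
curl -s https://example.com | python scripts/seo_parse.py --url https://example.com --json
```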
package/oveanet-pack/seo-audit-360/scripts/seo_screenshot.py
@@ -0,0 +1,202 @@
+ #!/usr/bin/env python3
+ """
+ SEO Screenshot — Viewport screenshot capture for visual SEO analysis.
+
+ Features:
+ - Mobile and desktop viewport presets
+ - Above-the-fold element detection
+ - Full-page capture option
+ - PNG output
+
+ Requires: playwright (pip install playwright && playwright install chromium)
+
+ Author: Laurent Rochetta
+ License: MIT
+ """
+
+ import argparse
+ import json
+ import sys
+
+
+ VIEWPORTS = {
+     "mobile": {"width": 375, "height": 812, "device_scale_factor": 3, "is_mobile": True},
+     "tablet": {"width": 768, "height": 1024, "device_scale_factor": 2, "is_mobile": True},
+     "desktop": {"width": 1440, "height": 900, "device_scale_factor": 1, "is_mobile": False},
+     "desktop-hd": {"width": 1920, "height": 1080, "device_scale_factor": 1, "is_mobile": False},
+ }
+
+
+ def capture_screenshot(
+     url: str,
+     output: str = "screenshot.png",
+     viewport: str = "desktop",
+     full_page: bool = False,
+     wait_ms: int = 2000,
+ ):
+     """
+     Capture a viewport screenshot of a URL using Playwright.
+
+     Args:
+         url: URL to capture
+         output: Output file path (.png)
+         viewport: Viewport preset (mobile, tablet, desktop, desktop-hd)
+         full_page: Capture full page scroll or just viewport
+         wait_ms: Wait time after page load (ms)
+     """
+     try:
+         from playwright.sync_api import sync_playwright
+     except ImportError:
+         print(
+             "Error: playwright required.\n"
+             "Install: pip install playwright && playwright install chromium",
+             file=sys.stderr,
+         )
+         sys.exit(1)
+
+     vp = VIEWPORTS.get(viewport, VIEWPORTS["desktop"])
+
+     with sync_playwright() as p:
+         browser = p.chromium.launch(headless=True)
+         context = browser.new_context(
+             viewport={"width": vp["width"], "height": vp["height"]},
+             device_scale_factor=vp["device_scale_factor"],
+             is_mobile=vp["is_mobile"],
+             user_agent=(
+                 "Mozilla/5.0 (iPhone; CPU iPhone OS 17_0 like Mac OS X) "
+                 "AppleWebKit/605.1.15 Mobile/15E148 Safari/604.1"
+                 if vp["is_mobile"]
+                 else "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 "
+                 "(KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36 BMADSEOEngine/2.0"
+             ),
+         )
+
+         page = context.new_page()
+
+         try:
+             page.goto(url, wait_until="networkidle", timeout=30000)
+         except Exception:
+             # Fallback: wait for the load event instead
+             page.goto(url, wait_until="load", timeout=30000)
+
+         # Wait for dynamic content
+         page.wait_for_timeout(wait_ms)
+
+         # Capture screenshot
+         page.screenshot(path=output, full_page=full_page)
+
+         # Gather above-the-fold metrics
+         metrics = page.evaluate("""() => {
+             const viewportHeight = window.innerHeight;
+             const viewportWidth = window.innerWidth;
+
+             // Find CTAs above the fold
+             const ctas = [];
+             const buttons = document.querySelectorAll('a, button, [role="button"]');
+             buttons.forEach(el => {
+                 const rect = el.getBoundingClientRect();
+                 if (rect.top < viewportHeight && rect.bottom > 0) {
+                     const text = el.textContent.trim().substring(0, 50);
+                     if (text && (
+                         /sign.?up|get.?start|try|buy|contact|demo|free|download|subscribe/i.test(text)
+                     )) {
+                         ctas.push({
+                             text: text,
+                             tag: el.tagName,
+                             top: Math.round(rect.top),
+                             visible: rect.width > 0 && rect.height > 0,
+                         });
+                     }
+                 }
+             });
+
+             // Find hero/LCP candidate
+             const images = document.querySelectorAll('img');
+             let largestImage = null;
+             let largestArea = 0;
+             images.forEach(img => {
+                 const rect = img.getBoundingClientRect();
+                 const area = rect.width * rect.height;
+                 if (area > largestArea && rect.top < viewportHeight) {
+                     largestArea = area;
+                     largestImage = {
+                         src: img.src.substring(0, 100),
+                         width: Math.round(rect.width),
+                         height: Math.round(rect.height),
+                         top: Math.round(rect.top),
+                     };
+                 }
+             });
+
+             // Check for horizontal scroll
+             const hasHorizontalScroll = document.documentElement.scrollWidth > viewportWidth;
+
+             // Font size check
+             const body = document.body;
+             const bodyFontSize = body ? parseFloat(getComputedStyle(body).fontSize) : 16;
+
+             return {
+                 viewportWidth,
+                 viewportHeight,
+                 ctas_above_fold: ctas.length,
+                 cta_details: ctas.slice(0, 5),
+                 largest_image_above_fold: largestImage,
+                 has_horizontal_scroll: hasHorizontalScroll,
+                 body_font_size_px: bodyFontSize,
+                 dom_element_count: document.querySelectorAll('*').length,
+             };
+         }""")
+
+         browser.close()
+
+     return metrics
+
+
+ # ── CLI ────────────────────────────────────────────────────────────
+
+ def main():
+     parser = argparse.ArgumentParser(
+         description="SEO Screenshot — Viewport capture (BMAD+ SEO Engine)"
+     )
+     parser.add_argument("url", help="URL to capture")
+     parser.add_argument("--output", "-o", default="screenshot.png", help="Output file path")
+     parser.add_argument(
+         "--viewport", "-v",
+         choices=list(VIEWPORTS.keys()), default="desktop",
+         help="Viewport preset"
+     )
+     parser.add_argument("--full", action="store_true", help="Capture full page (not just viewport)")
+     parser.add_argument("--wait", "-w", type=int, default=2000, help="Wait after load (ms)")
+     parser.add_argument("--json", "-j", action="store_true", help="Output metrics as JSON")
+
+     args = parser.parse_args()
+
+     metrics = capture_screenshot(
+         url=args.url,
+         output=args.output,
+         viewport=args.viewport,
+         full_page=args.full,
+         wait_ms=args.wait,
+     )
+
+     print(f"Screenshot saved: {args.output}", file=sys.stderr)
+
+     if args.json:
+         print(json.dumps(metrics, indent=2))
+     else:
+         print(f"\nAbove-the-Fold Analysis ({args.viewport}):")
+         print(f"  Viewport: {metrics['viewportWidth']}×{metrics['viewportHeight']}")
+         print(f"  CTAs above fold: {metrics['ctas_above_fold']}")
+         for cta in metrics.get("cta_details", []):
+             print(f"    - \"{cta['text']}\" ({cta['tag']}, top: {cta['top']}px)")
+         if metrics.get("largest_image_above_fold"):
+             img = metrics["largest_image_above_fold"]
+             print(f"  Largest image: {img['width']}×{img['height']} at y={img['top']}px")
+         print(f"  Horizontal scroll: {'⚠️ YES' if metrics['has_horizontal_scroll'] else '✅ No'}")
+         print(f"  Body font size: {metrics['body_font_size_px']}px {'✅' if metrics['body_font_size_px'] >= 16 else '⚠️ <16px'}")
+         print(f"  DOM elements: {metrics['dom_element_count']:,}")
+
+
+ if __name__ == "__main__":
+     main()
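
Typical invocations of the capture script above (flags as defined in its CLI):

```bash
# Mobile above-the-fold capture with JSON metrics
python scripts/seo_screenshot.py https://example.com --viewport mobile --output mobile.png --json

# Full-page desktop capture, waiting 4 s for lazy-loaded content
python scripts/seo_screenshot.py https://example.com --viewport desktop-hd --full --wait 4000 --output full.png
```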
package/oveanet-pack/seo-audit-360/templates/seo-audit-workflow.md
@@ -0,0 +1,241 @@
+ # SEO Audit Workflow — BMAD+ SEO Engine v2.0
+
+ > Author: Laurent Rochetta | By Oveanet × Laurent Rochetta
+
+ ## Overview
+
+ This workflow orchestrates the 3 SEO Engine agents (Scout, Judge, Chief) through 6 phases to produce a comprehensive audit with scored results and actionable fixes.
+
+ ---
+
+ ## Phase 1 — Reconnaissance (Scout solo)
+
+ **Duration**: ~2 min | **Agent**: Scout (Crawler role)
+
+ 1. Fetch homepage + `/robots.txt` + `/sitemap.xml`
+ 2. Detect business type (SaaS, e-commerce, local, publisher, agency)
+ 3. Mini-crawl: discover 10–25 pages via sitemap + internal links
+ 4. Detect rendering architecture (SSR / CSR / hybrid)
+ 5. Check for `/llms.txt` and `/llms-full.txt`
+
+ **Checkpoint**: "Identified a [type] site with [N] pages. Continue full audit?"
+
+ **Tools**:
+ ```bash
+ python scripts/seo_fetch.py [URL] --json
+ python scripts/seo_crawl.py [URL] --depth 2 --max 25 --json
+ ```
+
+ ---
+
+ ## Phase 2 — Deep Scan (Scout + Judge in PARALLEL)
+
+ **Duration**: ~5 min | **Agents**: Scout (Inspector) + Judge (Content Expert + Schema Master)
+
+ ### Scout inspects (9 categories):
+ 1. Crawlability (robots.txt, noindex, crawl depth)
+ 2. Indexability (canonicals, duplicates, pagination)
+ 3. Security (HTTPS, HSTS, CSP, X-Frame-Options)
+ 4. URL Structure (clean URLs, redirects, trailing slashes)
+ 5. Mobile (viewport, touch targets, font size)
+ 6. Core Web Vitals signals from source HTML
+ 7. Structured Data detection (pass to Judge)
+ 8. JavaScript rendering (CSR/SSR, SPA detection)
+ 9. IndexNow protocol
+
+ ### Judge analyzes (in parallel):
+ 1. E-E-A-T evaluation (Experience, Expertise, Authority, Trust)
+ 2. Content quality (word count, readability, keyword optimization)
+ 3. Schema validation (JSON-LD, types, deprecation status)
+ 4. Image audit (alt text, dimensions, format, lazy loading)
+ 5. Internal/external link analysis
+ 6. AI content detection markers
+
+ **Tools**:
+ ```bash
+ python scripts/seo_parse.py page.html --url [URL] --json
+ python scripts/seo_screenshot.py [URL] --viewport mobile --json
+ python scripts/seo_screenshot.py [URL] --viewport desktop --json
+ ```
+
+ ---
+
+ ## Phase 3 — AI Readiness & GEO (Judge solo)
+
+ **Duration**: ~3 min | **Agent**: Judge (GEO Analyst role)
+
+ 1. Check AI crawler access (GPTBot, ClaudeBot, PerplexityBot, OAI-SearchBot)
+ 2. Verify llms.txt compliance
+ 3. Check RSL 1.0 licensing
+ 4. Score passage-level citability (134–167 word blocks)
+ 5. Analyze brand mention signals
+ 6. Compute **AI Readiness Score (0–100)**
+
+ Reference: `ref/geo-signals.md`
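+
+ Step 1 looks for explicit rules for these crawlers in robots.txt. An illustrative allow-all policy (whether to grant access is a per-site decision):
+
+ ```
+ User-agent: GPTBot
+ Allow: /
+
+ User-agent: ClaudeBot
+ Allow: /
+
+ User-agent: PerplexityBot
+ Allow: /
+
+ User-agent: OAI-SearchBot
+ Allow: /
+ ```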
75
+
76
+ ---
77
+
78
+ ## Phase 4 — Scoring & Synthesis (Chief solo)
79
+
80
+ **Duration**: ~2 min | **Agent**: Chief (Scorer role)
81
+
82
+ Compute **SEO Health Score (0–100)** from weighted categories:
83
+
84
+ | Category | Weight |
85
+ |----------|--------|
86
+ | Technical SEO | 20% |
87
+ | Content & E-E-A-T | 22% |
88
+ | On-Page SEO | 18% |
89
+ | Schema | 10% |
90
+ | Performance (CWV) | 12% |
91
+ | AI Readiness (GEO) | 12% |
92
+ | Images | 6% |
93
+
94
+ Output: Score breakdown with visual indicators per category.
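+
+ A minimal sketch of the roll-up (weights from the table above; the function name and input shape are illustrative):
+
+ ```python
+ # Weighted SEO Health Score; category scores (0-100) come from Phases 2-3.
+ WEIGHTS = {
+     "technical": 0.20, "content_eeat": 0.22, "on_page": 0.18,
+     "schema": 0.10, "performance_cwv": 0.12, "ai_readiness": 0.12,
+     "images": 0.06,
+ }
+
+ def seo_health_score(scores: dict) -> float:
+     """Return the 0-100 weighted score; `scores` keys must match WEIGHTS."""
+     return round(sum(w * scores[cat] for cat, w in WEIGHTS.items()), 1)
+
+ # Example: strong technical base, weak AI readiness
+ seo_health_score({
+     "technical": 90, "content_eeat": 75, "on_page": 80, "schema": 60,
+     "performance_cwv": 85, "ai_readiness": 40, "images": 70,
+ })  # -> 74.1
+ ```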
+
+ ---
+
+ ## Phase 5 — Action Plan & Auto-Fixes (Chief solo)
+
+ **Duration**: ~3 min | **Agent**: Chief (Strategist role)
+
+ 1. Classify all issues: 🔴 Critical → 🟠 High → 🟡 Medium → 🟢 Low
+ 2. Identify Quick Wins (high impact / low effort)
+ 3. Generate 30/60/90-day roadmap
+ 4. **Auto-generate code fixes** for:
+    - Missing/broken meta tags (title, description)
+    - Schema JSON-LD blocks (from templates; see the sketch below)
+    - robots.txt improvements (AI crawler access)
+    - llms.txt file generation
+    - Image alt text suggestions
+ 5. **Checkpoint**: "Here's the plan. Want me to apply the fixes automatically?"
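+
+ As an illustration of step 4, a minimal Organization block of the kind the schema fixes produce (all values here are placeholders; the actual templates live in `ref/schema-templates.json`):
+
+ ```json
+ {
+   "@context": "https://schema.org",
+   "@type": "Organization",
+   "name": "Example Co",
+   "url": "https://example.com",
+   "logo": "https://example.com/logo.png",
+   "sameAs": ["https://www.linkedin.com/company/example"]
+ }
+ ```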
+
+ ---
+
+ ## Phase 5b — PageSpeed Perfection Loop (Scout + Chief iterative)
+
+ > **This is the battle-tested loop from our oveanet.ch optimization.**
+ > Goal: Achieve **100% on all 4 PageSpeed Insights categories** (Performance, Accessibility, Best Practices, SEO).
+
+ ### The Loop
+
+ ```
+ ┌─────────────────────────────────────┐
+ │ 1. Run PageSpeed Insights audit     │
+ │    (via API or manual)              │
+ │                 │                   │
+ │                 ▼                   │
+ │ 2. Parse failing audits             │
+ │    Group by category + priority     │
+ │                 │                   │
+ │                 ▼                   │
+ │ 3. Apply top-priority fix           │
+ │    (one fix at a time)              │
+ │                 │                   │
+ │                 ▼                   │
+ │ 4. Re-run PageSpeed                 │
+ │    Verify fix + no regressions      │
+ │                 │                   │
+ │                 ▼                   │
+ │ 5. Score improved?                  │
+ │    YES → Continue to next fix       │
+ │    NO → Revert and try a different  │
+ │         approach                    │
+ │                 │                   │
+ │                 ▼                   │
+ │ 6. All 4 categories = 100%?         │
+ │    YES → Done ✅                    │
+ │    NO → Go to step 2                │
+ └─────────────────────────────────────┘
+ ```
+
+ ### PageSpeed Fix Priority Order
+ 1. **Performance** (hardest — tackle first):
+    - Eliminate render-blocking resources
+    - Properly size images (WebP/AVIF + responsive)
+    - Reduce unused CSS/JS
+    - Defer offscreen images
+    - Minimize main-thread work
+    - Reduce server response time (TTFB)
+    - Preload the LCP image
+
+ 2. **Accessibility** (usually quick wins):
+    - Add alt text to all images
+    - Fix color contrast ratios (4.5:1 minimum)
+    - Add ARIA labels to interactive elements
+    - Ensure a logical heading hierarchy
+    - Add a lang attribute to the HTML element
+    - Ensure form labels
+
+ 3. **Best Practices**:
+    - HTTPS + no mixed content
+    - No browser errors in the console
+    - Remove deprecated APIs
+    - Add proper CSP headers
+
+ 4. **SEO** (usually easiest):
+    - Add a meta description
+    - Ensure crawlable links
+    - Valid robots.txt
+    - Proper viewport meta tag
+    - Descriptive link text
+
+ ### Key Rules
+ - **One fix at a time** — never batch changes; you need to isolate the impact of each fix
+ - **Always re-test** — PageSpeed scores can regress with seemingly unrelated changes
+ - **Mobile first** — always test the mobile viewport (Google uses mobile-first indexing)
+ - **Field vs Lab** — lab scores (Lighthouse) can differ from field data (CrUX); target lab 100% first
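+
+ For step 1 of the loop, the public PageSpeed Insights v5 API can be queried directly. A minimal sketch (the endpoint and response shape follow Google's public API; the helper name is illustrative):
+
+ ```python
+ # Fetch all four Lighthouse category scores for a URL via the PSI v5 API.
+ # Light use works without an API key; add key=... to params for more quota.
+ import requests
+
+ PSI = "https://www.googleapis.com/pagespeedonline/v5/runPagespeed"
+
+ def psi_scores(url: str, strategy: str = "mobile") -> dict:
+     params = [("url", url), ("strategy", strategy)]
+     params += [("category", c) for c in
+                ("PERFORMANCE", "ACCESSIBILITY", "BEST_PRACTICES", "SEO")]
+     data = requests.get(PSI, params=params, timeout=120).json()
+     cats = data["lighthouseResult"]["categories"]
+     # Lighthouse reports each score as a 0-1 float; scale to 0-100
+     return {name: round(cat["score"] * 100) for name, cat in cats.items()}
+
+ # e.g. psi_scores("https://example.com") ->
+ # {"performance": 92, "accessibility": 100, "best-practices": 100, "seo": 100}
+ ```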
+
+ ---
+
+ ## Phase 6 — Monitoring (Scout, optional)
+
+ **Duration**: ongoing | **Agent**: Scout (Crawler role)
+
+ 1. Save audit results to `.bmad-seo/history/[domain]-[date].json`
+ 2. On re-audit: compare with previous results
+ 3. Track: issues resolved, new issues, score evolution
+ 4. Generate progress report with deltas (see the sketch below)
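+
+ A sketch of the compare step. It assumes each history file stores a top-level `score` plus a list of issues with stable `id` fields; the exact snapshot schema is defined by the pack:
+
+ ```python
+ # Diff two audit snapshots from .bmad-seo/history/ (assumed schema: see above).
+ import json
+
+ def compare_audits(prev_path: str, curr_path: str) -> dict:
+     with open(prev_path) as f:
+         prev = json.load(f)
+     with open(curr_path) as f:
+         curr = json.load(f)
+     prev_ids = {issue["id"] for issue in prev["issues"]}
+     curr_ids = {issue["id"] for issue in curr["issues"]}
+     return {
+         "score_delta": curr["score"] - prev["score"],
+         "resolved": sorted(prev_ids - curr_ids),  # fixed since last audit
+         "new": sorted(curr_ids - prev_ids),       # regressions / new findings
+     }
+ ```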
+
+ ---
+
+ ## Command Quick Reference
+
+ ```bash
+ # Full audit (all 6 phases)
+ /seo full https://example.com
+
+ # Quick audit (Phases 1–4 only)
+ /seo quick https://example.com
+
+ # Individual commands
+ /seo technical https://example.com
+ /seo content https://example.com
+ /seo geo https://example.com
+ /seo schema https://example.com
+ /seo images https://example.com
+ /seo hreflang https://example.com
+
+ # PageSpeed perfection loop
+ /seo pagespeed https://example.com
+
+ # Strategic planning
+ /seo plan saas|ecommerce|local|publisher|agency
+
+ # Auto-fix generation
+ /seo fix
+
+ # Monitoring
+ /seo history
+ /seo compare
+ ```
+
+ ---
+
+ ## Tips for Best Results
+
+ 1. **Start with `/seo full`** for the first audit — it gives you the complete picture
+ 2. **Use `/seo pagespeed`** after fixing major issues to chase 100% scores
+ 3. **Re-run monthly** with `/seo compare` to track progress
+ 4. **Feed the AI crawlers**: allow GPTBot, ClaudeBot, and PerplexityBot in robots.txt
+ 5. **Check GEO separately**: AI search visibility evolves fast; audit quarterly with `/seo geo`