bmad-plus 0.4.0 → 0.4.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +18 -0
- package/README.md +12 -56
- package/osint-agent-package/skills/bmad-osint-investigate/osint/SKILL.md +452 -452
- package/osint-agent-package/skills/bmad-osint-investigate/osint/assets/dossier-template.md +116 -116
- package/osint-agent-package/skills/bmad-osint-investigate/osint/references/content-extraction.md +100 -100
- package/osint-agent-package/skills/bmad-osint-investigate/osint/references/platforms.md +130 -130
- package/osint-agent-package/skills/bmad-osint-investigate/osint/references/psychoprofile.md +69 -69
- package/osint-agent-package/skills/bmad-osint-investigate/osint/references/tools.md +281 -281
- package/osint-agent-package/skills/bmad-osint-investigate/osint/scripts/mcp-client.py +136 -136
- package/package.json +1 -1
- package/readme-international/README.de.md +1 -1
- package/readme-international/README.es.md +1 -1
- package/readme-international/README.fr.md +1 -1
- package/tools/cli/commands/install.js +74 -46
- package/tools/cli/i18n.js +501 -0
- package/oveanet-pack/animated-website/DEPLOYMENT.md +0 -104
- package/oveanet-pack/animated-website/README.md +0 -63
- package/oveanet-pack/animated-website/agent/animated-website-agent.md +0 -325
- package/oveanet-pack/animated-website/agent.yaml +0 -63
- package/oveanet-pack/animated-website/templates/animated-website-workflow.md +0 -55
- package/oveanet-pack/seo-audit-360/DEPLOYMENT.md +0 -115
- package/oveanet-pack/seo-audit-360/README.md +0 -66
- package/oveanet-pack/seo-audit-360/SKILL.md +0 -171
- package/oveanet-pack/seo-audit-360/agent/seo-chief.md +0 -294
- package/oveanet-pack/seo-audit-360/agent/seo-judge.md +0 -241
- package/oveanet-pack/seo-audit-360/agent/seo-scout.md +0 -171
- package/oveanet-pack/seo-audit-360/agent.yaml +0 -70
- package/oveanet-pack/seo-audit-360/checklist.md +0 -140
- package/oveanet-pack/seo-audit-360/extensions/google-analytics/EXTENSION.md +0 -79
- package/oveanet-pack/seo-audit-360/extensions/google-analytics/ga4_client.py +0 -200
- package/oveanet-pack/seo-audit-360/extensions/google-analytics/requirements.txt +0 -4
- package/oveanet-pack/seo-audit-360/extensions/google-search-console/EXTENSION.md +0 -109
- package/oveanet-pack/seo-audit-360/extensions/google-search-console/gsc_client.py +0 -186
- package/oveanet-pack/seo-audit-360/extensions/google-search-console/requirements.txt +0 -4
- package/oveanet-pack/seo-audit-360/hooks/seo-check.sh +0 -95
- package/oveanet-pack/seo-audit-360/pagespeed-playbook.md +0 -320
- package/oveanet-pack/seo-audit-360/ref/audit-schema.json +0 -187
- package/oveanet-pack/seo-audit-360/ref/cwv-thresholds.md +0 -87
- package/oveanet-pack/seo-audit-360/ref/eeat-criteria.md +0 -123
- package/oveanet-pack/seo-audit-360/ref/geo-signals.md +0 -167
- package/oveanet-pack/seo-audit-360/ref/hreflang-rules.md +0 -153
- package/oveanet-pack/seo-audit-360/ref/quality-gates.md +0 -133
- package/oveanet-pack/seo-audit-360/ref/schema-catalog.md +0 -91
- package/oveanet-pack/seo-audit-360/ref/schema-templates.json +0 -356
- package/oveanet-pack/seo-audit-360/requirements.txt +0 -14
- package/oveanet-pack/seo-audit-360/scripts/__pycache__/seo_crawl.cpython-314.pyc +0 -0
- package/oveanet-pack/seo-audit-360/scripts/__pycache__/seo_parse.cpython-314.pyc +0 -0
- package/oveanet-pack/seo-audit-360/scripts/install.ps1 +0 -53
- package/oveanet-pack/seo-audit-360/scripts/install.sh +0 -48
- package/oveanet-pack/seo-audit-360/scripts/seo_apis.py +0 -464
- package/oveanet-pack/seo-audit-360/scripts/seo_crawl.py +0 -282
- package/oveanet-pack/seo-audit-360/scripts/seo_fetch.py +0 -231
- package/oveanet-pack/seo-audit-360/scripts/seo_parse.py +0 -255
- package/oveanet-pack/seo-audit-360/scripts/seo_report.py +0 -403
- package/oveanet-pack/seo-audit-360/scripts/seo_screenshot.py +0 -202
- package/oveanet-pack/seo-audit-360/templates/seo-audit-workflow.md +0 -241
- package/oveanet-pack/seo-audit-360/tests/__pycache__/test_crawl.cpython-314-pytest-9.0.2.pyc +0 -0
- package/oveanet-pack/seo-audit-360/tests/__pycache__/test_parse.cpython-314-pytest-9.0.2.pyc +0 -0
- package/oveanet-pack/seo-audit-360/tests/fixtures/sample_page.html +0 -62
- package/oveanet-pack/seo-audit-360/tests/test_apis.py +0 -75
- package/oveanet-pack/seo-audit-360/tests/test_crawl.py +0 -121
- package/oveanet-pack/seo-audit-360/tests/test_fetch.py +0 -70
- package/oveanet-pack/seo-audit-360/tests/test_parse.py +0 -184
- package/oveanet-pack/universal-backup/DEPLOYMENT.md +0 -80
- package/oveanet-pack/universal-backup/README.md +0 -58
- package/oveanet-pack/universal-backup/agent/backup-agent.md +0 -71
- package/oveanet-pack/universal-backup/agent.yaml +0 -45
- package/oveanet-pack/universal-backup/templates/backup-workflow.md +0 -51
|
@@ -1,464 +0,0 @@
|
|
|
1
|
-
#!/usr/bin/env python3
|
|
2
|
-
"""
|
|
3
|
-
SEO APIs — Google free API client for live SEO data.
|
|
4
|
-
|
|
5
|
-
Connects to:
|
|
6
|
-
- PageSpeed Insights API v5 (lab scores + audits)
|
|
7
|
-
- Chrome UX Report (CrUX) API (field CWV data)
|
|
8
|
-
- Rich Results Test API (schema validation)
|
|
9
|
-
|
|
10
|
-
Requires: GOOGLE_API_KEY environment variable (free, no OAuth).
|
|
11
|
-
Get one at: https://console.cloud.google.com/apis/credentials
|
|
12
|
-
|
|
13
|
-
Author: Laurent Rochetta
|
|
14
|
-
License: MIT
|
|
15
|
-
"""
|
|
16
|
-
|
|
17
|
-
import argparse
|
|
18
|
-
import json
|
|
19
|
-
import os
|
|
20
|
-
import sys
|
|
21
|
-
from typing import Optional
|
|
22
|
-
|
|
23
|
-
try:
|
|
24
|
-
import requests
|
|
25
|
-
except ImportError:
|
|
26
|
-
print("Error: requests required. Install: pip install requests", file=sys.stderr)
|
|
27
|
-
sys.exit(1)
|
|
28
|
-
|
|
29
|
-
|
|
30
|
-
API_KEY = os.environ.get("GOOGLE_API_KEY", "")
|
|
31
|
-
|
|
32
|
-
PSI_ENDPOINT = "https://www.googleapis.com/pagespeedonline/v5/runPagespeed"
|
|
33
|
-
CRUX_ENDPOINT = "https://chromeuxreport.googleapis.com/v1/records:queryRecord"
|
|
34
|
-
RICH_RESULTS_ENDPOINT = "https://searchconsole.googleapis.com/v1/urlTestingTools/mobileFriendlyTest:run"
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
# ── PageSpeed Insights ─────────────────────────────────────────────
|
|
38
|
-
|
|
39
|
-
def run_pagespeed(url: str, strategy: str = "mobile", categories: Optional[list] = None) -> dict:
    """
    Run a PageSpeed Insights audit.

    Args:
        url: URL to audit
        strategy: "mobile" or "desktop"
        categories: Lighthouse categories to request
            (PERFORMANCE, ACCESSIBILITY, BEST_PRACTICES, SEO).
            Defaults to all four.

    Returns:
        Structured result with category scores, Core Web Vitals,
        failing audits and opportunities, or {"error": ...} on failure.
    """
    if not API_KEY:
        return {"error": "GOOGLE_API_KEY not set. Get one at https://console.cloud.google.com/apis/credentials"}

    if categories is None:
        categories = ["PERFORMANCE", "ACCESSIBILITY", "BEST_PRACTICES", "SEO"]

    # requests encodes a list value as a repeated query parameter
    # (category=A&category=B) and URL-encodes the target URL for us.
    # The previous hand-built query string was never URL-encoded and
    # broke on URLs containing '&', '#', or spaces; it also left a
    # dead, unused params dict behind.
    params = {
        "url": url,
        "key": API_KEY,
        "strategy": strategy,
        "category": categories,
    }

    try:
        response = requests.get(PSI_ENDPOINT, params=params, timeout=120)
        response.raise_for_status()
        data = response.json()
    except requests.RequestException as e:
        return {"error": f"PSI API request failed: {e}"}

    result = {
        "url": url,
        "strategy": strategy,
        "scores": {},
        "cwv": {},
        "failing_audits": [],
        "opportunities": [],
    }

    # Category scores: Lighthouse reports 0..1; expose as 0..100.
    categories_data = data.get("lighthouseResult", {}).get("categories", {})
    for cat_id, cat_data in categories_data.items():
        # `score` can be null in the API response; treat that as 0
        # instead of crashing on None * 100.
        score = cat_data.get("score") or 0
        result["scores"][cat_id] = round(score * 100)

    # Core Web Vitals / lab metrics from the Lighthouse audits.
    audits = data.get("lighthouseResult", {}).get("audits", {})

    cwv_metrics = {
        "largest-contentful-paint": "LCP",
        "interaction-to-next-paint": "INP",
        "cumulative-layout-shift": "CLS",
        "first-contentful-paint": "FCP",
        "total-blocking-time": "TBT",
        "speed-index": "SI",
    }

    for audit_id, label in cwv_metrics.items():
        if audit_id in audits:
            audit = audits[audit_id]
            result["cwv"][label] = {
                "value": audit.get("numericValue"),
                "display": audit.get("displayValue", ""),
                "score": round((audit.get("score") or 0) * 100),
            }

    # Failing audits: anything Lighthouse scores below 0.9.
    for audit_id, audit in audits.items():
        score = audit.get("score")
        if score is not None and score < 0.9 and audit.get("title"):
            severity = "critical" if score < 0.5 else "warning"
            result["failing_audits"].append({
                "id": audit_id,
                "title": audit.get("title", ""),
                "description": audit.get("description", "")[:200],
                "score": round(score * 100),
                "severity": severity,
                "display_value": audit.get("displayValue", ""),
            })

    # Sort failures by score (worst first)
    result["failing_audits"].sort(key=lambda x: x["score"])

    # Opportunities: audits with measurable time savings.
    for audit_id, audit in audits.items():
        details = audit.get("details", {})
        if details.get("type") == "opportunity" and details.get("overallSavingsMs", 0) > 0:
            result["opportunities"].append({
                "id": audit_id,
                "title": audit.get("title", ""),
                "savings_ms": details.get("overallSavingsMs", 0),
                "savings_bytes": details.get("overallSavingsBytes", 0),
            })

    result["opportunities"].sort(key=lambda x: x["savings_ms"], reverse=True)

    # Field data (CrUX, embedded in the PSI response when available).
    loading_exp = data.get("loadingExperience", {})
    if loading_exp.get("metrics"):
        result["field_data"] = {}
        for metric_id, metric_data in loading_exp["metrics"].items():
            result["field_data"][metric_id] = {
                "percentile": metric_data.get("percentile"),
                "category": metric_data.get("category"),
            }

    return result
|
|
157
|
-
|
|
158
|
-
|
|
159
|
-
# ── CrUX API ───────────────────────────────────────────────────────
|
|
160
|
-
|
|
161
|
-
def run_crux(url: str, form_factor: str = "PHONE") -> dict:
    """
    Query Chrome UX Report for real-world performance data.

    Args:
        url: URL or origin to query
        form_factor: PHONE, DESKTOP, or ALL_FORM_FACTORS

    Returns:
        Field CWV data at the 75th percentile, or {"error": ...} when
        the key is missing, the call fails, or no CrUX data exists.
    """
    if not API_KEY:
        return {"error": "GOOGLE_API_KEY not set"}

    # Try URL-level data first; fall back to origin-level when the
    # specific page doesn't have enough traffic.
    from urllib.parse import urlparse
    parsed = urlparse(url)
    origin = f"{parsed.scheme}://{parsed.netloc}"

    payload = {
        "url": url,
        "formFactor": form_factor,
    }

    try:
        response = requests.post(
            f"{CRUX_ENDPOINT}?key={API_KEY}",
            json=payload,
            timeout=30,
        )

        if response.status_code == 404:
            # No URL-level data, try origin
            payload = {"origin": origin, "formFactor": form_factor}
            response = requests.post(
                f"{CRUX_ENDPOINT}?key={API_KEY}",
                json=payload,
                timeout=30,
            )

            if response.status_code == 404:
                return {"error": f"No CrUX data available for {url} (not enough traffic)"}

        response.raise_for_status()
        data = response.json()
    except requests.RequestException as e:
        return {"error": f"CrUX API request failed: {e}"}

    result = {
        "url": url,
        "form_factor": form_factor,
        "metrics": {},
        "collection_period": {},
    }

    # Extract metrics
    metrics = data.get("record", {}).get("metrics", {})

    metric_map = {
        "largest_contentful_paint": "LCP",
        "interaction_to_next_paint": "INP",
        "cumulative_layout_shift": "CLS",
        "first_contentful_paint": "FCP",
        "experimental_time_to_first_byte": "TTFB",
    }

    for api_name, label in metric_map.items():
        if api_name in metrics:
            m = metrics[api_name]
            p75 = m.get("percentiles", {}).get("p75")
            histogram = m.get("histogram", [])

            # CrUX returns exactly three ordered histogram bins:
            # good, needs-improvement, poor. Classify by position.
            # The previous "has an 'end' key" test mis-counted the
            # middle (needs-improvement) bin as "good", because both
            # of the first two bins carry an "end" bound — so
            # needs_improvement was effectively never populated.
            bin_names = ("good", "needs_improvement", "poor")
            distribution = {}
            for i, bucket in enumerate(histogram):
                name = bin_names[i] if i < len(bin_names) else "poor"
                distribution[name] = distribution.get(name, 0) + bucket.get("density", 0)

            result["metrics"][label] = {
                "p75": p75,
                "distribution": distribution,
            }

    # Collection period
    period = data.get("record", {}).get("collectionPeriod", {})
    result["collection_period"] = {
        "first_date": period.get("firstDate", {}),
        "last_date": period.get("lastDate", {}),
    }

    return result
|
|
257
|
-
|
|
258
|
-
|
|
259
|
-
# ── Rich Results Test ──────────────────────────────────────────────
|
|
260
|
-
|
|
261
|
-
def run_rich_results_test(url: str) -> dict:
    """
    Check if a URL is eligible for rich results.

    Note: This uses the URL Testing Tools API (Mobile-Friendly Test),
    which also returns rich results information and works with a plain
    API key. The dedicated Rich Results Test API requires OAuth2, so
    this free alternative is used instead.

    Args:
        url: URL to test

    Returns:
        dict with "mobile_friendly" (bool) and a list of "issues",
        or {"error": ...} when the key is missing or the call fails.
    """
    if not API_KEY:
        return {"error": "GOOGLE_API_KEY not set"}

    try:
        response = requests.post(
            f"{RICH_RESULTS_ENDPOINT}?key={API_KEY}",
            json={"url": url},
            timeout=60,
        )
        response.raise_for_status()
        data = response.json()
    except requests.RequestException as e:
        return {"error": f"URL Testing API request failed: {e}"}

    # Collect flagged rules, if any, into the issues list.
    issues = [
        {"rule": issue.get("rule", "")}
        for issue in data.get("mobileFriendlyIssues", [])
    ]

    return {
        "url": url,
        "mobile_friendly": data.get("mobileFriendliness") == "MOBILE_FRIENDLY",
        "issues": issues,
    }
|
|
303
|
-
|
|
304
|
-
|
|
305
|
-
# ── Unified Runner ─────────────────────────────────────────────────
|
|
306
|
-
|
|
307
|
-
def run_all(url: str) -> dict:
    """Run all available API checks and merge results.

    Progress messages go to stderr so stdout stays clean for JSON output.

    Args:
        url: URL to analyze.

    Returns:
        dict combining PSI (mobile + desktop), CrUX, and mobile-friendly
        results, plus a UTC timestamp.
    """
    # Proper import instead of the original __import__("datetime") hack.
    # datetime.utcnow() is deprecated; use an aware UTC datetime and
    # format it to keep the original "...Z" timestamp shape.
    from datetime import datetime, timezone

    print("Running PageSpeed Insights (mobile)...", file=sys.stderr)
    psi_mobile = run_pagespeed(url, strategy="mobile")

    print("Running PageSpeed Insights (desktop)...", file=sys.stderr)
    psi_desktop = run_pagespeed(url, strategy="desktop")

    print("Running CrUX API...", file=sys.stderr)
    crux = run_crux(url)

    print("Running Mobile-Friendly Test...", file=sys.stderr)
    rich = run_rich_results_test(url)

    return {
        "url": url,
        "timestamp": datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
        "pagespeed": {
            "mobile": psi_mobile,
            "desktop": psi_desktop,
        },
        "crux": crux,
        "mobile_friendly": rich,
    }
|
|
331
|
-
|
|
332
|
-
|
|
333
|
-
# ── CLI ────────────────────────────────────────────────────────────
|
|
334
|
-
|
|
335
|
-
def print_psi_summary(result: dict, label: str):
    """Print a human-readable PSI summary."""
    error = result.get("error")
    if error:
        print(f" Error: {error}")
        return

    def traffic_light(score):
        # Lighthouse convention: >=90 good, 50-89 average, <50 poor.
        if score >= 90:
            return "🟢"
        if score >= 50:
            return "🟡"
        return "🔴"

    print(f"\n {label} Scores:")
    for cat, score in result.get("scores", {}).items():
        pretty = cat.replace("_", " ").replace("-", " ").title()
        print(f" {traffic_light(score)} {pretty}: {score}/100")

    vitals = result.get("cwv", {})
    if vitals:
        print(f"\n Core Web Vitals:")
        for metric, data in vitals.items():
            print(f" {traffic_light(data['score'])} {metric}: {data['display']} ({data['score']}/100)")

    # Cap the lists so the summary stays readable.
    for audit in result.get("failing_audits", [])[:5] and [None] or []:
        pass  # placeholder never reached; see below

    top_failures = result.get("failing_audits", [])[:5]
    if top_failures:
        print(f"\n Top Failing Audits:")
        for audit in top_failures:
            marker = "🔴" if audit["severity"] == "critical" else "🟠"
            print(f" {marker} {audit['title']} ({audit['score']}/100)")

    top_opps = result.get("opportunities", [])[:3]
    if top_opps:
        print(f"\n Top Opportunities:")
        for opp in top_opps:
            print(f" 💡 {opp['title']} (save {opp['savings_ms']}ms)")
|
|
367
|
-
|
|
368
|
-
|
|
369
|
-
def main():
    """CLI entry point: parse arguments and dispatch to the requested API(s).

    Modes (mutually intended, precedence preserved from the original):
    --all, then --pagespeed (also the default with no flag), then
    --crux, then --richtest.
    """
    parser = argparse.ArgumentParser(
        description="SEO APIs — Google free API client (BMAD+ SEO Engine)"
    )
    parser.add_argument("url", nargs="?", help="URL to analyze")
    parser.add_argument("--pagespeed", action="store_true", help="Run PageSpeed Insights")
    parser.add_argument("--crux", action="store_true", help="Run CrUX API")
    parser.add_argument("--richtest", action="store_true", help="Run Rich Results Test")
    parser.add_argument("--all", action="store_true", help="Run all APIs")
    parser.add_argument("--strategy", choices=["mobile", "desktop"], default="mobile",
                        help="PSI strategy (default: mobile)")
    parser.add_argument("--json", "-j", action="store_true", help="Output as JSON")

    args = parser.parse_args()

    if not args.url:
        parser.print_help()
        sys.exit(1)

    # Every endpoint needs the key; fail fast with setup instructions.
    if not API_KEY:
        print("⚠️ GOOGLE_API_KEY not set!", file=sys.stderr)
        print(" Get a free key: https://console.cloud.google.com/apis/credentials", file=sys.stderr)
        print(" Enable: PageSpeed Insights API + Chrome UX Report API", file=sys.stderr)
        print(" Set: export GOOGLE_API_KEY=your_key", file=sys.stderr)
        sys.exit(1)

    strategy_label = "📱 Mobile" if args.strategy == "mobile" else "🖥️ Desktop"

    if args.all:
        result = run_all(args.url)
        if args.json:
            print(json.dumps(result, indent=2, ensure_ascii=False))
        else:
            print(f"\n{'='*60}")
            print(f"SEO API Report: {args.url}")
            print(f"{'='*60}")
            print_psi_summary(result["pagespeed"]["mobile"], "📱 Mobile")
            print_psi_summary(result["pagespeed"]["desktop"], "🖥️ Desktop")

            crux = result["crux"]
            if not crux.get("error"):
                print(f"\n 📊 CrUX Field Data:")
                for metric, data in crux.get("metrics", {}).items():
                    print(f" {metric}: p75 = {data['p75']}")
            else:
                print(f"\n 📊 CrUX: {crux['error']}")

            mf = result["mobile_friendly"]
            if not mf.get("error"):
                icon = "✅" if mf["mobile_friendly"] else "❌"
                print(f"\n 📱 Mobile-Friendly: {icon}")
            else:
                print(f"\n 📱 Mobile-Friendly: {mf['error']}")

    # Checked before --crux/--richtest (same precedence as before), and
    # also the default when no mode flag is given — this merges the two
    # previously byte-identical branches (--pagespeed and the trailing
    # default else).
    elif args.pagespeed or not (args.crux or args.richtest):
        result = run_pagespeed(args.url, strategy=args.strategy)
        if args.json:
            print(json.dumps(result, indent=2, ensure_ascii=False))
        else:
            print_psi_summary(result, strategy_label)

    elif args.crux:
        result = run_crux(args.url)
        if args.json:
            print(json.dumps(result, indent=2, ensure_ascii=False))
        else:
            if result.get("error"):
                print(f"Error: {result['error']}")
            else:
                print(f"\nCrUX Field Data: {args.url}")
                for metric, data in result.get("metrics", {}).items():
                    print(f" {metric}: p75 = {data['p75']}")

    else:  # args.richtest
        result = run_rich_results_test(args.url)
        if args.json:
            print(json.dumps(result, indent=2, ensure_ascii=False))
        else:
            if result.get("error"):
                print(f"Error: {result['error']}")
            else:
                icon = "✅" if result["mobile_friendly"] else "❌"
                print(f"Mobile-Friendly: {icon}")
                if result["issues"]:
                    for issue in result["issues"]:
                        print(f" ⚠️ {issue['rule']}")
|
|
461
|
-
|
|
462
|
-
|
|
463
|
-
# Script entry point: run the CLI only when executed directly,
# not when imported as a module.
if __name__ == "__main__":
    main()
|