bmad-plus 0.3.1 → 0.3.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,70 @@
1
+ """
2
+ Tests for seo_fetch.py — SSRF protection, URL handling, error cases.
3
+
4
+ Author: Laurent Rochetta
5
+ """
6
+
7
+ import sys
8
+ import os
9
+ sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", "scripts"))
10
+
11
+ from seo_fetch import is_safe_url, fetch_page
12
+
13
+
14
class TestSSRFProtection:
    """SSRF guard: loopback, private, and reserved addresses must be rejected."""

    def test_blocks_localhost(self):
        # Loopback must always be refused.
        assert is_safe_url("http://127.0.0.1/admin") is False

    def test_blocks_private_10(self):
        # RFC 1918 10.0.0.0/8 range.
        assert is_safe_url("http://10.0.0.1/secret") is False

    def test_blocks_private_192(self):
        # RFC 1918 192.168.0.0/16 range.
        assert is_safe_url("http://192.168.1.1/") is False

    def test_blocks_private_172(self):
        # RFC 1918 172.16.0.0/12 range.
        assert is_safe_url("http://172.16.0.1/") is False

    def test_allows_public_ip(self):
        # A globally routable address is permitted.
        assert is_safe_url("https://93.184.216.34/") is True

    def test_allows_public_domain(self):
        # A public hostname is permitted.
        assert is_safe_url("https://example.com/") is True

    def test_blocks_empty_hostname(self):
        # A URL with no host component must be refused.
        assert is_safe_url("http:///nohost") is False

    def test_blocks_zero_ip(self):
        # 0.0.0.0 is the unspecified address — never fetchable.
        assert is_safe_url("http://0.0.0.0/") is False
40
+
41
+
42
class TestFetchPage:
    """Behavioral checks for fetch_page.

    NOTE(review): several of these tests perform live requests to
    example.com and therefore depend on network availability.
    """

    def test_normalizes_url_without_scheme(self):
        # The caller-supplied (scheme-less) URL is echoed back unchanged.
        result = fetch_page("example.com", timeout=5)
        assert result["url"] == "example.com"
        # Should have attempted https://example.com

    def test_blocks_invalid_scheme(self):
        # Only http/https are acceptable; ftp must be rejected with an error.
        result = fetch_page("ftp://example.com/file")
        assert result["error"] is not None
        assert "Invalid URL scheme" in result["error"]

    def test_blocks_ssrf(self):
        # Loopback targets are refused before any request is made.
        result = fetch_page("http://127.0.0.1/admin")
        assert result["error"] is not None
        assert "Blocked" in result["error"]

    def test_result_structure(self):
        """Verify the result dict has all expected keys."""
        result = fetch_page("https://example.com", timeout=5)
        expected_keys = {"url", "final_url", "status_code", "content", "headers",
                         "redirect_chain", "content_length", "response_time_ms", "error"}
        assert expected_keys == set(result.keys())

    def test_timeout_returns_error(self):
        # 192.0.2.1 (TEST-NET-1) is non-routable; an error is expected either
        # from a connect timeout or — NOTE(review): possibly from the SSRF
        # guard, since TEST-NET-1 is a reserved range. Either path satisfies
        # the assertion; confirm which one actually fires.
        result = fetch_page("http://192.0.2.1/", timeout=1)
        assert result["error"] is not None
@@ -0,0 +1,184 @@
1
+ """
2
+ Tests for seo_parse.py — HTML parsing and SEO element extraction.
3
+
4
+ Author: Laurent Rochetta
5
+ """
6
+
7
+ import json
8
+ import os
9
+ import sys
10
+
11
+ sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", "scripts"))
12
+
13
+ from seo_parse import parse_html
14
+
15
+ FIXTURES_DIR = os.path.join(os.path.dirname(__file__), "fixtures")
16
+
17
+
18
def load_fixture(name: str) -> str:
    """Return the UTF-8 text of the named file from the fixtures directory."""
    fixture_path = os.path.join(FIXTURES_DIR, name)
    with open(fixture_path, "r", encoding="utf-8") as fh:
        return fh.read()
21
+
22
+
23
class TestTitleParsing:
    """<title> extraction and length reporting."""

    def test_extracts_title(self):
        parsed = parse_html(load_fixture("sample_page.html"))
        assert parsed["title"] == "SEO Test Page — BMAD+ Fixture"

    def test_title_length(self):
        # Reported length must match the extracted title's character count.
        parsed = parse_html(load_fixture("sample_page.html"))
        assert parsed["title_length"] == len("SEO Test Page — BMAD+ Fixture")

    def test_missing_title(self):
        # No <title> tag: title is None and its length is reported as 0.
        parsed = parse_html("<html><body><p>No title</p></body></html>")
        assert parsed["title"] is None
        assert parsed["title_length"] == 0
36
+
37
+
38
class TestMetaTags:
    """Meta description / robots / viewport extraction."""

    def test_extracts_description(self):
        parsed = parse_html(load_fixture("sample_page.html"))
        assert "test page" in parsed["meta_description"].lower()

    def test_extracts_robots(self):
        parsed = parse_html(load_fixture("sample_page.html"))
        assert parsed["meta_robots"] == "index, follow"

    def test_extracts_viewport(self):
        parsed = parse_html(load_fixture("sample_page.html"))
        assert "width=device-width" in parsed["meta_viewport"]

    def test_missing_description(self):
        # Absent meta description yields None rather than an empty string.
        parsed = parse_html("<html><head><title>T</title></head><body></body></html>")
        assert parsed["meta_description"] is None
54
+
55
+
56
class TestCanonical:
    """rel=canonical link extraction."""

    def test_extracts_canonical(self):
        parsed = parse_html(load_fixture("sample_page.html"))
        assert parsed["canonical"] == "https://example.com/test"

    def test_missing_canonical(self):
        # No canonical link present → None.
        parsed = parse_html("<html><body></body></html>")
        assert parsed["canonical"] is None
64
+
65
+
66
class TestHeadings:
    """Heading (h1/h2/h3) collection and counting."""

    def test_h1_count(self):
        parsed = parse_html(load_fixture("sample_page.html"))
        assert len(parsed["headings"]["h1"]) == 1

    def test_h2_count(self):
        parsed = parse_html(load_fixture("sample_page.html"))
        assert len(parsed["headings"]["h2"]) == 2

    def test_h3_count(self):
        parsed = parse_html(load_fixture("sample_page.html"))
        assert len(parsed["headings"]["h3"]) == 1

    def test_multiple_h1_detection(self):
        # Duplicate h1s are all collected so callers can flag the SEO issue.
        markup = "<html><body><h1>First</h1><h1>Second</h1></body></html>"
        parsed = parse_html(markup)
        assert len(parsed["headings"]["h1"]) == 2
83
+
84
+
85
class TestImages:
    """Image inventory and alt-text attribute reporting."""

    def test_image_count(self):
        parsed = parse_html(load_fixture("sample_page.html"))
        assert len(parsed["images"]) == 3

    def test_image_with_alt(self):
        parsed = parse_html(load_fixture("sample_page.html"))
        hero = [img for img in parsed["images"] if "hero" in img["src"]]
        assert len(hero) == 1
        assert hero[0]["has_alt"] is True
        assert hero[0]["alt"] == "Hero image for testing"

    def test_image_without_alt(self):
        # Missing alt attribute entirely.
        parsed = parse_html(load_fixture("sample_page.html"))
        missing = [img for img in parsed["images"] if "no-alt" in img["src"]]
        assert len(missing) == 1
        assert missing[0]["has_alt"] is False

    def test_image_with_empty_alt(self):
        # alt="" is present (has_alt True) but explicitly flagged as empty.
        parsed = parse_html(load_fixture("sample_page.html"))
        blank = [img for img in parsed["images"] if "empty-alt" in img["src"]]
        assert len(blank) == 1
        assert blank[0]["has_alt"] is True
        assert blank[0]["alt_empty"] is True
109
+
110
+
111
class TestLinks:
    """Internal/external link classification relative to base_url."""

    def test_internal_links(self):
        parsed = parse_html(load_fixture("sample_page.html"), base_url="https://example.com")
        assert len(parsed["links"]["internal"]) >= 2

    def test_external_links(self):
        parsed = parse_html(load_fixture("sample_page.html"), base_url="https://example.com")
        assert len(parsed["links"]["external"]) >= 1

    def test_nofollow_detection(self):
        # At least one external link in the fixture carries rel=nofollow.
        parsed = parse_html(load_fixture("sample_page.html"), base_url="https://example.com")
        flagged = [link for link in parsed["links"]["external"] if link["is_nofollow"]]
        assert len(flagged) >= 1
124
+
125
+
126
class TestSchema:
    """JSON-LD (application/ld+json) schema block extraction."""

    def test_schema_block_count(self):
        parsed = parse_html(load_fixture("sample_page.html"))
        assert len(parsed["schema_blocks"]) == 2

    def test_schema_types(self):
        parsed = parse_html(load_fixture("sample_page.html"))
        found_types = [block["type"] for block in parsed["schema_blocks"]]
        assert "Organization" in found_types
        assert "BreadcrumbList" in found_types

    def test_schema_parse_error(self):
        # Malformed JSON-LD is reported as a PARSE_ERROR block, not dropped.
        markup = '<html><body><script type="application/ld+json">{invalid json}</script></body></html>'
        parsed = parse_html(markup)
        assert len(parsed["schema_blocks"]) == 1
        assert parsed["schema_blocks"][0]["type"] == "PARSE_ERROR"
142
+
143
+
144
class TestOpenGraph:
    """Open Graph (og:*) meta property extraction."""

    def test_og_title(self):
        parsed = parse_html(load_fixture("sample_page.html"))
        assert parsed["open_graph"].get("og:title") == "SEO Test Page"

    def test_og_type(self):
        parsed = parse_html(load_fixture("sample_page.html"))
        assert parsed["open_graph"].get("og:type") == "website"
152
+
153
+
154
class TestHreflang:
    """hreflang alternate-language link extraction."""

    def test_hreflang_count(self):
        parsed = parse_html(load_fixture("sample_page.html"))
        assert len(parsed["hreflang"]) == 3  # en, fr, x-default

    def test_hreflang_languages(self):
        parsed = parse_html(load_fixture("sample_page.html"))
        languages = [entry["lang"] for entry in parsed["hreflang"]]
        assert "en" in languages
        assert "fr" in languages
        assert "x-default" in languages
165
+
166
+
167
class TestContentMetrics:
    """Word count, text ratio, lang attribute, and HTML size metrics."""

    def test_word_count_positive(self):
        parsed = parse_html(load_fixture("sample_page.html"))
        assert parsed["word_count"] > 30

    def test_text_ratio_range(self):
        # text_ratio is a fraction of text to markup: strictly between 0 and 1.
        parsed = parse_html(load_fixture("sample_page.html"))
        assert 0 < parsed["text_ratio"] < 1

    def test_has_lang_attr(self):
        parsed = parse_html(load_fixture("sample_page.html"))
        assert parsed["has_lang_attr"] is True
        assert parsed["lang"] == "en"

    def test_html_size(self):
        # Size is measured in encoded UTF-8 bytes, not characters.
        markup = load_fixture("sample_page.html")
        parsed = parse_html(markup)
        assert parsed["html_size_bytes"] == len(markup.encode("utf-8"))
package/package.json CHANGED
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "$schema": "https://json.schemastore.org/package.json",
3
3
  "name": "bmad-plus",
4
- "version": "0.3.1",
4
+ "version": "0.3.3",
5
5
  "description": "BMAD+ — Augmented AI-Driven Development Framework with multi-role agents, autopilot, and parallel execution",
6
6
  "keywords": [
7
7
  "bmad",