bmad-plus 0.3.3 → 0.4.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +34 -0
- package/README.md +12 -56
- package/osint-agent-package/skills/bmad-osint-investigate/osint/SKILL.md +452 -452
- package/osint-agent-package/skills/bmad-osint-investigate/osint/assets/dossier-template.md +116 -116
- package/osint-agent-package/skills/bmad-osint-investigate/osint/references/content-extraction.md +100 -100
- package/osint-agent-package/skills/bmad-osint-investigate/osint/references/platforms.md +130 -130
- package/osint-agent-package/skills/bmad-osint-investigate/osint/references/psychoprofile.md +69 -69
- package/osint-agent-package/skills/bmad-osint-investigate/osint/references/tools.md +281 -281
- package/osint-agent-package/skills/bmad-osint-investigate/osint/scripts/mcp-client.py +136 -136
- package/package.json +1 -1
- package/readme-international/README.de.md +1 -1
- package/readme-international/README.es.md +1 -1
- package/readme-international/README.fr.md +1 -1
- package/tools/cli/commands/install.js +74 -46
- package/tools/cli/i18n.js +501 -0
- package/oveanet-pack/animated-website/DEPLOYMENT.md +0 -104
- package/oveanet-pack/animated-website/README.md +0 -63
- package/oveanet-pack/animated-website/agent/animated-website-agent.md +0 -325
- package/oveanet-pack/animated-website/agent.yaml +0 -63
- package/oveanet-pack/animated-website/templates/animated-website-workflow.md +0 -55
- package/oveanet-pack/seo-audit-360/DEPLOYMENT.md +0 -115
- package/oveanet-pack/seo-audit-360/README.md +0 -66
- package/oveanet-pack/seo-audit-360/SKILL.md +0 -171
- package/oveanet-pack/seo-audit-360/agent/seo-chief.md +0 -294
- package/oveanet-pack/seo-audit-360/agent/seo-judge.md +0 -241
- package/oveanet-pack/seo-audit-360/agent/seo-scout.md +0 -171
- package/oveanet-pack/seo-audit-360/agent.yaml +0 -70
- package/oveanet-pack/seo-audit-360/checklist.md +0 -140
- package/oveanet-pack/seo-audit-360/hooks/seo-check.sh +0 -95
- package/oveanet-pack/seo-audit-360/pagespeed-playbook.md +0 -320
- package/oveanet-pack/seo-audit-360/ref/audit-schema.json +0 -187
- package/oveanet-pack/seo-audit-360/ref/cwv-thresholds.md +0 -87
- package/oveanet-pack/seo-audit-360/ref/eeat-criteria.md +0 -123
- package/oveanet-pack/seo-audit-360/ref/geo-signals.md +0 -167
- package/oveanet-pack/seo-audit-360/ref/hreflang-rules.md +0 -153
- package/oveanet-pack/seo-audit-360/ref/quality-gates.md +0 -133
- package/oveanet-pack/seo-audit-360/ref/schema-catalog.md +0 -91
- package/oveanet-pack/seo-audit-360/ref/schema-templates.json +0 -356
- package/oveanet-pack/seo-audit-360/requirements.txt +0 -14
- package/oveanet-pack/seo-audit-360/scripts/__pycache__/seo_crawl.cpython-314.pyc +0 -0
- package/oveanet-pack/seo-audit-360/scripts/__pycache__/seo_parse.cpython-314.pyc +0 -0
- package/oveanet-pack/seo-audit-360/scripts/install.ps1 +0 -53
- package/oveanet-pack/seo-audit-360/scripts/install.sh +0 -48
- package/oveanet-pack/seo-audit-360/scripts/seo_apis.py +0 -464
- package/oveanet-pack/seo-audit-360/scripts/seo_crawl.py +0 -282
- package/oveanet-pack/seo-audit-360/scripts/seo_fetch.py +0 -231
- package/oveanet-pack/seo-audit-360/scripts/seo_parse.py +0 -255
- package/oveanet-pack/seo-audit-360/scripts/seo_report.py +0 -403
- package/oveanet-pack/seo-audit-360/scripts/seo_screenshot.py +0 -202
- package/oveanet-pack/seo-audit-360/templates/seo-audit-workflow.md +0 -241
- package/oveanet-pack/seo-audit-360/tests/__pycache__/test_crawl.cpython-314-pytest-9.0.2.pyc +0 -0
- package/oveanet-pack/seo-audit-360/tests/__pycache__/test_parse.cpython-314-pytest-9.0.2.pyc +0 -0
- package/oveanet-pack/seo-audit-360/tests/fixtures/sample_page.html +0 -62
- package/oveanet-pack/seo-audit-360/tests/test_apis.py +0 -75
- package/oveanet-pack/seo-audit-360/tests/test_crawl.py +0 -121
- package/oveanet-pack/seo-audit-360/tests/test_fetch.py +0 -70
- package/oveanet-pack/seo-audit-360/tests/test_parse.py +0 -184
- package/oveanet-pack/universal-backup/DEPLOYMENT.md +0 -80
- package/oveanet-pack/universal-backup/README.md +0 -58
- package/oveanet-pack/universal-backup/agent/backup-agent.md +0 -71
- package/oveanet-pack/universal-backup/agent.yaml +0 -45
- package/oveanet-pack/universal-backup/templates/backup-workflow.md +0 -51
```diff
--- package/oveanet-pack/seo-audit-360/tests/test_crawl.py
+++ /dev/null
@@ -1,121 +0,0 @@
-"""
-Tests for seo_crawl.py — URL normalization, link extraction, depth limiting.
-
-Author: Laurent Rochetta
-"""
-
-import sys
-import os
-sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", "scripts"))
-
-from seo_crawl import SEOCrawler
-
-
-class TestURLNormalization:
-    """Test URL normalization for deduplication."""
-
-    def setup_method(self):
-        self.crawler = SEOCrawler("https://example.com", max_depth=2, max_pages=25)
-
-    def test_strips_trailing_slash(self):
-        assert self.crawler.normalize_url("https://example.com/page/") == "https://example.com/page"
-
-    def test_preserves_root(self):
-        assert self.crawler.normalize_url("https://example.com/") == "https://example.com/"
-
-    def test_normalizes_scheme(self):
-        result = self.crawler.normalize_url("https://example.com/page")
-        assert result.startswith("https://")
-
-    def test_deduplicates(self):
-        url1 = self.crawler.normalize_url("https://example.com/page/")
-        url2 = self.crawler.normalize_url("https://example.com/page")
-        assert url1 == url2
-
-
-class TestInternalDetection:
-    def setup_method(self):
-        self.crawler = SEOCrawler("https://example.com", max_depth=2, max_pages=25)
-
-    def test_internal_url(self):
-        assert self.crawler.is_internal("https://example.com/about") is True
-
-    def test_external_url(self):
-        assert self.crawler.is_internal("https://other.com/page") is False
-
-    def test_subdomain_is_external(self):
-        assert self.crawler.is_internal("https://blog.example.com/post") is False
-
-
-class TestLinkExtraction:
-    def setup_method(self):
-        self.crawler = SEOCrawler("https://example.com", max_depth=2, max_pages=25)
-
-    def test_extracts_internal_links(self):
-        html = '''
-        <a href="/about">About</a>
-        <a href="https://example.com/contact">Contact</a>
-        '''
-        links = self.crawler.extract_links(html, "https://example.com/")
-        assert len(links) == 2
-
-    def test_ignores_external_links(self):
-        html = '<a href="https://other.com/page">External</a>'
-        links = self.crawler.extract_links(html, "https://example.com/")
-        assert len(links) == 0
-
-    def test_ignores_anchors(self):
-        html = '<a href="#section">Anchor</a>'
-        links = self.crawler.extract_links(html, "https://example.com/")
-        assert len(links) == 0
-
-    def test_ignores_javascript(self):
-        html = '<a href="javascript:void(0)">JS Link</a>'
-        links = self.crawler.extract_links(html, "https://example.com/")
-        assert len(links) == 0
-
-    def test_ignores_mailto(self):
-        html = '<a href="mailto:test@example.com">Email</a>'
-        links = self.crawler.extract_links(html, "https://example.com/")
-        assert len(links) == 0
-
-
-class TestTitleExtraction:
-    def setup_method(self):
-        self.crawler = SEOCrawler("https://example.com")
-
-    def test_extracts_title(self):
-        html = "<html><head><title>Test Page</title></head><body></body></html>"
-        assert self.crawler.extract_title(html) == "Test Page"
-
-    def test_missing_title(self):
-        html = "<html><body></body></html>"
-        assert self.crawler.extract_title(html) is None
-
-
-class TestWordCount:
-    def setup_method(self):
-        self.crawler = SEOCrawler("https://example.com")
-
-    def test_counts_visible_words(self):
-        html = "<html><body><p>This is a test with seven words.</p></body></html>"
-        assert self.crawler.count_words(html) == 7
-
-    def test_excludes_script_content(self):
-        html = '<html><body><p>Visible</p><script>var hidden = true;</script></body></html>'
-        count = self.crawler.count_words(html)
-        assert count == 1  # Only "Visible"
-
-
-class TestCrawlerConfig:
-    def test_max_pages_respected(self):
-        crawler = SEOCrawler("https://example.com", max_pages=5)
-        assert crawler.max_pages == 5
-
-    def test_max_depth_respected(self):
-        crawler = SEOCrawler("https://example.com", max_depth=1)
-        assert crawler.max_depth == 1
-
-    def test_base_domain_extracted(self):
-        crawler = SEOCrawler("https://www.example.com/page")
-        assert crawler.base_domain == "www.example.com"
```
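The deleted tests pin down the crawler's contract: trailing slashes are stripped except at the bare root, and subdomains count as external. For orientation, a minimal sketch that would satisfy the normalization and internal-detection tests above, assuming nothing about the real `seo_crawl.py` beyond what the tests assert:

```python
from urllib.parse import urlparse

class SEOCrawler:
    """Hypothetical sketch; behavior inferred from the deleted tests, not from seo_crawl.py."""

    def __init__(self, base_url: str, max_depth: int = 3, max_pages: int = 100):
        self.max_depth = max_depth
        self.max_pages = max_pages
        self.base_domain = urlparse(base_url).netloc  # "www.example.com" kept verbatim

    def normalize_url(self, url: str) -> str:
        parsed = urlparse(url)
        path = parsed.path
        # Strip a trailing slash for deduplication, but preserve the bare root "/".
        if path.endswith("/") and path != "/":
            path = path.rstrip("/")
        return f"{parsed.scheme}://{parsed.netloc}{path}"

    def is_internal(self, url: str) -> bool:
        # Exact host comparison, so subdomains like blog.example.com count as external.
        return urlparse(url).netloc == self.base_domain
```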
```diff
--- package/oveanet-pack/seo-audit-360/tests/test_fetch.py
+++ /dev/null
@@ -1,70 +0,0 @@
-"""
-Tests for seo_fetch.py — SSRF protection, URL handling, error cases.
-
-Author: Laurent Rochetta
-"""
-
-import sys
-import os
-sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", "scripts"))
-
-from seo_fetch import is_safe_url, fetch_page
-
-
-class TestSSRFProtection:
-    """Test SSRF prevention blocks private/loopback/reserved IPs."""
-
-    def test_blocks_localhost(self):
-        assert is_safe_url("http://127.0.0.1/admin") is False
-
-    def test_blocks_private_10(self):
-        assert is_safe_url("http://10.0.0.1/secret") is False
-
-    def test_blocks_private_192(self):
-        assert is_safe_url("http://192.168.1.1/") is False
-
-    def test_blocks_private_172(self):
-        assert is_safe_url("http://172.16.0.1/") is False
-
-    def test_allows_public_ip(self):
-        assert is_safe_url("https://93.184.216.34/") is True
-
-    def test_allows_public_domain(self):
-        assert is_safe_url("https://example.com/") is True
-
-    def test_blocks_empty_hostname(self):
-        assert is_safe_url("http:///nohost") is False
-
-    def test_blocks_zero_ip(self):
-        assert is_safe_url("http://0.0.0.0/") is False
-
-
-class TestFetchPage:
-    """Test fetch_page function behavior."""
-
-    def test_normalizes_url_without_scheme(self):
-        result = fetch_page("example.com", timeout=5)
-        assert result["url"] == "example.com"
-        # Should have attempted https://example.com
-
-    def test_blocks_invalid_scheme(self):
-        result = fetch_page("ftp://example.com/file")
-        assert result["error"] is not None
-        assert "Invalid URL scheme" in result["error"]
-
-    def test_blocks_ssrf(self):
-        result = fetch_page("http://127.0.0.1/admin")
-        assert result["error"] is not None
-        assert "Blocked" in result["error"]
-
-    def test_result_structure(self):
-        """Verify the result dict has all expected keys."""
-        result = fetch_page("https://example.com", timeout=5)
-        expected_keys = {"url", "final_url", "status_code", "content", "headers",
-                         "redirect_chain", "content_length", "response_time_ms", "error"}
-        assert expected_keys == set(result.keys())
-
-    def test_timeout_returns_error(self):
-        # Use a non-routable IP to force timeout
-        result = fetch_page("http://192.0.2.1/", timeout=1)
-        assert result["error"] is not None
```
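The SSRF tests above define a checkable allow/deny contract for `is_safe_url`. One way to meet it with the standard-library `socket` and `ipaddress` modules follows; this is a sketch, not the package's actual `seo_fetch.py`:

```python
import ipaddress
import socket
from urllib.parse import urlparse

def is_safe_url(url: str) -> bool:
    """Block private, loopback, link-local, reserved, and unspecified targets."""
    parsed = urlparse(url)
    if parsed.scheme not in ("http", "https") or not parsed.hostname:
        return False
    try:
        # Resolve the hostname so DNS names pointing at internal IPs are caught too.
        addresses = {info[4][0] for info in socket.getaddrinfo(parsed.hostname, None)}
    except socket.gaierror:
        return False
    for addr in addresses:
        # Drop any IPv6 zone suffix ("%eth0") before parsing.
        ip = ipaddress.ip_address(addr.split("%")[0])
        if (ip.is_private or ip.is_loopback or ip.is_link_local
                or ip.is_reserved or ip.is_unspecified):
            return False
    return True
```

Note that resolving once and fetching later still leaves a DNS-rebinding window; a hardened fetcher would pin the resolved address for the actual request.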
```diff
--- package/oveanet-pack/seo-audit-360/tests/test_parse.py
+++ /dev/null
@@ -1,184 +0,0 @@
-"""
-Tests for seo_parse.py — HTML parsing and SEO element extraction.
-
-Author: Laurent Rochetta
-"""
-
-import json
-import os
-import sys
-
-sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", "scripts"))
-
-from seo_parse import parse_html
-
-FIXTURES_DIR = os.path.join(os.path.dirname(__file__), "fixtures")
-
-
-def load_fixture(name: str) -> str:
-    with open(os.path.join(FIXTURES_DIR, name), "r", encoding="utf-8") as f:
-        return f.read()
-
-
-class TestTitleParsing:
-    def test_extracts_title(self):
-        result = parse_html(load_fixture("sample_page.html"))
-        assert result["title"] == "SEO Test Page — BMAD+ Fixture"
-
-    def test_title_length(self):
-        result = parse_html(load_fixture("sample_page.html"))
-        assert result["title_length"] == len("SEO Test Page — BMAD+ Fixture")
-
-    def test_missing_title(self):
-        result = parse_html("<html><body><p>No title</p></body></html>")
-        assert result["title"] is None
-        assert result["title_length"] == 0
-
-
-class TestMetaTags:
-    def test_extracts_description(self):
-        result = parse_html(load_fixture("sample_page.html"))
-        assert "test page" in result["meta_description"].lower()
-
-    def test_extracts_robots(self):
-        result = parse_html(load_fixture("sample_page.html"))
-        assert result["meta_robots"] == "index, follow"
-
-    def test_extracts_viewport(self):
-        result = parse_html(load_fixture("sample_page.html"))
-        assert "width=device-width" in result["meta_viewport"]
-
-    def test_missing_description(self):
-        result = parse_html("<html><head><title>T</title></head><body></body></html>")
-        assert result["meta_description"] is None
-
-
-class TestCanonical:
-    def test_extracts_canonical(self):
-        result = parse_html(load_fixture("sample_page.html"))
-        assert result["canonical"] == "https://example.com/test"
-
-    def test_missing_canonical(self):
-        result = parse_html("<html><body></body></html>")
-        assert result["canonical"] is None
-
-
-class TestHeadings:
-    def test_h1_count(self):
-        result = parse_html(load_fixture("sample_page.html"))
-        assert len(result["headings"]["h1"]) == 1
-
-    def test_h2_count(self):
-        result = parse_html(load_fixture("sample_page.html"))
-        assert len(result["headings"]["h2"]) == 2
-
-    def test_h3_count(self):
-        result = parse_html(load_fixture("sample_page.html"))
-        assert len(result["headings"]["h3"]) == 1
-
-    def test_multiple_h1_detection(self):
-        html = "<html><body><h1>First</h1><h1>Second</h1></body></html>"
-        result = parse_html(html)
-        assert len(result["headings"]["h1"]) == 2
-
-
-class TestImages:
-    def test_image_count(self):
-        result = parse_html(load_fixture("sample_page.html"))
-        assert len(result["images"]) == 3
-
-    def test_image_with_alt(self):
-        result = parse_html(load_fixture("sample_page.html"))
-        hero = [i for i in result["images"] if "hero" in i["src"]]
-        assert len(hero) == 1
-        assert hero[0]["has_alt"] is True
-        assert hero[0]["alt"] == "Hero image for testing"
-
-    def test_image_without_alt(self):
-        result = parse_html(load_fixture("sample_page.html"))
-        no_alt = [i for i in result["images"] if "no-alt" in i["src"]]
-        assert len(no_alt) == 1
-        assert no_alt[0]["has_alt"] is False
-
-    def test_image_with_empty_alt(self):
-        result = parse_html(load_fixture("sample_page.html"))
-        empty = [i for i in result["images"] if "empty-alt" in i["src"]]
-        assert len(empty) == 1
-        assert empty[0]["has_alt"] is True
-        assert empty[0]["alt_empty"] is True
-
-
-class TestLinks:
-    def test_internal_links(self):
-        result = parse_html(load_fixture("sample_page.html"), base_url="https://example.com")
-        assert len(result["links"]["internal"]) >= 2
-
-    def test_external_links(self):
-        result = parse_html(load_fixture("sample_page.html"), base_url="https://example.com")
-        assert len(result["links"]["external"]) >= 1
-
-    def test_nofollow_detection(self):
-        result = parse_html(load_fixture("sample_page.html"), base_url="https://example.com")
-        nofollow = [l for l in result["links"]["external"] if l["is_nofollow"]]
-        assert len(nofollow) >= 1
-
-
-class TestSchema:
-    def test_schema_block_count(self):
-        result = parse_html(load_fixture("sample_page.html"))
-        assert len(result["schema_blocks"]) == 2
-
-    def test_schema_types(self):
-        result = parse_html(load_fixture("sample_page.html"))
-        types = [s["type"] for s in result["schema_blocks"]]
-        assert "Organization" in types
-        assert "BreadcrumbList" in types
-
-    def test_schema_parse_error(self):
-        html = '<html><body><script type="application/ld+json">{invalid json}</script></body></html>'
-        result = parse_html(html)
-        assert len(result["schema_blocks"]) == 1
-        assert result["schema_blocks"][0]["type"] == "PARSE_ERROR"
-
-
-class TestOpenGraph:
-    def test_og_title(self):
-        result = parse_html(load_fixture("sample_page.html"))
-        assert result["open_graph"].get("og:title") == "SEO Test Page"
-
-    def test_og_type(self):
-        result = parse_html(load_fixture("sample_page.html"))
-        assert result["open_graph"].get("og:type") == "website"
-
-
-class TestHreflang:
-    def test_hreflang_count(self):
-        result = parse_html(load_fixture("sample_page.html"))
-        assert len(result["hreflang"]) == 3  # en, fr, x-default
-
-    def test_hreflang_languages(self):
-        result = parse_html(load_fixture("sample_page.html"))
-        langs = [h["lang"] for h in result["hreflang"]]
-        assert "en" in langs
-        assert "fr" in langs
-        assert "x-default" in langs
-
-
-class TestContentMetrics:
-    def test_word_count_positive(self):
-        result = parse_html(load_fixture("sample_page.html"))
-        assert result["word_count"] > 30
-
-    def test_text_ratio_range(self):
-        result = parse_html(load_fixture("sample_page.html"))
-        assert 0 < result["text_ratio"] < 1
-
-    def test_has_lang_attr(self):
-        result = parse_html(load_fixture("sample_page.html"))
-        assert result["has_lang_attr"] is True
-        assert result["lang"] == "en"
-
-    def test_html_size(self):
-        html = load_fixture("sample_page.html")
-        result = parse_html(html)
-        assert result["html_size_bytes"] == len(html.encode("utf-8"))
```
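The schema tests describe a forgiving JSON-LD extractor: every `<script type="application/ld+json">` tag yields a block, and invalid JSON degrades to a `PARSE_ERROR` marker rather than raising. A sketch under those assumptions, using BeautifulSoup as the HTML parser (an assumption; the removed `parse_html` may be structured differently):

```python
import json
from bs4 import BeautifulSoup  # pip install beautifulsoup4

def extract_schema_blocks(html: str) -> list:
    """Collect JSON-LD structured-data blocks; record parse failures instead of raising."""
    soup = BeautifulSoup(html, "html.parser")
    blocks = []
    for tag in soup.find_all("script", type="application/ld+json"):
        try:
            data = json.loads(tag.string or "")
            # Top-level JSON-LD arrays are possible; this sketch only types dicts.
            schema_type = data.get("@type", "UNKNOWN") if isinstance(data, dict) else "UNKNOWN"
            blocks.append({"type": schema_type, "data": data})
        except json.JSONDecodeError:
            blocks.append({"type": "PARSE_ERROR", "data": None})
    return blocks
```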
````diff
--- package/oveanet-pack/universal-backup/DEPLOYMENT.md
+++ /dev/null
@@ -1,80 +0,0 @@
-# 🚀 Deployment Guide — Universal Backup Agent
-
-## Method 1: BMAD integration (recommended)
-
-### Step 1 — Copy the agent
-Copy `agent/backup-agent.md` into your BMAD project's agents folder:
-```
-your-project/
-├── _bmad/
-│   └── agents/
-│       └── backup-agent.md  ← copy here
-```
-
-### Step 2 — Declare it in the manifest
-Add this line to `_bmad_config/agent-manifest.csv`:
-```csv
-backup-agent,Universal Backup Manager,backup,Manages timestamped ZIP backups of the project,_bmad/agents/backup-agent.md
-```
-
-### Step 3 — Add the Gemini workflow
-Copy `templates/backup-workflow.md` to:
-```
-your-project/.agent/workflows/backup-project.md
-```
-**Important:** Replace `%PROJECT_ROOT%` with the project's actual path.
-
----
-
-## Method 2: Standalone (without BMAD)
-
-### Option A — Gemini workflow only
-1. Create `.agent/workflows/` in your project
-2. Copy `templates/backup-workflow.md` → `.agent/workflows/backup-project.md`
-3. Replace `%PROJECT_ROOT%` with the actual path
-4. Invoke it with `/backup-project`
-
-### Option B — Direct command
-Paste this command into your terminal:
-
-**Windows:**
-```powershell
-$timestamp = Get-Date -Format "yyyyMMdd_HHmmss"
-$projectRoot = "C:\path\to\your\project"
-$projectName = Split-Path $projectRoot -Leaf
-$backupDir = "$projectRoot\backups"
-if (!(Test-Path $backupDir)) { New-Item -ItemType Directory -Path $backupDir -Force }
-Get-ChildItem $projectRoot -Exclude "backups","node_modules",".git","vendor","__pycache__" |
-  Compress-Archive -DestinationPath "$backupDir\${projectName}_backup_$timestamp.zip" -Force
-```
-
-**Linux/Mac:**
-```bash
-TIMESTAMP=$(date +%Y%m%d_%H%M%S)
-PROJECT_ROOT="/path/to/your/project"
-PROJECT_NAME=$(basename "$PROJECT_ROOT")
-mkdir -p "$PROJECT_ROOT/backups"
-cd "$PROJECT_ROOT" && zip -r "backups/${PROJECT_NAME}_backup_$TIMESTAMP.zip" . \
-  -x "backups/*" "node_modules/*" ".git/*" "vendor/*" "__pycache__/*"
-```
-
----
-
-## 📁 Default exclusions
-
-| Folder/Pattern | Reason |
-|----------------|--------|
-| `backups/` | Avoids recursive backups of backups |
-| `node_modules/` | Dependencies, reinstalled with `npm install` |
-| `.git/` | Git history, heavy and unnecessary in a backup |
-| `vendor/` | PHP/Composer dependencies |
-| `__pycache__/` | Compiled Python cache |
-| `*.backup_*` | Old individual backup files |
-
-## 🔧 Customization
-
-To exclude additional folders, add them to the `Exclude` list:
-```powershell
-# Example: also exclude "storage" and "tmp"
-Get-ChildItem $projectRoot -Exclude "backups","node_modules",".git","vendor","__pycache__","storage","tmp"
-```
````
````diff
--- package/oveanet-pack/universal-backup/README.md
+++ /dev/null
@@ -1,58 +0,0 @@
-# 🗂️ Universal Project Backup Agent
-
-Universal BMAD agent for creating timestamped ZIP backups of any web project.
-
-## 📁 Structure
-
-```
-Universal Backup Agent/
-├── agent/
-│   └── backup-agent.md       # BMAD agent
-├── templates/
-│   └── backup-workflow.md    # Ready-to-copy Gemini workflow
-├── README.md                 # This file
-└── DEPLOYMENT.md             # Deployment guide
-```
-
-## 🚀 Quick start
-
-### Slash command
-```
-/backup-project
-```
-
-### Manual command (PowerShell)
-```powershell
-$timestamp = Get-Date -Format "yyyyMMdd_HHmmss"
-$projectRoot = "PROJECT_PATH"
-$backupDir = "$projectRoot\backups"
-if (!(Test-Path $backupDir)) { New-Item -ItemType Directory -Path $backupDir -Force }
-Get-ChildItem $projectRoot -Exclude "backups","node_modules",".git","vendor","__pycache__","*.backup_*" |
-  Compress-Archive -DestinationPath "$backupDir\backup_$timestamp.zip" -Force
-```
-
-### Manual command (Bash/Linux)
-```bash
-TIMESTAMP=$(date +%Y%m%d_%H%M%S)
-PROJECT_ROOT="PROJECT_PATH"
-mkdir -p "$PROJECT_ROOT/backups"
-cd "$PROJECT_ROOT" && zip -r "backups/backup_$TIMESTAMP.zip" . \
-  -x "backups/*" "node_modules/*" ".git/*" "vendor/*" "__pycache__/*" "*.backup_*"
-```
-
-## ⚙️ Features
-
-- **Automatic timestamping**: every backup is named with date + time
-- **Smart exclusions**: skips `node_modules`, `.git`, `vendor`, `backups/`, `__pycache__`
-- **Cross-platform**: PowerShell (Windows) and Bash (Linux/Mac)
-- **BMAD-compatible**: installable as an agent in the BMAD framework
-- **Gemini workflow**: `.md` file ready to copy into `.agent/workflows/`
-
-## 📋 Compatible projects
-
-Works with any type of project:
-- PHP / Laravel / Symfony
-- Node.js / Next.js / Vite
-- Python / Django / Flask
-- Static HTML/CSS/JS
-- WordPress / CMS
````
````diff
--- package/oveanet-pack/universal-backup/agent/backup-agent.md
+++ /dev/null
@@ -1,71 +0,0 @@
-# Agent: Universal Backup Manager
-
-## Persona
-You are an agent specialized in backing up and restoring projects. You create timestamped ZIP archives, manage backup rotation, and keep backups traceable.
-
-## Activation
-1. Identify the project root folder
-2. Determine the operating system (Windows/Linux/Mac)
-3. Check whether the `backups/` folder exists
-4. Run the backup
-
-## Menu
-
-### 1. `/backup` — Full backup
-Creates a timestamped ZIP of the entire project, excluding: `node_modules`, `.git`, `vendor`, `backups/`, `__pycache__`, `*.backup_*`, `dist/node_modules`.
-
-**PowerShell (Windows):**
-```powershell
-$timestamp = Get-Date -Format "yyyyMMdd_HHmmss"
-$projectRoot = "%PROJECT_ROOT%"
-$projectName = Split-Path $projectRoot -Leaf
-$backupDir = "$projectRoot\backups"
-if (!(Test-Path $backupDir)) { New-Item -ItemType Directory -Path $backupDir -Force }
-Get-ChildItem $projectRoot -Exclude "backups","node_modules",".git","vendor","__pycache__","*.backup_*" |
-  Compress-Archive -DestinationPath "$backupDir\${projectName}_backup_$timestamp.zip" -Force
-$size = (Get-Item "$backupDir\${projectName}_backup_$timestamp.zip").Length / 1MB
-Write-Output "✅ Backup: ${projectName}_backup_$timestamp.zip ($([math]::Round($size,2)) MB)"
-```
-
-**Bash (Linux/Mac):**
-```bash
-TIMESTAMP=$(date +%Y%m%d_%H%M%S)
-PROJECT_ROOT="%PROJECT_ROOT%"
-PROJECT_NAME=$(basename "$PROJECT_ROOT")
-mkdir -p "$PROJECT_ROOT/backups"
-cd "$PROJECT_ROOT" && zip -r "backups/${PROJECT_NAME}_backup_$TIMESTAMP.zip" . \
-  -x "backups/*" "node_modules/*" ".git/*" "vendor/*" "__pycache__/*" "*.backup_*"
-echo "✅ Backup: ${PROJECT_NAME}_backup_$TIMESTAMP.zip"
-```
-
-### 2. `/backup-list` — List backups
-```powershell
-Get-ChildItem "$projectRoot\backups" -Filter "*.zip" |
-  Sort-Object LastWriteTime -Descending |
-  Format-Table Name, @{N="Size(MB)";E={[math]::Round($_.Length/1MB,2)}}, LastWriteTime -AutoSize
-```
-
-### 3. `/backup-restore` — Restore a backup
-```powershell
-$backupFile = "BACKUP_FILE_NAME.zip"
-$restoreDir = "$projectRoot\restore_$((Get-Date).ToString('yyyyMMdd_HHmmss'))"
-Expand-Archive -Path "$projectRoot\backups\$backupFile" -DestinationPath $restoreDir -Force
-Write-Output "✅ Restored to: $restoreDir"
-```
-
-### 4. `/backup-clean` — Rotation (keep the last N)
-```powershell
-$keep = 5
-Get-ChildItem "$projectRoot\backups" -Filter "*.zip" |
-  Sort-Object LastWriteTime -Descending |
-  Select-Object -Skip $keep |
-  Remove-Item -Force
-Write-Output "✅ Kept last $keep backups, removed older ones"
-```
-
-## Rules
-1. **Always ask for confirmation** before deleting backups
-2. **Always display** the size of the created ZIP
-3. **Always exclude** large, non-essential folders
-4. **Automatically adapt** commands to the detected operating system
-5. **Replace `%PROJECT_ROOT%`** with the project's actual path
````
```diff
--- package/oveanet-pack/universal-backup/agent.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-name: universal-backup
-version: 1.0.0
-title: "Universal Backup"
-description: "Cross-platform timestamped ZIP backup with smart exclusions"
-author: Laurent ROCHETTA AI
-icon: "🗂️"
-tags: [backup, zip, devops, utilities]
-triggers:
-  - "backup"
-  - "backup project"
-  - "create backup"
-  - "save project"
-  - "zip project"
-requires:
-  tools: []
-  scripts: []
-commands:
-  - id: backup
-    name: "Create Backup"
-    description: "Creates a timestamped ZIP backup"
-  - id: list
-    name: "List Backups"
-    description: "Lists existing backups"
-  - id: restore
-    name: "Restore Backup"
-    description: "Restores a backup"
-  - id: clean
-    name: "Clean Backups"
-    description: "Deletes old backups"
-platforms:
-  bmad:
-    target: "_bmad/core/agents/"
-    file: "backup-agent.md"
-  claude:
-    target: ".claude/skills/"
-    skillName: "backup"
-  gemini:
-    target: ".agent/workflows/"
-    file: "backup-project.md"
-  opencode:
-    target: ".opencode/agents/"
-    file: "backup-agent.md"
-  codex:
-    target: ".codex/agents/"
-    file: "backup-agent.md"
```
````diff
--- package/oveanet-pack/universal-backup/templates/backup-workflow.md
+++ /dev/null
@@ -1,51 +0,0 @@
----
-description: Create a full ZIP backup of the current project
----
-
-# Backup Project
-
-Creates a timestamped ZIP archive of the entire project in a `backups/` folder at the project root.
-
-## Auto-detect
-
-The agent should:
-1. Detect the project root from the active workspace
-2. Detect the OS (PowerShell for Windows, Bash for Linux/Mac)
-3. Name the backup using the project folder name
-
-## Steps
-
-// turbo-all
-
-1. Create the backup ZIP archive:
-
-### Windows (PowerShell)
-```powershell
-$timestamp = Get-Date -Format "yyyyMMdd_HHmmss"
-$projectRoot = "%PROJECT_ROOT%"
-$projectName = Split-Path $projectRoot -Leaf
-$backupDir = "$projectRoot\backups"
-if (!(Test-Path $backupDir)) { New-Item -ItemType Directory -Path $backupDir -Force }
-Get-ChildItem $projectRoot -Exclude "backups","node_modules",".git","vendor","__pycache__","*.backup_*" |
-  Compress-Archive -DestinationPath "$backupDir\${projectName}_backup_$timestamp.zip" -Force
-$size = (Get-Item "$backupDir\${projectName}_backup_$timestamp.zip").Length / 1MB
-Write-Output "Backup: ${projectName}_backup_$timestamp.zip ($([math]::Round($size,2)) MB)"
-```
-
-### Linux/Mac (Bash)
-```bash
-TIMESTAMP=$(date +%Y%m%d_%H%M%S)
-PROJECT_ROOT="%PROJECT_ROOT%"
-PROJECT_NAME=$(basename "$PROJECT_ROOT")
-mkdir -p "$PROJECT_ROOT/backups"
-cd "$PROJECT_ROOT" && zip -r "backups/${PROJECT_NAME}_backup_$TIMESTAMP.zip" . \
-  -x "backups/*" "node_modules/*" ".git/*" "vendor/*" "__pycache__/*" "*.backup_*"
-```
-
-2. Confirm the backup was created and report the file name and size.
-
-## Notes
-
-- Replace `%PROJECT_ROOT%` with the actual project root path
-- Excludes: `backups/`, `node_modules/`, `.git/`, `vendor/`, `__pycache__/`, `*.backup_*`
-- Backups are stored in `{project}/backups/` with naming: `{project_name}_backup_YYYYMMDD_HHMMSS.zip`
````
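The removed pack maintained parallel PowerShell and Bash implementations of the same backup. For reference, the equivalent logic fits in one cross-platform Python script using only the standard library; this is a hypothetical sketch, not something the package shipped:

```python
import fnmatch
import os
import zipfile
from datetime import datetime
from pathlib import Path

# Same exclusion list as the shell snippets above.
EXCLUDED_DIRS = {"backups", "node_modules", ".git", "vendor", "__pycache__"}

def backup_project(project_root: str = ".") -> Path:
    """Create <name>_backup_YYYYMMDD_HHMMSS.zip under <root>/backups/."""
    root = Path(project_root).resolve()
    backup_dir = root / "backups"
    backup_dir.mkdir(exist_ok=True)
    stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    archive = backup_dir / f"{root.name}_backup_{stamp}.zip"
    with zipfile.ZipFile(archive, "w", zipfile.ZIP_DEFLATED) as zf:
        for dirpath, dirnames, filenames in os.walk(root):
            # Prune excluded directories in place so os.walk never descends into them
            # (this also keeps the archive from swallowing itself via backups/).
            dirnames[:] = [d for d in dirnames if d not in EXCLUDED_DIRS]
            for name in filenames:
                if fnmatch.fnmatch(name, "*.backup_*"):
                    continue  # mirrors the *.backup_* exclusion
                path = Path(dirpath) / name
                zf.write(path, path.relative_to(root))
    size_mb = archive.stat().st_size / 1_000_000
    print(f"Backup: {archive.name} ({size_mb:.2f} MB)")
    return archive

if __name__ == "__main__":
    backup_project(".")
```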