@flitzrrr/agent-skills 1.0.3 → 1.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.cursorrules +2 -2
- package/.github/copilot-instructions.md +59 -0
- package/.lovable +1 -1
- package/AGENTS.md +2 -2
- package/CHEATSHEET.md +84 -86
- package/CLAUDE.md +2 -2
- package/LICENSE +27 -0
- package/README.md +191 -99
- package/bin/build-catalog.js +208 -0
- package/bin/cli.js +7 -3
- package/bin/rebuild-symlinks.js +161 -0
- package/bin/sync-docs.js +147 -0
- package/bin/sync-skills.sh +17 -0
- package/bin/test-cli.js +115 -0
- package/bin/update-wiki.js +102 -0
- package/package.json +9 -2
- package/skills/dispatch-parallel-agents/skill.md +95 -0
- package/skills/execute-work-package/SKILL.md +300 -0
- package/skills/execute-work-package/scripts/start-l4l-oci.sh +75 -0
- package/skills/execute-work-package/tpl-execution-blueprint.md +39 -0
- package/skills/execute-work-package/tpl-execution-digest.md +24 -0
- package/skills/execute-work-package/tpl-implementer-execute-prompt.md +57 -0
- package/skills/execute-work-package/tpl-implementer-preflight-prompt.md +66 -0
- package/skills/product-description-seo/CROSS-SELL.md +31 -0
- package/skills/product-description-seo/KEYWORDS.md +35 -0
- package/skills/product-description-seo/SKILL.md +361 -0
- package/skills/product-description-seo/scripts/analyze_catalog.py +136 -0
- package/skills/product-description-seo/scripts/check_quality.py +204 -0
- package/skills/product-description-seo/scripts/extract_category.py +88 -0
- package/skills/product-description-seo/scripts/track_progress.py +140 -0
- package/skills/product-description-seo/scripts/update_catalog.py +80 -0
- package/skills/product-description-seo/scripts/validate_json.py +87 -0
- package/skills/systematic-debugging/skill.md +87 -0
- package/skills/tob-gh-cli/SKILL.md +71 -0
|
@@ -0,0 +1,204 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""Quality-check product descriptions against SEO requirements.
|
|
3
|
+
|
|
4
|
+
Usage:
|
|
5
|
+
python check_quality.py <updates.json> [--strict] [--min-words 200] [--max-words 350]
|
|
6
|
+
|
|
7
|
+
Checks each description against 8 criteria:
|
|
8
|
+
1. Word count within range
|
|
9
|
+
2. 4-paragraph structure
|
|
10
|
+
3. Focus keyword in first sentence
|
|
11
|
+
4. No banned superlatives
|
|
12
|
+
5. Cross-sell reference present
|
|
13
|
+
6. Formal address (no informal pronouns)
|
|
14
|
+
7. Meta-description-ready opening
|
|
15
|
+
8. Plain text only (no HTML/Markdown)
|
|
16
|
+
"""
|
|
17
|
+
|
|
18
|
+
import json
|
|
19
|
+
import sys
|
|
20
|
+
import re
|
|
21
|
+
import argparse
|
|
22
|
+
|
|
23
|
+
# Superlatives that violate the sober product-copy tone.
# Matched as lower-case substrings of the description text.
DEFAULT_BANNED_WORDS = [
    # English
    "best-in-class", "unparalleled", "revolutionary", "sensational",
    "unmatched", "world-leading", "game-changing",
    # German
    "erstklassig", "herausragend", "unschlagbar", "einzigartig",
    "revolutionaer", "sensationell", "unvergleichlich",
    "hoechstem niveau",
]

# Indicators that a cross-sell reference is present
CROSS_SELL_INDICATORS = [
    "combination with", "kombination mit", "in combination",
    "pairs well", "complement", "together with", "zusammen mit",
    "ergaenzend", "empfehlen wir", "passend dazu",
]

# Informal pronouns that indicate wrong tone (German du-form)
INFORMAL_PRONOUNS = [
    r'\bdu\b', r'\bdein\b', r'\bdeine\b', r'\bdeinem\b',
    r'\bdeinen\b', r'\bdeiner\b', r'\bdir\b', r'\bdich\b',
]


def _record(results: dict, level: str, msg: str) -> None:
    """Append one check outcome to *results* and bump the matching counter."""
    results["checks"].append((level, msg))
    if level == "PASS":
        results["passed"] += 1
    elif level == "FAIL":
        results["failed"] += 1
    else:  # WARN
        results["warnings"] += 1


def check_description(product: dict, min_words: int = 200, max_words: int = 350,
                      strict: bool = False) -> dict:
    """Run all 8 SEO quality checks against one product description.

    Args:
        product: Product record; reads "sku", "name" and "beschreibung"
            (falling back to "description").
        min_words: Lower bound for the word-count check.
        max_words: Upper bound for the word-count check.
        strict: When True the soft checks (superlatives, cross-sell)
            FAIL instead of merely WARN.

    Returns:
        dict with "sku", "name", "checks" (list of (level, message)
        tuples) and the counters "passed", "failed", "warnings".
    """
    sku = product.get("sku", "?")
    name = product.get("name", "?")
    text = product.get("beschreibung", product.get("description", ""))

    results = {"sku": sku, "name": name, "checks": [], "passed": 0, "failed": 0, "warnings": 0}

    # 1. Word count: too short is a hard failure, slightly long only a warning.
    words = len(text.split())
    if min_words <= words <= max_words:
        _record(results, "PASS", f"Word count: {words} (target: {min_words}-{max_words})")
    elif words > max_words:
        _record(results, "WARN", f"Word count: {words} -- above {max_words}, consider trimming")
    else:
        _record(results, "FAIL", f"Word count: {words} -- below {min_words} minimum")

    # 2. Paragraph structure (blank-line separated; 3 paragraphs is tolerated).
    paragraphs = [p.strip() for p in text.split("\n\n") if p.strip()]
    if len(paragraphs) == 4:
        _record(results, "PASS", f"4-paragraph structure: {len(paragraphs)} paragraphs")
    elif len(paragraphs) >= 3:
        _record(results, "WARN", f"Paragraph structure: {len(paragraphs)} paragraphs (target: 4)")
    else:
        _record(results, "FAIL", f"Paragraph structure: only {len(paragraphs)} paragraphs (target: 4)")

    # 3. Focus keyword: the longest word of the product name must appear in
    #    the first sentence (naive sentence split on the first ".").
    first_sentence = text.split(".")[0].lower() if text else ""
    name_parts = name.lower().split()
    main_word = max(name_parts, key=len) if name_parts else ""
    if main_word and main_word in first_sentence:
        _record(results, "PASS", f"Focus keyword in first sentence: '{main_word}' found")
    else:
        _record(results, "FAIL", f"Focus keyword missing in first sentence (expected: '{main_word}')")

    # 4. Banned superlatives (substring match on the lower-cased text).
    text_lower = text.lower()
    found_banned = [w for w in DEFAULT_BANNED_WORDS if w in text_lower]
    if not found_banned:
        _record(results, "PASS", "Tone: no banned superlatives")
    else:
        _record(results, "FAIL" if strict else "WARN",
                f"Banned superlatives found: {', '.join(found_banned)}")

    # 5. Cross-sell reference (warning only -- not all products have natural cross-sells)
    has_cross_sell = any(ind in text_lower for ind in CROSS_SELL_INDICATORS)
    if has_cross_sell:
        _record(results, "PASS", "Cross-sell reference present")
    else:
        _record(results, "FAIL" if strict else "WARN", "No cross-sell reference found")

    # 6. Formal address: no German du-form pronouns anywhere in the text.
    informal_found = []
    for pattern in INFORMAL_PRONOUNS:
        informal_found.extend(re.findall(pattern, text_lower))
    if not informal_found:
        _record(results, "PASS", "Formal address maintained")
    else:
        _record(results, "FAIL", f"Informal pronouns found: {', '.join(set(informal_found))}")

    # 7. Meta description: the first 155 chars should be usable as-is,
    #    i.e. reasonably long and containing a sentence end.
    first_155 = text[:155]
    if len(first_155) >= 100 and "." in first_155:
        _record(results, "PASS", f"Meta description: {len(first_155)} chars, contains sentence end")
    else:
        _record(results, "WARN", "Meta description: first 155 chars may need adjustment")

    # 8. Plain text only: reject HTML tags and common Markdown markers.
    if re.search(r'<[^>]+>|#{1,6}\s|\*\*|__|\[.*\]\(.*\)', text):
        _record(results, "FAIL", "HTML or Markdown detected -- plain text only")
    else:
        _record(results, "PASS", "Plain text format (no HTML/Markdown)")

    return results
|
|
147
|
+
|
|
148
|
+
|
|
149
|
+
def main():
    """CLI entry point: check every description in the updates file.

    Prints a per-product report plus a summary and exits with status 1
    when any hard failure occurred, 0 otherwise.
    """
    parser = argparse.ArgumentParser(description="Quality-check product descriptions")
    parser.add_argument("updates_path", help="Path to updates JSON file")
    parser.add_argument("--strict", action="store_true", help="Treat warnings as failures")
    parser.add_argument("--min-words", type=int, default=200, help="Minimum word count (default: 200)")
    parser.add_argument("--max-words", type=int, default=350, help="Maximum word count (default: 350)")
    args = parser.parse_args()

    with open(args.updates_path, "r", encoding="utf-8") as handle:
        updates = json.load(handle)

    divider = "=" * 70
    print(divider)
    print(f"QUALITY CHECK -- {len(updates)} descriptions")
    print(divider)

    totals = {"passed": 0, "failed": 0, "warnings": 0}
    needs_rework = []
    icons = {"PASS": " [+]", "FAIL": " [-]", "WARN": " [!]"}

    for product in updates:
        result = check_description(product, args.min_words, args.max_words, args.strict)
        for key in totals:
            totals[key] += result[key]

        status = "FAIL" if result["failed"] else "OK"
        print(f"\n[{status}] SKU {result['sku']}: {result['name']}"
              f" ({result['passed']}P/{result['failed']}F/{result['warnings']}W)")

        for level, msg in result["checks"]:
            print(f" {icons[level]} {msg}")

        if result["failed"]:
            needs_rework.append(f"SKU {result['sku']}: {result['name']}")

    total_checks = sum(totals.values())
    print(f"\n{divider}")
    print("SUMMARY")
    print(divider)
    print(f"Products: {len(updates)}")
    print(f"Checks: {total_checks} ({totals['passed']} passed, {totals['failed']} failed, {totals['warnings']} warnings)")
    if total_checks > 0:
        print(f"Pass rate: {totals['passed'] * 100 // total_checks}%")

    if needs_rework:
        print("\nNeeds rework:")
        for item in needs_rework:
            print(f" -> {item}")

    sys.exit(1 if totals["failed"] > 0 else 0)


if __name__ == "__main__":
    main()
|
|
@@ -0,0 +1,88 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""Extract products of a category as JSON for batch prompting.
|
|
3
|
+
|
|
4
|
+
Usage:
|
|
5
|
+
python extract_category.py <catalog.json> <category-name> [--limit 8]
|
|
6
|
+
|
|
7
|
+
Supports fuzzy category matching and pagination.
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
import json
|
|
11
|
+
import sys
|
|
12
|
+
import argparse
|
|
13
|
+
|
|
14
|
+
# Catalog field names (German-keyed catalog).
DESC_FIELD = "beschreibung"
CAT_FIELD = "kategorieName"
CAT_FIELD_ALT = "kategorie"
SKU_FIELD = "sku"
NAME_FIELD = "name"
STATUS_FIELD = "status"
STATUS_ACTIVE = "aktiv"  # products without a status field are treated as active


def _category_matches(product: dict, search: str) -> bool:
    """True when *search* is a case-insensitive substring of either category field."""
    return (search in product.get(CAT_FIELD_ALT, "").lower()
            or search in product.get(CAT_FIELD, "").lower())


def main():
    """CLI entry point: print a JSON batch of active products in one category.

    Writes the batch as JSON to stdout and a pagination summary to stderr;
    exits 1 (listing the available categories) when nothing matches.
    """
    parser = argparse.ArgumentParser(description="Extract category products for batch prompting")
    parser.add_argument("catalog_path", help="Path to catalog JSON file")
    parser.add_argument("category", help="Category name (fuzzy match)")
    parser.add_argument("--limit", "-l", type=int, default=8,
                        help="Max products per batch (default: 8)")
    parser.add_argument("--offset", "-o", type=int, default=0,
                        help="Skip first N products for pagination")
    parser.add_argument("--thin-first", "-t", action="store_true",
                        help="Sort thinnest descriptions first")
    args = parser.parse_args()

    with open(args.catalog_path, "r", encoding="utf-8") as f:
        data = json.load(f)

    search = args.category.lower()
    # NOTE: the original filter also tested exact equality on both category
    # fields, but an equal string is always a substring -- the substring
    # test alone covers both cases.
    products = [
        p for p in data.get("products", [])
        if p.get(STATUS_FIELD, STATUS_ACTIVE) == STATUS_ACTIVE
        and _category_matches(p, search)
    ]

    if not products:
        all_cats = sorted(set(
            p.get(CAT_FIELD, p.get(CAT_FIELD_ALT, "?"))
            for p in data.get("products", [])
        ))
        print(f"No active products found for '{args.category}'.", file=sys.stderr)
        print("Available categories:", file=sys.stderr)
        for c in all_cats:
            print(f" - {c}", file=sys.stderr)
        sys.exit(1)

    if args.thin_first:
        # Shortest (word count) descriptions first, so thin content gets fixed first.
        products.sort(key=lambda p: len(p.get(DESC_FIELD, "").split()))

    batch = products[args.offset:args.offset + args.limit]

    output = []
    for p in batch:
        entry = {
            SKU_FIELD: p.get(SKU_FIELD, ""),
            NAME_FIELD: p.get(NAME_FIELD, ""),
            DESC_FIELD: p.get(DESC_FIELD, ""),
        }
        # Include optional fields if present
        for field in ["variante", "preis", "preisAufAnfrage", "einheit"]:
            if field in p:
                entry[field] = p[field]
        output.append(entry)

    total = len(products)
    shown = len(batch)
    remaining = total - args.offset - shown

    # JSON goes to stdout; the human-readable summary goes to stderr so the
    # stdout stream stays machine-parseable.
    print(json.dumps(output, ensure_ascii=False, indent=2))
    print(f"\n// Category: {args.category} | Shown: {shown}/{total}"
          f" | Offset: {args.offset} | Remaining: {max(0, remaining)}",
          file=sys.stderr)


if __name__ == "__main__":
    main()
|
|
@@ -0,0 +1,140 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""Track progress of a product description update campaign.
|
|
3
|
+
|
|
4
|
+
Usage:
|
|
5
|
+
python track_progress.py <catalog.json> [--config product-seo-config.json] [--min-words 200]
|
|
6
|
+
|
|
7
|
+
Shows overall progress, per-priority breakdown, and a next-action queue.
|
|
8
|
+
Priorities are loaded from config; without config all categories are equal.
|
|
9
|
+
"""
|
|
10
|
+
|
|
11
|
+
import json
|
|
12
|
+
import sys
|
|
13
|
+
import argparse
|
|
14
|
+
from collections import defaultdict
|
|
15
|
+
|
|
16
|
+
# Catalog field names used throughout this script (German-keyed catalog).
DESC_FIELD = "beschreibung"  # product description text
CAT_FIELD = "kategorieName"  # primary category field
CAT_FIELD_ALT = "kategorie"  # fallback category field
SKU_FIELD = "sku"
NAME_FIELD = "name"
STATUS_FIELD = "status"
STATUS_ACTIVE = "aktiv"  # status value marking a product as active
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
def load_priorities(config_path: str | None) -> dict[str, int]:
|
|
26
|
+
"""Load priority map from config. Returns {category_name: priority_level}."""
|
|
27
|
+
if not config_path:
|
|
28
|
+
return {}
|
|
29
|
+
try:
|
|
30
|
+
with open(config_path, "r", encoding="utf-8") as f:
|
|
31
|
+
config = json.load(f)
|
|
32
|
+
prio_map = {}
|
|
33
|
+
for level, cats in config.get("priorities", {}).items():
|
|
34
|
+
for cat in cats:
|
|
35
|
+
prio_map[cat] = int(level)
|
|
36
|
+
return prio_map
|
|
37
|
+
except (FileNotFoundError, json.JSONDecodeError):
|
|
38
|
+
return {}
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
def get_priority(cat_name: str, prio_map: dict[str, int]) -> int:
    """Return the priority level for a category (1 = highest).

    Without any configured priorities every category counts as priority 1;
    with a config, categories not listed in it default to 3.
    """
    return prio_map.get(cat_name, 3) if prio_map else 1
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
def main():
    """Print a progress report for the description-update campaign.

    Reads the catalog, buckets active products by category, and reports:
    an overall completion bar, per-priority category tables, and a queue
    of the next categories/products to work on.
    """
    parser = argparse.ArgumentParser(description="Track description update progress")
    parser.add_argument("catalog_path", help="Path to catalog JSON file")
    parser.add_argument("--config", help="Path to product-seo-config.json")
    parser.add_argument("--min-words", "-m", type=int, default=200,
                        help="Word count threshold (default: 200)")
    args = parser.parse_args()

    with open(args.catalog_path, "r", encoding="utf-8") as f:
        data = json.load(f)

    prio_map = load_priorities(args.config)
    # Products without a status field count as active.
    products = [p for p in data.get("products", [])
                if p.get(STATUS_FIELD, STATUS_ACTIVE) == STATUS_ACTIVE]

    # Per-category tally; "products_todo" collects the still-thin products.
    categories = defaultdict(lambda: {"total": 0, "done": 0, "todo": 0, "products_todo": []})

    # A product counts as "done" once its description reaches the word threshold.
    for p in products:
        cat = p.get(CAT_FIELD, p.get(CAT_FIELD_ALT, "Unknown"))
        wc = len(p.get(DESC_FIELD, "").split())
        categories[cat]["total"] += 1
        if wc >= args.min_words:
            categories[cat]["done"] += 1
        else:
            categories[cat]["todo"] += 1
            categories[cat]["products_todo"].append({
                SKU_FIELD: p.get(SKU_FIELD, ""),
                NAME_FIELD: p.get(NAME_FIELD, ""),
                "words": wc,
            })

    total_products = len(products)
    total_done = sum(c["done"] for c in categories.values())
    total_todo = total_products - total_done
    pct = total_done * 100 // total_products if total_products > 0 else 0

    # ASCII progress bar, 40 characters wide.
    bar_len = 40
    filled = bar_len * total_done // total_products if total_products > 0 else 0
    bar = "#" * filled + "-" * (bar_len - filled)

    print(f"{'=' * 70}")
    print(f"PRODUCT DESCRIPTION PROGRESS")
    print(f"{'=' * 70}")
    print(f"\n [{bar}] {pct}%")
    print(f" {total_done}/{total_products} products with >={args.min_words} words")
    print(f" {total_todo} remaining")
    # Rough estimate: batches of ~6 products each (campaign uses 5-8).
    print(f"\n Estimated effort: ~{total_todo // 6 + 1} batch runs at 5-8 products each")

    # Determine which priority levels exist
    prio_levels = sorted(set(get_priority(name, prio_map) for name in categories))

    for prio in prio_levels:
        prio_cats = [(name, stats) for name, stats in categories.items()
                     if get_priority(name, prio_map) == prio]
        if not prio_cats:
            continue

        prio_total = sum(s["total"] for _, s in prio_cats)
        prio_done = sum(s["done"] for _, s in prio_cats)
        prio_pct = prio_done * 100 // prio_total if prio_total > 0 else 0

        # Without a config every category sits in one "ALL CATEGORIES" bucket.
        label = f"PRIORITY {prio}" if prio_map else "ALL CATEGORIES"
        print(f"\n{'-' * 70}")
        print(f"{label} -- {prio_done}/{prio_total} done ({prio_pct}%)")
        print(f"{'-' * 70}")
        print(f"{'Category':<45} {'Done':>10} {'Open':>7}")

        # Categories with the most open items first.
        for name, stats in sorted(prio_cats, key=lambda x: x[1]["todo"], reverse=True):
            done_str = f"{stats['done']}/{stats['total']}"
            check = " done" if stats["todo"] == 0 else ""
            print(f" {name:<43} {done_str:>10} {stats['todo']:>5} {check}")

    # Next action queue
    print(f"\n{'=' * 70}")
    print(f"NEXT ACTIONS")
    print(f"{'=' * 70}")

    # Show up to 5 categories, ordered by priority then open count;
    # within a category, the 5 thinnest descriptions first.
    shown = 0
    for name, stats in sorted(categories.items(),
                              key=lambda x: (get_priority(x[0], prio_map), -x[1]["todo"])):
        if stats["todo"] == 0:
            continue
        prio = get_priority(name, prio_map)
        label = f"Prio {prio}" if prio_map else ""
        print(f"\n -> {name} ({label + ', ' if label else ''}{stats['todo']} open):")
        for p in sorted(stats["products_todo"], key=lambda x: x["words"])[:5]:
            print(f" SKU {p[SKU_FIELD]}: {p[NAME_FIELD]} ({p['words']} words)")
        shown += 1
        if shown >= 5:
            break


if __name__ == "__main__":
    main()
|
|
@@ -0,0 +1,80 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""Write updated product descriptions back to a catalog JSON file.
|
|
3
|
+
|
|
4
|
+
Usage:
|
|
5
|
+
python update_catalog.py <catalog.json> <updates.json>
|
|
6
|
+
|
|
7
|
+
updates.json format:
|
|
8
|
+
[
|
|
9
|
+
{"sku": "401", "beschreibung": "New description text..."},
|
|
10
|
+
{"sku": "402", "beschreibung": "Another description..."}
|
|
11
|
+
]
|
|
12
|
+
|
|
13
|
+
Creates an automatic backup before writing.
|
|
14
|
+
"""
|
|
15
|
+
|
|
16
|
+
import json
|
|
17
|
+
import sys
|
|
18
|
+
import argparse
|
|
19
|
+
import shutil
|
|
20
|
+
from datetime import datetime
|
|
21
|
+
|
|
22
|
+
# Catalog field names (German-keyed catalog).
DESC_FIELD = "beschreibung"
SKU_FIELD = "sku"
NAME_FIELD = "name"


def main():
    """Apply description updates to a catalog JSON file in place.

    Reads the catalog and an updates file, backs the catalog up first,
    overwrites each matching product's description, and reports update
    SKUs that do not exist in the catalog.
    """
    parser = argparse.ArgumentParser(description="Write updated descriptions back to catalog JSON")
    parser.add_argument("catalog_path", help="Path to catalog JSON file")
    parser.add_argument("updates_path", help="Path to updates JSON file")
    args = parser.parse_args()

    catalog_path = args.catalog_path
    updates_path = args.updates_path

    with open(catalog_path, "r", encoding="utf-8") as f:
        catalog = json.load(f)

    with open(updates_path, "r", encoding="utf-8") as f:
        updates = json.load(f)

    # Support both "beschreibung" and "description" field names in updates
    update_map = {}
    for u in updates:
        sku = u.get(SKU_FIELD)
        if sku is None:
            # An update entry without a SKU cannot be applied; flag it
            # instead of crashing with a KeyError.
            print(f" [-] Update entry without '{SKU_FIELD}' skipped: {u}")
            continue
        update_map[sku] = u.get(DESC_FIELD, u.get("description", ""))

    # Create a timestamped backup before touching the catalog.
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    backup_path = f"{catalog_path}.backup_{timestamp}"
    shutil.copy2(catalog_path, backup_path)
    print(f"Backup created: {backup_path}")

    updated = 0
    not_found = []

    for product in catalog.get("products", []):
        sku = product.get(SKU_FIELD, "")
        if sku in update_map:
            old_wc = len(product.get(DESC_FIELD, "").split())
            product[DESC_FIELD] = update_map[sku]
            new_wc = len(update_map[sku].split())
            print(f" [+] SKU {sku}: {product.get(NAME_FIELD, '?')} ({old_wc} -> {new_wc} words)")
            updated += 1
            # Remove applied entries so the leftovers are the unmatched SKUs.
            del update_map[sku]

    for sku in update_map:
        not_found.append(sku)
        print(f" [-] SKU {sku}: Not found in catalog")

    with open(catalog_path, "w", encoding="utf-8") as f:
        json.dump(catalog, f, ensure_ascii=False, indent="\t")

    print(f"\n{updated} descriptions updated.")
    if not_found:
        # str() guards against numeric SKUs in the JSON, which would make
        # ", ".join() raise a TypeError.
        print(f"{len(not_found)} SKUs not found: {', '.join(str(s) for s in not_found)}")


if __name__ == "__main__":
    main()
|
|
@@ -0,0 +1,87 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""Validate catalog JSON structure after description updates.
|
|
3
|
+
|
|
4
|
+
Usage:
|
|
5
|
+
python validate_json.py <catalog.json>
|
|
6
|
+
|
|
7
|
+
Checks: valid JSON, required fields, no duplicate SKUs,
|
|
8
|
+
no HTML in descriptions, valid slug format.
|
|
9
|
+
"""
|
|
10
|
+
|
|
11
|
+
import json
|
|
12
|
+
import sys
|
|
13
|
+
import re
|
|
14
|
+
from collections import Counter
|
|
15
|
+
|
|
16
|
+
# Fields every catalog product must carry.
REQUIRED_FIELDS = ["sku", "name", "slug"]


def main():
    """Validate a catalog JSON file; exit 1 on structural errors.

    Errors: missing required fields, duplicate SKUs, HTML in descriptions.
    Warnings (reported, non-fatal): empty descriptions and slugs with
    characters outside [a-z0-9-_.].
    """
    if len(sys.argv) < 2:
        print("Usage: python validate_json.py <catalog.json>")
        sys.exit(1)

    path = sys.argv[1]

    try:
        with open(path, "r", encoding="utf-8") as handle:
            data = json.load(handle)
    except json.JSONDecodeError as exc:
        print(f"[-] FATAL: Invalid JSON: {exc}")
        sys.exit(1)

    products = data.get("products", [])
    print(f"Validating {len(products)} products in {path}...")

    # Every product must carry all required fields.
    errors = [
        f"Product #{idx} (SKU {prod.get('sku', '?')}): missing field '{field}'"
        for idx, prod in enumerate(products)
        for field in REQUIRED_FIELDS
        if field not in prod
    ]

    # SKUs must be unique across the catalog.
    sku_counts = Counter(prod.get("sku", "") for prod in products)
    dupes = [sku for sku, n in sku_counts.items() if n > 1]
    if dupes:
        errors.append(f"Duplicate SKUs: {', '.join(dupes)}")

    # Detect the description field from a sample of the first 5 products.
    desc_field = "beschreibung" if any("beschreibung" in prod for prod in products[:5]) else "description"

    # Empty descriptions are tolerated but flagged.
    warnings = [
        f"SKU {prod.get('sku', '?')}: empty description"
        for prod in products
        if not prod.get(desc_field, "").strip()
    ]

    # Descriptions must be plain text -- any HTML tag is an error.
    errors.extend(
        f"SKU {prod.get('sku', '?')}: HTML tags found in description"
        for prod in products
        if re.search(r'<[^>]+>', prod.get(desc_field, ""))
    )

    # Slugs should be lowercase URL-safe strings.
    warnings.extend(
        f"SKU {prod.get('sku', '?')}: slug '{prod.get('slug', '')}' contains invalid characters"
        for prod in products
        if prod.get("slug", "") and not re.match(r'^[a-z0-9\-_\.]+$', prod.get("slug", ""))
    )

    # Report
    print(f"\n{'=' * 50}")
    if errors:
        print(f"[-] {len(errors)} errors found:")
        for entry in errors:
            print(f" [-] {entry}")
    if warnings:
        print(f"[!] {len(warnings)} warnings:")
        for entry in warnings:
            print(f" [!] {entry}")
    if not errors and not warnings:
        print("[+] All validations passed!")
    elif not errors:
        print(f"\n[+] No critical errors. {len(warnings)} warnings.")

    sys.exit(1 if errors else 0)


if __name__ == "__main__":
    main()
|
|
@@ -0,0 +1,87 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: systematic-debugging
|
|
3
|
+
description: Investigate bugs, test failures, and unexpected behavior through root-cause analysis before proposing fixes. Use when encountering any technical issue.
|
|
4
|
+
license: MIT
|
|
5
|
+
compatibility:
|
|
6
|
+
opencode: ">=0.1"
|
|
7
|
+
metadata:
|
|
8
|
+
category: debugging
|
|
9
|
+
phase: investigation
|
|
10
|
+
---
|
|
11
|
+
|
|
12
|
+
# Skill: Systematic Debugging
|
|
13
|
+
|
|
14
|
+
This skill enforces root-cause investigation before any fix attempt. Random fixes waste time and create new bugs.
|
|
15
|
+
|
|
16
|
+
---
|
|
17
|
+
|
|
18
|
+
## When to Use
|
|
19
|
+
|
|
20
|
+
Use for any technical issue:
|
|
21
|
+
|
|
22
|
+
- Test failures
|
|
23
|
+
- Bugs in production or development
|
|
24
|
+
- Unexpected behavior
|
|
25
|
+
- Performance problems
|
|
26
|
+
- Build failures
|
|
27
|
+
- Integration issues
|
|
28
|
+
|
|
29
|
+
Do **not** skip this skill when:
|
|
30
|
+
|
|
31
|
+
- The issue seems simple (simple bugs have root causes too)
|
|
32
|
+
- You are under time pressure (systematic debugging is faster than thrashing)
|
|
33
|
+
- A fix seems obvious (obvious fixes often mask the real problem)
|
|
34
|
+
|
|
35
|
+
---
|
|
36
|
+
|
|
37
|
+
## Execution Model
|
|
38
|
+
|
|
39
|
+
- **Primary agent** runs this skill directly.
|
|
40
|
+
- **Rationale**: Debugging requires iterative investigation in the same context. Delegating to a subagent loses the accumulated understanding.
|
|
41
|
+
- **Exception**: For multi-component issues spanning independent subsystems, delegate per-subsystem investigation to separate agents via the `dispatch-parallel-agents` skill.
|
|
42
|
+
|
|
43
|
+
---
|
|
44
|
+
|
|
45
|
+
## Workflow
|
|
46
|
+
|
|
47
|
+
### Phase 1: Root Cause Investigation
|
|
48
|
+
|
|
49
|
+
**No fixes are allowed until this phase is complete.**
|
|
50
|
+
|
|
51
|
+
1. **Read error messages carefully** -- stack traces, line numbers, error codes. Do not skip past them.
|
|
52
|
+
2. **Reproduce consistently** -- determine the exact steps. If not reproducible, gather more data instead of guessing.
|
|
53
|
+
3. **Check recent changes** -- `git diff`, recent commits, new dependencies, config changes, environmental differences.
|
|
54
|
+
4. **Gather evidence at component boundaries** -- for multi-layer systems (CI -> build -> deploy, API -> service -> database), add diagnostic logging at each boundary to identify where the failure occurs.
|
|
55
|
+
5. **Trace data flow** -- follow the bad value backward through the call stack to its origin. Fix at the source, not at the symptom.
|
|
56
|
+
|
|
57
|
+
### Phase 2: Pattern Analysis
|
|
58
|
+
|
|
59
|
+
1. **Find working examples** -- locate similar working code in the same codebase.
|
|
60
|
+
2. **Compare against references** -- if implementing a pattern, read the reference implementation completely. Do not skim.
|
|
61
|
+
3. **Identify differences** -- list every difference between working and broken code, however small.
|
|
62
|
+
4. **Understand dependencies** -- what components, settings, config, and environment does this code need?
|
|
63
|
+
|
|
64
|
+
### Phase 3: Hypothesis and Testing
|
|
65
|
+
|
|
66
|
+
1. **Form a single hypothesis** -- state clearly: "I think X is the root cause because Y."
|
|
67
|
+
2. **Test minimally** -- make the smallest possible change to test the hypothesis. One variable at a time.
|
|
68
|
+
3. **Verify before continuing** -- if the hypothesis is wrong, form a new one. Do not stack fixes.
|
|
69
|
+
4. **Acknowledge unknowns** -- if you do not understand something, say so. Do not pretend.
|
|
70
|
+
|
|
71
|
+
### Phase 4: Implementation
|
|
72
|
+
|
|
73
|
+
1. **Fix the root cause, not the symptom.**
|
|
74
|
+
2. **One change at a time** -- no "while I'm here" improvements, no bundled refactoring.
|
|
75
|
+
3. **Verify the fix** -- run the relevant tests, confirm the issue is resolved, confirm no regressions.
|
|
76
|
+
4. **If 3+ fixes have failed** -- stop. The issue is likely architectural, not a bug. Discuss with the user before attempting more fixes.
|
|
77
|
+
|
|
78
|
+
---
|
|
79
|
+
|
|
80
|
+
## Rules
|
|
81
|
+
|
|
82
|
+
1. **No fixes without investigation**: Phase 1 must be complete before proposing any fix.
|
|
83
|
+
2. **One hypothesis at a time**: Do not apply multiple changes simultaneously.
|
|
84
|
+
3. **Evidence over intuition**: Every fix must be justified by evidence from the investigation, not by "it might work."
|
|
85
|
+
4. **Escalate after 3 failed attempts**: Three failed fixes indicate an architectural problem. Stop fixing and discuss with the user.
|
|
86
|
+
5. **Do not increase timeouts as a fix**: Find the real timing issue instead.
|
|
87
|
+
6. **Read error messages completely**: Stack traces contain the answer more often than not.
|