@opendirectory.dev/skills 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/skills/claude-md-generator/.env.example +7 -0
- package/.claude/skills/claude-md-generator/README.md +78 -0
- package/.claude/skills/claude-md-generator/SKILL.md +248 -0
- package/.claude/skills/claude-md-generator/evals/evals.json +35 -0
- package/.claude/skills/claude-md-generator/references/section-guide.md +175 -0
- package/dist/e2e.test.d.ts +1 -0
- package/dist/e2e.test.js +62 -0
- package/dist/fs-adapters.d.ts +4 -0
- package/dist/fs-adapters.js +101 -0
- package/dist/fs-adapters.test.d.ts +1 -0
- package/dist/fs-adapters.test.js +108 -0
- package/dist/index.d.ts +2 -0
- package/dist/index.js +211 -0
- package/dist/transformers.d.ts +6 -0
- package/dist/transformers.js +2 -0
- package/package.json +25 -0
- package/registry.json +226 -0
- package/skills/blog-cover-image-cli/.github/workflows/publish.yml +19 -0
- package/skills/blog-cover-image-cli/LICENSE +15 -0
- package/skills/blog-cover-image-cli/README.md +126 -0
- package/skills/blog-cover-image-cli/SKILL.md +7 -0
- package/skills/blog-cover-image-cli/agent-skill/blog-cover-generator/README.md +30 -0
- package/skills/blog-cover-image-cli/agent-skill/blog-cover-generator/SKILL.md +72 -0
- package/skills/blog-cover-image-cli/bin/cli.js +226 -0
- package/skills/blog-cover-image-cli/examples/100x_UX_Research_AI_Agent.png +0 -0
- package/skills/blog-cover-image-cli/examples/Firecrawl-supabase-bolt.png +0 -0
- package/skills/blog-cover-image-cli/examples/Git-City_Case_study_Cover_Image.jpg +0 -0
- package/skills/blog-cover-image-cli/examples/THE DISTRIBUTION LAYER (2).png +0 -0
- package/skills/blog-cover-image-cli/examples/canva-perplexity-duolingo-cover-image.png +0 -0
- package/skills/blog-cover-image-cli/examples/gamma-mistral-veed.png +0 -0
- package/skills/blog-cover-image-cli/examples/server-survival-case-study-cover-image(1).png +0 -0
- package/skills/blog-cover-image-cli/examples/viral-meme-automation.png +0 -0
- package/skills/blog-cover-image-cli/index.js +2 -0
- package/skills/blog-cover-image-cli/package-lock.json +2238 -0
- package/skills/blog-cover-image-cli/package.json +37 -0
- package/skills/blog-cover-image-cli/src/geminiGenerator.js +126 -0
- package/skills/blog-cover-image-cli/src/imageValidator.js +54 -0
- package/skills/blog-cover-image-cli/src/logoFetcher.js +86 -0
- package/skills/claude-md-generator/.env.example +7 -0
- package/skills/claude-md-generator/README.md +78 -0
- package/skills/claude-md-generator/SKILL.md +254 -0
- package/skills/claude-md-generator/evals/evals.json +35 -0
- package/skills/claude-md-generator/references/section-guide.md +175 -0
- package/skills/cook-the-blog/README.md +86 -0
- package/skills/cook-the-blog/SKILL.md +130 -0
- package/skills/dependency-update-bot/.env.example +13 -0
- package/skills/dependency-update-bot/README.md +101 -0
- package/skills/dependency-update-bot/SKILL.md +376 -0
- package/skills/dependency-update-bot/evals/evals.json +45 -0
- package/skills/dependency-update-bot/references/changelog-patterns.md +201 -0
- package/skills/docs-from-code/.env.example +13 -0
- package/skills/docs-from-code/README.md +97 -0
- package/skills/docs-from-code/SKILL.md +160 -0
- package/skills/docs-from-code/evals/evals.json +29 -0
- package/skills/docs-from-code/references/extraction-guide.md +174 -0
- package/skills/docs-from-code/references/output-template.md +135 -0
- package/skills/docs-from-code/scripts/extract_py.py +238 -0
- package/skills/docs-from-code/scripts/extract_ts.ts +284 -0
- package/skills/docs-from-code/scripts/package.json +18 -0
- package/skills/explain-this-pr/README.md +74 -0
- package/skills/explain-this-pr/SKILL.md +130 -0
- package/skills/explain-this-pr/evals/evals.json +35 -0
- package/skills/google-trends-api-skills/README.md +78 -0
- package/skills/google-trends-api-skills/SKILL.md +7 -0
- package/skills/google-trends-api-skills/google-trends-api/SKILL.md +163 -0
- package/skills/google-trends-api-skills/google-trends-api/references/api-responses.md +188 -0
- package/skills/google-trends-api-skills/google-trends-api/scripts/discover_keywords.py +344 -0
- package/skills/google-trends-api-skills/seo-keyword-research/SKILL.md +205 -0
- package/skills/google-trends-api-skills/seo-keyword-research/references/keyword-placement-guide.md +89 -0
- package/skills/google-trends-api-skills/seo-keyword-research/references/tech-blog-examples.md +207 -0
- package/skills/google-trends-api-skills/seo-keyword-research/scripts/blog_seo_research.py +373 -0
- package/skills/hackernews-intel/.env.example +33 -0
- package/skills/hackernews-intel/README.md +161 -0
- package/skills/hackernews-intel/SKILL.md +156 -0
- package/skills/hackernews-intel/evals/evals.json +35 -0
- package/skills/hackernews-intel/package.json +15 -0
- package/skills/hackernews-intel/scripts/monitor-hn.js +258 -0
- package/skills/kill-the-standup/.env.example +22 -0
- package/skills/kill-the-standup/README.md +84 -0
- package/skills/kill-the-standup/SKILL.md +169 -0
- package/skills/kill-the-standup/evals/evals.json +35 -0
- package/skills/kill-the-standup/references/standup-format.md +102 -0
- package/skills/linkedin-post-generator/.env.example +14 -0
- package/skills/linkedin-post-generator/README.md +107 -0
- package/skills/linkedin-post-generator/SKILL.md +228 -0
- package/skills/linkedin-post-generator/evals/evals.json +35 -0
- package/skills/linkedin-post-generator/references/linkedin-format.md +216 -0
- package/skills/linkedin-post-generator/references/output-template.md +154 -0
- package/skills/llms-txt-generator/.env.example +18 -0
- package/skills/llms-txt-generator/README.md +142 -0
- package/skills/llms-txt-generator/SKILL.md +176 -0
- package/skills/llms-txt-generator/evals/evals.json +35 -0
- package/skills/llms-txt-generator/references/llms-txt-spec.md +88 -0
- package/skills/llms-txt-generator/references/output-template.md +76 -0
- package/skills/llms-txt-generator/test-output/genzcareer.in/llms.txt +31 -0
- package/skills/luma-attendees-scraper/README.md +170 -0
- package/skills/luma-attendees-scraper/SKILL.md +7 -0
- package/skills/luma-attendees-scraper/luma_attendees_export.js +223 -0
- package/skills/meeting-brief-generator/.env.example +21 -0
- package/skills/meeting-brief-generator/README.md +90 -0
- package/skills/meeting-brief-generator/SKILL.md +275 -0
- package/skills/meeting-brief-generator/evals/evals.json +35 -0
- package/skills/meeting-brief-generator/references/brief-format.md +114 -0
- package/skills/meeting-brief-generator/references/output-template.md +150 -0
- package/skills/meta-ads-skill/README.md +100 -0
- package/skills/meta-ads-skill/SKILL.md +7 -0
- package/skills/meta-ads-skill/meta-ads-skill/SKILL.md +41 -0
- package/skills/meta-ads-skill/meta-ads-skill/references/report_templates.md +47 -0
- package/skills/meta-ads-skill/meta-ads-skill/references/workflows.md +51 -0
- package/skills/meta-ads-skill/meta-ads-skill/scripts/auth_check.py +22 -0
- package/skills/meta-ads-skill/meta-ads-skill/scripts/formatters.py +46 -0
- package/skills/newsletter-digest/.env.example +20 -0
- package/skills/newsletter-digest/README.md +147 -0
- package/skills/newsletter-digest/SKILL.md +221 -0
- package/skills/newsletter-digest/evals/evals.json +35 -0
- package/skills/newsletter-digest/feeds.json +7 -0
- package/skills/newsletter-digest/package.json +15 -0
- package/skills/newsletter-digest/references/digest-format.md +123 -0
- package/skills/newsletter-digest/references/output-template.md +136 -0
- package/skills/newsletter-digest/scripts/fetch-feeds.js +141 -0
- package/skills/newsletter-digest/scripts/ghost-publish.js +147 -0
- package/skills/noise2blog/.env.example +16 -0
- package/skills/noise2blog/README.md +107 -0
- package/skills/noise2blog/SKILL.md +229 -0
- package/skills/noise2blog/evals/evals.json +35 -0
- package/skills/noise2blog/references/blog-format.md +188 -0
- package/skills/noise2blog/references/output-template.md +184 -0
- package/skills/outreach-sequence-builder/.env.example +12 -0
- package/skills/outreach-sequence-builder/README.md +108 -0
- package/skills/outreach-sequence-builder/SKILL.md +248 -0
- package/skills/outreach-sequence-builder/evals/evals.json +36 -0
- package/skills/outreach-sequence-builder/references/output-template.md +171 -0
- package/skills/outreach-sequence-builder/references/sequence-format.md +167 -0
- package/skills/outreach-sequence-builder/references/signal-playbook.md +117 -0
- package/skills/position-me/README.md +71 -0
- package/skills/position-me/SKILL.md +7 -0
- package/skills/position-me/position-me/SKILL.md +50 -0
- package/skills/position-me/position-me/references/EVALUATION_SOP.md +40 -0
- package/skills/position-me/position-me/references/REPORT_TEMPLATE.md +58 -0
- package/skills/position-me/position-me/scripts/extract_links.py +49 -0
- package/skills/pr-description-writer/README.md +81 -0
- package/skills/pr-description-writer/SKILL.md +141 -0
- package/skills/pr-description-writer/evals/evals.json +35 -0
- package/skills/pr-description-writer/references/pr-format-guide.md +145 -0
- package/skills/producthunt-launch-kit/.env.example +7 -0
- package/skills/producthunt-launch-kit/README.md +95 -0
- package/skills/producthunt-launch-kit/SKILL.md +380 -0
- package/skills/producthunt-launch-kit/evals/evals.json +35 -0
- package/skills/producthunt-launch-kit/references/copy-rules.md +124 -0
- package/skills/reddit-icp-monitor/.env.example +16 -0
- package/skills/reddit-icp-monitor/README.md +117 -0
- package/skills/reddit-icp-monitor/SKILL.md +271 -0
- package/skills/reddit-icp-monitor/evals/evals.json +40 -0
- package/skills/reddit-icp-monitor/references/icp-format.md +131 -0
- package/skills/reddit-icp-monitor/references/reply-rules.md +110 -0
- package/skills/reddit-post-engine/.env.example +13 -0
- package/skills/reddit-post-engine/README.md +103 -0
- package/skills/reddit-post-engine/SKILL.md +303 -0
- package/skills/reddit-post-engine/evals/evals.json +35 -0
- package/skills/reddit-post-engine/references/subreddit-playbook.md +156 -0
- package/skills/schema-markup-generator/.env.example +19 -0
- package/skills/schema-markup-generator/README.md +114 -0
- package/skills/schema-markup-generator/SKILL.md +192 -0
- package/skills/schema-markup-generator/evals/evals.json +35 -0
- package/skills/schema-markup-generator/references/json-ld-spec.md +263 -0
- package/skills/schema-markup-generator/references/output-template.md +556 -0
- package/skills/show-hn-writer/.env.example +14 -0
- package/skills/show-hn-writer/README.md +88 -0
- package/skills/show-hn-writer/SKILL.md +303 -0
- package/skills/show-hn-writer/evals/evals.json +35 -0
- package/skills/show-hn-writer/references/hn-rules.md +74 -0
- package/skills/show-hn-writer/references/title-formulas.md +93 -0
- package/skills/stargazer/README.md +79 -0
- package/skills/stargazer/SKILL.md +7 -0
- package/skills/stargazer/stargazer-skill/SKILL.md +58 -0
- package/skills/stargazer/stargazer-skill/assets/.env.example +18 -0
- package/skills/stargazer/stargazer-skill/scripts/convert_to_csv.py +63 -0
- package/skills/stargazer/stargazer-skill/scripts/count_emails.py +52 -0
- package/skills/stargazer/stargazer-skill/scripts/stargazer_deep_extractor.py +450 -0
- package/skills/tweet-thread-from-blog/.env.example +14 -0
- package/skills/tweet-thread-from-blog/README.md +109 -0
- package/skills/tweet-thread-from-blog/SKILL.md +177 -0
- package/skills/tweet-thread-from-blog/evals/evals.json +35 -0
- package/skills/tweet-thread-from-blog/references/output-template.md +193 -0
- package/skills/tweet-thread-from-blog/references/thread-format.md +107 -0
- package/skills/twitter-GTM-find-skill/README.md +43 -0
- package/skills/twitter-GTM-find-skill/SKILL.md +7 -0
- package/skills/twitter-GTM-find-skill/twitter-GTM-find/SKILL.md +37 -0
- package/skills/twitter-GTM-find-skill/twitter-GTM-find/references/icp-checklist.md +35 -0
- package/skills/twitter-GTM-find-skill/twitter-GTM-find/scripts/package.json +23 -0
- package/skills/twitter-GTM-find-skill/twitter-GTM-find/scripts/run_pipeline.sh +8 -0
- package/skills/twitter-GTM-find-skill/twitter-GTM-find/scripts/src/debug.ts +23 -0
- package/skills/twitter-GTM-find-skill/twitter-GTM-find/scripts/src/extractor.ts +79 -0
- package/skills/twitter-GTM-find-skill/twitter-GTM-find/scripts/src/icp-filter.ts +87 -0
- package/skills/twitter-GTM-find-skill/twitter-GTM-find/scripts/src/index.ts +94 -0
- package/skills/twitter-GTM-find-skill/twitter-GTM-find/scripts/src/scraper.ts +41 -0
- package/skills/twitter-GTM-find-skill/twitter-GTM-find/scripts/tsconfig.json +13 -0
- package/skills/yc-intent-radar-skill/README.md +39 -0
- package/skills/yc-intent-radar-skill/SKILL.md +7 -0
- package/skills/yc-intent-radar-skill/yc-jobs-scraper/SKILL.md +59 -0
- package/skills/yc-intent-radar-skill/yc-jobs-scraper/scripts/auth.js +29 -0
- package/skills/yc-intent-radar-skill/yc-jobs-scraper/scripts/db.js +62 -0
- package/skills/yc-intent-radar-skill/yc-jobs-scraper/scripts/export_radar_candidates.js +40 -0
- package/skills/yc-intent-radar-skill/yc-jobs-scraper/scripts/package-lock.json +1525 -0
- package/skills/yc-intent-radar-skill/yc-jobs-scraper/scripts/package.json +12 -0
- package/skills/yc-intent-radar-skill/yc-jobs-scraper/scripts/scraper.js +217 -0
- package/src/e2e.test.ts +35 -0
- package/src/fs-adapters.test.ts +91 -0
- package/src/fs-adapters.ts +65 -0
- package/src/index.ts +182 -0
- package/src/transformers.ts +6 -0
- package/tsconfig.json +8 -0
|
@@ -0,0 +1,207 @@
|
|
|
1
|
+
# Tech Blog SEO Examples
|
|
2
|
+
|
|
3
|
+
Real-world examples of the SEO keyword research workflow applied to tech and developer-focused blog topics.
|
|
4
|
+
|
|
5
|
+
## Example 1: "AI Code Review Tools"
|
|
6
|
+
|
|
7
|
+
### Step 1: RELATED_QUERIES results
|
|
8
|
+
|
|
9
|
+
```
|
|
10
|
+
Rising:
|
|
11
|
+
>>> "ai code review github" — Breakout <- PRIMARY KEYWORD
|
|
12
|
+
>> "copilot code review" — +320%
|
|
13
|
+
>> "ai pull request review" — +210%
|
|
14
|
+
> "automated code review tools" — +75%
|
|
15
|
+
|
|
16
|
+
Long-tail:
|
|
17
|
+
? "how to use ai for code review"
|
|
18
|
+
? "is ai code review accurate"
|
|
19
|
+
? "what is the best ai code review tool"
|
|
20
|
+
```
|
|
21
|
+
|
|
22
|
+
### Step 2: RELATED_TOPICS results
|
|
23
|
+
|
|
24
|
+
```
|
|
25
|
+
Rising topics:
|
|
26
|
+
^ GitHub Copilot
|
|
27
|
+
^ Code Quality
|
|
28
|
+
^ Static Analysis
|
|
29
|
+
|
|
30
|
+
Top topics:
|
|
31
|
+
- Software Testing
|
|
32
|
+
- DevOps
|
|
33
|
+
- Pull Request
|
|
34
|
+
```
|
|
35
|
+
|
|
36
|
+
### Step 3: Generated outline
|
|
37
|
+
|
|
38
|
+
```markdown
|
|
39
|
+
Title: AI Code Review GitHub — Best 10 Tools for Developers (2026)
|
|
40
|
+
|
|
41
|
+
Meta: Discover the best AI code review tools for GitHub. Compare Copilot,
|
|
42
|
+
automated review tools, and learn how AI improves pull request quality.
|
|
43
|
+
|
|
44
|
+
# AI Code Review GitHub — Best 10 Tools for Developers
|
|
45
|
+
|
|
46
|
+
## Introduction
|
|
47
|
+
AI-powered code review on GitHub is transforming how development teams
|
|
48
|
+
maintain code quality...
|
|
49
|
+
|
|
50
|
+
## GitHub Copilot for Code Review
|
|
51
|
+
### How to Use AI for Code Review
|
|
52
|
+
### Is AI Code Review Accurate?
|
|
53
|
+
|
|
54
|
+
## Code Quality with AI Tools
|
|
55
|
+
### What Is the Best AI Code Review Tool?
|
|
56
|
+
### Automated Code Review vs Manual Review
|
|
57
|
+
|
|
58
|
+
## Static Analysis and AI
|
|
59
|
+
### AI-Powered Static Analysis Tools
|
|
60
|
+
### How AI Detects Bugs Before Production
|
|
61
|
+
|
|
62
|
+
## DevOps Integration
|
|
63
|
+
### AI Code Review in CI/CD Pipelines
|
|
64
|
+
### Automated Pull Request Review Setup
|
|
65
|
+
|
|
66
|
+
## Conclusion
|
|
67
|
+
```
|
|
68
|
+
|
|
69
|
+
**Why this works**: "ai code review github" is a Breakout keyword — massive search growth, low competition. First-mover advantage for ranking.
|
|
70
|
+
|
|
71
|
+
---
|
|
72
|
+
|
|
73
|
+
## Example 2: "Rust Programming"
|
|
74
|
+
|
|
75
|
+
### Step 1: RELATED_QUERIES results
|
|
76
|
+
|
|
77
|
+
```
|
|
78
|
+
Rising:
|
|
79
|
+
>>> "rust vs go 2026" — Breakout <- PRIMARY KEYWORD
|
|
80
|
+
>> "rust for web development" — +280%
|
|
81
|
+
>> "learn rust programming" — +145%
|
|
82
|
+
> "rust async programming" — +67%
|
|
83
|
+
|
|
84
|
+
Long-tail:
|
|
85
|
+
? "should i learn rust or go"
|
|
86
|
+
? "what is rust programming used for"
|
|
87
|
+
? "how to start learning rust"
|
|
88
|
+
? "is rust good for backend development"
|
|
89
|
+
```
|
|
90
|
+
|
|
91
|
+
### Step 2: RELATED_TOPICS results
|
|
92
|
+
|
|
93
|
+
```
|
|
94
|
+
Rising:
|
|
95
|
+
^ WebAssembly
|
|
96
|
+
^ Systems Programming
|
|
97
|
+
^ Memory Safety
|
|
98
|
+
|
|
99
|
+
Top:
|
|
100
|
+
- C++ (comparison)
|
|
101
|
+
- Linux Kernel
|
|
102
|
+
- Performance Optimization
|
|
103
|
+
```
|
|
104
|
+
|
|
105
|
+
### Step 3: Generated outline
|
|
106
|
+
|
|
107
|
+
```markdown
|
|
108
|
+
Title: Rust vs Go 2026 — Complete Comparison for Backend Developers
|
|
109
|
+
|
|
110
|
+
# Rust vs Go 2026 — Complete Comparison for Backend Developers
|
|
111
|
+
|
|
112
|
+
## Introduction
|
|
113
|
+
The Rust vs Go debate in 2026 has intensified as both languages mature...
|
|
114
|
+
|
|
115
|
+
## Systems Programming with Rust
|
|
116
|
+
### What Is Rust Programming Used For?
|
|
117
|
+
### Is Rust Good for Backend Development?
|
|
118
|
+
|
|
119
|
+
## WebAssembly and Rust
|
|
120
|
+
### Rust for Web Development
|
|
121
|
+
### Building WASM Applications with Rust
|
|
122
|
+
|
|
123
|
+
## Memory Safety Comparison
|
|
124
|
+
### How Rust Prevents Memory Bugs
|
|
125
|
+
### Go vs Rust Memory Management
|
|
126
|
+
|
|
127
|
+
## Performance Optimization
|
|
128
|
+
### Rust vs Go Performance Benchmarks
|
|
129
|
+
### Should I Learn Rust or Go?
|
|
130
|
+
|
|
131
|
+
## Conclusion
|
|
132
|
+
```
|
|
133
|
+
|
|
134
|
+
---
|
|
135
|
+
|
|
136
|
+
## Example 3: "Developer Productivity Tools"
|
|
137
|
+
|
|
138
|
+
### Step 1: RELATED_QUERIES results
|
|
139
|
+
|
|
140
|
+
```
|
|
141
|
+
Rising:
|
|
142
|
+
>> "ai developer tools 2026" — +340% <- PRIMARY KEYWORD
|
|
143
|
+
>> "cursor ide review" — +290%
|
|
144
|
+
>> "vscode alternatives 2026" — +180%
|
|
145
|
+
> "developer workflow automation" — +85%
|
|
146
|
+
|
|
147
|
+
Long-tail:
|
|
148
|
+
? "what are the best developer productivity tools"
|
|
149
|
+
? "how to improve developer productivity"
|
|
150
|
+
? "which ide is best for developers in 2026"
|
|
151
|
+
```
|
|
152
|
+
|
|
153
|
+
### Step 2: RELATED_TOPICS results
|
|
154
|
+
|
|
155
|
+
```
|
|
156
|
+
Rising:
|
|
157
|
+
^ AI Coding Assistant
|
|
158
|
+
^ IDE (Integrated Development Environment)
|
|
159
|
+
^ Developer Experience
|
|
160
|
+
|
|
161
|
+
Top:
|
|
162
|
+
- Visual Studio Code
|
|
163
|
+
- Terminal
|
|
164
|
+
- Git
|
|
165
|
+
```
|
|
166
|
+
|
|
167
|
+
### Step 3: Generated outline
|
|
168
|
+
|
|
169
|
+
```markdown
|
|
170
|
+
Title: AI Developer Tools 2026 — Top 15 for Maximum Productivity
|
|
171
|
+
|
|
172
|
+
# AI Developer Tools 2026 — Top 15 for Maximum Productivity
|
|
173
|
+
|
|
174
|
+
## Introduction
|
|
175
|
+
AI developer tools in 2026 have fundamentally changed how engineers write,
|
|
176
|
+
review, and ship code...
|
|
177
|
+
|
|
178
|
+
## AI Coding Assistants
|
|
179
|
+
### What Are the Best Developer Productivity Tools?
|
|
180
|
+
### Cursor IDE Review: Is It Worth Switching?
|
|
181
|
+
|
|
182
|
+
## IDE Comparison
|
|
183
|
+
### VSCode Alternatives 2026
|
|
184
|
+
### Which IDE Is Best for Developers in 2026?
|
|
185
|
+
|
|
186
|
+
## Developer Experience
|
|
187
|
+
### How to Improve Developer Productivity
|
|
188
|
+
### Developer Workflow Automation
|
|
189
|
+
|
|
190
|
+
## Terminal and CLI Tools
|
|
191
|
+
### Best Terminal Tools for Developers
|
|
192
|
+
### Git Productivity Tips
|
|
193
|
+
|
|
194
|
+
## Conclusion
|
|
195
|
+
```
|
|
196
|
+
|
|
197
|
+
---
|
|
198
|
+
|
|
199
|
+
## Pattern Summary
|
|
200
|
+
|
|
201
|
+
For tech/developer blogs, the best-performing topics follow this pattern:
|
|
202
|
+
|
|
203
|
+
1. **Comparisons** ("X vs Y") — always high search volume
|
|
204
|
+
2. **Tool lists** ("Best N tools for...") — high click-through rate
|
|
205
|
+
3. **Year-tagged** ("... in 2026") — captures recency searches
|
|
206
|
+
4. **How-to guides** ("How to...") — targets featured snippets
|
|
207
|
+
5. **Emerging tech** (new frameworks, AI tools) — breakout keyword territory
|
|
@@ -0,0 +1,373 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
Blog SEO Research Script
|
|
4
|
+
|
|
5
|
+
Complete workflow: research keywords -> build structure -> output blog outline.
|
|
6
|
+
Designed for tech/developer-focused blog content.
|
|
7
|
+
|
|
8
|
+
Usage:
|
|
9
|
+
python blog_seo_research.py "kubernetes deployment"
|
|
10
|
+
python blog_seo_research.py "AI code review" --geo US
|
|
11
|
+
python blog_seo_research.py "rust programming" --full --output outline.md
|
|
12
|
+
|
|
13
|
+
Requirements:
|
|
14
|
+
pip install requests
|
|
15
|
+
|
|
16
|
+
Environment:
|
|
17
|
+
SERPAPI_KEY - your SerpApi API key (required)
|
|
18
|
+
"""
|
|
19
|
+
|
|
20
|
+
import argparse
|
|
21
|
+
import json
|
|
22
|
+
import os
|
|
23
|
+
import sys
|
|
24
|
+
from datetime import datetime, timedelta
|
|
25
|
+
from pathlib import Path
|
|
26
|
+
|
|
27
|
+
try:
|
|
28
|
+
import requests
|
|
29
|
+
except ImportError:
|
|
30
|
+
print("Error: 'requests' package required. Install with: pip install requests")
|
|
31
|
+
sys.exit(1)
|
|
32
|
+
|
|
33
|
+
API_BASE = "https://serpapi.com/search"
|
|
34
|
+
CACHE_DIR = Path.home() / ".cache" / "seo-keyword-research"
|
|
35
|
+
CACHE_DAYS = 7
|
|
36
|
+
CURRENT_YEAR = datetime.now().year
|
|
37
|
+
|
|
38
|
+
QUESTION_WORDS = ("how", "what", "why", "when", "where", "which", "can", "is", "does", "should")
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
def get_api_key():
    """Return the SerpApi key from the environment, or exit with guidance.

    Reads the ``SERPAPI_KEY`` environment variable. When it is missing or
    empty the script cannot proceed, so print setup instructions and
    terminate with a non-zero status instead of returning ``None``.
    """
    api_key = os.environ.get("SERPAPI_KEY")
    if api_key:
        return api_key
    # No key: tell the user how to get one, then bail out.
    print("Error: SERPAPI_KEY environment variable not set.")
    print("Get a free key at https://serpapi.com/ (250 searches/month)")
    sys.exit(1)
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
def cached_api_call(query, data_type, api_key, geo="", date="today 3-m"):
    """Fetch a SerpApi Google Trends result, with a file-based cache.

    Cache entries live under ``CACHE_DIR`` and stay valid for
    ``CACHE_DAYS`` days (stamped via the ``_cached_at`` key injected into
    the saved payload). Failed API responses are not cached.

    Returns:
        (data, was_cached): the response dict (or None when the API did
        not report "Success") and whether it came from the cache.
    """
    slug = f"{query}_{data_type}_{geo}_{date}".replace(" ", "_").replace("/", "_")
    cache_file = CACHE_DIR / f"{slug}.json"

    # Serve from cache when a fresh-enough, well-formed entry exists.
    if cache_file.exists():
        try:
            cached = json.loads(cache_file.read_text(encoding="utf-8"))
            stamp = datetime.fromisoformat(cached.get("_cached_at", "2000-01-01"))
        except (json.JSONDecodeError, ValueError):
            # Corrupt or unparsable cache entry -- fall through to refetch.
            pass
        else:
            if datetime.now() - stamp < timedelta(days=CACHE_DAYS):
                return cached, True

    # Cache miss (or stale/corrupt entry): hit the API.
    request_params = {
        "engine": "google_trends",
        "q": query,
        "data_type": data_type,
        "date": date,
        "api_key": api_key,
    }
    if geo:
        request_params["geo"] = geo

    response = requests.get(API_BASE, params=request_params, timeout=30)
    response.raise_for_status()
    payload = response.json()

    status = payload.get("search_metadata", {}).get("status")
    if status != "Success":
        return None, False

    # Persist the successful response for future runs, stamped with fetch time.
    CACHE_DIR.mkdir(parents=True, exist_ok=True)
    payload["_cached_at"] = datetime.now().isoformat()
    cache_file.write_text(json.dumps(payload, indent=2), encoding="utf-8")
    return payload, False
|
|
88
|
+
|
|
89
|
+
|
|
90
|
+
def research_keywords(topic, api_key, geo="", date="today 3-m", full=False):
    """Run the complete keyword research workflow.

    Steps:
      1. RELATED_QUERIES -- bucket rising queries by growth tier and
         collect question-style queries as long-tail (H3) candidates.
      2. RELATED_TOPICS  -- gather deduplicated H2 section topics.
      3. TIMESERIES      -- only when ``full``: compare early vs recent
         average interest over 12 months to classify the trend.

    Args:
        topic: Seed topic/keyword to research.
        api_key: SerpApi key (see get_api_key()).
        geo: Optional geographic filter, e.g. "US".
        date: Google Trends time-range string (default last 3 months).
        full: When True, spend one extra API credit on trend validation.

    Returns:
        dict with the chosen primary keyword and its priority tier,
        bucketed keyword lists, H2 topics, optional trend summary, and
        ``credits_used`` (cache hits cost nothing).
    """
    credits_used = 0
    results = {
        "topic": topic,
        "primary_keyword": None,
        "priority": None,
        "breakout": [],
        "high_growth": [],
        "moderate": [],
        "long_tail": [],
        "top_queries": [],
        "h2_topics": [],
        "trend": None,
    }

    # Step 1: RELATED_QUERIES
    print(f"\n[1/{'3' if full else '2'}] Finding keywords...")
    rq_data, was_cached = cached_api_call(topic, "RELATED_QUERIES", api_key, geo, date)
    if not was_cached:
        credits_used += 1

    if rq_data:
        rq = rq_data.get("related_queries", {})
        for item in rq.get("rising", []):
            query = item.get("query", "")
            fv = item.get("formatted_value", "")

            if fv == "Breakout":
                results["breakout"].append({"query": query, "growth": "Breakout (5000%+)"})
            elif "%" in fv:
                # Growth values look like "+320%" or "+1,950%"; skip any
                # unexpected format instead of crashing the whole run.
                try:
                    pct = int(fv.replace("+", "").replace("%", "").replace(",", ""))
                except ValueError:
                    continue
                if pct >= 100:
                    results["high_growth"].append({"query": query, "growth": fv})
                elif pct >= 50:
                    results["moderate"].append({"query": query, "growth": fv})

            # Question-style queries become H3 candidates; dedupe here the
            # same way the "top" loop below already does.
            if query.lower().startswith(QUESTION_WORDS) and query not in results["long_tail"]:
                results["long_tail"].append(query)

        for item in rq.get("top", []):
            query = item.get("query", "")
            results["top_queries"].append({"query": query, "score": item.get("value", 0)})
            if query.lower().startswith(QUESTION_WORDS) and query not in results["long_tail"]:
                results["long_tail"].append(query)

    # Select primary keyword: breakout > high-growth > top > original topic.
    if results["breakout"]:
        results["primary_keyword"] = results["breakout"][0]["query"]
        results["priority"] = "BREAKOUT"
    elif results["high_growth"]:
        results["primary_keyword"] = results["high_growth"][0]["query"]
        results["priority"] = "HIGH_GROWTH"
    elif results["top_queries"]:
        results["primary_keyword"] = results["top_queries"][0]["query"]
        results["priority"] = "TOP"
    else:
        results["primary_keyword"] = topic
        results["priority"] = "ORIGINAL"

    # Step 2: RELATED_TOPICS
    print(f"[2/{'3' if full else '2'}] Building content structure...")
    rt_data, was_cached = cached_api_call(topic, "RELATED_TOPICS", api_key, geo, date)
    if not was_cached:
        credits_used += 1

    if rt_data:
        rt = rt_data.get("related_topics", {})
        seen_titles = set()  # O(1) dedup instead of rescanning the list
        for item in rt.get("rising", []) + rt.get("top", []):
            if "topic" in item:
                title = item["topic"]["title"]
                if title not in seen_titles:
                    seen_titles.add(title)
                    results["h2_topics"].append({
                        "title": title,
                        "growth": item.get("formatted_value", ""),
                    })

    # Step 3: TIMESERIES (optional, +1 credit)
    if full:
        print("[3/3] Validating trend direction...")
        ts_data, was_cached = cached_api_call(topic, "TIMESERIES", api_key, geo, "today 12-m")
        if not was_cached:
            credits_used += 1

        if ts_data:
            timeline = ts_data.get("interest_over_time", {}).get("timeline_data", [])
            values = []
            for entry in timeline:
                if entry.get("values"):
                    values.append(entry["values"][0].get("extracted_value", 0))

            # Need at least 4 points to split into meaningful halves.
            if len(values) >= 4:
                mid = len(values) // 2
                early = sum(values[:mid]) / mid
                recent = sum(values[mid:]) / (len(values) - mid)
                # max(early, 1) avoids division by zero on flat-zero interest.
                change = ((recent - early) / max(early, 1)) * 100

                # 10% band around "no change" counts as STABLE.
                if recent > early * 1.1:
                    direction = "RISING"
                elif recent < early * 0.9:
                    direction = "DECLINING"
                else:
                    direction = "STABLE"

                results["trend"] = {
                    "direction": direction,
                    "early_avg": round(early, 1),
                    "recent_avg": round(recent, 1),
                    "change_pct": round(change, 1),
                }

    results["credits_used"] = credits_used
    return results
|
|
203
|
+
|
|
204
|
+
|
|
205
|
+
def generate_outline(results):
    """Generate a markdown blog outline from research results.

    Turns the dict produced by research_keywords() into a ready-to-fill
    outline: title, meta description, H2/H3 structure, and a keyword
    checklist. Returns the outline as one markdown string.
    """
    pk = results["primary_keyword"]
    h2s = [t["title"] for t in results["h2_topics"][:5]]
    # Question-style queries become H3 headings, up to 3 per H2 section.
    h3s = results["long_tail"][:12]

    lines = []
    lines.append(f"# Blog Outline: {pk}")
    lines.append("")
    lines.append(f"**Primary Keyword**: {pk} ({results['priority']})")
    lines.append("**Target Length**: 1500-2500 words")
    lines.append(f"**Generated**: {datetime.now().strftime('%Y-%m-%d')}")
    lines.append("")
    lines.append("---")
    lines.append("")

    # Title
    lines.append("## Title")
    lines.append("```")
    lines.append(f"{pk.title()} — Complete Guide {CURRENT_YEAR}")
    lines.append("```")
    lines.append("")

    # Meta description: lead with the primary keyword, mention up to three
    # section topics, and trim to the ~160-character SERP display limit.
    meta_parts = [f"Learn about {pk}"]
    if h2s:
        meta_parts.append(f"Covers {', '.join(h2s[:3])}")
    meta = ". ".join(meta_parts) + "."

    lines.append("## Meta Description")
    lines.append("```")
    lines.append(meta[:160])
    lines.append("```")
    lines.append("")

    # Blog structure
    lines.append("## Blog Structure")
    lines.append("")
    lines.append("### Introduction (150 words)")
    lines.append(f"- Include \"{pk}\" in first 100 words")
    lines.append("- Hook with a problem or trending statistic")
    lines.append("- Preview the key topics covered")
    lines.append("")

    h3_idx = 0
    for h2 in h2s:
        lines.append(f"### {h2}")

        # Assign up to 3 long-tail questions as H3s, consumed in order —
        # earlier H2 sections absorb the strongest candidates first.
        assigned = 0
        while h3_idx < len(h3s) and assigned < 3:
            lines.append(f"- H3: {h3s[h3_idx]}")
            lines.append("  - Answer in 150-200 words")
            h3_idx += 1
            assigned += 1

        # If no long-tail matched, suggest generic H3s
        if assigned == 0:
            lines.append(f"- H3: What Is {h2}?")
            lines.append(f"- H3: How {h2} Works")

        lines.append("")

    lines.append("### Conclusion (100 words)")
    lines.append("- Summarize key points")
    lines.append(f"- Mention \"{pk}\" once")
    lines.append("- Include call-to-action")
    lines.append("")

    # Keywords summary
    lines.append("---")
    lines.append("")
    lines.append("## Keywords to Include")
    lines.append("")
    lines.append(f"**Primary** (1-2% density): {pk}")

    secondaries = [kw["query"] for kw in (results["high_growth"] + results["moderate"])[:4]]
    if secondaries:
        lines.append(f"**Secondary** (0.5-1% each): {', '.join(secondaries)}")

    if results["long_tail"]:
        lines.append(f"**Long-tail** (H3 headings): {', '.join(results['long_tail'][:6])}")

    return "\n".join(lines)
|
|
297
|
+
|
|
298
|
+
|
|
299
|
+
def print_report(results):
    """Render the keyword-research results as a formatted terminal report.

    Expects the dict produced by research_keywords(): keys "topic",
    "primary_keyword", "priority", "trend", "breakout", "high_growth",
    "long_tail", "h2_topics", and "credits_used". Prints only; returns None.
    """
    bar = "=" * 60
    primary = results["primary_keyword"]

    # Header banner with the researched topic.
    print("\n" + bar)
    print(f" SEO KEYWORD RESEARCH: {results['topic']}")
    print(bar)
    print(f"\n PRIMARY KEYWORD: {primary}")
    print(f" PRIORITY: {results['priority']}")

    trend = results["trend"]
    if trend:
        # change_pct is rendered with an explicit sign, one decimal place.
        print(f" TREND: {trend['direction']} ({trend['change_pct']:+.1f}%)")

    if results["breakout"]:
        print("\n BREAKOUT KEYWORDS:")
        for item in results["breakout"]:
            print(f"    >>> {item['query']}")

    if results["high_growth"]:
        print("\n HIGH-GROWTH KEYWORDS:")
        for item in results["high_growth"][:5]:
            print(f"    >> {item['query']} ({item['growth']})")

    if results["long_tail"]:
        print("\n LONG-TAIL (H3 candidates):")
        for query in results["long_tail"][:6]:
            print(f"    ? {query}")

    if results["h2_topics"]:
        print("\n H2 TOPICS:")
        for topic in results["h2_topics"][:5]:
            # Growth annotation is optional per topic; omit when falsy.
            suffix = f" — {topic['growth']}" if topic["growth"] else ""
            print(f"    # {topic['title']}{suffix}")

    print(f"\n API CREDITS USED: {results['credits_used']}")
    print(bar)
|
|
337
|
+
|
|
338
|
+
|
|
339
|
+
def _build_parser():
    """Construct the argument parser for the keyword-research CLI."""
    parser = argparse.ArgumentParser(description="Blog SEO keyword research via Google Trends")
    parser.add_argument("topic", help="Blog topic to research")
    parser.add_argument("--geo", default="", help="Geographic filter (e.g., US, GB)")
    parser.add_argument("--date", default="today 3-m", help="Time range (default: today 3-m)")
    parser.add_argument("--full", action="store_true", help="Include trend validation (+1 API credit)")
    parser.add_argument("--output", "-o", help="Save blog outline to file (markdown)")
    parser.add_argument("--json", action="store_true", help="Output raw results as JSON")
    return parser


def main():
    """CLI entry point: parse arguments, run the research, then print the
    report and blog outline (or raw JSON), optionally saving the outline.
    """
    opts = _build_parser().parse_args()
    api_key = get_api_key()

    print(f"Researching: \"{opts.topic}\"")
    print(f"Region: {opts.geo or 'Worldwide'} | Date: {opts.date}")

    results = research_keywords(opts.topic, api_key, opts.geo, opts.date, opts.full)

    if opts.json:
        # Raw JSON mode: hide the internal credit counter and stop here.
        printable = {key: value for key, value in results.items() if key != "credits_used"}
        print(json.dumps(printable, indent=2))
        return

    print_report(results)

    # Build the markdown outline and echo it to the terminal.
    outline = generate_outline(results)
    print("\n" + outline)

    if opts.output:
        Path(opts.output).write_text(outline, encoding="utf-8")
        print(f"\nOutline saved to: {opts.output}")
|
|
370
|
+
|
|
371
|
+
|
|
372
|
+
# Run the CLI only when executed as a script (not when imported as a module).
if __name__ == "__main__":
    main()
|
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
# hackernews-intel -- Environment Variables
|
|
2
|
+
# ==========================================
|
|
3
|
+
|
|
4
|
+
# Required: comma-separated keywords to monitor on Hacker News
|
|
5
|
+
# Example: "claude code,LLM agents,deno runtime,opentelemetry"
|
|
6
|
+
# Tips:
|
|
7
|
+
# - Use 2-4 word phrases for precision (e.g. "claude code" not "claude")
|
|
8
|
+
# - Keyword matching is case-insensitive
|
|
9
|
+
# - Separate each keyword with a comma, no quotes around individual keywords
|
|
10
|
+
HN_KEYWORDS=your keyword here,another keyword,third keyword
|
|
11
|
+
|
|
12
|
+
# Required: Slack Incoming Webhook URL for alerts
|
|
13
|
+
# Create one at: https://api.slack.com/apps
|
|
14
|
+
# 1. Create or select your app
|
|
15
|
+
# 2. Enable "Incoming Webhooks"
|
|
16
|
+
# 3. Add a webhook to your workspace and select the channel
|
|
17
|
+
# 4. Copy the webhook URL (starts with https://hooks.slack.com/services/)
|
|
18
|
+
SLACK_WEBHOOK=https://hooks.slack.com/services/your/webhook/url
|
|
19
|
+
|
|
20
|
+
# Optional: minimum number of points (upvotes) a post must have to trigger an alert
|
|
21
|
+
# Default: 0 (alert on all matching posts)
|
|
22
|
+
# Set to 5-10 to reduce noise from very new posts
|
|
23
|
+
HN_MIN_POINTS=0
|
|
24
|
+
|
|
25
|
+
# Optional: include HN comments in keyword search (in addition to stories)
|
|
26
|
+
# Default: false — stories only
|
|
27
|
+
# Set to true to also monitor comments (significantly higher volume)
|
|
28
|
+
HN_INCLUDE_COMMENTS=false
|
|
29
|
+
|
|
30
|
+
# Optional: path to the SQLite database file for deduplication
|
|
31
|
+
# Default: ./hn-intel.db (adjacent to the script)
|
|
32
|
+
# Use an absolute path if running from a different working directory
|
|
33
|
+
HN_DB_PATH=./hn-intel.db
|