@opendirectory.dev/skills 0.1.34 → 0.1.36
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +1 -1
- package/registry.json +20 -0
- package/skills/gh-issue-to-demand-signal/.env.example +3 -0
- package/skills/gh-issue-to-demand-signal/README.md +118 -0
- package/skills/gh-issue-to-demand-signal/SKILL.md +638 -0
- package/skills/gh-issue-to-demand-signal/evals/evals.json +118 -0
- package/skills/gh-issue-to-demand-signal/references/demand-categories.md +181 -0
- package/skills/gh-issue-to-demand-signal/references/gtm-translation.md +211 -0
- package/skills/npm-downloads-to-leads/.env.example +4 -0
- package/skills/npm-downloads-to-leads/README.md +146 -0
- package/skills/npm-downloads-to-leads/SKILL.md +670 -0
- package/skills/npm-downloads-to-leads/evals/evals.json +119 -0
- package/skills/npm-downloads-to-leads/references/outreach-timing.md +163 -0
- package/skills/npm-downloads-to-leads/references/velocity-scoring.md +136 -0
- package/skills/npm-downloads-to-leads/scripts/fetch.py +372 -0
|
@@ -0,0 +1,118 @@
|
|
|
1
|
+
[
|
|
2
|
+
{
|
|
3
|
+
"id": "eval_001",
|
|
4
|
+
"name": "Popular OSS repo: full pipeline with 100+ issues, all 6 categories populated",
|
|
5
|
+
"description": "A large, active public repo with hundreds of open issues. Validates the full 8-step workflow, noise filtering, demand scoring, all 6 categories appearing in output, and correct sorting.",
|
|
6
|
+
"input": {
|
|
7
|
+
"prompt": "Scan competitor GitHub issues: https://github.com/facebook/react",
|
|
8
|
+
"env": {
|
|
9
|
+
"GITHUB_TOKEN": "set"
|
|
10
|
+
}
|
|
11
|
+
},
|
|
12
|
+
"expected_behavior": [
|
|
13
|
+
"Parses owner=facebook, repo=react correctly",
|
|
14
|
+
"Fetches 2 pages of issues from GitHub REST API",
|
|
15
|
+
"Checks X-RateLimit-Remaining after first page fetch",
|
|
16
|
+
"Filters out pull requests (pull_request key present), bot-authored issues, chore/deps/bump title patterns, zero-reaction + zero-comment issues",
|
|
17
|
+
"Issue count after filtering is above 10 -- proceeds to clustering",
|
|
18
|
+
"demand_score computed as (reactions_plus1 * 2) + (comments * 0.5) for each issue",
|
|
19
|
+
"ignored_demand field set to true only for issues with reactions >= 10, age_days >= 180, and no planned label",
|
|
20
|
+
"Skill prints filtered issues and asks the AI to classify them into 6 categories",
|
|
21
|
+
"AI writes /tmp/ghd-clusters.json with classified_issues, cluster_themes, and category_counts",
|
|
22
|
+
"All 6 categories appear in category_counts",
|
|
23
|
+
"Top-10 list sorted by demand_score descending",
|
|
24
|
+
"Skill prints top 3 clusters and asks the AI to write a messaging brief",
|
|
25
|
+
"AI writes /tmp/ghd-brief.json with exactly 3 positioning_angles, 3 outreach_hooks, 3 cluster_headlines",
|
|
26
|
+
"Every outreach hook contains a quoted verbatim issue title",
|
|
27
|
+
"Output saved to docs/demand-signals/facebook-react-[date].md",
|
|
28
|
+
"No em dashes in any output section"
|
|
29
|
+
],
|
|
30
|
+
"expected_output": "Full demand gap report with leaderboard, ignored demand section, top-10, cluster deep dives, messaging brief, and GTM angles"
|
|
31
|
+
},
|
|
32
|
+
{
|
|
33
|
+
"id": "eval_002",
|
|
34
|
+
"name": "Small or inactive repo: graceful stop after Step 4",
|
|
35
|
+
"description": "A repo with very few open issues or a niche project where most issues have zero reactions. After noise filtering, fewer than 10 issues remain. Validates graceful stop with a clear explanation.",
|
|
36
|
+
"input": {
|
|
37
|
+
"prompt": "Analyze issues in this repo: https://github.com/nicowillis/tiny-test-repo",
|
|
38
|
+
"env": {
|
|
39
|
+
"GITHUB_TOKEN": "set"
|
|
40
|
+
}
|
|
41
|
+
},
|
|
42
|
+
"expected_behavior": [
|
|
43
|
+
"Fetches issues successfully",
|
|
44
|
+
"Noise filter removes most issues (zero engagement, bot patterns, etc.)",
|
|
45
|
+
"After filtering, fewer than 10 issues remain",
|
|
46
|
+
"Stops at Step 4 -- does NOT proceed to clustering",
|
|
47
|
+
"Tells the user exactly how many issues were found and how many were filtered",
|
|
48
|
+
"Explains why fewer than 10 issues is insufficient for reliable clustering",
|
|
49
|
+
"Suggests trying a larger or more community-engaged repo",
|
|
50
|
+
"No partial output generated"
|
|
51
|
+
],
|
|
52
|
+
"expected_output": "Graceful stop message with issue counts, noise breakdown, and suggestion to try a different repo"
|
|
53
|
+
},
|
|
54
|
+
{
|
|
55
|
+
"id": "eval_003",
|
|
56
|
+
"name": "Private or non-existent repo: stops at Step 3 with exact error",
|
|
57
|
+
"description": "User provides a URL for a repo that either does not exist (404) or is private (403). Validates that the skill stops at Step 3 with a clear, actionable error message.",
|
|
58
|
+
"input": {
|
|
59
|
+
"prompt": "Find demand gaps in https://github.com/nonexistent-org/nonexistent-repo-xyz",
|
|
60
|
+
"env": {
|
|
61
|
+
"GITHUB_TOKEN": "set"
|
|
62
|
+
}
|
|
63
|
+
},
|
|
64
|
+
"expected_behavior": [
|
|
65
|
+
"Parses owner/repo from URL",
|
|
66
|
+
"Attempts GitHub API fetch",
|
|
67
|
+
"GitHub returns 404",
|
|
68
|
+
"Stops immediately at Step 3",
|
|
69
|
+
"Tells the user: 'Repo not found. Check the URL or slug and try again. Private repos are not accessible without authentication and explicit repo scope.'",
|
|
70
|
+
"Does NOT attempt noise filtering",
|
|
71
|
+
"Does NOT proceed to clustering"
|
|
72
|
+
],
|
|
73
|
+
"expected_output": "Immediate stop at Step 3 with 404 error message. No partial analysis generated."
|
|
74
|
+
},
|
|
75
|
+
{
|
|
76
|
+
"id": "eval_004",
|
|
77
|
+
"name": "owner/repo slug input (no full URL): parses correctly and runs full pipeline",
|
|
78
|
+
"description": "User provides an owner/repo slug instead of a full GitHub URL (e.g. 'vercel/next.js' instead of 'https://github.com/vercel/next.js'). Validates that the slug parsing path works correctly.",
|
|
79
|
+
"input": {
|
|
80
|
+
"prompt": "What are users asking for in vercel/next.js?",
|
|
81
|
+
"env": {
|
|
82
|
+
"GITHUB_TOKEN": "set"
|
|
83
|
+
}
|
|
84
|
+
},
|
|
85
|
+
"expected_behavior": [
|
|
86
|
+
"Step 2 detects no 'github.com' URL in the input",
|
|
87
|
+
"Falls into the owner/repo slug parsing path",
|
|
88
|
+
"Correctly splits 'vercel/next.js' into owner=vercel, repo=next.js",
|
|
89
|
+
"Writes 'vercel/next.js' to /tmp/ghd-target.txt",
|
|
90
|
+
"Step 3 fetches from https://api.github.com/repos/vercel/next.js/issues successfully",
|
|
91
|
+
"Full pipeline runs to completion",
|
|
92
|
+
"Output filename uses 'vercel-next.js' as the repo slug",
|
|
93
|
+
"No error about invalid URL format"
|
|
94
|
+
],
|
|
95
|
+
"expected_output": "Full demand gap report identical in format to a full URL run. Slug parsing is transparent to the output."
|
|
96
|
+
},
|
|
97
|
+
{
|
|
98
|
+
"id": "eval_005",
|
|
99
|
+
"name": "No GITHUB_TOKEN, rate limit hit: stops with reset time and token instructions",
|
|
100
|
+
"description": "User has no GITHUB_TOKEN and has already hit the 60/hr unauthenticated rate limit. Validates that the skill detects the rate limit header and stops with the reset time and token setup instructions.",
|
|
101
|
+
"input": {
|
|
102
|
+
"prompt": "What are users asking for in facebook/react?",
|
|
103
|
+
"env": {
|
|
104
|
+
"GITHUB_TOKEN": "not set"
|
|
105
|
+
}
|
|
106
|
+
},
|
|
107
|
+
"expected_behavior": [
|
|
108
|
+
"Step 1 notes GITHUB_TOKEN is not set and warns about 60 req/hr limit",
|
|
109
|
+
"Step 3 attempts GitHub API fetch without token",
|
|
110
|
+
"GitHub API returns X-RateLimit-Remaining: 0 header",
|
|
111
|
+
"Skill detects the header value after the first page fetch",
|
|
112
|
+
"Stops immediately with message: exact reset time (converted from X-RateLimit-Reset timestamp) and instructions to add GITHUB_TOKEN at github.com/settings/tokens",
|
|
113
|
+
"Does NOT attempt page 2 fetch",
|
|
114
|
+
"Does NOT proceed to noise filtering or clustering"
|
|
115
|
+
],
|
|
116
|
+
"expected_output": "Stop at Step 3 with rate limit message, reset time, and GitHub token setup link. No analysis generated."
|
|
117
|
+
}
|
|
118
|
+
]
|
|
@@ -0,0 +1,181 @@
|
|
|
1
|
+
# Demand Categories Reference
|
|
2
|
+
|
|
3
|
+
Used by SKILL.md Step 5 to guide AI classification of GitHub issues into one of 6 demand categories.
|
|
4
|
+
|
|
5
|
+
---
|
|
6
|
+
|
|
7
|
+
## The 6 Categories
|
|
8
|
+
|
|
9
|
+
### feature_gap
|
|
10
|
+
|
|
11
|
+
**What it captures:** Functionality the product does not have yet. User is describing something they want to do that is currently impossible.
|
|
12
|
+
|
|
13
|
+
**Signal phrases:**
|
|
14
|
+
- "add support for..."
|
|
15
|
+
- "it would be great if..."
|
|
16
|
+
- "please add..."
|
|
17
|
+
- "feature request:"
|
|
18
|
+
- "allow users to..."
|
|
19
|
+
- "I wish I could..."
|
|
20
|
+
- "would love to see..."
|
|
21
|
+
|
|
22
|
+
**Examples:**
|
|
23
|
+
- "Add support for keyboard shortcuts in the editor"
|
|
24
|
+
- "Allow exporting to PDF format"
|
|
25
|
+
- "Feature request: dark mode"
|
|
26
|
+
- "Support for multiple workspaces per account"
|
|
27
|
+
|
|
28
|
+
**Scoring note:** feature_gap issues with 20+ reactions represent product roadmap signals. These are the gaps your product can claim as intentional design choices.
|
|
29
|
+
|
|
30
|
+
---
|
|
31
|
+
|
|
32
|
+
### bug_pattern
|
|
33
|
+
|
|
34
|
+
**What it captures:** Recurring broken behavior that erodes user trust. Distinct from a one-off error -- the word "pattern" matters. Multiple issues with similar titles indicate a systemic problem.
|
|
35
|
+
|
|
36
|
+
**Signal phrases:**
|
|
37
|
+
- "broken when..."
|
|
38
|
+
- "not working..."
|
|
39
|
+
- "fails to..."
|
|
40
|
+
- "error when..."
|
|
41
|
+
- "crashes if..."
|
|
42
|
+
- "regression in..."
|
|
43
|
+
- "breaks after..."
|
|
44
|
+
|
|
45
|
+
**Examples:**
|
|
46
|
+
- "Login fails when using SSO with Google"
|
|
47
|
+
- "File upload crashes on files over 10MB"
|
|
48
|
+
- "Pagination breaks on mobile"
|
|
49
|
+
- "Notifications not sending after the latest update"
|
|
50
|
+
|
|
51
|
+
**Scoring note:** Bug patterns with high reactions signal trust erosion. If a competitor has 5+ high-reaction bug issues in the same functional area, that area is a liability in their product positioning.
|
|
52
|
+
|
|
53
|
+
---
|
|
54
|
+
|
|
55
|
+
### ux_complaint
|
|
56
|
+
|
|
57
|
+
**What it captures:** Friction, confusion, or workflow problems. The feature exists but it is hard to use, hard to find, or does not match how users actually work.
|
|
58
|
+
|
|
59
|
+
**Signal phrases:**
|
|
60
|
+
- "confusing..."
|
|
61
|
+
- "hard to..."
|
|
62
|
+
- "unclear how to..."
|
|
63
|
+
- "should be easier to..."
|
|
64
|
+
- "the UI for X is..."
|
|
65
|
+
- "annoying that..."
|
|
66
|
+
- "clunky..."
|
|
67
|
+
- "why does X require..."
|
|
68
|
+
|
|
69
|
+
**Examples:**
|
|
70
|
+
- "Confusing navigation between projects"
|
|
71
|
+
- "Hard to find the settings for notifications"
|
|
72
|
+
- "Should be easier to bulk-edit items"
|
|
73
|
+
- "The import flow has too many steps"
|
|
74
|
+
|
|
75
|
+
**Scoring note:** UX complaints are positioning gold. "Confusing to use" is a contrast you can own directly. If their users are calling something confusing, your messaging can address that exact friction without naming the competitor.
|
|
76
|
+
|
|
77
|
+
---
|
|
78
|
+
|
|
79
|
+
### performance
|
|
80
|
+
|
|
81
|
+
**What it captures:** Slowness, timeouts, resource usage, or reliability problems that degrade the experience even when the feature works correctly.
|
|
82
|
+
|
|
83
|
+
**Signal phrases:**
|
|
84
|
+
- "slow..."
|
|
85
|
+
- "timeout..."
|
|
86
|
+
- "takes too long..."
|
|
87
|
+
- "high memory usage..."
|
|
88
|
+
- "performance regression..."
|
|
89
|
+
- "loading forever..."
|
|
90
|
+
- "lags when..."
|
|
91
|
+
- "CPU usage..."
|
|
92
|
+
|
|
93
|
+
**Examples:**
|
|
94
|
+
- "Search is slow on repos with 1000+ files"
|
|
95
|
+
- "Dashboard takes 10+ seconds to load"
|
|
96
|
+
- "Memory usage spikes when processing large files"
|
|
97
|
+
- "Build times increased 3x after v2.0"
|
|
98
|
+
|
|
99
|
+
**Scoring note:** Performance issues cluster by data size or scale. If the complaints mention large repos, large teams, or high-volume usage, the competitor has a scale ceiling. That ceiling is your advantage if you have solved it.
|
|
100
|
+
|
|
101
|
+
---
|
|
102
|
+
|
|
103
|
+
### integration_missing
|
|
104
|
+
|
|
105
|
+
**What it captures:** Requests to connect with other tools, APIs, or platforms. Users want the product to work alongside something else in their stack.
|
|
106
|
+
|
|
107
|
+
**Signal phrases:**
|
|
108
|
+
- "integrate with..."
|
|
109
|
+
- "support for [tool name]..."
|
|
110
|
+
- "webhook..."
|
|
111
|
+
- "API for..."
|
|
112
|
+
- "connect to..."
|
|
113
|
+
- "import from..."
|
|
114
|
+
- "sync with..."
|
|
115
|
+
- "plugin for..."
|
|
116
|
+
|
|
117
|
+
**Examples:**
|
|
118
|
+
- "Integrate with Slack for notifications"
|
|
119
|
+
- "Support GitHub Actions webhook triggers"
|
|
120
|
+
- "Add Zapier integration"
|
|
121
|
+
- "Import from Notion"
|
|
122
|
+
- "VS Code extension"
|
|
123
|
+
|
|
124
|
+
**Scoring note:** Integration requests cluster around the tools their users already use. A high-reaction integration request tells you where their users spend the rest of their day. If you already have that integration, it is a direct switch argument.
|
|
125
|
+
|
|
126
|
+
---
|
|
127
|
+
|
|
128
|
+
### docs_missing
|
|
129
|
+
|
|
130
|
+
**What it captures:** Confusion caused by absent, incomplete, or incorrect documentation. The product may work correctly, but users cannot figure out how to use it.
|
|
131
|
+
|
|
132
|
+
**Signal phrases:**
|
|
133
|
+
- "no documentation for..."
|
|
134
|
+
- "docs are missing..."
|
|
135
|
+
- "unclear how to..."
|
|
136
|
+
- "example for..."
|
|
137
|
+
- "how do I..."
|
|
138
|
+
- "docs don't explain..."
|
|
139
|
+
- "add docs for..."
|
|
140
|
+
- "tutorial for..."
|
|
141
|
+
|
|
142
|
+
**Examples:**
|
|
143
|
+
- "No documentation for the webhook authentication flow"
|
|
144
|
+
- "Missing example for advanced configuration"
|
|
145
|
+
- "How do I set up custom domains? Docs don't cover this."
|
|
146
|
+
- "Add a tutorial for migrating from v1 to v2"
|
|
147
|
+
|
|
148
|
+
**Scoring note:** docs_missing issues often indicate a product that has grown faster than its documentation. High-reaction docs issues in a specific area indicate that the area is both important to users and opaque in practice.
|
|
149
|
+
|
|
150
|
+
---
|
|
151
|
+
|
|
152
|
+
## Classification Rules
|
|
153
|
+
|
|
154
|
+
### One category per issue
|
|
155
|
+
Every issue gets exactly one category. Use the primary pain, not all possible interpretations.
|
|
156
|
+
|
|
157
|
+
- "The export feature is broken and also slow" -- classify as `bug_pattern` (primary pain: it does not work)
|
|
158
|
+
- "Export to PDF is slow" -- classify as `performance` (it works, it is just slow)
|
|
159
|
+
- "Add export to PDF" -- classify as `feature_gap` (it does not exist)
|
|
160
|
+
|
|
161
|
+
### When to use ux_complaint vs docs_missing
|
|
162
|
+
- If the user says "I can't figure out how to X" and the docs don't cover it: `docs_missing`
|
|
163
|
+
- If the user says "X is confusing" or "X is hard to use" and the feature exists: `ux_complaint`
|
|
164
|
+
- If both apply: `ux_complaint` (the UI is the product, the docs are secondary)
|
|
165
|
+
|
|
166
|
+
### When to use bug_pattern vs performance
|
|
167
|
+
- If it does not work at all: `bug_pattern`
|
|
168
|
+
- If it works but is slow or resource-heavy: `performance`
|
|
169
|
+
|
|
170
|
+
---
|
|
171
|
+
|
|
172
|
+
## Category Demand Signal Interpretation
|
|
173
|
+
|
|
174
|
+
| Category | What high demand here tells you |
|
|
175
|
+
|---|---|
|
|
176
|
+
| feature_gap | Their roadmap is behind their users. Name what you have built. |
|
|
177
|
+
| bug_pattern | Trust erosion in a specific area. Position your reliability there. |
|
|
178
|
+
| ux_complaint | Their users are struggling. Position your simplicity there. |
|
|
179
|
+
| performance | They hit a scale ceiling. Position your throughput or response time. |
|
|
180
|
+
| integration_missing | Their users live in a different stack. Show your integration depth. |
|
|
181
|
+
| docs_missing | They ship but do not explain. Position your onboarding and support. |
|
|
@@ -0,0 +1,211 @@
|
|
|
1
|
+
# GTM Translation Reference
|
|
2
|
+
|
|
3
|
+
How to convert demand clusters from competitor GitHub issues into GTM language. Used by SKILL.md Step 6 to guide the messaging brief generation.
|
|
4
|
+
|
|
5
|
+
---
|
|
6
|
+
|
|
7
|
+
## The Translation Problem
|
|
8
|
+
|
|
9
|
+
Raw demand data from GitHub is in user language, not buyer language. A product manager reads:
|
|
10
|
+
|
|
11
|
+
> "Add support for multiple workspaces per account" -- 87 reactions, open 2 years
|
|
12
|
+
|
|
13
|
+
A GTM-trained analyst reads:
|
|
14
|
+
|
|
15
|
+
> "Enterprise buyers need multi-tenancy. This competitor has not shipped it in 2 years. 87 teams are waiting. If you have this, lead with it."
|
|
16
|
+
|
|
17
|
+
This reference document is the bridge between those two readings.
|
|
18
|
+
|
|
19
|
+
---
|
|
20
|
+
|
|
21
|
+
## Translation by Category
|
|
22
|
+
|
|
23
|
+
### feature_gap clusters
|
|
24
|
+
|
|
25
|
+
**What the data shows:** Users want something that does not exist.
|
|
26
|
+
|
|
27
|
+
**GTM translation:**
|
|
28
|
+
- If you have the feature: "We built what [competitor] has not." Name the feature specifically.
|
|
29
|
+
- If you are building it: Use the cluster as proof that the market exists. "87 teams on [competitor] asked for X. We built it."
|
|
30
|
+
- If you do not have it: This is a roadmap signal, not a positioning signal yet.
|
|
31
|
+
|
|
32
|
+
**Outreach hook pattern:**
|
|
33
|
+
```
|
|
34
|
+
"[Number] teams on [competitor] have been asking for [specific feature] for [time].
|
|
35
|
+
We shipped [your feature] in [your product] [time ago / as a core feature].
|
|
36
|
+
[One-line ask]."
|
|
37
|
+
```
|
|
38
|
+
|
|
39
|
+
**Example:**
|
|
40
|
+
```
|
|
41
|
+
"Over 200 teams on [competitor] asked for multi-workspace support. It has been open for 3 years with no planned label.
|
|
42
|
+
We built workspace isolation as a core feature, not an add-on.
|
|
43
|
+
Would a 20-minute call be useful?"
|
|
44
|
+
```
|
|
45
|
+
|
|
46
|
+
---
|
|
47
|
+
|
|
48
|
+
### bug_pattern clusters
|
|
49
|
+
|
|
50
|
+
**What the data shows:** Something is reliably broken. High reaction counts on bug issues mean many users hit the same wall.
|
|
51
|
+
|
|
52
|
+
**GTM translation:**
|
|
53
|
+
- Do not say "they are buggy." Say: "Teams in [X workflow] need reliability."
|
|
54
|
+
- Position the broken area as a category where you have invested specifically.
|
|
55
|
+
- The number of reactions is the credibility anchor. Use it.
|
|
56
|
+
|
|
57
|
+
**Outreach hook pattern:**
|
|
58
|
+
```
|
|
59
|
+
"[Number] teams using [competitor] hit [specific bug area].
|
|
60
|
+
[Your product] was built [for/around/specifically to handle] this workflow without [the failure mode].
|
|
61
|
+
[One-line ask]."
|
|
62
|
+
```
|
|
63
|
+
|
|
64
|
+
**Example:**
|
|
65
|
+
```
|
|
66
|
+
"Teams using [competitor] have hit login failures with SSO -- 43 reactions on that issue, open for 18 months.
|
|
67
|
+
We built our auth layer around SSO-first design and have not had an SSO outage in 18 months of production.
|
|
68
|
+
Worth a conversation?"
|
|
69
|
+
```
|
|
70
|
+
|
|
71
|
+
---
|
|
72
|
+
|
|
73
|
+
### ux_complaint clusters
|
|
74
|
+
|
|
75
|
+
**What the data shows:** The feature exists but users cannot use it without friction.
|
|
76
|
+
|
|
77
|
+
**GTM translation:**
|
|
78
|
+
- Simplicity is a product decision, not a feature. Position it as intentional design.
|
|
79
|
+
- Use "setup time" or "time to value" as the measurable proxy for simplicity.
|
|
80
|
+
- Do not say "our UI is better." Say: "Teams get to [outcome] in [time], without [the friction they described]."
|
|
81
|
+
|
|
82
|
+
**Outreach hook pattern:**
|
|
83
|
+
```
|
|
84
|
+
"[Number] users of [competitor] said [specific UX friction in quotes].
|
|
85
|
+
[Your product] gets [persona] to [outcome] in [time] with [fewer steps / no setup / no configuration].
|
|
86
|
+
[One-line ask]."
|
|
87
|
+
```
|
|
88
|
+
|
|
89
|
+
**Example:**
|
|
90
|
+
```
|
|
91
|
+
"158 users of [competitor] called the project navigation 'confusing' -- that issue is 2 years old.
|
|
92
|
+
We redesigned navigation around the workflow, not the feature tree. Teams reach their first result in under 2 minutes.
|
|
93
|
+
Would you like to see it?"
|
|
94
|
+
```
|
|
95
|
+
|
|
96
|
+
---
|
|
97
|
+
|
|
98
|
+
### performance clusters
|
|
99
|
+
|
|
100
|
+
**What the data shows:** The product hits a scale ceiling. High-reaction performance issues cluster around a specific bottleneck (large files, large teams, high query volume).
|
|
101
|
+
|
|
102
|
+
**GTM translation:**
|
|
103
|
+
- Name the scale ceiling specifically: "repos over 1000 files", "teams over 50 users", "queries over 10k/day".
|
|
104
|
+
- If you have benchmarks, use them. If not, use the absence of the complaint as proof.
|
|
105
|
+
- Performance positioning works best for technical buyers (engineers, infrastructure teams).
|
|
106
|
+
|
|
107
|
+
**Outreach hook pattern:**
|
|
108
|
+
```
|
|
109
|
+
"Teams at [competitor] hit [specific bottleneck] at [scale threshold].
|
|
110
|
+
[Your product] handles [scale threshold] in [benchmark or time].
|
|
111
|
+
[One-line ask]."
|
|
112
|
+
```
|
|
113
|
+
|
|
114
|
+
**Example:**
|
|
115
|
+
```
|
|
116
|
+
"Engineering teams using [competitor] report 10+ second load times on repos over 500 files -- 67 reactions, no fix in sight.
|
|
117
|
+
We load repos of that size in under 1.5 seconds because we index differently.
|
|
118
|
+
I can show you a benchmark on a repo your size."
|
|
119
|
+
```
|
|
120
|
+
|
|
121
|
+
---
|
|
122
|
+
|
|
123
|
+
### integration_missing clusters
|
|
124
|
+
|
|
125
|
+
**What the data shows:** Users work in tools this competitor does not connect to. High-reaction integration requests tell you exactly what else is in their users' daily stack.
|
|
126
|
+
|
|
127
|
+
**GTM translation:**
|
|
128
|
+
- If you have the integration: make it the lead, not a footnote.
|
|
129
|
+
- The reaction count is the size of the audience that wants this bridge. Name the number.
|
|
130
|
+
- Integration positioning works best when the target tool is a category leader (Slack, Notion, GitHub Actions, Salesforce).
|
|
131
|
+
|
|
132
|
+
**Outreach hook pattern:**
|
|
133
|
+
```
|
|
134
|
+
"[Number] teams on [competitor] asked for [specific integration]. It has been open [time] with no planned label.
|
|
135
|
+
[Your product] connects to [integration] natively -- [brief description of how it works].
|
|
136
|
+
[One-line ask]."
|
|
137
|
+
```
|
|
138
|
+
|
|
139
|
+
**Example:**
|
|
140
|
+
```
|
|
141
|
+
"94 teams on [competitor] asked for Slack integration. The request is 3 years old with no planned label.
|
|
142
|
+
We shipped bidirectional Slack sync 6 months ago -- alerts in Slack, actions back to [your product].
|
|
143
|
+
Worth showing you?"
|
|
144
|
+
```
|
|
145
|
+
|
|
146
|
+
---
|
|
147
|
+
|
|
148
|
+
### docs_missing clusters
|
|
149
|
+
|
|
150
|
+
**What the data shows:** Users cannot figure out how to do something. The product may work, but the path to value is opaque.
|
|
151
|
+
|
|
152
|
+
**GTM translation:**
|
|
153
|
+
- Documentation is a trust signal, not a feature. "We have clear docs" is not a hook.
|
|
154
|
+
- Translate to: "Teams are in production in [time]" or "We have a guided setup for [exact use case they documented badly]."
|
|
155
|
+
- Onboarding speed is the business outcome. Use it.
|
|
156
|
+
|
|
157
|
+
**Outreach hook pattern:**
|
|
158
|
+
```
|
|
159
|
+
"[Number] users on [competitor] asked for documentation on [specific topic].
|
|
160
|
+
We have [specific guide / template / interactive tutorial] for [exact use case].
|
|
161
|
+
Teams using our [guide/setup] are in production in [time].
|
|
162
|
+
[One-line ask]."
|
|
163
|
+
```
|
|
164
|
+
|
|
165
|
+
**Example:**
|
|
166
|
+
```
|
|
167
|
+
"38 teams on [competitor] asked for documentation on webhook authentication. There is still no official guide.
|
|
168
|
+
We have a step-by-step webhook setup guide that gets teams from zero to first event in under 15 minutes.
|
|
169
|
+
I can send it -- it works for any webhook destination, not just ours."
|
|
170
|
+
```
|
|
171
|
+
|
|
172
|
+
---
|
|
173
|
+
|
|
174
|
+
## Using Verbatim Issue Language
|
|
175
|
+
|
|
176
|
+
The most effective outreach hooks use the user's exact words from the issue title. This works because:
|
|
177
|
+
|
|
178
|
+
1. It proves you read real feedback, not marketing copy.
|
|
179
|
+
2. It speaks in the buyer's language, not product language.
|
|
180
|
+
3. It names the pain before pitching the solution.
|
|
181
|
+
|
|
182
|
+
**Wrong approach (paraphrased):**
|
|
183
|
+
> "Many teams find navigation confusing in competitor products."
|
|
184
|
+
|
|
185
|
+
**Right approach (verbatim quote):**
|
|
186
|
+
> "158 users said 'confusing navigation between projects' -- that issue has been open for 2 years. We redesigned for workflow, not feature hierarchy."
|
|
187
|
+
|
|
188
|
+
The quote is the credibility anchor. Do not remove it.
|
|
189
|
+
|
|
190
|
+
---
|
|
191
|
+
|
|
192
|
+
## Ignored Demand: The Highest-Signal Category
|
|
193
|
+
|
|
194
|
+
An issue that meets all three criteria:
|
|
195
|
+
- 10+ reactions (significant real demand)
|
|
196
|
+
- Open 180+ days (competitor has had time to act)
|
|
197
|
+
- No planned/in-progress label (they have explicitly not prioritized it)
|
|
198
|
+
|
|
199
|
+
This is not a feature request. This is a documented unmet need with a timestamp.
|
|
200
|
+
|
|
201
|
+
**GTM translation:** "Their users asked. Their team did not respond. We built it."
|
|
202
|
+
|
|
203
|
+
This framing works across all 6 categories. The combination of volume + age + inaction is the signal. The category determines the messaging angle.
|
|
204
|
+
|
|
205
|
+
**Ignored demand outreach template:**
|
|
206
|
+
```
|
|
207
|
+
"[Competitor]'s users have been asking for [verbatim issue title] for [age].
|
|
208
|
+
[Reaction count] teams upvoted it. There is no planned label.
|
|
209
|
+
[Your product] handles this [natively / differently / as a core feature].
|
|
210
|
+
[One-line ask]."
|
|
211
|
+
```
|
|
@@ -0,0 +1,4 @@
|
|
|
1
|
+
GITHUB_TOKEN= # optional -- github.com/settings/tokens (no scopes needed for public user profiles)
|
|
2
|
+
# Without it: 60 req/hr unauthenticated -- enough for ~10 packages before degrading
|
|
3
|
+
# With it: 5000 req/hr -- handles any reasonable package list without hitting limits
|
|
4
|
+
# The skill degrades gracefully if the limit is hit: shows npm velocity data without GitHub enrichment
|
|
@@ -0,0 +1,146 @@
|
|
|
1
|
+
# npm-downloads-to-leads
|
|
2
|
+
|
|
3
|
+
Give this skill a list of npm packages. It fetches 12 weeks of download data, scores each package by growth velocity, maps maintainers to GitHub and Twitter, and outputs a ranked lead brief per breakout package: who built it, how to reach them, and what to say.
|
|
4
|
+
|
|
5
|
+
## Install
|
|
6
|
+
|
|
7
|
+
```bash
|
|
8
|
+
npx "@opendirectory.dev/skills" install npm-downloads-to-leads --target claude
|
|
9
|
+
```
|
|
10
|
+
|
|
11
|
+
### Video Tutorial
|
|
12
|
+
|
|
13
|
+
https://github.com/user-attachments/assets/ee98a1b5-ebc4-452f-bbfb-c434f2935067
|
|
14
|
+
|
|
15
|
+
### Step 1: Download from GitHub
|
|
16
|
+
1. Click the **Code** button on this repo's GitHub page.
|
|
17
|
+
2. Select **Download ZIP**.
|
|
18
|
+
3. Extract the ZIP on your computer.
|
|
19
|
+
|
|
20
|
+
### Step 2: Install in Claude
|
|
21
|
+
1. Open the **Claude desktop app**.
|
|
22
|
+
2. Go to **Customize** in the sidebar.
|
|
23
|
+
3. Click the **Skills** tab, then the **+** button.
|
|
24
|
+
4. Choose **Upload a skill** and drop the folder or ZIP file.
|
|
25
|
+
|
|
26
|
+
Note: Upload the folder that contains the `SKILL.md` file.
|
|
27
|
+
|
|
28
|
+
## What It Does
|
|
29
|
+
|
|
30
|
+
- Fetches 12 weeks of daily download data per package from the npm Downloads API
|
|
31
|
+
- Aggregates to weekly buckets and computes a velocity score: recent growth x acceleration x sweet-spot multiplier
|
|
32
|
+
- Classifies each package as BREAKOUT, WATCHING, steady, established, or too early
|
|
33
|
+
- Fetches maintainer profiles from the npm registry and GitHub API
|
|
34
|
+
- Extracts GitHub followers, Twitter handle, bio, and company from each maintainer's GitHub profile
|
|
35
|
+
- Generates a lead brief per breakout package: growth story, contact signals, why to reach out now, and a suggested first message
|
|
36
|
+
- Saves output to `docs/npm-leads/[date].md`
|
|
37
|
+
|
|
38
|
+
## Requirements
|
|
39
|
+
|
|
40
|
+
| Requirement | Purpose | How to Set Up |
|
|
41
|
+
|---|---|---|
|
|
42
|
+
| GitHub token | Raises rate limit from 60/hr to 5000/hr for maintainer profile lookups | github.com/settings/tokens (no scopes needed for public user profiles) |
|
|
43
|
+
|
|
44
|
+
No external AI API key needed. The npm Downloads and Registry APIs are fully public with no auth. GitHub token is optional but recommended for lists larger than 10 packages.
|
|
45
|
+
|
|
46
|
+
## Setup
|
|
47
|
+
|
|
48
|
+
```bash
|
|
49
|
+
cp .env.example .env
|
|
50
|
+
# Add GITHUB_TOKEN (optional, recommended for larger package lists)
|
|
51
|
+
```
|
|
52
|
+
|
|
53
|
+
## How to Use
|
|
54
|
+
|
|
55
|
+
```
|
|
56
|
+
"Find leads from these npm packages: esbuild, vite, @hono/hono, zod"
|
|
57
|
+
"Track download trends for competitor packages: turbo, nx, lerna"
|
|
58
|
+
"Who maintains these breakout npm packages? bun, oxc-parser, biome"
|
|
59
|
+
"Find evangelists before they are famous: @effect-ts/core, fp-ts, zod"
|
|
60
|
+
"Analyze npm momentum for my space: my-package, competitor-a, competitor-b"
|
|
61
|
+
"Map npm maintainers to Twitter for these packages: ..."
|
|
62
|
+
```
|
|
63
|
+
|
|
64
|
+
Include a short description of your product and the skill will tailor the outreach message to your context.
|
|
65
|
+
|
|
66
|
+
## Why Velocity Score, Not Raw Downloads
|
|
67
|
+
|
|
68
|
+
React gets 50 million downloads a week. Its maintainers are already famous, already inundated with outreach, and already aligned with a framework you are likely building on.
|
|
69
|
+
|
|
70
|
+
The velocity score finds the package going from 1K to 8K weekly downloads over 8 weeks. That maintainer just crossed a growth inflection. They are building an audience, they are not yet overwhelmed, and they are in a phase where your product makes a difference to their workflow.
|
|
71
|
+
|
|
72
|
+
The formula: `velocity_score = growth_ratio x acceleration x sweet_spot_multiplier`
|
|
73
|
+
|
|
74
|
+
- `growth_ratio`: recent 4-week average divided by prior 4-week average
|
|
75
|
+
- `acceleration`: last 2 weeks vs mid 2 weeks (is growth speeding up?)
|
|
76
|
+
- `sweet_spot_multiplier`: 1.0 for 500 to 500K weekly downloads, lower for noise floor or established giants
|
|
77
|
+
|
|
78
|
+
Breakout threshold: velocity score above 80 AND 500 to 500K weekly downloads.
|
|
79
|
+
|
|
80
|
+
## The Lead Brief
|
|
81
|
+
|
|
82
|
+
For each breakout and watching package:
|
|
83
|
+
|
|
84
|
+
- **Growth story**: exact download numbers, 8-week comparison, weekly trend
|
|
85
|
+
- **Maintainer profile**: GitHub handle, Twitter, bio, company, follower count
|
|
86
|
+
- **Why reach out now**: specific to this package's growth inflection point
|
|
87
|
+
- **Suggested first message**: names the package, its growth, and connects to your product context
|
|
88
|
+
|
|
89
|
+
## Cost Per Run
|
|
90
|
+
|
|
91
|
+
- npm Downloads API: free, no auth, no rate limit concerns
|
|
92
|
+
- npm Registry API: free, no auth
|
|
93
|
+
- GitHub API: free (60 req/hr unauthenticated, 5000/hr with token)
|
|
94
|
+
- AI analysis: uses the model already running the skill; no additional cost
|
|
95
|
+
- Total: free
|
|
96
|
+
|
|
97
|
+
## Standalone Script
|
|
98
|
+
|
|
99
|
+
Run the data fetching step directly from the terminal without Claude. Useful for scheduled jobs, CI pipelines, or exploring data before generating lead briefs.
|
|
100
|
+
|
|
101
|
+
```bash
|
|
102
|
+
# Basic usage
|
|
103
|
+
python3 scripts/fetch.py esbuild zod @hono/hono
|
|
104
|
+
|
|
105
|
+
# With product context
|
|
106
|
+
python3 scripts/fetch.py esbuild zod --context "We build a TypeScript DX platform"
|
|
107
|
+
|
|
108
|
+
# From a file (one package per line)
|
|
109
|
+
python3 scripts/fetch.py --file packages.txt --output results.json
|
|
110
|
+
|
|
111
|
+
# Print to stdout
|
|
112
|
+
python3 scripts/fetch.py esbuild zod --stdout | jq '.summary'
|
|
113
|
+
```
|
|
114
|
+
|
|
115
|
+
The script handles Steps 3 to 5 (download fetch, velocity scoring, maintainer enrichment) and writes a JSON file. Open that file with Claude and ask: "Generate lead briefs from this npm data."
|
|
116
|
+
|
|
117
|
+
```bash
|
|
118
|
+
GITHUB_TOKEN=your_token python3 scripts/fetch.py esbuild zod @hono/hono
|
|
119
|
+
```
|
|
120
|
+
|
|
121
|
+
Script output fields per package:
|
|
122
|
+
|
|
123
|
+
- `velocity_score`, `growth_pct`, `recent_4_avg`, `prior_4_avg`, `tier`
|
|
124
|
+
- `weeks`: array of 12 weekly download counts, oldest to newest
|
|
125
|
+
- `profile.description`, `profile.keywords`, `profile.npm_maintainers`
|
|
126
|
+
- `profile.github_users`: array with `username`, `twitter_username`, `followers`, `bio`, `company`
|
|
127
|
+
|
|
128
|
+
## Project Structure
|
|
129
|
+
|
|
130
|
+
```
|
|
131
|
+
npm-downloads-to-leads/
|
|
132
|
+
├── SKILL.md
|
|
133
|
+
├── README.md
|
|
134
|
+
├── .env.example
|
|
135
|
+
├── scripts/
|
|
136
|
+
│ └── fetch.py standalone data fetcher (Steps 3 to 5, no Claude needed)
|
|
137
|
+
├── evals/
|
|
138
|
+
│ └── evals.json
|
|
139
|
+
└── references/
|
|
140
|
+
├── velocity-scoring.md
|
|
141
|
+
└── outreach-timing.md
|
|
142
|
+
```
|
|
143
|
+
|
|
144
|
+
## License
|
|
145
|
+
|
|
146
|
+
MIT
|