@opendirectory.dev/skills 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/skills/claude-md-generator/.env.example +7 -0
- package/.claude/skills/claude-md-generator/README.md +78 -0
- package/.claude/skills/claude-md-generator/SKILL.md +248 -0
- package/.claude/skills/claude-md-generator/evals/evals.json +35 -0
- package/.claude/skills/claude-md-generator/references/section-guide.md +175 -0
- package/dist/e2e.test.d.ts +1 -0
- package/dist/e2e.test.js +62 -0
- package/dist/fs-adapters.d.ts +4 -0
- package/dist/fs-adapters.js +101 -0
- package/dist/fs-adapters.test.d.ts +1 -0
- package/dist/fs-adapters.test.js +108 -0
- package/dist/index.d.ts +2 -0
- package/dist/index.js +211 -0
- package/dist/transformers.d.ts +6 -0
- package/dist/transformers.js +2 -0
- package/package.json +25 -0
- package/registry.json +226 -0
- package/skills/blog-cover-image-cli/.github/workflows/publish.yml +19 -0
- package/skills/blog-cover-image-cli/LICENSE +15 -0
- package/skills/blog-cover-image-cli/README.md +126 -0
- package/skills/blog-cover-image-cli/SKILL.md +7 -0
- package/skills/blog-cover-image-cli/agent-skill/blog-cover-generator/README.md +30 -0
- package/skills/blog-cover-image-cli/agent-skill/blog-cover-generator/SKILL.md +72 -0
- package/skills/blog-cover-image-cli/bin/cli.js +226 -0
- package/skills/blog-cover-image-cli/examples/100x_UX_Research_AI_Agent.png +0 -0
- package/skills/blog-cover-image-cli/examples/Firecrawl-supabase-bolt.png +0 -0
- package/skills/blog-cover-image-cli/examples/Git-City_Case_study_Cover_Image.jpg +0 -0
- package/skills/blog-cover-image-cli/examples/THE DISTRIBUTION LAYER (2).png +0 -0
- package/skills/blog-cover-image-cli/examples/canva-perplexity-duolingo-cover-image.png +0 -0
- package/skills/blog-cover-image-cli/examples/gamma-mistral-veed.png +0 -0
- package/skills/blog-cover-image-cli/examples/server-survival-case-study-cover-image(1).png +0 -0
- package/skills/blog-cover-image-cli/examples/viral-meme-automation.png +0 -0
- package/skills/blog-cover-image-cli/index.js +2 -0
- package/skills/blog-cover-image-cli/package-lock.json +2238 -0
- package/skills/blog-cover-image-cli/package.json +37 -0
- package/skills/blog-cover-image-cli/src/geminiGenerator.js +126 -0
- package/skills/blog-cover-image-cli/src/imageValidator.js +54 -0
- package/skills/blog-cover-image-cli/src/logoFetcher.js +86 -0
- package/skills/claude-md-generator/.env.example +7 -0
- package/skills/claude-md-generator/README.md +78 -0
- package/skills/claude-md-generator/SKILL.md +254 -0
- package/skills/claude-md-generator/evals/evals.json +35 -0
- package/skills/claude-md-generator/references/section-guide.md +175 -0
- package/skills/cook-the-blog/README.md +86 -0
- package/skills/cook-the-blog/SKILL.md +130 -0
- package/skills/dependency-update-bot/.env.example +13 -0
- package/skills/dependency-update-bot/README.md +101 -0
- package/skills/dependency-update-bot/SKILL.md +376 -0
- package/skills/dependency-update-bot/evals/evals.json +45 -0
- package/skills/dependency-update-bot/references/changelog-patterns.md +201 -0
- package/skills/docs-from-code/.env.example +13 -0
- package/skills/docs-from-code/README.md +97 -0
- package/skills/docs-from-code/SKILL.md +160 -0
- package/skills/docs-from-code/evals/evals.json +29 -0
- package/skills/docs-from-code/references/extraction-guide.md +174 -0
- package/skills/docs-from-code/references/output-template.md +135 -0
- package/skills/docs-from-code/scripts/extract_py.py +238 -0
- package/skills/docs-from-code/scripts/extract_ts.ts +284 -0
- package/skills/docs-from-code/scripts/package.json +18 -0
- package/skills/explain-this-pr/README.md +74 -0
- package/skills/explain-this-pr/SKILL.md +130 -0
- package/skills/explain-this-pr/evals/evals.json +35 -0
- package/skills/google-trends-api-skills/README.md +78 -0
- package/skills/google-trends-api-skills/SKILL.md +7 -0
- package/skills/google-trends-api-skills/google-trends-api/SKILL.md +163 -0
- package/skills/google-trends-api-skills/google-trends-api/references/api-responses.md +188 -0
- package/skills/google-trends-api-skills/google-trends-api/scripts/discover_keywords.py +344 -0
- package/skills/google-trends-api-skills/seo-keyword-research/SKILL.md +205 -0
- package/skills/google-trends-api-skills/seo-keyword-research/references/keyword-placement-guide.md +89 -0
- package/skills/google-trends-api-skills/seo-keyword-research/references/tech-blog-examples.md +207 -0
- package/skills/google-trends-api-skills/seo-keyword-research/scripts/blog_seo_research.py +373 -0
- package/skills/hackernews-intel/.env.example +33 -0
- package/skills/hackernews-intel/README.md +161 -0
- package/skills/hackernews-intel/SKILL.md +156 -0
- package/skills/hackernews-intel/evals/evals.json +35 -0
- package/skills/hackernews-intel/package.json +15 -0
- package/skills/hackernews-intel/scripts/monitor-hn.js +258 -0
- package/skills/kill-the-standup/.env.example +22 -0
- package/skills/kill-the-standup/README.md +84 -0
- package/skills/kill-the-standup/SKILL.md +169 -0
- package/skills/kill-the-standup/evals/evals.json +35 -0
- package/skills/kill-the-standup/references/standup-format.md +102 -0
- package/skills/linkedin-post-generator/.env.example +14 -0
- package/skills/linkedin-post-generator/README.md +107 -0
- package/skills/linkedin-post-generator/SKILL.md +228 -0
- package/skills/linkedin-post-generator/evals/evals.json +35 -0
- package/skills/linkedin-post-generator/references/linkedin-format.md +216 -0
- package/skills/linkedin-post-generator/references/output-template.md +154 -0
- package/skills/llms-txt-generator/.env.example +18 -0
- package/skills/llms-txt-generator/README.md +142 -0
- package/skills/llms-txt-generator/SKILL.md +176 -0
- package/skills/llms-txt-generator/evals/evals.json +35 -0
- package/skills/llms-txt-generator/references/llms-txt-spec.md +88 -0
- package/skills/llms-txt-generator/references/output-template.md +76 -0
- package/skills/llms-txt-generator/test-output/genzcareer.in/llms.txt +31 -0
- package/skills/luma-attendees-scraper/README.md +170 -0
- package/skills/luma-attendees-scraper/SKILL.md +7 -0
- package/skills/luma-attendees-scraper/luma_attendees_export.js +223 -0
- package/skills/meeting-brief-generator/.env.example +21 -0
- package/skills/meeting-brief-generator/README.md +90 -0
- package/skills/meeting-brief-generator/SKILL.md +275 -0
- package/skills/meeting-brief-generator/evals/evals.json +35 -0
- package/skills/meeting-brief-generator/references/brief-format.md +114 -0
- package/skills/meeting-brief-generator/references/output-template.md +150 -0
- package/skills/meta-ads-skill/README.md +100 -0
- package/skills/meta-ads-skill/SKILL.md +7 -0
- package/skills/meta-ads-skill/meta-ads-skill/SKILL.md +41 -0
- package/skills/meta-ads-skill/meta-ads-skill/references/report_templates.md +47 -0
- package/skills/meta-ads-skill/meta-ads-skill/references/workflows.md +51 -0
- package/skills/meta-ads-skill/meta-ads-skill/scripts/auth_check.py +22 -0
- package/skills/meta-ads-skill/meta-ads-skill/scripts/formatters.py +46 -0
- package/skills/newsletter-digest/.env.example +20 -0
- package/skills/newsletter-digest/README.md +147 -0
- package/skills/newsletter-digest/SKILL.md +221 -0
- package/skills/newsletter-digest/evals/evals.json +35 -0
- package/skills/newsletter-digest/feeds.json +7 -0
- package/skills/newsletter-digest/package.json +15 -0
- package/skills/newsletter-digest/references/digest-format.md +123 -0
- package/skills/newsletter-digest/references/output-template.md +136 -0
- package/skills/newsletter-digest/scripts/fetch-feeds.js +141 -0
- package/skills/newsletter-digest/scripts/ghost-publish.js +147 -0
- package/skills/noise2blog/.env.example +16 -0
- package/skills/noise2blog/README.md +107 -0
- package/skills/noise2blog/SKILL.md +229 -0
- package/skills/noise2blog/evals/evals.json +35 -0
- package/skills/noise2blog/references/blog-format.md +188 -0
- package/skills/noise2blog/references/output-template.md +184 -0
- package/skills/outreach-sequence-builder/.env.example +12 -0
- package/skills/outreach-sequence-builder/README.md +108 -0
- package/skills/outreach-sequence-builder/SKILL.md +248 -0
- package/skills/outreach-sequence-builder/evals/evals.json +36 -0
- package/skills/outreach-sequence-builder/references/output-template.md +171 -0
- package/skills/outreach-sequence-builder/references/sequence-format.md +167 -0
- package/skills/outreach-sequence-builder/references/signal-playbook.md +117 -0
- package/skills/position-me/README.md +71 -0
- package/skills/position-me/SKILL.md +7 -0
- package/skills/position-me/position-me/SKILL.md +50 -0
- package/skills/position-me/position-me/references/EVALUATION_SOP.md +40 -0
- package/skills/position-me/position-me/references/REPORT_TEMPLATE.md +58 -0
- package/skills/position-me/position-me/scripts/extract_links.py +49 -0
- package/skills/pr-description-writer/README.md +81 -0
- package/skills/pr-description-writer/SKILL.md +141 -0
- package/skills/pr-description-writer/evals/evals.json +35 -0
- package/skills/pr-description-writer/references/pr-format-guide.md +145 -0
- package/skills/producthunt-launch-kit/.env.example +7 -0
- package/skills/producthunt-launch-kit/README.md +95 -0
- package/skills/producthunt-launch-kit/SKILL.md +380 -0
- package/skills/producthunt-launch-kit/evals/evals.json +35 -0
- package/skills/producthunt-launch-kit/references/copy-rules.md +124 -0
- package/skills/reddit-icp-monitor/.env.example +16 -0
- package/skills/reddit-icp-monitor/README.md +117 -0
- package/skills/reddit-icp-monitor/SKILL.md +271 -0
- package/skills/reddit-icp-monitor/evals/evals.json +40 -0
- package/skills/reddit-icp-monitor/references/icp-format.md +131 -0
- package/skills/reddit-icp-monitor/references/reply-rules.md +110 -0
- package/skills/reddit-post-engine/.env.example +13 -0
- package/skills/reddit-post-engine/README.md +103 -0
- package/skills/reddit-post-engine/SKILL.md +303 -0
- package/skills/reddit-post-engine/evals/evals.json +35 -0
- package/skills/reddit-post-engine/references/subreddit-playbook.md +156 -0
- package/skills/schema-markup-generator/.env.example +19 -0
- package/skills/schema-markup-generator/README.md +114 -0
- package/skills/schema-markup-generator/SKILL.md +192 -0
- package/skills/schema-markup-generator/evals/evals.json +35 -0
- package/skills/schema-markup-generator/references/json-ld-spec.md +263 -0
- package/skills/schema-markup-generator/references/output-template.md +556 -0
- package/skills/show-hn-writer/.env.example +14 -0
- package/skills/show-hn-writer/README.md +88 -0
- package/skills/show-hn-writer/SKILL.md +303 -0
- package/skills/show-hn-writer/evals/evals.json +35 -0
- package/skills/show-hn-writer/references/hn-rules.md +74 -0
- package/skills/show-hn-writer/references/title-formulas.md +93 -0
- package/skills/stargazer/README.md +79 -0
- package/skills/stargazer/SKILL.md +7 -0
- package/skills/stargazer/stargazer-skill/SKILL.md +58 -0
- package/skills/stargazer/stargazer-skill/assets/.env.example +18 -0
- package/skills/stargazer/stargazer-skill/scripts/convert_to_csv.py +63 -0
- package/skills/stargazer/stargazer-skill/scripts/count_emails.py +52 -0
- package/skills/stargazer/stargazer-skill/scripts/stargazer_deep_extractor.py +450 -0
- package/skills/tweet-thread-from-blog/.env.example +14 -0
- package/skills/tweet-thread-from-blog/README.md +109 -0
- package/skills/tweet-thread-from-blog/SKILL.md +177 -0
- package/skills/tweet-thread-from-blog/evals/evals.json +35 -0
- package/skills/tweet-thread-from-blog/references/output-template.md +193 -0
- package/skills/tweet-thread-from-blog/references/thread-format.md +107 -0
- package/skills/twitter-GTM-find-skill/README.md +43 -0
- package/skills/twitter-GTM-find-skill/SKILL.md +7 -0
- package/skills/twitter-GTM-find-skill/twitter-GTM-find/SKILL.md +37 -0
- package/skills/twitter-GTM-find-skill/twitter-GTM-find/references/icp-checklist.md +35 -0
- package/skills/twitter-GTM-find-skill/twitter-GTM-find/scripts/package.json +23 -0
- package/skills/twitter-GTM-find-skill/twitter-GTM-find/scripts/run_pipeline.sh +8 -0
- package/skills/twitter-GTM-find-skill/twitter-GTM-find/scripts/src/debug.ts +23 -0
- package/skills/twitter-GTM-find-skill/twitter-GTM-find/scripts/src/extractor.ts +79 -0
- package/skills/twitter-GTM-find-skill/twitter-GTM-find/scripts/src/icp-filter.ts +87 -0
- package/skills/twitter-GTM-find-skill/twitter-GTM-find/scripts/src/index.ts +94 -0
- package/skills/twitter-GTM-find-skill/twitter-GTM-find/scripts/src/scraper.ts +41 -0
- package/skills/twitter-GTM-find-skill/twitter-GTM-find/scripts/tsconfig.json +13 -0
- package/skills/yc-intent-radar-skill/README.md +39 -0
- package/skills/yc-intent-radar-skill/SKILL.md +7 -0
- package/skills/yc-intent-radar-skill/yc-jobs-scraper/SKILL.md +59 -0
- package/skills/yc-intent-radar-skill/yc-jobs-scraper/scripts/auth.js +29 -0
- package/skills/yc-intent-radar-skill/yc-jobs-scraper/scripts/db.js +62 -0
- package/skills/yc-intent-radar-skill/yc-jobs-scraper/scripts/export_radar_candidates.js +40 -0
- package/skills/yc-intent-radar-skill/yc-jobs-scraper/scripts/package-lock.json +1525 -0
- package/skills/yc-intent-radar-skill/yc-jobs-scraper/scripts/package.json +12 -0
- package/skills/yc-intent-radar-skill/yc-jobs-scraper/scripts/scraper.js +217 -0
- package/src/e2e.test.ts +35 -0
- package/src/fs-adapters.test.ts +91 -0
- package/src/fs-adapters.ts +65 -0
- package/src/index.ts +182 -0
- package/src/transformers.ts +6 -0
- package/tsconfig.json +8 -0
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
// Write a quick script to debug the exact apify item output
|
|
2
|
+
import { ApifyClient } from 'apify-client';
|
|
3
|
+
import dotenv from 'dotenv';
|
|
4
|
+
dotenv.config();
|
|
5
|
+
|
|
6
|
+
const client = new ApifyClient({
|
|
7
|
+
token: process.env.APIFY_API_TOKEN,
|
|
8
|
+
});
|
|
9
|
+
|
|
10
|
+
async function debug() {
|
|
11
|
+
const input = {
|
|
12
|
+
"query": '("DevRel hiring" OR "GTM hiring" OR "hiring DevRel" OR "hiring GTM" OR "founding GTM" OR "developer advocate hiring") since:2026-03-10 -is:retweet',
|
|
13
|
+
"search_type": "Latest",
|
|
14
|
+
"max_posts": 3
|
|
15
|
+
};
|
|
16
|
+
|
|
17
|
+
console.log('Running debug query...');
|
|
18
|
+
const run = await client.actor("ghSpYIW3L1RvT57NT").call(input);
|
|
19
|
+
const { items } = await client.dataset(run.defaultDatasetId).listItems();
|
|
20
|
+
console.log(JSON.stringify(items[0], null, 2));
|
|
21
|
+
}
|
|
22
|
+
|
|
23
|
+
debug().catch(console.error);
|
|
@@ -0,0 +1,79 @@
|
|
|
1
|
+
import { GoogleGenerativeAI, SchemaType } from '@google/generative-ai';
|
|
2
|
+
import dotenv from 'dotenv';
|
|
3
|
+
dotenv.config();
|
|
4
|
+
|
|
5
|
+
const apiKey = process.env.GEMINI_API_KEY || '';
|
|
6
|
+
const genAI = new GoogleGenerativeAI(apiKey);
|
|
7
|
+
|
|
8
|
+
export interface JobExtract {
|
|
9
|
+
isJobPost: boolean;
|
|
10
|
+
role: string | null;
|
|
11
|
+
company: string | null;
|
|
12
|
+
tweetUrl: string | null;
|
|
13
|
+
authorProfileUrl: string | null;
|
|
14
|
+
}
|
|
15
|
+
|
|
16
|
+
export async function extractJobDetails(tweetText: string, authorName: string, authorBio: string, tweetUrl: string, authorProfileUrl: string): Promise<JobExtract> {
|
|
17
|
+
if (!apiKey) {
|
|
18
|
+
return {
|
|
19
|
+
isJobPost: tweetText.toLowerCase().includes('hiring') || tweetText.toLowerCase().includes('role'),
|
|
20
|
+
role: "Mocked GTM Role",
|
|
21
|
+
company: "Mocked Company Inc.",
|
|
22
|
+
tweetUrl,
|
|
23
|
+
authorProfileUrl
|
|
24
|
+
};
|
|
25
|
+
}
|
|
26
|
+
|
|
27
|
+
const prompt = `
|
|
28
|
+
You are an AI assistant designed to extract ONLY TECH AND STARTUP job postings from Twitter data.
|
|
29
|
+
|
|
30
|
+
Your goal is to parse the tweet and the author's profile bio to identify:
|
|
31
|
+
1. Is this a genuine job posting looking to hire someone?
|
|
32
|
+
2. What is the specific role being hired for?
|
|
33
|
+
3. What is the name or domain of the company hiring?
|
|
34
|
+
|
|
35
|
+
STRICT FILTERING RULES:
|
|
36
|
+
- ONLY mark "isJobPost" as TRUE if the company is a software startup, tech company, developer-first tool, SaaS, or similar.
|
|
37
|
+
- REJECT and mark "isJobPost" as FALSE if the role is for traditional industries (e.g. Automotive, Real Estate, Clinics, generic Recruitment Agencies, HR Trainees, Payroll clerks, etc.).
|
|
38
|
+
- REJECT if the author is just praising someone who used to be a "Founding GTM" or similar. It MUST be an active request to hire someone.
|
|
39
|
+
- Ignore people looking for jobs themselves (e.g., "I am looking for a DevRel role").
|
|
40
|
+
|
|
41
|
+
If they don't explicitly name the company in the tweet, infer it from their bio (e.g., "Founder @ CompanyX").
|
|
42
|
+
|
|
43
|
+
Data:
|
|
44
|
+
Tweet Text: "${tweetText}"
|
|
45
|
+
Author Name: "${authorName}"
|
|
46
|
+
Author Bio: "${authorBio}"
|
|
47
|
+
`;
|
|
48
|
+
|
|
49
|
+
try {
|
|
50
|
+
const model = genAI.getGenerativeModel({
|
|
51
|
+
model: "gemini-flash-latest",
|
|
52
|
+
generationConfig: {
|
|
53
|
+
responseMimeType: "application/json",
|
|
54
|
+
responseSchema: {
|
|
55
|
+
type: SchemaType.OBJECT,
|
|
56
|
+
properties: {
|
|
57
|
+
isJobPost: { type: SchemaType.BOOLEAN },
|
|
58
|
+
role: { type: SchemaType.STRING, nullable: true },
|
|
59
|
+
company: { type: SchemaType.STRING, nullable: true }
|
|
60
|
+
},
|
|
61
|
+
required: ["isJobPost"]
|
|
62
|
+
}
|
|
63
|
+
}
|
|
64
|
+
});
|
|
65
|
+
|
|
66
|
+
const result = await model.generateContent(prompt);
|
|
67
|
+
const content = result.response.text();
|
|
68
|
+
|
|
69
|
+
if (!content) throw new Error("No response from LLM");
|
|
70
|
+
|
|
71
|
+
const parsed: JobExtract = JSON.parse(content);
|
|
72
|
+
parsed.tweetUrl = tweetUrl;
|
|
73
|
+
parsed.authorProfileUrl = authorProfileUrl;
|
|
74
|
+
return parsed;
|
|
75
|
+
} catch (error) {
|
|
76
|
+
console.error("Error extracting job details:", error);
|
|
77
|
+
return { isJobPost: false, role: null, company: null, tweetUrl, authorProfileUrl };
|
|
78
|
+
}
|
|
79
|
+
}
|
|
@@ -0,0 +1,87 @@
|
|
|
1
|
+
import { GoogleGenerativeAI, SchemaType } from '@google/generative-ai';
|
|
2
|
+
import { JobExtract } from './extractor';
|
|
3
|
+
import dotenv from 'dotenv';
|
|
4
|
+
dotenv.config();
|
|
5
|
+
|
|
6
|
+
const apiKey = process.env.GEMINI_API_KEY || '';
|
|
7
|
+
const genAI = new GoogleGenerativeAI(apiKey);
|
|
8
|
+
|
|
9
|
+
export interface IcpResult extends JobExtract {
|
|
10
|
+
isICP: boolean;
|
|
11
|
+
icpReasoning: string;
|
|
12
|
+
companyUrl: string | null;
|
|
13
|
+
estimatedFunding: string | null;
|
|
14
|
+
}
|
|
15
|
+
|
|
16
|
+
export async function evaluateICP(job: JobExtract): Promise<IcpResult> {
|
|
17
|
+
if (!apiKey) {
|
|
18
|
+
return { ...job, isICP: false, icpReasoning: "No API Key", companyUrl: null, estimatedFunding: null };
|
|
19
|
+
}
|
|
20
|
+
|
|
21
|
+
const model = genAI.getGenerativeModel({
|
|
22
|
+
model: "gemini-flash-latest",
|
|
23
|
+
tools: [
|
|
24
|
+
{ googleSearch: {} } as any
|
|
25
|
+
],
|
|
26
|
+
generationConfig: {
|
|
27
|
+
responseMimeType: "application/json",
|
|
28
|
+
responseSchema: {
|
|
29
|
+
type: SchemaType.OBJECT,
|
|
30
|
+
properties: {
|
|
31
|
+
isICP: { type: SchemaType.BOOLEAN },
|
|
32
|
+
icpReasoning: { type: SchemaType.STRING },
|
|
33
|
+
companyUrl: { type: SchemaType.STRING, nullable: true },
|
|
34
|
+
estimatedFunding: { type: SchemaType.STRING, nullable: true }
|
|
35
|
+
},
|
|
36
|
+
required: ["isICP", "icpReasoning"]
|
|
37
|
+
}
|
|
38
|
+
}
|
|
39
|
+
});
|
|
40
|
+
|
|
41
|
+
const prompt = `
|
|
42
|
+
You are an M&A/Sales researcher validating a company against a strict ICP (Ideal Customer Profile) checklist.
|
|
43
|
+
Your goal is to use Google Search to research the company and determine if they are an exact match for our ICP.
|
|
44
|
+
|
|
45
|
+
COMPANY TO RESEARCH: "${job.company}"
|
|
46
|
+
(Role hiring for: "${job.role}")
|
|
47
|
+
|
|
48
|
+
### ICP CHECKLIST:
|
|
49
|
+
1. Developer-First Product
|
|
50
|
+
- WHAT QUALIFIES: API platforms, infrastructure tools, SDKs, dev utilities, AI agents/automation for technical workflows, DevOps, CI/CD, monitoring, analytics, Code editors, Data infrastructure, LLM deployment platforms.
|
|
51
|
+
- AUTO-SKIP (NOT ICP): Consumer apps, B2C SaaS, HR tools, pure Fintech/healthcare, traditional physical industries, recruiting agencies.
|
|
52
|
+
|
|
53
|
+
2. Funding Evidence
|
|
54
|
+
- MINIMUM REQUIREMENT: $100K+ raised (verifiable via Crunchbase, YC, press, founder posts).
|
|
55
|
+
- PREFERRED: $500K-$5M seed/Series A in last 18 months. YC, a16z, Sequoia backed.
|
|
56
|
+
- SKIP IF (NOT ICP): Pure bootstrap, absolutely no funding evidence found.
|
|
57
|
+
|
|
58
|
+
### INSTRUCTIONS:
|
|
59
|
+
1. Use Google Search to find the official website for "${job.company}".
|
|
60
|
+
2. Use Google Search to find funding data (search "${job.company} funding Crunchbase Y Combinator").
|
|
61
|
+
3. Evaluate them strictly against the 2 checklist items above.
|
|
62
|
+
4. If they are a dev-first/infrastructure/AI tool AND have funding, "isICP" must be TRUE.
|
|
63
|
+
5. If they fail EITHER criteria (e.g. consumer app, OR zero funding found), "isICP" must be FALSE.
|
|
64
|
+
6. Provide a concise 1-2 sentence "icpReasoning" explaining exactly what the company does and how much funding you found.
|
|
65
|
+
|
|
66
|
+
Output valid JSON exactly matching the schema.
|
|
67
|
+
`;
|
|
68
|
+
|
|
69
|
+
try {
|
|
70
|
+
const result = await model.generateContent(prompt);
|
|
71
|
+
const content = result.response.text();
|
|
72
|
+
|
|
73
|
+
if (!content) throw new Error("No response from LLM");
|
|
74
|
+
|
|
75
|
+
const parsed = JSON.parse(content);
|
|
76
|
+
return {
|
|
77
|
+
...job,
|
|
78
|
+
isICP: parsed.isICP,
|
|
79
|
+
icpReasoning: parsed.icpReasoning,
|
|
80
|
+
companyUrl: parsed.companyUrl || null,
|
|
81
|
+
estimatedFunding: parsed.estimatedFunding || null
|
|
82
|
+
};
|
|
83
|
+
} catch (error) {
|
|
84
|
+
console.error(`Error evaluating ICP for ${job.company}:`, error);
|
|
85
|
+
return { ...job, isICP: false, icpReasoning: "Error during evaluation", companyUrl: null, estimatedFunding: null };
|
|
86
|
+
}
|
|
87
|
+
}
|
|
@@ -0,0 +1,94 @@
|
|
|
1
|
+
import { scrapeTwitterJobs } from './scraper';
|
|
2
|
+
import { extractJobDetails, JobExtract } from './extractor';
|
|
3
|
+
import { evaluateICP, IcpResult } from './icp-filter';
|
|
4
|
+
import fs from 'fs';
|
|
5
|
+
|
|
6
|
+
async function main() {
|
|
7
|
+
const maxPosts = parseInt(process.env.MAX_POSTS || '20', 10);
|
|
8
|
+
|
|
9
|
+
const rawTweets = await scrapeTwitterJobs(maxPosts);
|
|
10
|
+
const radarData: JobExtract[] = [];
|
|
11
|
+
|
|
12
|
+
console.log(`\nProcessing ${rawTweets.length} tweets to extract job details...`);
|
|
13
|
+
const seenJobs = new Set<string>();
|
|
14
|
+
const seenTweetIds = new Set<string>();
|
|
15
|
+
|
|
16
|
+
const extractedResults = await Promise.all(rawTweets.map(async (item: any) => {
|
|
17
|
+
const itemAny = item;
|
|
18
|
+
const tweetId = itemAny.tweet_id || itemAny.id_str || itemAny.id || "";
|
|
19
|
+
|
|
20
|
+
const tweetText = itemAny.full_text || itemAny.text || String(itemAny);
|
|
21
|
+
let authorName = "Unknown";
|
|
22
|
+
let authorBio = "Unknown";
|
|
23
|
+
|
|
24
|
+
if (itemAny.user_info) {
|
|
25
|
+
authorName = itemAny.user_info.name || itemAny.user_info.screen_name || "Unknown";
|
|
26
|
+
authorBio = itemAny.user_info.description || "Unknown";
|
|
27
|
+
} else if (itemAny.author) {
|
|
28
|
+
authorName = itemAny.author.name || itemAny.author.userName || "Unknown";
|
|
29
|
+
authorBio = itemAny.author.description || "Unknown";
|
|
30
|
+
} else if (itemAny.user) {
|
|
31
|
+
authorName = itemAny.user.name || itemAny.user.screen_name || "Unknown";
|
|
32
|
+
authorBio = itemAny.user.description || "Unknown";
|
|
33
|
+
}
|
|
34
|
+
|
|
35
|
+
const authorHandle = itemAny.screen_name || itemAny.user_info?.screen_name || itemAny.author?.userName || itemAny.user?.screen_name || "";
|
|
36
|
+
const tweetUrl = tweetId && authorHandle ? `https://twitter.com/${authorHandle}/status/${tweetId}` : itemAny.url || "";
|
|
37
|
+
const authorProfileUrl = authorHandle ? `https://twitter.com/${authorHandle}` : "";
|
|
38
|
+
|
|
39
|
+
return {
|
|
40
|
+
tweetId,
|
|
41
|
+
extracted: await extractJobDetails(tweetText, authorName, String(authorBio), tweetUrl, authorProfileUrl)
|
|
42
|
+
};
|
|
43
|
+
}));
|
|
44
|
+
|
|
45
|
+
for (const result of extractedResults) {
|
|
46
|
+
if (result.tweetId && seenTweetIds.has(result.tweetId)) continue;
|
|
47
|
+
if (result.tweetId) seenTweetIds.add(result.tweetId);
|
|
48
|
+
|
|
49
|
+
const { extracted } = result;
|
|
50
|
+
if (extracted.isJobPost && extracted.company && extracted.role) {
|
|
51
|
+
const dedupKey = extracted.company.toLowerCase().replace(/[^a-z0-9]/g, '');
|
|
52
|
+
if (seenJobs.has(dedupKey)) {
|
|
53
|
+
console.log(`⚠️ Skipped Duplicate Company: [Role]: ${extracted.role} | [Company]: ${extracted.company}`);
|
|
54
|
+
continue;
|
|
55
|
+
}
|
|
56
|
+
seenJobs.add(dedupKey);
|
|
57
|
+
|
|
58
|
+
radarData.push(extracted);
|
|
59
|
+
console.log(`✅ Added to Radar: [Role]: ${extracted.role} | [Company]: ${extracted.company}`);
|
|
60
|
+
} else {
|
|
61
|
+
console.log(`❌ Skipped: Not a valid job post or missing info.`);
|
|
62
|
+
}
|
|
63
|
+
}
|
|
64
|
+
|
|
65
|
+
fs.writeFileSync('radar-jobs.json', JSON.stringify(radarData, null, 2));
|
|
66
|
+
console.log(`\n🎉 Step 1 Complete: Found ${radarData.length} valid jobs.`);
|
|
67
|
+
|
|
68
|
+
console.log(`\n🔍 Step 2: Evaluating ${radarData.length} companies against OpenClaw ICP...`);
|
|
69
|
+
|
|
70
|
+
const icpData: IcpResult[] = [];
|
|
71
|
+
|
|
72
|
+
const BATCH_SIZE = 5;
|
|
73
|
+
for (let i = 0; i < radarData.length; i += BATCH_SIZE) {
|
|
74
|
+
const batch = radarData.slice(i, i + BATCH_SIZE);
|
|
75
|
+
console.log(`Researching batch ${i/BATCH_SIZE + 1} (${batch.length} companies)...`);
|
|
76
|
+
|
|
77
|
+
const evaluations = await Promise.all(batch.map(job => evaluateICP(job)));
|
|
78
|
+
|
|
79
|
+
for (const evaluation of evaluations) {
|
|
80
|
+
if (evaluation.isICP) {
|
|
81
|
+
console.log(` 🟢 PASSED ICP: ${evaluation.company} - ${evaluation.icpReasoning}`);
|
|
82
|
+
icpData.push(evaluation);
|
|
83
|
+
} else {
|
|
84
|
+
console.log(` 🔴 FAILED ICP: ${evaluation.company} - ${evaluation.icpReasoning}`);
|
|
85
|
+
}
|
|
86
|
+
}
|
|
87
|
+
|
|
88
|
+
fs.writeFileSync('openclaw-icp-jobs.json', JSON.stringify(icpData, null, 2));
|
|
89
|
+
}
|
|
90
|
+
|
|
91
|
+
console.log(`\n🎉 Pipeline Complete! Exported ${icpData.length} highly-targeted ICP companies to openclaw-icp-jobs.json.`);
|
|
92
|
+
}
|
|
93
|
+
|
|
94
|
+
main().catch(console.error);
|
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
import { ApifyClient } from 'apify-client';
|
|
2
|
+
import dotenv from 'dotenv';
|
|
3
|
+
dotenv.config();
|
|
4
|
+
|
|
5
|
+
const client = new ApifyClient({
|
|
6
|
+
token: process.env.APIFY_API_TOKEN,
|
|
7
|
+
});
|
|
8
|
+
|
|
9
|
+
export async function scrapeTwitterJobs(maxPostsPerQuery: number = 20) {
|
|
10
|
+
console.log('Starting Twitter Job Scraper (Multi-Query Mode)...');
|
|
11
|
+
|
|
12
|
+
const lastWeek = new Date();
|
|
13
|
+
lastWeek.setDate(lastWeek.getDate() - 7);
|
|
14
|
+
const sinceDate = lastWeek.toISOString().split('T')[0];
|
|
15
|
+
|
|
16
|
+
const queries = [
|
|
17
|
+
`("hiring" OR "looking for" OR "open role") ("DevRel" OR "Developer Advocate" OR "DevRel Intern") since:${sinceDate} -is:retweet`,
|
|
18
|
+
`("hiring" OR "looking for" OR "open role") ("GTM" OR "go to market" OR "Founding GTM" OR "GTM Lead") since:${sinceDate} -is:retweet`,
|
|
19
|
+
`("hiring" OR "looking for" OR "open role") ("Growth Lead" OR "Growth Hacker" OR "Head of Growth") since:${sinceDate} -is:retweet`,
|
|
20
|
+
`("hiring" OR "looking for" OR "open role") ("Developer Marketing" OR "Developer Relations" OR "Community Lead") since:${sinceDate} -is:retweet`,
|
|
21
|
+
`("hiring DevRel" OR "GTM Engineer" OR "hiring GTM" OR "hiring founding GTM" OR "developer advocate hiring") since:${sinceDate} -is:retweet`
|
|
22
|
+
];
|
|
23
|
+
|
|
24
|
+
let allItems: any[] = [];
|
|
25
|
+
|
|
26
|
+
for (const [index, query] of queries.entries()) {
|
|
27
|
+
console.log(`\n--- Running Query ${index + 1}/5 ---`);
|
|
28
|
+
console.log(`Executing query: ${query}`);
|
|
29
|
+
const input = {
|
|
30
|
+
"query": query,
|
|
31
|
+
"search_type": "Latest",
|
|
32
|
+
"max_posts": maxPostsPerQuery
|
|
33
|
+
};
|
|
34
|
+
const run = await client.actor("ghSpYIW3L1RvT57NT").call(input);
|
|
35
|
+
const { items } = await client.dataset(run.defaultDatasetId).listItems();
|
|
36
|
+
console.log(`Found ${items.length} tweets for this query.`);
|
|
37
|
+
allItems = allItems.concat(items);
|
|
38
|
+
}
|
|
39
|
+
|
|
40
|
+
return allItems;
|
|
41
|
+
}
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
{
|
|
2
|
+
"compilerOptions": {
|
|
3
|
+
"target": "ES2022",
|
|
4
|
+
"module": "CommonJS",
|
|
5
|
+
"rootDir": "./src",
|
|
6
|
+
"outDir": "./dist",
|
|
7
|
+
"strict": true,
|
|
8
|
+
"esModuleInterop": true,
|
|
9
|
+
"skipLibCheck": true,
|
|
10
|
+
"forceConsistentCasingInFileNames": true
|
|
11
|
+
},
|
|
12
|
+
"include": ["src/**/*"]
|
|
13
|
+
}
|
|
@@ -0,0 +1,39 @@
|
|
|
1
|
+
# YC Intent Radar
|
|
2
|
+
|
|
3
|
+
<img width="1280" height="640" alt="yc-intent-radar-cover" src="https://github.com/user-attachments/assets/2328ae2b-1b5d-45ad-8604-b90721b8d398" />
|
|
4
|
+
|
|
5
|
+
An automated scraper that pulls job listings and company data from Y Combinator's Work at a Startup platform. It bypasses login bottlenecks by using authenticated sessions, and it avoids recording duplicates by saving everything directly to a local SQLite database (`jobs.db`).
|
|
6
|
+
|
|
7
|
+
## Features
|
|
8
|
+
- **Deduplication:** Utilizes `better-sqlite3` to store state, ensuring you never scrape the same job twice.
|
|
9
|
+
- **Robust Extraction:** Identifies hidden JSON payloads on YC pages to grab accurate backend `job_id` values.
|
|
10
|
+
- **Filtered Exports:** Includes an export script (`export_radar_candidates.js`) that queries the SQLite database for intent-based hiring (e.g., GTM, DevRel, Growth, Content) and outputs it as a JSON payload for secondary research tools.
|
|
11
|
+
|
|
12
|
+
## Setup
|
|
13
|
+
1. Clone the repository.
|
|
14
|
+
2. Navigate to the `scripts/` directory:
|
|
15
|
+
```bash
|
|
16
|
+
cd scripts
|
|
17
|
+
npm install
|
|
18
|
+
npx playwright install
|
|
19
|
+
```
|
|
20
|
+
|
|
21
|
+
3. **Authenticate (First Time Only):**
|
|
22
|
+
Run the following script and log in to YC via the browser that opens. This creates a `state.json` file.
|
|
23
|
+
```bash
|
|
24
|
+
node auth.js
|
|
25
|
+
```
|
|
26
|
+
|
|
27
|
+
4. **Run the Scraper:**
|
|
28
|
+
```bash
|
|
29
|
+
node scraper.js
|
|
30
|
+
```
|
|
31
|
+
|
|
32
|
+
5. **Export Targeted Jobs:**
|
|
33
|
+
```bash
|
|
34
|
+
node export_radar_candidates.js
|
|
35
|
+
```
|
|
36
|
+
This will query the DB and produce `radar_candidates.json` containing the targeted companies and matching roles.
|
|
37
|
+
|
|
38
|
+
## Note on Sensitive Files
|
|
39
|
+
The `.gitignore` strictly protects your `state.json` (authentication cookies) and `jobs.db` (local history). Do not commit these files to a public repository.
|
|
@@ -0,0 +1,59 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: yc-jobs-scraper
|
|
3
|
+
description: Scrape daily job listings from YCombinator's Workatastartup platform without duplicates. Use this skill when asked to scrape YC jobs, update the YC companies list, or retrieve the latest startup jobs. It handles authentication, extracts company slugs via Inertia.js JSON payloads, falls back to public YC job pages when necessary, and maintains a local SQLite database to track historical jobs and prevent duplicates.
|
|
4
|
+
---
|
|
5
|
+
|
|
6
|
+
# YC Jobs Scraper
|
|
7
|
+
|
|
8
|
+
This skill provides a robust architecture for scraping jobs from YCombinator and `workatastartup.com`. It is designed to run automatically, bypass login bottlenecks, and maintain state to never scrape duplicate jobs.
|
|
9
|
+
|
|
10
|
+
## Architecture
|
|
11
|
+
|
|
12
|
+
The scraper uses a hybrid approach to maximize reliability and minimize bot detection:
|
|
13
|
+
|
|
14
|
+
1. **Authentication:** `scripts/auth.js` uses Playwright to let a human log in once and saves the session to `scripts/state.json`.
|
|
15
|
+
2. **Database:** `scripts/db.js` uses `better-sqlite3` to manage `scripts/jobs.db`. It tracks every `company_slug` and `job_id` ever seen.
|
|
16
|
+
3. **Primary Extraction:** `scripts/scraper.js` loads `state.json`, visits YC query URLs, and extracts company slugs from the hidden Inertia.js `data-page` JSON payload.
|
|
17
|
+
4. **Job Extraction (JSON):** It then visits the authenticated company pages (`/companies/[slug]`) and extracts jobs from the backend JSON payload, ensuring the real `job_id` is captured for accurate deduplication.
|
|
18
|
+
5. **Job Extraction (Fallback):** If the JSON extraction fails, it falls back to parsing public HTML job cards from `ycombinator.com/companies/[slug]/jobs`.
|
|
19
|
+
|
|
20
|
+
## Workflows
|
|
21
|
+
|
|
22
|
+
### 1. First-Time Setup
|
|
23
|
+
If this is the first time running the scraper in an environment, or if `node_modules` is missing:
|
|
24
|
+
```bash
|
|
25
|
+
cd @path/scripts
|
|
26
|
+
npm install
|
|
27
|
+
npx playwright install
|
|
28
|
+
```
|
|
29
|
+
|
|
30
|
+
### 2. Authentication (Manual Step)
|
|
31
|
+
If `scripts/state.json` is missing or expired, the scraper will fail. You must instruct the human user to run the authentication script manually:
|
|
32
|
+
```bash
|
|
33
|
+
cd @path/scripts
|
|
34
|
+
node auth.js
|
|
35
|
+
```
|
|
36
|
+
Tell the user a browser will open, and they must log in. Playwright will automatically save the cookies/tokens to `state.json`.
|
|
37
|
+
|
|
38
|
+
### 3. Running the Daily Scraper
|
|
39
|
+
To scrape for new companies and jobs:
|
|
40
|
+
```bash
|
|
41
|
+
cd @path/scripts
|
|
42
|
+
node scraper.js
|
|
43
|
+
```
|
|
44
|
+
This script will output exactly how many new companies and new jobs were found. Because of `jobs.db`, running it multiple times consecutively will result in `0 new jobs found`.
|
|
45
|
+
|
|
46
|
+
### 4. Querying the Database
|
|
47
|
+
If you need to analyze the scraped data or view the companies/jobs, you can query `scripts/jobs.db` directly using `better-sqlite3`.
|
|
48
|
+
|
|
49
|
+
**Example: Count Companies**
|
|
50
|
+
```bash
|
|
51
|
+
cd @path/scripts
|
|
52
|
+
node -e "const db = require('better-sqlite3')('jobs.db'); console.log('Companies:', db.prepare('SELECT COUNT(*) as count FROM companies').get().count);"
|
|
53
|
+
```
|
|
54
|
+
|
|
55
|
+
**Example: View Recent Jobs**
|
|
56
|
+
```bash
|
|
57
|
+
cd @path/scripts
|
|
58
|
+
node -e "const db = require('better-sqlite3')('jobs.db'); const jobs = db.prepare('SELECT title, company_slug, location FROM jobs ORDER BY created_at DESC LIMIT 5').all(); console.table(jobs);"
|
|
59
|
+
```
|
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
const { chromium } = require('playwright');
|
|
2
|
+
const fs = require('fs');
|
|
3
|
+
|
|
4
|
+
/**
 * Opens a visible browser so the user can log in to Y Combinator manually,
 * then persists the authenticated session (cookies/tokens) to state.json
 * for later use by the scraper.
 *
 * Fix: the browser is now closed in a `finally` block, so an aborted login
 * or a navigation failure no longer leaks a browser process and leaves the
 * script hanging (the original only closed it on the success path).
 */
async function loginAndSaveSession() {
  console.log('Opening browser for login...');
  console.log('Please log in manually if you are not prompted.');

  // headless: false — a human must interact with the login form.
  const browser = await chromium.launch({ headless: false });

  try {
    const context = await browser.newContext();
    const page = await context.newPage();

    console.log('Navigating to YC login page...');
    await page.goto('https://account.ycombinator.com/?continue=https://www.workatastartup.com/');

    console.log('\n--- ACTION REQUIRED ---');
    console.log('Please log in using your YC credentials in the browser window.');
    console.log('Waiting until you are redirected to the workatastartup.com dashboard...\n');

    // timeout: 0 — wait indefinitely for the human to finish logging in.
    await page.waitForURL('https://www.workatastartup.com/**', { timeout: 0 });

    console.log('Login successful! Saving session state...');

    await context.storageState({ path: 'state.json' });

    console.log('Session saved to state.json. You can now run the scraper!');
  } finally {
    // Always release the browser, even on error or Ctrl-C of the login flow.
    await browser.close();
  }
}

loginAndSaveSession().catch(console.error);
|
|
@@ -0,0 +1,62 @@
|
|
|
1
|
+
const Database = require('better-sqlite3');
|
|
2
|
+
const path = require('path');
|
|
3
|
+
|
|
4
|
+
// Open (or create) the SQLite file next to this script so the working
// directory never matters.
const dbPath = path.join(__dirname, 'jobs.db');
const db = new Database(dbPath);

// Companies discovered so far, keyed by their unique YC slug.
db.exec(`
  CREATE TABLE IF NOT EXISTS companies (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    slug TEXT UNIQUE NOT NULL,
    name TEXT NOT NULL,
    scraped_at DATETIME DEFAULT CURRENT_TIMESTAMP
  );
`);

// Every job ever seen; the UNIQUE job_id is what makes re-runs idempotent.
db.exec(`
  CREATE TABLE IF NOT EXISTS jobs (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    job_id TEXT UNIQUE NOT NULL,
    company_slug TEXT NOT NULL,
    title TEXT NOT NULL,
    location TEXT,
    created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
    FOREIGN KEY (company_slug) REFERENCES companies(slug)
  );
`);
|
|
25
|
+
|
|
26
|
+
/**
 * Records a company if it has not been seen before.
 * @param {string} slug - Unique company identifier used in YC URLs.
 * @param {string} name - Display name of the company.
 * @returns {boolean} true if a new row was inserted; false on duplicate or error.
 */
function insertCompany(slug, name) {
  try {
    const result = db
      .prepare('INSERT OR IGNORE INTO companies (slug, name) VALUES (?, ?)')
      .run(slug, name);
    // OR IGNORE makes duplicates a no-op, so `changes` tells us if it was new.
    return result.changes > 0;
  } catch (err) {
    console.error(`Error inserting company ${slug}:`, err);
    return false;
  }
}
|
|
36
|
+
|
|
37
|
+
/**
 * Records a job listing if its backend job_id has not been seen before.
 * @param {string} jobId - Backend job identifier (UNIQUE in the jobs table).
 * @param {string} companySlug - Slug of the company the job belongs to.
 * @param {string} title - Job title.
 * @param {string|null} location - Job location, if known.
 * @returns {boolean} true if a new row was inserted; false on duplicate or error.
 */
function insertJob(jobId, companySlug, title, location) {
  try {
    const result = db
      .prepare('INSERT OR IGNORE INTO jobs (job_id, company_slug, title, location) VALUES (?, ?, ?, ?)')
      .run(jobId, companySlug, title, location);
    // OR IGNORE makes duplicates a no-op, so `changes` tells us if it was new.
    return result.changes > 0;
  } catch (err) {
    console.error(`Error inserting job ${jobId}:`, err);
    return false;
  }
}
|
|
47
|
+
|
|
48
|
+
/**
 * Looks up a company row by its slug.
 * @param {string} slug - Unique company identifier.
 * @returns {object|undefined} The row, or undefined when not present.
 */
function getCompany(slug) {
  const lookup = db.prepare('SELECT * FROM companies WHERE slug = ?');
  return lookup.get(slug);
}
|
|
51
|
+
|
|
52
|
+
/**
 * Looks up a job row by its backend job_id.
 * @param {string} jobId - Backend job identifier.
 * @returns {object|undefined} The row, or undefined when not present.
 */
function getJob(jobId) {
  const lookup = db.prepare('SELECT * FROM jobs WHERE job_id = ?');
  return lookup.get(jobId);
}
|
|
55
|
+
|
|
56
|
+
// Public API: the raw database handle plus the insert/lookup helpers.
module.exports = { db, insertCompany, insertJob, getCompany, getJob };
|
|
@@ -0,0 +1,40 @@
|
|
|
1
|
+
// Exports companies with intent-signalling open roles (GTM, DevRel, Growth,
// Content, ...) from jobs.db into radar_candidates.json for downstream
// research tools.
const Database = require('better-sqlite3');
const path = require('path');
const fs = require('fs');

// Open the database produced by scraper.js (lives next to this script).
const dbPath = path.join(__dirname, 'jobs.db');
const db = new Database(dbPath);

// Role keywords that signal intent-based hiring. Editing this list is the
// only change needed to retarget the export.
const KEYWORDS = [
  'GTM',
  'DevRel',
  'Growth',
  'Content',
  'Marketing',
  'Developer Advocate',
  'Community',
];

// Fix: the original hard-coded seven duplicated `LIKE '%…%'` clauses into the
// SQL string. Build one `j.title LIKE ?` clause per keyword and bind the
// patterns as parameters instead (same matches, no string interpolation).
const likeClauses = KEYWORDS.map(() => 'j.title LIKE ?').join(' OR ');
const params = KEYWORDS.map((keyword) => `%${keyword}%`);

const query = `
  SELECT
    c.name as company_name,
    c.slug as company_slug,
    GROUP_CONCAT(j.title, ' | ') as matching_roles
  FROM jobs j
  JOIN companies c ON j.company_slug = c.slug
  WHERE ${likeClauses}
  GROUP BY c.slug, c.name
`;

const rows = db.prepare(query).all(...params);

// Shape each grouped row into the payload consumed by secondary tools.
const candidates = rows.map((row) => ({
  company_name: row.company_name,
  company_slug: row.company_slug,
  yc_url: `https://www.ycombinator.com/companies/${row.company_slug}`,
  matching_roles: row.matching_roles,
}));

fs.writeFileSync(
  path.join(__dirname, 'radar_candidates.json'),
  JSON.stringify(candidates, null, 2)
);

console.log(`Found ${candidates.length} unique candidate companies hiring for target roles! Saved to radar_candidates.json`);