freshcontext-mcp 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,50 @@
1
+ import { chromium } from "playwright";
2
+ import { AdapterResult, ExtractOptions } from "../types.js";
3
+
4
+ export async function githubAdapter(options: ExtractOptions): Promise<AdapterResult> {
5
+ const browser = await chromium.launch({ headless: true });
6
+ const page = await browser.newPage();
7
+
8
+ // Spoof a real browser UA to avoid bot detection
9
+ await page.setExtraHTTPHeaders({
10
+ "User-Agent":
11
+ "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36",
12
+ });
13
+
14
+ await page.goto(options.url, { waitUntil: "domcontentloaded", timeout: 20000 });
15
+
16
+ // Extract key repo signals — no inner functions to avoid esbuild __name injection
17
+ const data = await page.evaluate(`(function() {
18
+ var readme = (document.querySelector('[data-target="readme-toc.content"]') || document.querySelector('.markdown-body') || {}).textContent || null;
19
+ var starsEl = document.querySelector('[id="repo-stars-counter-star"]') || document.querySelector('.Counter.js-social-count');
20
+ var stars = starsEl ? starsEl.textContent.trim() : null;
21
+ var forksEl = document.querySelector('[id="repo-network-counter"]');
22
+ var forks = forksEl ? forksEl.textContent.trim() : null;
23
+ var commitEl = document.querySelector('relative-time');
24
+ var lastCommit = commitEl ? commitEl.getAttribute('datetime') : null;
25
+ var descEl = document.querySelector('.f4.my-3');
26
+ var description = descEl ? descEl.textContent.trim() : null;
27
+ var topics = Array.from(document.querySelectorAll('.topic-tag')).map(function(t) { return t.textContent.trim(); });
28
+ var langEl = document.querySelector('.color-fg-default.text-bold.mr-1');
29
+ var language = langEl ? langEl.textContent.trim() : null;
30
+ return { readme: readme, stars: stars, forks: forks, lastCommit: lastCommit, description: description, topics: topics, language: language };
31
+ })()`);
32
+ const typedData = data as { readme: string | null; stars: string | null; forks: string | null; lastCommit: string | null; description: string | null; topics: string[]; language: string | null };
33
+
34
+ await browser.close();
35
+
36
+ const raw = [
37
+ `Description: ${typedData.description ?? "N/A"}`,
38
+ `Stars: ${typedData.stars ?? "N/A"} | Forks: ${typedData.forks ?? "N/A"}`,
39
+ `Language: ${typedData.language ?? "N/A"}`,
40
+ `Last commit: ${typedData.lastCommit ?? "N/A"}`,
41
+ `Topics: ${typedData.topics?.join(", ") ?? "none"}`,
42
+ `\n--- README ---\n${typedData.readme ?? "No README found"}`,
43
+ ].join("\n");
44
+
45
+ return {
46
+ raw,
47
+ content_date: typedData.lastCommit ?? null,
48
+ freshness_confidence: typedData.lastCommit ? "high" : "medium",
49
+ };
50
+ }
@@ -0,0 +1,93 @@
1
+ import { chromium } from "playwright";
2
+ import { AdapterResult, ExtractOptions } from "../types.js";
3
+
4
+ export async function hackerNewsAdapter(options: ExtractOptions): Promise<AdapterResult> {
5
+ // If it's an Algolia API URL or search query, use the REST API directly (no browser)
6
+ const url = options.url;
7
+
8
+ if (url.includes("hn.algolia.com/api/") || url.startsWith("hn-search:")) {
9
+ const query = url.startsWith("hn-search:")
10
+ ? url.replace("hn-search:", "").trim()
11
+ : url;
12
+
13
+ const apiUrl = url.includes("hn.algolia.com/api/")
14
+ ? url
15
+ : `https://hn.algolia.com/api/v1/search?query=${encodeURIComponent(query)}&tags=story&hitsPerPage=20`;
16
+
17
+ const res = await fetch(apiUrl);
18
+ if (!res.ok) throw new Error(`HN Algolia API error: ${res.status}`);
19
+ const data = await res.json() as {
20
+ hits: Array<{
21
+ title: string;
22
+ url: string | null;
23
+ points: number;
24
+ num_comments: number;
25
+ author: string;
26
+ created_at: string;
27
+ objectID: string;
28
+ }>;
29
+ };
30
+
31
+ const raw = data.hits
32
+ .map((r, i) =>
33
+ [
34
+ `[${i + 1}] ${r.title ?? "Untitled"}`,
35
+ `URL: ${r.url ?? `https://news.ycombinator.com/item?id=${r.objectID}`}`,
36
+ `Score: ${r.points} points | ${r.num_comments} comments`,
37
+ `Author: ${r.author} | Posted: ${r.created_at}`,
38
+ ].join("\n")
39
+ )
40
+ .join("\n\n")
41
+ .slice(0, options.maxLength ?? 4000);
42
+
43
+ const newest = data.hits.map((r) => r.created_at).sort().reverse()[0] ?? null;
44
+ return { raw, content_date: newest, freshness_confidence: newest ? "high" : "medium" };
45
+ }
46
+
47
+ // Default: browser-based scrape for HN front page or search pages
48
+ const browser = await chromium.launch({ headless: true });
49
+ const page = await browser.newPage();
50
+
51
+ await page.goto(url, { waitUntil: "domcontentloaded", timeout: 20000 });
52
+
53
+ const data = await page.evaluate(`(function() {
54
+ var items = Array.from(document.querySelectorAll('.athing')).slice(0, 20);
55
+ var results = items.map(function(el) {
56
+ var titleLineEl = el.querySelector('.titleline > a');
57
+ var title = titleLineEl ? titleLineEl.textContent.trim() : null;
58
+ var link = titleLineEl ? titleLineEl.getAttribute('href') : null;
59
+ var subtext = el.nextElementSibling;
60
+ var scoreEl = subtext ? subtext.querySelector('.score') : null;
61
+ var score = scoreEl ? scoreEl.textContent.trim() : null;
62
+ var ageEl = subtext ? subtext.querySelector('.age') : null;
63
+ var age = ageEl ? ageEl.getAttribute('title') : null;
64
+ var anchors = subtext ? subtext.querySelectorAll('a') : [];
65
+ var commentLink = anchors.length > 0 ? anchors[anchors.length - 1].textContent.trim() : null;
66
+ return { title: title, link: link, score: score, age: age, commentLink: commentLink };
67
+ });
68
+ return results;
69
+ })()`);
70
+
71
+ await browser.close();
72
+
73
+ const typedData = data as Array<{ title: string | null; link: string | null; score: string | null; age: string | null; commentLink: string | null }>;
74
+
75
+ const raw = typedData
76
+ .map((r, i) =>
77
+ [
78
+ `[${i + 1}] ${r.title ?? "Untitled"}`,
79
+ `URL: ${r.link ?? "N/A"}`,
80
+ `Score: ${r.score ?? "N/A"} | ${r.commentLink ?? ""}`,
81
+ `Posted: ${r.age ?? "unknown"}`,
82
+ ].join("\n")
83
+ )
84
+ .join("\n\n");
85
+
86
+ const newestDate = typedData.map((r) => r.age).filter(Boolean).sort().reverse()[0] ?? null;
87
+
88
+ return {
89
+ raw,
90
+ content_date: newestDate,
91
+ freshness_confidence: newestDate ? "high" : "medium",
92
+ };
93
+ }
@@ -0,0 +1,104 @@
1
+ import { AdapterResult, ExtractOptions } from "../types.js";
2
+
3
+ // Uses npm registry API + PyPI JSON API (no auth needed)
4
+ export async function packageTrendsAdapter(options: ExtractOptions): Promise<AdapterResult> {
5
+ // options.url is the package name or a comma-separated list
6
+ // e.g. "langchain" or "npm:langchain" or "pypi:langchain"
7
+ const raw_input = options.url.replace(/^https?:\/\//, "").trim();
8
+
9
+ // Parse ecosystem prefix
10
+ const parts = raw_input.split(",").map((s) => s.trim());
11
+ const results: string[] = [];
12
+ let latestDate: string | null = null;
13
+
14
+ for (const pkg of parts) {
15
+ const isExplicitPypi = pkg.startsWith("pypi:");
16
+ const isExplicitNpm = pkg.startsWith("npm:");
17
+ const pkgName = pkg.replace(/^(pypi:|npm:)/, "");
18
+
19
+ // Try npm
20
+ if (!isExplicitPypi) {
21
+ try {
22
+ const npmRes = await fetch(`https://registry.npmjs.org/${encodeURIComponent(pkgName)}`, {
23
+ headers: { Accept: "application/json" },
24
+ });
25
+ if (npmRes.ok) {
26
+ const npmData = await npmRes.json() as {
27
+ name: string;
28
+ description?: string;
29
+ "dist-tags"?: { latest?: string };
30
+ time?: Record<string, string>;
31
+ homepage?: string;
32
+ keywords?: string[];
33
+ repository?: { url?: string };
34
+ };
35
+
36
+ const latestVersion = npmData["dist-tags"]?.latest ?? "unknown";
37
+ const modified = npmData.time?.modified ?? null;
38
+ const created = npmData.time?.created ?? null;
39
+ const versions = Object.keys(npmData.time ?? {}).filter((k) => !["created", "modified"].includes(k)).length;
40
+
41
+ if (modified && (!latestDate || modified > latestDate)) latestDate = modified;
42
+
43
+ results.push([
44
+ `📦 [npm] ${npmData.name}`,
45
+ `Latest version: ${latestVersion}`,
46
+ `Total versions: ${versions}`,
47
+ `Description: ${npmData.description ?? "N/A"}`,
48
+ `Keywords: ${npmData.keywords?.join(", ") ?? "none"}`,
49
+ `Created: ${created ?? "unknown"}`,
50
+ `Last updated: ${modified ?? "unknown"}`,
51
+ `Homepage: ${npmData.homepage ?? "N/A"}`,
52
+ ].join("\n"));
53
+ continue;
54
+ }
55
+ } catch { /* fall through to PyPI */ }
56
+ }
57
+
58
+ // Try PyPI
59
+ if (!isExplicitNpm) {
60
+ try {
61
+ const pypiRes = await fetch(`https://pypi.org/pypi/${encodeURIComponent(pkgName)}/json`);
62
+ if (pypiRes.ok) {
63
+ const pypiData = await pypiRes.json() as {
64
+ info: {
65
+ name: string;
66
+ version: string;
67
+ summary?: string;
68
+ keywords?: string;
69
+ home_page?: string;
70
+ project_urls?: Record<string, string>;
71
+ };
72
+ releases?: Record<string, unknown[]>;
73
+ urls?: Array<{ upload_time: string }>;
74
+ };
75
+
76
+ const info = pypiData.info;
77
+ const releaseCount = Object.keys(pypiData.releases ?? {}).length;
78
+ const latestUpload = pypiData.urls?.[0]?.upload_time ?? null;
79
+
80
+ if (latestUpload && (!latestDate || latestUpload > latestDate)) latestDate = latestUpload;
81
+
82
+ results.push([
83
+ `🐍 [PyPI] ${info.name}`,
84
+ `Latest version: ${info.version}`,
85
+ `Total releases: ${releaseCount}`,
86
+ `Description: ${info.summary ?? "N/A"}`,
87
+ `Keywords: ${info.keywords ?? "none"}`,
88
+ `Last release: ${latestUpload ?? "unknown"}`,
89
+ `Homepage: ${info.home_page ?? info.project_urls?.Homepage ?? "N/A"}`,
90
+ ].join("\n"));
91
+ continue;
92
+ }
93
+ } catch { /* not found */ }
94
+ }
95
+
96
+ results.push(`❌ Package not found on npm or PyPI: ${pkgName}`);
97
+ }
98
+
99
+ return {
100
+ raw: results.join("\n\n").slice(0, options.maxLength ?? 5000),
101
+ content_date: latestDate,
102
+ freshness_confidence: latestDate ? "high" : "low",
103
+ };
104
+ }
@@ -0,0 +1,78 @@
1
+ import { AdapterResult, ExtractOptions } from "../types.js";
2
+
3
+ // Uses GitHub Search API (no auth needed for basic search)
4
+ export async function repoSearchAdapter(options: ExtractOptions): Promise<AdapterResult> {
5
+ // options.url is treated as the search query string
6
+ // e.g. "mcp server typescript" or a full GitHub search URL
7
+ let query = options.url;
8
+
9
+ // If it's a full URL, extract the query param
10
+ try {
11
+ const parsed = new URL(options.url);
12
+ if (parsed.hostname === "github.com" && parsed.pathname.includes("/search")) {
13
+ query = parsed.searchParams.get("q") ?? options.url;
14
+ } else if (parsed.hostname === "github.com") {
15
+ // It's a direct URL — not a search
16
+ query = parsed.pathname.replace("/search", "").trim().replace(/^\//, "");
17
+ }
18
+ } catch {
19
+ // plain string query, use as-is
20
+ }
21
+
22
+ const apiUrl = `https://api.github.com/search/repositories?q=${encodeURIComponent(query)}&sort=stars&order=desc&per_page=10`;
23
+
24
+ const res = await fetch(apiUrl, {
25
+ headers: {
26
+ Accept: "application/vnd.github.v3+json",
27
+ "User-Agent": "freshcontext-mcp/0.1.0",
28
+ },
29
+ });
30
+
31
+ if (!res.ok) {
32
+ throw new Error(`GitHub Search API error: ${res.status} ${await res.text()}`);
33
+ }
34
+
35
+ const data = await res.json() as {
36
+ total_count: number;
37
+ items: Array<{
38
+ full_name: string;
39
+ description: string | null;
40
+ html_url: string;
41
+ stargazers_count: number;
42
+ forks_count: number;
43
+ language: string | null;
44
+ topics: string[];
45
+ pushed_at: string;
46
+ created_at: string;
47
+ open_issues_count: number;
48
+ }>;
49
+ };
50
+
51
+ const raw = [
52
+ `Total matching repos: ${data.total_count.toLocaleString()}`,
53
+ `Top ${data.items.length} by stars:\n`,
54
+ ...data.items.map((r, i) =>
55
+ [
56
+ `[${i + 1}] ${r.full_name}`,
57
+ `⭐ ${r.stargazers_count.toLocaleString()} stars | 🍴 ${r.forks_count} forks | Issues: ${r.open_issues_count}`,
58
+ `Language: ${r.language ?? "unknown"}`,
59
+ `Topics: ${r.topics?.join(", ") || "none"}`,
60
+ `Description: ${r.description ?? "N/A"}`,
61
+ `Last push: ${r.pushed_at}`,
62
+ `Created: ${r.created_at}`,
63
+ `URL: ${r.html_url}`,
64
+ ].join("\n")
65
+ ),
66
+ ]
67
+ .join("\n\n")
68
+ .slice(0, options.maxLength ?? 6000);
69
+
70
+ // Most recently pushed repo date as content_date
71
+ const dates = data.items.map((r) => r.pushed_at).sort().reverse();
72
+
73
+ return {
74
+ raw,
75
+ content_date: dates[0] ?? null,
76
+ freshness_confidence: "high",
77
+ };
78
+ }
@@ -0,0 +1,65 @@
1
+ import { chromium } from "playwright";
2
+ import { AdapterResult, ExtractOptions } from "../types.js";
3
+
4
+ export async function scholarAdapter(options: ExtractOptions): Promise<AdapterResult> {
5
+ const browser = await chromium.launch({ headless: true });
6
+ const page = await browser.newPage();
7
+
8
+ await page.setExtraHTTPHeaders({
9
+ "User-Agent":
10
+ "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36",
11
+ });
12
+
13
+ await page.goto(options.url, { waitUntil: "domcontentloaded", timeout: 20000 });
14
+
15
+ const data = await page.evaluate(`(function() {
16
+ var items = Array.from(document.querySelectorAll('.gs_r.gs_or.gs_scl'));
17
+ var results = items.map(function(el) {
18
+ var titleEl = el.querySelector('.gs_rt');
19
+ var title = titleEl ? titleEl.textContent.trim() : null;
20
+ var authorsEl = el.querySelector('.gs_a');
21
+ var authors = authorsEl ? authorsEl.textContent.trim() : null;
22
+ var snippetEl = el.querySelector('.gs_rs');
23
+ var snippet = snippetEl ? snippetEl.textContent.trim() : null;
24
+ var linkEl = el.querySelector('.gs_rt a');
25
+ var link = linkEl ? linkEl.getAttribute('href') : null;
26
+ var yearMatch = authors ? authors.match(/\\b(19|20)\\d{2}\\b/) : null;
27
+ var year = yearMatch ? yearMatch[0] : null;
28
+ return { title: title, authors: authors, snippet: snippet, link: link, year: year };
29
+ });
30
+ return results;
31
+ })()`);
32
+
33
+ await browser.close();
34
+
35
+ const typedData = data as Array<{ title: string | null; authors: string | null; snippet: string | null; link: string | null; year: string | null }>;
36
+
37
+ if (!typedData.length) {
38
+ return {
39
+ raw: "No results found on this Scholar page.",
40
+ content_date: null,
41
+ freshness_confidence: "low",
42
+ };
43
+ }
44
+
45
+ const raw = typedData
46
+ .map((r, i) =>
47
+ [
48
+ `[${i + 1}] ${r.title ?? "Untitled"}`,
49
+ `Authors: ${r.authors ?? "Unknown"}`,
50
+ `Year: ${r.year ?? "Unknown"}`,
51
+ `Snippet: ${r.snippet ?? "N/A"}`,
52
+ `Link: ${r.link ?? "N/A"}`,
53
+ ].join("\n")
54
+ )
55
+ .join("\n\n");
56
+
57
+ const years = typedData.map((r) => r.year).filter(Boolean) as string[];
58
+ const newestYear = years.sort().reverse()[0] ?? null;
59
+
60
+ return {
61
+ raw,
62
+ content_date: newestYear ? `${newestYear}-01-01` : null,
63
+ freshness_confidence: newestYear ? "high" : "low",
64
+ };
65
+ }
@@ -0,0 +1,99 @@
1
+ import { chromium } from "playwright";
2
+ import { AdapterResult, ExtractOptions } from "../types.js";
3
+
4
+ export async function ycAdapter(options: ExtractOptions): Promise<AdapterResult> {
5
+ const browser = await chromium.launch({ headless: true });
6
+ const page = await browser.newPage();
7
+
8
+ // YC company directory is React-rendered — wait for network to settle
9
+ await page.goto(options.url, { waitUntil: "networkidle", timeout: 30000 });
10
+
11
+ // Wait for company cards to appear
12
+ await page.waitForSelector('a[href*="/companies/"]', { timeout: 15000 }).catch(() => null);
13
+
14
+ const data = await page.evaluate(`(function() {
15
+ // YC company cards — robust multi-strategy extraction
16
+ var results = [];
17
+
18
+ // Strategy 1: structured company divs with name + description + batch
19
+ var cards = Array.from(document.querySelectorAll('div[class*="_company_"]'));
20
+
21
+ if (cards.length === 0) {
22
+ // Strategy 2: anchor links to /companies/* pages
23
+ cards = Array.from(document.querySelectorAll('a[href*="/companies/"]'))
24
+ .filter(function(el) {
25
+ return el.querySelector('span, p, div');
26
+ });
27
+ }
28
+
29
+ cards.slice(0, 25).forEach(function(el) {
30
+ var allText = el.innerText || el.textContent || "";
31
+ var lines = allText.split('\\n').map(function(l) { return l.trim(); }).filter(Boolean);
32
+
33
+ // Try to find structured spans
34
+ var spans = Array.from(el.querySelectorAll('span'));
35
+ var name = null, description = null, batch = null;
36
+ var tags = [];
37
+
38
+ spans.forEach(function(s) {
39
+ var t = s.textContent.trim();
40
+ if (!t) return;
41
+ if (s.className && s.className.toString().includes('Name')) name = t;
42
+ else if (s.className && s.className.toString().includes('Desc')) description = t;
43
+ else if (s.className && s.className.toString().includes('Batch')) batch = t;
44
+ else if (s.className && s.className.toString().includes('Tag')) tags.push(t);
45
+ });
46
+
47
+ // Fallback to line parsing
48
+ if (!name && lines.length > 0) name = lines[0];
49
+ if (!description && lines.length > 1) description = lines[1];
50
+
51
+ var link = el.tagName === 'A'
52
+ ? el.getAttribute('href')
53
+ : (el.querySelector('a') ? el.querySelector('a').getAttribute('href') : null);
54
+
55
+ if (name && name.length > 1 && name.length < 80) {
56
+ results.push({ name, description, batch, tags, link });
57
+ }
58
+ });
59
+
60
+ return results;
61
+ })()`);
62
+
63
+ await browser.close();
64
+
65
+ const typedData = data as Array<{
66
+ name: string | null;
67
+ description: string | null;
68
+ batch: string | null;
69
+ tags: string[];
70
+ link: string | null;
71
+ }>;
72
+
73
+ if (!typedData.length) {
74
+ return {
75
+ raw: "No YC companies found — page may have changed structure. Try visiting: " + options.url,
76
+ content_date: null,
77
+ freshness_confidence: "low",
78
+ };
79
+ }
80
+
81
+ const raw = typedData
82
+ .map((r, i) =>
83
+ [
84
+ `[${i + 1}] ${r.name ?? "Unknown"}`,
85
+ `Batch: ${r.batch ?? "Unknown"}`,
86
+ `Tags: ${r.tags?.join(", ") || "none"}`,
87
+ `Description: ${r.description ?? "N/A"}`,
88
+ `Link: ${r.link ? (r.link.startsWith("http") ? r.link : "https://www.ycombinator.com" + r.link) : "N/A"}`,
89
+ ].join("\n")
90
+ )
91
+ .join("\n\n")
92
+ .slice(0, options.maxLength ?? 6000);
93
+
94
+ return {
95
+ raw,
96
+ content_date: new Date().toISOString().split("T")[0],
97
+ freshness_confidence: "high",
98
+ };
99
+ }
package/src/server.ts ADDED
@@ -0,0 +1,179 @@
1
+ import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
2
+ import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
3
+ import { z } from "zod";
4
+ import { githubAdapter } from "./adapters/github.js";
5
+ import { scholarAdapter } from "./adapters/scholar.js";
6
+ import { hackerNewsAdapter } from "./adapters/hackernews.js";
7
+ import { ycAdapter } from "./adapters/yc.js";
8
+ import { repoSearchAdapter } from "./adapters/repoSearch.js";
9
+ import { packageTrendsAdapter } from "./adapters/packageTrends.js";
10
+ import { stampFreshness, formatForLLM } from "./tools/freshnessStamp.js";
11
+
12
// Single MCP server instance; every tool below registers against it and it is
// connected to a stdio transport in main().
const server = new McpServer({
  name: "freshcontext-mcp",
  version: "0.1.0",
});
16
+
17
+ // ─── Tool: extract_github ────────────────────────────────────────────────────
18
server.registerTool(
  "extract_github",
  {
    description:
      "Extract real-time data from a GitHub repository — README, stars, forks, language, topics, last commit. Returns timestamped freshcontext.",
    // NOTE(review): recent MCP TypeScript SDK versions document registerTool's
    // inputSchema as a raw Zod shape ({ url: z.string()... }) rather than a
    // z.object(...) wrapper — confirm this compiles/validates against the
    // pinned SDK version before release.
    inputSchema: z.object({
      url: z.string().url().describe("Full GitHub repo URL e.g. https://github.com/owner/repo"),
      max_length: z.number().optional().default(6000).describe("Max content length"),
    }),
    annotations: { readOnlyHint: true, openWorldHint: true },
  },
  // Scrape the repo with Playwright, then wrap the result with freshness metadata.
  async ({ url, max_length }) => {
    const result = await githubAdapter({ url, maxLength: max_length });
    const ctx = stampFreshness(result, { url, maxLength: max_length }, "github");
    return { content: [{ type: "text", text: formatForLLM(ctx) }] };
  }
);
35
+
36
+ // ─── Tool: extract_scholar ───────────────────────────────────────────────────
37
server.registerTool(
  "extract_scholar",
  {
    description:
      "Extract research results from a Google Scholar search URL. Returns titles, authors, publication years, and snippets — all timestamped.",
    inputSchema: z.object({
      url: z.string().url().describe("Google Scholar search URL e.g. https://scholar.google.com/scholar?q=..."),
      max_length: z.number().optional().default(6000),
    }),
    annotations: { readOnlyHint: true, openWorldHint: true },
  },
  // Scrape Scholar results with Playwright, then stamp freshness metadata.
  async ({ url, max_length }) => {
    const result = await scholarAdapter({ url, maxLength: max_length });
    const ctx = stampFreshness(result, { url, maxLength: max_length }, "google_scholar");
    return { content: [{ type: "text", text: formatForLLM(ctx) }] };
  }
);
54
+
55
+ // ─── Tool: extract_hackernews ────────────────────────────────────────────────
56
server.registerTool(
  "extract_hackernews",
  {
    description:
      "Extract top stories or search results from Hacker News. Real-time dev/tech community sentiment with post timestamps.",
    inputSchema: z.object({
      url: z.string().url().describe("HN URL e.g. https://news.ycombinator.com or https://hn.algolia.com/?q=..."),
      max_length: z.number().optional().default(4000),
    }),
    annotations: { readOnlyHint: true, openWorldHint: true },
  },
  // The adapter picks the Algolia API or a browser scrape based on the URL.
  async ({ url, max_length }) => {
    const result = await hackerNewsAdapter({ url, maxLength: max_length });
    const ctx = stampFreshness(result, { url, maxLength: max_length }, "hackernews");
    return { content: [{ type: "text", text: formatForLLM(ctx) }] };
  }
);
73
+
74
+ // ─── Tool: extract_yc ──────────────────────────────────────────────────────────
75
server.registerTool(
  "extract_yc",
  {
    description:
      "Scrape YC company listings. Use https://www.ycombinator.com/companies?query=KEYWORD to find startups in a space. Returns name, batch, tags, description per company with freshness timestamp.",
    inputSchema: z.object({
      url: z.string().url().describe("YC companies URL e.g. https://www.ycombinator.com/companies?query=mcp"),
      max_length: z.number().optional().default(6000),
    }),
    annotations: { readOnlyHint: true, openWorldHint: true },
  },
  // Scrape the React-rendered YC directory with Playwright, then stamp freshness.
  async ({ url, max_length }) => {
    const result = await ycAdapter({ url, maxLength: max_length });
    const ctx = stampFreshness(result, { url, maxLength: max_length }, "ycombinator");
    return { content: [{ type: "text", text: formatForLLM(ctx) }] };
  }
);
92
+
93
+ // ─── Tool: search_repos ──────────────────────────────────────────────────────
94
server.registerTool(
  "search_repos",
  {
    description:
      "Search GitHub for repositories matching a keyword or topic. Returns top results by stars with activity signals. Use to find competitors, similar tools, or related projects.",
    // Takes a free-form query rather than a URL; the adapter accepts either.
    inputSchema: z.object({
      query: z.string().describe("Search query e.g. 'mcp server typescript' or 'cashflow prediction python'"),
      max_length: z.number().optional().default(6000),
    }),
    annotations: { readOnlyHint: true, openWorldHint: true },
  },
  // The query string is passed through the adapter's generic url field.
  async ({ query, max_length }) => {
    const result = await repoSearchAdapter({ url: query, maxLength: max_length });
    const ctx = stampFreshness(result, { url: query, maxLength: max_length }, "github_search");
    return { content: [{ type: "text", text: formatForLLM(ctx) }] };
  }
);
111
+
112
+ // ─── Tool: package_trends ────────────────────────────────────────────────────
113
server.registerTool(
  "package_trends",
  {
    description:
      "Look up npm and PyPI package metadata — version history, release cadence, last updated. Use to gauge ecosystem activity around a tool or dependency. Supports comma-separated list of packages.",
    inputSchema: z.object({
      packages: z.string().describe("Package name(s) e.g. 'langchain' or 'npm:zod,pypi:fastapi'"),
      max_length: z.number().optional().default(5000),
    }),
    annotations: { readOnlyHint: true, openWorldHint: true },
  },
  // The package spec string rides through the adapter's generic url field.
  async ({ packages, max_length }) => {
    const result = await packageTrendsAdapter({ url: packages, maxLength: max_length });
    const ctx = stampFreshness(result, { url: packages, maxLength: max_length }, "package_registry");
    return { content: [{ type: "text", text: formatForLLM(ctx) }] };
  }
);
130
+
131
+ // ─── Tool: extract_landscape ─────────────────────────────────────────────────
132
server.registerTool(
  "extract_landscape",
  {
    description:
      "Composite intelligence tool. Given a project idea or keyword, simultaneously queries YC startups, GitHub repos, HN sentiment, and package activity to answer: Who is building this? Is it funded? What's getting traction? Returns a unified timestamped landscape report.",
    inputSchema: z.object({
      topic: z.string().describe("Your project idea or keyword e.g. 'mcp server' or 'cashflow prediction'"),
      max_length: z.number().optional().default(8000),
    }),
    annotations: { readOnlyHint: true, openWorldHint: true },
  },
  async ({ topic, max_length }) => {
    // Split the overall budget evenly across the four sections.
    const perSection = Math.floor((max_length ?? 8000) / 4);

    // Run all four adapters concurrently; allSettled keeps partial results
    // when some sources fail.
    const [ycResult, repoResult, hnResult, pkgResult] = await Promise.allSettled([
      ycAdapter({ url: `https://www.ycombinator.com/companies?query=${encodeURIComponent(topic)}`, maxLength: perSection }),
      repoSearchAdapter({ url: topic, maxLength: perSection }),
      hackerNewsAdapter({ url: `https://hn.algolia.com/api/v1/search?query=${encodeURIComponent(topic)}&tags=story&hitsPerPage=15`, maxLength: perSection }),
      packageTrendsAdapter({ url: topic, maxLength: perSection }),
    ]);

    // Render one markdown section per source, inlining the error for failures.
    const section = (label: string, result: PromiseSettledResult<{ raw: string; content_date: string | null; freshness_confidence: string }>) =>
      result.status === "fulfilled"
        ? `## ${label}\n${result.value.raw}`
        : `## ${label}\n[Error: ${(result as PromiseRejectedResult).reason}]`;

    const combined = [
      `# Landscape Report: "${topic}"`,
      `Generated: ${new Date().toISOString()}`,
      "",
      section("🚀 YC Startups in this space", ycResult),
      section("📦 Top GitHub repos", repoResult),
      section("💬 HN sentiment (last month)", hnResult),
      section("📊 Package ecosystem", pkgResult),
    ].join("\n\n");

    return { content: [{ type: "text", text: combined }] };
  }
);
171
+
172
+ // ─── Start ───────────────────────────────────────────────────────────────────
173
// Connect the server to stdio and block until the transport closes.
async function main() {
  const transport = new StdioServerTransport();
  await server.connect(transport);
  // Log to stderr: stdout is reserved for the MCP stdio protocol stream.
  console.error("freshcontext-mcp running on stdio");
}
178
+
179
// Exit non-zero on fatal startup errors so launchers/clients see the failure;
// the previous bare console.error left the process exiting with status 0.
main().catch((err) => {
  console.error(err);
  process.exit(1);
});