openalmanac 0.2.58 → 0.3.1

This diff shows the changes between two publicly released versions of this package, as published to its public registry. It is provided for informational purposes only.
package/dist/setup.js CHANGED
@@ -524,7 +524,7 @@ function printResult(agent, loginResult, mcpChanged, toolCount) {
524
524
  w(row(` ${WHITE_BOLD}Next steps${RST}`));
525
525
  w(empty);
526
526
  w(row(` ${BLUE}1.${RST} Type ${WHITE_BOLD}claude${RST} to start Claude Code`));
527
- w(row(` ${BLUE}2.${RST} Say ${BLUE}"Let's explore <topic> using Almanac"${RST}`));
527
+ w(row(` ${BLUE}2.${RST} Say ${BLUE}"I want to contribute/explore the founders-inc wiki"${RST}`));
528
528
  w(empty);
529
529
  w(` ${BLUE_DIM}\u2570${"─".repeat(innerW)}\u256f${RST}`);
530
530
  w("");
@@ -20,30 +20,79 @@ function coerceJson(schema) {
20
20
  }, schema);
21
21
  }
22
22
  export function registerResearchTools(server) {
23
+ // Discriminated union on `source` — each source's schema only surfaces its own
24
+ // options, so agents get typed autocomplete instead of an opaque options bag.
25
+ //
26
+ // Adding a new source (e.g. hackernews) = append another z.object variant with
27
+ // its own `source: z.literal(...)` and per-source fields. No existing schema
28
+ // changes.
29
+ const WebSearchInput = z.object({
30
+ source: z.literal("web").describe("Generic web search via Google/Serper. Use for general references, news, docs."),
31
+ query: z.string().min(1).describe("Search terms. Supports quoted phrases and site: operators."),
32
+ limit: z.number().int().min(1).max(20).default(10).describe("Max results (1-20, default 10)."),
33
+ });
34
+ const RedditSearchInput = z.object({
35
+ source: z.literal("reddit").describe("Search Reddit — use when the user wants community perspectives, subreddit consensus, lived experiences, or ranked-by-engagement content. Requests go through a residential proxy to get past Reddit's anti-scraping."),
36
+ subreddit: z.string().optional().describe("Subreddit name without the 'r/' prefix (e.g. 'Harvard'). Omit to search across all of Reddit. Case-insensitive."),
37
+ query: z.string().optional().describe("Optional full-text search terms. Omit to return the subreddit's sorted listing (top posts of the year, etc.)."),
38
+ sort: z.enum(["top", "hot", "new", "rising", "controversial", "relevance", "comments"])
39
+ .default("top")
40
+ .describe("For listings: 'top'/'hot'/'new'/'rising'/'controversial'. For searches: 'relevance'/'top'/'new'/'comments'. Default 'top'."),
41
+ time_range: z.enum(["hour", "day", "week", "month", "year", "all"])
42
+ .default("year")
43
+ .describe("Time window for top/controversial listings and all searches. Default 'year'."),
44
+ limit: z.number().int().min(1).max(100).default(25).describe("Max posts to return (1-100, default 25)."),
45
+ });
23
46
  server.addTool({
24
47
  name: "search_web",
25
- description: "Search the web for sources to cite in articles. Use this to find references before writing. Requires API key. Rate limit: 10/min.",
26
- parameters: z.object({
27
- query: z.string().describe("Search terms"),
28
- limit: z.number().default(10).describe("Max results (1-20, default 10)"),
29
- }),
30
- async execute({ query, limit }) {
48
+ description: "Search the web or a specific community source (Reddit). Discriminated on `source`:\n\n" +
49
+ "- `source: \"web\"` — general web search via Google. Use for news, docs, scholarly references.\n" +
50
+ "- `source: \"reddit\"` — Reddit-aware search returning posts with score, flair, num_comments, permalink. " +
51
+ "Use when the user is asking about community perspectives, subreddit consensus, or 'what do people think about X'.\n\n" +
52
+ "Each source exposes its own parameters — follow the schema for the source you pick. " +
53
+ "Rate limit: 10/min. Requires API key.",
54
+ parameters: z.discriminatedUnion("source", [WebSearchInput, RedditSearchInput]),
55
+ async execute(input) {
56
+ if (input.source === "reddit") {
57
+ const params = {
58
+ sort: input.sort,
59
+ time_range: input.time_range,
60
+ limit: input.limit,
61
+ };
62
+ if (input.subreddit)
63
+ params.subreddit = input.subreddit;
64
+ if (input.query)
65
+ params.query = input.query;
66
+ const resp = await request("GET", "/api/research/reddit/search", {
67
+ auth: true,
68
+ params,
69
+ });
70
+ return JSON.stringify(await resp.json(), null, 2);
71
+ }
31
72
  const resp = await request("GET", "/api/research/search", {
32
73
  auth: true,
33
- params: { query, limit },
74
+ params: { query: input.query, limit: input.limit },
34
75
  });
35
76
  return JSON.stringify(await resp.json(), null, 2);
36
77
  },
37
78
  });
38
79
  server.addTool({
39
80
  name: "read_webpage",
40
- description: "Fetch a webpage and return its content as markdown. Use this to read sources found via search_web before citing them in articles. Supports web pages, PDFs, and YouTube videos. Requires API key. Rate limit: 5/min.",
81
+ description: "Fetch a URL and return its content as markdown. Routes automatically based on URL:\n" +
82
+ "- **Reddit threads** (reddit.com/r/{sub}/comments/{id}/...) — returns the post plus top-level threaded comments with scores and authors, via a residential proxy.\n" +
83
+ "- **YouTube videos** — returns title, description, transcript when available.\n" +
84
+ "- **PDFs** — extracts text.\n" +
85
+ "- **LinkedIn posts/profiles** — uses the LinkedIn scraper.\n" +
86
+ "- **Everything else** — generic web scrape with Firecrawl/Jina fallback.\n\n" +
87
+ "Requires API key. Rate limit: 5/min.",
41
88
  parameters: z.object({
42
- url: z.string().describe("URL to read"),
89
+ url: z.string().url().describe("Full URL to read. For Reddit threads, use the canonical reddit.com/r/{sub}/comments/{id}/... form."),
43
90
  max_length: z
44
91
  .number()
92
+ .int()
93
+ .min(1000)
45
94
  .default(20000)
46
- .describe("Max characters to return (default 20000). Use higher values for long-form sources."),
95
+ .describe("Max characters to return (default 20000). Increase for long-form sources; response is truncated with a note when it hits the cap."),
47
96
  }),
48
97
  async execute({ url, max_length }) {
49
98
  const resp = await request("GET", "/api/research/read", {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "openalmanac",
3
- "version": "0.2.58",
3
+ "version": "0.3.1",
4
4
  "description": "OpenAlmanac — pull, edit, and push articles to the open knowledge base",
5
5
  "type": "module",
6
6
  "bin": {