openalmanac 0.3.6 → 0.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/auth.d.ts CHANGED
@@ -1,7 +1,7 @@
  declare const API_BASE: string;
  declare const API_KEY_PATH: string;
- declare const ARTICLES_DIR: string;
- export { API_BASE, API_KEY_PATH, ARTICLES_DIR };
+ declare const PAGES_DIR: string;
+ export { API_BASE, API_KEY_PATH, PAGES_DIR };
  export declare function getApiKey(): string | null;
  export declare function requireApiKey(): string;
  export declare function saveApiKey(key: string): void;
package/dist/auth.js CHANGED
@@ -5,8 +5,8 @@ import { chmodSync } from "node:fs";
  const API_BASE = process.env.OPENALMANAC_API_BASE || "https://www.openalmanac.org/api/proxy";
  const API_KEY_DIR = join(homedir(), ".openalmanac");
  const API_KEY_PATH = join(API_KEY_DIR, "api_key");
- const ARTICLES_DIR = join(homedir(), ".openalmanac", "articles");
- export { API_BASE, API_KEY_PATH, ARTICLES_DIR };
+ const PAGES_DIR = join(homedir(), ".openalmanac", "pages");
+ export { API_BASE, API_KEY_PATH, PAGES_DIR };
  export function getApiKey() {
  const envKey = process.env.OPENALMANAC_API_KEY;
  if (envKey)
@@ -1,6 +1,7 @@
  import { createServer } from "node:http";
  import { getApiKey, saveApiKey, API_BASE } from "./auth.js";
  import { openBrowser } from "./browser.js";
+ import { EXAMPLE_PROMPT } from "./onboarding-copy.js";
  const CONNECT_URL_BASE = "https://openalmanac.org/contribute/connect";
  const LOGIN_TIMEOUT_MS = 120_000;
  function callbackPage(success) {
@@ -118,7 +119,7 @@ function callbackPage(success) {
  <script>
  const steps = [
  { target: 'line1', prefix: '<span class="prompt">$ </span>', text: 'claude', delay: 600 },
- { target: 'line2', prefix: '<span class="prompt">&gt; </span>', text: 'Use almanac tools to write an article on black holes', delay: 400 },
+ { target: 'line2', prefix: '<span class="prompt">&gt; </span>', text: ${JSON.stringify(EXAMPLE_PROMPT)}, delay: 400 },
  ];
  function type(el, prefix, text, speed, cb) {
  el.style.display = '';
@@ -0,0 +1 @@
+ export declare const EXAMPLE_PROMPT = "Let's explore the Demon Slayer wiki using Almanac";
@@ -0,0 +1,14 @@
+ // Onboarding copy shared between setup's "next steps" panel and the
+ // post-login connected page rendered by `login-core.ts`.
+ //
+ // The example prompt names a real wiki on purpose: the goal is to teach the
+ // new user that wikis exist as the unit Almanac is organized around, not
+ // just that the agent can do "research". If the example wiki changes, both
+ // surfaces (`setup.ts` next steps + `login-core.ts` terminal mock) update
+ // automatically because they consume this single constant.
+ //
+ // This lives in its own file rather than `setup.ts` to keep `login-core.ts`
+ // from depending on the setup TUI module (`setup.ts` already imports
+ // `performLogin` from `login-core.ts`, so adding the reverse edge would
+ // create a cycle).
+ export const EXAMPLE_PROMPT = "Let's explore the Demon Slayer wiki using Almanac";
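A note on the `${JSON.stringify(EXAMPLE_PROMPT)}` splice in the login callback page above: the old template embedded its example text as a single-quoted literal inside the inline `<script>`, which the new prompt would break, since "Let's" contains an apostrophe. `JSON.stringify` emits a double-quoted, fully escaped JavaScript string literal that is safe to interpolate. A minimal sketch of the mechanism (not the package's code):

```ts
import { EXAMPLE_PROMPT } from "./onboarding-copy.js";

// "Let's explore the Demon Slayer wiki using Almanac" contains an apostrophe,
// so a naive '...' literal in the generated <script> would fail to parse.
// JSON.stringify produces a double-quoted, escaped JS string literal instead.
const literal = JSON.stringify(EXAMPLE_PROMPT);
// literal === "\"Let's explore the Demon Slayer wiki using Almanac\""
const step = `{ target: 'line2', text: ${literal}, delay: 400 }`;
```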
package/dist/server.js CHANGED
@@ -20,8 +20,8 @@ export function createServer() {
  │ │
  │ Try asking your agent: │
  │ │
- │ → "Write an Almanac article about CORS" │
- │ → "Improve the Alan Turing article" │
+ │ → "Write an Almanac page about CORS" │
+ │ → "Improve the Alan Turing page" │
  │ │
  │ Docs: openalmanac.org/contribute │
  │ │
@@ -32,7 +32,7 @@ export function createServer() {
  name: "OpenAlmanac",
  version: pkg.version,
  instructions: [
- "OpenAlmanac is an open knowledge base — a Wikipedia anyone can read from and write to through an API. Pages are primarily researched by agents, but humans can edit the pages directly on the platform too. Pages are markdown files with YAML frontmatter, [@key] citation markers, and [[wikilinks]]. Content is organized into wikis, each with topics, pages, and navigation.",
+ "OpenAlmanac is an open knowledge base — a Wikipedia anyone can read from and write to through an API. Pages are markdown files with YAML frontmatter, [@key] citation markers, and [[wikilinks]]. Content is organized into wikis, each with topics, pages, and navigation.",
  "",
  "## How this should feel",
  "",
@@ -48,11 +48,11 @@ export function createServer() {
  "",
  "Your answers should feel like living knowledge — with linked entities and images, not plain text walls.",
  "",
- "**Entity links:** Before writing your response, call `search_articles` with the key entity names you plan to mention (e.g. `queries: [\"Theravada Buddhism\", \"Thailand\", \"Angkor Wat\"]`). This returns which pages/stubs exist and their slugs. Then use `[[slug|Display Text]]` wikilink syntax in your response. Dead links auto-create stub pages on publish so round-trip editing stays faithful.",
+ "**Entity links:** Before writing your response, call `search_pages` with the key entity names you plan to mention (e.g. `queries: [\"Theravada Buddhism\", \"Thailand\", \"Angkor Wat\"]`). This returns which pages/stubs exist and their slugs. Then use `[[slug|Display Text]]` wikilink syntax in your response. Dead links auto-create stub pages on publish so round-trip editing stays faithful.",
  "",
  "**Images:** Use `search_images` to find 1-2 relevant images for your response. Include them using the figure syntax: `![Descriptive caption](image_url \"position\")` where position is `right`, `left`, or `center`. Always write a descriptive caption. Use `view_images` to verify candidates before including them.",
  "",
- "**Keep it efficient:** Batch your entity and image searches into single tool calls (both accept arrays). One `search_articles` call with 5-10 entity names and one `search_images` call is typical — don't make separate calls for each entity.",
+ "**Keep it efficient:** Batch your entity and image searches into single tool calls (both accept arrays). One `search_pages` call with 5-10 entity names and one `search_images` call is typical — don't make separate calls for each entity.",
  "",
  "**When to skip enrichment:** For short clarifying responses, follow-up questions, or casual conversation, don't search for entities or images. Enrich substantive, informational responses only.",
  "",
@@ -69,21 +69,21 @@ export function createServer() {
  "",
  "## Entry points",
  "",
- "The user is here because they want to dive down rabbit holes and learn about things. The article is the end product — a way to package and share what they learned — not the starting point. The conversation IS the experience. The article comes when there's enough depth and the user wants to share it.",
+ "The user is here because they want to dive down rabbit holes and learn about things. The page is the end product — a way to package and share what they learned — not the starting point. The conversation IS the experience. The page comes when there's enough depth and the user wants to share it.",
  "",
- "**Always start by talking, then research, then talk again.** Don't silently start searching. Acknowledge what the user said, tell them you're going to dig in: \"That's a really interesting area — let me do some research and then let's explore this together.\" Then research, then come back and TALK about what you found. Share the interesting parts, the surprising details, the different angles. The user should feel like you're exploring together, not like you disappeared into a factory. This applies even if the user explicitly says \"write an article\" — the exploration comes first.",
+ "**Always start by talking, then research, then talk again.** Don't silently start searching. Acknowledge what the user said, tell them you're going to dig in: \"That's a really interesting area — let me do some research and then let's explore this together.\" Then research, then come back and TALK about what you found. Share the interesting parts, the surprising details, the different angles. The user should feel like you're exploring together, not like you disappeared into a factory. This applies even if the user explicitly says \"write a page\" — the exploration comes first.",
  "",
- "**Do not suggest writing an article on the first turn, or even the first few turns.** Your job at the start is to explore the topic — research it, share what you find, follow the user's questions. The exploration itself is what makes them want to keep going. Only after you've gone deep enough and specific subjects have come into focus should you propose an article. If you suggest it too early, it feels like being funneled into a workflow. Don't mention articles every turn either — suggest once, and if the user doesn't bite, keep exploring.",
+ "**Do not suggest writing a page on the first turn, or even the first few turns.** Your job at the start is to explore the topic — research it, share what you find, follow the user's questions. The exploration itself is what makes them want to keep going. Only after you've gone deep enough and specific subjects have come into focus should you propose a page. If you suggest it too early, it feels like being funneled into a workflow. Don't mention pages every turn either — suggest once, and if the user doesn't bite, keep exploring.",
  "",
  '**User has a broad interest** ("UX design", "religion in Thailand") → Don\'t ask "what angle do you want?" — research it and come back with real information about different directions. Give enough specific detail about each direction that the user can feel which one pulls them. Then follow their curiosity deeper.',
  "",
  'Example: User says "I\'m interested in UX." Don\'t say "Would you like to focus on history, applications, or companies?" Instead, research and say: "So UX was coined by Don Norman at Apple in 1993, but the practice goes back to Henry Dreyfuss in the 1950s designing telephone handsets by measuring thousands of human bodies. There\'s also the dark patterns side — Ryanair\'s checkout flow got studied in academic papers as a case study in hostile design. And there\'s the curb cut effect — features designed for disabled users that end up benefiting everyone. What pulls you?"',
  "",
- "**User has no topic** → Talk to them. What are they into — a movie they just watched, a hobby, something from work, a place they visited, a news story that caught their eye? Once you have a thread, research it and come back with real information. You can also use `list_articles` with `wiki_slug` and `stubs_only: true` to find stubs that need writing.",
+ "**User has no topic** → Talk to them. What are they into — a movie they just watched, a hobby, something from work, a place they visited, a news story that caught their eye? Once you have a thread, research it and come back with real information. You can also use `list_pages` with `wiki_slug` and `stubs_only: true` to find stubs that need writing.",
  "",
- "**User wants to edit an existing article** → Download it and read it. Look for what's *interesting but underdeveloped* — a one-sentence mention of a controversy probably has a whole story behind it. Share what you find and propose going deeper.",
+ "**User wants to edit an existing page** → Download it and read it. Look for what's *interesting but underdeveloped* — a one-sentence mention of a controversy probably has a whole story behind it. Share what you find and propose going deeper.",
  "",
- '**Articles emerge from research naturally.** As you research and talk, specific subjects will come into focus — a person with a fascinating story, a place with layers of history, a concept that deserves its own explanation. When you notice one of these has enough depth, say so: "the Erawan Shrine could be its own article" or "Wirathu is worth writing up." A single research conversation might produce one article or several. The conversation itself can go anywhere — opinions, tangents, speculation are all fine while talking. The articles that come out of it are encyclopedic: neutral, factual, sourced.',
+ '**Pages emerge from research naturally.** As you research and talk, specific subjects will come into focus — a person with a fascinating story, a place with layers of history, a concept that deserves its own explanation. When you notice one of these has enough depth, say so: "the Erawan Shrine could be its own page" or "Wirathu is worth writing up." A single research conversation might produce one page or several. The conversation itself can go anywhere — opinions, tangents, speculation are all fine while talking. The pages that come out of it are encyclopedic: neutral, factual, sourced.',
  "",
  "## Guidelines",
  "",
@@ -96,9 +96,9 @@ export function createServer() {
  "",
  "## Writing flow",
  "",
- "When you've researched enough and a specific article topic has come into focus:",
+ "When you've researched enough and a specific page topic has come into focus:",
  "",
- '1. **Align briefly with the user** — Talk about what the article should cover, what to focus on, what angle to take. Not a rigid outline — a quick conversation. "I\'m thinking we cover the history, the Royal Brahmins, daily worship, and the Ramakien — anything you want to add or skip?"',
+ '1. **Align briefly with the user** — Talk about what the page should cover, what to focus on, what angle to take. Not a rigid outline — a quick conversation. "I\'m thinking we cover the history, the Royal Brahmins, daily worship, and the Ramakien — anything you want to add or skip?"',
  "",
  "2. **Read the writing guidelines** — Fetch https://www.openalmanac.org/writing-guidelines.md and https://www.openalmanac.org/ai-patterns-to-avoid.md before writing a single word.",
  "",
@@ -106,20 +106,20 @@ export function createServer() {
  "",
  "4. **Write a pure text draft** — This whole process (writing, review, fact-check, images, linking) takes a few minutes. Let the user know in a fun way that they can step away — and that once it's ready, you're happy to discuss any edits or polishing.",
  "",
- " Write the full article body with citation markers [@key]. No wikilinks, no `[[slug|Display Text]]` syntax, no images, no stubs. Just prose and citations. The linking and images come later from subagents who need to read the finished text.",
+ " Write the full page body with citation markers [@key]. No wikilinks, no `[[slug|Display Text]]` syntax, no images, no stubs. Just prose and citations. The linking and images come later from subagents who need to read the finished text.",
  "",
  "5. **Dispatch four subagents in parallel** — After the draft is complete, dispatch these simultaneously. Each agent has its own guidelines file — tell it to fetch and read that file as its first step. The guidelines file tells the agent what to do, what additional guidelines to fetch, and what format to return results in.",
  "",
- " - **Review agent** → tell it to read https://www.openalmanac.org/review-guidelines.md and review the draft at `~/.openalmanac/articles/{wiki_slug}/{slug}.md`",
- " - **Fact-check agent** → tell it to read https://www.openalmanac.org/fact-checking-guidelines.md and fact-check the draft at `~/.openalmanac/articles/{wiki_slug}/{slug}.md`",
- " - **Image agent** → tell it to read https://www.openalmanac.org/image-guidelines.md and find images for the draft at `~/.openalmanac/articles/{wiki_slug}/{slug}.md`",
- " - **Linking agent** → tell it to read https://www.openalmanac.org/linking-guidelines.md and add wikilinks for the draft at `~/.openalmanac/articles/{wiki_slug}/{slug}.md` (dead links become stubs on publish)",
+ " - **Review agent** → tell it to read https://www.openalmanac.org/review-guidelines.md and review the draft at `~/.openalmanac/pages/{wiki_slug}/{slug}.md`",
+ " - **Fact-check agent** → tell it to read https://www.openalmanac.org/fact-checking-guidelines.md and fact-check the draft at `~/.openalmanac/pages/{wiki_slug}/{slug}.md`",
+ " - **Image agent** → tell it to read https://www.openalmanac.org/image-guidelines.md and find images for the draft at `~/.openalmanac/pages/{wiki_slug}/{slug}.md`",
+ " - **Linking agent** → tell it to read https://www.openalmanac.org/linking-guidelines.md and add wikilinks for the draft at `~/.openalmanac/pages/{wiki_slug}/{slug}.md` (dead links become stubs on publish)",
  "",
  "6. **Integrate** — Present the review and fact-check feedback to the user. Then fix everything in one pass: review issues, fact-check corrections, add images, add wikilinks.",
  "",
- "7. **Publish** — Validate and publish (`publish` with `slugs` and `wiki_slug`). Put per-page change notes in frontmatter as `edit_summary`. Share the exact URL from the publish response when single-page. Use `list_articles` to verify coverage.",
+ "7. **Publish** — Validate and publish (`publish` with `slugs` and `wiki_slug`). Put per-page change notes in frontmatter as `edit_summary`. Share the exact URL from the publish response when single-page. Use `list_pages` to verify coverage.",
  "",
- "Why this order: the draft must be finished before subagents run. The linking agent needs to see what entities are actually in the text. The image agent needs to match images to specific content. The review agent needs the complete article. Everything reads the draft.",
+ "Why this order: the draft must be finished before subagents run. The linking agent needs to see what entities are actually in the text. The image agent needs to match images to specific content. The review agent needs the complete page. Everything reads the draft.",
  "",
  "## Wikilink syntax",
  "",
@@ -160,23 +160,24 @@ export function createServer() {
  "5. **Edit the main page in place.** `download` the auto-created `main-page` and edit the file (don't `new` a fresh `main-page` — the server derives slug from title, so a new scaffold would get a different slug and you'd end up with two homepages).",
  "6. **Seed the topic hierarchy.** `create_topics` with the topics you agreed on.",
  "7. **Wire navigation.** `update_wiki_settings` with a `nav` array. Each NavItem needs exactly one of `page` / `topic` / `link`. Use `auto: {enabled: true}` on topic NavItems to auto-populate children from the topic DAG.",
- "8. **Seed a few stub pages or first articles.** Stubs are fine — scaffold-and-fill-later is a supported workflow.",
+ "8. **Seed a few stub pages or first full pages.** Stubs are fine — scaffold-and-fill-later is a supported workflow.",
+ " For homepage-style timelines, keep stubs in stub surfaces (`::awaiting-composition` / `::stub-list`) rather than recent-entry timelines — front-page activity should reflect real pages, not auto-created unwritten stubs.",
  "",
  "The conversation drives the shape of the wiki. Don't over-engineer the topic hierarchy or the nav on turn one. Ship a small coherent starting shape and grow it with the user.",
  "",
  "## Technical workflow",
  "",
- "Reading and searching articles is open. Writing requires an API key (from login). Login creates a personal API key linked to your user account, so contributions are attributed to you.",
+ "Reading and searching pages is open. Writing requires an API key (from login). Login creates a personal API key linked to your user account, so contributions are attributed to you.",
  "",
- "Core flow: login (once) → `whoami` (confirm identity) → `list_wikis` or `search_articles` (what exists?) → `search_web` + `read_webpage` (research) → `new` (scaffold) or `download` (existing) → edit files under ~/.openalmanac/articles/{wiki_slug}/ → `publish`.",
+ "Core flow: login (once) → `whoami` (confirm identity) → `list_wikis` or `search_pages` (what exists?) → `search_web` + `read_webpage` (research) → `new` (scaffold) or `download` (existing) → edit files under ~/.openalmanac/pages/{wiki_slug}/ → `publish`.",
  "",
- "After publishing, share the celebration URL when applicable. Use `list_articles` with `wiki_slug` to browse a wiki's pages.",
+ "After publishing, share the celebration URL when applicable. Use `list_pages` with `wiki_slug` to browse a wiki's pages.",
  "",
  "When working with tool results, write down any important information you might need later, as the original tool result may be cleared.",
  "",
  "## Batching writes",
  "",
- "Most write tools take arrays. Pass a single-element array for one item — there is no separate singular tool. Examples: `create_topics([{ title: \"X\" }])`, `delete_pages({ article_slugs: [\"foo\"] })`, `publish({ slugs: [\"bar\"] })`. Do not call a tool in a loop when an array argument exists.",
+ "Most write tools take arrays. Pass a single-element array for one item — there is no separate singular tool. Examples: `create_topics([{ title: \"X\" }])`, `delete_pages({ page_slugs: [\"foo\"] })`, `publish({ slugs: [\"bar\"] })`. Do not call a tool in a loop when an array argument exists.",
  ].join("\n"),
  });
  registerAuthTools(server);
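For orientation, the page format these instructions keep referring to — YAML frontmatter, `[@key]` citation markers, `[[slug|Display Text]]` wikilinks, and `![Caption](url "position")` figures — combines roughly as below. This is a hedged sketch only: `edit_summary`, `sources:`, and `infobox.header.image_url` are named in the instructions above, but the exact shape of a `sources:` entry is an assumption here, and all values are placeholders.

```markdown
---
edit_summary: Expand the history section
sources:
  - key: example-source            # entry shape assumed, not confirmed by this diff
    url: https://example.com/article
---

The [[erawan-shrine|Erawan Shrine]] was erected in 1956 [@example-source].

![The Erawan Shrine, a Hindu shrine in central Bangkok](https://example.com/shrine.jpg "right")
```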
package/dist/setup.js CHANGED
@@ -5,57 +5,25 @@ import { fileURLToPath } from "url";
  import { spawnSync } from "child_process";
  import { performLogin } from "./login-core.js";
  import { getAuthStatus } from "./auth.js";
- const TOOL_GROUPS = [
- {
- name: "Search & Read",
- description: "search, read, download, and browse community wiki articles",
- tools: [
- "mcp__almanac__search_articles",
- "mcp__almanac__download",
- "mcp__almanac__read",
- "mcp__almanac__list_articles",
- ],
- },
- {
- name: "Research",
- description: "web search, read pages, find images",
- tools: [
- "mcp__almanac__search_web",
- "mcp__almanac__read_webpage",
- "mcp__almanac__search_images",
- "mcp__almanac__view_images",
- ],
- },
- {
- name: "Write & Publish",
- description: "create article drafts and publish edits",
- tools: [
- "mcp__almanac__new",
- "mcp__almanac__publish",
- ],
- },
- {
- name: "Auth",
- description: "login and logout",
- tools: ["mcp__almanac__login", "mcp__almanac__logout"],
- },
- {
- name: "Community",
- description: "communities and posts",
- tools: [
- "mcp__almanac__search_communities",
- "mcp__almanac__create_community",
- "mcp__almanac__create_post",
- ],
- },
- {
- name: "People",
- description: "search people profiles",
- tools: ["mcp__almanac__search_people"],
- },
+ import { MCP_TOOL_GROUPS, toClaudePermissionName, } from "./tool-registry.js";
+ import { EXAMPLE_PROMPT } from "./onboarding-copy.js";
+ // MCP-side permission groups come from the shared tool registry so adding a
+ // new MCP tool can never silently leave it un-grouped — see
+ // `src/tool-registry.ts` for the contract and `test/tool-registry.test.ts`
+ // for the drift check that enforces it.
+ function mcpGroupToPermissionGroup(group) {
+ return {
+ name: group.name,
+ description: group.description,
+ tools: group.tools.map(toClaudePermissionName),
+ };
+ }
+ // Built-in Claude Code tool groups — not MCP tools, so they stay defined
+ // here. The user opts into them in the same TUI checkbox screen.
+ const CLAUDE_BUILTIN_TOOL_GROUPS = [
  {
  name: "Local Files",
- description: "read & edit articles in ~/.openalmanac",
+ description: "read & edit pages in ~/.openalmanac",
  tools: [
  "Read(~/.openalmanac/**)",
  "Write(~/.openalmanac/**)",
@@ -68,6 +36,10 @@ const TOOL_GROUPS = [
  tools: ["WebSearch", "WebFetch"],
  },
  ];
+ const TOOL_GROUPS = [
+ ...MCP_TOOL_GROUPS.map(mcpGroupToPermissionGroup),
+ ...CLAUDE_BUILTIN_TOOL_GROUPS,
+ ];
  const AGENTS = [
  { name: "Claude Code", supported: true },
  { name: "Codex", supported: false },
@@ -102,7 +74,7 @@ const LOGO_LINES = [
  "\u2588\u2588\u2551 \u2588\u2588\u2551\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2557\u2588\u2588\u2551 \u255a\u2550\u255d \u2588\u2588\u2551\u2588\u2588\u2551 \u2588\u2588\u2551\u2588\u2588\u2551 \u255a\u2588\u2588\u2588\u2588\u2551\u2588\u2588\u2551 \u2588\u2588\u2551\u255a\u2588\u2588\u2588\u2588\u2588\u2588\u2557",
  "\u255a\u2550\u255d \u255a\u2550\u255d\u255a\u2550\u2550\u2550\u2550\u2550\u2550\u255d\u255a\u2550\u255d \u255a\u2550\u255d\u255a\u2550\u255d \u255a\u2550\u255d\u255a\u2550\u255d \u255a\u2550\u2550\u2550\u255d\u255a\u2550\u255d \u255a\u2550\u255d \u255a\u2550\u2550\u2550\u2550\u2550\u255d",
  ];
- function printBanner(subtitle = "Write and publish articles with your AI agent") {
+ function printBanner(subtitle = "Write and publish pages with your AI agent") {
  process.stdout.write("\n");
  for (let i = 0; i < LOGO_LINES.length; i++) {
  process.stdout.write(`${GRADIENT[i]}${LOGO_LINES[i]}${RST}\n`);
@@ -112,7 +84,7 @@ function printBanner(subtitle = "Write and publish articles with your AI agent")
  function renderHeader(mode = "default") {
  printBanner(mode === "reddit"
  ? "Turn any subreddit into a published wiki"
- : "Write and publish articles with your AI agent");
+ : "Write and publish pages with your AI agent");
  }
  function printBadge() {
  process.stdout.write(`\n ${ACCENT_BG} almanac ${RST}\n`);
@@ -1052,45 +1024,46 @@ function printResult(clientsLabel, loginResult, configured, alreadyConfigured, t
  w("");
  }
  function getNextSteps(clientsLabel) {
+ const exampleLine = `${BLUE}"${EXAMPLE_PROMPT}"${RST}`;
  if (clientsLabel === "Claude Code") {
  return [
  `Type ${WHITE_BOLD}claude${RST} to start Claude Code`,
- `Say ${BLUE}"Let's explore <topic> using Almanac"${RST}`,
+ `Say ${exampleLine}`,
  ];
  }
  if (clientsLabel === "Codex") {
  return [
  `Type ${WHITE_BOLD}codex${RST} to start Codex`,
- `Ask ${BLUE}"Let's explore <topic> using Almanac"${RST}`,
+ `Ask ${exampleLine}`,
  ];
  }
  if (clientsLabel === "Cursor") {
  return [
  `Open ${WHITE_BOLD}Cursor${RST} in your project`,
- `Ask ${BLUE}"Let's explore <topic> using Almanac"${RST}`,
+ `Ask ${exampleLine}`,
  ];
  }
  if (clientsLabel === "OpenCode") {
  return [
  `Type ${WHITE_BOLD}opencode${RST} to start OpenCode`,
- `Ask ${BLUE}"Let's explore <topic> using Almanac"${RST}`,
+ `Ask ${exampleLine}`,
  ];
  }
  if (clientsLabel === "Windsurf") {
  return [
  `Open ${WHITE_BOLD}Windsurf${RST} in your project`,
- `Ask ${BLUE}"Let's explore <topic> using Almanac"${RST}`,
+ `Ask ${exampleLine}`,
  ];
  }
  if (clientsLabel === "Claude Desktop") {
  return [
  `Open ${WHITE_BOLD}Claude Desktop${RST}`,
- `Ask ${BLUE}"Let's explore <topic> using Almanac"${RST}`,
+ `Ask ${exampleLine}`,
  ];
  }
  return [
  `Open one of your configured agents in this project`,
- `Ask ${BLUE}"Let's explore <topic> using Almanac"${RST}`,
+ `Ask ${exampleLine}`,
  ];
  }
  /* ── Entry point ────────────────────────────────────────────────── */
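To illustrate the composition above: each registry group passes through `mcpGroupToPermissionGroup`, which only rewrites the tool names into Claude's prefixed permission form. A sketch using the registry's "Account" group (values taken from `tool-registry.js` below):

```ts
import { MCP_TOOL_GROUPS, toClaudePermissionName } from "./tool-registry.js";

// The registry's "Account" group, mapped the same way setup.js maps it.
const account = MCP_TOOL_GROUPS.find((g) => g.name === "Account")!;
const permissionGroup = {
  name: account.name,               // "Account"
  description: account.description, // "login, logout, identity"
  tools: account.tools.map(toClaudePermissionName),
  // → ["mcp__almanac__login", "mcp__almanac__logout", "mcp__almanac__whoami"]
};
```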
@@ -0,0 +1,11 @@
+ export declare const MCP_TOOL_NAMES: readonly ["search_pages", "search_topics", "list_pages", "download", "new", "publish", "read_page", "delete_pages", "list_topics", "update_topic", "create_topics", "list_wikis", "create_wiki", "get_wiki_settings", "update_wiki_settings", "join_wiki", "get_wiki_membership", "login", "logout", "whoami", "search_web", "read_webpage", "search_images", "view_images"];
+ export type McpToolName = (typeof MCP_TOOL_NAMES)[number];
+ export declare const INTERACTIVE_TOOL_NAMES: readonly string[];
+ export interface McpToolGroup {
+ name: string;
+ description: string;
+ tools: readonly McpToolName[];
+ }
+ export declare const MCP_TOOL_GROUPS: readonly McpToolGroup[];
+ export declare function toClaudePermissionName(name: McpToolName): string;
+ export declare const MCP_TOOL_PERMISSION_NAMES: readonly string[];
@@ -0,0 +1,148 @@
+ // Single source of truth for the MCP tools the OpenAlmanac server exposes.
+ //
+ // This file is the contract that ties three otherwise-independent surfaces
+ // together:
+ //
+ // 1. tools/*.ts — the actual `server.addTool({ name: ... })`
+ // registrations.
+ // 2. setup.ts TOOL_GROUPS — the permission grouping shown in the
+ // `npx openalmanac setup` TUI, written into
+ // `~/.claude/settings.json` so a user's
+ // agent can call these tools without a
+ // per-call approval prompt.
+ // 3. gui/config.js — the Electron app's allow-list passed to
+ // the Claude Code SDK.
+ //
+ // Drift between these three surfaces was a real bug: the rename refactor
+ // added `read_page`, `list_wikis`, `create_wiki`, the topic tools, etc., but
+ // only (1) was updated. (2) was still grouping the pre-refactor tool set, so
+ // users who ran `setup` had to manually approve every wiki/topic call.
+ //
+ // The drift test in `test/tool-registry.test.ts` walks every register*Tools
+ // function with a fake server, collects the names actually registered, and
+ // asserts:
+ //
+ // - every registered name is in MCP_TOOL_NAMES
+ // - every name in MCP_TOOL_NAMES is actually registered
+ // - every non-INTERACTIVE name appears in exactly one MCP_TOOL_GROUPS entry
+ //
+ // Adding a new MCP tool means: register it in tools/*.ts, add its name here,
+ // and place it in a group below. Skipping any of those three breaks CI.
+ export const MCP_TOOL_NAMES = [
+ // Pages
+ "search_pages",
+ "search_topics",
+ "list_pages",
+ "download",
+ "new",
+ "publish",
+ "read_page",
+ "delete_pages",
+ // Topics
+ "list_topics",
+ "update_topic",
+ "create_topics",
+ // Wikis
+ "list_wikis",
+ "create_wiki",
+ "get_wiki_settings",
+ "update_wiki_settings",
+ "join_wiki",
+ "get_wiki_membership",
+ // Account
+ "login",
+ "logout",
+ "whoami",
+ // Research
+ "search_web",
+ "read_webpage",
+ "search_images",
+ "view_images",
+ ];
+ // Tools intentionally excluded from the setup-time permission grant because
+ // they require interactive UI / per-call approval. The drift test allows these
+ // to be absent from MCP_TOOL_GROUPS — but they must still appear in
+ // MCP_TOOL_NAMES if they are actually registered.
+ //
+ // `register_sources` was a GUI citation-bubble handshake; it is currently
+ // commented out in tools/research.ts (REV-62) and therefore not in
+ // MCP_TOOL_NAMES. When it comes back, add it both there and here.
+ export const INTERACTIVE_TOOL_NAMES = [
+ "register_sources",
+ ];
+ // Permission groupings shown in the setup TUI. Each group is a checkbox the
+ // user toggles; checked groups are written into `~/.claude/settings.json`
+ // `permissions.allow` so the agent can call them without per-call approval.
+ //
+ // Group boundaries are user-facing — they should match the user's mental
+ // model ("Search & Read", "Write & Publish") rather than the file the tool
+ // happens to live in. Every non-INTERACTIVE name in MCP_TOOL_NAMES must
+ // appear in exactly one group; the drift test enforces this.
+ export const MCP_TOOL_GROUPS = [
+ {
+ name: "Search & Read",
+ description: "search, read, download, and browse pages, topics, and wikis",
+ tools: [
+ "search_pages",
+ "search_topics",
+ "list_pages",
+ "list_topics",
+ "list_wikis",
+ "download",
+ "read_page",
+ ],
+ },
+ {
+ name: "Research",
+ description: "web search, read pages, find and view images",
+ tools: [
+ "search_web",
+ "read_webpage",
+ "search_images",
+ "view_images",
+ ],
+ },
+ {
+ name: "Write & Publish",
+ description: "create, edit, and publish pages and topics",
+ tools: [
+ "new",
+ "publish",
+ "delete_pages",
+ "create_topics",
+ "update_topic",
+ ],
+ },
+ {
+ name: "Wikis",
+ description: "create wikis, configure settings, manage membership",
+ tools: [
+ "create_wiki",
+ "get_wiki_settings",
+ "update_wiki_settings",
+ "join_wiki",
+ "get_wiki_membership",
+ ],
+ },
+ {
+ name: "Account",
+ description: "login, logout, identity",
+ tools: [
+ "login",
+ "logout",
+ "whoami",
+ ],
+ },
+ ];
+ // Convert a bare MCP tool name into the prefixed form Claude Code uses in
+ // `~/.claude/settings.json` permissions and the Electron SDK's allow-list.
+ //
+ // Example: `read_page` → `mcp__almanac__read_page`.
+ export function toClaudePermissionName(name) {
+ return `mcp__almanac__${name}`;
+ }
+ // Full set of allow-list entries for every registered MCP tool, in the
+ // `mcp__almanac__*` form Claude Code expects. Consumers (gui/config.js once
+ // it's bumped to a registry-aware mcp-ts version) should derive their tool
+ // allow-list from this.
+ export const MCP_TOOL_PERMISSION_NAMES = MCP_TOOL_NAMES.map(toClaudePermissionName);
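The drift test those comments describe would look something like the sketch below. This is an illustration of the three assertions, not the shipped test — the `test/` sources are not part of this published diff, and `collectRegisteredToolNames` is a hypothetical stand-in for the fake-server walk over the `register*Tools` functions:

```ts
import { describe, expect, it } from "vitest";
import { INTERACTIVE_TOOL_NAMES, MCP_TOOL_GROUPS, MCP_TOOL_NAMES } from "../src/tool-registry.js";
// Hypothetical helper: calls every register*Tools function with a fake
// `server` whose addTool() just records the registered name.
import { collectRegisteredToolNames } from "./helpers.js";

describe("tool registry drift", () => {
  it("MCP_TOOL_NAMES matches what the server actually registers", () => {
    const registered = collectRegisteredToolNames();
    // Covers both directions: registered ⊆ MCP_TOOL_NAMES and vice versa.
    expect(new Set(registered)).toEqual(new Set(MCP_TOOL_NAMES));
  });

  it("every non-interactive tool appears in exactly one group", () => {
    for (const name of MCP_TOOL_NAMES) {
      if (INTERACTIVE_TOOL_NAMES.includes(name)) continue;
      const hits = MCP_TOOL_GROUPS.filter((g) => g.tools.includes(name));
      expect(hits).toHaveLength(1);
    }
  });
});
```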
@@ -4,7 +4,7 @@ export function registerAuthTools(server) {
  server.addTool({
  name: "login",
  description: "Log in via browser to connect your account and get a personal API key. This is the required " +
- "first step before creating or updating articles. Only needs to be called once.\n\n" +
+ "first step before creating or updating pages. Only needs to be called once.\n\n" +
  "If you already have a valid API key, this returns immediately without opening a browser.",
  async execute() {
  const result = await performLogin();
@@ -2,12 +2,12 @@ import { z } from "zod";
  import { readFileSync, writeFileSync, mkdirSync, readdirSync, existsSync, unlinkSync } from "node:fs";
  import { join } from "node:path";
  import { stringify as yamlStringify } from "yaml";
- import { request, ARTICLES_DIR } from "../auth.js";
+ import { request, PAGES_DIR } from "../auth.js";
  import { openBrowser } from "../browser.js";
  import { coerceJson } from "../utils.js";
  const SLUG_RE = /^[a-z0-9]+(-[a-z0-9]+)*$/;
  function resolvePageDir(wikiSlug) {
- return join(ARTICLES_DIR, wikiSlug);
+ return join(PAGES_DIR, wikiSlug);
  }
  function resolvePagePaths(slug, wikiSlug) {
  const dir = resolvePageDir(wikiSlug);
@@ -110,6 +110,10 @@ function formatPublishResults(results, targetSlugs, wiki_slug, dry_run) {
  details.push(`${plan.wikilinks.will_auto_stub.length} new stub(s)`);
  plan.wikilinks.will_auto_stub.forEach(s => allAutoStubs.add(s));
  }
+ const inBatchLinks = plan.wikilinks.in_batch ?? [];
+ if (inBatchLinks.length > 0) {
+ details.push(`${inBatchLinks.length} in-batch link(s)`);
+ }
  if (plan.source_keys.orphaned.length > 0) {
  details.push(`missing source key(s): ${plan.source_keys.orphaned.join(", ")}`);
  }
@@ -161,7 +165,7 @@ function formatPublishResults(results, targetSlugs, wiki_slug, dry_run) {
  }
  export function registerPageTools(server) {
  server.addTool({
- name: "search_articles",
+ name: "search_pages",
  description: "Search OpenAlmanac pages and stubs across all wikis. Use to check existence, find slugs for wikilinks, " +
  "or discover content. Optional wiki filter to scope results. No authentication needed.",
  parameters: z.object({
@@ -213,7 +217,7 @@ export function registerPageTools(server) {
  },
  });
  server.addTool({
- name: "list_articles",
+ name: "list_pages",
  description: "Browse pages in a wiki. Structured listing, not fuzzy search. " +
  "Use to see what exists, find stubs, or discover pages by topic. " +
  "Each returned page includes topic objects with both slug and title.",
@@ -237,7 +241,7 @@ export function registerPageTools(server) {
  server.addTool({
  name: "download",
  description: "Download pages to your local workspace for editing. " +
- "Files go to ~/.openalmanac/articles/{wiki_slug}/{slug}.md with a .ref sidecar. " +
+ "Files go to ~/.openalmanac/pages/{wiki_slug}/{slug}.md with a .ref sidecar. " +
  "After editing, use publish to push changes. The .ref file is system-managed — don't edit it.",
  parameters: z.object({
  slugs: coerceJson(z.array(z.string()).min(1).max(50)).describe("Page slugs to download"),
@@ -326,9 +330,28 @@ export function registerPageTools(server) {
  writeFileSync(filePath, `---\n${frontmatter}---\n\n`, "utf-8");
  created.push(filePath);
  }
+ // Scaffold-time nudge: check if any created pages have matching slugs
+ // in the global wiki (Almanac). Fires before writing so the agent can
+ // decide to cross-link instead of writing a duplicate treatment.
+ const nudges = [];
+ if (created.length > 0 && wiki_slug !== "global") {
+ const createdSlugs = created.map(p => p.split("/").pop().replace(".md", ""));
+ for (const slug of createdSlugs) {
+ try {
+ const res = await request("GET", `/api/w/global/pages/${slug}`);
+ if (res.ok) {
+ const page = await res.json();
+ nudges.push(`Note: Almanac already has a page "${page.title ?? slug}" (slug: ${slug}). ` +
+ `Write your own treatment for this wiki, or cross-link with [[global:${slug}]] instead.`);
+ }
+ }
+ catch { /* page doesn't exist in global wiki — no nudge */ }
+ }
+ }
  const parts = [
  created.length > 0 ? `Created ${created.length} file(s):\n${created.map(p => ` - ${p}`).join("\n")}` : "No new files created.",
  skipped.length > 0 ? `Skipped:\n${skipped.map(s => ` - ${s}`).join("\n")}` : "",
+ nudges.length > 0 ? nudges.join("\n") : "",
  WRITING_GUIDE,
  ];
  return parts.filter(Boolean).join("\n\n");
@@ -400,48 +423,18 @@ export function registerPageTools(server) {
  return summary;
  },
  });
- // propose_article — GUI-only handshake. Commented out 2026-04-23 per REV-62.
- // Revive when the GUI plan-card proposal flow is in active use.
- /*
- server.addTool({
- name: "propose_article",
- description:
- "Propose an article before writing it. Structures your proposal with a user-facing summary and a detailed brief. " +
- "Do not start writing without proposing first.",
- parameters: z.object({
- summary: z.string().describe("User-facing summary (3-5 bullet points)"),
- details: z.string().describe("Full handoff brief with all sources, key facts, angle"),
- title: z.string().describe("Proposed title"),
- slug: z.string().describe("Proposed slug (kebab-case)"),
- wiki_slug: z.string().default("global").describe("Wiki slug"),
- _userChoice: z.enum(["background", "here", "expired", "already_in_progress"]).optional(),
- }),
- async execute({ summary, details, title, slug, wiki_slug, _userChoice }) {
- if (_userChoice === "background") {
- return `Article "${title}" is now being written in a background process.`;
- }
- if (_userChoice === "expired") {
- return `Proposal expired. Continue the conversation naturally.`;
- }
- if (_userChoice === "already_in_progress") {
- return `Article "${title}" is already being generated.`;
- }
- return `Article Proposal: ${title}\n\n${summary}\n\nProceed with writing this article following the writing flow in your instructions.`;
- },
- });
- */
  server.addTool({
- name: "read_article",
+ name: "read_page",
  description: "Read a single page by slug. Returns the full page JSON including content, topics, sources, and infobox. " +
  "No side effects — use this to read a page without downloading it to disk or joining the wiki. " +
  "For editing, use `download` instead (it writes local files and handles ref tokens). " +
- "For discovery, use `search_articles` instead. No authentication needed.",
+ "For discovery, use `search_pages` instead. No authentication needed.",
  parameters: z.object({
  wiki_slug: z.string().describe("Wiki slug"),
- article_slug: z.string().describe("Page slug"),
+ page_slug: z.string().describe("Page slug"),
  }),
- async execute({ wiki_slug, article_slug }) {
- const resp = await request("GET", `/api/w/${wiki_slug}/pages/${article_slug}`);
+ async execute({ wiki_slug, page_slug }) {
+ const resp = await request("GET", `/api/w/${wiki_slug}/pages/${page_slug}`);
  return JSON.stringify(await resp.json(), null, 2);
  },
  });
@@ -451,11 +444,11 @@ export function registerPageTools(server) {
  "Accepts multiple slugs and deletes them in sequence. Requires moderator or creator access.",
  parameters: z.object({
  wiki_slug: z.string().describe("Wiki slug"),
- article_slugs: coerceJson(z.array(z.string()).min(1).max(50)).describe("Page slugs to delete (1-50)"),
+ page_slugs: coerceJson(z.array(z.string()).min(1).max(50)).describe("Page slugs to delete (1-50)"),
  }),
- async execute({ wiki_slug, article_slugs }) {
+ async execute({ wiki_slug, page_slugs }) {
  const results = [];
- for (const slug of article_slugs) {
+ for (const slug of page_slugs) {
  try {
  // DELETE returns 204 No Content on success
  await request("DELETE", `/api/w/${wiki_slug}/pages/${slug}`, { auth: true });
@@ -470,7 +463,7 @@ export function registerPageTools(server) {
  const lines = results.map(r => r.status === "deleted"
  ? `- ${r.slug}: deleted`
  : `- ${r.slug}: error — ${r.message}`);
- return `Deleted ${deleted}/${article_slugs.length} pages.\n\n${lines.join("\n")}`;
+ return `Deleted ${deleted}/${page_slugs.length} pages.\n\n${lines.join("\n")}`;
  },
  });
  }
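In short, the page-tool rename also renames the argument keys. The payload shapes after this change look like the following (slug values are placeholders):

```ts
// read_page: `article_slug` is now `page_slug`.
const readPageArgs = { wiki_slug: "global", page_slug: "alan-turing" };

// delete_pages: `article_slugs` is now `page_slugs` (still a 1-50 array).
const deletePagesArgs = { wiki_slug: "global", page_slugs: ["alan-turing"] };
```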
@@ -4,7 +4,7 @@ import { request } from "../auth.js";
  import { coerceJson } from "../utils.js";
  export function registerResearchTools(server) {
  const SearchWebInput = z.object({
- source: z.enum(["web", "reddit"]).describe("Search source. Use 'web' for Google/Serper and 'reddit' for community perspectives via Reddit."),
+ source: z.enum(["web", "reddit"]).describe("Search source. Use 'web' for Google/Serper and 'reddit' for public perspectives via Reddit."),
  query: z.string().min(1).optional().describe("Search terms. Required for source='web'. Optional for source='reddit' — omit it there to return a sorted subreddit listing."),
  subreddit: z.string().optional().describe("Reddit-only. Subreddit name without the 'r/' prefix (e.g. 'Harvard'). Omit to search across all of Reddit."),
  sort: z.enum(["top", "hot", "new", "rising", "controversial", "relevance", "comments"])
@@ -34,40 +34,41 @@ export function registerResearchTools(server) {
  });
  server.addTool({
  name: "search_web",
- description: "Search the web or a specific community source (Reddit). Pick the source with the `source` field:\n\n" +
+ description: "Search the web or Reddit. Pick the source with the `source` field:\n\n" +
  "- `source: \"web\"` — general web search via Google. Use for news, docs, scholarly references.\n" +
  "- `source: \"reddit\"` — Reddit-aware search returning posts with score, flair, num_comments, permalink. " +
- "Use when the user is asking about community perspectives, subreddit consensus, or 'what do people think about X'.\n\n" +
+ "Use when the user is asking about public perspectives, subreddit consensus, or 'what do people think about X'.\n\n" +
  "Use only the fields relevant to the source you pick. " +
  "Rate limit: 10/min. Requires API key.",
  parameters: SearchWebInput,
  async execute(input) {
  if (input.source === "reddit") {
- const params = {
+ const body = {
+ source: "reddit",
  sort: input.sort ?? "top",
  time_range: input.time_range ?? "year",
  limit: input.limit ?? 25,
  };
  if (input.subreddit)
- params.subreddit = input.subreddit;
+ body.subreddit = input.subreddit;
  if (input.query)
- params.query = input.query;
- const resp = await request("GET", "/api/research/reddit/search", {
+ body.query = input.query;
+ const resp = await request("POST", "/api/research/search", {
  auth: true,
- params,
+ json: body,
  });
  return JSON.stringify(await resp.json(), null, 2);
  }
- const resp = await request("GET", "/api/research/search", {
+ const resp = await request("POST", "/api/research/search", {
  auth: true,
- params: { query: input.query.trim(), limit: input.limit ?? 10 },
+ json: { source: "web", query: input.query.trim(), limit: input.limit ?? 10 },
  });
  return JSON.stringify(await resp.json(), null, 2);
  },
  });
  server.addTool({
  name: "read_webpage",
- description: "Fetch a URL and return its content as markdown. Routes automatically based on URL:\n" +
+ description: "Read an external URL and return its content as markdown. Routes automatically based on URL:\n" +
  "- **Reddit threads** (reddit.com/r/{sub}/comments/{id}/...) — returns the post plus top-level threaded comments with scores and authors, via a residential proxy.\n" +
  "- **Reddit wiki pages** (reddit.com/r/{sub}/wiki/...) — returns the wiki page as markdown with revision metadata.\n" +
  "- **YouTube videos** — returns title, description, transcript when available.\n" +
@@ -100,11 +101,11 @@ export function registerResearchTools(server) {
  });
  server.addTool({
  name: "search_images",
- description: "Search for images to include in articles. Accepts multiple queries for batch lookup. Returns image URLs, titles, dimensions, and licensing info. " +
+ description: "Search for images to include in pages. Accepts multiple queries for batch lookup. Returns image URLs, titles, dimensions, and licensing info. " +
  "Three sources: 'google' (broad web images, default), 'unsplash' (high-quality stock photos), and 'wikimedia' (free, open-licensed from Wikimedia Commons). " +
  "Use descriptive search terms. After searching, call view_images on promising candidates to see what they actually show before using them. " +
- "External image URLs are automatically persisted when you publish the article — no extra steps needed.\n\n" +
- "## Using images in articles\n\n" +
+ "External image URLs are automatically persisted when you publish the page — no extra steps needed.\n\n" +
+ "## Using images in pages\n\n" +
  "Images render as figures with visible captions. The alt text becomes the caption — make it descriptive.\n\n" +
  "**Syntax:** `![Caption text](url \"position\")`\n\n" +
  "Position options (in the title/quotes):\n" +
@@ -122,7 +123,7 @@ export function registerResearchTools(server) {
  "- Bad: `![Logo](url)` — Good: `![The OpenAI logo, a stylized spiral](url)`\n\n" +
  "**Placement rules:**\n" +
  "- Place 1-3 images per major section — don't overload\n" +
- "- First image should appear near the top, illustrating the article's subject\n" +
+ "- First image should appear near the top, illustrating the page's subject\n" +
  "- Spread images throughout, not clustered together\n" +
  "- For the infobox hero image, set `infobox.header.image_url` in frontmatter instead\n\n" +
  "Requires login. Rate limit: 10/min.",
package/package.json CHANGED
@@ -1,22 +1,30 @@
  {
  "name": "openalmanac",
- "version": "0.3.6",
- "description": "OpenAlmanac — pull, edit, and push articles to the open knowledge base",
+ "version": "0.4.0",
+ "description": "OpenAlmanac — pull, edit, and push pages to the open knowledge base",
  "type": "module",
  "bin": {
  "openalmanac": "dist/cli.js"
  },
+ "exports": {
+ "./tool-registry": {
+ "types": "./dist/tool-registry.d.ts",
+ "default": "./dist/tool-registry.js"
+ }
+ },
  "scripts": {
  "build": "tsc",
  "dev": "tsc --watch",
- "start": "node dist/cli.js"
+ "start": "node dist/cli.js",
+ "test": "vitest run",
+ "test:watch": "vitest"
  },
  "keywords": [
  "openalmanac",
  "mcp",
  "knowledge-base",
  "ai",
- "articles"
+ "pages"
  ],
  "license": "MIT",
  "dependencies": {
@@ -25,8 +33,9 @@
  "zod": "^3.24.0"
  },
  "devDependencies": {
- "@types/node": "^22.0.0",
- "typescript": "^5.7.0"
+ "@types/node": "^25.6.0",
+ "typescript": "^5.7.0",
+ "vitest": "^3.2.4"
  },
  "engines": {
  "node": ">=18.0.0"
@@ -1,7 +1,7 @@
  ---
  name: reddit-wiki
  description: Turn any subreddit into a published wiki on Almanac
- allowed-tools: Bash(node ${CLAUDE_SKILL_DIR}/scripts/ingest.js *), mcp__almanac__search_articles, mcp__almanac__search_communities, mcp__almanac__list_articles, mcp__almanac__read, mcp__almanac__download, mcp__almanac__new, mcp__almanac__publish, mcp__almanac__search_web, mcp__almanac__read_webpage, mcp__almanac__search_images, mcp__almanac__view_images, mcp__almanac__register_sources, mcp__almanac__login, mcp__almanac__create_community, Read(~/.openalmanac/**), Write(~/.openalmanac/**), Edit(~/.openalmanac/**)
+ allowed-tools: Bash(node ${CLAUDE_SKILL_DIR}/scripts/ingest.js *), mcp__almanac__search_pages, mcp__almanac__list_wikis, mcp__almanac__create_wiki, mcp__almanac__list_pages, mcp__almanac__read_page, mcp__almanac__download, mcp__almanac__new, mcp__almanac__publish, mcp__almanac__search_web, mcp__almanac__read_webpage, mcp__almanac__search_images, mcp__almanac__view_images, mcp__almanac__register_sources, mcp__almanac__login, Read(~/.openalmanac/**), Write(~/.openalmanac/**), Edit(~/.openalmanac/**)
  argument-hint: r/<subreddit>
  ---

@@ -18,8 +18,8 @@ Never estimate how long things will take. Do show data sizes so the user knows w
  ## Flow overview

  Two phases:
- 1. **Foundation** — Plan and write 15-20 core articles with images, citations, and wikilinks
- 2. **Deep Absorb** — Process the corpus batch by batch, discovering niche topics and enriching existing articles
+ 1. **Foundation** — Plan and write 15-20 core pages with images, citations, and wikilinks
+ 2. **Deep Absorb** — Process the corpus batch by batch, discovering niche topics and enriching existing pages

  ## Naming convention

@@ -32,9 +32,9 @@ Two phases:

  If the user runs `/reddit-wiki` without arguments or asks how it works, explain briefly:

- - **What it does:** Takes any subreddit and builds a wiki on Almanac — real articles with citations, images, and links between them. Two phases: a foundation of 15-20 core articles, then a deep pass through the corpus finding niche topics.
+ - **What it does:** Takes any subreddit and builds a wiki on Almanac — real pages with citations, images, and links between them. Two phases: a foundation of 15-20 core pages, then a deep pass through the corpus finding niche topics.
  - **What Almanac is:** An open knowledge base anyone can read and write to. Think Wikipedia's depth meets Reddit's community energy.
- - **How it works:** Downloads the subreddit's history, scores posts by quality, then uses AI agents to research and write articles citing the community's own discussions.
+ - **How it works:** Downloads the subreddit's history, scores posts by quality, then uses AI agents to research and write pages citing the community's own discussions.
  - **Data storage:** Everything is stored locally at `~/.openalmanac/corpus/<subreddit>/`. The user can delete it anytime after the wiki is published.
  - **Any subreddit:** They can pick any subreddit they're interested in. Some smaller or newer subreddits may not have data available — if that happens, you'll suggest alternatives or nearby subreddits that do have data.

@@ -45,8 +45,8 @@ Then end with a single inviting line that asks what they're into and offers to h
  Extract the subreddit name from the argument (strip `r/` prefix if present). Use the bare name for all API calls and file paths. Use `r/<name>` when talking to the user.

  Run these three things in parallel (silently — don't narrate the tool calls):
- 1. `search_communities("<subreddit_name>")`
- 2. `search_articles` with 5-10 key topic terms you'd expect in this community
+ 1. `list_wikis()` and look for the subreddit wiki slug
+ 2. `search_pages` with 5-10 key topic terms you'd expect in this community
  3. Get subreddit stats from Arctic Shift:

  ```bash
@@ -56,7 +56,7 @@ node ${CLAUDE_SKILL_DIR}/scripts/ingest.js $1 count
  This returns JSON with `total_posts`, `total_comments`, and `estimated_size_mb`.

  Now greet the user. Tell them:
- - What already exists on Almanac for this community (articles, stubs, community)
+ - What already exists on Almanac for this community (pages, stubs, community)
  - Share something genuinely interesting about it if you know anything
  - Subreddit stats (posts, comments)
  - The two-phase plan (brief — one line each)
@@ -153,18 +153,18 @@ If the `count` command returns 0 posts, the subreddit may not be indexed. In thi
  Read 20-30 corpus entries (prioritize high-score posts) to understand the landscape. Also check what already exists:

  ```
- list_articles(community_slug: "<subreddit>", sort: "most_referenced")
+ list_pages(wiki_slug: "<subreddit>")
  ```

- Identify 15-20 core articles. **Favor nouns over themes** — specific things people would look up, not vague survey topics.
+ Identify 15-20 core pages. **Favor nouns over themes** — specific things people would look up, not vague survey topics.

  - **~70% nouns:** Specific locks, tools, people, techniques, concepts. "American Lock 1100", "Spool Pin", "Tension Wrench", "LockPickingLawyer". These are the building blocks — what people search for, link to, and learn from.
- - **~30% structural themes:** Only the big ones that serve as entry points and tie nouns together. "Belt System", "Lock Picking Basics". Not vague surveys — each should be a real article that teaches something.
+ - **~30% structural themes:** Only the big ones that serve as entry points and tie nouns together. "Belt System", "Lock Picking Basics". Not vague surveys — each should be a real page that teaches something.

  Bad: "Security Pin Mechanics" (vague theme, reads like a textbook chapter)
  Good: "Spool Pin", "Serrated Pin", "Mushroom Pin" (specific nouns — then link them from a "Security Pins" overview)

- Present them to the user grouped by category, but make clear most articles are about specific things:
+ Present them to the user grouped by category, but make clear most pages are about specific things:

  ```
  Here's what I'd build for the foundation:
@@ -188,21 +188,21 @@ Include your recommendation. Wait for the user to confirm or adjust.
188
188
 
189
189
  ### Topics
190
190
 
191
- The groupings you present (Locks, Components, Techniques, Community) become **community topics** on Almanac. Topics show up as categories on the wiki page and each article gets assigned to one. When you scaffold articles, include the topic in the `new()` call.
191
+ The groupings you present (Locks, Components, Techniques, Community) become **community topics** on Almanac. Topics show up as categories on the wiki page and each page gets assigned to one. When you scaffold pages, include the topic in the `new()` call.
192
192
 
193
- Keep topics broad and few (4-7). They're navigation, not a taxonomy. A topic like "Locks" is good. A topic like "European High-Security Disc Detainer Locks" is too specific — that's an article, not a topic.
193
+ Keep topics broad and few (4-7). They're navigation, not a taxonomy. A topic like "Locks" is good. A topic like "European High-Security Disc Detainer Locks" is too specific — that's a page, not a topic.
194
194
 
195
195
  ### Scaffold entities
196
196
 
197
- Before any writing, scaffold all planned articles as local files:
197
+ Before any writing, scaffold all planned pages as local files:
198
198
 
199
- 1. **Check what exists online:** `search_articles` with ALL planned entity names in one batch call
200
- 2. **Check local folder:** Read `~/.openalmanac/articles/<subreddit>/` to see what's already scaffolded
201
- 3. **Create missing:** `new(articles: [{title, community_slug}, ...])` for everything not found
199
+ 1. **Check what exists online:** `search_pages` with ALL planned entity names in one batch call
200
+ 2. **Check local folder:** Read `~/.openalmanac/pages/<subreddit>/` to see what's already scaffolded
201
+ 3. **Create missing:** `new(pages: [{title, slug?, topics?}, ...], wiki_slug: "<subreddit>")` for everything not found
202
202
 
203
203
  This creates the entity map. Writing agents will check the local folder to know what slugs exist.
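> Editor's note: a minimal sketch of what "check the local folder" could look like for a writing agent, using the `~/.openalmanac/pages/<subreddit>/` path documented above. The one-`.md`-file-per-slug layout is an assumption.

```ts
// Hedged sketch: derive the entity map from the wiki's local scaffold folder.
import { readdirSync } from "node:fs";
import { join } from "node:path";
import { homedir } from "node:os";

function scaffoldedSlugs(subreddit: string): Set<string> {
  const dir = join(homedir(), ".openalmanac", "pages", subreddit);
  return new Set(
    readdirSync(dir)
      .filter((f) => f.endsWith(".md"))
      .map((f) => f.replace(/\.md$/, ""))
  );
}
```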
204
204
 
205
- ### Write articles
205
+ ### Write pages
206
206
 
207
207
  Tell the user what's happening:
208
208
 
@@ -215,62 +215,62 @@ Kicking off the writing agents:
215
215
  • Agent 4: Community — LockPickingLawyer, Belt System
216
216
  ```
217
217
 
218
- Spin up 4-5 parallel writing agents, ~3-4 articles each. Group by theme so related articles are written by the same agent (better cross-referencing).
218
+ Spin up 4-5 parallel writing agents, ~3-4 pages each. Group by theme so related pages are written by the same agent (better cross-referencing).
219
219
 
220
220
  **Each writing agent's brief must include:**
221
221
 
222
- 1. **Which articles to write** (the scaffolded .md files to fill in)
222
+ 1. **Which pages to write** (the scaffolded .md files to fill in)
223
223
  2. **Corpus entries to read** — point to specific files in `~/.openalmanac/corpus/<subreddit>/` relevant to its topics
224
224
  3. **The entity map** — list all scaffolded slugs so the agent uses correct wikilinks
225
225
  4. **These citation rules:**
226
226
  - Every source MUST have a public URL
227
- - Corpus entries have `citation_key` and `source` (Reddit permalink) in their frontmatter — use them as `[@citation_key]` markers and list them in the article's YAML `sources:` array
227
+ - Corpus entries have `citation_key` and `source` (Reddit permalink) in their frontmatter — use them as `[@citation_key]` markers and list them in the page's YAML `sources:` array
228
228
  - Also use `search_web` and `read_webpage` for additional sources beyond Reddit
229
229
  - NEVER fabricate a URL. If a source has no public URL, do not use it.
230
230
  - Register sources with `register_sources` before writing
231
231
  5. **These wikilink rules:**
232
232
  - Use `[[slug|Display Text]]` syntax for entities that exist (scaffolded or published)
233
- - Before linking to a new entity NOT on the map: `search_articles` to check, then scaffold with `new()` if needed
233
+ - Before linking to a new entity NOT on the map: `search_pages` to check, then scaffold with `new()` if needed
234
234
  - Prefer existing slugs over inventing new ones
235
235
  6. **Writing quality:**
236
236
  - Fetch guidelines from `https://openalmanac.org/writing-guidelines` using `read_webpage`
237
237
  - Write with the community's voice — cite Reddit discussions, not just Wikipedia
238
238
  - Include `[@citation_key]` markers throughout, especially for claims from the corpus
239
- - Articles should feel like they were written by someone who lives in this community
239
+ - Pages should feel like they were written by someone who lives in this community
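> Editor's note: to make the citation and wikilink rules (items 4 and 5 above) concrete, here is a hedged sketch of a pre-publish lint an agent could run over a draft body. The `[@citation_key]` and `[[slug|Display Text]]` syntaxes are documented above; the slug character set and how the `sources` set and entity map are loaded are assumptions.

```ts
// Hedged sketch: flag citation markers missing from the YAML `sources:` array
// and wikilinks pointing at slugs that aren't on the entity map.
function lintDraft(body: string, sources: Set<string>, entityMap: Set<string>): string[] {
  const problems: string[] = [];
  for (const [, key] of body.matchAll(/\[@([\w-]+)\]/g)) {
    if (!sources.has(key)) problems.push(`citation [@${key}] not in sources`);
  }
  for (const [, slug] of body.matchAll(/\[\[([a-z0-9-]+)\|[^\]]+\]\]/g)) {
    if (!entityMap.has(slug)) problems.push(`wikilink [[${slug}]] not scaffolded`);
  }
  return problems;
}
```

Running a check like this before publishing keeps a batch from shipping broken citations or orphan links.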
240
240
 
241
241
  **While agents work**, narrate what's happening. Share interesting things you see them finding. Example:
242
242
 
243
243
  ```
244
244
  Agent 2 found a heated 2019 thread about whether LockPickingLawyer's
245
245
  speed picks are realistic for beginners — 400 upvotes, great discussion.
246
- Working that into the article...
246
+ Working that into the page...
247
247
  ```
248
248
 
249
249
  ### Image pass
250
250
 
251
- After all writing agents finish, run parallel haiku-model image agents (one per article):
251
+ After all writing agents finish, run parallel haiku-model image agents (one per page):
252
252
 
253
253
  Each image agent:
254
- 1. Reads the article
254
+ 1. Reads the page
255
255
  2. `search_images` for 1-2 hero image queries
256
256
  3. `view_images` to verify the best candidate
257
- 4. Adds the image URL to the article's frontmatter as `image_url`
257
+ 4. Adds the image URL to the page's frontmatter as `image_url`
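> Editor's note: step 4 is a small frontmatter edit. A hedged sketch, assuming standard `---`-delimited YAML frontmatter; only the `image_url` key itself is documented above.

```ts
// Hedged sketch: append `image_url` to a page's YAML frontmatter block.
import { readFileSync, writeFileSync } from "node:fs";

function setImageUrl(pagePath: string, url: string): void {
  const text = readFileSync(pagePath, "utf8");
  // Insert just before the closing `---` of the frontmatter; JSON.stringify
  // gives a valid double-quoted YAML scalar.
  const updated = text.replace(
    /^---\n([\s\S]*?)\n---/,
    (_m, fm) => `---\n${fm}\nimage_url: ${JSON.stringify(url)}\n---`
  );
  writeFileSync(pagePath, updated);
}
```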
258
258
 
259
259
  ### Publish
260
260
 
261
261
  ```
262
- publish(community_slug: "<subreddit>")
262
+ publish(wiki_slug: "<subreddit>")
263
263
  ```
264
264
 
265
- This batch-publishes all articles in the community folder. The backend auto-creates stubs from any dead wikilinks in the articles.
265
+ This batch-publishes all pages in the wiki's local folder. The backend auto-creates stubs from any dead wikilinks in the pages.
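> Editor's note: stub auto-creation is backend behavior; purely to illustrate the idea (not the actual implementation), a dead wikilink is any linked slug with no corresponding page.

```ts
// Hedged sketch: collect wikilink targets that no published page satisfies.
// `pages` maps published slug -> page body.
function stubCandidates(pages: Map<string, string>): Set<string> {
  const stubs = new Set<string>();
  for (const body of pages.values()) {
    for (const [, slug] of body.matchAll(/\[\[([a-z0-9-]+)(?:\|[^\]]+)?\]\]/g)) {
      if (!pages.has(slug)) stubs.add(slug);
    }
  }
  return stubs;
}
```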
266
266
 
267
267
  Share the results with enthusiasm:
268
268
 
269
269
  ```
270
- 17 articles live! The wiki now has 35 articles total, plus
270
+ 17 pages live! The wiki now has 35 pages total, plus
271
271
  12 new stubs that emerged from wikilinks.
272
272
 
273
- Check it out: openalmanac.org/communities/<subreddit>/wiki
273
+ Check it out: openalmanac.org/w/<subreddit>
274
274
 
275
275
  You can also browse it in the Almanac desktop app — best way
276
276
  to explore and keep contributing.
@@ -302,10 +302,10 @@ For each batch:
302
302
 
303
303
  1. **Read 50 unabsorbed entries** from the corpus directory (skip any listed in absorb_log)
304
304
  2. **Cluster by theme** — what topics do these entries cover?
305
- 3. **Decide:** Create new articles? Enrich existing ones? Both?
306
- 4. **For existing articles:** `download` them first, then expand with new details/sections
307
- 5. **For new articles:** Scaffold → write → add to wiki
308
- 6. **Image pass** on any new articles (haiku agents)
305
+ 3. **Decide:** Create new pages? Enrich existing ones? Both?
306
+ 4. **For existing pages:** `download` them first, then expand with new details/sections
307
+ 5. **For new pages:** Scaffold → write → add to wiki
308
+ 6. **Image pass** on any new pages (haiku agents)
309
309
  7. **Publish** the batch
310
310
  8. **Update absorb_log.json:**
311
311
  ```json
@@ -313,7 +313,7 @@ For each batch:
313
313
  "entries": {
314
314
  "<filename>": {
315
315
  "absorbed_at": "<ISO timestamp>",
316
- "absorbed_into": ["article-slug-1", "article-slug-2"]
316
+ "absorbed_into": ["page-slug-1", "page-slug-2"]
317
317
  }
318
318
  },
319
319
  "stats": {
@@ -330,12 +330,12 @@ For each batch:
330
330
  Batches 1-5 done. Found some gems:
331
331
  • "Lock Lubricants in Cold Weather" — apparently Houdini
332
332
  lube freezes below -20°F, community recommends graphite
333
- • Expanded the American 1100 article with a detailed
333
+ • Expanded the American 1100 page with a detailed
334
334
  teardown thread from 2017
335
- • New article: "Lockpicking Competitions" — there's a
335
+ • New page: "Lockpicking Competitions" — there's a
336
336
  whole competitive scene
337
337
 
338
- 3 new articles, 4 enriched. Continuing...
338
+ 3 new pages, 4 enriched. Continuing...
339
339
  ```
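> Editor's note: looking back at step 8, a hedged sketch of the `absorb_log.json` bookkeeping, using only the structure shown above. The log path and the create-if-missing behavior are assumptions.

```ts
// Hedged sketch: record which pages each corpus entry was absorbed into.
import { existsSync, readFileSync, writeFileSync } from "node:fs";

function logAbsorbed(logPath: string, filename: string, slugs: string[]): void {
  const log = existsSync(logPath)
    ? JSON.parse(readFileSync(logPath, "utf8"))
    : { entries: {}, stats: {} };
  log.entries[filename] = {
    absorbed_at: new Date().toISOString(),
    absorbed_into: slugs,
  };
  writeFileSync(logPath, JSON.stringify(log, null, 2));
}
```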
340
340
 
341
341
  ### When to stop
@@ -348,11 +348,11 @@ Batches 1-5 done. Found some gems:
348
348
  Phase 2 complete. Processed X,XXX entries across N batches.
349
349
 
350
350
  Final wiki:
351
- XX articles (was YY)
351
+ XX pages (was YY)
352
352
  XX remaining stubs
353
353
  XXX+ citations from the community
354
354
 
355
- openalmanac.org/communities/<subreddit>/wiki
355
+ openalmanac.org/w/<subreddit>
356
356
  ```
357
357
 
358
358
  ## Important rules
@@ -364,14 +364,14 @@ openalmanac.org/communities/<subreddit>/wiki
364
364
  - Corpus entries have `citation_key` and `source` in their frontmatter — these are ready to use.
365
365
 
366
366
  ### Entity linking
367
- - Always `search_articles` before creating new entities — check what already exists
368
- - Check the local `~/.openalmanac/articles/<subreddit>/` folder for scaffolded files
367
+ - Always `search_pages` before creating new entities — check what already exists
368
+ - Check the local `~/.openalmanac/pages/<subreddit>/` folder for scaffolded files
369
369
  - Only scaffold with `new()` if the entity doesn't exist anywhere
370
370
  - Use `[[slug|Display Text]]` wikilink syntax
371
371
  - Prefer existing slugs over inventing new ones to avoid duplicates
372
372
 
373
373
  ### Community creation
374
- - If the community doesn't exist on Almanac yet, create it with `create_community`
374
+ - If the wiki doesn't exist on Almanac yet, create it with `create_wiki`
375
375
  - The description should have personality — capture the community's vibe, not a generic taxonomy
376
376
  - Find a good cover image with `search_images`
377
377
 
@@ -385,5 +385,5 @@ openalmanac.org/communities/<subreddit>/wiki
385
385
  - Don't make small talk or ask personal questions
386
386
  - Don't force enthusiasm — if something isn't interesting, don't pretend
387
387
  - Don't go silent for long stretches — narrate what's happening
388
- - Don't ask permission for every article — the user approved the plan, that's consent
388
+ - Don't ask permission for every page — the user approved the plan, that's consent
389
389
  - Don't skip Reddit as a source — the corpus IS the community's voice, cite it