openalmanac 0.3.6 → 0.4.1

Files changed (44)
  1. package/dist/auth.d.ts +2 -2
  2. package/dist/auth.js +2 -2
  3. package/dist/cli.js +1 -1
  4. package/dist/instructions.d.ts +1 -0
  5. package/dist/instructions.js +150 -0
  6. package/dist/login-core.js +2 -1
  7. package/dist/onboarding-copy.d.ts +1 -0
  8. package/dist/onboarding-copy.js +14 -0
  9. package/dist/openalmanac_mcp-0.3.1-py3-none-any.whl +0 -0
  10. package/dist/openalmanac_mcp-0.3.1.tar.gz +0 -0
  11. package/dist/openalmanac_mcp-0.3.2-py3-none-any.whl +0 -0
  12. package/dist/openalmanac_mcp-0.3.2.tar.gz +0 -0
  13. package/dist/server.js +5 -150
  14. package/dist/setup/clients.d.ts +10 -0
  15. package/dist/setup/clients.js +291 -0
  16. package/dist/setup/config-files.d.ts +43 -0
  17. package/dist/setup/config-files.js +257 -0
  18. package/dist/setup/index.d.ts +2 -0
  19. package/dist/setup/index.js +55 -0
  20. package/dist/setup/permissions.d.ts +3 -0
  21. package/dist/setup/permissions.js +52 -0
  22. package/dist/{setup.d.ts → setup/reddit.d.ts} +0 -1
  23. package/dist/setup/reddit.js +69 -0
  24. package/dist/setup/tui.d.ts +7 -0
  25. package/dist/setup/tui.js +496 -0
  26. package/dist/setup/types.d.ts +43 -0
  27. package/dist/setup/types.js +1 -0
  28. package/dist/tool-registry.d.ts +11 -0
  29. package/dist/tool-registry.js +148 -0
  30. package/dist/tools/auth.js +1 -1
  31. package/dist/tools/{pages.js → pages/index.js} +39 -202
  32. package/dist/tools/pages/publish-format.d.ts +48 -0
  33. package/dist/tools/pages/publish-format.js +92 -0
  34. package/dist/tools/pages/workspace.d.ts +7 -0
  35. package/dist/tools/pages/workspace.js +14 -0
  36. package/dist/tools/pages/writing-guide.d.ts +1 -0
  37. package/dist/tools/pages/writing-guide.js +56 -0
  38. package/dist/tools/research.js +16 -15
  39. package/package.json +15 -6
  40. package/skills/reddit-wiki/SKILL.md +46 -46
  41. package/dist/setup.js +0 -1243
  42. package/dist/validate.d.ts +0 -971
  43. package/dist/validate.js +0 -154
  44. package/dist/tools/{pages.d.ts → pages/index.d.ts} +0 -0
package/dist/tools/pages/writing-guide.js ADDED
@@ -0,0 +1,56 @@
+ export const WRITING_GUIDE = `
+ ## Page structure
+
+ \`\`\`yaml
+ ---
+ title: Page Title
+ wiki: wiki-slug
+ topics: [topic-one, topic-two]
+ sources:
+   - key: example-source
+     url: https://example.com
+     title: Source Title
+     accessed_date: "2025-01-15"
+ infobox:
+   header:
+     image_url: https://...
+     subtitle: Short tagline
+   details:
+     - key: Born
+       value: January 1, 1990
+ ---
+
+ Page body with [@key] citation markers and [[wikilinks]]...
+ \`\`\`
+
+ ## Wikilinks
+
+ - Write natural text in double brackets: [[spool pins]], [[pin tumbler locks]]
+ - Display text: [[spool-pins|spool pins]]
+ - Cross-wiki: [[global:reddit|Reddit]], [[lockpicking:spool-pins|spool pins]]
+
+ ## Citations
+
+ - Mark claims with [@key] after punctuation
+ - Keys must be kebab-case with at least one hyphen
+ - Every source must be referenced; every reference must have a source
+
+ ## Quoting
+
+ For any string value with punctuation, quotes, or special characters (common in \`sources[].title\`), use YAML block-literal syntax:
+
+ \`\`\`yaml
+ sources:
+   - key: farza-yc
+     title: |-
+       "I'm joining Y Combinator, again" — Farza Majeed
+     url: https://...
+ \`\`\`
+
+ This sidesteps every YAML escaping rule. If you skip this, inner double quotes or em-dashes will break the parser.
+
+ ## Images
+
+ Use search_images to find relevant images. Syntax: \`![Caption](url "position")\`
+ Positions: "right" (default), "left", "center". Every image needs a descriptive caption.
+ `.trim();
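The guide's citation-key rule ("kebab-case with at least one hyphen") is mechanical enough to check up front. A minimal sketch of such a check; this validator is hypothetical and not taken from the package:

```ts
// Hypothetical validator for the guide's citation-key rule:
// lowercase kebab-case with at least one hyphen, e.g. "farza-yc".
const CITATION_KEY = /^[a-z0-9]+(?:-[a-z0-9]+)+$/;

export function isValidCitationKey(key: string): boolean {
  return CITATION_KEY.test(key);
}

// isValidCitationKey("farza-yc")  -> true
// isValidCitationKey("farza")     -> false (no hyphen)
// isValidCitationKey("Farza-YC")  -> false (not lowercase)
```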
package/dist/tools/research.js CHANGED
@@ -4,7 +4,7 @@ import { request } from "../auth.js";
  import { coerceJson } from "../utils.js";
  export function registerResearchTools(server) {
  const SearchWebInput = z.object({
- source: z.enum(["web", "reddit"]).describe("Search source. Use 'web' for Google/Serper and 'reddit' for community perspectives via Reddit."),
+ source: z.enum(["web", "reddit"]).describe("Search source. Use 'web' for Google/Serper and 'reddit' for public perspectives via Reddit."),
  query: z.string().min(1).optional().describe("Search terms. Required for source='web'. Optional for source='reddit' — omit it there to return a sorted subreddit listing."),
  subreddit: z.string().optional().describe("Reddit-only. Subreddit name without the 'r/' prefix (e.g. 'Harvard'). Omit to search across all of Reddit."),
  sort: z.enum(["top", "hot", "new", "rising", "controversial", "relevance", "comments"])
@@ -34,40 +34,41 @@ export function registerResearchTools(server) {
  });
  server.addTool({
  name: "search_web",
- description: "Search the web or a specific community source (Reddit). Pick the source with the `source` field:\n\n" +
+ description: "Search the web or Reddit. Pick the source with the `source` field:\n\n" +
  "- `source: \"web\"` — general web search via Google. Use for news, docs, scholarly references.\n" +
  "- `source: \"reddit\"` — Reddit-aware search returning posts with score, flair, num_comments, permalink. " +
- "Use when the user is asking about community perspectives, subreddit consensus, or 'what do people think about X'.\n\n" +
+ "Use when the user is asking about public perspectives, subreddit consensus, or 'what do people think about X'.\n\n" +
  "Use only the fields relevant to the source you pick. " +
  "Rate limit: 10/min. Requires API key.",
  parameters: SearchWebInput,
  async execute(input) {
  if (input.source === "reddit") {
- const params = {
+ const body = {
+ source: "reddit",
  sort: input.sort ?? "top",
  time_range: input.time_range ?? "year",
  limit: input.limit ?? 25,
  };
  if (input.subreddit)
- params.subreddit = input.subreddit;
+ body.subreddit = input.subreddit;
  if (input.query)
- params.query = input.query;
- const resp = await request("GET", "/api/research/reddit/search", {
+ body.query = input.query;
+ const resp = await request("POST", "/api/research/search", {
  auth: true,
- params,
+ json: body,
  });
  return JSON.stringify(await resp.json(), null, 2);
  }
- const resp = await request("GET", "/api/research/search", {
+ const resp = await request("POST", "/api/research/search", {
  auth: true,
- params: { query: input.query.trim(), limit: input.limit ?? 10 },
+ json: { source: "web", query: input.query.trim(), limit: input.limit ?? 10 },
  });
  return JSON.stringify(await resp.json(), null, 2);
  },
  });
  server.addTool({
  name: "read_webpage",
- description: "Fetch a URL and return its content as markdown. Routes automatically based on URL:\n" +
+ description: "Read an external URL and return its content as markdown. Routes automatically based on URL:\n" +
  "- **Reddit threads** (reddit.com/r/{sub}/comments/{id}/...) — returns the post plus top-level threaded comments with scores and authors, via a residential proxy.\n" +
  "- **Reddit wiki pages** (reddit.com/r/{sub}/wiki/...) — returns the wiki page as markdown with revision metadata.\n" +
  "- **YouTube videos** — returns title, description, transcript when available.\n" +
@@ -100,11 +101,11 @@ export function registerResearchTools(server) {
  });
  server.addTool({
  name: "search_images",
- description: "Search for images to include in articles. Accepts multiple queries for batch lookup. Returns image URLs, titles, dimensions, and licensing info. " +
+ description: "Search for images to include in pages. Accepts multiple queries for batch lookup. Returns image URLs, titles, dimensions, and licensing info. " +
  "Three sources: 'google' (broad web images, default), 'unsplash' (high-quality stock photos), and 'wikimedia' (free, open-licensed from Wikimedia Commons). " +
  "Use descriptive search terms. After searching, call view_images on promising candidates to see what they actually show before using them. " +
- "External image URLs are automatically persisted when you publish the article — no extra steps needed.\n\n" +
- "## Using images in articles\n\n" +
+ "External image URLs are automatically persisted when you publish the page — no extra steps needed.\n\n" +
+ "## Using images in pages\n\n" +
  "Images render as figures with visible captions. The alt text becomes the caption — make it descriptive.\n\n" +
  "**Syntax:** `![Caption text](url \"position\")`\n\n" +
  "Position options (in the title/quotes):\n" +
@@ -122,7 +123,7 @@ export function registerResearchTools(server) {
  "- Bad: `![Logo](url)` — Good: `![The OpenAI logo, a stylized spiral](url)`\n\n" +
  "**Placement rules:**\n" +
  "- Place 1-3 images per major section — don't overload\n" +
- "- First image should appear near the top, illustrating the article's subject\n" +
+ "- First image should appear near the top, illustrating the page's subject\n" +
  "- Spread images throughout, not clustered together\n" +
  "- For the infobox hero image, set `infobox.header.image_url` in frontmatter instead\n\n" +
  "Requires login. Rate limit: 10/min.",
package/package.json CHANGED
@@ -1,22 +1,30 @@
  {
  "name": "openalmanac",
- "version": "0.3.6",
- "description": "OpenAlmanac — pull, edit, and push articles to the open knowledge base",
+ "version": "0.4.1",
+ "description": "OpenAlmanac — pull, edit, and push pages to the open knowledge base",
  "type": "module",
  "bin": {
  "openalmanac": "dist/cli.js"
  },
+ "exports": {
+ "./tool-registry": {
+ "types": "./dist/tool-registry.d.ts",
+ "default": "./dist/tool-registry.js"
+ }
+ },
  "scripts": {
  "build": "tsc",
  "dev": "tsc --watch",
- "start": "node dist/cli.js"
+ "start": "node dist/cli.js",
+ "test": "vitest run",
+ "test:watch": "vitest"
  },
  "keywords": [
  "openalmanac",
  "mcp",
  "knowledge-base",
  "ai",
- "articles"
+ "pages"
  ],
  "license": "MIT",
  "dependencies": {
@@ -25,8 +33,9 @@
  "zod": "^3.24.0"
  },
  "devDependencies": {
- "@types/node": "^22.0.0",
- "typescript": "^5.7.0"
+ "@types/node": "^25.6.0",
+ "typescript": "^5.7.0",
+ "vitest": "^3.2.4"
  },
  "engines": {
  "node": ">=18.0.0"
package/skills/reddit-wiki/SKILL.md CHANGED
@@ -1,7 +1,7 @@
  ---
  name: reddit-wiki
  description: Turn any subreddit into a published wiki on Almanac
- allowed-tools: Bash(node ${CLAUDE_SKILL_DIR}/scripts/ingest.js *), mcp__almanac__search_articles, mcp__almanac__search_communities, mcp__almanac__list_articles, mcp__almanac__read, mcp__almanac__download, mcp__almanac__new, mcp__almanac__publish, mcp__almanac__search_web, mcp__almanac__read_webpage, mcp__almanac__search_images, mcp__almanac__view_images, mcp__almanac__register_sources, mcp__almanac__login, mcp__almanac__create_community, Read(~/.openalmanac/**), Write(~/.openalmanac/**), Edit(~/.openalmanac/**)
+ allowed-tools: Bash(node ${CLAUDE_SKILL_DIR}/scripts/ingest.js *), mcp__almanac__search_pages, mcp__almanac__list_wikis, mcp__almanac__create_wiki, mcp__almanac__list_pages, mcp__almanac__read_page, mcp__almanac__download, mcp__almanac__new, mcp__almanac__publish, mcp__almanac__search_web, mcp__almanac__read_webpage, mcp__almanac__search_images, mcp__almanac__view_images, mcp__almanac__register_sources, mcp__almanac__login, Read(~/.openalmanac/**), Write(~/.openalmanac/**), Edit(~/.openalmanac/**)
  argument-hint: r/<subreddit>
  ---

@@ -18,8 +18,8 @@ Never estimate how long things will take. Do show data sizes so the user knows w
  ## Flow overview

  Two phases:
- 1. **Foundation** — Plan and write 15-20 core articles with images, citations, and wikilinks
- 2. **Deep Absorb** — Process the corpus batch by batch, discovering niche topics and enriching existing articles
+ 1. **Foundation** — Plan and write 15-20 core pages with images, citations, and wikilinks
+ 2. **Deep Absorb** — Process the corpus batch by batch, discovering niche topics and enriching existing pages

  ## Naming convention

@@ -32,9 +32,9 @@ Two phases:

  If the user runs `/reddit-wiki` without arguments or asks how it works, explain briefly:

- - **What it does:** Takes any subreddit and builds a wiki on Almanac — real articles with citations, images, and links between them. Two phases: a foundation of 15-20 core articles, then a deep pass through the corpus finding niche topics.
+ - **What it does:** Takes any subreddit and builds a wiki on Almanac — real pages with citations, images, and links between them. Two phases: a foundation of 15-20 core pages, then a deep pass through the corpus finding niche topics.
  - **What Almanac is:** An open knowledge base anyone can read and write to. Think Wikipedia's depth meets Reddit's community energy.
- - **How it works:** Downloads the subreddit's history, scores posts by quality, then uses AI agents to research and write articles citing the community's own discussions.
+ - **How it works:** Downloads the subreddit's history, scores posts by quality, then uses AI agents to research and write pages citing the community's own discussions.
  - **Data storage:** Everything is stored locally at `~/.openalmanac/corpus/<subreddit>/`. The user can delete it anytime after the wiki is published.
  - **Any subreddit:** They can pick any subreddit they're interested in. Some smaller or newer subreddits may not have data available — if that happens, you'll suggest alternatives or nearby subreddits that do have data.

@@ -45,8 +45,8 @@ Then end with a single inviting line that asks what they're into and offers to h
  Extract the subreddit name from the argument (strip `r/` prefix if present). Use the bare name for all API calls and file paths. Use `r/<name>` when talking to the user.

  Run these three things in parallel (silently — don't narrate the tool calls):
- 1. `search_communities("<subreddit_name>")`
- 2. `search_articles` with 5-10 key topic terms you'd expect in this community
+ 1. `list_wikis()` and look for the subreddit wiki slug
+ 2. `search_pages` with 5-10 key topic terms you'd expect in this community
  3. Get subreddit stats from Arctic Shift:

  ```bash
@@ -56,7 +56,7 @@ node ${CLAUDE_SKILL_DIR}/scripts/ingest.js $1 count
  This returns JSON with `total_posts`, `total_comments`, and `estimated_size_mb`.

  Now greet the user. Tell them:
- - What already exists on Almanac for this community (articles, stubs, community)
+ - What already exists on Almanac for this community (pages, stubs, community)
  - Share something genuinely interesting about it if you know anything
  - Subreddit stats (posts, comments)
  - The two-phase plan (brief — one line each)
@@ -153,18 +153,18 @@ If the `count` command returns 0 posts, the subreddit may not be indexed. In thi
  Read 20-30 corpus entries (prioritize high-score posts) to understand the landscape. Also check what already exists:

  ```
- list_articles(community_slug: "<subreddit>", sort: "most_referenced")
+ list_pages(wiki_slug: "<subreddit>")
  ```

- Identify 15-20 core articles. **Favor nouns over themes** — specific things people would look up, not vague survey topics.
+ Identify 15-20 core pages. **Favor nouns over themes** — specific things people would look up, not vague survey topics.

  - **~70% nouns:** Specific locks, tools, people, techniques, concepts. "American Lock 1100", "Spool Pin", "Tension Wrench", "LockPickingLawyer". These are the building blocks — what people search for, link to, and learn from.
- - **~30% structural themes:** Only the big ones that serve as entry points and tie nouns together. "Belt System", "Lock Picking Basics". Not vague surveys — each should be a real article that teaches something.
+ - **~30% structural themes:** Only the big ones that serve as entry points and tie nouns together. "Belt System", "Lock Picking Basics". Not vague surveys — each should be a real page that teaches something.

  Bad: "Security Pin Mechanics" (vague theme, reads like a textbook chapter)
  Good: "Spool Pin", "Serrated Pin", "Mushroom Pin" (specific nouns — then link them from a "Security Pins" overview)

- Present them to the user grouped by category, but make clear most articles are about specific things:
+ Present them to the user grouped by category, but make clear most pages are about specific things:

  ```
  Here's what I'd build for the foundation:
@@ -188,21 +188,21 @@ Include your recommendation. Wait for the user to confirm or adjust.

  ### Topics

- The groupings you present (Locks, Components, Techniques, Community) become **community topics** on Almanac. Topics show up as categories on the wiki page and each article gets assigned to one. When you scaffold articles, include the topic in the `new()` call.
+ The groupings you present (Locks, Components, Techniques, Community) become **community topics** on Almanac. Topics show up as categories on the wiki page and each page gets assigned to one. When you scaffold pages, include the topic in the `new()` call.

- Keep topics broad and few (4-7). They're navigation, not a taxonomy. A topic like "Locks" is good. A topic like "European High-Security Disc Detainer Locks" is too specific — that's an article, not a topic.
+ Keep topics broad and few (4-7). They're navigation, not a taxonomy. A topic like "Locks" is good. A topic like "European High-Security Disc Detainer Locks" is too specific — that's a page, not a topic.

  ### Scaffold entities

- Before any writing, scaffold all planned articles as local files:
+ Before any writing, scaffold all planned pages as local files:

- 1. **Check what exists online:** `search_articles` with ALL planned entity names in one batch call
- 2. **Check local folder:** Read `~/.openalmanac/articles/<subreddit>/` to see what's already scaffolded
- 3. **Create missing:** `new(articles: [{title, community_slug}, ...])` for everything not found
+ 1. **Check what exists online:** `search_pages` with ALL planned entity names in one batch call
+ 2. **Check local folder:** Read `~/.openalmanac/pages/<subreddit>/` to see what's already scaffolded
+ 3. **Create missing:** `new(pages: [{title, slug?, topics?}, ...], wiki_slug: "<subreddit>")` for everything not found

  This creates the entity map. Writing agents will check the local folder to know what slugs exist.

- ### Write articles
+ ### Write pages

  Tell the user what's happening:

@@ -215,62 +215,62 @@ Kicking off the writing agents:
  • Agent 4: Community — LockPickingLawyer, Belt System
  ```

- Spin up 4-5 parallel writing agents, ~3-4 articles each. Group by theme so related articles are written by the same agent (better cross-referencing).
+ Spin up 4-5 parallel writing agents, ~3-4 pages each. Group by theme so related pages are written by the same agent (better cross-referencing).

  **Each writing agent's brief must include:**

- 1. **Which articles to write** (the scaffolded .md files to fill in)
+ 1. **Which pages to write** (the scaffolded .md files to fill in)
  2. **Corpus entries to read** — point to specific files in `~/.openalmanac/corpus/<subreddit>/` relevant to its topics
  3. **The entity map** — list all scaffolded slugs so the agent uses correct wikilinks
  4. **These citation rules:**
  - Every source MUST have a public URL
- - Corpus entries have `citation_key` and `source` (Reddit permalink) in their frontmatter — use them as `[@citation_key]` markers and list them in the article's YAML `sources:` array
+ - Corpus entries have `citation_key` and `source` (Reddit permalink) in their frontmatter — use them as `[@citation_key]` markers and list them in the page's YAML `sources:` array
  - Also use `search_web` and `read_webpage` for additional sources beyond Reddit
  - NEVER fabricate a URL. If a source has no public URL, do not use it.
  - Register sources with `register_sources` before writing
  5. **These wikilink rules:**
  - Use `[[slug|Display Text]]` syntax for entities that exist (scaffolded or published)
- - Before linking to a new entity NOT on the map: `search_articles` to check, then scaffold with `new()` if needed
+ - Before linking to a new entity NOT on the map: `search_pages` to check, then scaffold with `new()` if needed
  - Prefer existing slugs over inventing new ones
  6. **Writing quality:**
  - Fetch guidelines from `https://openalmanac.org/writing-guidelines` using `read_webpage`
  - Write with the community's voice — cite Reddit discussions, not just Wikipedia
  - Include `[@citation_key]` markers throughout, especially for claims from the corpus
- - Articles should feel like they were written by someone who lives in this community
+ - Pages should feel like they were written by someone who lives in this community

  **While agents work**, narrate what's happening. Share interesting things you see them finding. Example:

  ```
  Agent 2 found a heated 2019 thread about whether LockPickingLawyer's
  speed picks are realistic for beginners — 400 upvotes, great discussion.
- Working that into the article...
+ Working that into the page...
  ```

  ### Image pass

- After all writing agents finish, run parallel haiku-model image agents (one per article):
+ After all writing agents finish, run parallel haiku-model image agents (one per page):

  Each image agent:
- 1. Reads the article
+ 1. Reads the page
  2. `search_images` for 1-2 hero image queries
  3. `view_images` to verify the best candidate
- 4. Adds the image URL to the article's frontmatter as `image_url`
+ 4. Adds the image URL to the page's frontmatter as `image_url`

  ### Publish

  ```
- publish(community_slug: "<subreddit>")
+ publish(wiki_slug: "<subreddit>")
  ```

- This batch-publishes all articles in the community folder. The backend auto-creates stubs from any dead wikilinks in the articles.
+ This batch-publishes all pages in the community folder. The backend auto-creates stubs from any dead wikilinks in the pages.

  Share the results with enthusiasm:

  ```
- 17 articles live! The wiki now has 35 articles total, plus
+ 17 pages live! The wiki now has 35 pages total, plus
  12 new stubs that emerged from wikilinks.

- Check it out: openalmanac.org/communities/<subreddit>/wiki
+ Check it out: openalmanac.org/w/<subreddit>

  You can also browse it in the Almanac desktop app — best way
  to explore and keep contributing.
@@ -302,10 +302,10 @@ For each batch:

  1. **Read 50 unabsorbed entries** from the corpus directory (skip any listed in absorb_log)
  2. **Cluster by theme** — what topics do these entries cover?
- 3. **Decide:** Create new articles? Enrich existing ones? Both?
- 4. **For existing articles:** `download` them first, then expand with new details/sections
- 5. **For new articles:** Scaffold → write → add to wiki
- 6. **Image pass** on any new articles (haiku agents)
+ 3. **Decide:** Create new pages? Enrich existing ones? Both?
+ 4. **For existing pages:** `download` them first, then expand with new details/sections
+ 5. **For new pages:** Scaffold → write → add to wiki
+ 6. **Image pass** on any new pages (haiku agents)
  7. **Publish** the batch
  8. **Update absorb_log.json:**
  ```json
@@ -313,7 +313,7 @@ For each batch:
  "entries": {
  "<filename>": {
  "absorbed_at": "<ISO timestamp>",
- "absorbed_into": ["article-slug-1", "article-slug-2"]
+ "absorbed_into": ["page-slug-1", "page-slug-2"]
  }
  },
  "stats": {
@@ -330,12 +330,12 @@ For each batch:
  Batches 1-5 done. Found some gems:
  • "Lock Lubricants in Cold Weather" — apparently Houdini
  lube freezes below -20°F, community recommends graphite
- • Expanded the American 1100 article with a detailed
+ • Expanded the American 1100 page with a detailed
  teardown thread from 2017
- • New article: "Lockpicking Competitions" — there's a
+ • New page: "Lockpicking Competitions" — there's a
  whole competitive scene

- 3 new articles, 4 enriched. Continuing...
+ 3 new pages, 4 enriched. Continuing...
  ```

  ### When to stop
@@ -348,11 +348,11 @@ Batches 1-5 done. Found some gems:
  Phase 2 complete. Processed X,XXX entries across N batches.

  Final wiki:
- XX articles (was YY)
+ XX pages (was YY)
  XX remaining stubs
  XXX+ citations from the community

- openalmanac.org/communities/<subreddit>/wiki
+ openalmanac.org/w/<subreddit>
  ```

  ## Important rules
@@ -364,14 +364,14 @@ openalmanac.org/communities/<subreddit>/wiki
  - Corpus entries have `citation_key` and `source` in their frontmatter — these are ready to use.

  ### Entity linking
- - Always `search_articles` before creating new entities — check what already exists
- - Check the local `~/.openalmanac/articles/<subreddit>/` folder for scaffolded files
+ - Always `search_pages` before creating new entities — check what already exists
+ - Check the local `~/.openalmanac/pages/<subreddit>/` folder for scaffolded files
  - Only scaffold with `new()` if the entity doesn't exist anywhere
  - Use `[[slug|Display Text]]` wikilink syntax
  - Prefer existing slugs over inventing new ones to avoid duplicates

  ### Community creation
- - If the community doesn't exist on Almanac yet, create it with `create_community`
+ - If the wiki doesn't exist on Almanac yet, create it with `create_wiki`
  - The description should have personality — capture the community's vibe, not a generic taxonomy
  - Find a good cover image with `search_images`

@@ -385,5 +385,5 @@ openalmanac.org/communities/<subreddit>/wiki
  - Don't make small talk or ask personal questions
  - Don't force enthusiasm — if something isn't interesting, don't pretend
  - Don't go silent for long stretches — narrate what's happening
- - Don't ask permission for every article — the user approved the plan, that's consent
+ - Don't ask permission for every page — the user approved the plan, that's consent
  - Don't skip Reddit as a source — the corpus IS the community's voice, cite it