openalmanac 0.3.5 → 0.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -2,34 +2,12 @@ import { z } from "zod";
2
2
  import { readFileSync, writeFileSync, mkdirSync, readdirSync, existsSync, unlinkSync } from "node:fs";
3
3
  import { join } from "node:path";
4
4
  import { stringify as yamlStringify } from "yaml";
5
- import { request, ARTICLES_DIR } from "../auth.js";
6
- import { validateArticle } from "../validate.js";
5
+ import { request, PAGES_DIR } from "../auth.js";
7
6
  import { openBrowser } from "../browser.js";
7
+ import { coerceJson } from "../utils.js";
8
8
  const SLUG_RE = /^[a-z0-9]+(-[a-z0-9]+)*$/;
9
- function slugify(title) {
10
- return title
11
- .toLowerCase()
12
- .normalize("NFD")
13
- .replace(/[\u0300-\u036f]/g, "")
14
- .replace(/[^a-z0-9]+/g, "-")
15
- .replace(/^-+|-+$/g, "")
16
- .replace(/-{2,}/g, "-");
17
- }
18
- function coerceJson(schema) {
19
- return z.preprocess((val) => {
20
- if (typeof val === "string") {
21
- try {
22
- return JSON.parse(val);
23
- }
24
- catch {
25
- return val;
26
- }
27
- }
28
- return val;
29
- }, schema);
30
- }
31
9
  function resolvePageDir(wikiSlug) {
32
- return join(ARTICLES_DIR, wikiSlug);
10
+ return join(PAGES_DIR, wikiSlug);
33
11
  }
34
12
  function resolvePagePaths(slug, wikiSlug) {
35
13
  const dir = resolvePageDir(wikiSlug);
@@ -76,14 +54,118 @@ Page body with [@key] citation markers and [[wikilinks]]...
76
54
  - Keys must be kebab-case with at least one hyphen
77
55
  - Every source must be referenced; every reference must have a source
78
56
 
57
+ ## Quoting
58
+
59
+ For any string value with punctuation, quotes, or special characters (common in \`sources[].title\`), use YAML block-literal syntax:
60
+
61
+ \`\`\`yaml
62
+ sources:
63
+ - key: farza-yc
64
+ title: |-
65
+ "I'm joining Y Combinator, again" — Farza Majeed
66
+ url: https://...
67
+ \`\`\`
68
+
69
+ This sidesteps every YAML escaping rule. If you skip this, inner double quotes or em-dashes will break the parser.
70
+
79
71
  ## Images
80
72
 
81
73
  Use search_images to find relevant images. Syntax: \`![Caption](url "position")\`
82
74
  Positions: "right" (default), "left", "center". Every image needs a descriptive caption.
83
75
  `.trim();
76
+ function formatPublishResults(results, targetSlugs, wiki_slug, dry_run) {
77
+ const allAutoStubs = new Set();
78
+ const lines = [];
79
+ let okCount = 0;
80
+ let errorCount = 0;
81
+ for (let i = 0; i < results.length; i++) {
82
+ const r = results[i];
83
+ const slug = targetSlugs[i] ?? r.slug;
84
+ if (dry_run && r.plan) {
85
+ const plan = r.plan;
86
+ const hasError = plan.validation.status === "failed" ||
87
+ !plan.authorization.can_write ||
88
+ plan.action === "error";
89
+ if (hasError) {
90
+ errorCount++;
91
+ const reasons = [];
92
+ for (const e of plan.validation.errors) {
93
+ reasons.push(`${e.field}: ${e.message}`);
94
+ }
95
+ if (!plan.authorization.can_write && plan.authorization.reason) {
96
+ reasons.push(`auth: ${plan.authorization.reason}`);
97
+ }
98
+ lines.push(`- ${slug}: **error** — ${reasons.join("; ")}`);
99
+ }
100
+ else {
101
+ okCount++;
102
+ let line = `- ${plan.slug}: **${plan.action}**`;
103
+ if (plan.renamed_from)
104
+ line += ` (rename: ${plan.renamed_from} → ${plan.slug})`;
105
+ const details = [];
106
+ if (plan.source_keys.referenced.length > 0) {
107
+ details.push(`${plan.source_keys.referenced.length} source(s)`);
108
+ }
109
+ if (plan.wikilinks.will_auto_stub.length > 0) {
110
+ details.push(`${plan.wikilinks.will_auto_stub.length} new stub(s)`);
111
+ plan.wikilinks.will_auto_stub.forEach(s => allAutoStubs.add(s));
112
+ }
113
+ const inBatchLinks = plan.wikilinks.in_batch ?? [];
114
+ if (inBatchLinks.length > 0) {
115
+ details.push(`${inBatchLinks.length} in-batch link(s)`);
116
+ }
117
+ if (plan.source_keys.orphaned.length > 0) {
118
+ details.push(`missing source key(s): ${plan.source_keys.orphaned.join(", ")}`);
119
+ }
120
+ if (plan.source_keys.unreferenced.length > 0) {
121
+ details.push(`unreferenced source(s): ${plan.source_keys.unreferenced.join(", ")}`);
122
+ }
123
+ if (details.length > 0)
124
+ line += ` (${details.join(", ")})`;
125
+ lines.push(line);
126
+ }
127
+ }
128
+ else {
129
+ // Real publish result
130
+ if (r.status === "error") {
131
+ errorCount++;
132
+ lines.push(`- ${r.slug}: **error** — ${r.error}`);
133
+ }
134
+ else {
135
+ okCount++;
136
+ // Clean up local files — pre-rename slug names the file
137
+ const fileSlug = r.renamed_from ?? slug;
138
+ const { filePath, refPath } = resolvePagePaths(fileSlug, wiki_slug);
139
+ try {
140
+ unlinkSync(filePath);
141
+ }
142
+ catch { /* ok */ }
143
+ try {
144
+ unlinkSync(refPath);
145
+ }
146
+ catch { /* ok */ }
147
+ let line = `- ${r.slug}: **${r.status}**`;
148
+ if (r.renamed_from)
149
+ line += ` (renamed from ${r.renamed_from})`;
150
+ if (r.stubs_created?.length) {
151
+ r.stubs_created.forEach(s => allAutoStubs.add(s));
152
+ }
153
+ lines.push(line);
154
+ }
155
+ }
156
+ }
157
+ const verb = dry_run ? "Dry-run" : "Published";
158
+ const summary = `${verb}: ${okCount}/${targetSlugs.length} OK${errorCount > 0 ? `, ${errorCount} error(s)` : ""}.`;
159
+ const parts = [summary, "", ...lines];
160
+ if (allAutoStubs.size > 0) {
161
+ const stubVerb = dry_run ? "Stubs that will be auto-created" : "Stubs auto-created";
162
+ parts.push("", `${stubVerb}: ${[...allAutoStubs].join(", ")}`);
163
+ }
164
+ return parts.join("\n");
165
+ }
84
166
  export function registerPageTools(server) {
85
167
  server.addTool({
86
- name: "search_articles",
168
+ name: "search_pages",
87
169
  description: "Search OpenAlmanac pages and stubs across all wikis. Use to check existence, find slugs for wikilinks, " +
88
170
  "or discover content. Optional wiki filter to scope results. No authentication needed.",
89
171
  parameters: z.object({
@@ -135,7 +217,7 @@ export function registerPageTools(server) {
135
217
  },
136
218
  });
137
219
  server.addTool({
138
- name: "list_articles",
220
+ name: "list_pages",
139
221
  description: "Browse pages in a wiki. Structured listing, not fuzzy search. " +
140
222
  "Use to see what exists, find stubs, or discover pages by topic. " +
141
223
  "Each returned page includes topic objects with both slug and title.",
@@ -159,7 +241,7 @@ export function registerPageTools(server) {
159
241
  server.addTool({
160
242
  name: "download",
161
243
  description: "Download pages to your local workspace for editing. " +
162
- "Files go to ~/.openalmanac/articles/{wiki_slug}/{slug}.md with a .ref sidecar. " +
244
+ "Files go to ~/.openalmanac/pages/{wiki_slug}/{slug}.md with a .ref sidecar. " +
163
245
  "After editing, use publish to push changes. The .ref file is system-managed — don't edit it.",
164
246
  parameters: z.object({
165
247
  slugs: coerceJson(z.array(z.string()).min(1).max(50)).describe("Page slugs to download"),
@@ -187,11 +269,19 @@ export function registerPageTools(server) {
187
269
  server.addTool({
188
270
  name: "new",
189
271
  description: "Scaffold new pages locally. Creates .md files with YAML frontmatter and empty bodies. " +
190
- "No .ref file is created (new pages). After writing content, use publish to go live.",
272
+ "No .ref file is created (new pages). After writing content, use publish to go live.\n\n" +
273
+ "Passing `slug` is an identity claim, not just a filename hint. The server will honor it " +
274
+ "at publish time instead of deriving a slug from the title. " +
275
+ "If no slug is provided, the server derives the slug from the title at publish.\n\n" +
276
+ "To edit the auto-generated main-page created by create_wiki, do NOT use `new` — " +
277
+ "use `download` with slug `main-page` to get the page and its ref token, then edit and publish. " +
278
+ "Publishing without a ref token is a create operation and will fail with a slug collision " +
279
+ "because main-page already exists.",
191
280
  parameters: z.object({
192
281
  pages: coerceJson(z.array(z.object({
193
282
  title: z.string().describe("Page title"),
194
- slug: z.string().optional().describe("Optional explicit slug"),
283
+ slug: z.string().optional().describe("Optional explicit slug (kebab-case). When provided, the server uses this slug " +
284
+ "at publish instead of deriving one from the title."),
195
285
  topics: z.array(z.string()).optional().describe("Topic slugs"),
196
286
  })).min(1).max(50)).describe("Pages to scaffold"),
197
287
  wiki_slug: z.string().describe("Wiki slug"),
@@ -202,17 +292,37 @@ export function registerPageTools(server) {
202
292
  const created = [];
203
293
  const skipped = [];
204
294
  for (const item of pages) {
205
- const slug = item.slug || slugify(item.title);
206
- if (!slug || !SLUG_RE.test(slug)) {
207
- skipped.push(`"${item.title}" invalid slug "${slug}"`);
208
- continue;
295
+ // If an explicit slug is provided, validate it and use it for the filename.
296
+ // If none is provided, derive a simple filename from the title for local
297
+ // convenience only — the server will derive the authoritative slug from
298
+ // the title at publish time.
299
+ let fileSlug;
300
+ if (item.slug) {
301
+ if (!SLUG_RE.test(item.slug)) {
302
+ skipped.push(`"${item.title}" → invalid slug "${item.slug}"`);
303
+ continue;
304
+ }
305
+ fileSlug = item.slug;
306
+ }
307
+ else {
308
+ // Local filename only — server derives from title at publish.
309
+ fileSlug = item.title
310
+ .toLowerCase()
311
+ .replace(/[^a-z0-9]+/g, "-")
312
+ .replace(/^-+|-+$/g, "")
313
+ .replace(/-{2,}/g, "-") || "untitled";
209
314
  }
210
- const filePath = join(dir, `${slug}.md`);
315
+ const filePath = join(dir, `${fileSlug}.md`);
211
316
  if (existsSync(filePath)) {
212
- skipped.push(`${slug}.md already exists`);
317
+ skipped.push(`${fileSlug}.md already exists`);
213
318
  continue;
214
319
  }
215
320
  const meta = { title: item.title, wiki: wiki_slug };
321
+ // Embed explicit slug in frontmatter so the server binds it at publish.
322
+ // Without this, changing the title would change the slug; with it, the
323
+ // server uses this slug regardless of the title's derived form.
324
+ if (item.slug)
325
+ meta.slug = item.slug;
216
326
  if (item.topics?.length)
217
327
  meta.topics = item.topics;
218
328
  meta.sources = [];
@@ -220,9 +330,28 @@ export function registerPageTools(server) {
220
330
  writeFileSync(filePath, `---\n${frontmatter}---\n\n`, "utf-8");
221
331
  created.push(filePath);
222
332
  }
333
+ // Scaffold-time nudge: check if any created pages have matching slugs
334
+ // in the global wiki (Almanac). Fires before writing so the agent can
335
+ // decide to cross-link instead of writing a duplicate treatment.
336
+ const nudges = [];
337
+ if (created.length > 0 && wiki_slug !== "global") {
338
+ const createdSlugs = created.map(p => p.split("/").pop().replace(".md", ""));
339
+ for (const slug of createdSlugs) {
340
+ try {
341
+ const res = await request("GET", `/api/w/global/pages/${slug}`);
342
+ if (res.ok) {
343
+ const page = await res.json();
344
+ nudges.push(`Note: Almanac already has a page "${page.title ?? slug}" (slug: ${slug}). ` +
345
+ `Write your own treatment for this wiki, or cross-link with [[global:${slug}]] instead.`);
346
+ }
347
+ }
348
+ catch { /* page doesn't exist in global wiki — no nudge */ }
349
+ }
350
+ }
223
351
  const parts = [
224
352
  created.length > 0 ? `Created ${created.length} file(s):\n${created.map(p => ` - ${p}`).join("\n")}` : "No new files created.",
225
353
  skipped.length > 0 ? `Skipped:\n${skipped.map(s => ` - ${s}`).join("\n")}` : "",
354
+ nudges.length > 0 ? nudges.join("\n") : "",
226
355
  WRITING_GUIDE,
227
356
  ];
228
357
  return parts.filter(Boolean).join("\n\n");
@@ -232,13 +361,19 @@ export function registerPageTools(server) {
232
361
  name: "publish",
233
362
  description: "Publish pages from your local workspace. Reads .md files and their .ref sidecars, " +
234
363
  "sends to the API. Pages with .ref are updates; pages without are new. " +
235
- "Dead wikilinks auto-create stubs. Put edit_summary in frontmatter for change descriptions. Requires login.",
364
+ "Dead wikilinks auto-create stubs. Put edit_summary in frontmatter for change descriptions. Requires login.\n\n" +
365
+ "Set dry_run=true to plan without committing: the backend validates frontmatter, checks authorization, " +
366
+ "resolves wikilinks, cross-checks citation keys, and detects renames — all read-only. " +
367
+ "Caveats: plan reflects state at time of check — permissions and slug availability may change before real publish. " +
368
+ "Rename detection shows the slug derived from the current title; subsequent title edits can change this.",
236
369
  parameters: z.object({
237
370
  slugs: coerceJson(z.array(z.string()).min(1).max(50)).optional()
238
371
  .describe("Specific page slugs to publish"),
239
372
  wiki_slug: z.string().describe("Wiki slug"),
373
+ dry_run: z.boolean().default(false).optional()
374
+ .describe("When true, plan all pages without committing any changes"),
240
375
  }),
241
- async execute({ slugs, wiki_slug }) {
376
+ async execute({ slugs, wiki_slug, dry_run }) {
242
377
  const dir = resolvePageDir(wiki_slug);
243
378
  // Determine which files to publish
244
379
  let targetSlugs;
@@ -262,95 +397,73 @@ export function registerPageTools(server) {
262
397
  throw new Error(`File not found: ${filePath}`);
263
398
  }
264
399
  const content = readFileSync(filePath, "utf-8");
265
- const errors = validateArticle(content);
266
- if (errors.length > 0) {
267
- throw new Error(`Validation failed for ${slug}:\n${errors.map(e => ` ${e.field}: ${e.message}`).join("\n")}`);
268
- }
269
400
  const ref = existsSync(refPath) ? readFileSync(refPath, "utf-8").trim() : null;
270
401
  pages.push({ content, ref });
271
402
  }
272
- const resp = await request("POST", `/api/w/${wiki_slug}/publish`, {
403
+ const endpoint = dry_run
404
+ ? `/api/w/${wiki_slug}/publish?dry_run=true`
405
+ : `/api/w/${wiki_slug}/publish`;
406
+ const resp = await request("POST", endpoint, {
273
407
  auth: true,
274
408
  json: { pages },
275
409
  });
276
410
  const results = (await resp.json());
277
- // Clean up local files for successful publishes
278
- const lines = [];
279
- let okCount = 0;
280
- for (const r of results) {
281
- if (r.status === "error") {
282
- lines.push(`FAILED ${r.slug}: ${r.error}`);
283
- continue;
284
- }
285
- okCount++;
286
- // The local file was named with the pre-rename slug. The server returns
287
- // `renamed_from` on rename so we can clean up the right file without
288
- // relying on request/response index parity.
289
- const published_slug = r.renamed_from ?? r.slug;
290
- const { filePath, refPath } = resolvePagePaths(published_slug, wiki_slug);
291
- try {
292
- unlinkSync(filePath);
293
- }
294
- catch { /* ok */ }
295
- try {
296
- unlinkSync(refPath);
297
- }
298
- catch { /* ok */ }
299
- let detail = `OK ${r.slug}: ${r.status}`;
300
- if (r.renamed_from)
301
- detail += ` (renamed from ${r.renamed_from})`;
302
- if (r.stubs_created?.length)
303
- detail += `\n Stubs created: ${r.stubs_created.join(", ")}`;
304
- lines.push(detail);
305
- // Open browser for single publish
306
- if (targetSlugs.length === 1 && process.env.OPENALMANAC_GUI !== "1") {
411
+ const summary = formatPublishResults(results, targetSlugs, wiki_slug, dry_run ?? false);
412
+ // Open browser on single-page publish success (non-GUI, non-dry-run).
413
+ if (!dry_run && targetSlugs.length === 1 && process.env.OPENALMANAC_GUI !== "1") {
414
+ const r = results[0];
415
+ if (r && r.status !== "error") {
416
+ const resultSlug = r.slug;
307
417
  const url = wiki_slug === "global"
308
- ? `https://www.openalmanac.org/page/${r.slug}?celebrate=true`
309
- : `https://www.openalmanac.org/w/${wiki_slug}/${r.slug}?celebrate=true`;
418
+ ? `https://www.openalmanac.org/page/${resultSlug}?celebrate=true`
419
+ : `https://www.openalmanac.org/w/${wiki_slug}/${resultSlug}?celebrate=true`;
310
420
  openBrowser(url);
311
421
  }
312
422
  }
313
- return `Published ${okCount}/${targetSlugs.length}.\n\n${lines.join("\n\n")}`;
423
+ return summary;
314
424
  },
315
425
  });
316
426
  server.addTool({
317
- name: "propose_article",
318
- description: "Propose an article before writing it. Structures your proposal with a user-facing summary and a detailed brief. " +
319
- "Do not start writing without proposing first.",
427
+ name: "read_page",
428
+ description: "Read a single page by slug. Returns the full page JSON including content, topics, sources, and infobox. " +
429
+ "No side effects — use this to read a page without downloading it to disk or joining the wiki. " +
430
+ "For editing, use `download` instead (it writes local files and handles ref tokens). " +
431
+ "For discovery, use `search_pages` instead. No authentication needed.",
320
432
  parameters: z.object({
321
- summary: z.string().describe("User-facing summary (3-5 bullet points)"),
322
- details: z.string().describe("Full handoff brief with all sources, key facts, angle"),
323
- title: z.string().describe("Proposed title"),
324
- slug: z.string().describe("Proposed slug (kebab-case)"),
325
- wiki_slug: z.string().default("global").describe("Wiki slug"),
326
- _userChoice: z.enum(["background", "here", "expired", "already_in_progress"]).optional(),
433
+ wiki_slug: z.string().describe("Wiki slug"),
434
+ page_slug: z.string().describe("Page slug"),
327
435
  }),
328
- async execute({ summary, details, title, slug, wiki_slug, _userChoice }) {
329
- if (_userChoice === "background") {
330
- return `Article "${title}" is now being written in a background process.`;
331
- }
332
- if (_userChoice === "expired") {
333
- return `Proposal expired. Continue the conversation naturally.`;
334
- }
335
- if (_userChoice === "already_in_progress") {
336
- return `Article "${title}" is already being generated.`;
337
- }
338
- return `Article Proposal: ${title}\n\n${summary}\n\nProceed with writing this article following the writing flow in your instructions.`;
436
+ async execute({ wiki_slug, page_slug }) {
437
+ const resp = await request("GET", `/api/w/${wiki_slug}/pages/${page_slug}`);
438
+ return JSON.stringify(await resp.json(), null, 2);
339
439
  },
340
440
  });
341
441
  server.addTool({
342
- name: "resolve",
343
- description: "Check if pages exist before writing wikilinks. Returns status (found/stub/not_found) for each target. " +
344
- "Use this to verify links before publishing.",
442
+ name: "delete_pages",
443
+ description: "⚠️ Permanently deletes pages. Cannot be undone. Confirm with user before calling. " +
444
+ "Accepts multiple slugs and deletes them in sequence. Requires moderator or creator access.",
345
445
  parameters: z.object({
346
446
  wiki_slug: z.string().describe("Wiki slug"),
347
- targets: coerceJson(z.array(z.string()).min(1).max(50)).describe("Link targets to resolve"),
447
+ page_slugs: coerceJson(z.array(z.string()).min(1).max(50)).describe("Page slugs to delete (1-50)"),
348
448
  }),
349
- async execute({ wiki_slug, targets }) {
350
- const resp = await request("POST", `/api/w/${wiki_slug}/resolve`, {
351
- json: { targets },
352
- });
353
- return JSON.stringify(await resp.json(), null, 2);
449
+ async execute({ wiki_slug, page_slugs }) {
450
+ const results = [];
451
+ for (const slug of page_slugs) {
452
+ try {
453
+ // DELETE returns 204 No Content on success
454
+ await request("DELETE", `/api/w/${wiki_slug}/pages/${slug}`, { auth: true });
455
+ results.push({ slug, status: "deleted" });
456
+ }
457
+ catch (err) {
458
+ const message = err instanceof Error ? err.message : String(err);
459
+ results.push({ slug, status: "error", message });
460
+ }
461
+ }
462
+ const deleted = results.filter(r => r.status === "deleted").length;
463
+ const lines = results.map(r => r.status === "deleted"
464
+ ? `- ${r.slug}: deleted`
465
+ : `- ${r.slug}: error — ${r.message}`);
466
+ return `Deleted ${deleted}/${page_slugs.length} pages.\n\n${lines.join("\n")}`;
354
467
  },
355
468
  });
356
469
  }
@@ -1,27 +1,10 @@
1
1
  import { z } from "zod";
2
2
  import { imageContent } from "fastmcp";
3
3
  import { request } from "../auth.js";
4
- /**
5
- * Workaround for Claude Agent SDK MCP transport bug (#18260):
6
- * Array/object parameters are sometimes serialized as JSON strings
7
- * instead of native values. This preprocessor coerces them back.
8
- */
9
- function coerceJson(schema) {
10
- return z.preprocess((val) => {
11
- if (typeof val === "string") {
12
- try {
13
- return JSON.parse(val);
14
- }
15
- catch {
16
- return val;
17
- }
18
- }
19
- return val;
20
- }, schema);
21
- }
4
+ import { coerceJson } from "../utils.js";
22
5
  export function registerResearchTools(server) {
23
6
  const SearchWebInput = z.object({
24
- source: z.enum(["web", "reddit"]).describe("Search source. Use 'web' for Google/Serper and 'reddit' for community perspectives via Reddit."),
7
+ source: z.enum(["web", "reddit"]).describe("Search source. Use 'web' for Google/Serper and 'reddit' for public perspectives via Reddit."),
25
8
  query: z.string().min(1).optional().describe("Search terms. Required for source='web'. Optional for source='reddit' — omit it there to return a sorted subreddit listing."),
26
9
  subreddit: z.string().optional().describe("Reddit-only. Subreddit name without the 'r/' prefix (e.g. 'Harvard'). Omit to search across all of Reddit."),
27
10
  sort: z.enum(["top", "hot", "new", "rising", "controversial", "relevance", "comments"])
@@ -51,40 +34,41 @@ export function registerResearchTools(server) {
51
34
  });
52
35
  server.addTool({
53
36
  name: "search_web",
54
- description: "Search the web or a specific community source (Reddit). Pick the source with the `source` field:\n\n" +
37
+ description: "Search the web or Reddit. Pick the source with the `source` field:\n\n" +
55
38
  "- `source: \"web\"` — general web search via Google. Use for news, docs, scholarly references.\n" +
56
39
  "- `source: \"reddit\"` — Reddit-aware search returning posts with score, flair, num_comments, permalink. " +
57
- "Use when the user is asking about community perspectives, subreddit consensus, or 'what do people think about X'.\n\n" +
40
+ "Use when the user is asking about public perspectives, subreddit consensus, or 'what do people think about X'.\n\n" +
58
41
  "Use only the fields relevant to the source you pick. " +
59
42
  "Rate limit: 10/min. Requires API key.",
60
43
  parameters: SearchWebInput,
61
44
  async execute(input) {
62
45
  if (input.source === "reddit") {
63
- const params = {
46
+ const body = {
47
+ source: "reddit",
64
48
  sort: input.sort ?? "top",
65
49
  time_range: input.time_range ?? "year",
66
50
  limit: input.limit ?? 25,
67
51
  };
68
52
  if (input.subreddit)
69
- params.subreddit = input.subreddit;
53
+ body.subreddit = input.subreddit;
70
54
  if (input.query)
71
- params.query = input.query;
72
- const resp = await request("GET", "/api/research/reddit/search", {
55
+ body.query = input.query;
56
+ const resp = await request("POST", "/api/research/search", {
73
57
  auth: true,
74
- params,
58
+ json: body,
75
59
  });
76
60
  return JSON.stringify(await resp.json(), null, 2);
77
61
  }
78
- const resp = await request("GET", "/api/research/search", {
62
+ const resp = await request("POST", "/api/research/search", {
79
63
  auth: true,
80
- params: { query: input.query.trim(), limit: input.limit ?? 10 },
64
+ json: { source: "web", query: input.query.trim(), limit: input.limit ?? 10 },
81
65
  });
82
66
  return JSON.stringify(await resp.json(), null, 2);
83
67
  },
84
68
  });
85
69
  server.addTool({
86
70
  name: "read_webpage",
87
- description: "Fetch a URL and return its content as markdown. Routes automatically based on URL:\n" +
71
+ description: "Read an external URL and return its content as markdown. Routes automatically based on URL:\n" +
88
72
  "- **Reddit threads** (reddit.com/r/{sub}/comments/{id}/...) — returns the post plus top-level threaded comments with scores and authors, via a residential proxy.\n" +
89
73
  "- **Reddit wiki pages** (reddit.com/r/{sub}/wiki/...) — returns the wiki page as markdown with revision metadata.\n" +
90
74
  "- **YouTube videos** — returns title, description, transcript when available.\n" +
@@ -117,11 +101,11 @@ export function registerResearchTools(server) {
117
101
  });
118
102
  server.addTool({
119
103
  name: "search_images",
120
- description: "Search for images to include in articles. Accepts multiple queries for batch lookup. Returns image URLs, titles, dimensions, and licensing info. " +
121
- "Two sources: 'wikimedia' (free, open-licensed images from Wikimedia Commons preferred) and 'google' (broader web images via Google). " +
104
+ description: "Search for images to include in pages. Accepts multiple queries for batch lookup. Returns image URLs, titles, dimensions, and licensing info. " +
105
+ "Three sources: 'google' (broad web images, default), 'unsplash' (high-quality stock photos), and 'wikimedia' (free, open-licensed from Wikimedia Commons). " +
122
106
  "Use descriptive search terms. After searching, call view_images on promising candidates to see what they actually show before using them. " +
123
- "External image URLs are automatically persisted when you publish the article — no extra steps needed.\n\n" +
124
- "## Using images in articles\n\n" +
107
+ "External image URLs are automatically persisted when you publish the page — no extra steps needed.\n\n" +
108
+ "## Using images in pages\n\n" +
125
109
  "Images render as figures with visible captions. The alt text becomes the caption — make it descriptive.\n\n" +
126
110
  "**Syntax:** `![Caption text](url \"position\")`\n\n" +
127
111
  "Position options (in the title/quotes):\n" +
@@ -139,13 +123,13 @@ export function registerResearchTools(server) {
139
123
  "- Bad: `![Logo](url)` — Good: `![The OpenAI logo, a stylized spiral](url)`\n\n" +
140
124
  "**Placement rules:**\n" +
141
125
  "- Place 1-3 images per major section — don't overload\n" +
142
- "- First image should appear near the top, illustrating the article's subject\n" +
126
+ "- First image should appear near the top, illustrating the page's subject\n" +
143
127
  "- Spread images throughout, not clustered together\n" +
144
128
  "- For the infobox hero image, set `infobox.header.image_url` in frontmatter instead\n\n" +
145
129
  "Requires login. Rate limit: 10/min.",
146
130
  parameters: z.object({
147
131
  queries: coerceJson(z.array(z.string()).min(1).max(10)).describe("Image search queries (1-10)"),
148
- source: z.enum(["wikimedia", "google"]).default("wikimedia").describe("Image source: 'wikimedia' (free, open-licensed preferred) or 'google' (broader coverage)"),
132
+ source: z.enum(["wikimedia", "google", "unsplash"]).default("google").describe("Image source: 'google' (broad web images, default), 'unsplash' (high-quality stock photos), or 'wikimedia' (free, open-licensed)"),
149
133
  limit: z.number().default(5).describe("Max results per query (1-10, default 5)"),
150
134
  }),
151
135
  async execute({ queries, source, limit }) {
@@ -186,20 +170,25 @@ export function registerResearchTools(server) {
186
170
  return { content };
187
171
  },
188
172
  });
173
+ // register_sources — GUI citation-bubble handshake. Commented out 2026-04-23 per REV-62.
174
+ // Revive when the GUI citation-bubble flow is re-wired.
175
+ /*
189
176
  server.addTool({
190
- name: "register_sources",
191
- description: "Register sources you plan to cite in your response. Call this BEFORE writing your response text. " +
192
- "In GUI explore sessions this updates the source registry used for citation bubbles. " +
193
- "Use [@key] markers in your response to cite them.",
194
- parameters: z.object({
195
- sources: coerceJson(z.array(z.object({
196
- key: z.string().describe("Citation key — kebab-case, BibTeX-style: {domain}-{title-words}"),
197
- url: z.string().describe("Source URL"),
198
- title: z.string().describe("Source title — include publication name after an em dash when relevant"),
199
- })).min(1)).describe("Sources to register for citation"),
200
- }),
201
- async execute({ sources }) {
202
- return `Registered ${sources.length} source${sources.length === 1 ? "" : "s"}. Use [@key] markers in your response to cite them.`;
203
- },
177
+ name: "register_sources",
178
+ description:
179
+ "Register sources you plan to cite in your response. Call this BEFORE writing your response text. " +
180
+ "In GUI explore sessions this updates the source registry used for citation bubbles. " +
181
+ "Use [@key] markers in your response to cite them.",
182
+ parameters: z.object({
183
+ sources: coerceJson(z.array(z.object({
184
+ key: z.string().describe("Citation key — kebab-case, BibTeX-style: {domain}-{title-words}"),
185
+ url: z.string().describe("Source URL"),
186
+ title: z.string().describe("Source title — include publication name after an em dash when relevant"),
187
+ })).min(1)).describe("Sources to register for citation"),
188
+ }),
189
+ async execute({ sources }) {
190
+ return `Registered ${sources.length} source${sources.length === 1 ? "" : "s"}. Use [@key] markers in your response to cite them.`;
191
+ },
204
192
  });
193
+ */
205
194
  }