@satiyap/confluence-reader-mcp 0.1.3 → 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -3,14 +3,14 @@
3
3
  [![npm version](https://img.shields.io/npm/v/@satiyap/confluence-reader-mcp.svg)](https://www.npmjs.com/package/@satiyap/confluence-reader-mcp)
4
4
  [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
5
5
 
6
- An MCP server that lets AI assistants read Confluence Cloud pages, walk page trees recursively, and diff Confluence content against local documentation.
6
+ An MCP server that lets AI assistants read Confluence Cloud pages as markdown, browse page trees, download image attachments, and diff content against local documentation.
7
7
 
8
8
  ## Features
9
9
 
10
- - Fetch a single Confluence page as plain text
11
- - Recursively fetch entire page trees up to a configurable depth
10
+ - Fetch a single Confluence page as proper GitHub-flavored markdown (headings, tables, lists, code blocks)
11
+ - List child pages for recursive traversal
12
+ - Download image attachments by filename
12
13
  - Compare local content against a Confluence page with a unified diff
13
- - Parallel child-page fetching with error resilience
14
14
  - Supports scoped API tokens with Basic Auth
15
15
 
16
16
  ## Setup
@@ -58,21 +58,28 @@ Restart the MCP host to pick up the new server.
58
58
 
59
59
  ### `confluence.fetch_page`
60
60
 
61
- Fetches a Confluence page by URL and returns its content as text. Optionally recurses into child pages.
61
+ Fetches a single Confluence page by URL and returns its content as markdown. Lists any direct child pages at the bottom so the caller can decide which to fetch next.
62
62
 
63
- | Parameter | Type | Default | Description |
64
- |-----------|------|---------|-------------|
65
- | `url` | string | — | Confluence page URL |
66
- | `depth` | number | 0 | Levels of child pages to fetch recursively |
63
+ | Parameter | Type | Description |
64
+ |-----------|------|-------------|
65
+ | `url` | string | Confluence page URL |
66
+
67
+ ### `confluence.list_children`
68
+
69
+ Lists the direct child pages of a Confluence page without fetching their content. Useful for discovering page structure before fetching individual pages.
70
+
71
+ | Parameter | Type | Description |
72
+ |-----------|------|-------------|
73
+ | `url` | string | Confluence page URL |
67
74
 
68
- ### `confluence.fetch_page_tree`
75
+ ### `confluence.fetch_image`
69
76
 
70
- Fetches a page and all its descendants recursively, up to a given depth. Returns a single document with nested headings.
77
+ Downloads an image attachment from a Confluence page by filename. Returns the image as base64-encoded data.
71
78
 
72
- | Parameter | Type | Default | Description |
73
- |-----------|------|---------|-------------|
74
- | `url` | string | — | Confluence page URL |
75
- | `depth` | number | 1 | How many levels of children to fetch |
79
+ | Parameter | Type | Description |
80
+ |-----------|------|-------------|
81
+ | `url` | string | Confluence page URL |
82
+ | `filename` | string | Attachment filename (e.g. `architecture.png`) |
76
83
 
77
84
  ### `confluence.compare`
78
85
 
@@ -88,56 +88,57 @@ export async function fetchChildPages(cfg, pageId) {
88
88
  return all;
89
89
  }
90
90
  /**
91
- * Recursively fetch a page and its descendants up to the given depth.
92
- *
93
- * For each page it:
94
- * 1. Fetches the full page content via fetchPageById
95
- * 2. Discovers child page IDs via fetchChildPages
96
- * 3. Recurses into each child (in parallel) until depth is exhausted
97
- *
98
- * Children at each level are fetched concurrently with a concurrency
99
- * limit to avoid hammering the API. Pages that fail to load are
100
- * included as stubs with an error message instead of aborting the
101
- * entire tree.
102
- *
103
- * @param cfg - Client configuration
104
- * @param pageId - Root page ID to start from
105
- * @param depth - How many levels of children to fetch (0 = root only)
106
- * @param concurrency - Max parallel requests per level (default 5)
107
- * @returns A tree of PageNode objects
91
+ * Fetch attachments for a Confluence page.
92
+ * Returns all attachments (paginates automatically).
108
93
  */
109
- export async function fetchPageTree(cfg, pageId, depth, concurrency = 5) {
110
- const { storageToText } = await import("./transform.js");
111
- const page = await fetchPageById(cfg, pageId);
112
- const storage = page.body?.storage?.value ?? "";
113
- const content = storage ? storageToText(storage) : "";
114
- const children = [];
115
- if (depth > 0) {
116
- const childPages = await fetchChildPages(cfg, pageId);
117
- // Fetch children in parallel, bounded by concurrency limit
118
- const results = await parallelMap(childPages, (child) => fetchPageTree(cfg, child.id, depth - 1, concurrency).catch((err) => ({
119
- id: child.id,
120
- title: child.title ?? `Page ${child.id}`,
121
- content: `[Error fetching page: ${err.message}]`,
122
- children: [],
123
- })), concurrency);
124
- children.push(...results);
94
+ export async function fetchAttachments(cfg, pageId) {
95
+ const base = buildBase(cfg);
96
+ const all = [];
97
+ let cursor;
98
+ while (true) {
99
+ const url = new URL(`${base}/wiki/api/v2/pages/${pageId}/attachments`);
100
+ url.searchParams.set("limit", "50");
101
+ if (cursor)
102
+ url.searchParams.set("cursor", cursor);
103
+ const res = await fetch(url.toString(), {
104
+ method: "GET",
105
+ headers: {
106
+ ...buildAuthHeaders(cfg),
107
+ Accept: "application/json",
108
+ },
109
+ });
110
+ if (!res.ok) {
111
+ const text = await res.text().catch(() => "");
112
+ throw new Error(`Confluence API error ${res.status}: ${text.slice(0, 500)}`);
113
+ }
114
+ const data = (await res.json());
115
+ all.push(...data.results);
116
+ if (!data._links?.next)
117
+ break;
118
+ const nextUrl = new URL(data._links.next, base);
119
+ cursor = nextUrl.searchParams.get("cursor") ?? undefined;
120
+ if (!cursor)
121
+ break;
125
122
  }
126
- return { id: page.id, title: page.title, content, children };
123
+ return all;
127
124
  }
128
125
  /**
129
- * Run an async mapper over items with a concurrency limit.
126
+ * Download an attachment binary by its download link.
127
+ * Returns the raw Buffer and content type.
130
128
  */
131
- async function parallelMap(items, fn, limit) {
132
- const results = new Array(items.length);
133
- let idx = 0;
134
- async function worker() {
135
- while (idx < items.length) {
136
- const i = idx++;
137
- results[i] = await fn(items[i]);
138
- }
129
+ export async function downloadAttachment(cfg, downloadLink) {
130
+ const base = buildBase(cfg);
131
+ const url = `${base}/wiki${downloadLink}`;
132
+ const res = await fetch(url, {
133
+ method: "GET",
134
+ headers: buildAuthHeaders(cfg),
135
+ redirect: "follow",
136
+ });
137
+ if (!res.ok) {
138
+ const text = await res.text().catch(() => "");
139
+ throw new Error(`Attachment download error ${res.status}: ${text.slice(0, 500)}`);
139
140
  }
140
- const workers = Array.from({ length: Math.min(limit, items.length) }, () => worker());
141
- await Promise.all(workers);
142
- return results;
141
+ const contentType = res.headers.get("content-type") ?? "application/octet-stream";
142
+ const arrayBuffer = await res.arrayBuffer();
143
+ return { buffer: Buffer.from(arrayBuffer), contentType };
143
144
  }
@@ -1,35 +1,94 @@
1
+ import TurndownService from "turndown";
2
+ // @ts-expect-error — no type declarations available
3
+ import { gfm } from "turndown-plugin-gfm";
4
+ const turndown = new TurndownService({
5
+ headingStyle: "atx",
6
+ codeBlockStyle: "fenced",
7
+ bulletListMarker: "-",
8
+ });
9
+ turndown.use(gfm);
1
10
  /**
2
- * Convert Confluence storage HTML to plain text
3
- *
4
- * This is a lightweight HTML-to-text converter that:
5
- * - Strips HTML tags
6
- * - Preserves paragraph and heading breaks
7
- * - Decodes common HTML entities
8
- *
9
- * Note: Not a perfect HTML→Markdown converter; intentionally simple for MCP use.
11
+ * Pre-process Confluence storage format HTML into standard HTML
12
+ * that Turndown can handle. Confluence uses custom XML namespaces
13
+ * (ac:, ri:) that DOM parsers and Turndown don't understand.
14
+ */
15
+ function normalizeConfluenceHtml(html) {
16
+ let out = html;
17
+ // Convert ac:layout-section / ac:layout-cell to divs
18
+ out = out.replace(/<ac:layout-section>/gi, "<div>");
19
+ out = out.replace(/<\/ac:layout-section>/gi, "</div>");
20
+ out = out.replace(/<ac:layout-cell>/gi, "<div>");
21
+ out = out.replace(/<\/ac:layout-cell>/gi, "</div>");
22
+ out = out.replace(/<ac:layout>/gi, "<div>");
23
+ out = out.replace(/<\/ac:layout>/gi, "</div>");
24
+ // Convert ac:structured-macro (panels, code blocks, etc.) to divs
25
+ // Preserve the macro name as a data attribute for potential future use
26
+ out = out.replace(/<ac:structured-macro[^>]*ac:name="code"[^>]*>([\s\S]*?)<\/ac:structured-macro>/gi, (_match, inner) => {
27
+ // Extract plain-text-body for code blocks
28
+ const bodyMatch = inner.match(/<ac:plain-text-body>\s*<!\[CDATA\[([\s\S]*?)\]\]>\s*<\/ac:plain-text-body>/i);
29
+ if (bodyMatch) {
30
+ return `<pre><code>${bodyMatch[1]}</code></pre>`;
31
+ }
32
+ return `<pre><code>${inner.replace(/<[^>]+>/g, "")}</code></pre>`;
33
+ });
34
+ // Convert info/note/warning/tip panels to blockquotes
35
+ out = out.replace(/<ac:structured-macro[^>]*ac:name="(info|note|warning|tip|panel)"[^>]*>([\s\S]*?)<\/ac:structured-macro>/gi, (_match, _type, inner) => {
36
+ const bodyMatch = inner.match(/<ac:rich-text-body>([\s\S]*?)<\/ac:rich-text-body>/i);
37
+ return bodyMatch ? `<blockquote>${bodyMatch[1]}</blockquote>` : `<blockquote>${inner}</blockquote>`;
38
+ });
39
+ // Generic: any remaining ac:structured-macro — unwrap to div
40
+ out = out.replace(/<ac:structured-macro[^>]*>/gi, "<div>");
41
+ out = out.replace(/<\/ac:structured-macro>/gi, "</div>");
42
+ // ac:rich-text-body → div
43
+ out = out.replace(/<ac:rich-text-body>/gi, "<div>");
44
+ out = out.replace(/<\/ac:rich-text-body>/gi, "</div>");
45
+ // ac:plain-text-body with CDATA → pre
46
+ out = out.replace(/<ac:plain-text-body>\s*<!\[CDATA\[([\s\S]*?)\]\]>\s*<\/ac:plain-text-body>/gi, (_match, content) => `<pre>${content}</pre>`);
47
+ out = out.replace(/<ac:plain-text-body>/gi, "<pre>");
48
+ out = out.replace(/<\/ac:plain-text-body>/gi, "</pre>");
49
+ // ac:parameter tags — remove entirely
50
+ out = out.replace(/<ac:parameter[^>]*>[\s\S]*?<\/ac:parameter>/gi, "");
51
+ // ac:image → img tag
52
+ out = out.replace(/<ac:image[^>]*>([\s\S]*?)<\/ac:image>/gi, (_match, inner) => {
53
+ const filenameMatch = inner.match(/ri:filename="([^"]+)"/i);
54
+ const filename = filenameMatch ? filenameMatch[1] : "image";
55
+ return `<img alt="${filename}" src="${filename}" />`;
56
+ });
57
+ // ac:link with ri:page → anchor
58
+ out = out.replace(/<ac:link>([\s\S]*?)<\/ac:link>/gi, (_match, inner) => {
59
+ const pageMatch = inner.match(/ri:content-title="([^"]+)"/i);
60
+ const bodyMatch = inner.match(/<ac:link-body>([\s\S]*?)<\/ac:link-body>/i)
61
+ || inner.match(/<ac:plain-text-link-body>\s*<!\[CDATA\[([\s\S]*?)\]\]>\s*<\/ac:plain-text-link-body>/i);
62
+ const title = pageMatch ? pageMatch[1] : "";
63
+ const text = bodyMatch ? bodyMatch[1].replace(/<[^>]+>/g, "") : title;
64
+ return `<a href="#">${text || title}</a>`;
65
+ });
66
+ // ac:emoticon → remove
67
+ out = out.replace(/<ac:emoticon[^>]*\/>/gi, "");
68
+ // ac:task-list / ac:task / ac:task-body → ul/li
69
+ out = out.replace(/<ac:task-list>/gi, "<ul>");
70
+ out = out.replace(/<\/ac:task-list>/gi, "</ul>");
71
+ out = out.replace(/<ac:task>([\s\S]*?)<\/ac:task>/gi, (_match, inner) => {
72
+ const statusMatch = inner.match(/<ac:task-status>([\s\S]*?)<\/ac:task-status>/i);
73
+ const bodyMatch = inner.match(/<ac:task-body>([\s\S]*?)<\/ac:task-body>/i);
74
+ const checked = statusMatch && statusMatch[1].trim() === "complete";
75
+ const body = bodyMatch ? bodyMatch[1] : inner;
76
+ return `<li>${checked ? "[x] " : "[ ] "}${body}</li>`;
77
+ });
78
+ // Remove any remaining ac:* or ri:* tags but keep their text content
79
+ out = out.replace(/<\/?(?:ac|ri):[^>]*>/gi, "");
80
+ // Clean up CDATA remnants
81
+ out = out.replace(/<!\[CDATA\[/g, "");
82
+ out = out.replace(/\]\]>/g, "");
83
+ return out;
84
+ }
85
+ /**
86
+ * Convert Confluence storage format HTML to GitHub-flavored markdown.
10
87
  *
11
88
  * @param storageHtml - Confluence storage format HTML
12
- * @returns Plain text representation
89
+ * @returns Markdown with headings, tables, lists, code blocks, etc.
13
90
  */
14
- export function storageToText(storageHtml) {
15
- // Minimal, safe-ish conversion:
16
- // - strip tags
17
- // - preserve headings/paragraph-ish breaks
18
- // Not a perfect HTML->MD converter; intentionally lightweight for an MCP tool.
19
- const withBreaks = storageHtml
20
- .replace(/<\/(p|h1|h2|h3|h4|li|tr|div)>/gi, "\n")
21
- .replace(/<br\s*\/?>/gi, "\n");
22
- const stripped = withBreaks.replace(/<[^>]+>/g, "");
23
- const decoded = stripped
24
- .replace(/&nbsp;/g, " ")
25
- .replace(/&amp;/g, "&")
26
- .replace(/&lt;/g, "<")
27
- .replace(/&gt;/g, ">")
28
- .replace(/&quot;/g, "\"")
29
- .replace(/&#39;/g, "'");
30
- return decoded
31
- .split("\n")
32
- .map((l) => l.trim())
33
- .filter(Boolean)
34
- .join("\n");
91
+ export function storageToMarkdown(storageHtml) {
92
+ const normalized = normalizeConfluenceHtml(storageHtml);
93
+ return turndown.turndown(normalized);
35
94
  }
package/dist/index.js CHANGED
@@ -3,12 +3,12 @@ import { z } from "zod";
3
3
  import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
4
4
  import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
5
5
  import { extractConfluencePageId } from "./confluence/url.js";
6
- import { fetchPageById, fetchPageTree, buildAuthHeaders, buildBase } from "./confluence/client.js";
7
- import { storageToText } from "./confluence/transform.js";
6
+ import { fetchPageById, fetchChildPages, fetchAttachments, downloadAttachment, buildAuthHeaders, buildBase } from "./confluence/client.js";
7
+ import { storageToMarkdown } from "./confluence/transform.js";
8
8
  import { generateUnifiedDiff, generateDiffStats } from "./compare/diff.js";
9
9
  const server = new McpServer({
10
10
  name: "confluence-reader-mcp",
11
- version: "0.1.2"
11
+ version: "0.2.0"
12
12
  });
13
13
  function getEnv(name) {
14
14
  const v = process.env[name];
@@ -39,64 +39,101 @@ function validateEnvironment() {
39
39
  process.exit(1);
40
40
  }
41
41
  }
42
- server.tool("confluence.fetch_page", "Fetch a Confluence page and return it as markdown. Optionally recurse into child pages.", {
42
+ /** Build config from env vars */
43
+ function getCfg() {
44
+ return {
45
+ token: getEnv("CONFLUENCE_TOKEN"),
46
+ email: getEnv("CONFLUENCE_EMAIL"),
47
+ cloudId: getEnv("CONFLUENCE_CLOUD_ID"),
48
+ baseUrl: getEnv("CONFLUENCE_BASE_URL"),
49
+ };
50
+ }
51
+ server.tool("confluence.fetch_page", "Fetch a Confluence page as markdown. Returns the page content and lists any direct child pages so the caller can decide which children to fetch next.", {
43
52
  url: z.string().describe("Confluence page URL"),
44
- depth: z.number().optional().default(0).describe("Levels of child pages to fetch recursively (default: 0, root page only)")
45
- }, async ({ url, depth }) => {
46
- const token = getEnv("CONFLUENCE_TOKEN");
47
- const email = getEnv("CONFLUENCE_EMAIL");
48
- const cloudId = getEnv("CONFLUENCE_CLOUD_ID");
49
- const baseUrl = getEnv("CONFLUENCE_BASE_URL");
50
- const cfg = { token, email, cloudId, baseUrl };
53
+ }, async ({ url }) => {
54
+ const cfg = getCfg();
51
55
  const pageId = extractConfluencePageId(url);
52
- const tree = await fetchPageTree(cfg, pageId, depth);
53
- function renderNode(node, level) {
54
- const heading = "#".repeat(Math.min(level + 1, 6));
55
- const parts = [`${heading} ${node.title}`, node.content];
56
- for (const child of node.children) {
57
- parts.push(renderNode(child, level + 1));
58
- }
59
- return parts.join("\n\n");
60
- }
56
+ const page = await fetchPageById(cfg, pageId);
57
+ const children = await fetchChildPages(cfg, pageId);
58
+ const storage = page.body?.storage?.value ?? "";
59
+ const markdown = storage ? storageToMarkdown(storage) : "";
60
+ const childList = children.length > 0
61
+ ? `\n\n---\n## Child Pages\n${children.map(c => `- ${c.title} (id: ${c.id})`).join("\n")}`
62
+ : "";
61
63
  return {
62
- content: [{ type: "text", text: renderNode(tree, 0) }]
64
+ content: [{
65
+ type: "text",
66
+ text: `# ${page.title}\n\n${markdown}${childList}`
67
+ }]
63
68
  };
64
69
  });
65
- server.tool("confluence.fetch_page_tree", "Fetch a Confluence page and all its child pages recursively up to a specified depth.", {
70
+ server.tool("confluence.list_children", "List the direct child pages of a Confluence page without fetching their content. Useful for discovering page structure before fetching individual pages.", {
71
+ url: z.string().describe("Confluence page URL")
72
+ }, async ({ url }) => {
73
+ const cfg = getCfg();
74
+ const pageId = extractConfluencePageId(url);
75
+ const children = await fetchChildPages(cfg, pageId);
76
+ const lines = children.map(c => `- ${c.title} (id: ${c.id})`);
77
+ const text = lines.length > 0
78
+ ? `Found ${lines.length} child page(s):\n\n${lines.join("\n")}`
79
+ : "No child pages found.";
80
+ return { content: [{ type: "text", text }] };
81
+ });
82
+ server.tool("confluence.fetch_image", "Download an image attachment from a Confluence page by filename. Returns the image as base64-encoded data.", {
66
83
  url: z.string().describe("Confluence page URL"),
67
- depth: z.number().optional().default(1).describe("How many levels deep to fetch child pages (default: 1)")
68
- }, async ({ url, depth }) => {
69
- const token = getEnv("CONFLUENCE_TOKEN");
70
- const email = getEnv("CONFLUENCE_EMAIL");
71
- const cloudId = getEnv("CONFLUENCE_CLOUD_ID");
72
- const baseUrl = getEnv("CONFLUENCE_BASE_URL");
73
- const cfg = { token, email, cloudId, baseUrl };
84
+ filename: z.string().describe("Attachment filename (e.g. 'architecture.png')")
85
+ }, async ({ url, filename }) => {
86
+ const cfg = getCfg();
74
87
  const pageId = extractConfluencePageId(url);
75
- const tree = await fetchPageTree(cfg, pageId, depth);
76
- function renderTree(node, level) {
77
- const heading = "#".repeat(Math.min(level + 1, 6));
78
- const parts = [`${heading} ${node.title}`, node.content];
79
- for (const child of node.children) {
80
- parts.push(renderTree(child, level + 1));
81
- }
82
- return parts.join("\n\n");
88
+ const attachments = await fetchAttachments(cfg, pageId);
89
+ const match = attachments.find(a => a.title.toLowerCase() === filename.toLowerCase());
90
+ if (!match) {
91
+ const available = attachments.map(a => a.title).join(", ");
92
+ return {
93
+ content: [{
94
+ type: "text",
95
+ text: `Attachment "${filename}" not found. Available: ${available || "none"}`
96
+ }]
97
+ };
83
98
  }
99
+ const downloadLink = match.downloadLink ?? match._links?.download;
100
+ if (!downloadLink) {
101
+ return {
102
+ content: [{
103
+ type: "text",
104
+ text: `No download link available for "${filename}".`
105
+ }]
106
+ };
107
+ }
108
+ const { buffer, contentType } = await downloadAttachment(cfg, downloadLink);
109
+ const base64 = buffer.toString("base64");
110
+ // Return as base64 image content
111
+ if (contentType.startsWith("image/")) {
112
+ return {
113
+ content: [{
114
+ type: "image",
115
+ data: base64,
116
+ mimeType: contentType,
117
+ }]
118
+ };
119
+ }
120
+ // Non-image attachment — return as base64 text
84
121
  return {
85
- content: [{ type: "text", text: renderTree(tree, 0) }]
122
+ content: [{
123
+ type: "text",
124
+ text: `Downloaded "${filename}" (${contentType}, ${buffer.length} bytes).\nBase64: ${base64.slice(0, 200)}...`
125
+ }]
86
126
  };
87
127
  });
88
128
  server.tool("confluence.compare", "Compare a local markdown file or string with a Confluence page and show the differences.", {
89
129
  url: z.string().describe("Confluence page URL"),
90
130
  localContent: z.string().describe("Local markdown content to compare against")
91
131
  }, async ({ url, localContent }) => {
92
- const token = getEnv("CONFLUENCE_TOKEN");
93
- const email = getEnv("CONFLUENCE_EMAIL");
94
- const cloudId = getEnv("CONFLUENCE_CLOUD_ID");
95
- const baseUrl = getEnv("CONFLUENCE_BASE_URL");
132
+ const cfg = getCfg();
96
133
  const pageId = extractConfluencePageId(url);
97
- const page = await fetchPageById({ token, email, cloudId, baseUrl }, pageId);
134
+ const page = await fetchPageById(cfg, pageId);
98
135
  const storage = page.body?.storage?.value ?? "";
99
- const confluenceMarkdown = storage ? storageToText(storage) : "";
136
+ const confluenceMarkdown = storage ? storageToMarkdown(storage) : "";
100
137
  const diff = generateUnifiedDiff(confluenceMarkdown.trim(), localContent.trim(), `a/confluence/${page.title}`, `b/local`);
101
138
  const stats = generateDiffStats(confluenceMarkdown.trim(), localContent.trim());
102
139
  const result = {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@satiyap/confluence-reader-mcp",
3
- "version": "0.1.3",
3
+ "version": "0.2.0",
4
4
  "description": "MCP server for fetching and comparing Confluence documentation with local files",
5
5
  "author": "satiyap",
6
6
  "license": "MIT",
@@ -32,10 +32,13 @@
32
32
  },
33
33
  "dependencies": {
34
34
  "@modelcontextprotocol/sdk": "^1.0.0",
35
+ "turndown": "^7.2.2",
36
+ "turndown-plugin-gfm": "^1.0.2",
35
37
  "zod": "^3.25.0"
36
38
  },
37
39
  "devDependencies": {
38
40
  "@types/node": "^22.0.0",
41
+ "@types/turndown": "^5.0.6",
39
42
  "tsx": "^4.0.0",
40
43
  "typescript": "^5.0.0"
41
44
  },