akm-cli 0.7.1 → 0.7.3
This diff compares the published contents of two package versions as they appear in their public registry. It is provided for informational purposes only.
- package/CHANGELOG.md +35 -0
- package/dist/cli.js +62 -16
- package/dist/commands/history.js +2 -7
- package/dist/commands/info.js +2 -2
- package/dist/commands/installed-stashes.js +45 -1
- package/dist/commands/search.js +2 -2
- package/dist/commands/show.js +4 -19
- package/dist/commands/source-add.js +1 -1
- package/dist/core/common.js +16 -1
- package/dist/core/config.js +18 -3
- package/dist/indexer/db-search.js +33 -39
- package/dist/indexer/db.js +51 -1
- package/dist/indexer/graph-extraction.js +5 -3
- package/dist/indexer/indexer.js +334 -121
- package/dist/indexer/manifest.js +18 -23
- package/dist/indexer/memory-inference.js +47 -58
- package/dist/indexer/metadata.js +253 -21
- package/dist/indexer/search-source.js +11 -5
- package/dist/llm/client.js +61 -1
- package/dist/llm/embedder.js +8 -5
- package/dist/llm/embedders/local.js +8 -2
- package/dist/llm/embedders/remote.js +4 -2
- package/dist/llm/graph-extract.js +4 -4
- package/dist/llm/memory-infer.js +61 -33
- package/dist/llm/metadata-enhance.js +2 -2
- package/dist/output/cli-hints.js +5 -2
- package/dist/output/renderers.js +22 -49
- package/dist/registry/build-index.js +13 -18
- package/dist/setup/setup.js +238 -96
- package/dist/sources/providers/git.js +14 -2
- package/dist/sources/providers/website.js +4 -460
- package/dist/sources/website-ingest.js +470 -0
- package/dist/wiki/wiki.js +11 -1
- package/dist/workflows/parser.js +19 -4
- package/dist/workflows/runs.js +3 -3
- package/docs/README.md +10 -3
- package/docs/migration/release-notes/0.7.0.md +22 -0
- package/package.json +5 -2
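
The headline change in this range is the website provider refactor: nearly all of the ~460-line scraper in `package/dist/sources/providers/website.js` moves into the new shared module `package/dist/sources/website-ingest.js` (+470), leaving the provider as a thin adapter. A minimal sketch of driving the extracted module directly, assuming its exports keep the semantics visible in the diff below; the import specifier and config shape here are illustrative, not confirmed by the diff:

```js
import {
    ensureWebsiteMirror,
    getWebsiteCachePaths,
    validateWebsiteUrl,
} from "./dist/sources/website-ingest.js"; // hypothetical specifier

// Illustrative source config; real configs come from akm-cli's source registry.
const config = {
    url: "https://example.com/docs",
    options: { maxPages: 50, maxDepth: 3 },
};

// Throws on empty or non-http(s) URLs and on embedded credentials.
const normalizedUrl = validateWebsiteUrl(config.url);

// Re-scrapes when the cached snapshot is older than the refresh interval;
// on failure it falls back to a stale-but-usable snapshot when one exists.
await ensureWebsiteMirror(config, { requireStashDir: true });

// The FTS5 indexer walks the scraped markdown in this stash directory.
console.log(getWebsiteCachePaths(normalizedUrl).stashDir);
```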
package/dist/sources/providers/website.js

@@ -1,40 +1,7 @@
-import { createHash } from "node:crypto";
-import fs from "node:fs";
-import path from "node:path";
-import { fetchWithRetry, ResponseTooLargeError, readBodyWithByteCap } from "../../core/common";
-import { ConfigError, UsageError } from "../../core/errors";
-import { getRegistryIndexCacheDir } from "../../core/paths";
-import { warn } from "../../core/warn";
 import { registerSourceProvider } from "../provider-factory";
-import {
-/** Refresh website snapshots every 12 hours to balance freshness with scraping load. */
-const CACHE_REFRESH_INTERVAL_MS = 12 * 60 * 60 * 1000;
-/** Allow up to 7 days of stale snapshots when refresh fails so search remains available during outages. */
-const CACHE_STALE_MS = 7 * 24 * 60 * 60 * 1000;
-/** Allow limited breadth-first expansion without letting the crawl queue grow unbounded. */
-const QUEUE_EXPANSION_FACTOR = 5;
-const MAX_PAGES_DEFAULT = 50;
-const MAX_DEPTH_DEFAULT = 3;
+import { ensureWebsiteMirror, getWebsiteCachePaths, validateWebsiteUrl } from "../website-ingest";
 /**
- *
- * almost never useful as agent knowledge sources and a runaway server
- * streaming tens of megabytes would blow memory with no upside.
- */
-const WEBSITE_PAGE_BYTE_CAP = 5 * 1024 * 1024;
-/**
- * Wall-clock cap for a full crawl (10 minutes). With per-request timeouts
- * of 15s and a `maxPages` default of 50, an unresponsive site could
- * otherwise stall `akm add` for 12.5 minutes with no feedback. Cap the
- * whole crawl and return what we have when time runs out.
- */
-const WEBSITE_CRAWL_WALL_CLOCK_MS = 10 * 60 * 1000;
-/**
- * Website source provider — scrapes pages into a local mirror so the FTS5
- * indexer can walk them. Implements the v1 {@link SourceProvider} interface
- * (spec §2.1): `{ name, kind, init, path, sync }`.
- *
- * Reading is the indexer's job — this class doesn't implement `search` or
- * `show`.
+ * Website source provider — thin adapter over the shared website ingest module.
  */
 class WebsiteSourceProvider {
     kind = "website";
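
The hunk above removes the crawl-tuning constants along with the imports that served them; per the file list they now live in `website-ingest.js`. For reference, a minimal sketch of the freshness policy those constants encode, assuming an `isExpired(mtimeMs, intervalMs)` helper shaped like the calls in the removed `ensureWebsiteMirror` shown in the next hunk:

```js
const CACHE_REFRESH_INTERVAL_MS = 12 * 60 * 60 * 1000; // re-scrape after 12 hours
const CACHE_STALE_MS = 7 * 24 * 60 * 60 * 1000; // tolerate 7-day-old snapshots on failure

// Assumed shape: the diff calls isExpired but never shows its body.
const isExpired = (mtimeMs, intervalMs) => Date.now() - mtimeMs > intervalMs;

// The decision ensureWebsiteMirror derives from the manifest's mtime:
function snapshotState(manifestMtimeMs) {
    if (!isExpired(manifestMtimeMs, CACHE_REFRESH_INTERVAL_MS))
        return "fresh"; // reuse the mirror without refetching
    if (!isExpired(manifestMtimeMs, CACHE_STALE_MS))
        return "stale"; // refresh, but fall back to this copy if the crawl fails
    return "expired"; // refresh must succeed
}
```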
@@ -50,134 +17,10 @@ class WebsiteSourceProvider {
         // URL validation already happens in the constructor; nothing else to do.
     }
     path() {
-        return
+        return getWebsiteCachePaths(this.#url).stashDir;
     }
     async sync() {
         await ensureWebsiteMirror(this.#config, { requireStashDir: true });
     }
 }
 registerSourceProvider("website", (config) => new WebsiteSourceProvider(config));
-function getCachePaths(siteUrl) {
-    const key = createHash("sha256").update(normalizeSiteUrl(siteUrl)).digest("hex").slice(0, 16);
-    const rootDir = path.join(getRegistryIndexCacheDir(), `website-${key}`);
-    return {
-        rootDir,
-        stashDir: path.join(rootDir, "stash"),
-        manifestPath: path.join(rootDir, "manifest.json"),
-    };
-}
-async function ensureWebsiteMirror(config, options) {
-    const rawUrl = config.url ?? "";
-    const normalizedUrl = validateWebsiteUrl(rawUrl);
-    const cachePaths = getCachePaths(normalizedUrl);
-    const requireStashDir = options?.requireStashDir === true;
-    const force = options?.force === true;
-    let mtime = 0;
-    try {
-        mtime = fs.statSync(cachePaths.manifestPath).mtimeMs;
-    }
-    catch {
-        /* no cached manifest */
-    }
-    if (!force &&
-        mtime &&
-        !isExpired(mtime, CACHE_REFRESH_INTERVAL_MS) &&
-        (!requireStashDir || hasExtractedSite(cachePaths.stashDir))) {
-        return cachePaths;
-    }
-    try {
-        fs.mkdirSync(cachePaths.rootDir, { recursive: true });
-        await scrapeWebsiteToStash(normalizedUrl, cachePaths.stashDir, {
-            maxPages: coercePositiveInt(config.options?.maxPages, MAX_PAGES_DEFAULT),
-            maxDepth: coercePositiveInt(config.options?.maxDepth, MAX_DEPTH_DEFAULT),
-        });
-        fs.writeFileSync(cachePaths.manifestPath, `${JSON.stringify({ url: normalizedUrl, fetchedAt: new Date().toISOString() }, null, 2)}\n`, { encoding: "utf8", mode: 0o600 });
-        return cachePaths;
-    }
-    catch (err) {
-        if (mtime && !isExpired(mtime, CACHE_STALE_MS) && (!requireStashDir || hasExtractedSite(cachePaths.stashDir))) {
-            return cachePaths;
-        }
-        throw err;
-    }
-}
-function hasExtractedSite(stashDir) {
-    try {
-        const knowledgeDir = path.join(stashDir, "knowledge");
-        if (!fs.statSync(stashDir).isDirectory() || !fs.statSync(knowledgeDir).isDirectory())
-            return false;
-        // Check top-level and one level of subdirectories for .md files
-        for (const entry of fs.readdirSync(knowledgeDir, { withFileTypes: true })) {
-            if (entry.isFile() && entry.name.endsWith(".md"))
-                return true;
-            if (entry.isDirectory()) {
-                const subEntries = fs.readdirSync(path.join(knowledgeDir, entry.name));
-                if (subEntries.some((e) => e.endsWith(".md")))
-                    return true;
-            }
-        }
-        return false;
-    }
-    catch {
-        return false;
-    }
-}
-async function scrapeWebsiteToStash(startUrl, stashDir, options) {
-    const pages = await crawlWebsite(startUrl, options);
-    if (pages.length === 0) {
-        throw new Error(`No content could be scraped from ${startUrl}`);
-    }
-    fs.rmSync(stashDir, { recursive: true, force: true });
-    const knowledgeDir = path.join(stashDir, "knowledge");
-    fs.mkdirSync(knowledgeDir, { recursive: true });
-    const usedPaths = new Set();
-    for (const page of pages) {
-        const relPath = urlToRelativePath(page.url);
-        const uniquePath = uniqueSlug(relPath, usedPaths);
-        const filePath = path.join(knowledgeDir, `${uniquePath}.md`);
-        const dir = path.dirname(filePath);
-        if (dir !== knowledgeDir)
-            fs.mkdirSync(dir, { recursive: true });
-        const slug = uniquePath.split("/").pop() ?? "index";
-        fs.writeFileSync(filePath, buildMarkdownSnapshot(page, slug), "utf8");
-    }
-}
-async function crawlWebsite(startUrl, options) {
-    const start = new URL(normalizeSiteUrl(startUrl));
-    const allowedOrigin = start.origin;
-    const queue = [{ url: start.toString(), depth: 0 }];
-    const visited = new Set();
-    const pages = [];
-    const deadline = Date.now() + WEBSITE_CRAWL_WALL_CLOCK_MS;
-    while (queue.length > 0 && pages.length < options.maxPages) {
-        if (Date.now() > deadline)
-            break;
-        const next = queue.shift();
-        if (!next)
-            break;
-        const normalized = normalizeCrawlUrl(next.url);
-        if (!normalized || visited.has(normalized))
-            continue;
-        visited.add(normalized);
-        const fetched = await fetchWebsitePage(normalized);
-        if (!fetched)
-            continue;
-        pages.push(fetched.page);
-        if (next.depth >= options.maxDepth)
-            continue;
-        for (const link of fetched.links) {
-            if (queue.length + pages.length >= options.maxPages * QUEUE_EXPANSION_FACTOR)
-                break;
-            if (link.origin !== allowedOrigin)
-                continue;
-            const candidate = normalizeCrawlUrl(link.toString());
-            if (!candidate || visited.has(candidate) || isAssetLikePath(link.pathname))
-                continue;
-            queue.push({ url: candidate, depth: next.depth + 1 });
-        }
-    }
-    if (Date.now() > deadline) {
-        warn("[akm] website crawl stopped at the %ds wall-clock cap with %d/%d pages collected from %s.", WEBSITE_CRAWL_WALL_CLOCK_MS / 1000, pages.length, options.maxPages, startUrl);
-    }
-    return pages;
-}
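
To make the crawl bounds above concrete: with the removed defaults, the loop visits at most `maxPages` pages, caps queued-plus-collected URLs at five times that, and abandons the crawl after ten minutes of wall-clock time. The arithmetic, using the constants from this diff (the 15-second per-request timeout comes from `fetchWebsitePage` in the next hunk):

```js
const MAX_PAGES_DEFAULT = 50;
const QUEUE_EXPANSION_FACTOR = 5;
const WEBSITE_CRAWL_WALL_CLOCK_MS = 10 * 60 * 1000;
const PER_REQUEST_TIMEOUT_MS = 15_000;

// Ceiling on queue.length + pages.length during link expansion.
const queueCeiling = MAX_PAGES_DEFAULT * QUEUE_EXPANSION_FACTOR; // 250

// Worst case without the wall-clock cap: every request runs to its timeout.
const worstCaseMs = MAX_PAGES_DEFAULT * PER_REQUEST_TIMEOUT_MS; // 750_000 ms = 12.5 min

console.log(worstCaseMs > WEBSITE_CRAWL_WALL_CLOCK_MS); // true, hence the 10-minute cap
```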
@@ -184,69 +26,0 @@
-async function fetchWebsitePage(pageUrl) {
-    const response = await fetchWithRetry(pageUrl, {
-        headers: {
-            Accept: "text/html, text/markdown, text/plain;q=0.9, application/xhtml+xml;q=0.8",
-            "User-Agent": "akm-cli website provider",
-        },
-    }, { timeout: 15_000, retries: 1 });
-    if (!response.ok) {
-        if (response.status === 404)
-            return null;
-        throw new Error(`Failed to fetch website content (${response.status}) from ${pageUrl}`);
-    }
-    const contentType = response.headers.get("content-type")?.toLowerCase() ?? "";
-    let body;
-    try {
-        body = await readBodyWithByteCap(response, WEBSITE_PAGE_BYTE_CAP);
-    }
-    catch (err) {
-        if (err instanceof ResponseTooLargeError) {
-            // Skip oversized pages rather than aborting the whole crawl.
-            return null;
-        }
-        throw err;
-    }
-    const finalUrl = normalizeCrawlUrl(response.url || pageUrl) ?? pageUrl;
-    if (contentType.includes("text/html") || contentType.includes("application/xhtml+xml") || looksLikeMarkup(body)) {
-        const title = extractHtmlTitle(body) || new URL(finalUrl).hostname;
-        return {
-            page: {
-                url: finalUrl,
-                title,
-                markdown: htmlToMarkdown(body, finalUrl),
-            },
-            links: extractSameDocumentLinks(body, finalUrl),
-        };
-    }
-    return {
-        page: {
-            url: finalUrl,
-            title: extractTextTitle(body) || new URL(finalUrl).hostname,
-            markdown: body.trim(),
-        },
-        links: [],
-    };
-}
-function buildMarkdownSnapshot(page, slug) {
-    const title = sanitizeString(page.title, 200) || slug;
-    const description = sanitizeString(`Snapshot of ${page.url}`, 500);
-    const host = sanitizeString(new URL(page.url).hostname, 120);
-    const content = page.markdown.trim() || `Source: ${page.url}`;
-    return [
-        "---",
-        `name: ${JSON.stringify(slug)}`,
-        `description: ${JSON.stringify(description)}`,
-        `sourceUrl: ${JSON.stringify(page.url)}`,
-        `title: ${JSON.stringify(title)}`,
-        "tags:",
-        `  - ${JSON.stringify("website")}`,
-        `  - ${JSON.stringify(host)}`,
-        "---",
-        "",
-        `# ${title}`,
-        "",
-        `Source: ${page.url}`,
-        "",
-        content,
-        "",
-    ].join("\n");
-}
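
`buildMarkdownSnapshot` above renders each crawled page as a markdown file with a YAML frontmatter block. A hypothetical output for a page at https://example.com/docs/guide with slug "guide" and title "Guide", assuming `sanitizeString` (imported from elsewhere, its body not shown in this diff) passes these short strings through unchanged:

```
---
name: "guide"
description: "Snapshot of https://example.com/docs/guide"
sourceUrl: "https://example.com/docs/guide"
title: "Guide"
tags:
  - "website"
  - "example.com"
---

# Guide

Source: https://example.com/docs/guide

...converted page content...
```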
@@ -253,85 +26,0 @@
-function validateWebsiteUrl(rawUrl) {
-    return validateWebsiteUrlWithError(rawUrl, ConfigError);
-}
-function validateWebsiteInputUrl(rawUrl) {
-    return validateWebsiteUrlWithError(rawUrl, UsageError);
-}
-function validateWebsiteUrlWithError(rawUrl, ErrorType) {
-    if (!rawUrl) {
-        throw new ErrorType("Website provider requires a URL");
-    }
-    let parsed;
-    try {
-        parsed = new URL(rawUrl);
-    }
-    catch {
-        throw new ErrorType(`Website URL is not valid: "${rawUrl}"`);
-    }
-    if (parsed.protocol !== "http:" && parsed.protocol !== "https:") {
-        throw new ErrorType(`Website URL must use http:// or https://, got "${parsed.protocol}" in "${rawUrl}"`);
-    }
-    if (parsed.username || parsed.password) {
-        throw new ErrorType("Website URL must not contain embedded credentials");
-    }
-    parsed.hash = "";
-    return normalizeSiteUrl(parsed.toString());
-}
-function normalizeSiteUrl(rawUrl) {
-    const parsed = new URL(rawUrl);
-    parsed.hash = "";
-    if (parsed.pathname !== "/" && parsed.pathname.endsWith("/")) {
-        parsed.pathname = parsed.pathname.replace(/\/+$/, "");
-    }
-    return parsed.toString();
-}
-function normalizeCrawlUrl(rawUrl) {
-    try {
-        const parsed = new URL(rawUrl);
-        if (parsed.protocol !== "http:" && parsed.protocol !== "https:")
-            return null;
-        parsed.hash = "";
-        if (parsed.pathname !== "/" && parsed.pathname.endsWith("/")) {
-            parsed.pathname = parsed.pathname.replace(/\/+$/, "");
-        }
-        return parsed.toString();
-    }
-    catch {
-        return null;
-    }
-}
-/** Convert a page URL into a relative file path preserving the URL hierarchy.
- * e.g. https://example.com/docs/guide → docs/guide
- *      https://example.com/ → index
- */
-function urlToRelativePath(rawUrl) {
-    const parsed = new URL(rawUrl);
-    const segments = parsed.pathname
-        .split("/")
-        .filter(Boolean)
-        .map((segment) => slugifySegment(segment))
-        .filter(Boolean);
-    if (parsed.search) {
-        const querySuffix = slugifySegment(parsed.search.slice(1));
-        if (querySuffix && segments.length > 0) {
-            segments[segments.length - 1] = `${segments[segments.length - 1]}_${querySuffix}`;
-        }
-    }
-    return segments.length > 0 ? segments.join("/") : "index";
-}
-function slugifySegment(value) {
-    return sanitizeString(value, 200)
-        .toLowerCase()
-        .replace(/[^a-z0-9._-]+/g, "-")
-        .replace(/^-+|-+$/g, "");
-}
-function uniqueSlug(base, used) {
-    const seed = base || "website";
-    let candidate = seed;
-    let i = 2;
-    while (used.has(candidate)) {
-        candidate = `${seed}-${i}`;
-        i += 1;
-    }
-    used.add(candidate);
-    return candidate;
-}
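
The helpers above decide where each page lands inside the stash. A self-contained restatement of the mapping with example results (`sanitizeString`'s body is not in this diff; here it is assumed to just trim and truncate); `uniqueSlug` then adds `-2`, `-3`, ... suffixes on collisions:

```js
// Assumed stand-in for the imported sanitizeString helper.
const sanitizeString = (value, maxLength) => String(value).trim().slice(0, maxLength);

function slugifySegment(value) {
    return sanitizeString(value, 200)
        .toLowerCase()
        .replace(/[^a-z0-9._-]+/g, "-")
        .replace(/^-+|-+$/g, "");
}

function urlToRelativePath(rawUrl) {
    const parsed = new URL(rawUrl);
    const segments = parsed.pathname
        .split("/")
        .filter(Boolean)
        .map(slugifySegment)
        .filter(Boolean);
    if (parsed.search) {
        // Query strings fold into the final segment so distinct pages stay distinct.
        const querySuffix = slugifySegment(parsed.search.slice(1));
        if (querySuffix && segments.length > 0)
            segments[segments.length - 1] += `_${querySuffix}`;
    }
    return segments.length > 0 ? segments.join("/") : "index";
}

console.log(urlToRelativePath("https://example.com/docs/Guide")); // "docs/guide"
console.log(urlToRelativePath("https://example.com/"));           // "index"
console.log(urlToRelativePath("https://example.com/docs?page=2")); // "docs_page-2"
```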
@@ -338,100 +26,0 @@
-function coercePositiveInt(value, fallback) {
-    if (typeof value === "number" && Number.isInteger(value) && value > 0)
-        return value;
-    if (typeof value === "string") {
-        const parsed = Number.parseInt(value, 10);
-        if (Number.isInteger(parsed) && parsed > 0)
-            return parsed;
-    }
-    return fallback;
-}
-function looksLikeMarkup(body) {
-    return /<html[\s>]|<body[\s>]|<\/[a-z][\w:-]*>/i.test(body);
-}
-function extractHtmlTitle(html) {
-    const title = html.match(/<title[^>]*>([\s\S]*?)<\/title>/i)?.[1];
-    if (title)
-        return decodeHtmlEntities(stripTags(title)).trim();
-    const h1 = html.match(/<h1[^>]*>([\s\S]*?)<\/h1>/i)?.[1];
-    if (h1)
-        return decodeHtmlEntities(stripTags(h1)).trim();
-    return undefined;
-}
-function extractTextTitle(text) {
-    for (const line of text.split(/\r?\n/)) {
-        const trimmed = line.trim();
-        if (!trimmed)
-            continue;
-        if (trimmed.startsWith("#"))
-            return trimmed.replace(/^#+\s*/, "");
-        return trimmed.slice(0, 120);
-    }
-    return undefined;
-}
-function extractSameDocumentLinks(html, pageUrl) {
-    const links = [];
-    const hrefPattern = /<a\b[^>]*href\s*=\s*(['"])(.*?)\1[^>]*>/gi;
-    for (const match of html.matchAll(hrefPattern)) {
-        const href = match[2]?.trim();
-        if (!href || href.startsWith("#"))
-            continue;
-        try {
-            const resolved = new URL(href, pageUrl);
-            if (!isSafeLinkUrl(resolved))
-                continue;
-            links.push(resolved);
-        }
-        catch {
-            /* ignore malformed links */
-        }
-    }
-    return links;
-}
-function htmlToMarkdown(html, pageUrl) {
-    let text = html;
-    text = stripDangerousBlockTag(text, "script");
-    text = stripDangerousBlockTag(text, "style");
-    text = stripDangerousBlockTag(text, "noscript");
-    text = stripDangerousBlockTag(text, "template");
-    text = text.replace(/<pre\b[^>]*><code\b[^>]*>([\s\S]*?)<\/code><\/pre>/gi, (_match, code) => {
-        const decoded = decodeHtmlEntities(stripTags(code)).trim();
-        return decoded ? `\n\n\`\`\`\n${decoded}\n\`\`\`\n\n` : "\n\n";
-    });
-    text = text.replace(/<code\b[^>]*>([\s\S]*?)<\/code>/gi, (_match, code) => {
-        const decoded = decodeHtmlEntities(stripTags(code)).trim();
-        return decoded ? `\`${decoded}\`` : "";
-    });
-    text = text.replace(/<a\b[^>]*href\s*=\s*(['"])(.*?)\1[^>]*>([\s\S]*?)<\/a>/gi, (_match, _q, href, body) => {
-        const label = decodeHtmlEntities(stripTags(body)).trim();
-        if (!label)
-            return "";
-        try {
-            const resolved = new URL(href, pageUrl);
-            if (!isSafeLinkUrl(resolved))
-                return label;
-            return `[${label}](${resolved})`;
-        }
-        catch {
-            return label;
-        }
-    });
-    text = text.replace(/<h([1-6])\b[^>]*>([\s\S]*?)<\/h\1>/gi, (_match, level, body) => {
-        const heading = decodeHtmlEntities(stripTags(body)).trim();
-        return heading ? `\n\n${"#".repeat(Number(level))} ${heading}\n\n` : "\n\n";
-    });
-    text = text.replace(/<li\b[^>]*>([\s\S]*?)<\/li>/gi, (_match, body) => {
-        const item = decodeHtmlEntities(stripTags(body)).trim();
-        return item ? `\n- ${item}` : "";
-    });
-    text = text.replace(/<(p|div|section|article|main|header|footer|blockquote|table|tr)\b[^>]*>/gi, "\n\n");
-    text = text.replace(/<\/(p|div|section|article|main|header|footer|blockquote|table|tr)>/gi, "\n\n");
-    text = text.replace(/<br\s*\/?>/gi, "\n");
-    text = text.replace(/<\/?(ul|ol)\b[^>]*>/gi, "\n");
-    text = decodeHtmlEntities(stripTags(text));
-    text = text
-        .replace(/\r/g, "")
-        .replace(/[ \t]+\n/g, "\n")
-        .replace(/\n{3,}/g, "\n\n")
-        .trim();
-    return text;
-}
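
`htmlToMarkdown` above is a regex pipeline rather than a real HTML parser, so its output is approximate by design. A hypothetical input and the output the rules above would produce for a pageUrl of https://example.com/guide, modulo incidental whitespace from the stripped tags:

```js
// Input page (hypothetical):
const html = `<body>
  <h2>Install</h2>
  <p>Run <code>npm i akm-cli</code>, then see the <a href="/docs">docs</a>.</p>
  <ul><li>Fast</li><li>Local</li></ul>
</body>`;

// Expected result of htmlToMarkdown(html, "https://example.com/guide"):
const expected = [
    "## Install",
    "",
    "Run `npm i akm-cli`, then see the [docs](https://example.com/docs).",
    "",
    "- Fast",
    "- Local",
].join("\n");
```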
@@ -438,46 +27,1 @@
-function stripTags(value) {
-    return value.replace(/<[^>]+>/g, " ");
-}
-function decodeHtmlEntities(value) {
-    const namedEntities = {
-        nbsp: " ",
-        amp: "&",
-        lt: "<",
-        gt: ">",
-        quot: '"',
-        apos: "'",
-    };
-    return value.replace(/&(#x[0-9a-f]+|#\d+|[a-z]+);/gi, (match, entity) => {
-        const normalized = String(entity).toLowerCase();
-        if (normalized.startsWith("#x")) {
-            return safeCodePointToString(Number.parseInt(normalized.slice(2), 16)) ?? match;
-        }
-        if (normalized.startsWith("#")) {
-            return safeCodePointToString(Number.parseInt(normalized.slice(1), 10)) ?? match;
-        }
-        return namedEntities[normalized] ?? match;
-    });
-}
-function isAssetLikePath(pathname) {
-    // Keep this list intentionally conservative so docs paths are still crawled
-    // unless they clearly point at static assets/binaries.
-    return /\.(css|js|json|png|jpe?g|gif|svg|ico|webp|pdf|zip|tar|gz|mp4|mp3|woff2?)$/i.test(pathname);
-}
-function isSafeLinkUrl(url) {
-    return url.protocol === "http:" || url.protocol === "https:";
-}
-function stripDangerousBlockTag(value, tagName) {
-    const pattern = new RegExp(`<${tagName}\\b[^>]*>[\\s\\S]*?<\\/${tagName}\\s*>`, "gi");
-    return value.replace(pattern, "");
-}
-function safeCodePointToString(value) {
-    if (!Number.isFinite(value) || value < 0 || value > 0x10ffff)
-        return undefined;
-    try {
-        return String.fromCodePoint(value);
-    }
-    catch {
-        return undefined;
-    }
-}
-export { ensureWebsiteMirror, getCachePaths, validateWebsiteInputUrl, validateWebsiteUrl, WebsiteSourceProvider };
+export { WebsiteSourceProvider };