@gulibs/safe-coder 0.0.24 → 0.0.26

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. package/README.md +447 -15
  2. package/dist/documentation/browser-manager.d.ts +51 -0
  3. package/dist/documentation/browser-manager.d.ts.map +1 -0
  4. package/dist/documentation/browser-manager.js +260 -0
  5. package/dist/documentation/browser-manager.js.map +1 -0
  6. package/dist/documentation/checkpoint-manager.d.ts +38 -0
  7. package/dist/documentation/checkpoint-manager.d.ts.map +1 -0
  8. package/dist/documentation/checkpoint-manager.js +101 -0
  9. package/dist/documentation/checkpoint-manager.js.map +1 -0
  10. package/dist/documentation/doc-crawler.d.ts +103 -1
  11. package/dist/documentation/doc-crawler.d.ts.map +1 -1
  12. package/dist/documentation/doc-crawler.js +931 -151
  13. package/dist/documentation/doc-crawler.js.map +1 -1
  14. package/dist/documentation/llms-txt/detector.d.ts +31 -0
  15. package/dist/documentation/llms-txt/detector.d.ts.map +1 -0
  16. package/dist/documentation/llms-txt/detector.js +77 -0
  17. package/dist/documentation/llms-txt/detector.js.map +1 -0
  18. package/dist/documentation/llms-txt/downloader.d.ts +30 -0
  19. package/dist/documentation/llms-txt/downloader.d.ts.map +1 -0
  20. package/dist/documentation/llms-txt/downloader.js +84 -0
  21. package/dist/documentation/llms-txt/downloader.js.map +1 -0
  22. package/dist/documentation/llms-txt/index.d.ts +4 -0
  23. package/dist/documentation/llms-txt/index.d.ts.map +1 -0
  24. package/dist/documentation/llms-txt/index.js +4 -0
  25. package/dist/documentation/llms-txt/index.js.map +1 -0
  26. package/dist/documentation/llms-txt/parser.d.ts +43 -0
  27. package/dist/documentation/llms-txt/parser.d.ts.map +1 -0
  28. package/dist/documentation/llms-txt/parser.js +177 -0
  29. package/dist/documentation/llms-txt/parser.js.map +1 -0
  30. package/dist/index.js +0 -0
  31. package/dist/server/mcp-server.d.ts.map +1 -1
  32. package/dist/server/mcp-server.js +48 -3
  33. package/dist/server/mcp-server.js.map +1 -1
  34. package/package.json +16 -11
@@ -1,7 +1,15 @@
+ import { HttpClient } from '../utils/http-client.js';
  import { logger } from '../utils/logger.js';
  import { WebDocumentationBrowser } from './web-doc-browser.js';
+ import { LlmsTxtDetector, LlmsTxtDownloader, LlmsTxtParser } from './llms-txt/index.js';
+ import { CheckpointManager } from './checkpoint-manager.js';
+ import { BrowserManager } from './browser-manager.js';
+ import { join } from 'path';
+ import { tmpdir } from 'os';
+ import * as cheerio from 'cheerio';
  export class DocumentationCrawler {
  browser;
+ browserManager;
  visitedUrls;
  urlQueue;
  crawledPages;
@@ -9,6 +17,8 @@ export class DocumentationCrawler {
  options;
  baseUrl;
  linkDiscoveryStats;
+ checkpointManager;
+ pagesSinceLastCheckpoint;
  DOCUMENTATION_PATTERNS = [
  /\/docs?\//i,
  /\/documentation/i,
@@ -39,6 +49,7 @@ export class DocumentationCrawler {
  this.crawledPages = [];
  this.errors = [];
  this.options = {
+ crawlStrategy: 'bfs', // Default to breadth-first search
  maxDepth: 3,
  maxPages: 50,
  includePaths: [],
@@ -47,6 +58,10 @@
  maxRetries: 2, // Default 2 retries
  retryDelay: 1000, // Default 1 second delay before retry
  useBrowserAutomation: false, // Default to HTTP-only for backward compatibility
+ skipLlmsTxt: false, // Enable llms.txt detection by default
+ workers: 1, // Default to single-threaded crawling
+ spaStrategy: 'smart', // Smart SPA handling by default
+ spaFallback: 'warn', // Warn users when browser rendering fails
  };
  this.baseUrl = new URL('https://example.com');
  this.linkDiscoveryStats = {
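The new option defaults above surface as caller-facing knobs. A minimal sketch of how a caller might set them (the import path and the no-argument constructor are assumptions read off this diff, not documented API):

```js
// Sketch only — option names come from the defaults in the hunks above.
import { DocumentationCrawler } from '@gulibs/safe-coder/dist/documentation/doc-crawler.js'; // hypothetical path

const crawler = new DocumentationCrawler();
const result = await crawler.crawl('https://example.com/docs', {
  crawlStrategy: 'dfs', // 'bfs' (default) or 'dfs'
  workers: 4,           // > 1 switches to parallel crawling
  spaStrategy: 'smart', // try HTTP first, fall back to a browser when content looks thin
  spaFallback: 'warn',  // 'warn' | 'skip' | 'error' when browser rendering fails
  skipLlmsTxt: false,   // leave llms.txt detection enabled
});
```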
@@ -62,15 +77,19 @@
  pagesDiscovered: 0,
  pagesCrawled: 0,
  };
+ this.pagesSinceLastCheckpoint = 0;
  }
  /**
  * Crawl documentation starting from a root URL
  * Uses HTTP client (axios) exclusively - no browser automation
  * For SPA sites that require JavaScript rendering, use Cursor/Claude's built-in browser tools
+ * Supports both BFS (breadth-first) and DFS (depth-first) crawl strategies
  */
  async crawl(rootUrl, options = {}) {
+ const strategy = options.crawlStrategy || 'bfs';
  logger.info('Starting documentation crawl using HTTP client (axios)', {
  url: rootUrl,
+ strategy,
  method: 'HTTP GET',
  client: 'axios/HttpClient',
  note: 'For SPA sites, use Cursor/Claude browser tools to get rendered content first',
@@ -107,6 +126,27 @@
  }
  // No longer require documentation-only pages - allow any website with extractable content
  logger.debug('Starting crawl from URL (permissive mode)', { url: rootUrl });
+ // Setup checkpoint manager if enabled
+ if (this.options.checkpoint?.enabled) {
+ const checkpointFile = this.options.checkpoint.file ||
+ join(tmpdir(), `safe-coder-checkpoint-${this.sanitizeFilename(rootUrl)}.json`);
+ this.checkpointManager = new CheckpointManager(checkpointFile);
+ // Try to resume from checkpoint if requested
+ if (this.options.resume) {
+ const loaded = await this.loadCheckpoint();
+ if (loaded) {
+ logger.info('Resumed from checkpoint', {
+ pagesCrawled: this.crawledPages.length,
+ pendingUrls: this.urlQueue.length,
+ visitedUrls: this.visitedUrls.size,
+ });
+ }
+ }
+ }
+ // Try to detect and use llms.txt if available (unless explicitly disabled)
+ if (!this.options.skipLlmsTxt) {
+ await this.tryLlmsTxt(rootUrl);
+ }
  // Detect SPA and provide warning
  try {
  const spaDetection = await this.browser.detectSPA(rootUrl);
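The checkpoint block above implies an options shape of `{ checkpoint: { enabled, file?, interval? }, resume? }`. A sketch of a resumable crawl under that inferred shape (the interval default of 10 comes from processPage further down; this is not a documented options contract):

```js
// Inferred from the code above.
await crawler.crawl('https://example.com/docs', {
  checkpoint: {
    enabled: true,
    interval: 10,               // pages between checkpoint saves
    file: '/tmp/my-crawl.json', // optional; defaults to a tmpdir()-based path
  },
  resume: true,                 // restore visitedUrls and the pending queue if a checkpoint exists
});
```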
@@ -136,144 +176,15 @@
  // Start crawling from root
  this.urlQueue.push({ url: rootUrl, depth: 0 });
  let maxDepthReached = 0;
- // Process queue
+ // Process queue - use parallel workers if specified
  const startTime = Date.now();
- let lastProgressLog = Date.now();
- const PROGRESS_LOG_INTERVAL = 5000; // Log progress every 5 seconds
- while (this.urlQueue.length > 0 && this.crawledPages.length < this.options.maxPages) {
- const queued = this.urlQueue.shift();
- if (!queued)
- break;
- const { url, depth } = queued;
- // Log progress periodically
- const now = Date.now();
- if (now - lastProgressLog >= PROGRESS_LOG_INTERVAL) {
- const elapsed = ((now - startTime) / 1000).toFixed(1);
- const pagesPerSecond = (this.crawledPages.length / elapsed).toFixed(2);
- logger.info('Crawl progress', {
- pagesCrawled: this.crawledPages.length,
- pagesRemaining: this.urlQueue.length,
- maxPages: this.options.maxPages,
- errors: this.errors.length,
- elapsedSeconds: elapsed,
- pagesPerSecond,
- currentDepth: depth,
- maxDepth: this.options.maxDepth,
- });
- lastProgressLog = now;
- }
- // Skip if already visited
- if (this.visitedUrls.has(url)) {
- continue;
- }
- // Check depth limit
- if (depth > this.options.maxDepth) {
- continue;
- }
- // Mark as visited
- this.visitedUrls.add(url);
- maxDepthReached = Math.max(maxDepthReached, depth);
- try {
- // Crawl the page using HTTP GET with retry logic
- logger.debug('Fetching page via HTTP GET', { url, depth, method: 'HTTP GET', client: 'axios' });
- const page = await this.fetchPageWithRetry(url);
- // Check if page has minimal content (possible SPA issue)
- const contentLength = page.content.length;
- const linksCount = page.navigationLinks.length;
- if (contentLength < 200 && linksCount < 3) {
- logger.warn('Page has minimal content - may be SPA', {
- url,
- contentLength,
- linksCount,
- suggestion: 'This page may require JavaScript rendering. Consider using browser automation tools.',
- });
- }
- // Convert to CrawledPage format
- const crawledPage = {
- url: page.url,
- title: page.title,
- content: page.content,
- depth,
- sections: page.sections,
- navigationLinks: page.navigationLinks,
- headings: page.headings,
- codeSamples: page.codeSamples,
- };
- this.crawledPages.push(crawledPage);
- this.linkDiscoveryStats.pagesCrawled++;
- const totalLinksOnPage = page.navigationLinks.length;
- this.linkDiscoveryStats.totalLinksFound += totalLinksOnPage;
- logger.debug('Page fetched and parsed successfully', {
- url,
- title: page.title.substring(0, 50),
- linksFound: totalLinksOnPage,
- depth,
- });
- // Discover and queue new URLs
- if (depth < this.options.maxDepth) {
- const discoveryResult = this.discoverDocumentationLinks(page, depth + 1);
- const newUrls = discoveryResult.discovered;
- logger.debug('Link discovery completed', {
- url,
- totalLinksOnPage,
- discovered: newUrls.length,
- filtered: discoveryResult.filtered,
- alreadyVisited: discoveryResult.alreadyVisited,
- notContent: discoveryResult.notContent,
- externalDomain: discoveryResult.externalDomain,
- excludedPattern: discoveryResult.excludedPattern,
- queueLengthBefore: this.urlQueue.length,
- });
- let queuedCount = 0;
- let skippedAlreadyVisited = 0;
- for (const newUrl of newUrls) {
- if (!this.visitedUrls.has(newUrl.url)) {
- // Also check if it's already in the queue to avoid duplicates
- const alreadyInQueue = this.urlQueue.some(q => q.url === newUrl.url);
- if (!alreadyInQueue) {
- this.urlQueue.push(newUrl);
- this.linkDiscoveryStats.linksQueued++;
- queuedCount++;
- }
- else {
- skippedAlreadyVisited++;
- }
- }
- else {
- skippedAlreadyVisited++;
- }
- }
- logger.debug('Links queued', {
- url,
- queued: queuedCount,
- skippedAlreadyVisited,
- queueLengthAfter: this.urlQueue.length,
- });
- }
- else {
- this.linkDiscoveryStats.linksFiltered.depthLimit += totalLinksOnPage;
- }
- // Rate limiting
- if (this.options.rateLimit > 0 && this.urlQueue.length > 0) {
- await this.delay(this.options.rateLimit);
- }
- }
- catch (error) {
- const errorMessage = error instanceof Error ? error.message : String(error);
- const errorType = this.classifyError(error);
- this.errors.push({
- url,
- error: `${errorType}: ${errorMessage}`,
- });
- logger.warn('Page crawl failed', {
- url,
- error: errorMessage,
- errorType,
- depth,
- willContinue: true,
- });
- // Continue crawling other pages
- }
+ const workerCount = this.options.workers || 1;
+ if (workerCount > 1) {
+ logger.info('Using parallel crawling', { workers: workerCount });
+ maxDepthReached = await this.crawlWithWorkers(startTime);
+ }
+ else {
+ maxDepthReached = await this.crawlSequential(startTime);
  }
  // Update final statistics
  this.linkDiscoveryStats.pagesDiscovered = this.visitedUrls.size;
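The refactor above moves the old inline loop into crawlSequential() and adds a worker path. A compact sketch of the batch model that crawlWithWorkers() (defined below) implements — drain up to `workers` URLs per round and fetch them concurrently:

```js
// Sketch of the round-based parallelism; fetchPage stands in for processPage(),
// and the real code also applies visited/depth checks while building the batch.
async function crawlRound(frontier, workers, fetchPage) {
  const batch = frontier.splice(0, workers); // one batch per round, BFS order
  await Promise.all(batch.map(({ url, depth }) => fetchPage(url, depth)));
}
```

Note that rounds are synchronized: a slow page holds up its whole batch before the next round starts, which keeps the implementation simple at some throughput cost.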
@@ -315,6 +226,10 @@
  suggestion: 'Consider crawling more pages or a different website',
  });
  }
+ // Clear checkpoint after successful completion
+ if (this.checkpointManager && !abandoned) {
+ await this.clearCheckpoint();
+ }
  return {
  pages: this.crawledPages,
  totalPages: this.crawledPages.length,
@@ -325,6 +240,221 @@
  abandonReason,
  };
  }
+ /**
+ * Sequential crawling (single-threaded)
+ */
+ async crawlSequential(startTime) {
+ let maxDepthReached = 0;
+ let lastProgressLog = Date.now();
+ const PROGRESS_LOG_INTERVAL = 5000; // Log progress every 5 seconds
+ while (this.urlQueue.length > 0 && this.crawledPages.length < this.options.maxPages) {
+ // Use different strategies for getting next URL
+ // BFS: shift() - take from front (queue behavior)
+ // DFS: pop() - take from back (stack behavior)
+ const queued = this.options.crawlStrategy === 'dfs' ? this.urlQueue.pop() : this.urlQueue.shift();
+ if (!queued)
+ break;
+ const { url, depth } = queued;
+ // Log progress periodically
+ const now = Date.now();
+ if (now - lastProgressLog >= PROGRESS_LOG_INTERVAL) {
+ const elapsed = ((now - startTime) / 1000).toFixed(1);
+ const pagesPerSecond = (this.crawledPages.length / elapsed).toFixed(2);
+ logger.info('Crawl progress', {
+ pagesCrawled: this.crawledPages.length,
+ pagesRemaining: this.urlQueue.length,
+ maxPages: this.options.maxPages,
+ errors: this.errors.length,
+ elapsedSeconds: elapsed,
+ pagesPerSecond,
+ currentDepth: depth,
+ maxDepth: this.options.maxDepth,
+ });
+ lastProgressLog = now;
+ }
+ // Skip if already visited
+ if (this.visitedUrls.has(url)) {
+ continue;
+ }
+ // Check depth limit
+ if (depth > this.options.maxDepth) {
+ continue;
+ }
+ // Mark as visited
+ this.visitedUrls.add(url);
+ maxDepthReached = Math.max(maxDepthReached, depth);
+ await this.processPage(url, depth);
+ // Rate limiting
+ if (this.options.rateLimit > 0 && this.urlQueue.length > 0) {
+ await this.delay(this.options.rateLimit);
+ }
+ }
+ return maxDepthReached;
+ }
+ /**
+ * Parallel crawling with multiple workers
+ */
+ async crawlWithWorkers(startTime) {
+ let maxDepthReached = 0;
+ let lastProgressLog = Date.now();
+ const PROGRESS_LOG_INTERVAL = 5000;
+ const workerCount = this.options.workers || 1;
+ while (this.urlQueue.length > 0 && this.crawledPages.length < this.options.maxPages) {
+ // Log progress periodically
+ const now = Date.now();
+ if (now - lastProgressLog >= PROGRESS_LOG_INTERVAL) {
+ const elapsed = ((now - startTime) / 1000).toFixed(1);
+ const pagesPerSecond = (this.crawledPages.length / elapsed).toFixed(2);
+ logger.info('Crawl progress (parallel)', {
+ pagesCrawled: this.crawledPages.length,
+ pagesRemaining: this.urlQueue.length,
+ maxPages: this.options.maxPages,
+ errors: this.errors.length,
+ elapsedSeconds: elapsed,
+ pagesPerSecond,
+ workers: workerCount,
+ });
+ lastProgressLog = now;
+ }
+ // Get batch of URLs to process in parallel
+ const batch = [];
+ const batchSize = Math.min(workerCount, this.urlQueue.length, this.options.maxPages - this.crawledPages.length);
+ for (let i = 0; i < batchSize; i++) {
+ const queued = this.options.crawlStrategy === 'dfs' ? this.urlQueue.pop() : this.urlQueue.shift();
+ if (!queued)
+ break;
+ // Skip if already visited
+ if (this.visitedUrls.has(queued.url)) {
+ continue;
+ }
+ // Check depth limit
+ if (queued.depth > this.options.maxDepth) {
+ continue;
+ }
+ // Mark as visited
+ this.visitedUrls.add(queued.url);
+ maxDepthReached = Math.max(maxDepthReached, queued.depth);
+ batch.push(queued);
+ }
+ if (batch.length === 0) {
+ break;
+ }
+ // Process batch in parallel
+ await Promise.all(batch.map(async (queued) => {
+ await this.processPage(queued.url, queued.depth);
+ // Rate limiting (per worker)
+ if (this.options.rateLimit > 0) {
+ await this.delay(this.options.rateLimit);
+ }
+ }));
+ }
+ return maxDepthReached;
+ }
+ /**
+ * Process a single page (shared by both sequential and parallel crawling)
+ */
+ async processPage(url, depth) {
+ try {
+ // Crawl the page using HTTP GET with retry logic
+ logger.debug('Fetching page via HTTP GET', { url, depth, method: 'HTTP GET', client: 'axios' });
+ const page = await this.fetchPageWithRetry(url);
+ // Check if page has minimal content (possible SPA issue)
+ const contentLength = page.content.length;
+ const linksCount = page.navigationLinks.length;
+ if (contentLength < 200 && linksCount < 3) {
+ logger.warn('Page has minimal content - may be SPA', {
+ url,
+ contentLength,
+ linksCount,
+ suggestion: 'This page may require JavaScript rendering. Consider using browser automation tools.',
+ });
+ }
+ // Convert to CrawledPage format
+ const crawledPage = {
+ url: page.url,
+ title: page.title,
+ content: page.content,
+ depth,
+ sections: page.sections,
+ navigationLinks: page.navigationLinks,
+ headings: page.headings,
+ codeSamples: page.codeSamples,
+ };
+ this.crawledPages.push(crawledPage);
+ this.linkDiscoveryStats.pagesCrawled++;
+ this.pagesSinceLastCheckpoint++;
+ // Save checkpoint if interval reached
+ if (this.checkpointManager && this.options.checkpoint?.enabled) {
+ const interval = this.options.checkpoint.interval || 10;
+ if (this.pagesSinceLastCheckpoint >= interval) {
+ await this.saveCheckpoint();
+ this.pagesSinceLastCheckpoint = 0;
+ }
+ }
+ const totalLinksOnPage = page.navigationLinks.length;
+ this.linkDiscoveryStats.totalLinksFound += totalLinksOnPage;
+ logger.debug('Page fetched and parsed successfully', {
+ url,
+ title: page.title.substring(0, 50),
+ linksFound: totalLinksOnPage,
+ depth,
+ });
+ // Discover and queue new URLs
+ if (depth < this.options.maxDepth) {
+ const discoveryResult = this.discoverDocumentationLinks(page, depth + 1);
+ const newUrls = discoveryResult.discovered;
+ logger.debug('Link discovery completed', {
+ url,
+ totalLinksOnPage,
+ discovered: newUrls.length,
+ filtered: discoveryResult.filtered,
+ });
+ let queuedCount = 0;
+ let skippedAlreadyVisited = 0;
+ for (const newUrl of newUrls) {
+ if (!this.visitedUrls.has(newUrl.url)) {
+ // Also check if it's already in the queue to avoid duplicates
+ const alreadyInQueue = this.urlQueue.some(q => q.url === newUrl.url);
+ if (!alreadyInQueue) {
+ this.urlQueue.push(newUrl);
+ this.linkDiscoveryStats.linksQueued++;
+ queuedCount++;
+ }
+ else {
+ skippedAlreadyVisited++;
+ }
+ }
+ else {
+ skippedAlreadyVisited++;
+ }
+ }
+ logger.debug('Links queued', {
+ url,
+ queued: queuedCount,
+ skippedAlreadyVisited,
+ queueLengthAfter: this.urlQueue.length,
+ });
+ }
+ else {
+ this.linkDiscoveryStats.linksFiltered.depthLimit += totalLinksOnPage;
+ }
+ }
+ catch (error) {
+ const errorMessage = error instanceof Error ? error.message : String(error);
+ const errorType = this.classifyError(error);
+ this.errors.push({
+ url,
+ error: `${errorType}: ${errorMessage}`,
+ });
+ logger.warn('Page crawl failed', {
+ url,
+ error: errorMessage,
+ errorType,
+ depth,
+ willContinue: true,
+ });
+ }
+ }
  /**
  * Discover documentation links from a crawled page
  */
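The shift()/pop() switch in both loops above is what turns one frontier array into either a queue or a stack. A tiny standalone illustration:

```js
// BFS vs DFS on the same frontier array (sketch).
const frontier = [
  { url: 'https://example.com/docs', depth: 0 },
  { url: 'https://example.com/docs/guide', depth: 1 },
  { url: 'https://example.com/docs/api', depth: 1 },
];
const strategy = 'dfs';
const next = strategy === 'dfs'
  ? frontier.pop()    // stack: most recently discovered page first
  : frontier.shift(); // queue: oldest discovery first, level by level
// BFS would take /docs here; DFS takes /docs/api.
```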
@@ -482,22 +612,48 @@
  }
  /**
  * Check if crawled content is sufficient for skill generation
- * Similar logic to SkillGenerator but here for early validation
+ * Enhanced with multi-dimensional quality metrics
  */
  canGenerateSkill(pages) {
  if (pages.length === 0) {
  return { canGenerate: false, reason: 'empty_pages' };
  }
+ const metrics = this.evaluateContentQuality(pages);
+ // All pages are media-only
+ if (metrics.mediaOnlyPages === pages.length && !metrics.hasTextContent) {
+ return { canGenerate: false, reason: 'media_only' };
+ }
+ // No pages have sufficient content
+ if (!metrics.hasSufficientContent) {
+ return { canGenerate: false, reason: 'insufficient_content' };
+ }
+ // No structured content (headings, sections)
+ if (!metrics.hasStructuredContent) {
+ return { canGenerate: false, reason: 'no_structured_content' };
+ }
+ return { canGenerate: true };
+ }
+ /**
+ * Evaluate content quality with multi-dimensional metrics
+ */
+ evaluateContentQuality(pages) {
  const MIN_CONTENT_LENGTH = 100;
  let hasSufficientContent = false;
  let hasStructuredContent = false;
  let hasTextContent = false;
  let mediaOnlyCount = 0;
+ let totalContentLength = 0;
+ let totalCodeSamples = 0;
+ // Track content diversity
+ const urlPatterns = new Set();
+ const titlePatterns = new Set();
  for (const page of pages) {
  const contentLength = (page.content || '').trim().length;
  const hasHeadings = page.headings && page.headings.length > 0;
  const hasText = contentLength > 0;
- // Check if page is media-only (has images but no text)
+ totalContentLength += contentLength;
+ totalCodeSamples += (page.codeSamples || []).length;
+ // Check if page is media-only
  const hasImages = /<img[^>]*>/i.test(page.content || '');
  const hasMedia = hasImages || (page.codeSamples && page.codeSamples.length > 0);
  if (hasMedia && contentLength < MIN_CONTENT_LENGTH) {
@@ -512,27 +668,111 @@
  if (hasText) {
  hasTextContent = true;
  }
+ // Track diversity
+ try {
+ const urlPath = new URL(page.url).pathname;
+ const pathSegments = urlPath.split('/').filter(s => s);
+ if (pathSegments.length > 0) {
+ urlPatterns.add(pathSegments[0]);
+ }
+ }
+ catch {
+ // Invalid URL, skip
+ }
+ // Track title diversity
+ const titleWords = page.title.toLowerCase().split(/\s+/).slice(0, 3);
+ titlePatterns.add(titleWords.join(' '));
  }
- // All pages are media-only
- if (mediaOnlyCount === pages.length && !hasTextContent) {
- return { canGenerate: false, reason: 'media_only' };
- }
- // No pages have sufficient content
- if (!hasSufficientContent) {
- return { canGenerate: false, reason: 'insufficient_content' };
+ // Calculate diversity score (0-1)
+ const contentDiversity = Math.min(1, (urlPatterns.size + titlePatterns.size) / (pages.length * 0.5));
+ // Calculate API coverage score (0-1)
+ const pagesWithCode = pages.filter(p => p.codeSamples && p.codeSamples.length > 0).length;
+ const apiCoverage = pages.length > 0 ? pagesWithCode / pages.length : 0;
+ const avgContentLength = pages.length > 0 ? totalContentLength / pages.length : 0;
+ return {
+ hasSufficientContent,
+ hasStructuredContent,
+ hasTextContent,
+ mediaOnlyPages: mediaOnlyCount,
+ contentDiversity,
+ apiCoverage,
+ avgContentLength,
+ totalCodeSamples,
+ };
+ }
+ /**
+ * Check if should continue crawling based on content quality
+ */
+ shouldContinueCrawling(currentPages, maxPages) {
+ if (currentPages >= maxPages) {
+ return false;
  }
- // No structured content (headings, sections)
- if (!hasStructuredContent) {
- return { canGenerate: false, reason: 'no_structured_content' };
+ // Evaluate quality every 10 pages
+ if (currentPages % 10 === 0 && currentPages > 0) {
+ const metrics = this.evaluateContentQuality(this.crawledPages);
+ // High quality content - can stop early if we have enough
+ if (metrics.hasSufficientContent &&
+ metrics.contentDiversity > 0.7 &&
+ metrics.apiCoverage > 0.5 &&
+ currentPages >= maxPages * 0.5) {
+ logger.info('High quality content detected, considering early stop', {
+ currentPages,
+ maxPages,
+ diversity: metrics.contentDiversity.toFixed(2),
+ apiCoverage: metrics.apiCoverage.toFixed(2),
+ });
+ // Continue but log the possibility
+ }
+ // Low quality warning
+ if (currentPages >= maxPages * 0.8 && !metrics.hasSufficientContent) {
+ logger.warn('Approaching page limit but content quality is low', {
+ currentPages,
+ maxPages,
+ diversity: metrics.contentDiversity.toFixed(2),
+ apiCoverage: metrics.apiCoverage.toFixed(2),
+ suggestion: 'Consider increasing maxPages or refining includePaths',
+ });
+ }
  }
- return { canGenerate: true };
+ return currentPages < maxPages;
  }
  /**
  * Fetch a page with retry logic
+ * Supports HTML pages, Markdown files, and SPA rendering
  */
  async fetchPageWithRetry(url, retryCount = 0) {
  try {
- return await this.browser.browsePage(url);
+ // 1. Check if this is a Markdown file
+ if (url.endsWith('.md') || url.includes('.md?') || url.includes('.md#')) {
+ return await this.extractMarkdownContent(url);
+ }
+ // 2. Try HTTP crawl first
+ const page = await this.browser.browsePage(url);
+ // 3. Smart strategy: check if content is sufficient
+ if (this.options.spaStrategy === 'smart') {
+ const needsBrowser = await this.shouldUseBrowser(page, url);
+ if (needsBrowser) {
+ logger.info('Content insufficient, switching to browser rendering', {
+ url,
+ contentLength: page.content.length,
+ linksCount: page.navigationLinks.length,
+ });
+ return await this.fetchWithBrowser(url);
+ }
+ }
+ // 4. Auto strategy: use browser for detected SPA
+ if (this.options.spaStrategy === 'auto') {
+ const spaDetection = await this.browser.detectSPA(url, page.content);
+ if (spaDetection.isSPA && spaDetection.confidence !== 'low') {
+ logger.info('SPA detected, using browser rendering', {
+ url,
+ confidence: spaDetection.confidence,
+ indicators: spaDetection.indicators,
+ });
+ return await this.fetchWithBrowser(url);
+ }
+ }
+ return page;
  }
  catch (error) {
  const errorType = this.classifyError(error);
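The two quality scores defined above are simple ratios; a worked example with illustrative numbers makes the thresholds in shouldContinueCrawling() concrete:

```js
// Illustrative values only — not output from a real crawl.
const pageCount = 20;
const urlPatterns = 4;   // distinct first path segments ('docs', 'api', ...)
const titlePatterns = 8; // distinct three-word title prefixes
const pagesWithCode = 9;

const contentDiversity = Math.min(1, (urlPatterns + titlePatterns) / (pageCount * 0.5)); // min(1, 12 / 10) = 1
const apiCoverage = pagesWithCode / pageCount; // 0.45 — just under the 0.5 early-stop threshold
```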
@@ -553,6 +793,166 @@
  throw error;
  }
  }
+ /**
+ * Extract content from Markdown file
+ * Converts Markdown structure to WebDocumentationPage format
+ */
+ async extractMarkdownContent(url) {
+ logger.debug('Extracting Markdown content', { url });
+ // Fetch raw markdown content
+ const httpClient = new HttpClient();
+ const response = await httpClient.get(url, {
+ responseType: 'text',
+ timeout: 30000,
+ });
+ const markdownContent = response.data;
+ // Parse markdown structure
+ const parsed = this.parseMarkdown(markdownContent, url);
+ return {
+ url,
+ title: parsed.title,
+ content: parsed.content,
+ searchableContent: parsed.content, // Add searchable content for consistency
+ sections: parsed.sections,
+ navigationLinks: parsed.links,
+ headings: parsed.headings,
+ codeSamples: parsed.codeSamples,
+ isDocumentation: true,
+ };
+ }
+ /**
+ * Parse Markdown content into structured data
+ */
+ parseMarkdown(content, url) {
+ const lines = content.split('\n');
+ let title = '';
+ const headings = [];
+ const codeSamples = [];
+ const sections = [];
+ const links = [];
+ const contentLines = [];
+ // Extract title from first h1
+ for (const line of lines) {
+ if (line.startsWith('# ')) {
+ title = line.substring(2).trim();
+ break;
+ }
+ }
+ // Extract headings (h2-h6)
+ const headingRegex = /^(#{2,6})\s+(.+)$/;
+ for (const line of lines) {
+ const match = line.match(headingRegex);
+ if (match) {
+ const level = match[1].length;
+ const text = match[2].trim();
+ const id = text.toLowerCase().replace(/[^\w\s-]/g, '').replace(/\s+/g, '-');
+ headings.push({
+ level: `h${level}`,
+ text,
+ id,
+ });
+ }
+ }
+ // Extract code blocks
+ const codeBlockRegex = /```(\w+)?\n([\s\S]*?)```/g;
+ let match;
+ while ((match = codeBlockRegex.exec(content)) !== null) {
+ const language = match[1] || 'text';
+ const code = match[2].trim();
+ if (code.length > 10) {
+ codeSamples.push({
+ code,
+ language,
+ });
+ }
+ }
+ // Extract content (remove code blocks and headings)
+ let contentWithoutCode = content.replace(codeBlockRegex, '');
+ contentWithoutCode = contentWithoutCode.replace(/^#{1,6}\s+.+$/gm, '');
+ for (const para of contentWithoutCode.split('\n\n')) {
+ const trimmed = para.trim();
+ if (trimmed.length > 20) {
+ contentLines.push(trimmed);
+ }
+ }
+ // Extract links (markdown format)
+ const linkRegex = /\[([^\]]*)\]\(([^)]+)\)/g;
+ while ((match = linkRegex.exec(content)) !== null) {
+ const text = match[1];
+ const linkUrl = match[2].trim();
+ // Skip anchors
+ if (linkUrl.startsWith('#')) {
+ continue;
+ }
+ // Resolve relative URLs
+ let absoluteUrl;
+ try {
+ if (linkUrl.startsWith('http://') || linkUrl.startsWith('https://')) {
+ absoluteUrl = linkUrl;
+ }
+ else {
+ absoluteUrl = new URL(linkUrl, url).href;
+ }
+ // Remove fragment
+ absoluteUrl = absoluteUrl.split('#')[0];
+ // Only include .md URLs to avoid client-side rendered HTML pages
+ if (absoluteUrl.endsWith('.md') || absoluteUrl.includes('.md?')) {
+ const linkOrigin = new URL(absoluteUrl).origin;
+ const baseOrigin = this.baseUrl.origin;
+ links.push({
+ text,
+ url: absoluteUrl,
+ isInternal: linkOrigin === baseOrigin,
+ });
+ }
+ }
+ catch (error) {
+ // Invalid URL, skip
+ logger.debug('Invalid URL in markdown link', { url: linkUrl });
+ }
+ }
+ // Build sections from headings
+ let currentSection = null;
+ let currentContent = [];
+ for (const line of lines) {
+ const headerMatch = line.match(headingRegex);
+ if (headerMatch) {
+ // Save previous section
+ if (currentSection) {
+ currentSection.content = currentContent.join('\n').trim();
+ if (currentSection.content.length > 0) {
+ sections.push(currentSection);
+ }
+ }
+ // Start new section
+ const text = headerMatch[2].trim();
+ currentSection = {
+ title: text,
+ content: '',
+ anchor: text.toLowerCase().replace(/[^\w\s-]/g, '').replace(/\s+/g, '-'),
+ };
+ currentContent = [];
+ }
+ else if (currentSection) {
+ currentContent.push(line);
+ }
+ }
+ // Save last section
+ if (currentSection) {
+ currentSection.content = currentContent.join('\n').trim();
+ if (currentSection.content.length > 0) {
+ sections.push(currentSection);
+ }
+ }
+ return {
+ title: title || 'Untitled',
+ content: contentLines.join('\n\n'),
+ headings,
+ codeSamples,
+ sections,
+ links,
+ };
+ }
  /**
  * Classify error type for better error messages
  */
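parseMarkdown() is plain regex scanning, so its behavior is easy to preview. A sketch exercising the same heading, code-fence, and link patterns on a tiny sample (the fence string is built with repeat() only so this example can itself sit inside a fenced block):

```js
const fence = '`'.repeat(3);
const sample = ['# Title', '', '## Setup', 'Install it.', '', fence + 'js', 'console.log("hi");', fence, '', '[Guide](./guide.md)'].join('\n');

const headings = [...sample.matchAll(/^(#{2,6})\s+(.+)$/gm)].map(m => m[2]); // ['Setup']
const codeBlockRegex = new RegExp(fence + '(\\w+)?\\n([\\s\\S]*?)' + fence, 'g');
const code = [...sample.matchAll(codeBlockRegex)].map(m => m[2].trim()); // ['console.log("hi");']
const links = [...sample.matchAll(/\[([^\]]*)\]\(([^)]+)\)/g)].map(m => m[2]); // ['./guide.md']
// The crawler then resolves './guide.md' against the page URL and queues it
// only because it ends in .md — non-.md links are dropped in Markdown mode.
```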
@@ -625,6 +1025,386 @@
  }
  return breakdown;
  }
+ /**
+ * Try to detect and use llms.txt for optimized crawling
+ */
+ async tryLlmsTxt(rootUrl) {
+ logger.info('Checking for llms.txt files', { url: rootUrl });
+ try {
+ const detector = new LlmsTxtDetector(rootUrl);
+ const variants = await detector.detectAll();
+ if (variants.length === 0) {
+ logger.info('No llms.txt files found, proceeding with normal crawl');
+ return;
+ }
+ logger.info('Found llms.txt variants', {
+ count: variants.length,
+ variants: variants.map(v => v.variant),
+ });
+ // Download all variants
+ const downloader = new LlmsTxtDownloader();
+ const downloaded = await downloader.downloadAll(variants);
+ if (downloaded.length === 0) {
+ logger.warn('Failed to download any llms.txt variants');
+ return;
+ }
+ // Use the largest variant (most comprehensive)
+ const largest = downloaded.reduce((prev, current) => current.size > prev.size ? current : prev);
+ logger.info('Using llms.txt for URL extraction', {
+ variant: largest.variant,
+ size: largest.size,
+ });
+ // Parse URLs from llms.txt
+ const parser = new LlmsTxtParser(largest.content, rootUrl);
+ const extractedUrls = parser.extractUrls();
+ if (extractedUrls.length > 0) {
+ logger.info('Extracted URLs from llms.txt', {
+ count: extractedUrls.length,
+ });
+ // Add URLs to queue with depth 0
+ for (const url of extractedUrls) {
+ if (this.isValidUrl(url) && !this.visitedUrls.has(url)) {
+ this.urlQueue.push({ url, depth: 0 });
+ }
+ }
+ logger.info('Added llms.txt URLs to crawl queue', {
+ added: this.urlQueue.length,
+ });
+ }
+ else {
+ logger.info('No URLs extracted from llms.txt, using normal crawl');
+ }
+ }
+ catch (error) {
+ const errorMessage = error instanceof Error ? error.message : String(error);
+ logger.warn('llms.txt detection failed, continuing with normal crawl', {
+ error: errorMessage,
+ });
+ // Continue with normal crawling if llms.txt fails
+ }
+ }
+ /**
+ * Check if a URL is valid for crawling
+ */
+ isValidUrl(url) {
+ try {
+ const parsed = new URL(url);
+ // Must be same origin as base URL
+ if (parsed.origin !== this.baseUrl.origin) {
+ return false;
+ }
+ // Must be http or https
+ if (parsed.protocol !== 'http:' && parsed.protocol !== 'https:') {
+ return false;
+ }
+ return true;
+ }
+ catch {
+ return false;
+ }
+ }
+ /**
+ * Save checkpoint
+ */
+ async saveCheckpoint() {
+ if (!this.checkpointManager) {
+ return;
+ }
+ const checkpointData = {
+ config: this.options,
+ visitedUrls: Array.from(this.visitedUrls),
+ pendingUrls: this.urlQueue,
+ pagesCrawled: this.crawledPages.length,
+ lastUpdated: new Date().toISOString(),
+ baseUrl: this.baseUrl.href,
+ };
+ try {
+ await this.checkpointManager.saveCheckpoint(checkpointData);
+ }
+ catch (error) {
+ logger.warn('Failed to save checkpoint', {
+ error: error instanceof Error ? error.message : String(error),
+ });
+ }
+ }
+ /**
+ * Load checkpoint and restore state
+ */
+ async loadCheckpoint() {
+ if (!this.checkpointManager) {
+ return false;
+ }
+ try {
+ const data = await this.checkpointManager.loadCheckpoint();
+ if (!data) {
+ logger.info('No checkpoint found to resume from');
+ return false;
+ }
+ // Restore state
+ this.visitedUrls = new Set(data.visitedUrls);
+ this.urlQueue = data.pendingUrls;
+ // Note: crawledPages are not restored as they will be regenerated
+ logger.info('State restored from checkpoint', {
+ visitedUrls: this.visitedUrls.size,
+ pendingUrls: this.urlQueue.length,
+ lastUpdated: data.lastUpdated,
+ });
+ return true;
+ }
+ catch (error) {
+ logger.warn('Failed to load checkpoint', {
+ error: error instanceof Error ? error.message : String(error),
+ });
+ return false;
+ }
+ }
+ /**
+ * Clear checkpoint after successful crawl
+ */
+ async clearCheckpoint() {
+ if (this.checkpointManager) {
+ try {
+ await this.checkpointManager.clearCheckpoint();
+ }
+ catch (error) {
+ logger.debug('Failed to clear checkpoint', {
+ error: error instanceof Error ? error.message : String(error),
+ });
+ }
+ }
+ }
+ /**
+ * Sanitize filename for checkpoint
+ */
+ sanitizeFilename(url) {
+ return url
+ .replace(/[^a-z0-9]/gi, '-')
+ .replace(/-+/g, '-')
+ .substring(0, 64);
+ }
+ /**
+ * Check if browser rendering is needed
+ */
+ async shouldUseBrowser(page, url) {
+ // 1. Content too short
+ if (page.content.length < 200) {
+ logger.debug('Content too short, may need browser', {
+ url,
+ length: page.content.length
+ });
+ return true;
+ }
+ // 2. No navigation links
+ if (page.navigationLinks.length < 3) {
+ logger.debug('Few navigation links, may need browser', {
+ url,
+ links: page.navigationLinks.length
+ });
+ return true;
+ }
+ // 3. SPA detected but content insufficient
+ const spaDetection = await this.browser.detectSPA(url, page.content);
+ if (spaDetection.isSPA && page.content.length < 500) {
+ logger.debug('SPA detected with insufficient content', {
+ url,
+ confidence: spaDetection.confidence,
+ length: page.content.length
+ });
+ return true;
+ }
+ return false;
+ }
+ /**
+ * Fetch page using browser rendering
+ */
+ async fetchWithBrowser(url) {
+ try {
+ // Lazy initialize browser
+ if (!this.browserManager) {
+ this.browserManager = new BrowserManager();
+ await this.browserManager.launch(this.options.browserConfig);
+ }
+ // Render page
+ const result = await this.browserManager.renderPage(url);
+ // Convert to WebDocumentationPage format
+ return this.parseRenderedPage(result);
+ }
+ catch (error) {
+ const errorMsg = error instanceof Error ? error.message : String(error);
+ logger.error('Browser rendering failed', { url, error: errorMsg });
+ // Handle failure based on fallback strategy
+ return this.handleBrowserFailure(url, errorMsg);
+ }
+ }
+ /**
+ * Parse browser-rendered HTML into WebDocumentationPage
+ */
+ parseRenderedPage(result) {
+ const $ = cheerio.load(result.html);
+ // Extract text content (remove scripts and styles)
+ $('script, style, noscript').remove();
+ const bodyText = $('body').text().trim();
+ // Extract headings
+ const headings = [];
+ $('h1, h2, h3, h4, h5, h6').each((_, elem) => {
+ const $elem = $(elem);
+ const tagName = elem.tagName.toLowerCase();
+ const text = $elem.text().trim();
+ const id = $elem.attr('id');
+ if (text) {
+ headings.push({
+ level: tagName,
+ text,
+ id,
+ });
+ }
+ });
+ // Extract code samples
+ const codeSamples = [];
+ $('pre code, code.hljs, .highlight code').each((_, elem) => {
+ const $elem = $(elem);
+ const code = $elem.text().trim();
+ const language = $elem.attr('class')?.match(/language-(\w+)/)?.[1] || 'text';
+ if (code.length > 10) {
+ codeSamples.push({ code, language });
+ }
+ });
+ // Extract sections
+ const sections = [];
+ $('section, article, .section, .content-section').each((_, elem) => {
+ const $elem = $(elem);
+ const heading = $elem.find('h1, h2, h3').first();
+ const title = heading.text().trim() || 'Section';
+ const content = $elem.text().trim();
+ const anchor = heading.attr('id');
+ if (content.length > 50) {
+ sections.push({ title, content, anchor });
+ }
+ });
+ return {
+ url: result.url,
+ title: result.title,
+ content: bodyText,
+ searchableContent: bodyText,
+ sections,
+ navigationLinks: result.links
+ .filter(link => link.url && link.url.startsWith('http'))
+ .map(link => {
+ try {
+ const linkUrl = new URL(link.url);
+ return {
+ text: link.text,
+ url: link.url,
+ isInternal: linkUrl.origin === this.baseUrl.origin,
+ };
+ }
+ catch {
+ return null;
+ }
+ })
+ .filter((link) => link !== null),
+ headings,
+ codeSamples,
+ isDocumentation: true,
+ };
+ }
+ /**
+ * Handle browser rendering failure based on fallback strategy
+ */
+ async handleBrowserFailure(url, error) {
+ const strategy = this.options.spaFallback || 'warn';
+ switch (strategy) {
+ case 'error':
+ throw new Error(`Browser rendering failed for ${url}: ${error}`);
+ case 'skip':
+ logger.warn('Skipping page due to browser failure', { url });
+ return this.createEmptyPage(url);
+ case 'warn':
+ default:
+ logger.warn('Browser rendering failed, returning page with installation guide', { url, error });
+ return this.createPageWithGuide(url, error);
+ }
+ }
+ /**
+ * Create empty page placeholder
+ */
+ createEmptyPage(url) {
+ return {
+ url,
+ title: 'Page Skipped',
+ content: '',
+ searchableContent: '',
+ sections: [],
+ navigationLinks: [],
+ headings: [],
+ codeSamples: [],
+ isDocumentation: false,
+ };
+ }
+ /**
+ * Create page with browser installation guide
+ */
+ createPageWithGuide(url, error) {
+ const guide = `
+ # Browser Rendering Required
+
+ This page appears to be a Single Page Application (SPA) that requires JavaScript rendering.
+
+ ## Error
+ ${error}
+
+ ## Solution
+
+ To crawl SPA sites, you need Chrome/Chromium browser installed:
+
+ ### macOS
+ \`\`\`bash
+ brew install --cask google-chrome
+ \`\`\`
+
+ ### Windows
+ \`\`\`bash
+ winget install Google.Chrome
+ \`\`\`
+
+ ### Linux
+ \`\`\`bash
+ sudo apt install google-chrome-stable
+ \`\`\`
+
+ ### Alternative: Install puppeteer (includes bundled Chromium)
+ \`\`\`bash
+ npm install puppeteer
+ \`\`\`
+
+ ### Alternative: Set browser path
+ \`\`\`bash
+ export CHROME_PATH=/path/to/chrome
+ \`\`\`
+
+ See docs/SPA_BROWSER_SETUP.md for detailed instructions.
+ `.trim();
+ return {
+ url,
+ title: 'Browser Setup Required',
+ content: guide,
+ searchableContent: guide,
+ sections: [{ title: 'Browser Rendering Required', content: guide }],
+ navigationLinks: [],
+ headings: [{ level: 'h1', text: 'Browser Rendering Required' }],
+ codeSamples: [],
+ isDocumentation: false,
+ };
+ }
+ /**
+ * Cleanup resources (browser, checkpoint, etc.)
+ */
+ async cleanup() {
+ if (this.browserManager) {
+ await this.browserManager.close();
+ this.browserManager = undefined;
+ }
+ }
  /**
  * Delay helper for rate limiting
  */
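fetchWithBrowser() lazily launches a browser, and nothing in the hunks shown here closes it from inside crawl(), so callers presumably own the cleanup. A lifecycle sketch (construction and call shape are assumptions read off this diff):

```js
const crawler = new DocumentationCrawler();
try {
  const result = await crawler.crawl('https://example.com/docs', { workers: 2 });
  console.log(result.totalPages, 'pages crawled');
}
finally {
  await crawler.cleanup(); // closes the lazily launched BrowserManager, if any
}
```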