extract-from-sitemap 0.0.18 → 0.0.19

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (4)
  1. package/README.md +5 -9
  2. package/cli.js +41 -265
  3. package/mod.js +307 -0
  4. package/package.json +1 -1
package/README.md CHANGED
@@ -7,6 +7,11 @@ This repo allows you to create a static markdown bundle based on one or multiple
  3. Set up CI/CD in your repo to automatically update your extracted static files as often as needed. See [CI/CD Setup](#cicd-setup) below.
  4. Use an agent-rewriter such as [next-agent-rewriter](../next-agent-rewriter) to rewrite agent requests to the appropriate static markdown files. In addition, it's best practice to add a link in your html to show the markdown variant is available, like this: `<link rel="alternate" type="text/markdown" href="{path}.md" title="Docs" />`

+ ## File overview
+
+ - `mod.js` - the root of the npm package, for using the sitemap extraction programmatically
+ - `cli.js` - the CLI, usable through `npx extract-from-sitemap`. Adds support for multiple sources and for reading from and writing to the file system
+
  ## CI/CD Setup

  ### GitHub Actions
@@ -107,12 +112,3 @@ This library is in active development. Known limitations:
  - Some CI systems may require additional git configuration

  I am working on addressing these issues.
-
- ## TODO
-
- - ✅ find auto-run and re-deploy github ci/cd rule
- - ✅ ensure `extract-from-sitemap` requires environment variable from github ci (maybe need to run with '--ci' flag or detect somehow)
- - set up `parallel-llmtext` to rerun every 5 minutes. if it works: every 12 hours
- - also set up auto-deploy workflow to occur AFTER this workflow!
- - put files in `public`
- - add readme to `parallel-llmtext` that shows this is a template, kinda, and you can choose any other deployment method but cloudflare is preferred
package/cli.js CHANGED
@@ -7,26 +7,7 @@ const crypto = require("crypto");
  const http = require("http");
  const { URL, URLSearchParams } = require("url");
  const os = require("os");
- const { extractFromSitemap } = require("./mod.js");
-
- /**
- * @typedef {Object} SourceConfig
- * @property {string} title - The title for this source
- * @property {string} [origin] - The origin URL to process (optional)
- * @property {string} [outDir] - Output directory for this source's extracted files
- * @property {boolean} [forceExtract] - Whether to force extraction for this source
- * @property {boolean} [keepOriginalUrls] - Whether to keep original URL structure and not save files locally
- * @property {Array<{title: string, description: string, filename: string, url: string}>} [customUrls] - Custom URLs to extract for this source
- * @property {string} [titleRemovePattern] - Regex pattern to remove from titles (case-insensitive)
- */
- /**
- * @typedef {Object} Config
- * @property {string} title - Title of your document
- * @property {string} description - Description of the documentation collection
- * @property {string} [details] - Optional additional details about the collection
- * @property {string} outDir - Top-level output directory for combined llms.txt
- * @property {SourceConfig[]} sources - Array of source configurations
- */
+ const { processLLMTextConfig } = require("./mod.js");

  const CREDENTIALS_DIR = path.join(os.homedir(), ".llmtext");
  const API_KEY_FILE = path.join(CREDENTIALS_DIR, "api-key");
@@ -220,7 +201,7 @@ class OAuth {

  /**
  * Load configuration from llmtext.json
- * @returns {Promise<Config>} The configuration object
+ * @returns {Promise<any>} The configuration object
  */
  async function loadConfig() {
  const configPath = path.resolve("llmtext.json");
@@ -459,129 +440,6 @@ async function getApiKey() {
  return newApiKey;
  }

- /**
- * Process custom URLs through extraction API
- * @param {Array<{title: string, description: string, filename: string, url: string}>} customUrls - Custom URLs to process
- * @param {string} apiKey - API key for authentication
- * @returns {Promise<Record<string, any>>} Extracted files
- */
- async function processCustomUrls(customUrls, apiKey) {
- const files = {};
-
- for (const customUrl of customUrls) {
- console.log(`📄 Processing custom URL: ${customUrl.url}`);
-
- try {
- const response = await fetch("https://api.parallel.ai/v1beta/extract", {
- method: "POST",
- headers: {
- "Content-Type": "application/json",
- "parallel-beta": "search-extract-2025-10-10",
- "x-api-key": apiKey,
- },
- body: JSON.stringify({
- urls: [customUrl.url],
- full_content: true,
- }),
- });
-
- if (response.ok) {
- const result = await response.json();
- if (result.results && result.results.length > 0) {
- const extracted = result.results[0];
- const filename = customUrl.filename + ".md";
-
- files[filename] = {
- content: extracted.full_content || "",
- title: customUrl.title,
- description: customUrl.description,
- extracted: true,
- publishedDate: extracted.published_date || "",
- status: 200,
- tokens: Math.round((extracted.full_content || "").length / 5),
- originalUrl: customUrl.url,
- };
- }
- } else {
- throw new Error(`${response.status} - ${await response.statusText()}`);
- }
- } catch (error) {
- console.error(
- `❌ Error processing custom URL ${customUrl.url}:`,
- error.message
- );
- }
- }
-
- return files;
- }
-
- /**
- * Get path prefix for links in llms.txt
- * @param {string} topLevelOutDir - Top-level output directory
- * @param {string} sourceOutDir - Source-specific output directory
- * @returns {string} Path prefix for links
- */
- function getPathPrefix(topLevelOutDir, sourceOutDir) {
- const resolvedTopLevel = path.resolve(topLevelOutDir);
- const resolvedSource = path.resolve(sourceOutDir);
-
- if (resolvedSource === resolvedTopLevel) {
- return "";
- }
-
- const relativePath = path.relative(resolvedTopLevel, resolvedSource);
- return relativePath || "";
- }
-
- /**
- * Generate combined llms.txt from all sources
- * @param {string} title - Top-level title
- * @param {string} description - Top-level description
- * @param {string} [details] - Optional top-level details
- * @param {Array<{title: string, files: Record<string, any>, keepOriginalUrls?: boolean, pathPrefix: string}>} allSources - All processed sources
- * @returns {string} Combined llms.txt content
- */
- function generateCombinedLlmsTxt(title, description, details, allSources) {
- let combinedTxt = `# ${title}\n\n> ${description}\n\n`;
-
- if (details) {
- combinedTxt += `${details}\n\n`;
- }
-
- for (const source of allSources) {
- combinedTxt += `## ${source.title}\n\n`;
-
- // Sort files by path for consistent ordering
- const sortedFiles = Object.entries(source.files).sort(([a], [b]) =>
- a.localeCompare(b)
- );
-
- for (const [path, file] of sortedFiles) {
- if (file.content || file.title) {
- const title = file.title || path.replace(".md", "");
- const description = file.description
- ? `: ${file.description.replaceAll("\n", " ")}`
- : "";
-
- // Generate link based on keepOriginalUrls and pathPrefix
- let link;
- if (source.keepOriginalUrls) {
- link = file.originalUrl;
- } else {
- link = source.pathPrefix + (path.startsWith("/") ? path : "/" + path);
- }
-
- combinedTxt += `- [${title}](${link})${description}\n`;
- }
- }
-
- combinedTxt += "\n";
- }
-
- return combinedTxt;
- }
-
  /**
  * Clear stored API key credentials
  */
@@ -598,6 +456,33 @@ async function clearCredentials() {
  }
  }

+ /**
+ * Write file hierarchy to disk
+ * @param {Record<string, {content?: string, error?: string}>} fileHierarchy - File hierarchy to write
+ */
+ function writeFileHierarchy(fileHierarchy) {
+ for (const [filePath, item] of Object.entries(fileHierarchy)) {
+ try {
+ const resolvedPath = path.resolve(filePath);
+ const fileDir = path.dirname(resolvedPath);
+
+ // Create directory if it doesn't exist
+ fs.mkdirSync(fileDir, { recursive: true });
+
+ if (item.content) {
+ fs.writeFileSync(resolvedPath, item.content);
+ console.log(`📝 Wrote: ${filePath}`);
+ } else if (item.error) {
+ console.error(`❌ Error for ${filePath}: ${item.error}`);
+ }
+ } catch (error) {
+ console.error(
+ `❌ Failed to write ${filePath}: ${error.message || "Unknown error"}`
+ );
+ }
+ }
+ }
+
  /**
  * Main function
  */
@@ -615,131 +500,22 @@ async function main() {
  const config = await loadConfig();
  const apiKey = await getApiKey();

- // Ensure top-level output directory exists
- fs.mkdirSync(config.outDir, { recursive: true });
-
- const allSources = [];
- let totalTokens = 0;
- let totalPages = 0;
- let totalErrors = 0;
-
- // Process each source
- for (const [sourceIndex, sourceConfig] of config.sources.entries()) {
- const sourceName = `${sourceConfig.title} (source ${sourceIndex + 1})`;
-
- console.log(
- `\n🌐 Processing ${sourceName} (forceExtract: ${sourceConfig.forceExtract}, keepOriginalUrls: ${sourceConfig.keepOriginalUrls})`
- );
-
- // Ensure source output directory exists (if not keeping original URLs)
- if (!sourceConfig.keepOriginalUrls) {
- fs.mkdirSync(sourceConfig.outDir, { recursive: true });
- }
-
- let sourceFiles = {};
-
- try {
- // Process origin if provided
- if (sourceConfig.origin) {
- const result = await extractFromSitemap(
- sourceConfig.origin,
- sourceConfig.forceExtract,
- apiKey,
- sourceConfig.titleRemovePattern
- );
-
- console.log(
- `✅ Extracted ${result.totalPages} pages with ${result.totalTokens} tokens`
- );
- if (result.errors > 0) {
- console.log(`⚠️ ${result.errors} errors occurred`);
- }
-
- sourceFiles = result.files;
- totalTokens += result.totalTokens;
- totalPages += result.totalPages;
- totalErrors += result.errors;
- }
-
- // Process custom URLs for this source
- if (sourceConfig.customUrls && sourceConfig.customUrls.length > 0) {
- console.log(
- `📋 Processing ${sourceConfig.customUrls.length} custom URLs for this source...`
- );
- const customFiles = await processCustomUrls(
- sourceConfig.customUrls,
- apiKey
- );
-
- // Merge custom files with sitemap files
- sourceFiles = { ...sourceFiles, ...customFiles };
-
- for (const file of Object.values(customFiles)) {
- totalTokens += file.tokens;
- totalPages++;
- }
- }
-
- // Write files to source directory (only if not keeping original URLs)
- if (!sourceConfig.keepOriginalUrls) {
- for (const [filePath, file] of Object.entries(sourceFiles)) {
- let filename = filePath.startsWith("/")
- ? filePath.slice(1)
- : filePath;
-
- const fullFilePath = path.join(sourceConfig.outDir, filename);
- const fileDir = path.dirname(fullFilePath);
+ console.log("\n🔄 Processing LLMText configuration...");

- fs.mkdirSync(fileDir, { recursive: true });
- fs.writeFileSync(fullFilePath, file.content);
+ // Process the entire config using the new function
+ const result = await processLLMTextConfig(config, apiKey);

- console.log(
- `📝 Wrote: ${path.join(sourceConfig.outDir, filename)} (${
- file.tokens
- } tokens)`
- );
- }
- } else {
- console.log(
- `📋 Keeping original URLs - not saving files locally for ${sourceName}`
- );
- }
-
- // Calculate path prefix for this source
- const pathPrefix = sourceConfig.keepOriginalUrls
- ? ""
- : getPathPrefix(config.outDir, sourceConfig.outDir);
-
- // Add to all sources for combined llms.txt
- allSources.push({
- title: sourceConfig.title,
- files: sourceFiles,
- keepOriginalUrls: sourceConfig.keepOriginalUrls,
- pathPrefix: pathPrefix,
- });
- } catch (error) {
- console.error(`❌ Error processing ${sourceName}:`, error.message);
- totalErrors++;
- }
- }
-
- // Generate and write combined llms.txt to top-level outDir
- if (allSources.length > 0) {
- const combinedLlmsTxt = generateCombinedLlmsTxt(
- config.title,
- config.description,
- config.details,
- allSources
- );
- const combinedLlmsTxtPath = path.join(config.outDir, "llms.txt");
- fs.writeFileSync(combinedLlmsTxtPath, combinedLlmsTxt);
- console.log(`\n📋 Generated combined llms.txt: ${combinedLlmsTxtPath}`);
- }
+ // Write all files to disk
+ console.log("\n📁 Writing files to disk...");
+ writeFileHierarchy(result.files);

+ // Print summary
  console.log("\n✨ Extraction completed!");
- console.log(`📊 Total: ${totalPages} pages, ${totalTokens} tokens`);
- if (totalErrors > 0) {
- console.log(`⚠️ Errors: ${totalErrors}`);
+ console.log(
+ `📊 Total: ${result.stats.totalPages} pages, ${result.stats.totalTokens} tokens`
+ );
+ if (result.stats.totalErrors > 0) {
+ console.log(`⚠️ Errors: ${result.stats.totalErrors}`);
  }
  console.log(
  `📁 Top-level output directory: ${path.resolve(config.outDir)}`
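
The net effect of these `cli.js` changes is that the CLI is now a thin wrapper: `main()` loads `llmtext.json`, delegates to `processLLMTextConfig()` from `mod.js`, and persists the returned file map with `writeFileHierarchy()`. For orientation only, here is a minimal sketch of an `llmtext.json` assembled from the `SourceConfig`/`Config` fields documented in the JSDoc above; every value is an illustrative placeholder, not something shipped with the package.

```json
{
  "title": "Example Docs",
  "description": "Combined documentation for example.com",
  "outDir": "public",
  "sources": [
    {
      "title": "Main site",
      "origin": "https://example.com",
      "outDir": "public/main",
      "forceExtract": false,
      "titleRemovePattern": " \\| Example"
    },
    {
      "title": "External references",
      "keepOriginalUrls": true,
      "customUrls": [
        {
          "title": "Changelog",
          "description": "Release notes",
          "filename": "changelog",
          "url": "https://example.com/changelog"
        }
      ]
    }
  ]
}
```

Per the typedefs, the optional fields (`details`, `forceExtract`, `keepOriginalUrls`, `customUrls`, `titleRemovePattern`, and per-source `outDir`) can be omitted.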
package/mod.js CHANGED
@@ -22,6 +22,40 @@
  * @property {number} fetchCount - Number of fetch operations performed
  */

+ /**
+ * @typedef {Object} SourceConfig
+ * @property {string} title - The title for this source
+ * @property {string} [origin] - The origin URL to process (optional)
+ * @property {string} [outDir] - Output directory for this source's extracted files
+ * @property {boolean} [forceExtract] - Whether to force extraction for this source
+ * @property {boolean} [keepOriginalUrls] - Whether to keep original URL structure and not save files locally
+ * @property {Array<{title: string, description: string, filename: string, url: string}>} [customUrls] - Custom URLs to extract for this source
+ * @property {string} [titleRemovePattern] - Regex pattern to remove from titles (case-insensitive)
+ */
+
+ /**
+ * @typedef {Object} LLMTextConfig
+ * @property {string} title - Title of your document
+ * @property {string} description - Description of the documentation collection
+ * @property {string} [details] - Optional additional details about the collection
+ * @property {string} outDir - Top-level output directory for combined llms.txt
+ * @property {SourceConfig[]} sources - Array of source configurations
+ */
+
+ /**
+ * @typedef {Object} FileHierarchyItem
+ * @property {string} [content] - File content if successful
+ * @property {string} [error] - Error message if failed
+ */
+
+ /**
+ * @typedef {Object} ProcessedSource
+ * @property {string} title - Source title
+ * @property {Record<string, FileResult>} files - Extracted files
+ * @property {boolean} keepOriginalUrls - Whether to keep original URLs
+ * @property {string} pathPrefix - Path prefix for links
+ */
+
  /**
  * Extract content from sitemap URLs with markdown variant detection
  * @param {string} origin - The origin URL to extract from
@@ -176,6 +210,279 @@ export async function extractFromSitemap(
  };
  }

+ /**
+ * Process custom URLs through extraction API
+ * @param {Array<{title: string, description: string, filename: string, url: string}>} customUrls - Custom URLs to process
+ * @param {string} apiKey - API key for authentication
+ * @returns {Promise<Record<string, FileResult>>} Extracted files
+ */
+ export async function processCustomUrls(customUrls, apiKey) {
+ const files = {};
+
+ for (const customUrl of customUrls) {
+ try {
+ const response = await fetch("https://api.parallel.ai/v1beta/extract", {
+ method: "POST",
+ headers: {
+ "Content-Type": "application/json",
+ "parallel-beta": "search-extract-2025-10-10",
+ "x-api-key": apiKey,
+ },
+ body: JSON.stringify({
+ urls: [customUrl.url],
+ full_content: true,
+ }),
+ });
+
+ if (response.ok) {
+ const result = await response.json();
+ if (result.results && result.results.length > 0) {
+ const extracted = result.results[0];
+ const filename = customUrl.filename + ".md";
+
+ files[filename] = {
+ content: extracted.full_content || "",
+ title: customUrl.title,
+ description: customUrl.description,
+ extracted: true,
+ publishedDate: extracted.published_date || "",
+ status: 200,
+ tokens: Math.round((extracted.full_content || "").length / 5),
+ originalUrl: customUrl.url,
+ };
+ }
+ } else {
+ throw new Error(`${response.status} - ${await response.statusText()}`);
+ }
+ } catch (error) {
+ const filename = customUrl.filename + ".md";
+ files[filename] = {
+ error: error instanceof Error ? error.message : "Unknown error",
+ content: "",
+ title: customUrl.title,
+ description: customUrl.description,
+ extracted: false,
+ status: 0,
+ tokens: 0,
+ publishedDate: "",
+ originalUrl: customUrl.url,
+ };
+ }
+ }
+
+ return files;
+ }
+
+ /**
+ * Process LLMText config and generate file hierarchy
+ * @param {LLMTextConfig} config - The LLMText configuration
+ * @param {string} apiKey - Parallel API key
+ * @returns {Promise<{files: Record<string, FileHierarchyItem>, sources: ProcessedSource[], stats: {totalTokens: number, totalPages: number, totalErrors: number}}>}
+ */
+ export async function processLLMTextConfig(config, apiKey) {
+ const allSources = [];
+ let totalTokens = 0;
+ let totalPages = 0;
+ let totalErrors = 0;
+
+ // Process each source
+ for (const sourceConfig of config.sources) {
+ let sourceFiles = {};
+
+ try {
+ // Process origin if provided
+ if (sourceConfig.origin) {
+ const result = await extractFromSitemap(
+ sourceConfig.origin,
+ sourceConfig.forceExtract || false,
+ apiKey,
+ sourceConfig.titleRemovePattern
+ );
+
+ sourceFiles = result.files;
+ totalTokens += result.totalTokens;
+ totalPages += result.totalPages;
+ totalErrors += result.errors;
+ }
+
+ // Process custom URLs for this source
+ if (sourceConfig.customUrls && sourceConfig.customUrls.length > 0) {
+ const customFiles = await processCustomUrls(
+ sourceConfig.customUrls,
+ apiKey
+ );
+
+ // Merge custom files with sitemap files
+ sourceFiles = { ...sourceFiles, ...customFiles };
+
+ for (const file of Object.values(customFiles)) {
+ totalTokens += file.tokens;
+ totalPages++;
+ if (file.error) totalErrors++;
+ }
+ }
+
+ // Calculate path prefix for this source
+ const pathPrefix = sourceConfig.keepOriginalUrls
+ ? ""
+ : getPathPrefix(config.outDir, sourceConfig.outDir || config.outDir);
+
+ // Add to all sources
+ allSources.push({
+ title: sourceConfig.title,
+ files: sourceFiles,
+ keepOriginalUrls: sourceConfig.keepOriginalUrls || false,
+ pathPrefix: pathPrefix,
+ outDir: sourceConfig.outDir || config.outDir,
+ });
+ } catch (error) {
+ totalErrors++;
+ // Add empty source with error
+ allSources.push({
+ title: sourceConfig.title,
+ files: {
+ error: {
+ error: error instanceof Error ? error.message : "Unknown error",
+ content: "",
+ title: "",
+ description: "",
+ extracted: false,
+ status: 0,
+ tokens: 0,
+ publishedDate: "",
+ originalUrl: "",
+ },
+ },
+ keepOriginalUrls: sourceConfig.keepOriginalUrls || false,
+ pathPrefix: "",
+ outDir: sourceConfig.outDir || config.outDir,
+ });
+ }
+ }
+
+ // Generate file hierarchy
+ const fileHierarchy = {};
+
+ // Add source files
+ for (const source of allSources) {
+ if (!source.keepOriginalUrls) {
+ for (const [filePath, file] of Object.entries(source.files)) {
+ let filename = filePath.startsWith("/") ? filePath.slice(1) : filePath;
+ const fullPath = `${source.outDir}/${filename}`;
+
+ fileHierarchy[fullPath] = file.error
+ ? { error: file.error }
+ : { content: file.content };
+ }
+ }
+ }
+
+ // Generate combined llms.txt
+ const combinedLlmsTxt = generateCombinedLlmsTxt(
+ config.title,
+ config.description,
+ config.details,
+ allSources
+ );
+
+ fileHierarchy[`${config.outDir}/llms.txt`] = {
+ content: combinedLlmsTxt,
+ };
+
+ return {
+ files: fileHierarchy,
+ sources: allSources,
+ stats: {
+ totalTokens,
+ totalPages,
+ totalErrors,
+ },
+ };
+ }
+
+ /**
+ * Generate combined llms.txt from all sources
+ * @param {string} title - Top-level title
+ * @param {string} description - Top-level description
+ * @param {string} [details] - Optional top-level details
+ * @param {ProcessedSource[]} allSources - All processed sources
+ * @returns {string} Combined llms.txt content
+ */
+ function generateCombinedLlmsTxt(title, description, details, allSources) {
+ let combinedTxt = `# ${title}\n\n> ${description}\n\n`;
+
+ if (details) {
+ combinedTxt += `${details}\n\n`;
+ }
+
+ for (const source of allSources) {
+ combinedTxt += `## ${source.title}\n\n`;
+
+ // Sort files by path for consistent ordering
+ const sortedFiles = Object.entries(source.files).sort(([a], [b]) =>
+ a.localeCompare(b)
+ );
+
+ for (const [path, file] of sortedFiles) {
+ if (file.content || file.title) {
+ const title = file.title || path.replace(".md", "");
+ const description = file.description
+ ? `: ${file.description.replaceAll("\n", " ")}`
+ : "";
+
+ // Generate link based on keepOriginalUrls and pathPrefix
+ let link;
+ if (source.keepOriginalUrls) {
+ link = file.originalUrl;
+ } else {
+ link = source.pathPrefix + (path.startsWith("/") ? path : "/" + path);
+ }
+
+ combinedTxt += `- [${title}](${link})${description}\n`;
+ }
+ }
+
+ combinedTxt += "\n";
+ }
+
+ return combinedTxt;
+ }
+
+ /**
+ * Get path prefix for links in llms.txt
+ * @param {string} topLevelOutDir - Top-level output directory
+ * @param {string} sourceOutDir - Source-specific output directory
+ * @returns {string} Path prefix for links
+ */
+ function getPathPrefix(topLevelOutDir, sourceOutDir) {
+ // Normalize paths for comparison
+ const normalizeSlashes = (p) => p.replace(/\\/g, "/");
+ const normalizedTop = normalizeSlashes(topLevelOutDir);
+ const normalizedSource = normalizeSlashes(sourceOutDir);
+
+ if (normalizedSource === normalizedTop) {
+ return "";
+ }
+
+ // Calculate relative path
+ const topParts = normalizedTop.split("/").filter(Boolean);
+ const sourceParts = normalizedSource.split("/").filter(Boolean);
+
+ // Find common prefix
+ let commonLength = 0;
+ while (
+ commonLength < topParts.length &&
+ commonLength < sourceParts.length &&
+ topParts[commonLength] === sourceParts[commonLength]
+ ) {
+ commonLength++;
+ }
+
+ // Build relative path
+ const relativeParts = sourceParts.slice(commonLength);
+ return relativeParts.length > 0 ? relativeParts.join("/") : "";
+ }
+
  /**
  * Clean title by removing custom pattern if provided
  * @param {string} title - Original title
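
Because `processLLMTextConfig()` is now exported from `mod.js` (the package's `main`), the pipeline can be driven without the CLI. The sketch below is illustrative and not part of the package: it assumes an ESM context and a Parallel API key in a `PARALLEL_API_KEY` environment variable, builds a config shaped after the `LLMTextConfig` typedef above with placeholder values, and writes the returned file map the same way the CLI's `writeFileHierarchy()` does.

```js
// Illustrative sketch only; assumes ESM and a PARALLEL_API_KEY environment variable.
import fs from "node:fs";
import path from "node:path";
import { processLLMTextConfig } from "extract-from-sitemap"; // package "main" is mod.js

// Config object shaped after the LLMTextConfig typedef (placeholder values).
const config = {
  title: "Example Docs",
  description: "Combined documentation bundle",
  outDir: "public",
  sources: [{ title: "Main site", origin: "https://example.com", outDir: "public/main" }],
};

const result = await processLLMTextConfig(config, process.env.PARALLEL_API_KEY);

// result.files maps output paths to FileHierarchyItem: { content } on success, { error } on failure.
for (const [filePath, item] of Object.entries(result.files)) {
  if (item.error) {
    console.error(`❌ ${filePath}: ${item.error}`);
    continue;
  }
  fs.mkdirSync(path.dirname(filePath), { recursive: true });
  fs.writeFileSync(filePath, item.content ?? "");
}

console.log(result.stats); // { totalTokens, totalPages, totalErrors }
```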
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
  "name": "extract-from-sitemap",
  "bin": "cli.js",
- "version": "0.0.18",
+ "version": "0.0.19",
  "main": "mod.js",
  "description": "A module and CLI that allows extracting all pages from a sitemap into markdown and a llms.txt, using Parallel.ai APIs.",
  "files": [