@kenjura/ursa 0.43.0 → 0.45.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,3 +1,8 @@
1
+ # 0.44.0
2
+ 2025-12-16
3
+
4
+ - Added 'sections' metadata property with hierarchical section structure
5
+
1
6
  # 0.43.0
2
7
  2025-12-14
3
8
 
package/README.md CHANGED
@@ -94,6 +94,26 @@ important-document
94
94
  classes/wizard
95
95
  ```
96
96
 
97
+ ### Large Workloads
98
+
99
+ For sites with many documents (hundreds or thousands), you may need to increase Node.js memory limits:
100
+
101
+ ```bash
102
+ # Increase heap size to 8GB for large sites
103
+ node --max-old-space-size=8192 $(which ursa) serve content
104
+
105
+ # Or use the npm scripts
106
+ npm run serve:large content
107
+ npm run generate:large content
108
+
109
+ # You can also set environment variables to tune batch processing
110
+ URSA_BATCH_SIZE=25 ursa serve content # Process fewer files at once (default: 50)
111
+ ```
112
+
113
+ **Environment Variables for Performance Tuning:**
114
+ - `URSA_BATCH_SIZE` - Number of files to process concurrently (default: 50). Lower values use less memory but are slower.
115
+ - `NODE_OPTIONS="--max-old-space-size=8192"` - Increase Node.js heap size for very large sites.
116
+
97
117
  ## Library Usage
98
118
 
99
119
  ### ES Modules (recommended)
package/bin/ursa.js CHANGED
@@ -38,6 +38,11 @@ yargs(hideBin(process.argv))
38
38
  describe: 'Path to whitelist file containing patterns for files to include',
39
39
  type: 'string'
40
40
  })
41
+ .option('exclude', {
42
+ alias: 'x',
43
+ describe: 'Folders to exclude: comma-separated paths relative to source, or path to file with one folder per line',
44
+ type: 'string'
45
+ })
41
46
  .option('clean', {
42
47
  alias: 'c',
43
48
  describe: 'Ignore cached hashes and regenerate all files',
@@ -50,12 +55,16 @@ yargs(hideBin(process.argv))
50
55
  const meta = argv.meta ? resolve(argv.meta) : PACKAGE_META;
51
56
  const output = resolve(argv.output);
52
57
  const whitelist = argv.whitelist ? resolve(argv.whitelist) : null;
58
+ const exclude = argv.exclude || null;
53
59
  const clean = argv.clean;
54
60
 
55
61
  console.log(`Generating site from ${source} to ${output} using meta from ${meta}`);
56
62
  if (whitelist) {
57
63
  console.log(`Using whitelist: ${whitelist}`);
58
64
  }
65
+ if (exclude) {
66
+ console.log(`Excluding: ${exclude}`);
67
+ }
59
68
  if (clean) {
60
69
  console.log(`Clean build: ignoring cached hashes`);
61
70
  }
@@ -66,6 +75,7 @@ yargs(hideBin(process.argv))
66
75
  _meta: meta,
67
76
  _output: output,
68
77
  _whitelist: whitelist,
78
+ _exclude: exclude,
69
79
  _clean: clean
70
80
  });
71
81
  console.log('Site generation completed successfully!');
@@ -107,6 +117,11 @@ yargs(hideBin(process.argv))
107
117
  describe: 'Path to whitelist file containing patterns for files to include',
108
118
  type: 'string'
109
119
  })
120
+ .option('exclude', {
121
+ alias: 'x',
122
+ describe: 'Folders to exclude: comma-separated paths relative to source, or path to file with one folder per line',
123
+ type: 'string'
124
+ })
110
125
  .option('clean', {
111
126
  alias: 'c',
112
127
  describe: 'Ignore cached hashes and regenerate all files',
@@ -120,6 +135,7 @@ yargs(hideBin(process.argv))
120
135
  const output = resolve(argv.output);
121
136
  const port = argv.port;
122
137
  const whitelist = argv.whitelist ? resolve(argv.whitelist) : null;
138
+ const exclude = argv.exclude || null;
123
139
  const clean = argv.clean;
124
140
 
125
141
  console.log(`Starting development server...`);
@@ -130,6 +146,9 @@ yargs(hideBin(process.argv))
130
146
  if (whitelist) {
131
147
  console.log(`Using whitelist: ${whitelist}`);
132
148
  }
149
+ if (exclude) {
150
+ console.log(`Excluding: ${exclude}`);
151
+ }
133
152
 
134
153
  try {
135
154
  const { serve } = await import('../src/serve.js');
@@ -139,6 +158,7 @@ yargs(hideBin(process.argv))
139
158
  _output: output,
140
159
  port: port,
141
160
  _whitelist: whitelist,
161
+ _exclude: exclude,
142
162
  _clean: clean
143
163
  });
144
164
  } catch (error) {
@@ -9,8 +9,15 @@
9
9
  ${embeddedStyle}
10
10
  </style>
11
11
  <script>
12
- // Embed search index data
12
+ // Search index loaded asynchronously from separate file to reduce page size
13
13
  window.SEARCH_INDEX = ${searchIndex};
14
+ // Lazy load full search index if placeholder is empty
15
+ if (!window.SEARCH_INDEX || window.SEARCH_INDEX.length === 0) {
16
+ fetch('/public/search-index.json')
17
+ .then(r => r.json())
18
+ .then(data => { window.SEARCH_INDEX = data; })
19
+ .catch(() => { window.SEARCH_INDEX = []; });
20
+ }
14
21
  </script>
15
22
  <script src="/public/search.js"></script>
16
23
 
package/package.json CHANGED
@@ -2,7 +2,7 @@
2
2
  "name": "@kenjura/ursa",
3
3
  "author": "Andrew London <andrew@kenjura.com>",
4
4
  "type": "module",
5
- "version": "0.43.0",
5
+ "version": "0.45.0",
6
6
  "description": "static site generator from MD/wikitext/YML",
7
7
  "main": "lib/index.js",
8
8
  "bin": {
@@ -11,6 +11,8 @@
11
11
  "scripts": {
12
12
  "serve": "nodemon --config nodemon.json src/serve.js",
13
13
  "serve:debug": "nodemon --config nodemon.json --inspect-brk src/serve.js",
14
+ "serve:large": "node --max-old-space-size=8192 bin/ursa.js serve",
15
+ "generate:large": "node --max-old-space-size=8192 bin/ursa.js generate",
14
16
  "cli:debug": "node --inspect bin/ursa.js",
15
17
  "cli:debug-brk": "node --inspect-brk bin/ursa.js",
16
18
  "start": "node src/index.js",
@@ -1,13 +1,34 @@
1
1
  import { resolve } from "path";
2
2
  import { readdir } from "fs/promises";
3
3
 
4
/**
 * Recursively read directory contents.
 * Walks the tree sequentially (one directory at a time) to keep memory usage
 * low on very large trees, while preserving the preorder listing of the
 * original recursive implementation: each directory entry is immediately
 * followed by its own contents, in readdir order. (A plain LIFO stack would
 * visit sibling directories in reverse and group all siblings before any
 * descendants.)
 * Unreadable directories (permission errors, races) are warned about and
 * skipped rather than aborting the whole walk.
 * @param {string} dir - Directory to read
 * @returns {Promise<string[]>} Array of absolute file and directory paths
 */
export async function recurse(dir) {
  const results = [];

  // Inner sequential walker; recursion depth equals directory nesting depth,
  // so stack usage stays proportional to tree depth, not tree size.
  async function walk(current) {
    let dirents;
    try {
      dirents = await readdir(current, { withFileTypes: true });
    } catch (e) {
      // Skip directories we can't read (permission errors, etc.)
      console.warn(`Warning: Could not read directory ${current}: ${e.message}`);
      return;
    }
    for (const dirent of dirents) {
      const res = resolve(current, dirent.name);
      results.push(res);
      if (dirent.isDirectory()) {
        await walk(res);
      }
    }
  }

  await walk(dir);
  return results;
}
@@ -0,0 +1,76 @@
1
/**
 * Extract sections from markdown content based on headings.
 * Creates a hierarchical structure of sections.
 */

// Best-effort matcher for fenced code blocks (``` or ~~~), so lines such as
// "# comment" inside a fence are not mistaken for headings. The closing fence
// must start with the same (or longer) run of the opening character, per
// CommonMark. Unclosed fences are left as-is (best effort).
const FENCED_CODE_REGEX = /^(`{3,}|~{3,})[^\n]*\n[\s\S]*?^\1[^\n]*$/gm;

/**
 * Extract sections from markdown content
 * @param {string} content - The markdown content
 * @returns {Array} Array of section objects with name and optional children
 */
export function extractSections(content) {
  if (!content) return [];

  // Strip fenced code blocks before scanning; otherwise shell comments and
  // similar "#"-prefixed lines inside fences would be reported as sections.
  const prose = content.replace(FENCED_CODE_REGEX, "");

  // Match all markdown headings (# to ######)
  // Handles both "# Heading" and "#Heading" formats
  const headingRegex = /^(#{1,6})\s*(.+?)$/gm;

  const headings = [];
  let match;

  while ((match = headingRegex.exec(prose)) !== null) {
    const level = match[1].length; // Number of # characters
    const name = match[2].trim();
    headings.push({ level, name });
  }

  if (headings.length === 0) return [];

  // Build hierarchical structure
  return buildSectionTree(headings);
}

/**
 * Build a hierarchical tree from flat heading list
 * @param {Array} headings - Array of {level, name} objects
 * @returns {Array} Hierarchical section tree
 */
function buildSectionTree(headings) {
  const root = { level: 0, children: [] };
  const stack = [root];

  for (const heading of headings) {
    const section = { name: heading.name, level: heading.level, children: [] };

    // Pop stack until we find a parent with lower level
    while (stack.length > 1 && stack[stack.length - 1].level >= heading.level) {
      stack.pop();
    }

    // Add to parent's children
    const parent = stack[stack.length - 1];
    parent.children.push(section);

    // Push this section onto stack (it might have children)
    stack.push(section);
  }

  // Clean up: remove level and empty children arrays
  cleanupTree(root.children);

  return root.children;
}

/**
 * Remove level property and empty children arrays from the tree
 * @param {Array} sections - Section nodes to clean, mutated in place
 */
function cleanupTree(sections) {
  for (const section of sections) {
    delete section.level;
    if (section.children && section.children.length > 0) {
      cleanupTree(section.children);
    } else {
      delete section.children;
    }
  }
}
@@ -1,6 +1,77 @@
1
1
  import { recurse } from "../helper/recursive-readdir.js";
2
2
 
3
3
  import { copyFile, mkdir, readdir, readFile, stat } from "fs/promises";
4
+
5
+ // Concurrency limiter for batch processing to avoid memory exhaustion
6
+ const BATCH_SIZE = parseInt(process.env.URSA_BATCH_SIZE || '50', 10);
7
+
8
/**
 * Progress reporter that updates lines in place (like pnpm)
 */
class ProgressReporter {
  constructor() {
    // Map of status-line name -> last rendered text (only tracked in TTY mode)
    this.lines = {};
    this.isTTY = process.stdout.isTTY;
  }

  // Update a named status line in place
  status(name, message) {
    if (!this.isTTY) {
      return; // in-place updates are dropped entirely when not on a TTY
    }
    const rendered = `${name}: ${message}`;
    this.lines[name] = rendered;
    // Clear the current line, then redraw it with the new text
    process.stdout.write(`\r\x1b[K${rendered}`);
  }

  // Complete a status line (print final state and newline)
  done(name, message) {
    const finalText = `${name}: ${message}`;
    if (this.isTTY) {
      process.stdout.write(`\r\x1b[K${finalText}\n`);
    } else {
      console.log(finalText);
    }
    delete this.lines[name];
  }

  // Regular log that doesn't get overwritten
  log(message) {
    if (this.isTTY) {
      // Erase any in-place status line before printing so it isn't mangled
      process.stdout.write(`\r\x1b[K${message}\n`);
    } else {
      console.log(message);
    }
  }

  // Clear all status lines
  clear() {
    if (this.isTTY) {
      process.stdout.write(`\r\x1b[K`);
    }
  }
}
55
+
56
+ const progress = new ProgressReporter();
57
+
58
/**
 * Process items in batches to limit memory usage
 * @param {Array} items - Items to process
 * @param {Function} processor - Async function to process each item
 * @param {number} batchSize - Max concurrent operations
 * @returns {Promise<Array>} Results in the same order as `items`
 */
async function processBatched(items, processor, batchSize = BATCH_SIZE) {
  // Guard against NaN / zero / negative sizes (e.g. URSA_BATCH_SIZE="abc"
  // makes parseInt yield NaN): with a non-positive step the loop below would
  // either never terminate (step 0) or silently process nothing (step NaN).
  // Fall back to the documented default of 50.
  const step = Number.isInteger(batchSize) && batchSize > 0 ? batchSize : 50;
  const results = [];
  for (let i = 0; i < items.length; i += step) {
    const batch = items.slice(i, i + step);
    const batchResults = await Promise.all(batch.map(processor));
    results.push(...batchResults);
    // Allow GC to run between batches (only effective under --expose-gc)
    if (global.gc) global.gc();
  }
  return results;
}
4
75
  import { getAutomenu } from "../helper/automenu.js";
5
76
  import { filterAsync } from "../helper/filterAsync.js";
6
77
  import { isDirectory } from "../helper/isDirectory.js";
@@ -21,6 +92,7 @@ import {
21
92
  markInactiveLinks,
22
93
  } from "../helper/linkValidator.js";
23
94
  import { getAndIncrementBuildId } from "../helper/ursaConfig.js";
95
+ import { extractSections } from "../helper/sectionExtractor.js";
24
96
 
25
97
  // Helper function to build search index from processed files
26
98
  function buildSearchIndex(jsonCache, source, output) {
@@ -67,15 +139,80 @@ import { createWhitelistFilter } from "../helper/whitelistFilter.js";
67
139
  const DEFAULT_TEMPLATE_NAME =
68
140
  process.env.DEFAULT_TEMPLATE_NAME ?? "default-template";
69
141
 
142
/**
 * Parse the exclude option into a set of folder patterns.
 * The value is either a comma-separated list of folder paths, or a path to an
 * existing file containing one folder per line ('#' lines are comments).
 * @param {string} excludeOption - The exclude option value (list or file path)
 * @param {string} source - Source directory path (currently unused here;
 *   matching against source-relative paths is done by createExcludeFilter)
 * @returns {Promise<Set<string>>} Set of excluded folder paths, stored as
 *   source-relative paths with leading/trailing slashes stripped
 */
async function parseExcludeOption(excludeOption, source) {
  const excludedPaths = new Set();

  if (!excludeOption) return excludedPaths;

  // Check if it's a file path (exists as a file)
  // NOTE(review): relies on existsSync being imported at module scope — confirm
  const isFile = existsSync(excludeOption) && (await stat(excludeOption)).isFile();

  let patterns;
  if (isFile) {
    // Read patterns from file (one per line); trim() also strips CR on CRLF files
    const content = await readFile(excludeOption, 'utf8');
    patterns = content.split('\n')
      .map(line => line.trim())
      .filter(line => line && !line.startsWith('#')); // Skip empty lines and comments
  } else {
    // Treat as comma-separated list
    patterns = excludeOption.split(',').map(p => p.trim()).filter(Boolean);
  }

  // Normalize patterns: strip leading/trailing slashes; entries stay relative
  for (const pattern of patterns) {
    // Remove leading/trailing slashes and normalize
    const normalized = pattern.replace(/^\/+|\/+$/g, '');
    // Store as relative path for easier matching
    excludedPaths.add(normalized);
  }

  return excludedPaths;
}
178
+
179
/**
 * Create a filter function that excludes files located in specified folders.
 * @param {Set<string>} excludedPaths - Set of excluded folder paths
 * @param {string} source - Source directory path
 * @returns {Function} Predicate: true to keep a file, false to exclude it
 */
function createExcludeFilter(excludedPaths, source) {
  // Nothing to exclude: accept every file.
  if (excludedPaths.size === 0) {
    return () => true;
  }

  // A path matches when it IS the excluded folder, or sits beneath it
  // (either path-separator style).
  const matchesExclusion = (relativePath, excluded) =>
    relativePath === excluded ||
    relativePath.startsWith(`${excluded}/`) ||
    relativePath.startsWith(`${excluded}\\`);

  return (filePath) => {
    // Rewrite the absolute path as a source-relative one before matching.
    const relativePath = filePath.replace(source, '').replace(/^\/+/, '');

    // Keep the file only if it is not inside any excluded folder.
    for (const excluded of excludedPaths) {
      if (matchesExclusion(relativePath, excluded)) {
        return false;
      }
    }
    return true;
  };
}
205
+
70
206
  export async function generate({
71
207
  _source = join(process.cwd(), "."),
72
208
  _meta = join(process.cwd(), "meta"),
73
209
  _output = join(process.cwd(), "build"),
74
210
  _whitelist = null,
211
+ _exclude = null,
75
212
  _incremental = false, // Legacy flag, now ignored (always incremental)
76
213
  _clean = false, // When true, ignore cache and regenerate all files
77
214
  } = {}) {
78
- console.log({ _source, _meta, _output, _whitelist, _clean });
215
+ console.log({ _source, _meta, _output, _whitelist, _exclude, _clean });
79
216
  const source = resolve(_source) + "/";
80
217
  const meta = resolve(_meta);
81
218
  const output = resolve(_output) + "/";
@@ -89,6 +226,15 @@ export async function generate({
89
226
  : Boolean;
90
227
  let allSourceFilenames = allSourceFilenamesUnfiltered.filter(includeFilter);
91
228
 
229
+ // Apply exclude filter if specified
230
+ if (_exclude) {
231
+ const excludedPaths = await parseExcludeOption(_exclude, source);
232
+ const excludeFilter = createExcludeFilter(excludedPaths, source);
233
+ const beforeCount = allSourceFilenames.length;
234
+ allSourceFilenames = allSourceFilenames.filter(excludeFilter);
235
+ progress.log(`Exclude filter applied: ${beforeCount - allSourceFilenames.length} files excluded`);
236
+ }
237
+
92
238
  // Apply whitelist filter if specified
93
239
  if (_whitelist) {
94
240
  const whitelistFilter = await createWhitelistFilter(_whitelist, source);
@@ -125,13 +271,13 @@ export async function generate({
125
271
 
126
272
  // Build set of valid internal paths for link validation (must be before menu)
127
273
  const validPaths = buildValidPaths(allSourceFilenamesThatAreArticles, source);
128
- console.log(`Built ${validPaths.size} valid paths for link validation`);
274
+ progress.log(`Built ${validPaths.size} valid paths for link validation`);
129
275
 
130
276
  const menu = await getMenu(allSourceFilenames, source, validPaths);
131
277
 
132
278
  // Get and increment build ID from .ursa.json
133
279
  const buildId = getAndIncrementBuildId(resolve(_source));
134
- console.log(`Build #${buildId}`);
280
+ progress.log(`Build #${buildId}`);
135
281
 
136
282
  // Generate footer content
137
283
  const footer = await getFooter(source, _source, buildId);
@@ -140,9 +286,9 @@ export async function generate({
140
286
  let hashCache = new Map();
141
287
  if (!_clean) {
142
288
  hashCache = await loadHashCache(source);
143
- console.log(`Loaded ${hashCache.size} cached content hashes from .ursa folder`);
289
+ progress.log(`Loaded ${hashCache.size} cached content hashes from .ursa folder`);
144
290
  } else {
145
- console.log(`Clean build: ignoring cached hashes`);
291
+ progress.log(`Clean build: ignoring cached hashes`);
146
292
  }
147
293
 
148
294
  // create public folder
@@ -153,285 +299,286 @@ export async function generate({
153
299
  // Track errors for error report
154
300
  const errors = [];
155
301
 
156
- // First pass: collect search index data
302
+ // Search index: built incrementally during article processing (lighter memory footprint)
157
303
  const searchIndex = [];
158
- const jsonCache = new Map();
159
-
160
- // Collect basic data for search index
161
- for (const file of allSourceFilenamesThatAreArticles) {
304
+ // Directory index cache: only stores minimal data needed for directory indices
305
+ // Uses WeakRef-style approach - store only what's needed, clear as we go
306
+ const dirIndexCache = new Map();
307
+
308
+ // Track files that were regenerated (for incremental mode stats)
309
+ let regeneratedCount = 0;
310
+ let skippedCount = 0;
311
+ let processedCount = 0;
312
+ const totalArticles = allSourceFilenamesThatAreArticles.length;
313
+
314
+ progress.log(`Processing ${totalArticles} articles in batches of ${BATCH_SIZE}...`);
315
+
316
+ // Single pass: process all articles with batched concurrency to limit memory usage
317
+ await processBatched(allSourceFilenamesThatAreArticles, async (file) => {
162
318
  try {
319
+ processedCount++;
320
+ const shortFile = file.replace(source, '');
321
+ progress.status('Articles', `${processedCount}/${totalArticles} ${shortFile}`);
322
+
163
323
  const rawBody = await readFile(file, "utf8");
164
324
  const type = parse(file).ext;
165
325
  const ext = extname(file);
166
326
  const base = basename(file, ext);
167
327
  const dir = addTrailingSlash(dirname(file)).replace(source, "");
168
328
 
329
+ // Calculate output paths for this file
330
+ const outputFilename = file
331
+ .replace(source, output)
332
+ .replace(parse(file).ext, ".html");
333
+ const url = '/' + outputFilename.replace(output, '');
334
+
335
+ // Generate URL path relative to output (for search index)
336
+ const relativePath = file.replace(source, '').replace(/\.(md|txt|yml)$/, '.html');
337
+ const searchUrl = relativePath.startsWith('/') ? relativePath : '/' + relativePath;
338
+
169
339
  // Generate title from filename (in title case)
170
340
  const title = toTitleCase(base);
171
341
 
172
- // Generate URL path relative to output
173
- const relativePath = file.replace(source, '').replace(/\.(md|txt|yml)$/, '.html');
174
- const url = relativePath.startsWith('/') ? relativePath : '/' + relativePath;
342
+ // Always add to search index (lightweight: title + path only, content added lazily)
343
+ searchIndex.push({
344
+ title: title,
345
+ path: relativePath,
346
+ url: searchUrl,
347
+ content: '' // Content excerpts built lazily to save memory
348
+ });
349
+
350
+ // Check if file needs regeneration
351
+ const needsRegen = _clean || needsRegeneration(file, rawBody, hashCache);
352
+
353
+ if (!needsRegen) {
354
+ skippedCount++;
355
+ // For directory indices, store minimal data (not full bodyHtml)
356
+ dirIndexCache.set(file, {
357
+ name: base,
358
+ url,
359
+ // Don't store contents or bodyHtml - saves significant memory
360
+ });
361
+ return; // Skip regenerating this file
362
+ }
363
+
364
+ regeneratedCount++;
365
+
366
+ const fileMeta = extractMetadata(rawBody);
367
+ const rawMeta = extractRawMetadata(rawBody);
368
+ const transformedMetadata = await getTransformedMetadata(
369
+ dirname(file),
370
+ fileMeta
371
+ );
175
372
 
176
- // Basic content processing for search (without full rendering)
373
+ // Calculate the document's URL path (e.g., "/character/index.html")
374
+ const docUrlPath = '/' + dir + base + '.html';
375
+
177
376
  const body = renderFile({
178
377
  fileContents: rawBody,
179
378
  type,
180
379
  dirname: dir,
181
380
  basename: base,
182
381
  });
183
-
184
- // Extract text content from body (strip HTML tags for search)
185
- const textContent = body && body.replace && body.replace(/<[^>]*>/g, ' ').replace(/\s+/g, ' ').trim() || 'body is undefined for some reason'
186
- const excerpt = textContent.substring(0, 200); // First 200 chars for preview
187
-
188
- searchIndex.push({
189
- title: title,
190
- path: relativePath,
191
- url: url,
192
- content: excerpt
193
- });
194
- } catch (e) {
195
- console.error(`Error processing ${file} (first pass): ${e.message}`);
196
- errors.push({ file, phase: 'search-index', error: e });
197
- }
198
- }
199
-
200
- console.log(`Built search index with ${searchIndex.length} entries`);
201
382
 
202
- // Track files that were regenerated (for incremental mode stats)
203
- let regeneratedCount = 0;
204
- let skippedCount = 0;
205
-
206
- // Second pass: process individual articles with search data available
207
- await Promise.all(
208
- allSourceFilenamesThatAreArticles.map(async (file) => {
383
+ // Find nearest style.css or _style.css up the tree
384
+ let embeddedStyle = "";
209
385
  try {
210
- const rawBody = await readFile(file, "utf8");
211
- const type = parse(file).ext;
212
- const ext = extname(file);
213
- const base = basename(file, ext);
214
- const dir = addTrailingSlash(dirname(file)).replace(source, "");
215
-
216
- // Calculate output paths for this file
217
- const outputFilename = file
218
- .replace(source, output)
219
- .replace(parse(file).ext, ".html");
220
- const url = '/' + outputFilename.replace(output, '');
221
-
222
- // Skip files that haven't changed (unless --clean flag is set)
223
- if (!_clean && !needsRegeneration(file, rawBody, hashCache)) {
224
- skippedCount++;
225
- // Still need to populate jsonCache for directory indices
226
- const meta = extractMetadata(rawBody);
227
- const body = renderFile({
228
- fileContents: rawBody,
229
- type,
230
- dirname: dir,
231
- basename: base,
232
- });
233
- jsonCache.set(file, {
234
- name: base,
235
- url,
236
- contents: rawBody,
237
- bodyHtml: body,
238
- metadata: meta,
239
- transformedMetadata: '',
240
- });
241
- return; // Skip regenerating this file
386
+ const css = await findStyleCss(resolve(_source, dir));
387
+ if (css) {
388
+ embeddedStyle = css;
242
389
  }
243
-
244
- console.log(`processing article ${file}`);
245
- regeneratedCount++;
246
-
247
- const meta = extractMetadata(rawBody);
248
- const rawMeta = extractRawMetadata(rawBody);
249
- const bodyLessMeta = rawMeta ? rawBody.replace(rawMeta, "") : rawBody;
250
- const transformedMetadata = await getTransformedMetadata(
251
- dirname(file),
252
- meta
253
- );
254
-
255
- // Calculate the document's URL path (e.g., "/character/index.html")
256
- const docUrlPath = '/' + dir + base + '.html';
257
-
258
- // Generate title from filename (in title case)
259
- const title = toTitleCase(base);
260
-
261
- const body = renderFile({
262
- fileContents: rawBody,
263
- type,
264
- dirname: dir,
265
- basename: base,
266
- });
267
-
268
- // Find nearest style.css or _style.css up the tree
269
- let embeddedStyle = "";
270
- try {
271
- const css = await findStyleCss(resolve(_source, dir));
272
- if (css) {
273
- embeddedStyle = css;
274
- }
275
- } catch (e) {
276
- // ignore
277
- console.error(e);
278
- }
279
-
280
- const requestedTemplateName = meta && meta.template;
281
- const template =
282
- templates[requestedTemplateName] || templates[DEFAULT_TEMPLATE_NAME];
283
-
284
- if (!template) {
285
- throw new Error(`Template not found. Requested: "${requestedTemplateName || DEFAULT_TEMPLATE_NAME}". Available templates: ${Object.keys(templates).join(', ') || 'none'}`);
286
- }
287
-
288
- // Insert embeddedStyle just before </head> if present, else at top
289
- let finalHtml = template
290
- .replace("${title}", title)
291
- .replace("${menu}", menu)
292
- .replace("${meta}", JSON.stringify(meta))
293
- .replace("${transformedMetadata}", transformedMetadata)
294
- .replace("${body}", body)
295
- .replace("${embeddedStyle}", embeddedStyle)
296
- .replace("${searchIndex}", JSON.stringify(searchIndex))
297
- .replace("${footer}", footer);
298
-
299
- // Resolve links and mark broken internal links as inactive (debug mode on)
300
- // Pass docUrlPath so relative links can be resolved correctly
301
- finalHtml = markInactiveLinks(finalHtml, validPaths, docUrlPath, false);
390
+ } catch (e) {
391
+ // ignore
392
+ console.error(e);
393
+ }
302
394
 
303
- console.log(`writing article to ${outputFilename}`);
395
+ const requestedTemplateName = fileMeta && fileMeta.template;
396
+ const template =
397
+ templates[requestedTemplateName] || templates[DEFAULT_TEMPLATE_NAME];
304
398
 
305
- await outputFile(outputFilename, finalHtml);
399
+ if (!template) {
400
+ throw new Error(`Template not found. Requested: "${requestedTemplateName || DEFAULT_TEMPLATE_NAME}". Available templates: ${Object.keys(templates).join(', ') || 'none'}`);
401
+ }
306
402
 
307
- // json
403
+ // Build final HTML with all replacements in a single chain to reduce intermediate strings
404
+ let finalHtml = template;
405
+ // Use a map of replacements to minimize string allocations
406
+ const replacements = {
407
+ "${title}": title,
408
+ "${menu}": menu,
409
+ "${meta}": JSON.stringify(fileMeta),
410
+ "${transformedMetadata}": transformedMetadata,
411
+ "${body}": body,
412
+ "${embeddedStyle}": embeddedStyle,
413
+ "${searchIndex}": "[]", // Placeholder - search index written separately as JSON file
414
+ "${footer}": footer
415
+ };
416
+ for (const [key, value] of Object.entries(replacements)) {
417
+ finalHtml = finalHtml.replace(key, value);
418
+ }
308
419
 
309
- const jsonOutputFilename = outputFilename.replace(".html", ".json");
310
- const jsonObject = {
311
- name: base,
312
- url,
313
- contents: rawBody,
314
- // bodyLessMeta: bodyLessMeta,
315
- bodyHtml: body,
316
- metadata: meta,
317
- transformedMetadata,
318
- // html: finalHtml,
319
- };
320
- jsonCache.set(file, jsonObject);
321
- const json = JSON.stringify(jsonObject);
322
- console.log(`writing article to ${jsonOutputFilename}`);
323
- await outputFile(jsonOutputFilename, json);
420
+ // Resolve links and mark broken internal links as inactive
421
+ finalHtml = markInactiveLinks(finalHtml, validPaths, docUrlPath, false);
324
422
 
325
- // xml
423
+ await outputFile(outputFilename, finalHtml);
424
+
425
+ // Clear finalHtml reference to allow GC
426
+ finalHtml = null;
326
427
 
327
- const xmlOutputFilename = outputFilename.replace(".html", ".xml");
328
- const xml = `<article>${o2x(jsonObject)}</article>`;
329
- await outputFile(xmlOutputFilename, xml);
330
-
331
- // Update the content hash for this file
332
- updateHash(file, rawBody, hashCache);
333
- } catch (e) {
334
- console.error(`Error processing ${file} (second pass): ${e.message}`);
335
- errors.push({ file, phase: 'article-generation', error: e });
336
- }
337
- })
338
- );
428
+ // JSON output
429
+ const jsonOutputFilename = outputFilename.replace(".html", ".json");
430
+
431
+ // Extract sections for markdown files
432
+ const sections = type === '.md' ? extractSections(rawBody) : [];
433
+
434
+ const jsonObject = {
435
+ name: base,
436
+ url,
437
+ contents: rawBody,
438
+ bodyHtml: body,
439
+ metadata: fileMeta,
440
+ sections,
441
+ transformedMetadata,
442
+ };
443
+
444
+ // Store minimal data for directory indices
445
+ dirIndexCache.set(file, {
446
+ name: base,
447
+ url,
448
+ });
449
+
450
+ const json = JSON.stringify(jsonObject);
451
+ await outputFile(jsonOutputFilename, json);
339
452
 
340
- // Log build stats
341
- console.log(`Build: ${regeneratedCount} regenerated, ${skippedCount} unchanged`);
453
+ // XML output
454
+ const xmlOutputFilename = outputFilename.replace(".html", ".xml");
455
+ const xml = `<article>${o2x(jsonObject)}</article>`;
456
+ await outputFile(xmlOutputFilename, xml);
457
+
458
+ // Update the content hash for this file
459
+ updateHash(file, rawBody, hashCache);
460
+ } catch (e) {
461
+ progress.log(`Error processing ${file}: ${e.message}`);
462
+ errors.push({ file, phase: 'article-generation', error: e });
463
+ }
464
+ });
342
465
 
343
- console.log(jsonCache.keys());
344
-
345
- // process directory indices
346
- await Promise.all(
347
- allSourceFilenamesThatAreDirectories.map(async (dir) => {
348
- try {
349
- console.log(`processing directory ${dir}`);
466
+ // Complete the articles status line
467
+ progress.done('Articles', `${totalArticles} done (${regeneratedCount} regenerated, ${skippedCount} unchanged)`);
350
468
 
351
- const pathsInThisDirectory = allSourceFilenames.filter((filename) =>
352
- filename.match(new RegExp(`${dir}.+`))
353
- );
469
+ // Write search index as a separate JSON file (not embedded in each page)
470
+ const searchIndexPath = join(output, 'public', 'search-index.json');
471
+ progress.log(`Writing search index with ${searchIndex.length} entries`);
472
+ await outputFile(searchIndexPath, JSON.stringify(searchIndex));
354
473
 
355
- const jsonObjects = pathsInThisDirectory
474
+ // Process directory indices with batched concurrency
475
+ const totalDirs = allSourceFilenamesThatAreDirectories.length;
476
+ let processedDirs = 0;
477
+ progress.log(`Processing ${totalDirs} directories...`);
478
+ await processBatched(allSourceFilenamesThatAreDirectories, async (dirPath) => {
479
+ try {
480
+ processedDirs++;
481
+ const shortDir = dirPath.replace(source, '');
482
+ progress.status('Directories', `${processedDirs}/${totalDirs} ${shortDir}`);
483
+
484
+ const pathsInThisDirectory = allSourceFilenames.filter((filename) =>
485
+ filename.match(new RegExp(`${dirPath}.+`))
486
+ );
487
+
488
+ // Use minimal directory index cache instead of full jsonCache
489
+ const jsonObjects = pathsInThisDirectory
490
+ .map((path) => {
491
+ const object = dirIndexCache.get(path);
492
+ return typeof object === "object" ? object : null;
493
+ })
494
+ .filter((a) => a);
495
+
496
+ const json = JSON.stringify(jsonObjects);
497
+
498
+ const outputFilename = dirPath.replace(source, output) + ".json";
499
+ await outputFile(outputFilename, json);
500
+
501
+ // html
502
+ const htmlOutputFilename = dirPath.replace(source, output) + ".html";
503
+ const indexAlreadyExists = fileExists(htmlOutputFilename);
504
+ if (!indexAlreadyExists) {
505
+ const template = templates["default-template"];
506
+ const indexHtml = `<ul>${pathsInThisDirectory
356
507
  .map((path) => {
357
- const object = jsonCache.get(path);
358
- return typeof object === "object" ? object : null;
508
+ const partialPath = path
509
+ .replace(source, "")
510
+ .replace(parse(path).ext, ".html");
511
+ const name = basename(path, parse(path).ext);
512
+ return `<li><a href="${partialPath}">${name}</a></li>`;
359
513
  })
360
- .filter((a) => a);
361
-
362
- const json = JSON.stringify(jsonObjects);
363
-
364
- const outputFilename = dir.replace(source, output) + ".json";
365
-
366
- console.log(`writing directory index to ${outputFilename}`);
367
- await outputFile(outputFilename, json);
368
-
369
- // html
370
- const htmlOutputFilename = dir.replace(source, output) + ".html";
371
- const indexAlreadyExists = fileExists(htmlOutputFilename);
372
- if (!indexAlreadyExists) {
373
- const template = templates["default-template"]; // TODO: figure out a way to specify template for a directory index
374
- const indexHtml = `<ul>${pathsInThisDirectory
375
- .map((path) => {
376
- const partialPath = path
377
- .replace(source, "")
378
- .replace(parse(path).ext, ".html");
379
- const name = basename(path, parse(path).ext);
380
- return `<li><a href="${partialPath}">${name}</a></li>`;
381
- })
382
- .join("")}</ul>`;
383
- const finalHtml = template
384
- .replace("${menu}", menu)
385
- .replace("${body}", indexHtml)
386
- .replace("${searchIndex}", JSON.stringify(searchIndex))
387
- .replace("${title}", "Index")
388
- .replace("${meta}", "{}")
389
- .replace("${transformedMetadata}", "")
390
- .replace("${embeddedStyle}", "")
391
- .replace("${footer}", footer);
392
- console.log(`writing directory index to ${htmlOutputFilename}`);
393
- await outputFile(htmlOutputFilename, finalHtml);
514
+ .join("")}</ul>`;
515
+ let finalHtml = template;
516
+ const replacements = {
517
+ "${menu}": menu,
518
+ "${body}": indexHtml,
519
+ "${searchIndex}": "[]", // Search index now in separate file
520
+ "${title}": "Index",
521
+ "${meta}": "{}",
522
+ "${transformedMetadata}": "",
523
+ "${embeddedStyle}": "",
524
+ "${footer}": footer
525
+ };
526
+ for (const [key, value] of Object.entries(replacements)) {
527
+ finalHtml = finalHtml.replace(key, value);
394
528
  }
395
- } catch (e) {
396
- console.error(`Error processing directory ${dir}: ${e.message}`);
397
- errors.push({ file: dir, phase: 'directory-index', error: e });
529
+ await outputFile(htmlOutputFilename, finalHtml);
398
530
  }
399
- })
400
- );
531
+ } catch (e) {
532
+ progress.log(`Error processing directory ${dirPath}: ${e.message}`);
533
+ errors.push({ file: dirPath, phase: 'directory-index', error: e });
534
+ }
535
+ });
536
+
537
+ progress.done('Directories', `${totalDirs} done`);
401
538
 
402
- // copy all static files (i.e. images)
539
+ // Clear directory index cache to free memory before processing static files
540
+ dirIndexCache.clear();
541
+
542
+ // copy all static files (i.e. images) with batched concurrency
403
543
  const imageExtensions = /\.(jpg|jpeg|png|gif|webp|svg|ico)/; // static asset extensions
404
544
  const allSourceFilenamesThatAreImages = allSourceFilenames.filter(
405
545
  (filename) => filename.match(imageExtensions)
406
546
  );
407
- await Promise.all(
408
- allSourceFilenamesThatAreImages.map(async (file) => {
409
- try {
410
- // For incremental mode, check if file has changed using file stat as a quick check
411
- if (_incremental) {
412
- const fileStat = await stat(file);
413
- const statKey = `${file}:stat`;
414
- const newStatHash = `${fileStat.size}:${fileStat.mtimeMs}`;
415
- if (hashCache.get(statKey) === newStatHash) {
416
- return; // Skip unchanged static file
417
- }
418
- hashCache.set(statKey, newStatHash);
419
- }
420
-
421
- console.log(`processing static file ${file}`);
547
+ const totalStatic = allSourceFilenamesThatAreImages.length;
548
+ let processedStatic = 0;
549
+ let copiedStatic = 0;
550
+ progress.log(`Processing ${totalStatic} static files...`);
551
+ await processBatched(allSourceFilenamesThatAreImages, async (file) => {
552
+ try {
553
+ processedStatic++;
554
+ const shortFile = file.replace(source, '');
555
+ progress.status('Static files', `${processedStatic}/${totalStatic} ${shortFile}`);
556
+
557
+ // Check if file has changed using file stat as a quick check
558
+ const fileStat = await stat(file);
559
+ const statKey = `${file}:stat`;
560
+ const newStatHash = `${fileStat.size}:${fileStat.mtimeMs}`;
561
+ if (hashCache.get(statKey) === newStatHash) {
562
+ return; // Skip unchanged static file
563
+ }
564
+ hashCache.set(statKey, newStatHash);
565
+ copiedStatic++;
422
566
 
423
- const outputFilename = file.replace(source, output);
567
+ const outputFilename = file.replace(source, output);
424
568
 
425
- console.log(`writing static file to ${outputFilename}`);
569
+ await mkdir(dirname(outputFilename), { recursive: true });
570
+ return await copyFile(file, outputFilename);
571
+ } catch (e) {
572
+ progress.log(`Error processing static file ${file}: ${e.message}`);
573
+ errors.push({ file, phase: 'static-file', error: e });
574
+ }
575
+ });
576
+
577
+ progress.done('Static files', `${totalStatic} done (${copiedStatic} copied)`);
426
578
 
427
- await mkdir(dirname(outputFilename), { recursive: true });
428
- return await copyFile(file, outputFilename);
429
- } catch (e) {
430
- console.error(`Error processing static file ${file}: ${e.message}`);
431
- errors.push({ file, phase: 'static-file', error: e });
432
- }
433
- })
434
- );
579
+ // Automatic index generation for folders without index.html
580
+ progress.log(`Checking for missing index files...`);
581
+ await generateAutoIndices(output, allSourceFilenamesThatAreDirectories, source, templates, menu, footer);
435
582
 
436
583
  // Save the hash cache to .ursa folder in source directory
437
584
  if (hashCache.size > 0) {
@@ -468,10 +615,133 @@ export async function generate({
468
615
  });
469
616
 
470
617
  await outputFile(errorReportPath, report);
471
- console.log(`\nāš ļø ${errors.length} error(s) occurred during generation.`);
472
- console.log(` Error report written to: ${errorReportPath}\n`);
618
+ progress.log(`\nāš ļø ${errors.length} error(s) occurred during generation.`);
619
+ progress.log(` Error report written to: ${errorReportPath}\n`);
620
+ } else {
621
+ progress.log(`\nāœ… Generation complete with no errors.\n`);
622
+ }
623
+ }
624
+
625
+ /**
626
+ * Generate automatic index.html files for folders that don't have one
627
+ * @param {string} output - Output directory path
628
+ * @param {string[]} directories - List of source directories
629
+ * @param {string} source - Source directory path
630
+ * @param {object} templates - Template map
631
+ * @param {string} menu - Rendered menu HTML
632
+ * @param {string} footer - Footer HTML
633
+ */
634
+ async function generateAutoIndices(output, directories, source, templates, menu, footer) {
635
+ // Alternate index file names to look for (in priority order)
636
+ const INDEX_ALTERNATES = ['_index.html', 'home.html', '_home.html'];
637
+
638
+ // Get all output directories (including root)
639
+ const outputDirs = new Set([output]);
640
+ for (const dir of directories) {
641
+ const outputDir = dir.replace(source, output);
642
+ outputDirs.add(outputDir);
643
+ }
644
+
645
+ let generatedCount = 0;
646
+ let renamedCount = 0;
647
+
648
+ for (const dir of outputDirs) {
649
+ const indexPath = join(dir, 'index.html');
650
+
651
+ // Skip if index.html already exists
652
+ if (existsSync(indexPath)) {
653
+ continue;
654
+ }
655
+
656
+ // Get folder name for (foldername).html check
657
+ const folderName = basename(dir);
658
+ const folderNameAlternate = `${folderName}.html`;
659
+
660
+ // Check for alternate index files
661
+ let foundAlternate = null;
662
+ for (const alt of [...INDEX_ALTERNATES, folderNameAlternate]) {
663
+ const altPath = join(dir, alt);
664
+ if (existsSync(altPath)) {
665
+ foundAlternate = altPath;
666
+ break;
667
+ }
668
+ }
669
+
670
+ if (foundAlternate) {
671
+ // Rename/copy alternate to index.html
672
+ try {
673
+ const content = await readFile(foundAlternate, 'utf8');
674
+ await outputFile(indexPath, content);
675
+ renamedCount++;
676
+ progress.status('Auto-index', `Promoted ${basename(foundAlternate)} → index.html in ${dir.replace(output, '')}`);
677
+ } catch (e) {
678
+ progress.log(`Error promoting ${foundAlternate} to index.html: ${e.message}`);
679
+ }
680
+ } else {
681
+ // Generate a simple index listing direct children
682
+ try {
683
+ const children = await readdir(dir, { withFileTypes: true });
684
+
685
+ // Filter to only include relevant files and folders
686
+ const items = children
687
+ .filter(child => {
688
+ // Skip hidden files and index alternates we just checked
689
+ if (child.name.startsWith('.')) return false;
690
+ if (child.name === 'index.html') return false;
691
+ // Include directories and html files
692
+ return child.isDirectory() || child.name.endsWith('.html');
693
+ })
694
+ .map(child => {
695
+ const isDir = child.isDirectory();
696
+ const name = isDir ? child.name : child.name.replace('.html', '');
697
+ const href = isDir ? `${child.name}/` : child.name;
698
+ const displayName = toTitleCase(name);
699
+ const icon = isDir ? 'šŸ“' : 'šŸ“„';
700
+ return `<li>${icon} <a href="${href}">${displayName}</a></li>`;
701
+ });
702
+
703
+ if (items.length === 0) {
704
+ // Empty folder, skip generating index
705
+ continue;
706
+ }
707
+
708
+ const folderDisplayName = dir === output ? 'Home' : toTitleCase(folderName);
709
+ const indexHtml = `<h1>${folderDisplayName}</h1>\n<ul class="auto-index">\n${items.join('\n')}\n</ul>`;
710
+
711
+ const template = templates["default-template"];
712
+ if (!template) {
713
+ progress.log(`Warning: No default template for auto-index in ${dir}`);
714
+ continue;
715
+ }
716
+
717
+ let finalHtml = template;
718
+ const replacements = {
719
+ "${menu}": menu,
720
+ "${body}": indexHtml,
721
+ "${searchIndex}": "[]",
722
+ "${title}": folderDisplayName,
723
+ "${meta}": "{}",
724
+ "${transformedMetadata}": "",
725
+ "${embeddedStyle}": "",
726
+ "${footer}": footer
727
+ };
728
+ for (const [key, value] of Object.entries(replacements)) {
729
+ finalHtml = finalHtml.replace(key, value);
730
+ }
731
+
732
+ await outputFile(indexPath, finalHtml);
733
+ generatedCount++;
734
+ progress.status('Auto-index', `Generated index.html for ${dir.replace(output, '') || '/'}`);
735
+ } catch (e) {
736
+ progress.log(`Error generating auto-index for ${dir}: ${e.message}`);
737
+ }
738
+ }
739
+ }
740
+
741
+ if (generatedCount > 0 || renamedCount > 0) {
742
+ progress.done('Auto-index', `${generatedCount} generated, ${renamedCount} promoted`);
473
743
  } else {
474
- console.log(`\nāœ… Generation complete with no errors.\n`);
744
+ progress.log(`Auto-index: All folders already have index.html`);
475
745
  }
476
746
  }
477
747
 
package/src/serve.js CHANGED
@@ -4,7 +4,7 @@ import { generate } from "./jobs/generate.js";
4
4
  import { join, resolve } from "path";
5
5
  import fs from "fs";
6
6
  import { promises } from "fs";
7
- const { readdir } = promises;
7
+ const { readdir, mkdir } = promises;
8
8
 
9
9
  /**
10
10
  * Configurable serve function for CLI and library use
@@ -15,31 +15,39 @@ export async function serve({
15
15
  _output,
16
16
  port = 8080,
17
17
  _whitelist = null,
18
- _clean = false
18
+ _clean = false,
19
+ _exclude = null
19
20
  } = {}) {
20
21
  const sourceDir = resolve(_source);
21
22
  const metaDir = resolve(_meta);
22
23
  const outputDir = resolve(_output);
23
24
 
24
- console.log({ source: sourceDir, meta: metaDir, output: outputDir, port, whitelist: _whitelist, clean: _clean });
25
+ console.log({ source: sourceDir, meta: metaDir, output: outputDir, port, whitelist: _whitelist, exclude: _exclude, clean: _clean });
25
26
 
26
- // Initial generation (use _clean flag only for initial generation)
27
- console.log("Generating initial site...");
28
- await generate({ _source: sourceDir, _meta: metaDir, _output: outputDir, _whitelist, _clean });
29
- console.log("Initial generation complete. Starting server...");
30
-
31
- // Start file server
27
+ // Ensure output directory exists and start server immediately
28
+ await mkdir(outputDir, { recursive: true });
32
29
  serveFiles(outputDir, port);
30
+ console.log(`šŸš€ Development server running at http://localhost:${port}`);
31
+ console.log("šŸ“ Serving files from:", outputDir);
32
+ console.log("ā³ Generating site in background...\n");
33
+
34
+ // Initial generation (use _clean flag only for initial generation)
35
+ generate({ _source: sourceDir, _meta: metaDir, _output: outputDir, _whitelist, _exclude, _clean })
36
+ .then(() => console.log("\nāœ… Initial generation complete.\n"))
37
+ .catch((error) => console.error("Error during initial generation:", error.message));
33
38
 
34
39
  // Watch for changes
35
- console.log("Watching for file changes...");
40
+ console.log("šŸ‘€ Watching for changes in:");
41
+ console.log(" Source:", sourceDir, "(incremental)");
42
+ console.log(" Meta:", metaDir, "(full rebuild)");
43
+ console.log("\nPress Ctrl+C to stop the server\n");
36
44
 
37
45
  // Meta changes trigger full rebuild (templates, CSS, etc. affect all pages)
38
46
  watch(metaDir, { recursive: true, filter: /\.(js|json|css|html|md|txt|yml|yaml)$/ }, async (evt, name) => {
39
47
  console.log(`Meta files changed! Event: ${evt}, File: ${name}`);
40
48
  console.log("Full rebuild required (meta files affect all pages)...");
41
49
  try {
42
- await generate({ _source: sourceDir, _meta: metaDir, _output: outputDir, _whitelist, _clean: true });
50
+ await generate({ _source: sourceDir, _meta: metaDir, _output: outputDir, _whitelist, _exclude, _clean: true });
43
51
  console.log("Regeneration complete.");
44
52
  } catch (error) {
45
53
  console.error("Error during regeneration:", error.message);
@@ -64,7 +72,7 @@ export async function serve({
64
72
  if (isCssChange) {
65
73
  console.log("CSS change detected - full rebuild required...");
66
74
  try {
67
- await generate({ _source: sourceDir, _meta: metaDir, _output: outputDir, _whitelist, _clean: true });
75
+ await generate({ _source: sourceDir, _meta: metaDir, _output: outputDir, _whitelist, _exclude, _clean: true });
68
76
  console.log("Regeneration complete.");
69
77
  } catch (error) {
70
78
  console.error("Error during regeneration:", error.message);
@@ -72,20 +80,13 @@ export async function serve({
72
80
  } else {
73
81
  console.log("Incremental rebuild...");
74
82
  try {
75
- await generate({ _source: sourceDir, _meta: metaDir, _output: outputDir, _whitelist });
83
+ await generate({ _source: sourceDir, _meta: metaDir, _output: outputDir, _whitelist, _exclude });
76
84
  console.log("Regeneration complete.");
77
85
  } catch (error) {
78
86
  console.error("Error during regeneration:", error.message);
79
87
  }
80
88
  }
81
89
  });
82
-
83
- console.log(`šŸš€ Development server running at http://localhost:${port}`);
84
- console.log("šŸ“ Serving files from:", outputDir);
85
- console.log("šŸ‘€ Watching for changes in:");
86
- console.log(" Source:", sourceDir, "(incremental)");
87
- console.log(" Meta:", metaDir, "(full rebuild)");
88
- console.log("\nPress Ctrl+C to stop the server");
89
90
  }
90
91
 
91
92
  /**