@cspell/cspell-tools 9.6.2 → 9.6.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/app.mjs +2 -2
- package/dist/{build-Dxf6sFCy.mjs → build-BGL2P0c2.mjs} +63 -63
- package/dist/index.d.mts +0 -1
- package/dist/index.mjs +1 -1
- package/package.json +6 -6
package/dist/app.mjs
CHANGED
@@ -1,4 +1,4 @@
-import { a as reportCheckChecksumFile, c as toError, d as compressFile, i as logWithTimestamp, l as generateBTrie, n as compile, o as reportChecksumForFiles, r as setLogger, s as updateChecksumForFiles, t as build, u as OSFlags } from "./build-Dxf6sFCy.mjs";
+import { a as reportCheckChecksumFile, c as toError, d as compressFile, i as logWithTimestamp, l as generateBTrie, n as compile, o as reportChecksumForFiles, r as setLogger, s as updateChecksumForFiles, t as build, u as OSFlags } from "./build-BGL2P0c2.mjs";
 import { readFileSync } from "node:fs";
 import { CommanderError, Option } from "commander";
 import { writeFile } from "node:fs/promises";
@@ -9,7 +9,7 @@ import YAML from "yaml";
 
 //#region src/util/globP.ts
 function globP(pattern, options) {
-return glob((Array.isArray(pattern) ? pattern : [pattern]).map((pattern
+return glob((Array.isArray(pattern) ? pattern : [pattern]).map((pattern) => pattern.replaceAll("\\", "/")), options);
 }
 
 //#endregion
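
Note on the globP change above: glob patterns treat `\` as an escape character, so Windows-style path separators have to be normalized to `/` before matching. A minimal sketch of the same idea (the sample pattern is illustrative, not from the package):

    import { glob } from "glob";

    // On Windows, "dict\\words\\*.txt" would escape the "*" instead of matching it.
    const pattern = "dict\\words\\*.txt".replaceAll("\\", "/");
    const files = await glob(pattern); // matches dict/words/*.txt
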
package/dist/{build-Dxf6sFCy.mjs → build-BGL2P0c2.mjs}
CHANGED
@@ -18,14 +18,14 @@ import { Buffer } from "node:buffer";
 //#region src/gzip/compressFiles.ts
 const gzip$1 = promisify(gzip);
 const gunzip$1 = promisify(gunzip);
-let OSFlags = /* @__PURE__ */ function(OSFlags
-OSFlags
-OSFlags
-OSFlags
-OSFlags
-OSFlags
-OSFlags
-return OSFlags
+let OSFlags = /* @__PURE__ */ function(OSFlags) {
+OSFlags[OSFlags["auto"] = -1] = "auto";
+OSFlags[OSFlags["FAT"] = 0] = "FAT";
+OSFlags[OSFlags["Unix"] = 3] = "Unix";
+OSFlags[OSFlags["HPFS"] = 6] = "HPFS";
+OSFlags[OSFlags["MACOS"] = 7] = "MACOS";
+OSFlags[OSFlags["NTFS"] = 11] = "NTFS";
+return OSFlags;
 }({});
 const OSSystemIDOffset = 9;
 async function compressFile(file, os) {
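
The restored OSFlags block is the standard JavaScript lowering of a TypeScript enum into a reverse-mapped object, and its values match the operating-system identifiers of the gzip header (RFC 1952); `OSSystemIDOffset = 9` is the offset of that OS byte in a gzip stream. A hedged sketch of stamping such a flag onto compressed output (the helper is hypothetical; only the offset and values come from the diff):

    import { gzipSync } from "node:zlib";

    const OS_OFFSET = 9; // gzip header byte 9 identifies the originating OS (RFC 1952)
    function setGzipOS(buf, os) {
      buf[OS_OFFSET] = os; // e.g. 3 = Unix, 11 = NTFS, matching the OSFlags values above
      return buf;
    }
    const data = setGzipOS(gzipSync("sample text"), 3);
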
@@ -238,13 +238,13 @@ async function createReader(filename, options) {
 //#endregion
 //#region src/compiler/bTrie.ts
 async function generateBTrieFromFile(file, options) {
-const log
-log
+const log = options.logger || console.log.bind(console);
+log(`Processing file: ${file}`);
 const btrie = await createBTrieFromFile(file, options);
 const outFile = bTrieFileName(file, options);
 await mkdir(fsPath.dirname(outFile), { recursive: true });
 await writeFile$1(outFile, btrie);
-log
+log(`Written BTrie to: ${outFile}`);
 return outFile;
 }
 async function generateBTrieFromFiles(files, options) {
@@ -353,10 +353,10 @@ async function checkShasumFile(filename, files, root) {
 const resolvedRoot = resolve(root || ".");
 const fileDir = dirname(resolve(resolvedRoot, filename));
 const shaFiles = await readAndParseShasumFile(filename);
-const relFilesToCheck = (!files ? shaFiles.map(({ filename
+const relFilesToCheck = (!files ? shaFiles.map(({ filename }) => filename) : files).map((f) => relative(fileDir, resolve(fileDir, f)));
 const mapNameToChecksum = new Map(shaFiles.map((r) => [normalizeFilename(r.filename), r.checksum]));
-const results = await Promise.all(relFilesToCheck.map(normalizeFilename).map((filename
-return tryToCheckFile(filename
+const results = await Promise.all(relFilesToCheck.map(normalizeFilename).map((filename) => {
+return tryToCheckFile(filename, resolvedRoot, mapNameToChecksum.get(filename));
 }));
 return {
 passed: !results.some((v) => !v.passed),
@@ -419,7 +419,7 @@ async function reportChecksumForFiles(files, options) {
 async function reportCheckChecksumFile(filename, files, options) {
 const root = options.root;
 const results = (await checkShasumFile(filename, await resolveFileList(files, options.listFile), root)).results;
-const lines = results.map(({ filename
+const lines = results.map(({ filename, passed, error }) => `${filename}: ${passed ? "OK" : "FAILED"} ${error ? "- " + error.message : ""}`.trim());
 const withErrors = results.filter((a) => !a.passed);
 const passed = !withErrors.length;
 if (!passed) lines.push(`shasum: WARNING: ${withErrors.length} computed checksum${withErrors.length > 1 ? "s" : ""} did NOT match`);
@@ -449,7 +449,7 @@ async function calcUpdateChecksumForFiles(filename, files, options) {
 }));
 const entriesToUpdate = new Set([...filesToCheck, ...currentEntries.map((e) => e.filename)]);
 const mustExist = new Set(filesToCheck);
-const checksumMap = new Map(currentEntries.map(({ filename
+const checksumMap = new Map(currentEntries.map(({ filename, checksum }) => [filename, checksum]));
 for (const file of entriesToUpdate) try {
 const checksum = await calcFileChecksum(resolve(root, file));
 checksumMap.set(file, checksum);
@@ -457,8 +457,8 @@ async function calcUpdateChecksumForFiles(filename, files, options) {
 if (mustExist.has(file) || toError(e).code !== "ENOENT") throw e;
 checksumMap.delete(file);
 }
-return [...checksumMap].map(([filename
-filename
+return [...checksumMap].map(([filename, checksum]) => ({
+filename,
 checksum
 })).sort((a, b) => a.filename < b.filename ? -1 : 1).map((e) => `${e.checksum} ${e.filename}`).join("\n") + "\n";
 }
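
The checksum hunks above read and write shasum-style files: one `<checksum> <filename>` entry per line, sorted by filename, as built at line 463. A rough sketch of the per-file check step (the hash algorithm and helper name are assumptions, not taken from the package):

    import { createHash } from "node:crypto";
    import { readFile } from "node:fs/promises";

    // Hypothetical stand-in for the package's calcFileChecksum.
    async function calcFileChecksum(file) {
      return createHash("sha1").update(await readFile(file)).digest("hex");
    }
    // A file passes when its computed checksum matches the recorded entry:
    // (await calcFileChecksum(name)) === mapNameToChecksum.get(name)
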
@@ -533,11 +533,11 @@ function removeVerboseFromRegExp(pattern) {
 acc.idx++;
 let escCount = 0;
 while (acc.idx < pattern.length) {
-const char
-acc.result += char
+const char = pattern[acc.idx];
+acc.result += char;
 acc.idx++;
-if (char
-escCount = char
+if (char === "]" && !(escCount & 1)) break;
+escCount = char === "\\" ? escCount + 1 : 0;
 }
 return acc;
 }
@@ -612,9 +612,9 @@ function buildHasFn(dict) {
 async function readFile$1(filename) {
 return await createReader(filename, {});
 }
-function readersToCollection(readers
-const dictReaders = readers
-const nonDictCollection = lineReadersToCollection(readers
+function readersToCollection(readers) {
+const dictReaders = readers.filter(isDictionaryReader).map(dictReaderToCollection);
+const nonDictCollection = lineReadersToCollection(readers.filter((a) => !isDictionaryReader(a)));
 const collections = [...dictReaders, nonDictCollection];
 return {
 size: collections.reduce((s, a) => s + a.size, 0),
@@ -639,9 +639,9 @@ function dictReaderToCollection(reader) {
 has: buildHasFn(reader)
 };
 }
-function lineReadersToCollection(readers
+function lineReadersToCollection(readers) {
 function* words() {
-for (const reader of readers
+for (const reader of readers) yield* reader.lines;
 }
 const dict = parseDictionary(words(), { stripCaseAndAccents: false });
 return {
@@ -689,7 +689,7 @@ function isSingleLetter(c) {
 //#region src/compiler/splitCamelCaseIfAllowed.ts
 const regExpSpaceOrDash = /[- ]+/g;
 const regExpIsNumber = /^\d+$/;
-function splitCamelCaseIfAllowed(word, allowedWords, keepCase, compoundPrefix, minCompoundLength
+function splitCamelCaseIfAllowed(word, allowedWords, keepCase, compoundPrefix, minCompoundLength) {
 const split = [...splitCamelCase(word)];
 if (split.length === 1) return adjustCases(split, allowedWords, keepCase);
 if (split.some((w) => isUnknown(w, allowedWords))) return [word];
@@ -697,7 +697,7 @@ function splitCamelCaseIfAllowed(word, allowedWords, keepCase, compoundPrefix, minCompoundLength) {
 const adjusted = adjustCases(split, allowedWords, keepCase);
 return !compoundPrefix ? adjusted : adjusted.map((w, i) => {
 const { px, sx } = wordIndexes[i];
-const canCompound = w.length >= minCompoundLength
+const canCompound = w.length >= minCompoundLength;
 const lc = w.toLowerCase();
 const p = canCompound && isSingleLetter(px) ? compoundPrefix : "";
 const s = canCompound && isSingleLetter(sx) ? compoundPrefix : "";
@@ -721,7 +721,7 @@ function isUnknown(word, allowedWords) {
 return !allowedWords.has(word, false);
 }
 function splitCamelCase(word) {
-const splitWords = splitCamelCaseWord(word).filter((word
+const splitWords = splitCamelCaseWord(word).filter((word) => !regExpIsNumber.test(word));
 if (splitWords.length > 1 && regExpSpaceOrDash.test(word)) return splitWords.flatMap((w) => w.split(regExpSpaceOrDash));
 return splitWords;
 }
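
For orientation, `splitCamelCaseIfAllowed` (above) only splits a camel-case word when every resulting part is found in `allowedWords`, and numeric-only parts are filtered out via `regExpIsNumber`. An illustrative splitter in the same spirit (this regex is a simplification, not the package's `splitCamelCaseWord`):

    // Split at lower-to-upper case boundaries: "errorCode" -> ["error", "Code"].
    function splitCamelCaseDemo(word) {
      return word.split(/(?<=[a-z])(?=[A-Z])/);
    }
    console.log(splitCamelCaseDemo("errorCode")); // ["error", "Code"]
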
@@ -810,7 +810,7 @@ const cSpellToolDirective = "cspell-tools:";
 */
 function createParseFileLineMapper(options) {
 const _options = options || _defaultOptions;
-const { splitKeepBoth = _defaultOptions.splitKeepBoth, allowedSplitWords = _defaultOptions.allowedSplitWords, storeSplitWordsAsCompounds, minCompoundLength
+const { splitKeepBoth = _defaultOptions.splitKeepBoth, allowedSplitWords = _defaultOptions.allowedSplitWords, storeSplitWordsAsCompounds, minCompoundLength = _defaultOptions.minCompoundLength } = _options;
 let { legacy = _defaultOptions.legacy } = _options;
 let { split = _defaultOptions.split, keepCase = legacy ? false : _defaultOptions.keepCase } = _options;
 const compoundFix = storeSplitWordsAsCompounds ? "+" : "";
@@ -869,7 +869,7 @@ function createParseFileLineMapper(options) {
 return line.split("|").map((a) => a.trim()).filter((a) => !!a).filter((a) => !/^[0-9_-]+$/.test(a)).filter((a) => !/^0[xo][0-9A-F]+$/i.test(a));
 }
 function splitWordIntoWords(word) {
-return splitCamelCaseIfAllowed(word, allowedSplitWords, keepCase, compoundFix, minCompoundLength
+return splitCamelCaseIfAllowed(word, allowedSplitWords, keepCase, compoundFix, minCompoundLength);
 }
 function* splitWords(lines) {
 for (const line of lines) {
@@ -893,10 +893,10 @@ function createParseFileLineMapper(options) {
 yield line;
 }
 }
-function* splitLines
+function* splitLines(paragraphs) {
 for (const paragraph of paragraphs) yield* paragraph.split("\n");
 }
-return opCombine(opFilter(isString), splitLines
+return opCombine(opFilter(isString), splitLines, opMap(removeComments), splitWords, opMap(trim), opFilter(filterEmptyLines), unique);
 }
 /**
 * Normalizes a dictionary words based upon prefix / suffixes.
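
The `opCombine(...)` return value above chains `@cspell/cspell-pipe` operators into a single line-to-words transform. A small sketch of that operator style (assuming the package's usual sync entry point; the sample data is illustrative):

    import { opCombine, opFilter, opMap, pipe } from "@cspell/cspell-pipe/sync";

    const toWords = opCombine(
      opFilter((a) => typeof a === "string"),
      opMap((s) => s.trim()),
      opFilter((s) => !!s),
    );
    console.log([...pipe(["  one ", "", "two"], toWords)]); // ["one", "two"]
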
@@ -930,7 +930,7 @@ function splitLines(lines, options) {
 return split();
 }
 async function textFileReader(reader, options) {
-const { legacy, splitWords: split, allowedSplitWords, storeSplitWordsAsCompounds, minCompoundLength
+const { legacy, splitWords: split, allowedSplitWords, storeSplitWordsAsCompounds, minCompoundLength } = options;
 const parseOptions = {
 legacy,
 split,
@@ -938,7 +938,7 @@ async function textFileReader(reader, options) {
 keepCase: void 0,
 allowedSplitWords,
 storeSplitWordsAsCompounds,
-minCompoundLength
+minCompoundLength
 };
 const words = [...parseFileLines(reader.lines, parseOptions)];
 return {
@@ -1034,7 +1034,7 @@ function* removeDuplicates(words) {
 const sLcForms = new Set(lcForm);
 yield* lcForm;
 if (sLcForms.has("*" + lc + "*")) continue;
-for (const forms
+for (const forms of mForms.values()) for (const form of forms) {
 if (sLcForms.has(form.toLowerCase())) continue;
 yield form;
 }
@@ -1047,14 +1047,14 @@ function* removeDuplicates(words) {
 * required_prefix+
 * required_suffix+
 */
-var Flags = /* @__PURE__ */ function(Flags
-Flags
-Flags
-Flags
-Flags
-Flags
-Flags
-return Flags
+var Flags = /* @__PURE__ */ function(Flags) {
+Flags[Flags["base"] = 0] = "base";
+Flags[Flags["none"] = 1] = "none";
+Flags[Flags["both"] = 2] = "both";
+Flags[Flags["pfx"] = 4] = "pfx";
+Flags[Flags["sfx"] = 8] = "sfx";
+Flags[Flags["all"] = 15] = "all";
+return Flags;
 }(Flags || {});
 function applyFlags(word, flags) {
 if (flags === Flags.none) return [word];
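
`Flags` above is a bit field (none = 1, both = 2, pfx = 4, sfx = 8, all = 15), so `applyFlags` can decide which affix forms of a word to emit with bitwise tests. A minimal illustration (values from the diff; the combination logic here is an example, not the package's code):

    const Flags = { none: 1, both: 2, pfx: 4, sfx: 8, all: 15 };
    const flags = Flags.pfx | Flags.sfx;      // 12
    const wantPrefix = !!(flags & Flags.pfx); // true
    const wantSuffix = !!(flags & Flags.sfx); // true
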
@@ -1099,22 +1099,22 @@ function removeDuplicateForms(forms) {
 return [form, applyFlags(form, flag)];
 }));
 }
-async function createTargetFile(destFilename, seq, compress
-const rel
-getLogger()(`Writing to file ${rel
+async function createTargetFile(destFilename, seq, compress) {
+const rel = path.relative(process.cwd(), destFilename).replaceAll(path.sep, "/");
+getLogger()(`Writing to file ${rel}${compress ? ".gz" : ""}`);
 await mkdirp(path.dirname(destFilename));
-await writeTextToFile(destFilename, seq, compress
+await writeTextToFile(destFilename, seq, compress);
 }
 function createTrieCompiler(options) {
 return (words) => {
-const log
-log
+const log = getLogger();
+log("Reading Words into Trie");
 const base = options.base ?? 32;
 const version = options.trie4 ? 4 : options.trie3 ? 3 : 1;
 const root = Trie.buildTrie(words).root;
-log
+log("Reduce duplicate word endings");
 const trie = Trie.consolidate(root);
-log
+log("Trie compilation complete");
 return Trie.serializeTrie(trie, {
 base,
 comment: "Built by cspell-tools.",
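
`createTrieCompiler` (above) reads the word stream into a trie, consolidates duplicate word endings, and serializes the result. A minimal sketch using the same `cspell-trie-lib` calls that appear in the diff (the import style and option values are assumptions):

    import * as Trie from "cspell-trie-lib";

    function compileTrieDemo(words) {
      const root = Trie.buildTrie(words).root; // read words into a trie
      const trie = Trie.consolidate(root);     // merge duplicate endings
      return Trie.serializeTrie(trie, { base: 32, comment: "demo" });
    }
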
@@ -1194,14 +1194,14 @@ async function buildTargetDictionary(target, options, compileOptions, buildOptions) {
 const { format, sources, trieBase, sort = true, generateNonStrict = false, excludeWordsFrom = [], excludeWordsNotFoundIn = [], excludeWordsMatchingRegex } = target;
 const { filename, useTrie, generateOnlyCompressedDictionary, checksumRoot } = buildOptions;
 const dictionaryDirectives = target.dictionaryDirectives ?? compileOptions.dictionaryDirectives;
-const removeDuplicates
+const removeDuplicates = target.removeDuplicates ?? false;
 const excludeFromFilter = await createExcludeFilter(excludeWordsFrom);
 const includeFromFilter = await createIncludeFilter(excludeWordsNotFoundIn);
 const excludeRegexFilter = createExcludeRegexFilter(excludeWordsMatchingRegex);
 const excludeFilter = (word) => {
 return excludeFromFilter(word) && includeFromFilter(word) && excludeRegexFilter(word);
 };
-const compress
+const compress = generateOnlyCompressedDictionary;
 const filesToProcess = await toArray(pipeAsync(readSourceList(sources, rootDir), opMapAsync((src) => readFileSource(src, options)), opAwaitAsync()));
 const normalizer = normalizeTargetWords({
 sort: useTrie || sort,
@@ -1209,7 +1209,7 @@ async function buildTargetDictionary(target, options, compileOptions, buildOptions) {
 filter: excludeFilter,
 dictionaryDirectives
 });
-const deps = [...calculateDependencies(filename + (compress
+const deps = [...calculateDependencies(filename + (compress ? ".gz" : ""), filesToProcess, [...excludeWordsFrom, ...excludeWordsNotFoundIn], checksumRoot)];
 if (conditional && checksumFile) {
 if ((await checkShasumFile(checksumFile, deps, checksumRoot).catch(() => void 0))?.passed) {
 logWithTimestamp(`Skip ${target.name}, nothing changed.`);
@@ -1225,8 +1225,8 @@ async function buildTargetDictionary(target, options, compileOptions, buildOptions) {
 sort,
 generateNonStrict,
 dictionaryDirectives,
-removeDuplicates
-}))), compress
+removeDuplicates
+}))), compress);
 }
 await processFiles({
 action,
@@ -1319,7 +1319,7 @@ async function readFileList(fileList) {
 return (await readTextFile(fileList)).split("\n").map((a) => a.trim()).filter((a) => !!a);
 }
 async function readFileSource(fileSource, sourceOptions) {
-const { filename, keepRawCase = sourceOptions.keepRawCase || false, split = sourceOptions.split || false, maxDepth, storeSplitWordsAsCompounds, minCompoundLength
+const { filename, keepRawCase = sourceOptions.keepRawCase || false, split = sourceOptions.split || false, maxDepth, storeSplitWordsAsCompounds, minCompoundLength } = fileSource;
 const legacy = split === "legacy";
 const readerOptions = {
 maxDepth,
@@ -1328,7 +1328,7 @@ async function readFileSource(fileSource, sourceOptions) {
 keepCase: keepRawCase,
 allowedSplitWords: await createAllowedSplitWordsFromFiles(fileSource.allowedSplitWords || sourceOptions.allowedSplitWords),
 storeSplitWordsAsCompounds,
-minCompoundLength
+minCompoundLength
 };
 logWithTimestamp(`Reading ${path.basename(filename)}`);
 const stream = await streamSourceWordsFromFile(filename, readerOptions);
@@ -1342,7 +1342,7 @@ function normalizeTargetName(name) {
 return name.replace(/((\.txt|\.dic|\.aff|\.trie)(\.gz)?)?$/, "").replaceAll(/[^\p{L}\p{M}.\w\\/-]/gu, "_");
 }
 function logProgress(freq = 1e5) {
-function* logProgress
+function* logProgress(iter) {
 const _freq = freq;
 let count = 0;
 for (const v of iter) {
@@ -1351,7 +1351,7 @@ function logProgress(freq = 1e5) {
 yield v;
 }
 }
-return logProgress
+return logProgress;
 }
 /**
 * @param excludeWordsFrom - List of files to read words from.
@@ -1434,4 +1434,4 @@ function normalizeRequest(buildInfo, root) {
 
 //#endregion
 export { reportCheckChecksumFile as a, toError as c, compressFile as d, logWithTimestamp as i, generateBTrie as l, compile as n, reportChecksumForFiles as o, setLogger as r, updateChecksumForFiles as s, build as t, OSFlags as u };
-//# sourceMappingURL=build-Dxf6sFCy.mjs.map
+//# sourceMappingURL=build-BGL2P0c2.mjs.map
package/dist/index.d.mts
CHANGED
package/dist/index.mjs
CHANGED
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
 "name": "@cspell/cspell-tools",
-"version": "9.6.2",
+"version": "9.6.3",
 "description": "Tools to assist with the development of cSpell",
 "publishConfig": {
 "access": "public",
@@ -64,12 +64,12 @@
 },
 "homepage": "https://github.com/streetsidesoftware/cspell/tree/main/packages/cspell-tools#readme",
 "dependencies": {
-"@cspell/cspell-pipe": "9.6.
-"commander": "^14.0.
+"@cspell/cspell-pipe": "9.6.3",
+"commander": "^14.0.3",
 "cosmiconfig": "9.0.0",
-"cspell-trie-lib": "9.6.
+"cspell-trie-lib": "9.6.3",
 "glob": "^13.0.0",
-"hunspell-reader": "9.6.
+"hunspell-reader": "9.6.3",
 "yaml": "^2.8.2"
 },
 "engines": {
@@ -80,5 +80,5 @@
 "ts-json-schema-generator": "^2.4.0"
 },
 "module": "bin.mjs",
-"gitHead": "
+"gitHead": "500b996b6c0a6ff025c42ef98db44776f43a9e72"
 }