@cspell/cspell-tools 8.15.4 → 8.15.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -38,7 +38,8 @@
  },
  "type": "array"
  }
- ]
+ ],
+ "description": "Words in the `allowedSplitWords` are considered correct and can be used as a basis for splitting compound words.\n\nIf entries can be split so that all the words in the entry are allowed, then only the individual words are added, otherwise the entire entry is added. This is to prevent misspellings in CamelCase words from being introduced into the dictionary."
  },
  "keepRawCase": {
  "default": false,
@@ -64,6 +65,11 @@
  ],
  "default": false,
  "description": "Split lines into words."
+ },
+ "storeSplitWordsAsCompounds": {
+ "default": false,
+ "description": "Words that have been split using the `allowedSplitWords` are added to the dictionary as compoundable words. These words are prefixed / suffixed with `*`.",
+ "type": "boolean"
  }
  },
  "required": [
@@ -89,7 +95,8 @@
  },
  "type": "array"
  }
- ]
+ ],
+ "description": "Words in the `allowedSplitWords` are considered correct and can be used as a basis for splitting compound words.\n\nIf entries can be split so that all the words in the entry are allowed, then only the individual words are added, otherwise the entire entry is added. This is to prevent misspellings in CamelCase words from being introduced into the dictionary."
  },
  "filename": {
  "$ref": "#/definitions/FilePath"
@@ -115,6 +122,11 @@
  ],
  "default": false,
  "description": "Split lines into words."
+ },
+ "storeSplitWordsAsCompounds": {
+ "default": false,
+ "description": "Words that have been split using the `allowedSplitWords` are added to the dictionary as compoundable words. These words are prefixed / suffixed with `*`.",
+ "type": "boolean"
  }
  },
  "required": [
@@ -171,6 +183,11 @@
  "description": "Name of target, used as the basis of target file name.",
  "type": "string"
  },
+ "removeDuplicates": {
+ "default": false,
+ "description": "Remove duplicate words, favor lower case words over mixed case words. Combine compound prefixes where possible.",
+ "type": "boolean"
+ },
  "sort": {
  "default": true,
  "description": "Sort the words in the resulting dictionary. Does not apply to `trie` based formats.",
@@ -249,6 +266,11 @@
  "description": "Maximum number of nested Hunspell Rules to apply. This is needed for recursive dictionaries like Hebrew.",
  "type": "number"
  },
+ "removeDuplicates": {
+ "default": false,
+ "description": "Remove duplicate words, favor lower case words over mixed case words. Combine compound prefixes where possible.",
+ "type": "boolean"
+ },
  "rootDir": {
  "description": "Specify the directory where all relative paths will resolved against. By default, all relative paths are relative to the location of the config file.",
  "type": "string"
@@ -271,6 +293,11 @@
  "default": false,
  "description": "Split lines into words."
  },
+ "storeSplitWordsAsCompounds": {
+ "default": false,
+ "description": "Words that have been split using the `allowedSplitWords` are added to the dictionary as compoundable words. These words are prefixed / suffixed with `*`.",
+ "type": "boolean"
+ },
  "targets": {
  "description": "Optional Target Dictionaries to create.",
  "items": {
@@ -25,5 +25,11 @@ export interface CompileOptions {
  *
  */
  dictionaryDirectives?: string[] | undefined;
+ /**
+ * Remove duplicate words, favor lower case words over mixed case words.
+ * Combine compound prefixes where possible.
+ * @default false
+ */
+ removeDuplicates?: boolean;
  }
  //# sourceMappingURL=CompileOptions.d.ts.map
@@ -16,6 +16,7 @@ export interface SourceReaderOptions {
  legacy?: boolean;
  keepCase?: boolean;
  allowedSplitWords: AllowedSplitWordsCollection;
+ storeSplitWordsAsCompounds: boolean | undefined;
  }
  export type AnnotatedWord = string;
  export interface SourceReader {
@@ -25,8 +25,8 @@ function splitLines(lines, options) {
  return split();
  }
  async function textFileReader(reader, options) {
- const { legacy, splitWords: split, allowedSplitWords } = options;
- const words = [...parseFileLines(reader, { legacy, split, allowedSplitWords })];
+ const { legacy, splitWords: split, allowedSplitWords, storeSplitWordsAsCompounds } = options;
+ const words = [...parseFileLines(reader.lines, { legacy, split, allowedSplitWords, storeSplitWordsAsCompounds })];
  return {
  size: words.length,
  words,
@@ -1,6 +1,6 @@
  export interface WordsCollection {
  size: number;
- has(words: string): boolean;
+ has(words: string, caseSensitive: boolean): boolean;
  type?: string;
  }
  /**
@@ -18,6 +18,7 @@ export async function compile(request, options) {
  const targetOptions = {
  sort: request.sort,
  generateNonStrict: request.generateNonStrict,
+ removeDuplicates: request.removeDuplicates,
  };
  const conditional = options?.conditionalBuild || false;
  const checksumFile = resolveChecksumFile(request.checksumFile || conditional, rootDir);
@@ -57,6 +58,7 @@ export async function compileTarget(target, options, compileOptions) {
  const { format, sources, trieBase, sort = true, generateNonStrict = false, excludeWordsFrom } = target;
  const targetDirectory = path.resolve(rootDir, target.targetDirectory ?? cwd ?? process.cwd());
  const dictionaryDirectives = target.dictionaryDirectives ?? compileOptions.dictionaryDirectives;
+ const removeDuplicates = target.removeDuplicates ?? false;
  const excludeFilter = await createExcludeFilter(excludeWordsFrom);
  const generateNonStrictTrie = target.generateNonStrict ?? true;
  const name = normalizeTargetName(target.name);
@@ -69,6 +71,7 @@ export async function compileTarget(target, options, compileOptions) {
  generateNonStrict,
  filter: excludeFilter,
  dictionaryDirectives,
+ // removeDuplicates, // Add this in if we use it.
  });
  const checksumRoot = (checksumFile && path.dirname(checksumFile)) || rootDir;
  const deps = [...calculateDependencies(filename, filesToProcess, excludeWordsFrom, checksumRoot)];
@@ -88,10 +91,16 @@ export async function compileTarget(target, options, compileOptions) {
  trie4: format === 'trie4',
  generateNonStrict: generateNonStrictTrie,
  dictionaryDirectives: undefined,
+ // removeDuplicates, // Add this in if we use it.
  });
  }
  : async (words, dst) => {
- return compileWordList(pipe(words, normalizer), dst, { sort, generateNonStrict, dictionaryDirectives });
+ return compileWordList(pipe(words, normalizer), dst, {
+ sort,
+ generateNonStrict,
+ dictionaryDirectives,
+ removeDuplicates,
+ });
  };
  await processFiles(action, filesToProcess, filename);
  logWithTimestamp(`Done compile: ${target.name}`);
@@ -166,7 +175,7 @@ async function readFileList(fileList) {
  .filter((a) => !!a);
  }
  async function readFileSource(fileSource, sourceOptions) {
- const { filename, keepRawCase = sourceOptions.keepRawCase || false, split = sourceOptions.split || false, maxDepth, } = fileSource;
+ const { filename, keepRawCase = sourceOptions.keepRawCase || false, split = sourceOptions.split || false, maxDepth, storeSplitWordsAsCompounds, } = fileSource;
  const legacy = split === 'legacy';
  const splitWords = legacy ? false : split;
  // console.warn('fileSource: %o,\n targetOptions %o, \n opt: %o', fileSource, targetOptions, opt);
@@ -177,6 +186,7 @@ async function readFileSource(fileSource, sourceOptions) {
  splitWords,
  keepCase: keepRawCase,
  allowedSplitWords,
+ storeSplitWordsAsCompounds,
  };
  logWithTimestamp(`Reading ${path.basename(filename)}`);
  const stream = await streamSourceWordsFromFile(filename, readerOptions);
@@ -208,6 +218,6 @@ async function createExcludeFilter(excludeWordsFrom) {
  if (!excludeWordsFrom || !excludeWordsFrom.length)
  return () => true;
  const excludeWords = await createWordsCollectionFromFiles(excludeWordsFrom);
- return (word) => !excludeWords.has(word);
+ return (word) => !excludeWords.has(word, word.toUpperCase() !== word);
  }
  //# sourceMappingURL=compile.js.map
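
`createExcludeFilter` now passes the new `caseSensitive` flag explicitly: it is `true` unless the candidate word is entirely upper case, so only all-caps words fall back to a lower-case lookup against the exclude list (the fallback itself is implemented by `buildHasFn` in the next file). A small self-contained sketch of that check; the `isExcluded` helper is illustrative, not part of the package:

```ts
// Sketch only: mirrors the expression `excludeWords.has(word, word.toUpperCase() !== word)`.
interface WordsCollection {
  size: number;
  has(word: string, caseSensitive: boolean): boolean;
}

const isExcluded = (word: string, excludeWords: WordsCollection): boolean =>
  excludeWords.has(word, word.toUpperCase() !== word);

// With an exclude list that contains only 'http':
//   isExcluded('HTTP', list) -> true   (all caps, caseSensitive=false, falls back to 'http')
//   isExcluded('Http', list) -> false  (mixed case, caseSensitive=true, exact match required)
```
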
@@ -1,14 +1,15 @@
+ import { parseDictionary } from 'cspell-trie-lib';
  import { createReader } from './Reader.js';
  import { defaultAllowedSplitWords, defaultExcludeWordsCollection } from './WordsCollection.js';
  class AllowedSplitWordsImpl {
- words;
+ collection;
  size;
  constructor(collection) {
- this.words = collection;
+ this.collection = collection;
  this.size = collection.size;
  }
- has(word) {
- return !this.size || this.words.has(word);
+ has(word, caseSensitive) {
+ return !this.size || this.collection.has(word, caseSensitive);
  }
  }
  export async function createAllowedSplitWordsFromFiles(files) {
@@ -22,9 +23,30 @@ export function createAllowedSplitWords(words) {
  return defaultAllowedSplitWords;
  return new AllowedSplitWordsImpl(createWordsCollection(words));
  }
+ function buildHasFn(dict) {
+ function has(word, caseSensitive) {
+ const r = dict.hasWord(word, true);
+ if (r || caseSensitive)
+ return r;
+ const lc = word.toLowerCase();
+ if (lc == word)
+ return false;
+ return dict.hasWord(lc, true);
+ }
+ return has;
+ }
  async function readFile(filename) {
- const reader = await createReader(filename, {});
- return [...reader];
+ return await createReader(filename, {});
+ }
+ function readersToCollection(readers) {
+ const dictReaders = readers.filter(isDictionaryReader).map(dictReaderToCollection);
+ const nonDictCollection = lineReadersToCollection(readers.filter((a) => !isDictionaryReader(a)));
+ const collections = [...dictReaders, nonDictCollection];
+ const collection = {
+ size: collections.reduce((s, a) => s + a.size, 0),
+ has: (word, caseSensitive) => collections.some((a) => a.has(word, caseSensitive)),
+ };
+ return collection;
  }
  const cache = new WeakMap();
  export async function createWordsCollectionFromFiles(files) {
@@ -33,7 +55,7 @@ export async function createWordsCollectionFromFiles(files) {
  if (cached)
  return cached;
  const sources = await Promise.all(files.map((file) => readFile(file)));
- const collection = createWordsCollection(sources.flat());
+ const collection = readersToCollection(sources);
  cache.set(files, collection);
  return collection;
  }
@@ -44,17 +66,19 @@ export function createWordsCollection(words) {
  .map((a) => a.trim())
  .filter((a) => !!a)
  .filter((a) => !a.startsWith('#'));
- return new Set(arrWords);
+ const setOfWords = new Set(arrWords);
+ const has = buildHasFn({ hasWord: (word) => setOfWords.has(word) });
+ return { size: setOfWords.size, has };
  }
  class ExcludeWordsCollectionImpl {
- words;
+ collection;
  size;
  constructor(collection) {
- this.words = collection;
+ this.collection = collection;
  this.size = collection.size;
  }
- has(word) {
- return this.words.has(word);
+ has(word, caseSensitive) {
+ return this.collection.has(word, caseSensitive);
  }
  }
  export async function createExcludeWordsCollectionFromFiles(files) {
@@ -66,4 +90,19 @@ export async function createExcludeWordsCollectionFromFiles(files) {
  export function createExcludeWordsCollection(words) {
  return new ExcludeWordsCollectionImpl(words ? createWordsCollection(words) : new Set());
  }
+ function isDictionaryReader(reader) {
+ return 'hasWord' in reader && !!reader.hasWord;
+ }
+ function dictReaderToCollection(reader) {
+ return { size: reader.size, has: buildHasFn(reader) };
+ }
+ function lineReadersToCollection(readers) {
+ function* words() {
+ for (const reader of readers) {
+ yield* reader.lines;
+ }
+ }
+ const dict = parseDictionary(words(), { stripCaseAndAccents: false });
+ return { size: dict.size, has: buildHasFn(dict) };
+ }
  //# sourceMappingURL=createWordsCollection.js.map
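
`buildHasFn` above is what gives every words collection (plain word lists, trie-backed dictionaries, and the merged multi-reader collection) the same lookup rule: an exact match first, then a lower-case retry when `caseSensitive` is `false`. A minimal self-contained sketch of that rule applied to a `Set`, in the spirit of `createWordsCollection`; the `makeCollection` name is illustrative:

```ts
// Sketch: the lookup rule of buildHasFn applied to a simple Set-backed collection.
function makeCollection(words: Iterable<string>) {
  const set = new Set(words);
  const has = (word: string, caseSensitive: boolean): boolean => {
    if (set.has(word)) return true;     // exact match always wins
    if (caseSensitive) return false;    // no fallback when case-sensitive
    const lc = word.toLowerCase();
    return lc !== word && set.has(lc);  // otherwise retry in lower case
  };
  return { size: set.size, has };
}

const allowed = makeCollection(['paris', 'New York']);
allowed.has('Paris', true);    // false - exact lookup only
allowed.has('Paris', false);   // true  - falls back to 'paris'
allowed.has('New York', true); // true
```
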
@@ -6,7 +6,7 @@ export function legacyLineToWords(line, keepCase, allowedSplitWords) {
  // Remove punctuation and non-letters.
  const filteredLine = line.replaceAll(regNonWord, '|');
  const wordGroups = filteredLine.split('|');
- const words = pipe(wordGroups, opConcatMap((a) => a.split(regExpSpaceOrDash)), opConcatMap((a) => splitCamelCaseIfAllowed(a, allowedSplitWords, keepCase)), opMap((a) => a.trim()), opFilter((a) => !!a), opFilter((s) => !regExpRepeatChars.test(s)));
+ const words = pipe(wordGroups, opConcatMap((a) => a.split(regExpSpaceOrDash)), opConcatMap((a) => splitCamelCaseIfAllowed(a, allowedSplitWords, keepCase, '')), opMap((a) => a.trim()), opFilter((a) => !!a), opFilter((s) => !regExpRepeatChars.test(s)));
  return words;
  }
  export function* legacyLinesToWords(lines, keepCase, allowedSplitWords) {
@@ -9,7 +9,11 @@ export interface BaseReader {
  size: number;
  type: 'Hunspell' | 'TextFile' | 'Trie';
  lines: Iterable<AnnotatedWord>;
+ readonly hasWord?: (word: string, caseSensitive: boolean) => boolean;
  }
- export interface Reader extends BaseReader, Iterable<string> {
+ export interface Reader extends BaseReader {
+ }
+ export interface DictionaryReader extends BaseReader {
+ readonly hasWord: (word: string, caseSensitive: boolean) => boolean;
  }
  //# sourceMappingURL=ReaderOptions.d.ts.map
@@ -1,3 +1,3 @@
- import type { BaseReader } from './ReaderOptions.js';
- export declare function trieFileReader(filename: string): Promise<BaseReader>;
+ import type { DictionaryReader } from './ReaderOptions.js';
+ export declare function trieFileReader(filename: string): Promise<DictionaryReader>;
  //# sourceMappingURL=trieFileReader.d.ts.map
@@ -10,6 +10,7 @@ export async function trieFileReader(filename) {
  return trie.size();
  },
  lines: words,
+ hasWord: (word, caseSensitive) => trie.hasWord(word, caseSensitive),
  };
  }
  //# sourceMappingURL=trieFileReader.js.map
@@ -1,5 +1,5 @@
  import type { AllowedSplitWordsCollection } from './WordsCollection.js';
  export declare const regExpSpaceOrDash: RegExp;
  export declare const regExpIsNumber: RegExp;
- export declare function splitCamelCaseIfAllowed(word: string, allowedWords: AllowedSplitWordsCollection, keepCase: boolean): string[];
+ export declare function splitCamelCaseIfAllowed(word: string, allowedWords: AllowedSplitWordsCollection, keepCase: boolean, compoundPrefix: string): string[];
  //# sourceMappingURL=splitCamelCaseIfAllowed.d.ts.map
@@ -1,21 +1,34 @@
- import * as Text from './text.js';
+ import { isSingleLetter, splitCamelCaseWord } from './text.js';
  export const regExpSpaceOrDash = /[- ]+/g;
  export const regExpIsNumber = /^\d+$/;
- export function splitCamelCaseIfAllowed(word, allowedWords, keepCase) {
+ export function splitCamelCaseIfAllowed(word, allowedWords, keepCase, compoundPrefix) {
  const split = [...splitCamelCase(word)];
  if (split.length == 1)
  return adjustCases(split, allowedWords, keepCase);
- const missing = split.find((w) => isUnknown(w, allowedWords));
- if (missing !== undefined)
+ const missing = split.some((w) => isUnknown(w, allowedWords));
+ if (missing)
  return [word];
- return adjustCases(split, allowedWords, keepCase);
+ const wordIndexes = calcWordIndex(word, split);
+ const adjusted = adjustCases(split, allowedWords, keepCase);
+ return !compoundPrefix
+ ? adjusted
+ : adjusted.map((w, i) => {
+ const { px, sx } = wordIndexes[i];
+ const canCompound = w.length > 2;
+ const lc = w.toLowerCase();
+ const p = canCompound && isSingleLetter(px) ? compoundPrefix : '';
+ const s = canCompound && isSingleLetter(sx) ? compoundPrefix : '';
+ if (lc.length < 4 || allowedWords.has(w, true))
+ return p + w + s;
+ return p + lc + s;
+ });
  }
  function adjustCases(words, allowedWords, keepCase) {
  return words.map((w) => adjustCase(w, allowedWords, keepCase));
  }
  function adjustCase(word, allowedWords, keepCase) {
  const lc = word.toLowerCase();
- if (!allowedWords.has(lc))
+ if (!allowedWords.has(lc, true))
  return word;
  if (lc === word)
  return word;
@@ -26,14 +39,27 @@ function adjustCase(word, allowedWords, keepCase) {
  return word;
  }
  function isUnknown(word, allowedWords) {
- return !allowedWords.has(word) && !allowedWords.has(word.toLowerCase());
+ if (word === 'ERROR') {
+ return !allowedWords.has(word, false);
+ }
+ return !allowedWords.has(word, false);
  }
  function splitCamelCase(word) {
- const splitWords = Text.splitCamelCaseWord(word, false).filter((word) => !regExpIsNumber.test(word));
+ const splitWords = splitCamelCaseWord(word).filter((word) => !regExpIsNumber.test(word));
  // We only want to preserve this: "New York" and not "Namespace DNSLookup"
  if (splitWords.length > 1 && regExpSpaceOrDash.test(word)) {
  return splitWords.flatMap((w) => w.split(regExpSpaceOrDash));
  }
  return splitWords;
  }
+ function calcWordIndex(word, words) {
+ let i = 0;
+ return words.map((w) => {
+ const j = word.indexOf(w, i);
+ const k = j + w.length;
+ const wIndex = { word: w, i: j, px: word[j - 1] || '', sx: word[k] || '' };
+ i = k;
+ return wIndex;
+ });
+ }
  //# sourceMappingURL=splitCamelCaseIfAllowed.js.map
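
The new `compoundPrefix` parameter feeds the mapping at the top of `splitCamelCaseIfAllowed`: `calcWordIndex` records the character just before (`px`) and just after (`sx`) each split part, and a part whose neighbour is a letter gets the compound marker on that side. A simplified, self-contained sketch of just that boundary-marking step; it skips the case adjustment and allowed-word checks the real code performs, and `markCompounds` is an illustrative name:

```ts
// Sketch: mark interior split boundaries with a compound marker.
// The real code also lower-cases / filters parts via allowedSplitWords.
const isLetter = (c: string): boolean => /^\p{L}$/u.test(c); // simplified isSingleLetter

function markCompounds(word: string, parts: string[], marker = '+'): string[] {
  let i = 0;
  return parts.map((part) => {
    const start = word.indexOf(part, i);
    const end = start + part.length;
    i = end;
    const before = word[start - 1] ?? ''; // px in calcWordIndex
    const after = word[end] ?? '';        // sx in calcWordIndex
    const canCompound = part.length > 2;
    const prefix = canCompound && isLetter(before) ? marker : '';
    const suffix = canCompound && isLetter(after) ? marker : '';
    return prefix + part + suffix;
  });
}

markCompounds('errorCode', ['error', 'Code']); // ['error+', '+Code']
```
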
@@ -1,5 +1,10 @@
  /**
  * Split camelCase words into an array of strings.
  */
- export declare function splitCamelCaseWord(word: string, autoStem?: boolean): string[];
+ export declare function splitCamelCaseWord(word: string): string[];
+ /**
+ * Split camelCase words into an array of strings, try to fix English words.
+ */
+ export declare function splitCamelCaseWordAutoStem(word: string): string[];
+ export declare function isSingleLetter(c: string): boolean;
  //# sourceMappingURL=text.d.ts.map
@@ -2,14 +2,27 @@
  const regExUpperSOrIng = /(\p{Lu}+'?(?:s|ing|ies|es|ings|ed|ning))(?!\p{Ll})/gu;
  const regExSplitWords = /([\p{Ll}])([\p{Lu}])/gu;
  const regExSplitWords2 = /(\p{Lu})(\p{Lu}\p{Ll})/gu;
+ const regExpIsLetter = /^\p{L}\p{M}{0,2}$/u;
  /**
  * Split camelCase words into an array of strings.
  */
- export function splitCamelCaseWord(word, autoStem = true) {
- const wPrime = autoStem ? word.replaceAll(regExUpperSOrIng, (s) => s[0] + s.slice(1).toLowerCase()) : word;
- const pass1 = wPrime.replaceAll(regExSplitWords, '$1|$2');
+ export function splitCamelCaseWord(word) {
+ const pass1 = word.replaceAll(regExSplitWords, '$1|$2');
  const pass2 = pass1.replaceAll(regExSplitWords2, '$1|$2');
  const pass3 = pass2.replaceAll(/[\d_]+/g, '|');
  return pass3.split('|').filter((a) => !!a);
  }
+ /**
+ * Split camelCase words into an array of strings, try to fix English words.
+ */
+ export function splitCamelCaseWordAutoStem(word) {
+ return splitCamelCaseWord(word.replaceAll(regExUpperSOrIng, tailToLowerCase));
+ }
+ function tailToLowerCase(word) {
+ const letters = [...word];
+ return letters[0] + letters.slice(1).join('').toLowerCase();
+ }
+ export function isSingleLetter(c) {
+ return regExpIsLetter.test(c);
+ }
  //# sourceMappingURL=text.js.map
@@ -1,5 +1,6 @@
  import type { CompileOptions } from './CompileOptions.js';
  export declare function compileWordList(lines: Iterable<string>, destFilename: string, options: CompileOptions): Promise<void>;
+ declare function removeDuplicates(words: Iterable<string>): Iterable<string>;
  export interface TrieOptions {
  base?: number;
  trie3?: boolean;
@@ -10,5 +11,7 @@ export interface CompileTrieOptions extends CompileOptions, TrieOptions {
  export declare function compileTrie(words: Iterable<string>, destFilename: string, options: CompileTrieOptions): Promise<void>;
  export declare const __testing__: {
  wordListHeader: string;
+ removeDuplicates: typeof removeDuplicates;
  };
+ export {};
  //# sourceMappingURL=wordListCompiler.d.ts.map
@@ -21,12 +21,102 @@ export async function compileWordList(lines, destFilename, options) {
  }
  function normalize(lines, options) {
  const filter = normalizeTargetWords(options);
- const iter = pipe(lines, filter);
+ const cleanLines = options.removeDuplicates ? removeDuplicates(lines) : lines;
+ const iter = pipe(cleanLines, filter);
  if (!options.sort)
  return iter;
  const result = new Set(iter);
  return [...result].sort();
  }
+ function stripCompoundAFix(word) {
+ return word.replaceAll('*', '').replaceAll('+', '');
+ }
+ function* removeDuplicates(words) {
+ const wordSet = new Set(words);
+ const wordForms = new Map();
+ for (const word of wordSet) {
+ const lc = stripCompoundAFix(word.toLowerCase());
+ const forms = wordForms.get(lc) ?? [];
+ forms.push(word);
+ wordForms.set(lc, forms);
+ }
+ for (const forms of wordForms.values()) {
+ if (forms.length <= 1) {
+ yield* forms;
+ continue;
+ }
+ const mForms = removeDuplicateForms(forms);
+ if (mForms.size <= 1) {
+ yield* mForms.values();
+ continue;
+ }
+ // Handle upper / lower mix.
+ const words = [...mForms.keys()];
+ const lc = words[0].toLowerCase();
+ const lcForm = mForms.get(lc);
+ if (!lcForm) {
+ yield* mForms.values();
+ continue;
+ }
+ mForms.delete(lc);
+ yield lcForm;
+ for (const form of mForms.values()) {
+ if (form.toLowerCase() === lcForm)
+ continue;
+ yield form;
+ }
+ }
+ }
+ /**
+ * solo
+ * optional_prefix*
+ * optional_suffix*
+ * required_prefix+
+ * required_suffix+
+ */
+ var Flags;
+ (function (Flags) {
+ Flags[Flags["base"] = 0] = "base";
+ Flags[Flags["noPfx"] = 1] = "noPfx";
+ Flags[Flags["noSfx"] = 2] = "noSfx";
+ Flags[Flags["pfx"] = 4] = "pfx";
+ Flags[Flags["sfx"] = 8] = "sfx";
+ Flags[Flags["noFix"] = 3] = "noFix";
+ Flags[Flags["midFix"] = 12] = "midFix";
+ })(Flags || (Flags = {}));
+ function applyFlags(word, flags) {
+ if (flags === Flags.noFix)
+ return word;
+ if (flags === (Flags.noFix | Flags.midFix))
+ return '*' + word + '*';
+ const p = flags & Flags.pfx ? (flags & Flags.noPfx ? '*' : '+') : '';
+ const s = flags & Flags.sfx ? (flags & Flags.noSfx ? '*' : '+') : '';
+ return s + word + p;
+ }
+ function removeDuplicateForms(forms) {
+ function flags(word, flag = 0) {
+ let f = Flags.base;
+ const isOptPrefix = word.endsWith('*');
+ const isPrefix = !isOptPrefix && word.endsWith('+');
+ const isAnyPrefix = isPrefix || isOptPrefix;
+ const isOptSuffix = word.startsWith('*');
+ const isSuffix = !isOptSuffix && word.startsWith('+');
+ const isAnySuffix = isSuffix || isOptSuffix;
+ f |= isAnyPrefix ? Flags.pfx : 0;
+ f |= !isPrefix ? Flags.noPfx : 0;
+ f |= isAnySuffix ? Flags.sfx : 0;
+ f |= !isSuffix ? Flags.noSfx : 0;
+ return flag | f;
+ }
+ const m = new Map();
+ for (const form of forms) {
+ const k = stripCompoundAFix(form);
+ m.set(k, flags(form, m.get(k)));
+ }
+ return new Map([...m.entries()].map(([form, flag]) => {
+ return [form, applyFlags(form, flag)];
+ }));
+ }
  function createWordListTarget(destFilename) {
  const target = createTarget(destFilename);
  return (seq) => target(pipe(seq, opMap((a) => a + '\n')));
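
Per the `removeDuplicates` generator above, entries that differ only in case or compound markers are grouped by their lower-cased, marker-stripped form and merged: the lower-case form is preferred over a mixed-case duplicate, and the compound markers of the surviving forms are combined (`+` for required compounding, `*` for optional). A minimal self-contained sketch of the marker merging for one group of forms (illustrative, not the package implementation):

```ts
// Sketch: merge compound-marker variants of one word, the way
// removeDuplicateForms / applyFlags combine them above.
function mergeForms(forms: string[]): string {
  const bare = forms[0].replace(/[*+]/g, '');
  let right = 0; // bit 1: some form compounds to the right; bit 2: some form does not require it
  let left = 0;  // same, for the left side
  for (const f of forms) {
    const optR = f.endsWith('*');
    const reqR = !optR && f.endsWith('+');
    right |= (optR || reqR ? 1 : 0) | (reqR ? 0 : 2);
    const optL = f.startsWith('*');
    const reqL = !optL && f.startsWith('+');
    left |= (optL || reqL ? 1 : 0) | (reqL ? 0 : 2);
  }
  const suffix = right === 3 ? '*' : right === 1 ? '+' : '';
  const prefix = left === 3 ? '*' : left === 1 ? '+' : '';
  return prefix + bare + suffix;
}

mergeForms(['walk', 'walk+', '+walk']); // '*walk*' - optional compounding on both sides
mergeForms(['code', 'code+']);          // 'code*'  - forward compounding becomes optional
```

The case preference is handled separately by the generator: a plain pair such as `Code` / `code` collapses to just `code`.
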
@@ -63,5 +153,6 @@ function createTrieTarget(destFilename, options) {
  }
  export const __testing__ = {
  wordListHeader,
+ removeDuplicates,
  };
  //# sourceMappingURL=wordListCompiler.js.map
@@ -24,6 +24,12 @@ export interface ParseFileOptions {
  */
  legacy?: boolean;
  allowedSplitWords: AllowedSplitWordsCollection;
+ /**
+ * Words that have been split using the `allowedSplitWords` are added to the dictionary as compoundable words.
+ * These words are prefixed / suffixed with `*`.
+ * @default undefined
+ */
+ storeSplitWordsAsCompounds: boolean | undefined;
  }
  type ParseFileOptionsRequired = Required<ParseFileOptions>;
  export declare const defaultParseDictionaryOptions: ParseFileOptionsRequired;
@@ -7,6 +7,7 @@ export function normalizeTargetWords(options) {
  const lineParser = createDictionaryLineParser({
  stripCaseAndAccents: options.generateNonStrict,
  stripCaseAndAccentsOnForbidden: true,
+ keepOptionalCompoundCharacter: true,
  });
  const operations = [
  opFilter((a) => !!a),
@@ -44,6 +45,7 @@ const _defaultOptions = {
  splitKeepBoth: false,
  // splitSeparator: regExpSplit,
  allowedSplitWords: { has: () => true, size: 0 },
+ storeSplitWordsAsCompounds: undefined,
  };
  export const defaultParseDictionaryOptions = Object.freeze(_defaultOptions);
  export const cSpellToolDirective = 'cspell-tools:';
@@ -56,9 +58,10 @@ export const setOfCSpellDirectiveFlags = ['no-split', 'split', 'keep-case', 'no-
  */
  export function createParseFileLineMapper(options) {
  const _options = options || _defaultOptions;
- const { splitKeepBoth = _defaultOptions.splitKeepBoth, allowedSplitWords = _defaultOptions.allowedSplitWords } = _options;
+ const { splitKeepBoth = _defaultOptions.splitKeepBoth, allowedSplitWords = _defaultOptions.allowedSplitWords, storeSplitWordsAsCompounds, } = _options;
  let { legacy = _defaultOptions.legacy } = _options;
  let { split = _defaultOptions.split, keepCase = legacy ? false : _defaultOptions.keepCase } = _options;
+ const compoundFix = storeSplitWordsAsCompounds ? '+' : '';
  function isString(line) {
  return typeof line === 'string';
  }
@@ -131,6 +134,9 @@ export function createParseFileLineMapper(options) {
  .filter((a) => !/^0[xo][0-9A-F]+$/i.test(a)); // c-style hex/octal digits
  return lines;
  }
+ function splitWordIntoWords(word) {
+ return splitCamelCaseIfAllowed(word, allowedSplitWords, keepCase, compoundFix);
+ }
  function* splitWords(lines) {
  for (const line of lines) {
  if (legacy) {
@@ -139,9 +145,7 @@ export function createParseFileLineMapper(options) {
  }
  if (split) {
  const words = splitLine(line);
- yield* !allowedSplitWords.size
- ? words
- : words.flatMap((word) => splitCamelCaseIfAllowed(word, allowedSplitWords, keepCase));
+ yield* !allowedSplitWords.size ? words : words.flatMap((word) => splitWordIntoWords(word));
  if (!splitKeepBoth)
  continue;
  }
@@ -76,6 +76,12 @@ export interface CompileTargetOptions {
  * ```
  */
  dictionaryDirectives?: string[] | undefined;
+ /**
+ * Remove duplicate words, favor lower case words over mixed case words.
+ * Combine compound prefixes where possible.
+ * @default false
+ */
+ removeDuplicates?: boolean | undefined;
  }
  export interface Target extends CompileTargetOptions {
  /**
@@ -144,7 +150,22 @@ export interface CompileSourceOptions {
  * @default false
  */
  keepRawCase?: boolean | undefined;
+ /**
+ * Words in the `allowedSplitWords` are considered correct and can be used
+ * as a basis for splitting compound words.
+ *
+ * If entries can be split so that all the words in the entry are allowed,
+ * then only the individual words are added, otherwise the entire entry is added.
+ * This is to prevent misspellings in CamelCase words from being introduced into the
+ * dictionary.
+ */
  allowedSplitWords?: FilePath | FilePath[] | undefined;
+ /**
+ * Words that have been split using the `allowedSplitWords` are added to the dictionary as compoundable words.
+ * These words are prefixed / suffixed with `*`.
+ * @default false
+ */
+ storeSplitWordsAsCompounds?: boolean | undefined;
  }
  export declare const configFileSchemaURL = "https://raw.githubusercontent.com/streetsidesoftware/cspell/main/packages/cspell-tools/cspell-tools.config.schema.json";
  //# sourceMappingURL=config.d.ts.map
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@cspell/cspell-tools",
- "version": "8.15.4",
+ "version": "8.15.5",
  "description": "Tools to assist with the development of cSpell",
  "publishConfig": {
  "access": "public",
@@ -51,12 +51,12 @@
  },
  "homepage": "https://github.com/streetsidesoftware/cspell/tree/main/packages/cspell-tools#readme",
  "dependencies": {
- "@cspell/cspell-pipe": "8.15.4",
+ "@cspell/cspell-pipe": "8.15.5",
  "commander": "^12.1.0",
  "cosmiconfig": "9.0.0",
- "cspell-trie-lib": "8.15.4",
+ "cspell-trie-lib": "8.15.5",
  "glob": "^10.4.5",
- "hunspell-reader": "8.15.4",
+ "hunspell-reader": "8.15.5",
  "yaml": "^2.6.0"
  },
  "engines": {
@@ -67,5 +67,5 @@
  "ts-json-schema-generator": "^2.3.0"
  },
  "module": "bin.mjs",
- "gitHead": "27882ec49769126e3725b2cf180fee137c9a6ebe"
+ "gitHead": "5f974b2d4d61e2582aba5a1c87fcbc127f5f06bb"
  }