conlangers-suite-test 1.0.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. package/LICENSE +21 -0
  2. package/README.md +71 -0
  3. package/dist/collator.d.ts +5 -0
  4. package/dist/conlangers-suite-test.cjs.js +771 -0
  5. package/dist/conlangers-suite-test.es.js +6073 -0
  6. package/dist/escape_mapper.d.ts +11 -0
  7. package/dist/generata/supra_builder.d.ts +14 -0
  8. package/dist/generata/word_builder.d.ts +23 -0
  9. package/dist/index.d.ts +6 -0
  10. package/dist/logger.d.ts +45 -0
  11. package/dist/main.d.ts +22 -0
  12. package/dist/parser.d.ts +79 -0
  13. package/dist/resolvers/canon_graphemes_resolver.d.ts +14 -0
  14. package/dist/resolvers/category_resolver.d.ts +26 -0
  15. package/dist/resolvers/feature_resolver.d.ts +23 -0
  16. package/dist/resolvers/generation_resolver.d.ts +34 -0
  17. package/dist/resolvers/nesca_grammar_stream.d.ts +15 -0
  18. package/dist/resolvers/trans_category_resolver.d.ts +15 -0
  19. package/dist/resolvers/transform_resolver.d.ts +63 -0
  20. package/dist/text_builder.d.ts +31 -0
  21. package/dist/transforma/carryover_associator.d.ts +11 -0
  22. package/dist/transforma/chance_mapper.d.ts +15 -0
  23. package/dist/transforma/greek.d.ts +2 -0
  24. package/dist/transforma/hangul.d.ts +3 -0
  25. package/dist/transforma/lettercase_mapper.d.ts +12 -0
  26. package/dist/transforma/reference_mapper.d.ts +14 -0
  27. package/dist/transforma/transformer.d.ts +66 -0
  28. package/dist/transforma/xsampa.d.ts +3 -0
  29. package/dist/utils/picker_utilities.d.ts +4 -0
  30. package/dist/utils/types.d.ts +161 -0
  31. package/dist/utils/utilities.d.ts +12 -0
  32. package/dist/word.d.ts +19 -0
  33. package/dist/word_bank.d.ts +27 -0
  34. package/package.json +92 -0
@@ -0,0 +1,11 @@
1
+ declare class Escape_Mapper {
2
+ private map;
3
+ counter: number;
4
+ constructor();
5
+ escape_backslash_pairs(input: string): string;
6
+ escape_special_chars(input: string): string;
7
+ escape_named_escape(input: string): string;
8
+ restore_escaped_chars(input: string): string;
9
+ restore_preserve_escaped_chars(input: string): string;
10
+ }
11
+ export default Escape_Mapper;
@@ -0,0 +1,14 @@
1
+ import Logger from "../logger";
2
+ export declare class Supra_Builder {
3
+ private logger;
4
+ private weights;
5
+ private letters;
6
+ id_counter: number;
7
+ constructor(logger: Logger);
8
+ process_string(input: string, wordshape_line_num: number): string;
9
+ extract_letters_and_weights(input: string): [string[], (number | "s")[]];
10
+ replace_letter_and_clean(input: string, target_ID: number): string;
11
+ get_weights(): Record<number, number | "s">;
12
+ get_letters(): Record<number, string>;
13
+ }
14
+ export default Supra_Builder;
@@ -0,0 +1,23 @@
1
+ import Word from "../word";
2
+ import Escape_Mapper from "../escape_mapper";
3
+ import Supra_Builder from "./supra_builder";
4
+ import type { Output_Mode } from "../utils/types";
5
+ declare class Word_Builder {
6
+ private escape_mapper;
7
+ private supra_builder;
8
+ private categories;
9
+ private wordshapes;
10
+ private category_distribution;
11
+ private optionals_weight;
12
+ constructor(escape_mapper: Escape_Mapper, supra_builder: Supra_Builder, categories: Map<string, {
13
+ graphemes: string[];
14
+ weights: number[];
15
+ }>, wordshapes: {
16
+ items: string[];
17
+ weights: number[];
18
+ }, category_distribution: string, optionals_weight: number, output_mode: Output_Mode);
19
+ make_word(): Word;
20
+ resolve_wordshape_sets(input_list: string, distribution: string, optionals_weight: number): string;
21
+ extract_value_and_weight(input_list: string[], default_distribution: string): [string[], number[]];
22
+ }
23
+ export default Word_Builder;
@@ -0,0 +1,6 @@
1
+ import { vocabug, nesca } from "./main";
2
+ declare const the_conlangers_suite: {
3
+ vocabug: typeof vocabug;
4
+ nesca: typeof nesca;
5
+ };
6
+ export default the_conlangers_suite;
@@ -0,0 +1,45 @@
1
+ declare class Logger {
2
+ errors: string[];
3
+ warnings: string[];
4
+ infos: string[];
5
+ diagnostics: string[];
6
+ payload: string;
7
+ constructor();
8
+ Uncaught_Error: {
9
+ new (original: Error): {
10
+ name: string;
11
+ message: string;
12
+ stack?: string;
13
+ cause?: unknown;
14
+ };
15
+ captureStackTrace(targetObject: object, constructorOpt?: Function): void;
16
+ prepareStackTrace(err: Error, stackTraces: NodeJS.CallSite[]): any;
17
+ stackTraceLimit: number;
18
+ };
19
+ uncaught_error(original: Error): void;
20
+ private extract_location;
21
+ Validation_Error: {
22
+ new (message: string): {
23
+ name: string;
24
+ message: string;
25
+ stack?: string;
26
+ cause?: unknown;
27
+ };
28
+ captureStackTrace(targetObject: object, constructorOpt?: Function): void;
29
+ prepareStackTrace(err: Error, stackTraces: NodeJS.CallSite[]): any;
30
+ stackTraceLimit: number;
31
+ };
32
+ validation_error(message: string, line_num?: number | null): never;
33
+ warn(warn: string, line_num?: number | null): void;
34
+ info(info: string): void;
35
+ diagnostic(diagnostic: string): void;
36
+ set_payload(payload: string): void;
37
+ create_log(): {
38
+ payload: string;
39
+ errors: string[];
40
+ warnings: string[];
41
+ infos: string[];
42
+ diagnostics: string[];
43
+ };
44
+ }
45
+ export default Logger;
package/dist/main.d.ts ADDED
@@ -0,0 +1,22 @@
1
+ import type { Output_Mode } from "./utils/types";
2
+ import type { Log } from "./utils/types";
3
+ type Vocabug_Options = {
4
+ file: string;
5
+ num_of_words?: number | string;
6
+ output_mode?: Output_Mode;
7
+ remove_duplicates?: boolean;
8
+ force_word_limit?: boolean;
9
+ sort_words?: boolean;
10
+ output_divider?: string;
11
+ };
12
+ type Nesca_Options = {
13
+ file: string;
14
+ input_words: string;
15
+ output_mode?: Output_Mode;
16
+ sort_words?: boolean;
17
+ input_divider?: string;
18
+ output_divider?: string;
19
+ };
20
+ export declare function vocabug({ file, num_of_words, output_mode, remove_duplicates, force_word_limit, sort_words, output_divider, }: Vocabug_Options): Log;
21
+ export declare function nesca({ file, input_words, output_mode, input_divider, output_divider, sort_words, }: Nesca_Options): Log;
22
+ export {};
@@ -0,0 +1,79 @@
1
+ import type Escape_Mapper from "./escape_mapper";
2
+ import type lettercase_mapper from "./transforma/lettercase_mapper";
3
+ import Logger from "./logger";
4
+ import type { App, Schema } from "./utils/types";
5
+ import { Output_Mode, Distribution, Directive, Transform_Pending } from "./utils/types";
6
+ import type Chance_Mapper from "./transforma/chance_mapper";
7
+ declare class Parser {
8
+ private logger;
9
+ private escape_mapper;
10
+ lettercase_mapper: lettercase_mapper;
11
+ chance_mapper: Chance_Mapper;
12
+ num_of_words: number;
13
+ output_mode: Output_Mode;
14
+ remove_duplicates: boolean;
15
+ force_word_limit: boolean;
16
+ sort_words: boolean;
17
+ input_divider: string;
18
+ output_divider: string;
19
+ directive: Directive;
20
+ disable_directive: "p" | boolean;
21
+ directive_name: string;
22
+ category_distribution: Distribution;
23
+ category_pending: Map<string, {
24
+ content: string;
25
+ line_num: number;
26
+ }>;
27
+ units: Map<string, {
28
+ content: string;
29
+ line_num: number;
30
+ }>;
31
+ optionals_weight: number;
32
+ wordshape_distribution: Distribution;
33
+ wordshape_pending: {
34
+ content: string;
35
+ line_num: number;
36
+ };
37
+ feature_pending: Map<string, {
38
+ content: string;
39
+ line_num: number;
40
+ }>;
41
+ schema_input: Schema;
42
+ schema_output: Schema;
43
+ stages_pending: {
44
+ transforms_pending: Transform_Pending[];
45
+ name: string;
46
+ }[];
47
+ substages_pending: {
48
+ transforms_pending: Transform_Pending[];
49
+ name: string;
50
+ }[];
51
+ graphemes: string[];
52
+ syllable_boundaries: string[];
53
+ graphemes_pending: string;
54
+ alphabet: string[];
55
+ invisible: string[];
56
+ private file_line_num;
57
+ private app;
58
+ private current_stage_name;
59
+ constructor(logger: Logger, app: App, escape_mapper: Escape_Mapper, lettercase_mapper: lettercase_mapper, chance_mapper: Chance_Mapper, num_of_words_string: number | string, output_mode: Output_Mode, sort_words: boolean, remove_duplicates: boolean, force_word_limit: boolean, input_divider: string, output_divider: string);
60
+ private get_line;
61
+ parse_file(file: string): void;
62
+ push_transform_to_stage(transform: Transform_Pending): void;
63
+ get_cat_seg_fea(input: string, mode: "category" | "unit" | "feature"): [string, string, boolean];
64
+ private parse_distribution;
65
+ private validate_unit;
66
+ private parse_decorator;
67
+ private parse_directive;
68
+ private valid_words_brackets;
69
+ private parse_clusterfield;
70
+ private parse_routine;
71
+ private get_transform;
72
+ private get_schema;
73
+ private get_environment;
74
+ private validate_environment;
75
+ private parse_featurefield;
76
+ private parse_lettercasefield;
77
+ private valid_transform_brackets;
78
+ }
79
+ export default Parser;
@@ -0,0 +1,14 @@
1
+ import type Escape_Mapper from "../escape_mapper";
2
+ import Logger from "../logger";
3
+ import type { Associateme_Mapper } from "../utils/types";
4
+ declare class Canon_Graphemes_Resolver {
5
+ private logger;
6
+ private escape_mapper;
7
+ private graphemes_pending;
8
+ graphemes: string[];
9
+ associateme_mapper: Associateme_Mapper;
10
+ constructor(logger: Logger, escape_mapper: Escape_Mapper, graphemes_pending: string);
11
+ resolve_canon_graphemes(): void;
12
+ resolve_associatemes(): void;
13
+ }
14
+ export default Canon_Graphemes_Resolver;
@@ -0,0 +1,26 @@
1
+ import type Escape_Mapper from "../escape_mapper";
2
+ import Logger from "../logger";
3
+ import type { Output_Mode } from "../utils/types";
4
+ declare class Category_Resolver {
5
+ private logger;
6
+ private escape_mapper;
7
+ private output_mode;
8
+ category_distribution: string;
9
+ private category_pending;
10
+ categories: Map<string, {
11
+ graphemes: string[];
12
+ weights: number[];
13
+ }>;
14
+ trans_categories: Map<string, string[]>;
15
+ constructor(logger: Logger, output_mode: Output_Mode, escape_mapper: Escape_Mapper, category_distribution: string, category_pending: Map<string, {
16
+ content: string;
17
+ line_num: number;
18
+ }>);
19
+ private get_trans_categories;
20
+ resolve_categories(): void;
21
+ private valid_category_brackets;
22
+ private valid_category_weights;
23
+ private resolve_nested_categories;
24
+ show_debug(): void;
25
+ }
26
+ export default Category_Resolver;
@@ -0,0 +1,23 @@
1
+ import type Escape_Mapper from "../escape_mapper";
2
+ import Logger from "../logger";
3
+ import type { Output_Mode } from "../utils/types";
4
+ declare class Feature_Resolver {
5
+ private logger;
6
+ private escape_mapper;
7
+ private output_mode;
8
+ feature_pending: Map<string, {
9
+ content: string;
10
+ line_num: number;
11
+ }>;
12
+ features: Map<string, {
13
+ graphemes: string[];
14
+ }>;
15
+ graphemes: string[];
16
+ constructor(logger: Logger, output_mode: Output_Mode, escape_mapper: Escape_Mapper, feature_pending: Map<string, {
17
+ content: string;
18
+ line_num: number;
19
+ }>, graphemes: string[]);
20
+ resolve_features(): void;
21
+ show_debug(): void;
22
+ }
23
+ export default Feature_Resolver;
@@ -0,0 +1,34 @@
1
+ import Logger from "../logger";
2
+ import Supra_Builder from "../generata/supra_builder";
3
+ import type { Distribution, Output_Mode } from "../utils/types";
4
+ declare class Generation_Resolver {
5
+ private logger;
6
+ supra_builder: Supra_Builder;
7
+ private output_mode;
8
+ optionals_weight: number;
9
+ units: Map<string, {
10
+ content: string;
11
+ line_num: number;
12
+ }>;
13
+ wordshape_distribution: string;
14
+ private wordshape_pending;
15
+ wordshapes: {
16
+ items: string[];
17
+ weights: number[];
18
+ };
19
+ constructor(logger: Logger, output_mode: Output_Mode, supra_builder: Supra_Builder, wordshape_distribution: Distribution, units: Map<string, {
20
+ content: string;
21
+ line_num: number;
22
+ }>, wordshape_pending: {
23
+ content: string;
24
+ line_num: number;
25
+ }, optionals_weight: number);
26
+ private set_wordshapes;
27
+ private valid_words_brackets;
28
+ private extract_wordshape_value_and_weight;
29
+ private valid_words_weights;
30
+ private expand_wordshape_units;
31
+ private expand_units;
32
+ show_debug(): void;
33
+ }
34
+ export default Generation_Resolver;
@@ -0,0 +1,15 @@
1
+ import Logger from "../logger.js";
2
+ import Escape_Mapper from "../escape_mapper.js";
3
+ import type { Token } from "../utils/types.js";
4
+ import type { Token_Stream_Mode, Associateme_Mapper } from "../utils/types.js";
5
+ declare class Nesca_Grammar_Stream {
6
+ logger: Logger;
7
+ graphemes: string[];
8
+ associateme_mapper: Associateme_Mapper;
9
+ private escape_mapper;
10
+ constructor(logger: Logger, graphemes: string[], associateme_mapper: Associateme_Mapper, escape_mapper: Escape_Mapper);
11
+ main_parser(stream: string, mode: Token_Stream_Mode, line_num: number): Token[];
12
+ cluster_parser(stream: string, mode: Token_Stream_Mode, line_num: number): Token[];
13
+ find_base_location(mapper: Associateme_Mapper, grapheme: string): [number, number] | null;
14
+ }
15
+ export default Nesca_Grammar_Stream;
@@ -0,0 +1,15 @@
1
+ import Logger from "../logger";
2
+ import type { Output_Mode } from "../utils/types";
3
+ declare class Category_Resolver {
4
+ private logger;
5
+ private output_mode;
6
+ private category_pending;
7
+ trans_categories: Map<string, string[]>;
8
+ constructor(logger: Logger, output_mode: Output_Mode, category_pending: Map<string, {
9
+ content: string;
10
+ line_num: number;
11
+ }>);
12
+ private resolve_categories;
13
+ show_debug(): void;
14
+ }
15
+ export default Category_Resolver;
@@ -0,0 +1,63 @@
1
+ import Logger from "../logger";
2
+ import Nesca_Grammar_Stream from "./nesca_grammar_stream";
3
+ import type { Token, Output_Mode, Transform, Transform_Pending } from "../utils/types";
4
+ declare class Transform_Resolver {
5
+ private logger;
6
+ private output_mode;
7
+ nesca_grammar_stream: Nesca_Grammar_Stream;
8
+ categories: Map<string, string[]>;
9
+ stages_pending: {
10
+ transforms_pending: Transform_Pending[];
11
+ name: string;
12
+ }[];
13
+ stages: {
14
+ transforms: Transform[];
15
+ name: string;
16
+ }[];
17
+ substages_pending: {
18
+ transforms_pending: Transform_Pending[];
19
+ name: string;
20
+ }[];
21
+ substages: {
22
+ transforms: Transform[];
23
+ name: string;
24
+ }[];
25
+ syllable_boundaries: string[];
26
+ features: Map<string, {
27
+ graphemes: string[];
28
+ }>;
29
+ private line_num;
30
+ constructor(logger: Logger, output_mode: Output_Mode, nesca_grmmar_stream: Nesca_Grammar_Stream, categories: Map<string, string[]>, stages_pending: {
31
+ transforms_pending: Transform_Pending[];
32
+ name: string;
33
+ }[], substages_pending: {
34
+ transforms_pending: Transform_Pending[];
35
+ name: string;
36
+ }[], features: Map<string, {
37
+ graphemes: string[];
38
+ }>, syllable_boundaries: string[]);
39
+ resolve_stages(): void;
40
+ resolve_transforms(transform_pending: Transform_Pending[]): Transform[];
41
+ environment_helper(input: string): [string, string];
42
+ split_top_level(str: string): string[];
43
+ check_grammar_rules(str: string): void;
44
+ expand_chunk(chunk: string): string[];
45
+ resolve_alt_opt(input: string): string[][];
46
+ getTransformLengths<T>(target: T[][], result: T[][]): T[][];
47
+ categories_into_transform(input: string): string;
48
+ features_into_transform(stream: string): string;
49
+ private check_bracket_context;
50
+ private find_matching_bracket;
51
+ get_graphemes_from_matrix(feature_matrix: string): string;
52
+ normaliseTransformLength(target: string[][], result: string[][]): {
53
+ target_array: string[][];
54
+ result_array: string[][];
55
+ };
56
+ valid_transform_brackets(str: string): boolean;
57
+ valid_environment(input: string): boolean;
58
+ valid_cat_fea(stream: string): string;
59
+ format_tokens(seq: Token[]): string;
60
+ get_cluser_field_graphemes(input: string, mode: "RESULT" | "TARGET"): Token[][];
61
+ show_debug(): void;
62
+ }
63
+ export default Transform_Resolver;
@@ -0,0 +1,31 @@
1
+ import Word from "./word";
2
+ import Logger from "./logger";
3
+ import type { Output_Mode } from "./utils/types";
4
+ import Lettercase_Mapper from "./transforma/lettercase_mapper";
5
+ declare class Text_Builder {
6
+ private logger;
7
+ private lettercase_mapper;
8
+ private build_start;
9
+ private num_of_words;
10
+ private output_mode;
11
+ private remove_duplicates;
12
+ private force_word_limit;
13
+ private sort_words;
14
+ private output_divider;
15
+ private alphabet;
16
+ private invisible;
17
+ terminated: boolean;
18
+ private words;
19
+ private num_of_duplicates;
20
+ private num_of_rejects;
21
+ private num_of_duds;
22
+ private upper_gen_limit;
23
+ constructor(logger: Logger, lettercase_mapper: Lettercase_Mapper, build_start: number, num_of_words: number, remove_duplicates: boolean, force_word_limit: boolean, output_mode: Output_Mode, output_divider: string, sort_words: boolean, alphabet: string[], invisible: string[]);
24
+ add_word(word: Word): void;
25
+ create_record(): void;
26
+ make_text(): string;
27
+ paragraphify(words: string[]): string;
28
+ random_end_punctuation(): string;
29
+ show_debug(): void;
30
+ }
31
+ export default Text_Builder;
@@ -0,0 +1,11 @@
1
+ import type { Association, Associateme_Mapper } from "../utils/types";
2
+ declare class Carryover_Associator {
3
+ private caryover_list;
4
+ constructor();
5
+ set_item(entry_id: number, variant_id: number): void;
6
+ get_result_associateme(association: Association, associateme_mapper: Associateme_Mapper): string | null;
7
+ private find_first_item;
8
+ private remove_first_item;
9
+ find_grapheme(entry_id: number, base_id: number, variant_id: number, associateme_mapper: Associateme_Mapper): string | null;
10
+ }
11
+ export default Carryover_Associator;
@@ -0,0 +1,15 @@
1
+ declare class Chance_Mapper {
2
+ chances: {
3
+ id: number;
4
+ percent: number;
5
+ rolled: boolean | null;
6
+ }[];
7
+ check_parsing: boolean;
8
+ constructor();
9
+ add_chance(percent: number): void;
10
+ get_is_success(id: number): boolean | null;
11
+ reset(): void;
12
+ roll_all(): {};
13
+ get_last_chance(): number | null;
14
+ }
15
+ export default Chance_Mapper;
@@ -0,0 +1,2 @@
1
+ export declare function latin_to_greek(input: string): string;
2
+ export declare function greek_to_latin(input: string): string;
@@ -0,0 +1,3 @@
1
+ declare function latin_to_hangul(input: string): string;
2
+ declare function hangul_to_latin(input: string): string;
3
+ export { hangul_to_latin, latin_to_hangul };
@@ -0,0 +1,12 @@
1
+ declare class lettercase_mapper {
2
+ private map;
3
+ private reverse_map;
4
+ constructor();
5
+ create_map(new_map: Map<string, string>): void;
6
+ private tokenise;
7
+ capitalise(word: string): string;
8
+ decapitalise(word: string): string;
9
+ to_uppercase(word: string): string;
10
+ to_lowercase(word: string): string;
11
+ }
12
+ export default lettercase_mapper;
@@ -0,0 +1,14 @@
1
+ declare class Reference_Mapper {
2
+ private map;
3
+ capture_stream_index: number | null;
4
+ capture_stream: never[];
5
+ is_capturing_sequence: boolean;
6
+ constructor();
7
+ reset_capture_stream_index(): void;
8
+ set_capture_stream_index(index: number): void;
9
+ capture_reference(key: string, stream: string[]): void;
10
+ get_captured_reference(key: string): string[];
11
+ clone(): Reference_Mapper;
12
+ absorb(other: Reference_Mapper): void;
13
+ }
14
+ export default Reference_Mapper;
@@ -0,0 +1,66 @@
1
+ import Word from "../word";
2
+ import Logger from "../logger";
3
+ import type { Token, Output_Mode, Transform, Associateme_Mapper, Routine } from "../utils/types";
4
+ import Reference_Mapper from "./reference_mapper";
5
+ import Lettercase_Mapper from "./lettercase_mapper";
6
+ import Carryover_Associator from "./carryover_associator";
7
+ import Chance_Mapper from "./chance_mapper";
8
+ type Match_Result = {
9
+ start: number;
10
+ end: number;
11
+ matched: string[];
12
+ };
13
+ type Replacement = {
14
+ index_span: number;
15
+ length_span: number;
16
+ target_stream: string[];
17
+ replacement_stream: string[];
18
+ matched_conditions: string[];
19
+ };
20
+ declare class Transformer {
21
+ logger: Logger;
22
+ stages: {
23
+ transforms: Transform[];
24
+ name: string;
25
+ }[];
26
+ substages: {
27
+ transforms: Transform[];
28
+ name: string;
29
+ }[];
30
+ graphemes: string[];
31
+ lettercase_mapper: Lettercase_Mapper;
32
+ chance_mapper: Chance_Mapper;
33
+ private syllable_boundaries;
34
+ private debug;
35
+ private associateme_mapper;
36
+ constructor(logger: Logger, graphemes: string[], lettercase_mapper: Lettercase_Mapper, chance_mapper: Chance_Mapper, syllable_boundaries: string[], stages: {
37
+ transforms: Transform[];
38
+ name: string;
39
+ }[], substages: {
40
+ transforms: Transform[];
41
+ name: string;
42
+ }[], output_mode: Output_Mode, associateme_mapper: Associateme_Mapper);
43
+ run_routine(routine: Routine, word: Word, word_stream: string[], line_num: number): string[];
44
+ target_to_word_match(word_tokens: string[], raw_target: Token[], reference_mapper: Reference_Mapper, carryover_associator: Carryover_Associator): [number, number, string[]];
45
+ result_former(raw_result: Token[], target_stream: string[], reference_mapper: Reference_Mapper, carryover_associator: Carryover_Associator): string[];
46
+ resolve_association(mapper: Associateme_Mapper, grapheme: string): {
47
+ entry_id: number;
48
+ base_id: number;
49
+ variant_id: number;
50
+ } | null;
51
+ get_variant_id_for_base(mapper: Associateme_Mapper, entry_id: number, base_id: number, grapheme: string): number | null;
52
+ match_pattern_at(stream: string[], pattern: Token[], start: number, reference_mapper: Reference_Mapper, carryover_associator: Carryover_Associator | null, max_end?: number, target_stream?: string[]): Match_Result | null;
53
+ environment_match(word_stream: string[], target_stream: string[], startIdx: number, raw_target: string[], before: Token[], after: Token[], reference_mapper: Reference_Mapper): [boolean, string];
54
+ replacementa(word_stream: string[], replacements: Replacement[], word: Word, exceptions: {
55
+ before: Token[];
56
+ after: Token[];
57
+ }[], line_num: number): string[];
58
+ apply_transform(word: Word, word_stream: string[], transform: Transform): string[];
59
+ do_transforms(word: Word, transforms: Transform[]): Word;
60
+ do_stages(word: Word): Word;
61
+ get_variant_id(mapper: Associateme_Mapper, grapheme: string, baseToken: {
62
+ entry_id: number;
63
+ base_id: number;
64
+ }): number | null;
65
+ }
66
+ export default Transformer;
@@ -0,0 +1,3 @@
1
+ declare function xsampa_to_ipa(input: string): string;
2
+ declare function ipa_to_xsampa(ipa_in: string): string;
3
+ export { xsampa_to_ipa, ipa_to_xsampa };
@@ -0,0 +1,4 @@
1
+ declare function weighted_random_pick(items: string[], weights: number[]): string;
2
+ declare function supra_weighted_random_pick(items: string[], weights: (number | "s")[]): string;
3
+ declare function get_distribution(n: number, default_distribution: string): number[];
4
+ export { weighted_random_pick, get_distribution, supra_weighted_random_pick };