@fuzdev/fuz_code 0.37.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76) hide show
  1. package/LICENSE +25 -0
  2. package/README.md +185 -0
  3. package/dist/Code.svelte +146 -0
  4. package/dist/Code.svelte.d.ts +79 -0
  5. package/dist/Code.svelte.d.ts.map +1 -0
  6. package/dist/CodeHighlight.svelte +205 -0
  7. package/dist/CodeHighlight.svelte.d.ts +101 -0
  8. package/dist/CodeHighlight.svelte.d.ts.map +1 -0
  9. package/dist/code_sample.d.ts +8 -0
  10. package/dist/code_sample.d.ts.map +1 -0
  11. package/dist/code_sample.js +2 -0
  12. package/dist/grammar_clike.d.ts +12 -0
  13. package/dist/grammar_clike.d.ts.map +1 -0
  14. package/dist/grammar_clike.js +43 -0
  15. package/dist/grammar_css.d.ts +11 -0
  16. package/dist/grammar_css.d.ts.map +1 -0
  17. package/dist/grammar_css.js +70 -0
  18. package/dist/grammar_js.d.ts +11 -0
  19. package/dist/grammar_js.d.ts.map +1 -0
  20. package/dist/grammar_js.js +180 -0
  21. package/dist/grammar_json.d.ts +11 -0
  22. package/dist/grammar_json.d.ts.map +1 -0
  23. package/dist/grammar_json.js +35 -0
  24. package/dist/grammar_markdown.d.ts +8 -0
  25. package/dist/grammar_markdown.d.ts.map +1 -0
  26. package/dist/grammar_markdown.js +228 -0
  27. package/dist/grammar_markup.d.ts +31 -0
  28. package/dist/grammar_markup.d.ts.map +1 -0
  29. package/dist/grammar_markup.js +192 -0
  30. package/dist/grammar_svelte.d.ts +12 -0
  31. package/dist/grammar_svelte.d.ts.map +1 -0
  32. package/dist/grammar_svelte.js +150 -0
  33. package/dist/grammar_ts.d.ts +11 -0
  34. package/dist/grammar_ts.d.ts.map +1 -0
  35. package/dist/grammar_ts.js +95 -0
  36. package/dist/highlight_manager.d.ts +25 -0
  37. package/dist/highlight_manager.d.ts.map +1 -0
  38. package/dist/highlight_manager.js +139 -0
  39. package/dist/highlight_priorities.d.ts +3 -0
  40. package/dist/highlight_priorities.d.ts.map +1 -0
  41. package/dist/highlight_priorities.gen.d.ts +4 -0
  42. package/dist/highlight_priorities.gen.d.ts.map +1 -0
  43. package/dist/highlight_priorities.gen.js +58 -0
  44. package/dist/highlight_priorities.js +55 -0
  45. package/dist/syntax_styler.d.ts +277 -0
  46. package/dist/syntax_styler.d.ts.map +1 -0
  47. package/dist/syntax_styler.js +426 -0
  48. package/dist/syntax_styler_global.d.ts +3 -0
  49. package/dist/syntax_styler_global.d.ts.map +1 -0
  50. package/dist/syntax_styler_global.js +18 -0
  51. package/dist/syntax_token.d.ts +34 -0
  52. package/dist/syntax_token.d.ts.map +1 -0
  53. package/dist/syntax_token.js +27 -0
  54. package/dist/theme.css +98 -0
  55. package/dist/theme_highlight.css +160 -0
  56. package/dist/theme_variables.css +20 -0
  57. package/dist/tokenize_syntax.d.ts +28 -0
  58. package/dist/tokenize_syntax.d.ts.map +1 -0
  59. package/dist/tokenize_syntax.js +194 -0
  60. package/package.json +117 -0
  61. package/src/lib/code_sample.ts +10 -0
  62. package/src/lib/grammar_clike.ts +48 -0
  63. package/src/lib/grammar_css.ts +84 -0
  64. package/src/lib/grammar_js.ts +215 -0
  65. package/src/lib/grammar_json.ts +38 -0
  66. package/src/lib/grammar_markdown.ts +289 -0
  67. package/src/lib/grammar_markup.ts +225 -0
  68. package/src/lib/grammar_svelte.ts +165 -0
  69. package/src/lib/grammar_ts.ts +114 -0
  70. package/src/lib/highlight_manager.ts +182 -0
  71. package/src/lib/highlight_priorities.gen.ts +71 -0
  72. package/src/lib/highlight_priorities.ts +110 -0
  73. package/src/lib/syntax_styler.ts +583 -0
  74. package/src/lib/syntax_styler_global.ts +20 -0
  75. package/src/lib/syntax_token.ts +49 -0
  76. package/src/lib/tokenize_syntax.ts +270 -0
@@ -0,0 +1,95 @@
1
+ import { class_keywords } from './grammar_clike.js';
2
+ /**
3
+ * Based on Prism (https://github.com/PrismJS/prism)
4
+ * by Lea Verou (https://lea.verou.me/)
5
+ *
6
+ * MIT license
7
+ *
8
+ * @see LICENSE
9
+ */
10
/**
 * Registers the TypeScript grammar, built as an extension of the JS grammar.
 *
 * @param syntax_styler - The styler instance to register the `ts` language on.
 */
export const add_grammar_ts = (syntax_styler) => {
	// TS starts from the JS grammar and layers type-system constructs on top.
	const ts_grammar = syntax_styler.add_extended_lang('js', 'ts', {
		class_name: {
			pattern: new RegExp(`(\\b(?:${class_keywords}|type)\\s+)(?!keyof\\b)(?!\\s)[_$a-zA-Z\\xA0-\\uFFFF](?:(?!\\s)[$\\w\\xA0-\\uFFFF])*(?:\\s*<(?:[^<>]|<(?:[^<>]|<[^<>]*>)*>)*>)?`),
			lookbehind: true,
			greedy: true,
			inside: null, // assigned below once the type grammar exists
		},
		builtin: /\b(?:Array|Function|Promise|any|boolean|console|never|number|string|symbol|unknown)\b/,
		// Arrow functions assigned to variables, including TS type-annotated ones.
		function_variable: {
			pattern: /#?(?!\s)[_$a-zA-Z\xA0-\uFFFF](?:(?!\s)[$\w\xA0-\uFFFF])*(?=\s*[=:]\s*(?:async\s*)?(?:\bfunction\b|(?:\((?:[^()]|\([^()]*\))*\)(?:\s*:\s*(?:(?!=>).)+)?\s*=>|(?!\s)[_$a-zA-Z\xA0-\uFFFF](?:(?!\s)[$\w\xA0-\uFFFF])*\s*=>)))/,
			alias: 'function',
		},
	}, ['typescript']);
	// Keywords that TypeScript adds on top of JavaScript.
	ts_grammar.keyword.push(
		/\b(?:abstract|declare|is|keyof|readonly|require|satisfies)\b/,
		// keywords that have to be followed by an identifier
		/\b(?:asserts|infer|interface|module|namespace|type)\b(?=\s*(?:[{_$a-zA-Z\xA0-\uFFFF]|$))/,
		// handles `import type *, {}`
		/\btype\b(?=\s*(?:[{*]|$))/,
	);
	// These JS rules misfire on TS's richer syntax, so drop them.
	delete ts_grammar.parameter;
	delete ts_grammar.literal_property;
	// A TS variant used only inside type positions.
	const type_grammar = syntax_styler.extend_grammar('ts', {
		// Capitalized identifiers in type contexts read as type names.
		type_name: {
			pattern: /\b[A-Z]\w*/,
			alias: 'class_name',
		},
	});
	// Avoid double-wrapping class names inside type positions.
	type_grammar.class_name = undefined;
	// `class_name` was normalized to an array at registration time.
	ts_grammar.class_name[0].inside = type_grammar;
	syntax_styler.grammar_insert_before('ts', 'function', {
		// `x as Foo` / `x satisfies Foo` — the asserted type reads as a class name.
		type_assertion: {
			pattern: /(\b(?:as|satisfies)\s+)(?!\s)[_$A-Za-z\xA0-\uFFFF](?:(?!\s)[$\w\xA0-\uFFFF])*/,
			lookbehind: true,
			alias: 'class_name',
		},
		// The `type` keyword in `import type` / `export type` positions.
		import_type_keyword: {
			pattern: /(\b(?:import|export)\s+)type\b|(\b(?:import|export)\s*\{[^}]*,\s*)type\b/,
			lookbehind: true,
			alias: 'special_keyword',
		},
		// `: SomeType =` annotations on variable declarations.
		type_annotation: {
			pattern: /:(?:\s*)((?:[^<>=;,)}\s]|<[^>]*>|\[[^\]]*\]|\s)+)(?=\s*=)/,
			greedy: true,
			inside: {
				':': /^:/,
				type: {
					pattern: /.+/,
					inside: type_grammar,
				},
			},
		},
		// `@decorator` — the `@` styles as an operator, the name as a decorator.
		decorator: {
			pattern: /@[$\w\xA0-\uFFFF]+/,
			inside: {
				at: {
					pattern: /^@/,
					alias: 'operator',
				},
				function: {
					pattern: /^[\s\S]+/,
					alias: 'decorator_name',
				},
			},
		},
		// e.g. foo<T extends "bar" | "baz">( ...
		generic_function: {
			pattern: /#?(?!\s)[_$a-zA-Z\xA0-\uFFFF](?:(?!\s)[$\w\xA0-\uFFFF])*\s*<(?:[^<>]|<(?:[^<>]|<[^<>]*>)*>)*>(?=\s*\()/,
			greedy: true,
			inside: {
				function: /^#?(?!\s)[_$a-zA-Z\xA0-\uFFFF](?:(?!\s)[$\w\xA0-\uFFFF])*/,
				generic: {
					pattern: /<[\s\S]+/, // everything after the first <
					alias: 'class_name',
					inside: type_grammar,
				},
			},
		},
	});
};
@@ -0,0 +1,25 @@
1
+ import type { SyntaxTokenStream } from './syntax_token.js';
2
// NOTE(review): generated declaration file — comments here will be clobbered on rebuild.
/** How `Code`/`CodeHighlight` apply styling: pick at runtime, CSS Highlight ranges, or HTML markup. */
export type HighlightMode = 'auto' | 'ranges' | 'html';
/**
 * Check for CSS Highlights API support.
 */
export declare const supports_css_highlight_api: () => boolean;
/**
 * Manages highlights for a single element.
 * Tracks ranges per element and only removes its own ranges when clearing.
 */
export declare class HighlightManager {
    #private;
    /** Ranges registered by this manager, keyed by prefixed highlight name (`token_*`). */
    element_ranges: Map<string, Array<Range>>;
    /** Throws if the CSS Highlights API is unavailable. */
    constructor();
    /**
     * Highlight from syntax styler token stream.
     */
    highlight_from_syntax_tokens(element: Element, tokens: SyntaxTokenStream): void;
    /**
     * Clear only this element's ranges from highlights.
     */
    clear_element_ranges(): void;
    /** Clears this element's ranges; call when the element is removed. */
    destroy(): void;
}
25
+ //# sourceMappingURL=highlight_manager.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"highlight_manager.d.ts","sourceRoot":"../src/lib/","sources":["../src/lib/highlight_manager.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,iBAAiB,EAAC,MAAM,mBAAmB,CAAC;AAGzD,MAAM,MAAM,aAAa,GAAG,MAAM,GAAG,QAAQ,GAAG,MAAM,CAAC;AAEvD;;GAEG;AACH,eAAO,MAAM,0BAA0B,QAAO,OACS,CAAC;AAExD;;;GAGG;AACH,qBAAa,gBAAgB;;IAC5B,cAAc,EAAE,GAAG,CAAC,MAAM,EAAE,KAAK,CAAC,KAAK,CAAC,CAAC,CAAC;;IAS1C;;OAEG;IACH,4BAA4B,CAAC,OAAO,EAAE,OAAO,EAAE,MAAM,EAAE,iBAAiB,GAAG,IAAI;IAkD/E;;OAEG;IACH,oBAAoB,IAAI,IAAI;IAmB5B,OAAO,IAAI,IAAI;CAiFf"}
@@ -0,0 +1,139 @@
1
+ import { highlight_priorities } from './highlight_priorities.js';
2
/**
 * Detects whether the runtime exposes the CSS Custom Highlight API:
 * both the `CSS.highlights` registry and the `Highlight` constructor must exist.
 */
export const supports_css_highlight_api = () => {
	const css = globalThis.CSS; // eslint-disable-line @typescript-eslint/no-unnecessary-condition
	return Boolean(css?.highlights && globalThis.Highlight);
};
6
/**
 * Manages highlights for a single element.
 * Tracks ranges per element and only removes its own ranges when clearing.
 *
 * Entries in `CSS.highlights` are shared across the page, so each manager
 * records exactly the ranges it added and deletes only those on clear/destroy.
 */
export class HighlightManager {
	// This element's ranges, keyed by prefixed highlight name (`token_<type>`).
	element_ranges;
	// Throws if the CSS Highlights API is unavailable in this environment.
	constructor() {
		if (!supports_css_highlight_api()) {
			throw Error('CSS Highlights API not supported');
		}
		this.element_ranges = new Map();
	}
	/**
	 * Highlight from syntax styler token stream.
	 *
	 * Locates the element's text node, converts `tokens` into `Range`s grouped
	 * by token type, and registers them on shared `CSS.highlights` entries.
	 * Throws if no text node exists or token lengths don't cover the text exactly.
	 */
	highlight_from_syntax_tokens(element, tokens) {
		// Find the text node (it might not be firstChild due to Svelte comment nodes)
		let text_node = null;
		for (const node of element.childNodes) {
			if (node.nodeType === Node.TEXT_NODE) {
				text_node = node;
				break;
			}
		}
		if (!text_node) {
			throw new Error('no text node to highlight');
		}
		// Remove any ranges from a previous call before adding new ones.
		this.clear_element_ranges();
		const ranges_by_type = new Map();
		const final_pos = this.#create_all_ranges(tokens, text_node, ranges_by_type, 0);
		// Validate that token positions matched text node length
		const text_length = text_node.textContent?.length ?? 0;
		if (final_pos !== text_length) {
			throw new Error(`Token stream length mismatch: tokens covered ${final_pos} chars but text node has ${text_length} chars`);
		}
		// Apply highlights
		for (const [type, ranges] of ranges_by_type) {
			const prefixed_type = `token_${type}`;
			// Track ranges for this element
			this.element_ranges.set(prefixed_type, ranges);
			// Get or create the shared highlight
			let highlight = CSS.highlights.get(prefixed_type);
			if (!highlight) {
				highlight = new Highlight();
				// Set priority based on CSS cascade order (higher = later in CSS = wins)
				highlight.priority =
					highlight_priorities[prefixed_type] ?? 0;
				CSS.highlights.set(prefixed_type, highlight);
			}
			// Add all ranges to the highlight
			for (const range of ranges) {
				highlight.add(range);
			}
		}
	}
	/**
	 * Clear only this element's ranges from highlights.
	 *
	 * Shared highlights that become empty are removed from the registry.
	 * Throws if a tracked highlight is unexpectedly missing from `CSS.highlights`.
	 */
	clear_element_ranges() {
		for (const [name, ranges] of this.element_ranges) {
			const highlight = CSS.highlights.get(name);
			if (!highlight) {
				throw new Error('Expected to find CSS highlight: ' + name);
			}
			for (const range of ranges) {
				highlight.delete(range);
			}
			// No ranges left from any element — drop the shared highlight entirely.
			if (highlight.size === 0) {
				CSS.highlights.delete(name);
			}
		}
		this.element_ranges.clear();
	}
	// Releases this element's highlight ranges; safe to call multiple times.
	destroy() {
		this.clear_element_ranges();
	}
	/**
	 * Create ranges for all tokens in the tree.
	 *
	 * Walks the token stream recursively starting at text offset `offset`,
	 * creating a `Range` (plus one per alias, since a range object can only
	 * belong to one highlight group) for each non-string token.
	 * Returns the offset reached after consuming `tokens`.
	 */
	#create_all_ranges(tokens, text_node, ranges_by_type, offset) {
		const text_length = text_node.textContent?.length ?? 0;
		let pos = offset;
		for (const token of tokens) {
			// Plain strings are unhighlighted text; just advance the cursor.
			if (typeof token === 'string') {
				pos += token.length;
				continue;
			}
			const length = token.length;
			const end_pos = pos + length;
			// Validate positions are within text node bounds before creating ranges
			if (end_pos > text_length) {
				throw new Error(`Token ${token.type} extends beyond text node: position ${end_pos} > length ${text_length}`);
			}
			try {
				const range = new Range();
				range.setStart(text_node, pos);
				range.setEnd(text_node, end_pos);
				// Add range for the token type
				const type = token.type;
				if (!ranges_by_type.has(type)) {
					ranges_by_type.set(type, []);
				}
				ranges_by_type.get(type).push(range);
				// Also add range for any aliases (alias is always an array)
				for (const alias of token.alias) {
					if (!ranges_by_type.has(alias)) {
						ranges_by_type.set(alias, []);
					}
					// Create a new range for each alias (ranges can't be reused)
					const alias_range = new Range();
					alias_range.setStart(text_node, pos);
					alias_range.setEnd(text_node, end_pos);
					ranges_by_type.get(alias).push(alias_range);
				}
			}
			catch (e) {
				// Re-throw with the token type for easier debugging.
				throw new Error(`Failed to create range for ${token.type}: ${e}`);
			}
			// Process nested tokens
			if (Array.isArray(token.content)) {
				const actual_end_pos = this.#create_all_ranges(token.content, text_node, ranges_by_type, pos);
				// Validate that nested tokens match the parent token's claimed length
				if (actual_end_pos !== end_pos) {
					throw new Error(`Token ${token.type} length mismatch: claimed ${length} chars (${pos}-${end_pos}) but nested content covered ${actual_end_pos - pos} chars (${pos}-${actual_end_pos})`);
				}
				pos = actual_end_pos;
			}
			else {
				pos = end_pos;
			}
		}
		return pos;
	}
}
@@ -0,0 +1,3 @@
1
// NOTE(review): generated declaration file — comments here will be clobbered on rebuild.
/** Union of every `token_*` highlight name declared in `theme_highlight.css`. */
export type HighlightTokenName = 'token_processing_instruction' | 'token_doctype' | 'token_cdata' | 'token_punctuation' | 'token_tag' | 'token_constant' | 'token_symbol' | 'token_deleted' | 'token_keyword' | 'token_null' | 'token_boolean' | 'token_interpolation_punctuation' | 'token_heading' | 'token_heading_punctuation' | 'token_tag_punctuation' | 'token_comment' | 'token_char' | 'token_inserted' | 'token_blockquote' | 'token_blockquote_punctuation' | 'token_builtin' | 'token_class_name' | 'token_number' | 'token_attr_value' | 'token_attr_quote' | 'token_string' | 'token_template_punctuation' | 'token_inline_code' | 'token_code_punctuation' | 'token_attr_equals' | 'token_selector' | 'token_function' | 'token_regex' | 'token_important' | 'token_variable' | 'token_atrule' | 'token_attr_name' | 'token_property' | 'token_decorator' | 'token_decorator_name' | 'token_link_text_wrapper' | 'token_link_text' | 'token_link_punctuation' | 'token_special_keyword' | 'token_namespace' | 'token_rule' | 'token_at_keyword' | 'token_url' | 'token_strikethrough' | 'token_bold' | 'token_italic';
/** Priority per token name; higher values win in the CSS Highlights cascade. */
export declare const highlight_priorities: Record<HighlightTokenName, number | undefined>;
3
+ //# sourceMappingURL=highlight_priorities.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"highlight_priorities.d.ts","sourceRoot":"../src/lib/","sources":["../src/lib/highlight_priorities.ts"],"names":[],"mappings":"AAEA,MAAM,MAAM,kBAAkB,GAC3B,8BAA8B,GAC9B,eAAe,GACf,aAAa,GACb,mBAAmB,GACnB,WAAW,GACX,gBAAgB,GAChB,cAAc,GACd,eAAe,GACf,eAAe,GACf,YAAY,GACZ,eAAe,GACf,iCAAiC,GACjC,eAAe,GACf,2BAA2B,GAC3B,uBAAuB,GACvB,eAAe,GACf,YAAY,GACZ,gBAAgB,GAChB,kBAAkB,GAClB,8BAA8B,GAC9B,eAAe,GACf,kBAAkB,GAClB,cAAc,GACd,kBAAkB,GAClB,kBAAkB,GAClB,cAAc,GACd,4BAA4B,GAC5B,mBAAmB,GACnB,wBAAwB,GACxB,mBAAmB,GACnB,gBAAgB,GAChB,gBAAgB,GAChB,aAAa,GACb,iBAAiB,GACjB,gBAAgB,GAChB,cAAc,GACd,iBAAiB,GACjB,gBAAgB,GAChB,iBAAiB,GACjB,sBAAsB,GACtB,yBAAyB,GACzB,iBAAiB,GACjB,wBAAwB,GACxB,uBAAuB,GACvB,iBAAiB,GACjB,YAAY,GACZ,kBAAkB,GAClB,WAAW,GACX,qBAAqB,GACrB,YAAY,GACZ,cAAc,CAAC;AAElB,eAAO,MAAM,oBAAoB,EAAE,MAAM,CAAC,kBAAkB,EAAE,MAAM,GAAG,SAAS,CAoDtE,CAAC"}
@@ -0,0 +1,4 @@
1
+ import type { Gen } from '@ryanatkn/gro';
2
/** @nodocs */
// Gro codegen task; the generator implementation lives in `highlight_priorities.gen.ts`.
export declare const gen: Gen;
4
+ //# sourceMappingURL=highlight_priorities.gen.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"highlight_priorities.gen.d.ts","sourceRoot":"../src/lib/","sources":["../src/lib/highlight_priorities.gen.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,GAAG,EAAC,MAAM,eAAe,CAAC;AAKvC,cAAc;AACd,eAAO,MAAM,GAAG,EAAE,GAgEjB,CAAC"}
@@ -0,0 +1,58 @@
1
+ import { readFileSync } from 'node:fs';
2
// Source stylesheet whose rule order defines the highlight priorities.
const theme_css_path = 'src/lib/theme_highlight.css';
/** @nodocs */
export const gen = {
	dependencies: { files: [theme_css_path] },
	/**
	 * Derives `highlight_priorities` from `theme_highlight.css`: every
	 * `::highlight()` selector in one rule block shares a priority, and later
	 * blocks receive higher priorities (matching CSS cascade order).
	 */
	generate: ({ origin_path }) => {
		// Load the theme and strip comments so commented-out rules are ignored.
		const css = readFileSync(theme_css_path, 'utf-8').replace(/\/\*[\s\S]*?\*\//g, '');
		const priorities = {};
		let next_priority = 1;
		// Split CSS into rule blocks (roughly by closing braces).
		for (const block of css.split('}')) {
			// Collect the distinct ::highlight(token_name) selectors in this block.
			const names = new Set();
			for (const m of block.matchAll(/::highlight\(([^)]+)\)/g)) {
				names.add(m[1]);
			}
			if (names.size === 0) continue;
			// First occurrence wins; all tokens in one block share a priority.
			for (const name of names) {
				if (!Object.hasOwn(priorities, name)) {
					priorities[name] = next_priority;
				}
			}
			next_priority++;
		}
		const banner = `// generated by ${origin_path} - DO NOT EDIT OR RISK LOST DATA`;
		// Render the priority map entries.
		const entries = Object.entries(priorities)
			.map(([name, p]) => `\t${name}: ${p}`)
			.join(',\n');
		// Render the token-name union type.
		const union = Object.keys(priorities)
			.map((name) => `'${name}'`)
			.join(' | ');
		return `
${banner}

export type HighlightTokenName = ${union};

export const highlight_priorities: Record<HighlightTokenName, number | undefined> = {
${entries},
} as const;

${banner}
`;
	},
};
@@ -0,0 +1,55 @@
1
// generated by src/lib/highlight_priorities.gen.ts - DO NOT EDIT OR RISK LOST DATA
// Priorities mirror rule-block order in theme_highlight.css; higher values win
// in the CSS Highlights cascade. Edit the CSS and regenerate, not this file.
export const highlight_priorities = {
	token_processing_instruction: 1,
	token_doctype: 1,
	token_cdata: 1,
	token_punctuation: 1,
	token_tag: 2,
	token_constant: 2,
	token_symbol: 2,
	token_deleted: 2,
	token_keyword: 2,
	token_null: 2,
	token_boolean: 2,
	token_interpolation_punctuation: 2,
	token_heading: 2,
	token_heading_punctuation: 2,
	token_tag_punctuation: 2,
	token_comment: 3,
	token_char: 3,
	token_inserted: 3,
	token_blockquote: 3,
	token_blockquote_punctuation: 3,
	token_builtin: 4,
	token_class_name: 4,
	token_number: 4,
	token_attr_value: 5,
	token_attr_quote: 5,
	token_string: 5,
	token_template_punctuation: 5,
	token_inline_code: 5,
	token_code_punctuation: 5,
	token_attr_equals: 6,
	token_selector: 7,
	token_function: 7,
	token_regex: 7,
	token_important: 7,
	token_variable: 7,
	token_atrule: 8,
	token_attr_name: 9,
	token_property: 9,
	token_decorator: 9,
	token_decorator_name: 9,
	token_link_text_wrapper: 9,
	token_link_text: 9,
	token_link_punctuation: 9,
	token_special_keyword: 10,
	token_namespace: 10,
	token_rule: 10,
	token_at_keyword: 11,
	token_url: 11,
	token_strikethrough: 13,
	token_bold: 14,
	token_italic: 15,
};
// generated by src/lib/highlight_priorities.gen.ts - DO NOT EDIT OR RISK LOST DATA
@@ -0,0 +1,277 @@
1
+ import { SyntaxToken, type SyntaxTokenStream } from './syntax_token.js';
2
// NOTE(review): generated declaration file — comments here will be clobbered on rebuild.
/** Registers a language grammar on the given styler instance. */
export type AddSyntaxGrammar = (syntax_styler: SyntaxStyler) => void;
/**
 * Based on Prism (https://github.com/PrismJS/prism)
 * by Lea Verou (https://lea.verou.me/)
 *
 * MIT license
 *
 * @see LICENSE
 */
export declare class SyntaxStyler {
    /** Registered grammars by language id; alias ids map to the same grammar object. */
    langs: Record<string, SyntaxGrammar | undefined>;
    add_lang(id: string, grammar: SyntaxGrammarRaw, aliases?: Array<string>): void;
    add_extended_lang(base_id: string, extension_id: string, extension: SyntaxGrammarRaw, aliases?: Array<string>): SyntaxGrammar;
    /** Looks up a registered grammar by id. */
    get_lang(id: string): SyntaxGrammar;
    /**
     * Generates HTML with syntax highlighting from source code.
     *
     * **Process:**
     * 1. Runs `before_tokenize` hook
     * 2. Tokenizes code using the provided or looked-up grammar
     * 3. Runs `after_tokenize` hook
     * 4. Runs `wrap` hook on each token
     * 5. Converts tokens to HTML with CSS classes
     *
     * **Parameter Relationship:**
     * - `lang` is ALWAYS required for hook context and identification
     * - `grammar` is optional; when undefined, automatically looks up via `this.get_lang(lang)`
     * - When both are provided, `grammar` is used for tokenization, `lang` for metadata
     *
     * **Use cases:**
     * - Standard usage: `stylize(code, 'ts')` - uses registered TypeScript grammar
     * - Custom grammar: `stylize(code, 'ts', customGrammar)` - uses custom grammar but keeps 'ts' label
     * - Extended grammar: `stylize(code, 'custom', this.extend_grammar('ts', extension))` - new language variant
     *
     * @param text - The source code to syntax highlight.
     * @param lang - Language identifier (e.g., 'ts', 'css', 'html'). Used for:
     *   - Grammar lookup when `grammar` is undefined
     *   - Hook context (`lang` field passed to hooks)
     *   - Language identification in output
     * @param grammar - Optional custom grammar object. When undefined, automatically
     *   looks up the grammar via `this.get_lang(lang)`. Provide this to use a custom
     *   or modified grammar instead of the registered one.
     *
     * @returns HTML string with syntax highlighting using CSS classes (`.token_*`)
     *
     * @example
     * // Standard usage - uses registered grammar
     * stylize('var foo = true;', 'ts');
     *
     * @example
     * // Custom grammar - overrides registered grammar
     * const customGrammar = { keyword: [...], string: [...] };
     * stylize('var foo = false;', 'ts', customGrammar);
     *
     * @example
     * // Extended grammar - builds on existing grammar
     * const extended = this.extend_grammar('ts', { customToken: [...] });
     * stylize('var foo = 42;', 'ts-extended', extended);
     */
    stylize(text: string, lang: string, grammar?: SyntaxGrammar | undefined): string;
    /**
     * Inserts tokens _before_ another token in a language definition or any other grammar.
     *
     * ## Usage
     *
     * This helper method makes it easy to modify existing languages. For example, the CSS language definition
     * not only defines CSS styling for CSS documents, but also needs to define styling for CSS embedded
     * in HTML through `<style>` elements. To do this, it needs to modify `syntax_styler.get_lang('markup')` and add the
     * appropriate tokens. However, `syntax_styler.get_lang('markup')` is a regular JS object literal, so if you do
     * this:
     *
     * ```js
     * syntax_styler.get_lang('markup').style = {
     *     // token
     * };
     * ```
     *
     * then the `style` token will be added (and processed) at the end. `insert_before` allows you to insert tokens
     * before existing tokens. For the CSS example above, you would use it like this:
     *
     * ```js
     * grammar_insert_before('markup', 'cdata', {
     *     'style': {
     *         // token
     *     }
     * });
     * ```
     *
     * ## Special cases
     *
     * If the grammars of `inside` and `insert` have tokens with the same name, the tokens in `inside`'s grammar
     * will be ignored.
     *
     * This behavior can be used to insert tokens after `before`:
     *
     * ```js
     * grammar_insert_before('markup', 'comment', {
     *     'comment': syntax_styler.get_lang('markup').comment,
     *     // tokens after 'comment'
     * });
     * ```
     *
     * ## Limitations
     *
     * The main problem `insert_before` has to solve is iteration order. Since ES2015, the iteration order for object
     * properties is guaranteed to be the insertion order (except for integer keys) but some browsers behave
     * differently when keys are deleted and re-inserted. So `insert_before` can't be implemented by temporarily
     * deleting properties which is necessary to insert at arbitrary positions.
     *
     * To solve this problem, `insert_before` doesn't actually insert the given tokens into the target object.
     * Instead, it will create a new object and replace all references to the target object with the new one. This
     * can be done without temporarily deleting properties, so the iteration order is well-defined.
     *
     * However, only references that can be reached from `syntax_styler.langs` or `insert` will be replaced. I.e. if
     * you hold the target object in a variable, then the value of the variable will not change.
     *
     * ```js
     * var oldMarkup = syntax_styler.get_lang('markup');
     * var newMarkup = grammar_insert_before('markup', 'comment', { ... });
     *
     * assert(oldMarkup !== syntax_styler.get_lang('markup'));
     * assert(newMarkup === syntax_styler.get_lang('markup'));
     * ```
     *
     * @param inside - The property of `root` (e.g. a language id in `syntax_styler.langs`) that contains the
     * object to be modified.
     * @param before - The key to insert before.
     * @param insert - An object containing the key-value pairs to be inserted.
     * @param root - The object containing `inside`, i.e. the object that contains the
     * object to be modified.
     *
     * Defaults to `syntax_styler.langs`.
     *
     * @returns the new grammar object
     */
    grammar_insert_before(inside: string, before: string, insert: SyntaxGrammarRaw, root?: Record<string, any>): SyntaxGrammar;
    /**
     * Converts the given token or token stream to an HTML representation.
     *
     * Runs the `wrap` hook on each `SyntaxToken`.
     *
     * @param o - The token or token stream to be converted.
     * @param lang - The name of current language.
     * @returns The HTML representation of the token or token stream.
     */
    stringify_token(o: string | SyntaxToken | SyntaxTokenStream, lang: string): string;
    /**
     * Creates a deep copy of the language with the given id and appends the given tokens.
     *
     * If a token in `extension` also appears in the copied language, then the existing token in the copied language
     * will be overwritten at its original position.
     *
     * ## Best practices
     *
     * Since the position of overwriting tokens (token in `extension` that overwrite tokens in the copied language)
     * doesn't matter, they can technically be in any order. However, this can be confusing to others that trying to
     * understand the language definition because, normally, the order of tokens matters in the grammars.
     *
     * Therefore, it is encouraged to order overwriting tokens according to the positions of the overwritten tokens.
     * Furthermore, all non-overwriting tokens should be placed after the overwriting ones.
     *
     * @param base_id - The id of the language to extend. This has to be a key in `syntax_styler.langs`.
     * @param extension - The new tokens to append.
     * @returns the new grammar
     */
    extend_grammar(base_id: string, extension: SyntaxGrammarRaw): SyntaxGrammar;
    /**
     * Normalize a single pattern to have consistent shape.
     * This ensures all patterns have the same object shape for V8 optimization.
     */
    private normalize_pattern;
    /**
     * Normalize a grammar to have consistent object shapes.
     * This performs several optimizations:
     * 1. Merges `rest` property into main grammar
     * 2. Ensures all pattern values are arrays
     * 3. Normalizes all pattern objects to have consistent shapes
     * 4. Adds global flag to greedy patterns
     *
     * This is called once at registration time to avoid runtime overhead.
     * @param visited - Set of grammar object IDs already normalized (for circular references)
     */
    private normalize_grammar;
    /** Plugin-specific state, keyed by plugin name. */
    plugins: Record<string, any>;
    /** Callbacks run before tokenization, in registration order. */
    hooks_before_tokenize: Array<HookBeforeTokenizeCallback>;
    /** Callbacks run after tokenization, in registration order. */
    hooks_after_tokenize: Array<HookAfterTokenizeCallback>;
    /** Callbacks run as each token is wrapped into HTML. */
    hooks_wrap: Array<HookWrapCallback>;
    add_hook_before_tokenize(cb: HookBeforeTokenizeCallback): void;
    add_hook_after_tokenize(cb: HookAfterTokenizeCallback): void;
    add_hook_wrap(cb: HookWrapCallback): void;
    run_hook_before_tokenize(ctx: HookBeforeTokenizeCallbackContext): void;
    run_hook_after_tokenize(ctx: HookAfterTokenizeCallbackContext): void;
    run_hook_wrap(ctx: HookWrapCallbackContext): void;
}
/** A grammar value as authored: a regex, an expanded token object, or a list of either. */
export type SyntaxGrammarValueRaw = RegExp | SyntaxGrammarTokenRaw | Array<RegExp | SyntaxGrammarTokenRaw>;
/** A grammar as authored, before normalization; `rest` is merged into the grammar at registration. */
export type SyntaxGrammarRaw = Record<string, SyntaxGrammarValueRaw | undefined> & {
    rest?: SyntaxGrammarRaw | undefined;
};
/**
 * The expansion of a simple `RegExp` literal to support additional properties.
 *
 * The `inside` grammar will be used to tokenize the text value of each token of this kind.
 *
 * This can be used to make nested and even recursive language definitions.
 *
 * Note: This can cause infinite recursion. Be careful when you embed different languages or even the same language into
 * each another.
 *
 * Note: Grammar authors can use optional properties, but they will be normalized
 * to required properties at registration time for optimal performance.
 */
export interface SyntaxGrammarTokenRaw {
    /**
     * The regular expression of the token.
     */
    pattern: RegExp;
    /**
     * If `true`, then the first capturing group of `pattern` will (effectively)
     * behave as a lookbehind group meaning that the captured text will not be part of the matched text of the new token.
     * @default false
     */
    lookbehind?: boolean;
    /**
     * Whether the token is greedy.
     * @default false
     */
    greedy?: boolean;
    /**
     * An optional alias or list of aliases.
     */
    alias?: string | Array<string>;
    /**
     * The nested grammar of this token.
     */
    inside?: SyntaxGrammarRaw | null;
}
/**
 * Grammar token with all properties required.
 * This is the normalized representation used at runtime.
 */
export interface SyntaxGrammarToken {
    pattern: RegExp;
    lookbehind: boolean;
    greedy: boolean;
    alias: Array<string>;
    inside: SyntaxGrammar | null;
}
/**
 * A grammar after normalization.
 * All values are arrays of normalized tokens with consistent shapes.
 */
export type SyntaxGrammar = Record<string, Array<SyntaxGrammarToken>>;
export type HookBeforeTokenizeCallback = (ctx: HookBeforeTokenizeCallbackContext) => void;
export type HookAfterTokenizeCallback = (ctx: HookAfterTokenizeCallbackContext) => void;
export type HookWrapCallback = (ctx: HookWrapCallbackContext) => void;
export interface HookBeforeTokenizeCallbackContext {
    code: string;
    grammar: SyntaxGrammar;
    lang: string;
    /** No tokens exist yet before tokenization. */
    tokens: undefined;
}
export interface HookAfterTokenizeCallbackContext {
    code: string;
    grammar: SyntaxGrammar;
    lang: string;
    tokens: SyntaxTokenStream;
}
/** Mutable context passed to `wrap` hooks; hooks may adjust tag, classes, and attributes. */
export interface HookWrapCallbackContext {
    type: string;
    content: string;
    tag: string;
    classes: Array<string>;
    attributes: Record<string, string>;
    lang: string;
}
277
+ //# sourceMappingURL=syntax_styler.d.ts.map