@weavelogic/knowledge-graph-agent 0.7.3 → 0.8.0

This diff shows the changes between publicly released versions of this package as published to a supported registry, and is provided for informational purposes only.
Files changed (82)
  1. package/README.md +4 -0
  2. package/dist/_virtual/__vite-browser-external.js +2 -2
  3. package/dist/_virtual/__vite-browser-external.js.map +1 -1
  4. package/dist/_virtual/browser.js +2 -3
  5. package/dist/_virtual/browser.js.map +1 -1
  6. package/dist/cli/commands/hive-mind/add-frontmatter.js +2 -2
  7. package/dist/cli/commands/hive-mind/add-frontmatter.js.map +1 -1
  8. package/dist/cli/commands/hive-mind/analyze-links.js +2 -2
  9. package/dist/cli/commands/hive-mind/analyze-links.js.map +1 -1
  10. package/dist/cli/commands/hive-mind/find-connections.js +2 -2
  11. package/dist/cli/commands/hive-mind/find-connections.js.map +1 -1
  12. package/dist/cli/commands/hive-mind/validate-names.js +2 -2
  13. package/dist/cli/commands/hive-mind/validate-names.js.map +1 -1
  14. package/dist/graphql/server.js +2 -2
  15. package/dist/graphql/server.js.map +1 -1
  16. package/dist/mcp-server/tools/audit/index.d.ts +4 -0
  17. package/dist/mcp-server/tools/audit/index.d.ts.map +1 -1
  18. package/dist/node_modules/@typescript-eslint/project-service/dist/index.js +1 -1
  19. package/dist/node_modules/@typescript-eslint/types/dist/index.js +1 -1
  20. package/dist/node_modules/@typescript-eslint/visitor-keys/dist/index.js +1 -1
  21. package/dist/node_modules/debug/src/browser.js +1 -1
  22. package/dist/node_modules/fdir/dist/index.js +13 -13
  23. package/dist/node_modules/fdir/dist/index.js.map +1 -1
  24. package/dist/node_modules/tinyglobby/dist/index.js +14 -14
  25. package/dist/node_modules/tinyglobby/dist/index.js.map +1 -1
  26. package/dist/node_modules/typescript/lib/typescript.js +24 -24
  27. package/dist/node_modules/typescript/lib/typescript.js.map +1 -1
  28. package/dist/vector/services/embedding-service.js +1 -7
  29. package/dist/vector/services/embedding-service.js.map +1 -1
  30. package/package.json +3 -2
  31. package/dist/_virtual/browser2.js +0 -5
  32. package/dist/_virtual/browser2.js.map +0 -1
  33. package/dist/_virtual/index12.js +0 -7
  34. package/dist/_virtual/index12.js.map +0 -1
  35. package/dist/_virtual/ort-web.min.js +0 -8
  36. package/dist/_virtual/ort-web.min.js.map +0 -1
  37. package/dist/_virtual/ort-web.min2.js +0 -5
  38. package/dist/_virtual/ort-web.min2.js.map +0 -1
  39. package/dist/node_modules/@huggingface/jinja/dist/index.js +0 -118
  40. package/dist/node_modules/@huggingface/jinja/dist/index.js.map +0 -1
  41. package/dist/node_modules/@xenova/transformers/src/backends/onnx.js +0 -24
  42. package/dist/node_modules/@xenova/transformers/src/backends/onnx.js.map +0 -1
  43. package/dist/node_modules/@xenova/transformers/src/configs.js +0 -52
  44. package/dist/node_modules/@xenova/transformers/src/configs.js.map +0 -1
  45. package/dist/node_modules/@xenova/transformers/src/env.js +0 -35
  46. package/dist/node_modules/@xenova/transformers/src/env.js.map +0 -1
  47. package/dist/node_modules/@xenova/transformers/src/models.js +0 -3852
  48. package/dist/node_modules/@xenova/transformers/src/models.js.map +0 -1
  49. package/dist/node_modules/@xenova/transformers/src/tokenizers.js +0 -144
  50. package/dist/node_modules/@xenova/transformers/src/tokenizers.js.map +0 -1
  51. package/dist/node_modules/@xenova/transformers/src/utils/core.js +0 -52
  52. package/dist/node_modules/@xenova/transformers/src/utils/core.js.map +0 -1
  53. package/dist/node_modules/@xenova/transformers/src/utils/generation.js +0 -623
  54. package/dist/node_modules/@xenova/transformers/src/utils/generation.js.map +0 -1
  55. package/dist/node_modules/@xenova/transformers/src/utils/hub.js +0 -395
  56. package/dist/node_modules/@xenova/transformers/src/utils/hub.js.map +0 -1
  57. package/dist/node_modules/@xenova/transformers/src/utils/image.js +0 -12
  58. package/dist/node_modules/@xenova/transformers/src/utils/image.js.map +0 -1
  59. package/dist/node_modules/@xenova/transformers/src/utils/maths.js +0 -89
  60. package/dist/node_modules/@xenova/transformers/src/utils/maths.js.map +0 -1
  61. package/dist/node_modules/@xenova/transformers/src/utils/tensor.js +0 -750
  62. package/dist/node_modules/@xenova/transformers/src/utils/tensor.js.map +0 -1
  63. package/dist/node_modules/onnxruntime-common/dist/lib/backend-impl.js +0 -67
  64. package/dist/node_modules/onnxruntime-common/dist/lib/backend-impl.js.map +0 -1
  65. package/dist/node_modules/onnxruntime-common/dist/lib/env-impl.js +0 -24
  66. package/dist/node_modules/onnxruntime-common/dist/lib/env-impl.js.map +0 -1
  67. package/dist/node_modules/onnxruntime-common/dist/lib/env.js +0 -6
  68. package/dist/node_modules/onnxruntime-common/dist/lib/env.js.map +0 -1
  69. package/dist/node_modules/onnxruntime-common/dist/lib/index.js +0 -11
  70. package/dist/node_modules/onnxruntime-common/dist/lib/index.js.map +0 -1
  71. package/dist/node_modules/onnxruntime-common/dist/lib/inference-session-impl.js +0 -162
  72. package/dist/node_modules/onnxruntime-common/dist/lib/inference-session-impl.js.map +0 -1
  73. package/dist/node_modules/onnxruntime-common/dist/lib/inference-session.js +0 -6
  74. package/dist/node_modules/onnxruntime-common/dist/lib/inference-session.js.map +0 -1
  75. package/dist/node_modules/onnxruntime-common/dist/lib/tensor-impl.js +0 -393
  76. package/dist/node_modules/onnxruntime-common/dist/lib/tensor-impl.js.map +0 -1
  77. package/dist/node_modules/onnxruntime-common/dist/lib/tensor.js +0 -6
  78. package/dist/node_modules/onnxruntime-common/dist/lib/tensor.js.map +0 -1
  79. package/dist/node_modules/onnxruntime-web/dist/ort-web.min.js +0 -12919
  80. package/dist/node_modules/onnxruntime-web/dist/ort-web.min.js.map +0 -1
  81. package/dist/node_modules/ws/browser.js +0 -16
  82. package/dist/node_modules/ws/browser.js.map +0 -1
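Taken together, the list shows that most of this release is removal of vendored code: the copies of @xenova/transformers, @huggingface/jinja, onnxruntime-common, onnxruntime-web, and the ws browser shim that 0.7.3 shipped under dist/node_modules (along with their _virtual ort-web/browser entries) are gone in 0.8.0, accompanied by small edits to dist/vector/services/embedding-service.js (+1 -7) and package.json (+3 -2). The sketch below is hypothetical and is not taken from the package source; the function name and model id are placeholders. It only illustrates the general shape of such a change: an embedding service that stops importing a bundled copy of the model runtime and instead resolves @xenova/transformers lazily from the consumer's own dependencies.

  // Hypothetical sketch, not the package's actual code: replace an eagerly
  // bundled model runtime with a lazily resolved external import.
  let extractorPromise = null;

  // getExtractor and the default model id are placeholder names.
  export async function getExtractor(model = "Xenova/all-MiniLM-L6-v2") {
    if (!extractorPromise) {
      // Resolved from the host project's node_modules rather than from a
      // copy vendored under dist/node_modules.
      extractorPromise = import("@xenova/transformers").then(({ pipeline }) =>
        pipeline("feature-extraction", model)
      );
    }
    return extractorPromise;
  }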
package/dist/node_modules/@xenova/transformers/src/tokenizers.js
@@ -1,144 +0,0 @@
- import { reverseDictionary } from "./utils/core.js";
- import "fs";
- import "path";
- import "./env.js";
- import "./utils/tensor.js";
- import "../../../@huggingface/jinja/dist/index.js";
- const BYTES_TO_UNICODE = (() => {
-   const bs = [
-     ...Array.from({ length: "~".charCodeAt(0) - "!".charCodeAt(0) + 1 }, (_, i) => i + "!".charCodeAt(0)),
-     ...Array.from({ length: "¬".charCodeAt(0) - "¡".charCodeAt(0) + 1 }, (_, i) => i + "¡".charCodeAt(0)),
-     ...Array.from({ length: "ÿ".charCodeAt(0) - "®".charCodeAt(0) + 1 }, (_, i) => i + "®".charCodeAt(0))
-   ];
-   const cs = bs.slice();
-   let n = 0;
-   for (let b = 0; b < 256; ++b) {
-     if (!bs.includes(b)) {
-       bs.push(b);
-       cs.push(256 + n);
-       n += 1;
-     }
-   }
-   const ccs = cs.map((n2) => String.fromCharCode(n2));
-   return Object.fromEntries(bs.map((b, i) => [b, ccs[i]]));
- })();
- reverseDictionary(BYTES_TO_UNICODE);
- const WHISPER_LANGUAGES = [
-   ["en", "english"],
-   ["zh", "chinese"],
-   ["de", "german"],
-   ["es", "spanish"],
-   ["ru", "russian"],
-   ["ko", "korean"],
-   ["fr", "french"],
-   ["ja", "japanese"],
-   ["pt", "portuguese"],
-   ["tr", "turkish"],
-   ["pl", "polish"],
-   ["ca", "catalan"],
-   ["nl", "dutch"],
-   ["ar", "arabic"],
-   ["sv", "swedish"],
-   ["it", "italian"],
-   ["id", "indonesian"],
-   ["hi", "hindi"],
-   ["fi", "finnish"],
-   ["vi", "vietnamese"],
-   ["he", "hebrew"],
-   ["uk", "ukrainian"],
-   ["el", "greek"],
-   ["ms", "malay"],
-   ["cs", "czech"],
-   ["ro", "romanian"],
-   ["da", "danish"],
-   ["hu", "hungarian"],
-   ["ta", "tamil"],
-   ["no", "norwegian"],
-   ["th", "thai"],
-   ["ur", "urdu"],
-   ["hr", "croatian"],
-   ["bg", "bulgarian"],
-   ["lt", "lithuanian"],
-   ["la", "latin"],
-   ["mi", "maori"],
-   ["ml", "malayalam"],
-   ["cy", "welsh"],
-   ["sk", "slovak"],
-   ["te", "telugu"],
-   ["fa", "persian"],
-   ["lv", "latvian"],
-   ["bn", "bengali"],
-   ["sr", "serbian"],
-   ["az", "azerbaijani"],
-   ["sl", "slovenian"],
-   ["kn", "kannada"],
-   ["et", "estonian"],
-   ["mk", "macedonian"],
-   ["br", "breton"],
-   ["eu", "basque"],
-   ["is", "icelandic"],
-   ["hy", "armenian"],
-   ["ne", "nepali"],
-   ["mn", "mongolian"],
-   ["bs", "bosnian"],
-   ["kk", "kazakh"],
-   ["sq", "albanian"],
-   ["sw", "swahili"],
-   ["gl", "galician"],
-   ["mr", "marathi"],
-   ["pa", "punjabi"],
-   ["si", "sinhala"],
-   ["km", "khmer"],
-   ["sn", "shona"],
-   ["yo", "yoruba"],
-   ["so", "somali"],
-   ["af", "afrikaans"],
-   ["oc", "occitan"],
-   ["ka", "georgian"],
-   ["be", "belarusian"],
-   ["tg", "tajik"],
-   ["sd", "sindhi"],
-   ["gu", "gujarati"],
-   ["am", "amharic"],
-   ["yi", "yiddish"],
-   ["lo", "lao"],
-   ["uz", "uzbek"],
-   ["fo", "faroese"],
-   ["ht", "haitian creole"],
-   ["ps", "pashto"],
-   ["tk", "turkmen"],
-   ["nn", "nynorsk"],
-   ["mt", "maltese"],
-   ["sa", "sanskrit"],
-   ["lb", "luxembourgish"],
-   ["my", "myanmar"],
-   ["bo", "tibetan"],
-   ["tl", "tagalog"],
-   ["mg", "malagasy"],
-   ["as", "assamese"],
-   ["tt", "tatar"],
-   ["haw", "hawaiian"],
-   ["ln", "lingala"],
-   ["ha", "hausa"],
-   ["ba", "bashkir"],
-   ["jw", "javanese"],
-   ["su", "sundanese"]
- ];
- new Map(WHISPER_LANGUAGES);
- new Map([
-   ...WHISPER_LANGUAGES.map(([k, v]) => [v, k]),
-   ...[
-     ["burmese", "my"],
-     ["valencian", "ca"],
-     ["flemish", "nl"],
-     ["haitian", "ht"],
-     ["letzeburgesch", "lb"],
-     ["pushto", "ps"],
-     ["panjabi", "pa"],
-     ["moldavian", "ro"],
-     ["moldovan", "ro"],
-     ["sinhalese", "si"],
-     ["castilian", "es"]
-   ]
- ]);
- //# sourceMappingURL=tokenizers.js.map
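For context, the hunk above is everything that tree-shaking had kept of @xenova/transformers' tokenizers module in 0.7.3: the GPT-2-style byte-level BPE byte-to-unicode table and the Whisper language tables, both dropped from the bundle in 0.8.0. A minimal standalone reproduction of that table (same logic as the removed code, with a clearer result name) shows what it computes:

  // Every byte 0-255 gets a printable single-character alias; bytes that are
  // not already printable are remapped to code points starting at 256.
  const bs = [
    ...Array.from({ length: "~".charCodeAt(0) - "!".charCodeAt(0) + 1 }, (_, i) => i + "!".charCodeAt(0)),
    ...Array.from({ length: "¬".charCodeAt(0) - "¡".charCodeAt(0) + 1 }, (_, i) => i + "¡".charCodeAt(0)),
    ...Array.from({ length: "ÿ".charCodeAt(0) - "®".charCodeAt(0) + 1 }, (_, i) => i + "®".charCodeAt(0))
  ];
  const cs = bs.slice();
  let n = 0;
  for (let b = 0; b < 256; ++b) {
    if (!bs.includes(b)) {
      bs.push(b);
      cs.push(256 + n);
      n += 1;
    }
  }
  const byteToUnicode = Object.fromEntries(bs.map((b, i) => [b, String.fromCharCode(cs[i])]));

  console.log(byteToUnicode[32]); // "Ġ" (U+0120): the space byte
  console.log(byteToUnicode[10]); // "Ċ" (U+010A): the newline byte

This is why GPT-2-family vocabularies contain tokens like "Ġthe": the leading Ġ is simply an encoded space.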
package/dist/node_modules/@xenova/transformers/src/tokenizers.js.map
@@ -1 +0,0 @@
- {"version":3,"file":"tokenizers.js","sources":["../../../../../node_modules/@xenova/transformers/src/tokenizers.js"],"sourcesContent":["\n/**\n * @file Tokenizers are used to prepare textual inputs for a model.\n * \n * **Example:** Create an `AutoTokenizer` and use it to tokenize a sentence.\n * This will automatically detect the tokenizer type based on the tokenizer class defined in `tokenizer.json`.\n * ```javascript\n * import { AutoTokenizer } from '@xenova/transformers';\n * \n * const tokenizer = await AutoTokenizer.from_pretrained('Xenova/bert-base-uncased');\n * const { input_ids } = await tokenizer('I love transformers!');\n * // Tensor {\n * // data: BigInt64Array(6) [101n, 1045n, 2293n, 19081n, 999n, 102n],\n * // dims: [1, 6],\n * // type: 'int64',\n * // size: 6,\n * // }\n * ```\n * \n * @module tokenizers\n */\n\nimport {\n Callable,\n reverseDictionary,\n escapeRegExp,\n isIntegralNumber,\n mergeArrays,\n} from './utils/core.js';\n\nimport {\n getModelJSON,\n} from './utils/hub.js';\n\nimport { max, min, round } from './utils/maths.js';\nimport { Tensor } from './utils/tensor.js';\n\nimport {\n PriorityQueue,\n TokenLattice,\n CharTrie,\n} from './utils/data-structures.js';\n\nimport { Template } from '@huggingface/jinja';\n\n\n/**\n * @typedef {Object} TokenizerProperties Additional tokenizer-specific properties.\n * @property {boolean} [legacy=false] Whether or not the `legacy` behavior of the tokenizer should be used.\n * @typedef {import('./utils/hub.js').PretrainedOptions & TokenizerProperties} PretrainedTokenizerOptions\n */\n\n/**\n * Loads a tokenizer from the specified path.\n * @param {string} pretrained_model_name_or_path The path to the tokenizer directory.\n * @param {PretrainedTokenizerOptions} options Additional options for loading the tokenizer.\n * @returns {Promise<any[]>} A promise that resolves with information about the loaded tokenizer.\n */\nasync function loadTokenizer(pretrained_model_name_or_path, options) {\n\n const info = await Promise.all([\n getModelJSON(pretrained_model_name_or_path, 'tokenizer.json', true, options),\n getModelJSON(pretrained_model_name_or_path, 'tokenizer_config.json', true, options),\n ])\n\n // Override legacy option if `options.legacy` is not null\n if (options.legacy !== null) {\n info[1].legacy = options.legacy;\n }\n return info;\n}\n\n\n/**\n * Helper function to split a string on a regex, but keep the delimiters.\n * This is required, because the JavaScript `.split()` method does not keep the delimiters,\n * and wrapping in a capturing group causes issues with existing capturing groups (due to nesting).\n * @param {string} text The text to split.\n * @param {RegExp} regex The regex to split on.\n * @returns {string[]} The split string.\n */\nfunction regexSplit(text, regex) {\n const result = [];\n let prev = 0;\n for (const match of text.matchAll(regex)) {\n const fullMatch = match[0];\n if (prev < match.index) {\n result.push(text.slice(prev, match.index));\n }\n if (fullMatch.length > 0) {\n result.push(fullMatch);\n }\n prev = match.index + fullMatch.length;\n }\n if (prev < text.length) {\n result.push(text.slice(prev));\n }\n return result;\n}\n\n\n/**\n * Helper method to construct a pattern from a config object.\n * @param {Object} pattern The pattern object.\n * @param {boolean} invert Whether to invert the pattern.\n * @returns {RegExp|null} The compiled pattern.\n */\nfunction createPattern(pattern, invert = true) {\n\n if (pattern.Regex !== undefined) {\n // In certain cases, the pattern may contain 
unnecessary escape sequences (e.g., \\# or \\& or \\~).\n // i.e., valid in Python (where the patterns are exported from) but invalid in JavaScript (where the patterns are parsed).\n // This isn't an issue when creating the regex w/o the 'u' flag, but it is when the 'u' flag is used.\n // For this reason, it is necessary to remove these backslashes before creating the regex.\n // See https://stackoverflow.com/a/63007777/13989043 for more information\n let regex = pattern.Regex.replace(/\\\\([#&~])/g, '$1'); // TODO: add more characters to this list if necessary\n\n // We also handle special cases where the regex contains invalid (non-JS compatible) syntax.\n for (const [key, value] of PROBLEMATIC_REGEX_MAP) {\n regex = regex.replaceAll(key, value);\n }\n\n return new RegExp(regex, 'gu');\n\n } else if (pattern.String !== undefined) {\n const escaped = escapeRegExp(pattern.String);\n // NOTE: if invert is true, we wrap the pattern in a group so that it is kept when performing .split()\n return new RegExp(invert ? escaped : `(${escaped})`, 'gu');\n\n } else {\n console.warn('Unknown pattern type:', pattern)\n return null;\n }\n}\n\n/**\n * Helper function to convert an Object to a Map\n * @param {Object} obj The object to convert.\n * @returns {Map<string, any>} The map.\n */\nfunction objectToMap(obj) {\n return new Map(Object.entries(obj));\n}\n\n/**\n * Helper function to convert a tensor to a list before decoding.\n * @param {Tensor} tensor The tensor to convert.\n * @returns {number[]} The tensor as a list.\n */\nfunction prepareTensorForDecode(tensor) {\n const dims = tensor.dims;\n switch (dims.length) {\n case 1:\n return tensor.tolist();\n case 2:\n if (dims[0] !== 1) {\n throw new Error('Unable to decode tensor with `batch size !== 1`. Use `tokenizer.batch_decode(...)` for batched inputs.');\n }\n return tensor.tolist()[0];\n default:\n throw new Error(`Expected tensor to have 1-2 dimensions, got ${dims.length}.`)\n }\n}\n\n/**\n * Clean up a list of simple English tokenization artifacts like spaces before punctuations and abbreviated forms\n * @param {string} text The text to clean up.\n * @returns {string} The cleaned up text.\n */\nfunction clean_up_tokenization(text) {\n // Clean up a list of simple English tokenization artifacts\n // like spaces before punctuations and abbreviated forms\n return text.replace(/ \\./g, '.')\n .replace(/ \\?/g, '?')\n .replace(/ \\!/g, '!')\n .replace(/ ,/g, ',')\n .replace(/ \\' /g, \"'\")\n .replace(/ n\\'t/g, \"n't\")\n .replace(/ \\'m/g, \"'m\")\n .replace(/ \\'s/g, \"'s\")\n .replace(/ \\'ve/g, \"'ve\")\n .replace(/ \\'re/g, \"'re\");\n}\n\n/**\n * Helper function to remove accents from a string.\n * @param {string} text The text to remove accents from.\n * @returns {string} The text with accents removed.\n */\nfunction remove_accents(text) {\n return text.replace(/[\\u0300-\\u036f]/g, '');\n}\n\n/**\n * Helper function to lowercase a string and remove accents.\n * @param {string} text The text to lowercase and remove accents from.\n * @returns {string} The lowercased text with accents removed.\n */\nfunction lowercase_and_remove_accent(text) {\n return remove_accents(text.toLowerCase());\n}\n\n/**\n * Helper function to fuse consecutive values in an array equal to the specified value.\n * @param {string[]} arr The input array\n * @param {any} value The value to fuse on.\n * @param {Map<string, any>} mapping The mapping from input domain to value.\n */\nfunction fuse(arr, value, mapping) {\n const fused = [];\n let i = 0;\n while (i < 
arr.length) {\n fused.push(arr[i])\n if ((mapping.get(arr[i]) ?? value) !== value) {\n ++i;\n continue;\n }\n\n while (i < arr.length && (mapping.get(arr[i]) ?? value) === value) {\n ++i;\n }\n }\n\n return fused;\n}\n\n/**\n * Split a string on whitespace.\n * @param {string} text The text to split.\n * @returns {string[]} The split string.\n */\nfunction whitespace_split(text) {\n return text.match(/\\S+/g) || [];\n}\n\nconst PUNCTUATION_REGEX = '\\\\p{P}\\\\u0021-\\\\u002F\\\\u003A-\\\\u0040\\\\u005B-\\\\u0060\\\\u007B-\\\\u007E';\n\n// A mapping of regex patterns to their equivalent (but longer) JS-compatible versions.\nconst PROBLEMATIC_REGEX_MAP = new Map([\n // This uses the case insensitive group modifier, which is not supported in JavaScript.\n // When parsing the regex, an \"Invalid group\" error is thrown.\n [\"(?i:'s|'t|'re|'ve|'m|'ll|'d)\", \"(?:'([sS]|[tT]|[rR][eE]|[vV][eE]|[mM]|[lL][lL]|[dD]))\"],\n])\n\n\n/**\n * Represent a token added by the user on top of the existing Model vocabulary.\n * AddedToken can be configured to specify the behavior they should have in various situations like:\n * - Whether they should only match single words\n * - Whether to include any whitespace on its left or right\n */\nclass AddedToken {\n /**\n * Creates a new instance of AddedToken.\n * @param {Object} config Added token configuration object.\n * @param {string} config.content The content of the added token.\n * @param {number} config.id The id of the added token.\n * @param {boolean} [config.single_word=false] Whether this token must be a single word or can break words.\n * @param {boolean} [config.lstrip=false] Whether this token should strip whitespaces on its left.\n * @param {boolean} [config.rstrip=false] Whether this token should strip whitespaces on its right.\n * @param {boolean} [config.normalized=false] Whether this token should be normalized.\n * @param {boolean} [config.special=false] Whether this token is special.\n */\n constructor(config) {\n this.content = config.content;\n this.id = config.id;\n this.single_word = config.single_word ?? false;\n this.lstrip = config.lstrip ?? false;\n this.rstrip = config.rstrip ?? false;\n this.special = config.special ?? false;\n this.normalized = config.normalized ?? null;\n }\n}\n\n/**\n * Abstract base class for tokenizer models.\n *\n * @extends Callable\n */\nexport class TokenizerModel extends Callable {\n /**\n * Creates a new instance of TokenizerModel.\n * @param {Object} config The configuration object for the TokenizerModel.\n */\n constructor(config) {\n super();\n this.config = config;\n\n /** @type {string[]} */\n this.vocab = [];\n\n /**\n * A mapping of tokens to ids.\n * @type {Map<string, number>}\n */\n this.tokens_to_ids = new Map();\n\n this.unk_token_id = undefined;\n this.unk_token = undefined;\n this.end_of_word_suffix = undefined;\n\n /** @type {boolean} Whether to fuse unknown tokens when encoding. Defaults to false. */\n this.fuse_unk = this.config.fuse_unk ?? 
false;\n }\n\n /**\n * Instantiates a new TokenizerModel instance based on the configuration object provided.\n * @param {Object} config The configuration object for the TokenizerModel.\n * @param {...*} args Optional arguments to pass to the specific TokenizerModel constructor.\n * @returns {TokenizerModel} A new instance of a TokenizerModel.\n * @throws Will throw an error if the TokenizerModel type in the config is not recognized.\n */\n static fromConfig(config, ...args) {\n switch (config.type) {\n case 'WordPiece':\n return new WordPieceTokenizer(config);\n case 'Unigram':\n // @ts-ignore\n return new Unigram(config, ...args);\n\n case 'BPE':\n return new BPE(config);\n\n default:\n if (config.vocab) {\n // @ts-ignore\n return new LegacyTokenizerModel(config, ...args);\n }\n throw new Error(`Unknown TokenizerModel type: ${config.type}`);\n }\n }\n\n /**\n * Internal function to call the TokenizerModel instance.\n * @param {string[]} tokens The tokens to encode.\n * @returns {string[]} The encoded token IDs.\n */\n _call(tokens) {\n let ids = this.encode(tokens);\n if (this.fuse_unk) {\n // Fuse unknown tokens\n ids = fuse(ids, this.unk_token_id, this.tokens_to_ids);\n }\n return ids;\n }\n\n /**\n * Encodes a list of tokens into a list of token IDs.\n * @param {string[]} tokens The tokens to encode.\n * @returns {string[]} The encoded tokens.\n * @throws Will throw an error if not implemented in a subclass.\n */\n encode(tokens) {\n throw Error(\"encode should be implemented in subclass.\")\n }\n\n /**\n * Converts a list of tokens into a list of token IDs.\n * @param {string[]} tokens The tokens to convert.\n * @returns {number[]} The converted token IDs.\n */\n convert_tokens_to_ids(tokens) {\n return tokens.map(t => this.tokens_to_ids.get(t) ?? this.unk_token_id);\n }\n\n /**\n * Converts a list of token IDs into a list of tokens.\n * @param {number[]} ids The token IDs to convert.\n * @returns {string[]} The converted tokens.\n */\n convert_ids_to_tokens(ids) {\n return ids.map(i => this.vocab[i] ?? this.unk_token);\n }\n}\n\n/**\n * A subclass of TokenizerModel that uses WordPiece encoding to encode tokens.\n * @extends TokenizerModel\n */\nclass WordPieceTokenizer extends TokenizerModel {\n /**\n * @param {Object} config The configuration object.\n * @param {Object} config.vocab A mapping of tokens to ids.\n * @param {string} config.unk_token The unknown token string.\n * @param {string} config.continuing_subword_prefix The prefix to use for continuing subwords.\n * @param {number} [config.max_input_chars_per_word=100] The maximum number of characters per word.\n */\n constructor(config) {\n super(config);\n /**\n * A mapping of tokens to ids.\n * @type {Map<string, number>}\n */\n this.tokens_to_ids = objectToMap(config.vocab);\n\n /**\n * The id of the unknown token.\n * @type {number}\n */\n this.unk_token_id = this.tokens_to_ids.get(config.unk_token);\n\n /**\n * The unknown token string.\n * @type {string}\n */\n this.unk_token = config.unk_token;\n\n /**\n * The maximum number of characters allowed per word.\n * @type {number}\n */\n this.max_input_chars_per_word = config.max_input_chars_per_word ?? 
100;\n\n /**\n * An array of tokens.\n * @type {string[]}\n */\n this.vocab = new Array(this.tokens_to_ids.size);\n for (const [key, value] of this.tokens_to_ids) {\n this.vocab[value] = key;\n }\n }\n\n /**\n * Encodes an array of tokens using WordPiece encoding.\n * @param {string[]} tokens The tokens to encode.\n * @returns {string[]} An array of encoded tokens.\n */\n encode(tokens) {\n const outputTokens = [];\n for (const token of tokens) {\n const chars = [...token];\n if (chars.length > this.max_input_chars_per_word) {\n outputTokens.push(this.unk_token);\n continue;\n }\n\n let isUnknown = false;\n let start = 0;\n const subTokens = [];\n\n while (start < chars.length) {\n let end = chars.length;\n let currentSubstring = null;\n while (start < end) {\n let substr = chars.slice(start, end).join('');\n\n if (start > 0) {\n substr = this.config.continuing_subword_prefix + substr;\n }\n if (this.tokens_to_ids.has(substr)) {\n currentSubstring = substr;\n break;\n }\n\n --end;\n }\n if (currentSubstring === null) {\n isUnknown = true;\n break;\n }\n subTokens.push(currentSubstring);\n start = end;\n }\n if (isUnknown) {\n outputTokens.push(this.unk_token);\n } else {\n outputTokens.push(...subTokens);\n }\n }\n\n return outputTokens;\n }\n\n}\n\n/**\n * Class representing a Unigram tokenizer model.\n * @extends TokenizerModel\n */\nclass Unigram extends TokenizerModel {\n /**\n * Create a new Unigram tokenizer model.\n * @param {Object} config The configuration object for the Unigram model.\n * @param {number} config.unk_id The ID of the unknown token\n * @param {any[][]} config.vocab A 2D array representing a mapping of tokens to scores.\n * @param {Object} moreConfig Additional configuration object for the Unigram model.\n */\n constructor(config, moreConfig) {\n super(config);\n\n const vocabSize = config.vocab.length;\n this.vocab = new Array(vocabSize);\n this.scores = new Array(vocabSize);\n for (let i = 0; i < vocabSize; ++i) {\n const piece = config.vocab[i];\n this.vocab[i] = piece[0];\n this.scores[i] = piece[1];\n }\n\n this.unk_token_id = config.unk_id;\n this.unk_token = this.vocab[config.unk_id];\n\n this.tokens_to_ids = new Map(this.vocab.map((x, i) => [x, i]));\n this.bosToken = ' '; // beginning of a sentence token\n\n this.bosTokenId = this.tokens_to_ids.get(this.bosToken); // NOTE: may be undefined\n this.eosToken = moreConfig.eos_token;\n\n this.eosTokenId = this.tokens_to_ids.get(this.eosToken);\n this.unkToken = this.vocab[this.unk_token_id];\n\n this.minScore = min(this.scores)[0];\n\n this.unkScore = this.minScore - 10.0;\n this.scores[this.unk_token_id] = this.unkScore;\n\n this.trie = new CharTrie();\n this.trie.extend(this.vocab);\n\n // NOTE: `fuse_unk` is hardcoded to true for Unigram models\n // See: https://github.com/huggingface/tokenizers/blob/b58227c7f1ccf8b73ee2268354336da56d91e492/tokenizers/src/models/unigram/model.rs#L119\n this.fuse_unk = true;\n }\n\n /**\n * Populates lattice nodes.\n * @param {TokenLattice} lattice The token lattice to populate with nodes.\n */\n populateNodes(lattice) {\n const sentence = lattice.sentence;\n const len = sentence.length;\n let beginPos = 0;\n while (beginPos < len) {\n const mblen = 1;\n let hasSingleNode = false;\n const tokens = [];\n\n for (let token of this.trie.commonPrefixSearch(sentence.slice(beginPos))) {\n tokens.push(token);\n const tokenId = this.tokens_to_ids.get(token);\n const tokenScore = this.scores[tokenId];\n const n = token.length;\n lattice.insert(beginPos, n, tokenScore, tokenId);\n if 
(!hasSingleNode && n === mblen) {\n hasSingleNode = true;\n }\n }\n if (!hasSingleNode) {\n lattice.insert(beginPos, mblen, this.unkScore, this.unk_token_id);\n }\n beginPos += mblen;\n }\n }\n\n /**\n * Encodes an array of tokens into an array of subtokens using the unigram model.\n *\n * @param {string} normalized The normalized string.\n * @returns {string[]} An array of subtokens obtained by encoding the input tokens using the unigram model.\n */\n tokenize(normalized) {\n const lattice = new TokenLattice(normalized, this.bosTokenId, this.eosTokenId);\n this.populateNodes(lattice);\n return lattice.tokens();\n }\n\n /**\n * Encodes an array of tokens using Unigram encoding.\n * @param {string[]} tokens The tokens to encode.\n * @returns {string[]} An array of encoded tokens.\n */\n encode(tokens) {\n const toReturn = [];\n for (const token of tokens) {\n const tokenized = this.tokenize(token);\n toReturn.push(...tokenized);\n }\n return toReturn;\n }\n\n}\n\n/**\n * Returns list of utf-8 byte and a mapping to unicode strings.\n * Specifically avoids mapping to whitespace/control characters the BPE code barfs on.\n * @returns {Object} Object with utf-8 byte keys and unicode string values.\n */\nconst BYTES_TO_UNICODE = (() => {\n // Returns list of utf-8 byte and a mapping to unicode strings.\n // We specifically avoids mapping to whitespace/control characters\n // the bpe code barfs on.\n\n const bs = [\n ...Array.from({ length: \"~\".charCodeAt(0) - \"!\".charCodeAt(0) + 1 }, (_, i) => i + \"!\".charCodeAt(0)),\n ...Array.from({ length: \"¬\".charCodeAt(0) - \"¡\".charCodeAt(0) + 1 }, (_, i) => i + \"¡\".charCodeAt(0)),\n ...Array.from({ length: \"ÿ\".charCodeAt(0) - \"®\".charCodeAt(0) + 1 }, (_, i) => i + \"®\".charCodeAt(0)),\n ];\n const cs = bs.slice();\n let n = 0;\n for (let b = 0; b < 256; ++b) {\n if (!bs.includes(b)) {\n bs.push(b);\n cs.push(256 + n);\n n += 1;\n }\n }\n const ccs = cs.map(n => String.fromCharCode(n));\n return Object.fromEntries(bs.map((b, i) => [b, ccs[i]]));\n})();\n\nconst UNICODE_TO_BYTES = reverseDictionary(BYTES_TO_UNICODE);\n\n\n/**\n * @typedef {Object} BPENode\n * @property {string} token The token associated with the node\n * @property {number} bias A positional bias for the node.\n * @property {number} [score] The score of the node.\n * @property {BPENode} [prev] The previous node in the linked list.\n * @property {BPENode} [next] The next node in the linked list.\n */\n\n/**\n * BPE class for encoding text into Byte-Pair-Encoding (BPE) tokens.\n * @extends TokenizerModel\n */\nclass BPE extends TokenizerModel {\n /**\n * Create a BPE instance.\n * @param {Object} config The configuration object for BPE.\n * @param {Object} config.vocab A mapping of tokens to ids.\n * @param {string[]} config.merges An array of BPE merges as strings.\n * @param {string} config.unk_token The unknown token used for out of vocabulary words.\n * @param {string} config.end_of_word_suffix The suffix to place at the end of each word.\n * @param {string} [config.continuing_subword_suffix] The suffix to insert between words.\n * @param {boolean} [config.byte_fallback=false] Whether to use spm byte-fallback trick (defaults to False)\n * @param {boolean} [config.ignore_merges=false] Whether or not to match tokens with the vocab before using merges.\n */\n constructor(config) {\n super(config);\n\n this.BPE_SPLIT_TOKEN = ' ';\n\n /** @type {Map<string, number>} */\n this.tokens_to_ids = objectToMap(config.vocab);\n\n this.unk_token_id = 
this.tokens_to_ids.get(config.unk_token);\n this.unk_token = config.unk_token;\n\n this.vocab = new Array(this.tokens_to_ids.size);\n for (const [key, value] of this.tokens_to_ids) {\n this.vocab[value] = key;\n }\n\n this.bpe_ranks = new Map(config.merges.map((x, i) => [x, i]));\n this.merges = config.merges.map(x => x.split(this.BPE_SPLIT_TOKEN));\n\n this.end_of_word_suffix = config.end_of_word_suffix;\n\n // NOTE: `continuing_subword_suffix` is custom (to support `BlenderbotSmallTokenizer`)\n this.continuing_subword_suffix = config.continuing_subword_suffix ?? null;\n\n this.byte_fallback = this.config.byte_fallback ?? false;\n\n if (this.byte_fallback) {\n this.text_encoder = new TextEncoder();\n }\n\n this.ignore_merges = this.config.ignore_merges ?? false;\n\n /** @type {Map<string, string[]>} */\n this.cache = new Map();\n }\n\n /**\n * Apply Byte-Pair-Encoding (BPE) to a given token. Efficient heap-based priority\n * queue implementation adapted from https://github.com/belladoreai/llama-tokenizer-js.\n * @param {string} token The token to encode.\n * @returns {string[]} The BPE encoded tokens.\n */\n bpe(token) {\n if (token.length === 0) {\n return [];\n }\n\n const cached = this.cache.get(token);\n if (cached !== undefined) {\n return cached;\n }\n\n const word = Array.from(token);\n if (this.end_of_word_suffix) {\n word[word.length - 1] += this.end_of_word_suffix;\n }\n\n let result = [];\n if (word.length > 1) {\n // Create a priority queue to store the nodes that will be merged.\n // The comparator function compares the scores of the nodes.\n const queue = new PriorityQueue((a, b) => a.score < b.score);\n\n // Construct a doubly-linked list of nodes that will be inserted into the priority queue,\n // starting with the individual characters. We also populate each node with a positional\n // bias to break ties in the priority queue.\n let startingNode = {\n token: word[0],\n bias: 0,\n prev: null,\n next: null,\n }\n\n let previousNode = startingNode\n for (let i = 1; i < word.length; ++i) {\n const currentNode = {\n bias: i / word.length, // Add fractional component to break ties\n token: word[i],\n prev: previousNode,\n next: null,\n }\n previousNode.next = currentNode\n this._add_node(queue, previousNode)\n previousNode = currentNode\n }\n\n while (!queue.isEmpty()) {\n // Get the next node with the highest priority\n const node = queue.pop();\n\n // Check that this merge is still possible\n if (node.deleted || !node.next || node.next.deleted) continue;\n\n // Here, we mark the current node (left side of the merge) and the next node (right side of the merge) as deleted.\n // This is because they will both be replaced by a new node representing the merge result.\n node.deleted = true;\n node.next.deleted = true;\n\n // Next, we fix the node that comes before the current node (i.e., left side of the merge).\n if (node.prev) {\n\n // Make a shallow copy of the previous node\n const newPreviousNode = { ...node.prev };\n\n // Mark the old previous node as deleted. 
This avoids erroneous merges later,\n // because there may still be references to this node in the priority queue.\n node.prev.deleted = true;\n node.prev = newPreviousNode;\n\n // Update the reference of the previous node, by pointing its previous node to this new previous node.\n if (newPreviousNode.prev) {\n newPreviousNode.prev.next = newPreviousNode;\n } else {\n // If the previous of the previous node does not exist, it means that\n // `newPreviousNode` must be the new `startingNode`.\n startingNode = newPreviousNode;\n }\n }\n\n // Create a new node which represents the result of the merge.\n const merged = {\n token: node.token + node.next.token,\n bias: node.bias,\n prev: node.prev,\n next: node.next.next,\n }\n\n // We now consider where we can add the new merged node to the priority queue:\n // 1. prev <-> merged\n if (merged.prev) {\n merged.prev.next = merged;\n this._add_node(queue, merged.prev);\n } else {\n // If `merged.prev` does not exist, then `merged` must be the new `startingNode`.\n startingNode = merged;\n }\n\n // 2. merged <-> next\n if (merged.next) {\n merged.next.prev = merged;\n this._add_node(queue, merged);\n }\n }\n\n // Traverse the linked list, starting from the `startingNode`, and collect the tokens.\n for (let currentNode = startingNode; currentNode !== null; currentNode = currentNode.next) {\n result.push(currentNode.token);\n }\n } else {\n result = word;\n }\n\n // Possibly append suffix\n if (this.continuing_subword_suffix) {\n // Do not append suffix to the last token\n for (let i = 0; i < result.length - 1; ++i) {\n result[i] += this.continuing_subword_suffix;\n }\n }\n\n // Save the result to the cache\n this.cache.set(token, result);\n\n return result;\n }\n\n\n /**\n * Helper function to add a node to the priority queue.\n * @param {PriorityQueue} queue \n * @param {BPENode} node\n * @private\n */\n _add_node(queue, node) {\n // `score` is a measure of the merge priority: lower means higher priority\n // We use the BPE rank as a measure of priority (i.e., the local of the merge in the merges list)\n // We also add a fractional component to the score to break ties (with the earlier character having higher priority)\n const rank = this.bpe_ranks.get(node.token + this.BPE_SPLIT_TOKEN + node.next.token);\n if (rank !== undefined) {\n node.score = rank + node.bias;\n queue.push(node);\n }\n }\n\n /**\n * Encodes the input sequence of tokens using the BPE algorithm and returns the resulting subword tokens.\n * @param {string[]} tokens The input sequence of tokens to encode.\n * @returns {string[]} The resulting subword tokens after applying the BPE algorithm to the input sequence of tokens.\n */\n encode(tokens) {\n const outputTokens = [];\n\n for (const token of tokens) {\n if (this.ignore_merges && this.tokens_to_ids.has(token)) {\n outputTokens.push(token);\n continue;\n }\n const bpe_token_list = this.bpe(token);\n\n for (const t of bpe_token_list) {\n if (this.tokens_to_ids.has(t)) {\n outputTokens.push(t);\n } else {\n if (this.byte_fallback) {\n outputTokens.push(\n ...Array.from(this.text_encoder.encode(t))\n .map(x => `<0x${x.toString(16).toUpperCase().padStart(2, '0')}>`)\n );\n } else {\n outputTokens.push(this.unk_token);\n }\n }\n }\n }\n\n return outputTokens;\n }\n\n}\n\n/**\n * Legacy tokenizer class for tokenizers with only a vocabulary.\n */\nclass LegacyTokenizerModel extends TokenizerModel {\n /**\n * Create a LegacyTokenizerModel instance.\n * @param {Object} config The configuration object for LegacyTokenizerModel.\n * @param 
{Object} config.vocab A (possibly nested) mapping of tokens to ids.\n * @param {Object} moreConfig Additional configuration object for the LegacyTokenizerModel model.\n */\n constructor(config, moreConfig) {\n super(config);\n\n /**@type {Map<string, number>} */\n this.tokens_to_ids = objectToMap(\n moreConfig.target_lang\n ? config.vocab[moreConfig.target_lang]\n : config.vocab\n );\n\n this.bos_token = moreConfig.bos_token;\n this.bos_token_id = this.tokens_to_ids.get(this.bos_token);\n\n this.eos_token = moreConfig.eos_token;\n this.eos_token_id = this.tokens_to_ids.get(this.eos_token);\n\n this.pad_token = moreConfig.pad_token;\n this.pad_token_id = this.tokens_to_ids.get(this.pad_token);\n\n this.unk_token = moreConfig.unk_token;\n this.unk_token_id = this.tokens_to_ids.get(this.unk_token);\n\n this.vocab = new Array(this.tokens_to_ids.size);\n for (const [key, value] of this.tokens_to_ids) {\n this.vocab[value] = key;\n }\n }\n\n encode(tokens) {\n return tokens;\n }\n}\n\n\n/**\n * A base class for text normalization.\n * @abstract\n */\nclass Normalizer extends Callable {\n /**\n * @param {Object} config The configuration object for the normalizer.\n */\n constructor(config) {\n super();\n this.config = config;\n }\n\n /**\n * Factory method for creating normalizers from config objects.\n * @static\n * @param {Object} config The configuration object for the normalizer.\n * @returns {Normalizer} A Normalizer object.\n * @throws {Error} If an unknown Normalizer type is specified in the config.\n */\n static fromConfig(config) {\n if (config === null) return null;\n switch (config.type) {\n case 'BertNormalizer':\n return new BertNormalizer(config);\n case 'Precompiled':\n return new Precompiled(config);\n case 'Sequence':\n return new NormalizerSequence(config);\n case 'Replace':\n return new Replace(config);\n case 'NFC':\n return new NFC(config);\n case 'NFKC':\n return new NFKC(config);\n case 'NFKD':\n return new NFKD(config);\n case 'Strip':\n return new StripNormalizer(config);\n case 'StripAccents':\n return new StripAccents(config);\n case 'Lowercase':\n return new Lowercase(config);\n case 'Prepend':\n return new Prepend(config);\n default:\n throw new Error(`Unknown Normalizer type: ${config.type}`);\n }\n }\n\n /**\n * Normalize the input text.\n * @abstract\n * @param {string} text The text to normalize.\n * @returns {string} The normalized text.\n * @throws {Error} If this method is not implemented in a subclass.\n */\n normalize(text) {\n throw Error(\"normalize should be implemented in subclass.\")\n }\n\n /**\n * Alias for {@link Normalizer#normalize}.\n * @param {string} text The text to normalize.\n * @returns {string} The normalized text.\n */\n _call(text) {\n return this.normalize(text);\n }\n\n}\n\n/**\n * Replace normalizer that replaces occurrences of a pattern with a given string or regular expression.\n * @extends Normalizer\n */\nclass Replace extends Normalizer {\n /**\n * Normalize the input text by replacing the pattern with the content.\n * @param {string} text The input text to be normalized.\n * @returns {string} The normalized text after replacing the pattern with the content.\n */\n normalize(text) {\n const pattern = createPattern(this.config.pattern);\n return pattern === null\n ? 
text\n : text.replaceAll(pattern, this.config.content);\n }\n}\n\n/**\n * A normalizer that applies Unicode normalization form C (NFC) to the input text.\n * @extends Normalizer\n */\nclass NFC extends Normalizer {\n /**\n * Normalize the input text by applying Unicode normalization form C (NFC).\n * @param {string} text The input text to be normalized.\n * @returns {string} The normalized text.\n */\n normalize(text) {\n text = text.normalize('NFC')\n return text;\n }\n}\n\n/**\n * NFKC Normalizer.\n * @extends Normalizer\n */\nclass NFKC extends Normalizer {\n /**\n * Normalize text using NFKC normalization.\n * @param {string} text The text to be normalized.\n * @returns {string} The normalized text.\n */\n normalize(text) {\n text = text.normalize('NFKC')\n return text;\n }\n}\n/**\n * NFKD Normalizer.\n * @extends Normalizer\n */\nclass NFKD extends Normalizer {\n /**\n * Normalize text using NFKD normalization.\n * @param {string} text The text to be normalized.\n * @returns {string} The normalized text.\n */\n normalize(text) {\n text = text.normalize('NFKD')\n return text;\n }\n}\n\n/**\n * A normalizer that strips leading and/or trailing whitespace from the input text.\n */\nclass StripNormalizer extends Normalizer {\n /**\n * Strip leading and/or trailing whitespace from the input text.\n * @param {string} text The input text.\n * @returns {string} The normalized text.\n */\n normalize(text) {\n if (this.config.strip_left && this.config.strip_right) {\n // Fast path to avoid an extra trim call\n text = text.trim();\n } else {\n if (this.config.strip_left) {\n text = text.trimStart();\n }\n if (this.config.strip_right) {\n text = text.trimEnd();\n }\n }\n return text;\n }\n}\n\n/**\n * StripAccents normalizer removes all accents from the text.\n * @extends Normalizer\n */\nclass StripAccents extends Normalizer {\n /**\n * Remove all accents from the text.\n * @param {string} text The input text.\n * @returns {string} The normalized text without accents.\n */\n normalize(text) {\n text = remove_accents(text);\n return text;\n }\n}\n\n/**\n * A Normalizer that lowercases the input string.\n * @extends Normalizer\n */\nclass Lowercase extends Normalizer {\n /**\n * Lowercases the input string.\n * @param {string} text The text to normalize.\n * @returns {string} The normalized text.\n */\n normalize(text) {\n text = text.toLowerCase();\n return text;\n }\n}\n\n/**\n * A Normalizer that prepends a string to the input string.\n * @extends Normalizer\n */\nclass Prepend extends Normalizer {\n /**\n * Prepends the input string.\n * @param {string} text The text to normalize.\n * @returns {string} The normalized text.\n */\n normalize(text) {\n text = this.config.prepend + text;\n return text;\n }\n}\n\n/**\n * A Normalizer that applies a sequence of Normalizers.\n * @extends Normalizer\n */\nclass NormalizerSequence extends Normalizer {\n /**\n * Create a new instance of NormalizerSequence.\n * @param {Object} config The configuration object.\n * @param {Object[]} config.normalizers An array of Normalizer configuration objects.\n */\n constructor(config) {\n super(config);\n this.normalizers = config.normalizers.map(x => Normalizer.fromConfig(x));\n }\n /**\n * Apply a sequence of Normalizers to the input text.\n * @param {string} text The text to normalize.\n * @returns {string} The normalized text.\n */\n normalize(text) {\n return this.normalizers.reduce((t, normalizer) => {\n return normalizer.normalize(t);\n }, text);\n }\n}\n\n/**\n * A class representing a normalizer used in BERT 
tokenization.\n * @extends Normalizer\n */\nclass BertNormalizer extends Normalizer {\n /**\n * Adds whitespace around any CJK (Chinese, Japanese, or Korean) character in the input text.\n *\n * @param {string} text The input text to tokenize.\n * @returns {string} The tokenized text with whitespace added around CJK characters.\n */\n _tokenize_chinese_chars(text) {\n /* Adds whitespace around any CJK character. */\n const output = [];\n for (let i = 0; i < text.length; ++i) {\n const char = text[i];\n const cp = char.charCodeAt(0);\n if (this._is_chinese_char(cp)) {\n output.push(\" \");\n output.push(char);\n output.push(\" \");\n } else {\n output.push(char);\n }\n }\n return output.join(\"\");\n }\n\n /**\n * Checks whether the given Unicode codepoint represents a CJK (Chinese, Japanese, or Korean) character.\n *\n * A \"chinese character\" is defined as anything in the CJK Unicode block:\n * https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)\n *\n * Note that the CJK Unicode block is NOT all Japanese and Korean characters, despite its name.\n * The modern Korean Hangul alphabet is a different block, as is Japanese Hiragana and Katakana.\n * Those alphabets are used to write space-separated words, so they are not treated specially\n * and are handled like all other languages.\n *\n * @param {number} cp The Unicode codepoint to check.\n * @returns {boolean} True if the codepoint represents a CJK character, false otherwise.\n */\n _is_chinese_char(cp) {\n return (\n (cp >= 0x4E00 && cp <= 0x9FFF)\n || (cp >= 0x3400 && cp <= 0x4DBF)\n || (cp >= 0x20000 && cp <= 0x2A6DF)\n || (cp >= 0x2A700 && cp <= 0x2B73F)\n || (cp >= 0x2B740 && cp <= 0x2B81F)\n || (cp >= 0x2B820 && cp <= 0x2CEAF)\n || (cp >= 0xF900 && cp <= 0xFAFF)\n || (cp >= 0x2F800 && cp <= 0x2FA1F)\n )\n }\n /**\n * Strips accents from the given text.\n * @param {string} text The text to strip accents from.\n * @returns {string} The text with accents removed.\n */\n stripAccents(text) {\n return text.normalize('NFD').replace(/[\\u0300-\\u036f]/g, '');\n }\n\n\n /**\n * Checks whether `char` is a control character.\n * @param {string} char The character to check.\n * @returns {boolean} Whether `char` is a control character.\n * @private\n */\n _is_control(char) {\n switch (char) {\n case '\\t':\n case '\\n':\n case '\\r':\n // These are technically control characters but we count them as whitespace characters.\n return false;\n\n default:\n // Check if unicode category starts with C:\n // Cc - Control\n // Cf - Format\n // Co - Private Use\n // Cs - Surrogate\n return /^\\p{Cc}|\\p{Cf}|\\p{Co}|\\p{Cs}$/u.test(char);\n }\n }\n\n /**\n * Performs invalid character removal and whitespace cleanup on text.\n * @param {string} text The text to clean.\n * @returns {string} The cleaned text.\n * @private\n */\n _clean_text(text) {\n const output = [];\n for (const char of text) {\n const cp = char.charCodeAt(0);\n if (cp === 0 || cp === 0xFFFD || this._is_control(char)) {\n continue;\n }\n if (/^\\s$/.test(char)) { // is whitespace\n output.push(\" \");\n } else {\n output.push(char);\n }\n }\n return output.join(\"\");\n }\n /**\n * Normalizes the given text based on the configuration.\n * @param {string} text The text to normalize.\n * @returns {string} The normalized text.\n */\n normalize(text) {\n if (this.config.clean_text) {\n text = this._clean_text(text);\n }\n\n if (this.config.handle_chinese_chars) {\n text = this._tokenize_chinese_chars(text);\n }\n\n if (this.config.lowercase) {\n text = text.toLowerCase();\n\n 
if (this.config.strip_accents !== false) {\n text = this.stripAccents(text);\n }\n } else if (this.config.strip_accents) {\n text = this.stripAccents(text);\n }\n\n return text;\n }\n}\n\n/**\n * A callable class representing a pre-tokenizer used in tokenization. Subclasses\n * should implement the `pre_tokenize_text` method to define the specific pre-tokenization logic.\n * @extends Callable\n */\nclass PreTokenizer extends Callable {\n /**\n * Factory method that returns an instance of a subclass of `PreTokenizer` based on the provided configuration.\n *\n * @static\n * @param {Object} config A configuration object for the pre-tokenizer.\n * @returns {PreTokenizer} An instance of a subclass of `PreTokenizer`.\n * @throws {Error} If the provided configuration object does not correspond to any known pre-tokenizer.\n */\n static fromConfig(config) {\n if (config === null) return null;\n\n switch (config.type) {\n case 'BertPreTokenizer':\n return new BertPreTokenizer(config);\n case 'Sequence':\n return new PreTokenizerSequence(config);\n case 'Whitespace':\n return new WhitespacePreTokenizer(config);\n case 'WhitespaceSplit':\n return new WhitespaceSplit(config);\n case 'Metaspace':\n return new MetaspacePreTokenizer(config);\n\n case 'ByteLevel':\n return new ByteLevelPreTokenizer(config);\n case 'Split':\n return new SplitPreTokenizer(config);\n case 'Punctuation':\n return new PunctuationPreTokenizer(config);\n case 'Digits':\n return new DigitsPreTokenizer(config);\n case 'Replace':\n return new ReplacePreTokenizer(config);\n default:\n throw new Error(`Unknown PreTokenizer type: ${config.type}`);\n }\n }\n\n /**\n * Method that should be implemented by subclasses to define the specific pre-tokenization logic.\n *\n * @abstract\n * @param {string} text The text to pre-tokenize.\n * @param {Object} [options] Additional options for the pre-tokenization logic.\n * @returns {string[]} The pre-tokenized text.\n * @throws {Error} If the method is not implemented in the subclass.\n */\n pre_tokenize_text(text, options) {\n throw Error(\"pre_tokenize_text should be implemented in subclass.\")\n }\n\n /**\n * Tokenizes the given text into pre-tokens.\n * @param {string|string[]} text The text or array of texts to pre-tokenize.\n * @param {Object} [options] Additional options for the pre-tokenization logic.\n * @returns {string[]} An array of pre-tokens.\n */\n pre_tokenize(text, options) {\n return (Array.isArray(text)\n ? 
text.map(x => this.pre_tokenize_text(x, options))\n : this.pre_tokenize_text(text, options)\n ).flat();\n }\n\n /**\n * Alias for {@link PreTokenizer#pre_tokenize}.\n * @param {string|string[]} text The text or array of texts to pre-tokenize.\n * @param {Object} [options] Additional options for the pre-tokenization logic.\n * @returns {string[]} An array of pre-tokens.\n */\n _call(text, options) {\n return this.pre_tokenize(text, options);\n }\n}\n\n/**\n * @extends PreTokenizer\n */\nclass BertPreTokenizer extends PreTokenizer {\n /**\n * A PreTokenizer that splits text into wordpieces using a basic tokenization scheme\n * similar to that used in the original implementation of BERT.\n * \n * @param {Object} config The configuration object.\n */\n constructor(config) {\n super();\n // Construct a pattern which matches the rust implementation:\n // https://github.com/huggingface/tokenizers/blob/b4fcc9ce6e4ad5806e82826f816acfdfdc4fcc67/tokenizers/src/pre_tokenizers/bert.rs#L11\n // Equivalent to removing whitespace and splitting on punctuation (both \\p{P} and other ascii characters)\n this.pattern = new RegExp(`[^\\\\s${PUNCTUATION_REGEX}]+|[${PUNCTUATION_REGEX}]`, 'gu');\n }\n /**\n * Tokenizes a single text using the BERT pre-tokenization scheme.\n * \n * @param {string} text The text to tokenize.\n * @param {Object} [options] Additional options for the pre-tokenization logic.\n * @returns {string[]} An array of tokens.\n */\n pre_tokenize_text(text, options) {\n return text.trim().match(this.pattern) || [];\n }\n}\n\n/**\n * A pre-tokenizer that splits text into Byte-Pair-Encoding (BPE) subwords.\n * @extends PreTokenizer\n */\nclass ByteLevelPreTokenizer extends PreTokenizer {\n /**\n * Creates a new instance of the `ByteLevelPreTokenizer` class.\n * @param {Object} config The configuration object.\n */\n constructor(config) {\n super();\n this.config = config;\n\n /**\n * @type {boolean} Whether to add a leading space to the first word.\n * This allows to treat the leading word just as any other word.\n */\n this.add_prefix_space = this.config.add_prefix_space;\n\n /**\n * @type {boolean} Whether the post processing step should trim offsets\n * to avoid including whitespaces.\n * @todo Use this in the pretokenization step.\n */\n this.trim_offsets = this.config.trim_offsets;\n\n /**\n * @type {boolean} Whether to use the standard GPT2 regex for whitespace splitting.\n * Set it to False if you want to use your own splitting. Defaults to true.\n */\n this.use_regex = this.config.use_regex ?? true;\n this.pattern = /'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)|\\s+/gu;\n\n this.byte_encoder = BYTES_TO_UNICODE;\n this.text_encoder = new TextEncoder();\n }\n\n /**\n * Tokenizes a single piece of text using byte-level tokenization.\n * @param {string} text The text to tokenize.\n * @param {Object} [options] Additional options for the pre-tokenization logic.\n * @returns {string[]} An array of tokens.\n */\n pre_tokenize_text(text, options) {\n // Add a leading space if the option is enabled\n if (this.add_prefix_space && !text.startsWith(' ')) {\n text = ' ' + text;\n }\n\n // Split on whitespace and punctuation\n const tokens = this.use_regex ? 
(text.match(this.pattern) || []) : [text];\n\n // Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)\n return tokens.map(\n token => Array.from(this.text_encoder.encode(token), byte => this.byte_encoder[byte]).join('')\n );\n }\n}\n\n/**\n * @typedef {'removed'|'isolated'|'mergedWithPrevious'|'mergedWithNext'|'contiguous'} SplitDelimiterBehavior\n */\n\n/**\n * Splits text using a given pattern.\n * @extends PreTokenizer\n */\nclass SplitPreTokenizer extends PreTokenizer {\n /**\n * @param {Object} config The configuration options for the pre-tokenizer.\n * @param {Object} config.pattern The pattern used to split the text. Can be a string or a regex object.\n * @param {string|undefined} config.pattern.String The string to use for splitting. Only defined if the pattern is a string.\n * @param {string|undefined} config.pattern.Regex The regex to use for splitting. Only defined if the pattern is a regex.\n * @param {SplitDelimiterBehavior} config.behavior The behavior to use when splitting.\n * @param {boolean} config.invert Whether to split (invert=false) or match (invert=true) the pattern.\n */\n constructor(config) {\n super();\n this.config = config;\n // TODO support all behaviours (config.behavior)\n\n this.pattern = createPattern(this.config.pattern, this.config.invert);\n }\n\n /**\n * Tokenizes text by splitting it using the given pattern.\n * @param {string} text The text to tokenize.\n * @param {Object} [options] Additional options for the pre-tokenization logic.\n * @returns {string[]} An array of tokens.\n */\n pre_tokenize_text(text, options) {\n if (this.pattern === null) {\n return [];\n }\n\n if (this.config.invert) {\n return text.match(this.pattern) || [];\n } else {\n return regexSplit(text, this.pattern);\n }\n }\n}\n\n/**\n * Splits text based on punctuation.\n * @extends PreTokenizer\n */\nclass PunctuationPreTokenizer extends PreTokenizer {\n /**\n * @param {Object} config The configuration options for the pre-tokenizer.\n * @param {SplitDelimiterBehavior} config.behavior The behavior to use when splitting.\n */\n constructor(config) {\n super();\n this.config = config;\n this.pattern = new RegExp(`[^${PUNCTUATION_REGEX}]+|[${PUNCTUATION_REGEX}]+`, 'gu');\n }\n\n /**\n * Tokenizes text by splitting it using the given pattern.\n * @param {string} text The text to tokenize.\n * @param {Object} [options] Additional options for the pre-tokenization logic.\n * @returns {string[]} An array of tokens.\n */\n pre_tokenize_text(text, options) {\n return text.match(this.pattern) || [];\n }\n}\n\n\n/**\n * Splits text based on digits.\n * @extends PreTokenizer\n */\nclass DigitsPreTokenizer extends PreTokenizer {\n /**\n * @param {Object} config The configuration options for the pre-tokenizer.\n * @param {boolean} config.individual_digits Whether to split on individual digits.\n */\n constructor(config) {\n super();\n this.config = config;\n\n // Construct a pattern which matches the rust implementation:\n const digit_pattern = `[^\\\\d]+|\\\\d${this.config.individual_digits ? 
'' : '+'}`;\n this.pattern = new RegExp(digit_pattern, 'gu');\n }\n\n /**\n * Tokenizes text by splitting it using the given pattern.\n * @param {string} text The text to tokenize.\n * @param {Object} [options] Additional options for the pre-tokenization logic.\n * @returns {string[]} An array of tokens.\n */\n pre_tokenize_text(text, options) {\n return text.match(this.pattern) || [];\n }\n}\n\n/**\n * @typedef {Object} PostProcessedOutput\n * @property {string[]} tokens List of token produced by the post-processor.\n * @property {number[]} [token_type_ids] List of token type ids produced by the post-processor.\n */\n\n\n/**\n * @typedef {Object} EncodingSingle\n * @property {number[]} input_ids List of token ids to be fed to a model.\n * @property {number[]} attention_mask List of token type ids to be fed to a model\n * @property {number[]} [token_type_ids] List of indices specifying which tokens should be attended to by the model\n */\n\n\n/**\n * @extends Callable\n */\nclass PostProcessor extends Callable {\n\n /**\n * @param {Object} config The configuration for the post-processor.\n */\n constructor(config) {\n super();\n this.config = config;\n }\n\n /**\n * Factory method to create a PostProcessor object from a configuration object.\n *\n * @param {Object} config Configuration object representing a PostProcessor.\n * @returns {PostProcessor} A PostProcessor object created from the given configuration.\n * @throws {Error} If an unknown PostProcessor type is encountered.\n */\n static fromConfig(config) {\n if (config === null) return null;\n switch (config.type) {\n case 'TemplateProcessing':\n return new TemplateProcessing(config);\n\n case 'ByteLevel':\n return new ByteLevelPostProcessor(config);\n\n case 'RobertaProcessing':\n return new RobertaProcessing(config);\n case 'BertProcessing':\n return new BertProcessing(config);\n\n case 'Sequence':\n return new PostProcessorSequence(config);\n default:\n throw new Error(`Unknown PostProcessor type: ${config.type}`);\n }\n }\n\n /**\n * Method to be implemented in subclass to apply post-processing on the given tokens.\n *\n * @param {Array} tokens The input tokens to be post-processed.\n * @param {...*} args Additional arguments required by the post-processing logic.\n * @returns {PostProcessedOutput} The post-processed tokens.\n * @throws {Error} If the method is not implemented in subclass.\n */\n post_process(tokens, ...args) {\n throw Error(\"post_process should be implemented in subclass.\")\n }\n\n /**\n * Alias for {@link PostProcessor#post_process}.\n * @param {Array} tokens The text or array of texts to post-process.\n * @param {...*} args Additional arguments required by the post-processing logic.\n * @returns {PostProcessedOutput} The post-processed tokens.\n */\n _call(tokens, ...args) {\n return this.post_process(tokens, ...args);\n }\n}\n\n/**\n * A post-processor that adds special tokens to the beginning and end of the input.\n */\nclass BertProcessing extends PostProcessor {\n /**\n * @param {Object} config The configuration for the post-processor.\n * @param {string[]} config.cls The special tokens to add to the beginning of the input.\n * @param {string[]} config.sep The special tokens to add to the end of the input.\n */\n constructor(config) {\n super(config);\n // TODO use all of config: add_prefix_space, trim_offsets\n\n this.cls = config.cls[0];\n this.sep = config.sep[0];\n }\n\n /**\n * Adds the special tokens to the beginning and end of the input.\n * @param {string[]} tokens The input tokens.\n * @param 
{string[]} [tokens_pair=null] An optional second set of input tokens.\n * @returns {PostProcessedOutput} The post-processed tokens with the special tokens added to the beginning and end.\n */\n post_process(tokens, tokens_pair = null, {\n add_special_tokens = true,\n } = {}) {\n if (add_special_tokens) {\n tokens = mergeArrays([this.cls], tokens, [this.sep]);\n }\n\n let token_type_ids = new Array(tokens.length).fill(0);\n if (tokens_pair !== null) {\n // NOTE: It is intended to add 2 EOS tokens after the first set of tokens\n // https://github.com/huggingface/tokenizers/issues/983\n const middle = (add_special_tokens && this instanceof RobertaProcessing)\n ? [this.sep]\n : [];\n const after = add_special_tokens ? [this.sep] : [];\n\n tokens = mergeArrays(tokens, middle, tokens_pair, after);\n token_type_ids = mergeArrays(token_type_ids, new Array(tokens_pair.length + middle.length + after.length).fill(1));\n }\n return { tokens, token_type_ids };\n }\n}\nclass RobertaProcessing extends BertProcessing { } // NOTE: extends BertProcessing\n\n/**\n * Post processor that replaces special tokens in a template with actual tokens.\n * @extends PostProcessor\n */\nclass TemplateProcessing extends PostProcessor {\n /**\n * Creates a new instance of `TemplateProcessing`.\n * @param {Object} config The configuration options for the post processor.\n * @param {Array} config.single The template for a single sequence of tokens.\n * @param {Array} config.pair The template for a pair of sequences of tokens.\n */\n constructor(config) {\n super(config);\n\n this.single = config.single;\n this.pair = config.pair;\n }\n\n /**\n * Replaces special tokens in the template with actual tokens.\n * @param {string[]} tokens The list of tokens for the first sequence.\n * @param {string[]} [tokens_pair=null] The list of tokens for the second sequence (optional).\n * @returns {PostProcessedOutput} An object containing the list of tokens with the special tokens replaced with actual tokens.\n */\n post_process(tokens, tokens_pair = null, {\n add_special_tokens = true,\n } = {}) {\n const type = tokens_pair === null ? 
this.single : this.pair\n\n let processedTokens = [];\n let types = [];\n for (const item of type) {\n if ('SpecialToken' in item) {\n if (add_special_tokens) {\n processedTokens.push(item.SpecialToken.id);\n types.push(item.SpecialToken.type_id);\n }\n } else if ('Sequence' in item) {\n if (item.Sequence.id === 'A') {\n processedTokens = mergeArrays(processedTokens, tokens);\n types = mergeArrays(types, new Array(tokens.length).fill(item.Sequence.type_id));\n\n } else if (item.Sequence.id === 'B') {\n processedTokens = mergeArrays(processedTokens, tokens_pair);\n types = mergeArrays(types, new Array(tokens_pair.length).fill(item.Sequence.type_id));\n }\n }\n }\n return { tokens: processedTokens, token_type_ids: types };\n }\n}\n\n/**\n * A PostProcessor that returns the given tokens as is.\n * @extends PostProcessor\n */\nclass ByteLevelPostProcessor extends PostProcessor {\n /**\n * Post process the given tokens.\n * @param {string[]} tokens The list of tokens for the first sequence.\n * @param {string[]} [tokens_pair=null] The list of tokens for the second sequence (optional).\n * @returns {PostProcessedOutput} An object containing the post-processed tokens.\n */\n post_process(tokens, tokens_pair = null) {\n if (tokens_pair) {\n tokens = mergeArrays(tokens, tokens_pair);\n }\n return { tokens };\n }\n}\n\n\n/**\n * A post-processor that applies multiple post-processors in sequence.\n */\nclass PostProcessorSequence extends PostProcessor {\n\n /**\n * Creates a new instance of PostProcessorSequence.\n * @param {Object} config The configuration object.\n * @param {Object[]} config.processors The list of post-processors to apply.\n */\n constructor(config) {\n super(config);\n\n this.processors = config.processors.map(x => PostProcessor.fromConfig(x));\n }\n\n /**\n * Post process the given tokens.\n * @param {string[]} tokens The list of tokens for the first sequence.\n * @param {string[]} [tokens_pair=null] The list of tokens for the second sequence (optional).\n * @returns {PostProcessedOutput} An object containing the post-processed tokens.\n */\n post_process(tokens, tokens_pair = null, options = {}) {\n let token_type_ids;\n for (const processor of this.processors) {\n if (processor instanceof ByteLevelPostProcessor) {\n // Special case where we need to pass the tokens_pair to the post-processor\n const output = processor.post_process(tokens);\n tokens = output.tokens;\n if (tokens_pair) {\n const pair_output = processor.post_process(tokens_pair);\n tokens_pair = pair_output.tokens;\n }\n } else {\n const output = processor.post_process(tokens, tokens_pair, options);\n tokens = output.tokens;\n token_type_ids = output.token_type_ids;\n }\n }\n return { tokens, token_type_ids };\n }\n}\n\n/**\n * The base class for token decoders.\n * @extends Callable\n */\nclass Decoder extends Callable {\n\n /**\n * Creates an instance of `Decoder`.\n *\n * @param {Object} config The configuration object.\n */\n constructor(config) {\n super();\n this.config = config;\n\n /** @type {AddedToken[]} */\n this.added_tokens = [];\n this.end_of_word_suffix = null;\n this.trim_offsets = config.trim_offsets;\n }\n\n /**\n * Creates a decoder instance based on the provided configuration.\n *\n * @param {Object} config The configuration object.\n * @returns {Decoder} A decoder instance.\n * @throws {Error} If an unknown decoder type is provided.\n */\n static fromConfig(config) {\n if (config === null) return null;\n switch (config.type) {\n case 'WordPiece':\n return new WordPieceDecoder(config);\n case 
'Metaspace':\n return new MetaspaceDecoder(config);\n case 'ByteLevel':\n return new ByteLevelDecoder(config);\n\n case 'Replace':\n return new ReplaceDecoder(config);\n case 'ByteFallback':\n return new ByteFallback(config);\n case 'Fuse':\n return new FuseDecoder(config);\n case 'Strip':\n return new StripDecoder(config);\n\n case 'Sequence':\n return new DecoderSequence(config);\n\n case 'CTC':\n return new CTCDecoder(config);\n case 'BPEDecoder':\n return new BPEDecoder(config);\n default:\n throw new Error(`Unknown Decoder type: ${config.type}`);\n }\n }\n\n /**\n * Calls the `decode` method.\n *\n * @param {string[]} tokens The list of tokens.\n * @returns {string} The decoded string.\n */\n _call(tokens) {\n return this.decode(tokens);\n }\n\n /**\n * Decodes a list of tokens.\n * @param {string[]} tokens The list of tokens.\n * @returns {string} The decoded string.\n */\n decode(tokens) {\n return this.decode_chain(tokens).join('');\n }\n\n /**\n * Apply the decoder to a list of tokens.\n * \n * @param {string[]} tokens The list of tokens.\n * @returns {string[]} The decoded list of tokens.\n * @throws {Error} If the `decode_chain` method is not implemented in the subclass.\n */\n decode_chain(tokens) {\n throw Error(\"`decode_chain` should be implemented in subclass.\")\n }\n\n}\n\nclass ReplaceDecoder extends Decoder {\n\n /** @type {Decoder['decode_chain']} */\n decode_chain(tokens) {\n const pattern = createPattern(this.config.pattern);\n return pattern === null\n ? tokens\n : tokens.map(token => token.replaceAll(pattern, this.config.content))\n }\n}\n\n\nclass ByteFallback extends Decoder {\n constructor(config) {\n super(config);\n\n this.text_decoder = new TextDecoder();\n }\n\n /** @type {Decoder['decode_chain']} */\n decode_chain(tokens) {\n\n const new_tokens = [];\n let previous_byte_tokens = [];\n\n for (const token of tokens) {\n let bytes = null;\n if (token.length === 6 && token.startsWith('<0x') && token.endsWith('>')) {\n const byte = parseInt(token.slice(3, 5), 16);\n if (!isNaN(byte)) {\n bytes = byte;\n }\n }\n if (bytes !== null) {\n previous_byte_tokens.push(bytes);\n } else {\n if (previous_byte_tokens.length > 0) {\n const string = this.text_decoder.decode(Uint8Array.from(previous_byte_tokens));\n new_tokens.push(string);\n previous_byte_tokens = [];\n }\n new_tokens.push(token);\n }\n }\n if (previous_byte_tokens.length > 0) {\n const string = this.text_decoder.decode(Uint8Array.from(previous_byte_tokens));\n new_tokens.push(string);\n previous_byte_tokens = [];\n }\n\n return new_tokens;\n }\n}\n\n/**\n * Fuse simply fuses all tokens into one big string.\n * It's usually the last decoding step anyway, but this decoder\n * exists incase some decoders need to happen after that step\n */\nclass FuseDecoder extends Decoder {\n\n /** @type {Decoder['decode_chain']} */\n decode_chain(tokens) {\n return [tokens.join('')];\n }\n}\n\n\nclass StripDecoder extends Decoder {\n constructor(config) {\n super(config);\n\n this.content = this.config.content;\n this.start = this.config.start;\n this.stop = this.config.stop;\n }\n\n /** @type {Decoder['decode_chain']} */\n decode_chain(tokens) {\n return tokens.map(token => {\n let start_cut = 0;\n for (let i = 0; i < this.start; ++i) {\n if (token[i] === this.content) {\n start_cut = i + 1;\n continue;\n } else {\n break;\n }\n }\n\n let stop_cut = token.length;\n for (let i = 0; i < this.stop; ++i) {\n const index = token.length - i - 1;\n if (token[index] === this.content) {\n stop_cut = index;\n continue;\n } else {\n 
break;\n }\n }\n\n return token.slice(start_cut, stop_cut)\n });\n }\n}\n\n/**\n * A decoder that decodes a list of WordPiece tokens into a single string.\n * @extends Decoder\n */\nclass WordPieceDecoder extends Decoder {\n\n /**\n * Creates a new instance of WordPieceDecoder.\n * @param {Object} config The configuration object.\n * @param {string} config.prefix The prefix used for WordPiece encoding.\n * @param {boolean} config.cleanup Whether to cleanup the decoded string.\n */\n constructor(config) {\n super(config);\n this.cleanup = config.cleanup;\n }\n\n /** @type {Decoder['decode_chain']} */\n decode_chain(tokens) {\n return tokens.map((token, i) => {\n if (i !== 0) {\n if (token.startsWith(this.config.prefix)) {\n // NOTE: .replace() is intended; only replace first occurrence\n token = token.replace(this.config.prefix, '');\n } else {\n token = ' ' + token;\n }\n }\n if (this.cleanup) {\n token = clean_up_tokenization(token)\n }\n\n return token;\n });\n }\n}\n\n/**\n * Byte-level decoder for tokenization output. Inherits from the `Decoder` class.\n * @extends Decoder\n */\nclass ByteLevelDecoder extends Decoder {\n\n /**\n * Create a `ByteLevelDecoder` object.\n * @param {Object} config Configuration object.\n */\n constructor(config) {\n super(config);\n\n this.byte_decoder = UNICODE_TO_BYTES;\n this.text_decoder = new TextDecoder(\"utf-8\", {\n fatal: false,\n ignoreBOM: true,\n });\n\n this.end_of_word_suffix = null;\n }\n\n /**\n * Convert an array of tokens to string by decoding each byte.\n * @param {string[]} tokens Array of tokens to be decoded.\n * @returns {string} The decoded string.\n */\n convert_tokens_to_string(tokens) {\n const text = tokens.join('');\n const byteArray = new Uint8Array([...text].map(c => this.byte_decoder[c]));\n const decoded_text = this.text_decoder.decode(byteArray);\n return decoded_text;\n }\n\n /** @type {Decoder['decode_chain']} */\n decode_chain(tokens) {\n // TODO move to base class (like HF)\n // tokens === filtered_tokens\n\n // To avoid mixing byte-level and unicode for byte-level BPT\n // we need to build string separately for added tokens and byte-level tokens\n // cf. 
https://github.com/huggingface/transformers/issues/1133\n const sub_texts = [];\n let current_sub_text = [];\n for (const token of tokens) {\n // tokens sent here are already filtered, so we don't need to do this\n // if (skip_special_tokens && this.all_special_ids.includes(token)) {\n // continue;\n // }\n\n if (this.added_tokens.find(x => x.content === token) !== undefined) {\n if (current_sub_text.length > 0) {\n sub_texts.push(this.convert_tokens_to_string(current_sub_text));\n current_sub_text = [];\n }\n sub_texts.push(token);\n } else {\n current_sub_text.push(token);\n }\n }\n if (current_sub_text.length > 0) {\n sub_texts.push(this.convert_tokens_to_string(current_sub_text));\n }\n\n // TODO add spaces_between_special_tokens and clean_up_tokenization_spaces options\n\n return sub_texts;\n }\n}\n\n/**\n * The CTC (Connectionist Temporal Classification) decoder.\n * See https://github.com/huggingface/tokenizers/blob/bb38f390a61883fc2f29d659af696f428d1cda6b/tokenizers/src/decoders/ctc.rs\n */\nclass CTCDecoder extends Decoder {\n\n constructor(config) {\n super(config);\n\n this.pad_token = this.config.pad_token;\n this.word_delimiter_token = this.config.word_delimiter_token;\n this.cleanup = this.config.cleanup;\n }\n /**\n * Converts a connectionist-temporal-classification (CTC) output tokens into a single string.\n * @param {string[]} tokens Array of tokens to be decoded.\n * @returns {string} The decoded string.\n */\n convert_tokens_to_string(tokens) {\n if (tokens.length === 0) return '';\n\n // group same tokens into non-repeating tokens in CTC style decoding\n const grouped_tokens = [tokens[0]];\n for (let i = 1; i < tokens.length; ++i) {\n if (tokens[i] !== grouped_tokens.at(-1)) {\n grouped_tokens.push(tokens[i]);\n }\n }\n\n // filter self.pad_token which is used as CTC-blank token\n const filtered_tokens = grouped_tokens.filter(token => token !== this.pad_token);\n\n let text = filtered_tokens.join('');\n if (this.cleanup) {\n // cleanup and replace delimiter token\n text = clean_up_tokenization(text)\n .replaceAll(this.word_delimiter_token, ' ')\n .trim();\n }\n return text;\n }\n\n\n /** @type {Decoder['decode_chain']} */\n decode_chain(tokens) {\n return [this.convert_tokens_to_string(tokens)];\n }\n}\n\n/**\n * Apply a sequence of decoders.\n * @extends Decoder\n */\nclass DecoderSequence extends Decoder {\n\n /**\n * Creates a new instance of DecoderSequence.\n * @param {Object} config The configuration object.\n * @param {Object[]} config.decoders The list of decoders to apply.\n */\n constructor(config) {\n super(config);\n this.decoders = config.decoders.map(x => Decoder.fromConfig(x));\n }\n\n /** @type {Decoder['decode_chain']} */\n decode_chain(tokens) {\n // Use reduce to apply each decoder to the tokens\n return this.decoders.reduce((toks, decoder) => {\n return decoder.decode_chain(toks);\n }, tokens);\n }\n\n}\n\nclass BPEDecoder extends Decoder {\n constructor(config) {\n super(config);\n\n this.suffix = this.config.suffix;\n }\n /** @type {Decoder['decode_chain']} */\n decode_chain(tokens) {\n return tokens.map((token, i) => {\n return token.replaceAll(this.suffix, (i === tokens.length - 1) ? 
'' : ' ')\n });\n }\n}\n\n// Custom decoder for VITS\nclass VitsDecoder extends Decoder {\n /** @type {Decoder['decode_chain']} */\n decode_chain(tokens) {\n let decoded = '';\n for (let i = 1; i < tokens.length; i += 2) {\n decoded += tokens[i];\n }\n return [decoded];\n }\n}\n\n\n/**\n * This PreTokenizer replaces spaces with the given replacement character, adds a prefix space if requested,\n * and returns a list of tokens.\n * @extends PreTokenizer\n */\nclass MetaspacePreTokenizer extends PreTokenizer {\n /**\n * @param {Object} config The configuration object for the MetaspacePreTokenizer.\n * @param {boolean} config.add_prefix_space Whether to add a prefix space to the first token.\n * @param {string} config.replacement The character to replace spaces with.\n * @param {string} [config.str_rep=config.replacement] An optional string representation of the replacement character.\n * @param {'first'|'never'|'always'} [config.prepend_scheme='always'] The metaspace prepending scheme.\n */\n constructor(config) {\n super();\n\n this.addPrefixSpace = config.add_prefix_space;\n this.replacement = config.replacement;\n this.strRep = config.str_rep || this.replacement;\n this.prepend_scheme = config.prepend_scheme ?? 'always';\n }\n\n /**\n * This method takes a string, replaces spaces with the replacement character,\n * adds a prefix space if requested, and returns a new list of tokens.\n * @param {string} text The text to pre-tokenize.\n * @param {Object} [options] The options for the pre-tokenization.\n * @param {number} [options.section_index] The index of the section to pre-tokenize.\n * @returns {string[]} A new list of pre-tokenized tokens.\n */\n pre_tokenize_text(text, {\n section_index = undefined,\n } = {}) {\n\n let normalized = text.replaceAll(' ', this.strRep);\n\n if (\n // We add a prefix space if:\n // (1) The addPrefixSpace option is enabled and the normalized\n // token does not already start with the replacement character.\n (this.addPrefixSpace && !normalized.startsWith(this.replacement))\n\n // and (2) either:\n // (a) prepend_scheme is 'always'\n // (b) prepend_scheme is 'first' and this is the first section\n && (\n this.prepend_scheme === 'always' ||\n (this.prepend_scheme === 'first' && section_index === 0)\n )\n ) {\n normalized = this.strRep + normalized;\n }\n return [normalized];\n }\n}\n\n/**\n * MetaspaceDecoder class extends the Decoder class and decodes Metaspace tokenization.\n * @extends Decoder\n */\nclass MetaspaceDecoder extends Decoder {\n /**\n * Constructs a new MetaspaceDecoder object.\n * @param {Object} config The configuration object for the MetaspaceDecoder.\n * @param {boolean} config.add_prefix_space Whether to add a prefix space to the decoded string.\n * @param {string} config.replacement The string to replace spaces with.\n */\n constructor(config) {\n super(config);\n\n this.addPrefixSpace = config.add_prefix_space;\n this.replacement = config.replacement;\n }\n\n /** @type {Decoder['decode_chain']} */\n decode_chain(tokens) {\n const result = [];\n for (let i = 0; i < tokens.length; ++i) {\n let normalized = tokens[i].replaceAll(this.replacement, ' ');\n if (this.addPrefixSpace && i == 0 && normalized.startsWith(' ')) {\n normalized = normalized.substring(1);\n }\n result.push(normalized);\n }\n return result;\n }\n}\n\n/**\n * A normalizer that applies a precompiled charsmap.\n * This is useful for applying complex normalizations in C++ and exposing them to JavaScript.\n * @extends Normalizer\n * @param {Object} config The configuration 
object for the Precompiled normalizer.\n * @param {Object} config.precompiled_charsmap The precompiled charsmap object.\n */\nclass Precompiled extends Normalizer {\n /**\n * Create a new instance of Precompiled normalizer.\n * @param {Object} config The configuration object.\n * @param {any} config.precompiled_charsmap Precompiled chars mapping.\n */\n constructor(config) {\n super(config);\n this.charsmap = config.precompiled_charsmap;\n }\n\n /**\n * Normalizes the given text by applying the precompiled charsmap.\n * @param {string} text The text to normalize.\n * @returns {string} The normalized text.\n */\n normalize(text) {\n // As stated in the sentencepiece normalization docs (https://github.com/google/sentencepiece/blob/master/doc/normalization.md#use-pre-defined-normalization-rule),\n // there are 5 pre-defined normalization rules:\n // 1. nmt_nfkc: NFKC normalization with some additional normalization around spaces. (default)\n // 2. nfkc: original NFKC normalization.\n // 3. nmt_nfkc_cf: nmt_nfkc + Unicode case folding (mostly lower casing)\n // 4. nfkc_cf: nfkc + Unicode case folding.\n // 5. identity: no normalization\n // \n // For now, we only implement the default (nmt_nfkc).\n // See https://raw.githubusercontent.com/google/sentencepiece/master/data/nmt_nfkc.tsv for the full list of rules.\n // TODO: detect when a different `this.charsmap` is used.\n\n text = text.replace(/[\\u0001-\\u0008\\u000B\\u000E-\\u001F\\u007F\\u008F\\u009F]/gm, ''); // Remove control characters\n text = text.replace(/[\\u0009\\u000A\\u000C\\u000D\\u1680\\u200B\\u200C\\u200E\\u200F\\u2028\\u2029\\u2581\\uFEFF\\uFFFD]/gm, '\\u0020'); // Replace certain characters with a space\n\n if (text.includes('\\uFF5E')) {\n // To match the sentencepiece implementation 100%, we must handle a very strange edge-case.\n // For some reason, the \"Fullwidth Tilde\" character (\\uFF5E) should not be converted to the standard Tilde character (\\u007E).\n // However, NFKC normalization does do this conversion. 
As a result, we split the string on the Fullwidth Tilde character,\n // perform NFKC normalization on each substring, and then join them back together with the Fullwidth Tilde character.\n const parts = text.split('\\uFF5E');\n text = parts.map(part => part.normalize('NFKC')).join('\\uFF5E');\n } else {\n text = text.normalize('NFKC');\n }\n\n return text;\n }\n}\n\n/**\n * A pre-tokenizer that applies a sequence of pre-tokenizers to the input text.\n * @extends PreTokenizer\n */\nclass PreTokenizerSequence extends PreTokenizer {\n /**\n * Creates an instance of PreTokenizerSequence.\n * @param {Object} config The configuration object for the pre-tokenizer sequence.\n * @param {Object[]} config.pretokenizers An array of pre-tokenizer configurations.\n */\n constructor(config) {\n super();\n this.tokenizers = config.pretokenizers.map(x => PreTokenizer.fromConfig(x));\n }\n\n /**\n * Applies each pre-tokenizer in the sequence to the input text in turn.\n * @param {string} text The text to pre-tokenize.\n * @param {Object} [options] Additional options for the pre-tokenization logic.\n * @returns {string[]} The pre-tokenized text.\n */\n pre_tokenize_text(text, options) {\n // Use reduce to apply each tokenizer to the text\n return this.tokenizers.reduce((preTokenizedText, tokenizer) => {\n return tokenizer.pre_tokenize(preTokenizedText, options);\n }, [text]);\n }\n}\n\n/**\n * Splits on word boundaries (using the following regular expression: `\\w+|[^\\w\\s]+`).\n */\nclass WhitespacePreTokenizer extends PreTokenizer {\n /**\n * Creates an instance of WhitespacePreTokenizer.\n * @param {Object} config The configuration object for the pre-tokenizer.\n */\n constructor(config) {\n super();\n }\n /**\n * Pre-tokenizes the input text by splitting it on word boundaries.\n * @param {string} text The text to be pre-tokenized.\n * @param {Object} [options] Additional options for the pre-tokenization logic.\n * @returns {string[]} An array of tokens produced by splitting the input text on whitespace.\n */\n pre_tokenize_text(text, options) {\n return text.match(/\\w+|[^\\w\\s]+/g) || [];\n }\n}\n\n/**\n * Splits a string of text by whitespace characters into individual tokens.\n * @extends PreTokenizer\n */\nclass WhitespaceSplit extends PreTokenizer {\n /**\n * Creates an instance of WhitespaceSplit.\n * @param {Object} config The configuration object for the pre-tokenizer.\n */\n constructor(config) {\n super();\n }\n /**\n * Pre-tokenizes the input text by splitting it on whitespace characters.\n * @param {string} text The text to be pre-tokenized.\n * @param {Object} [options] Additional options for the pre-tokenization logic.\n * @returns {string[]} An array of tokens produced by splitting the input text on whitespace.\n */\n pre_tokenize_text(text, options) {\n return whitespace_split(text);\n }\n}\n\n// NOTE: `ReplacePreTokenizer` is custom (to support `BlenderbotSmallTokenizer`)\nclass ReplacePreTokenizer extends PreTokenizer {\n /**\n * @param {Object} config The configuration options for the pre-tokenizer.\n * @param {Object} config.pattern The pattern used to split the text. 
Can be a string or a regex object.\n * @param {string} config.content What to replace the pattern with.\n */\n constructor(config) {\n super();\n this.config = config;\n this.pattern = createPattern(this.config.pattern);\n this.content = this.config.content;\n }\n\n /**\n * Pre-tokenizes the input text by replacing certain characters.\n * @param {string} text The text to be pre-tokenized.\n * @param {Object} [options] Additional options for the pre-tokenization logic.\n * @returns {string[]} An array of tokens produced by replacing certain characters.\n */\n pre_tokenize_text(text, options) {\n if (this.pattern === null) {\n return [text];\n }\n return [text.replaceAll(this.pattern, this.config.content)];\n }\n}\n\nconst SPECIAL_TOKEN_ATTRIBUTES = [\n 'bos_token',\n 'eos_token',\n 'unk_token',\n 'sep_token',\n 'pad_token',\n 'cls_token',\n 'mask_token',\n // additional_special_tokens (TODO)\n]\n\n/**\n * \n * Helper function for padding values of an object, which are each arrays.\n * NOTE: No additional checks are made here for validity of arguments.\n * @param {Record<string, any[]>} item The input object.\n * @param {number} length The length to pad to.\n * @param {(key: string) => any} value_fn Determine the value to fill the array, based on its key.\n * @param {'right'|'left'} side Which side to pad the array.\n * @private\n */\nfunction padHelper(item, length, value_fn, side) {\n for (const key of Object.keys(item)) {\n const diff = length - item[key].length;\n const value = value_fn(key);\n\n const padData = new Array(diff).fill(value);\n item[key] = side === 'right'\n ? mergeArrays(item[key], padData)\n : mergeArrays(padData, item[key]);\n }\n}\n\n/**\n * Helper function for truncating values of an object, which are each arrays.\n * NOTE: No additional checks are made here for validity of arguments.\n * @param {Record<string, any[]>} item The input object.\n * @param {number} length The length to truncate to.\n * @private\n */\nfunction truncateHelper(item, length) {\n // Setting .length to a lower value truncates the array in-place:\n // https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Array/length\n for (const key of Object.keys(item)) {\n item[key].length = length;\n }\n}\n\n\n/**\n * @typedef {Object} Message\n * @property {string} role The role of the message (e.g., \"user\" or \"assistant\" or \"system\").\n * @property {string} content The content of the message.\n */\n\nexport class PreTrainedTokenizer extends Callable {\n return_token_type_ids = false;\n\n _default_chat_template = `{% for message in messages %}{{'<|im_start|>' + message['role'] + '\\n' + message['content'] + '<|im_end|>' + '\\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\\n' }}{% endif %}`;\n\n /**\n * Create a new PreTrainedTokenizer instance.\n * @param {Object} tokenizerJSON The JSON of the tokenizer.\n * @param {Object} tokenizerConfig The config of the tokenizer.\n */\n constructor(tokenizerJSON, tokenizerConfig) {\n super();\n\n this._tokenizer_config = tokenizerConfig;\n\n // Construct parts of the tokenizer from the JSON\n this.normalizer = Normalizer.fromConfig(tokenizerJSON.normalizer);\n this.pre_tokenizer = PreTokenizer.fromConfig(tokenizerJSON.pre_tokenizer);\n this.model = TokenizerModel.fromConfig(tokenizerJSON.model, tokenizerConfig);\n this.post_processor = PostProcessor.fromConfig(tokenizerJSON.post_processor);\n this.decoder = Decoder.fromConfig(tokenizerJSON.decoder);\n\n // Add added_tokens to model\n this.special_tokens = [];\n 
this.all_special_ids = [];\n\n /** @type {AddedToken[]} */\n this.added_tokens = [];\n for (const addedToken of tokenizerJSON.added_tokens) {\n const token = new AddedToken(addedToken);\n this.added_tokens.push(token);\n\n this.model.tokens_to_ids.set(token.content, token.id);\n this.model.vocab[token.id] = token.content;\n\n if (token.special) {\n this.special_tokens.push(token.content);\n this.all_special_ids.push(token.id);\n }\n }\n\n // Update additional_special_tokens\n this.additional_special_tokens = tokenizerConfig.additional_special_tokens ?? [];\n this.special_tokens.push(...this.additional_special_tokens);\n this.special_tokens = [...new Set(this.special_tokens)]; // Remove duplicates\n\n if (this.decoder) {\n // Slight hack, but it prevents code duplication:\n this.decoder.added_tokens = this.added_tokens;\n\n // Another slight hack to add `end_of_word_suffix` (if present) to the decoder\n // This is needed for cases where BPE model and ByteLevel decoder are used\n // For more information, see https://github.com/xenova/transformers.js/issues/74\n // TODO: save this to the decoder when exporting?\n this.decoder.end_of_word_suffix = this.model.end_of_word_suffix;\n }\n\n\n this.added_tokens_regex = this.added_tokens.length > 0 ? new RegExp(\n this.added_tokens.map(x => `${x.lstrip ? '\\\\s*' : ''}(${escapeRegExp(x.content)})${x.rstrip ? '\\\\s*' : ''}`).join('|')\n ) : null;\n\n // Set mask token if present (otherwise will be undefined, which is fine)\n this.mask_token = this.getToken('mask_token');\n this.mask_token_id = this.model.tokens_to_ids.get(this.mask_token);\n\n this.pad_token = this.getToken('pad_token', 'eos_token');\n this.pad_token_id = this.model.tokens_to_ids.get(this.pad_token);\n\n this.sep_token = this.getToken('sep_token');\n this.sep_token_id = this.model.tokens_to_ids.get(this.sep_token);\n\n this.unk_token = this.getToken('unk_token');\n this.unk_token_id = this.model.tokens_to_ids.get(this.unk_token);\n\n this.model_max_length = tokenizerConfig.model_max_length;\n\n /** @type {boolean} Whether or not to strip the text when tokenizing (removing excess spaces before and after the string). */\n this.remove_space = tokenizerConfig.remove_space;\n\n this.clean_up_tokenization_spaces = tokenizerConfig.clean_up_tokenization_spaces ?? true;\n this.do_lowercase_and_remove_accent = tokenizerConfig.do_lowercase_and_remove_accent ?? false;\n\n // TODO allow user to change this\n /** @type {'right'|'left'} */\n this.padding_side = 'right';\n\n this.legacy = false;\n\n this.chat_template = tokenizerConfig.chat_template ?? 
null;\n if (Array.isArray(this.chat_template)) {\n // Chat templates are stored as lists of dicts with fixed key names,\n // we reconstruct that into a single dict while loading them.\n const chat_template = Object.create(null);\n for (const { name, template } of this.chat_template) {\n if (typeof name !== 'string' || typeof template !== 'string') {\n throw new Error('Chat template must be a list of objects with \"name\" and \"template\" properties');\n }\n chat_template[name] = template;\n }\n this.chat_template = chat_template;\n }\n this._compiled_template_cache = new Map();\n }\n\n /**\n * Returns the value of the first matching key in the tokenizer config object.\n * @param {...string} keys One or more keys to search for in the tokenizer config object.\n * @returns {string|null} The value associated with the first matching key, or null if no match is found.\n * @throws {Error} If an object is found for a matching key and its __type property is not \"AddedToken\".\n */\n getToken(...keys) {\n for (const key of keys) {\n const item = this._tokenizer_config[key];\n\n if (!item) continue;\n\n if (typeof item === 'object') {\n if (item.__type === 'AddedToken') {\n return item.content;\n } else {\n throw Error(`Unknown token: ${item}`);\n }\n } else {\n return item;\n }\n }\n return null;\n }\n\n /**\n * Loads a pre-trained tokenizer from the given `pretrained_model_name_or_path`. \n * \n * @param {string} pretrained_model_name_or_path The path to the pre-trained tokenizer.\n * @param {PretrainedTokenizerOptions} options Additional options for loading the tokenizer.\n * \n * @throws {Error} Throws an error if the tokenizer.json or tokenizer_config.json files are not found in the `pretrained_model_name_or_path`.\n * @returns {Promise<PreTrainedTokenizer>} A new instance of the `PreTrainedTokenizer` class.\n */\n static async from_pretrained(pretrained_model_name_or_path, {\n progress_callback = null,\n config = null,\n cache_dir = null,\n local_files_only = false,\n revision = 'main',\n legacy = null,\n } = {}) {\n\n const info = await loadTokenizer(pretrained_model_name_or_path, {\n progress_callback,\n config,\n cache_dir,\n local_files_only,\n revision,\n legacy,\n })\n\n // @ts-ignore\n return new this(...info);\n }\n\n /**\n * @typedef {number[]|number[][]|Tensor} BatchEncodingItem\n * \n * @typedef {Object} BatchEncoding Holds the output of the tokenizer's call function.\n * @property {BatchEncodingItem} input_ids List of token ids to be fed to a model.\n * @property {BatchEncodingItem} attention_mask List of indices specifying which tokens should be attended to by the model.\n * @property {BatchEncodingItem} [token_type_ids] List of token type ids to be fed to a model.\n */\n\n /**\n * Encode/tokenize the given text(s).\n * @param {string|string[]} text The text to tokenize.\n * @param {Object} options An optional object containing the following properties:\n * @param {string|string[]} [options.text_pair=null] Optional second sequence to be encoded. 
If set, must be the same type as text.\n * @param {boolean|'max_length'} [options.padding=false] Whether to pad the input sequences.\n * @param {boolean} [options.add_special_tokens=true] Whether or not to add the special tokens associated with the corresponding model.\n * @param {boolean} [options.truncation=null] Whether to truncate the input sequences.\n * @param {number} [options.max_length=null] Maximum length of the returned list and optionally padding length.\n * @param {boolean} [options.return_tensor=true] Whether to return the results as Tensors or arrays.\n * @param {boolean} [options.return_token_type_ids=null] Whether to return the token type ids.\n * @returns {BatchEncoding} Object to be passed to the model.\n */\n _call(\n // Required positional arguments\n text,\n\n // Optional keyword arguments\n {\n text_pair = null,\n add_special_tokens = true,\n padding = false,\n truncation = null,\n max_length = null,\n return_tensor = true, // Different to HF\n return_token_type_ids = null,\n } = {},\n ) {\n\n const isBatched = Array.isArray(text);\n\n /** @type {EncodingSingle[]} */\n let encodedTokens;\n\n if (isBatched) {\n if (text.length === 0) {\n throw Error('text array must be non-empty')\n }\n\n if (text_pair !== null) {\n if (!Array.isArray(text_pair)) {\n throw Error('text_pair must also be an array')\n\n } else if (text.length !== text_pair.length) {\n throw Error('text and text_pair must have the same length')\n }\n\n encodedTokens = text.map(\n (t, i) => this._encode_plus(t, text_pair[i], { add_special_tokens, return_token_type_ids })\n )\n\n } else {\n encodedTokens = text.map(x => this._encode_plus(x, null, { add_special_tokens, return_token_type_ids }));\n }\n\n } else {\n if (text === null || text === undefined) {\n throw Error('text may not be null or undefined')\n }\n\n if (Array.isArray(text_pair)) {\n throw Error('When specifying `text_pair`, since `text` is a string, `text_pair` must also be a string (i.e., not an array).')\n }\n\n // For single input, we just wrap in an array, and then unwrap later.\n encodedTokens = [this._encode_plus(text, text_pair, { add_special_tokens, return_token_type_ids })];\n }\n // At this point, tokens is batched: [batch_size, tokens]\n // However, array may be jagged. So, we pad to max_length\n\n if (max_length === null) {\n if (padding === 'max_length') {\n max_length = this.model_max_length;\n } else {\n // Calculate max length from sequences\n max_length = max(encodedTokens.map(x => x.input_ids.length))[0];\n }\n } else {\n if (!truncation) {\n console.warn(`Truncation was not explicitly activated but \\`max_length\\` is provided a specific value, please use \\`truncation=true\\` to explicitly truncate examples to max length.`)\n }\n }\n\n // Ensure it is less than model max length\n max_length = Math.min(max_length, this.model_max_length)\n\n if (padding || truncation) {\n\n // Perform padding and/or truncation\n for (let i = 0; i < encodedTokens.length; ++i) {\n if (encodedTokens[i].input_ids.length === max_length) {\n continue;\n\n } else if (encodedTokens[i].input_ids.length > max_length) {\n // possibly truncate\n if (truncation) {\n truncateHelper(encodedTokens[i], max_length);\n }\n\n } else { // t.length < max_length\n // possibly pad\n if (padding) {\n padHelper(\n encodedTokens[i],\n max_length,\n key => key === 'input_ids' ? 
this.pad_token_id : 0,\n this.padding_side\n );\n }\n }\n }\n }\n\n const result = {};\n\n if (return_tensor) {\n if (!(padding && truncation)) {\n // Not, guaranteed that all items have same length, so\n // we perform additional check\n\n if (\n encodedTokens.some(x => {\n for (const key of Object.keys(x)) {\n if (x[key].length !== encodedTokens[0][key]?.length) {\n return true;\n }\n }\n return false;\n })\n ) {\n throw Error(\n \"Unable to create tensor, you should probably activate truncation and/or padding \" +\n \"with 'padding=true' and 'truncation=true' to have batched tensors with the same length.\"\n )\n }\n }\n\n // Now we actually convert to tensor\n // NOTE: In the same way as the python library, we return a batched tensor, regardless of\n // whether we have a single input or multiple inputs.\n const dims = [encodedTokens.length, encodedTokens[0].input_ids.length];\n\n for (const key of Object.keys(encodedTokens[0])) {\n result[key] = new Tensor('int64',\n BigInt64Array.from(encodedTokens.flatMap(x => x[key]).map(BigInt)),\n dims\n );\n }\n\n } else {\n for (const key of Object.keys(encodedTokens[0])) {\n result[key] = encodedTokens.map(x => x[key]);\n }\n\n // If not returning a tensor, we match the input type\n if (!isBatched) {\n // Input was not batched, so we unwrap\n for (const key of Object.keys(result)) {\n result[key] = result[key][0];\n }\n }\n }\n\n return /** @type {BatchEncoding} */(result);\n }\n\n /**\n * Encodes a single text using the preprocessor pipeline of the tokenizer.\n *\n * @param {string|null} text The text to encode.\n * @returns {string[]|null} The encoded tokens.\n */\n _encode_text(text) {\n if (text === null) return null;\n\n // Actual function which does encoding, for a single text\n // First, we take care of special tokens. Needed to avoid issues arising from\n // normalization and/or pretokenization (which may not preserve special tokens)\n const sections = this.added_tokens_regex ? text.split(this.added_tokens_regex).filter(x => x) : [text];\n\n const tokens = sections.map((x, section_index) => {\n const addedToken = this.added_tokens.find(t => t.content === x);\n if (addedToken !== undefined) {\n // Ignore added tokens\n return x\n } else {\n if (this.remove_space === true) {\n x = x.trim().split(/\\s+/).join(' ');\n }\n if (this.do_lowercase_and_remove_accent) {\n x = lowercase_and_remove_accent(x);\n }\n\n if (this.normalizer !== null) {\n x = this.normalizer(x);\n }\n\n // If, after normalization, this section is empty (e.g., trimming whitespace),\n // we return an empty array\n if (x.length === 0) {\n return [];\n }\n\n const sectionTokens = (this.pre_tokenizer !== null) ? 
this.pre_tokenizer(x, {\n section_index,\n }) : [x];\n\n const tokens = this.model(sectionTokens);\n\n return tokens;\n }\n }).flat();\n\n return tokens;\n }\n\n /**\n * Encodes a single text or a pair of texts using the model's tokenizer.\n *\n * @param {string} text The text to encode.\n * @param {string|null} text_pair The optional second text to encode.\n * @param {Object} options An optional object containing the following properties:\n * @param {boolean} [options.add_special_tokens=true] Whether or not to add the special tokens associated with the corresponding model.\n * @param {boolean} [options.return_token_type_ids=null] Whether to return token_type_ids.\n * @returns {EncodingSingle} An object containing the encoded text.\n * @private\n */\n _encode_plus(text, text_pair = null, {\n add_special_tokens = true,\n return_token_type_ids = null,\n } = {}) {\n // Function called by users to encode possibly multiple texts\n const tokens = this._encode_text(text);\n const tokens2 = this._encode_text(text_pair);\n\n const combinedTokens = this.post_processor\n ? this.post_processor(tokens, tokens2, { add_special_tokens })\n : { tokens: mergeArrays(tokens ?? [], tokens2 ?? []) };\n\n const input_ids = this.model.convert_tokens_to_ids(combinedTokens.tokens);\n\n const result = {\n input_ids,\n attention_mask: new Array(input_ids.length).fill(1),\n }\n if ((return_token_type_ids ?? this.return_token_type_ids) && combinedTokens.token_type_ids) {\n result.token_type_ids = combinedTokens.token_type_ids;\n }\n return result;\n }\n\n /**\n * Encodes a single text or a pair of texts using the model's tokenizer.\n *\n * @param {string} text The text to encode.\n * @param {string|null} text_pair The optional second text to encode.\n * @param {Object} options An optional object containing the following properties:\n * @param {boolean} [options.add_special_tokens=true] Whether or not to add the special tokens associated with the corresponding model.\n * @param {boolean} [options.return_token_type_ids=null] Whether to return token_type_ids.\n * @returns {number[]} An array of token IDs representing the encoded text(s).\n */\n encode(text, text_pair = null, {\n add_special_tokens = true,\n return_token_type_ids = null,\n } = {}) {\n const { input_ids } = this._encode_plus(text, text_pair, {\n add_special_tokens,\n return_token_type_ids,\n });\n return input_ids;\n }\n\n /**\n * Decode a batch of tokenized sequences.\n * @param {number[][]|Tensor} batch List/Tensor of tokenized input sequences.\n * @param {Object} decode_args (Optional) Object with decoding arguments.\n * @returns {string[]} List of decoded sequences.\n */\n batch_decode(batch, decode_args = {}) {\n if (batch instanceof Tensor) {\n batch = batch.tolist();\n }\n return batch.map(x => this.decode(x, decode_args));\n }\n\n /**\n * Decodes a sequence of token IDs back to a string.\n *\n * @param {number[]|Tensor} token_ids List/Tensor of token IDs to decode.\n * @param {Object} [decode_args={}]\n * @param {boolean} [decode_args.skip_special_tokens=false] If true, special tokens are removed from the output string.\n * @param {boolean} [decode_args.clean_up_tokenization_spaces=true] If true, spaces before punctuations and abbreviated forms are removed.\n *\n * @returns {string} The decoded string.\n * @throws {Error} If `token_ids` is not a non-empty array of integers.\n */\n decode(\n token_ids,\n decode_args = {},\n ) {\n if (token_ids instanceof Tensor) {\n token_ids = prepareTensorForDecode(token_ids);\n }\n\n if (!Array.isArray(token_ids) 
|| token_ids.length === 0 || !isIntegralNumber(token_ids[0])) {\n throw Error(\"token_ids must be a non-empty array of integers.\");\n }\n\n return this.decode_single(token_ids, decode_args)\n }\n\n /**\n * Decode a single list of token ids to a string.\n * @param {number[]} token_ids List of token ids to decode\n * @param {Object} decode_args Optional arguments for decoding\n * @param {boolean} [decode_args.skip_special_tokens=false] Whether to skip special tokens during decoding\n * @param {boolean} [decode_args.clean_up_tokenization_spaces=null] Whether to clean up tokenization spaces during decoding.\n * If null, the value is set to `this.decoder.cleanup` if it exists, falling back to `this.clean_up_tokenization_spaces` if it exists, falling back to `true`.\n * @returns {string} The decoded string\n */\n decode_single(\n token_ids,\n {\n skip_special_tokens = false,\n clean_up_tokenization_spaces = null,\n }\n ) {\n let tokens = this.model.convert_ids_to_tokens(token_ids);\n if (skip_special_tokens) {\n tokens = tokens.filter(x => !this.special_tokens.includes(x));\n }\n\n // If `this.decoder` is null, we just join tokens with a space:\n // https://github.com/huggingface/tokenizers/blob/8edec536a737cb04494b454805be16c020abb14f/tokenizers/src/tokenizer/mod.rs#L835\n /** @type {string} */\n let decoded = this.decoder ? this.decoder(tokens) : tokens.join(' ');\n\n // Slight hack, but prevents having to pass `skip_special_tokens` to\n // each call to `decode`, which would lead to code duplication.\n if (this.decoder && this.decoder.end_of_word_suffix) {\n decoded = decoded.replaceAll(this.decoder.end_of_word_suffix, ' ');\n if (skip_special_tokens) {\n decoded = decoded.trim();\n }\n }\n\n if (clean_up_tokenization_spaces ?? this.clean_up_tokenization_spaces) {\n decoded = clean_up_tokenization(decoded);\n }\n\n return decoded;\n }\n\n get default_chat_template() {\n if (!this._warned_about_chat_template) {\n console.warn(\n \"No chat template is defined for this tokenizer - using a default chat template \" +\n \"that implements the ChatML format. If the default is not appropriate for \" +\n \"your model, please set `tokenizer.chat_template` to an appropriate template. \" +\n \"See https://huggingface.co/docs/transformers/main/chat_templating for more information.\"\n )\n this._warned_about_chat_template = true; // TODO move to logger.warning_once()\n }\n\n return this._default_chat_template;\n }\n\n /**\n * Converts a list of message objects with `\"role\"` and `\"content\"` keys to a list of token\n * ids. This method is intended for use with chat models, and will read the tokenizer's chat_template attribute to\n * determine the format and control tokens to use when converting. When chat_template is None, it will fall back\n * to the default_chat_template specified at the class level.\n * \n * See [here](https://huggingface.co/docs/transformers/chat_templating) for more information.\n * \n * **Example:** Applying a chat template to a conversation.\n * \n * ```javascript\n * import { AutoTokenizer } from \"@xenova/transformers\";\n * \n * const tokenizer = await AutoTokenizer.from_pretrained(\"Xenova/mistral-tokenizer-v1\");\n * \n * const chat = [\n * { \"role\": \"user\", \"content\": \"Hello, how are you?\" },\n * { \"role\": \"assistant\", \"content\": \"I'm doing great. 
How can I help you today?\" },\n * { \"role\": \"user\", \"content\": \"I'd like to show off how chat templating works!\" },\n * ]\n * \n * const text = tokenizer.apply_chat_template(chat, { tokenize: false });\n * // \"<s>[INST] Hello, how are you? [/INST]I'm doing great. How can I help you today?</s> [INST] I'd like to show off how chat templating works! [/INST]\"\n * \n * const input_ids = tokenizer.apply_chat_template(chat, { tokenize: true, return_tensor: false });\n * // [1, 733, 16289, 28793, 22557, 28725, 910, 460, 368, 28804, 733, 28748, 16289, 28793, 28737, 28742, 28719, 2548, 1598, 28723, 1602, 541, 315, 1316, 368, 3154, 28804, 2, 28705, 733, 16289, 28793, 315, 28742, 28715, 737, 298, 1347, 805, 910, 10706, 5752, 1077, 3791, 28808, 733, 28748, 16289, 28793]\n * ```\n * \n * @param {Message[]} conversation A list of message objects with `\"role\"` and `\"content\"` keys.\n * @param {Object} options An optional object containing the following properties:\n * @param {string} [options.chat_template=null] A Jinja template to use for this conversion. If\n * this is not passed, the model's default chat template will be used instead.\n * @param {boolean} [options.add_generation_prompt=false] Whether to end the prompt with the token(s) that indicate\n * the start of an assistant message. This is useful when you want to generate a response from the model.\n * Note that this argument will be passed to the chat template, and so it must be supported in the\n * template for this argument to have any effect.\n * @param {boolean} [options.tokenize=true] Whether to tokenize the output. If false, the output will be a string.\n * @param {boolean} [options.padding=false] Whether to pad sequences to the maximum length. Has no effect if tokenize is false.\n * @param {boolean} [options.truncation=false] Whether to truncate sequences to the maximum length. Has no effect if tokenize is false.\n * @param {number} [options.max_length=null] Maximum length (in tokens) to use for padding or truncation. Has no effect if tokenize is false.\n * If not specified, the tokenizer's `max_length` attribute will be used as a default.\n * @param {boolean} [options.return_tensor=true] Whether to return the output as a Tensor or an Array. Has no effect if tokenize is false.\n * @param {Object} [options.tokenizer_kwargs={}] Additional options to pass to the tokenizer.\n * @returns {string | Tensor | number[]| number[][]} The tokenized output.\n */\n apply_chat_template(conversation, {\n chat_template = null,\n add_generation_prompt = false,\n tokenize = true,\n padding = false,\n truncation = false,\n max_length = null,\n return_tensor = true,\n tokenizer_kwargs = {},\n ...kwargs\n } = {}) {\n\n // First, handle the cases when the model has a dict of multiple templates\n if (\n (this.chat_template && typeof this.chat_template === 'object') ||\n (this.chat_template === null && this.default_chat_template && typeof this.default_chat_template === 'object')\n ) {\n const template_dict = this.chat_template ?? 
this.default_chat_template; // Guaranteed to be a non-null object\n\n if (chat_template !== null && Object.hasOwn(template_dict, chat_template)) {\n // The user can pass the name of a template to the chat template argument instead of an entire template\n chat_template = template_dict[chat_template];\n } else if (chat_template === null && 'default' in template_dict) {\n chat_template = template_dict['default'];\n } else if (chat_template === null) {\n throw Error(\n `This model has multiple chat templates with no default specified! Please either pass a chat ` +\n `template or the name of the template you wish to use to the 'chat_template' argument. Available ` +\n `template names are ${Object.keys(template_dict).sort()}.`\n )\n }\n } else {\n // These are the cases when the model has a single template\n // priority: `chat_template` argument > `tokenizer.chat_template` > `tokenizer.default_chat_template\n chat_template ??= this.chat_template ?? this.default_chat_template;\n }\n if (typeof chat_template !== 'string') {\n throw Error(`chat_template must be a string, but got ${typeof chat_template}`);\n }\n\n // Compilation function uses a cache to avoid recompiling the same template\n let compiledTemplate = this._compiled_template_cache.get(chat_template);\n if (compiledTemplate === undefined) {\n compiledTemplate = new Template(chat_template);\n this._compiled_template_cache.set(chat_template, compiledTemplate);\n }\n\n const special_tokens_map = Object.create(null);\n for (const key of SPECIAL_TOKEN_ATTRIBUTES) {\n const value = this.getToken(key);\n if (value) {\n special_tokens_map[key] = value;\n }\n }\n\n const rendered = compiledTemplate.render({\n messages: conversation,\n add_generation_prompt: add_generation_prompt,\n\n ...special_tokens_map,\n ...kwargs,\n });\n\n if (tokenize) {\n return this._call(rendered, {\n add_special_tokens: false,\n padding,\n truncation,\n max_length,\n return_tensor,\n ...tokenizer_kwargs,\n }).input_ids;\n }\n\n return rendered;\n }\n}\n\n/**\n * BertTokenizer is a class used to tokenize text for BERT models.\n * @extends PreTrainedTokenizer\n */\nexport class BertTokenizer extends PreTrainedTokenizer {\n return_token_type_ids = true;\n}\n/**\n * Albert tokenizer\n * @extends PreTrainedTokenizer\n */\nexport class AlbertTokenizer extends PreTrainedTokenizer {\n return_token_type_ids = true;\n}\nexport class MobileBertTokenizer extends PreTrainedTokenizer {\n return_token_type_ids = true;\n}\nexport class SqueezeBertTokenizer extends PreTrainedTokenizer {\n return_token_type_ids = true;\n}\nexport class DebertaTokenizer extends PreTrainedTokenizer {\n return_token_type_ids = true;\n}\nexport class DebertaV2Tokenizer extends PreTrainedTokenizer {\n return_token_type_ids = true;\n}\nexport class HerbertTokenizer extends PreTrainedTokenizer {\n return_token_type_ids = true;\n}\nexport class ConvBertTokenizer extends PreTrainedTokenizer {\n return_token_type_ids = true;\n}\nexport class RoFormerTokenizer extends PreTrainedTokenizer {\n return_token_type_ids = true;\n}\nexport class DistilBertTokenizer extends PreTrainedTokenizer { }\nexport class CamembertTokenizer extends PreTrainedTokenizer { }\nexport class XLMTokenizer extends PreTrainedTokenizer {\n return_token_type_ids = true;\n\n constructor(tokenizerJSON, tokenizerConfig) {\n super(tokenizerJSON, tokenizerConfig);\n console.warn('WARNING: `XLMTokenizer` is not yet supported by Hugging Face\\'s \"fast\" tokenizers library. 
Therefore, you may experience slightly inaccurate results.')\n }\n}\nexport class ElectraTokenizer extends PreTrainedTokenizer {\n return_token_type_ids = true;\n}\n\nexport class T5Tokenizer extends PreTrainedTokenizer { }\nexport class GPT2Tokenizer extends PreTrainedTokenizer {\n _default_chat_template = `{% for message in messages %}\" \"{{ message.content }}{{ eos_token }}\" \"{% endfor %}`\n}\nexport class BartTokenizer extends PreTrainedTokenizer { }\nexport class MBartTokenizer extends PreTrainedTokenizer {\n constructor(tokenizerJSON, tokenizerConfig) {\n super(tokenizerJSON, tokenizerConfig);\n\n this.languageRegex = /^[a-z]{2}_[A-Z]{2}$/;\n this.language_codes = this.special_tokens.filter(x => this.languageRegex.test(x));\n this.lang_to_token = x => x; // Identity function\n }\n\n /**\n * Helper function to build translation inputs for an `MBartTokenizer`.\n * @param {string|string[]} raw_inputs The text to tokenize.\n * @param {Object} tokenizer_options Options to be sent to the tokenizer\n * @param {Object} generate_kwargs Generation options.\n * @returns {Object} Object to be passed to the model.\n */\n _build_translation_inputs(raw_inputs, tokenizer_options, generate_kwargs) {\n return _build_translation_inputs(this, raw_inputs, tokenizer_options, generate_kwargs);\n }\n}\nexport class MBart50Tokenizer extends MBartTokenizer { } // NOTE: extends MBartTokenizer\n\nexport class RobertaTokenizer extends PreTrainedTokenizer { }\n\nexport class BloomTokenizer extends GPT2Tokenizer { // NOTE: `GPT2Tokenizer` to get the correct chat template\n\n constructor(tokenizerJSON, tokenizerConfig) {\n // Override the default (invalid) regex of the pretokenizer.\n // For more information, see https://github.com/xenova/transformers.js/issues/94\n const splitChars = '.,!?\\u2026\\u3002\\uff0c\\u3001\\u0964\\u06d4\\u060c';\n const patternObject = tokenizerJSON.pre_tokenizer?.pretokenizers[0]?.pattern;\n if (patternObject && patternObject.Regex === ` ?[^(\\\\s|[${splitChars}])]+`) {\n patternObject.Regex = ` ?[^\\\\s${splitChars}]+`;\n }\n super(tokenizerJSON, tokenizerConfig);\n }\n}\n\nconst SPIECE_UNDERLINE = \"▁\";\n\nexport class LlamaTokenizer extends PreTrainedTokenizer {\n _default_chat_template = `{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% elif USE_DEFAULT_PROMPT == true and not '<<SYS>>' in messages[0]['content'] %}{% set loop_messages = messages %}{% set system_message = 'DEFAULT_SYSTEM_MESSAGE' %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 and system_message != false %}{% set content = '<<SYS>>\\n' + system_message + '\\n<</SYS>>\\n\\n' + message['content'] %}{% else %}{% set content = message['content'] %}{% endif %}{% if message['role'] == 'user' %}{{ bos_token + '[INST] ' + content.strip() + ' [/INST]' }}{% elif message['role'] == 'system' %}{{ '<<SYS>>\\n' + content.strip() + '\\n<</SYS>>\\n\\n' }}{% elif message['role'] == 'assistant' %}{{ ' ' + content.strip() + ' ' + eos_token }}{% endif %}{% endfor %}`\n\n DEFAULT_SYSTEM_PROMPT =\n \"You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. 
Your \" +\n \"answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure \" +\n \"that your responses are socially unbiased and positive in nature.\\n\\n\" +\n \"If a question does not make any sense, or is not factually coherent, explain why instead of answering something not \" +\n \"correct. If you don't know the answer to a question, please don't share false information.\"\n\n constructor(tokenizerJSON, tokenizerConfig) {\n super(tokenizerJSON, tokenizerConfig);\n this.use_default_system_prompt = tokenizerConfig.use_default_system_prompt ?? false;\n\n this.legacy = tokenizerConfig.legacy ?? true;\n if (!this.legacy) {\n // See https://github.com/huggingface/transformers/pull/24565 for more information\n this.normalizer = null;\n this.pre_tokenizer = new MetaspacePreTokenizer({\n replacement: SPIECE_UNDERLINE,\n add_prefix_space: true,\n prepend_scheme: \"first\",\n });\n }\n }\n\n /**\n * Helper function to handle legacy encoding of SPM tokenizers.\n * Adapted from https://github.com/huggingface/transformers/blob/e6dcf8abd6f65bb4b6dfc1831b20d9ba49ce00e2/src/transformers/models/t5/tokenization_t5.py#L374-L387\n * @param {string} text The text to encode.\n * @returns {string[]} The encoded tokens.\n */\n _encode_text(text) {\n if (text === null) return null;\n\n if (this.legacy || text.length === 0) {\n return super._encode_text(text);\n }\n\n let tokens = super._encode_text(SPIECE_UNDERLINE + text.replaceAll(SPIECE_UNDERLINE, \" \"));\n if (tokens.length > 1 && tokens[0] === SPIECE_UNDERLINE && this.special_tokens.includes(tokens[1])) {\n tokens = tokens.slice(1);\n }\n return tokens;\n }\n\n get default_chat_template() {\n return super.default_chat_template\n .replaceAll('USE_DEFAULT_PROMPT', this.use_default_system_prompt ? 
'true' : 'false')\n .replaceAll('DEFAULT_SYSTEM_MESSAGE', this.DEFAULT_SYSTEM_PROMPT.replaceAll(\"\\n\", \"\\\\n\").replaceAll(\"'\", \"\\\\'\"));\n }\n}\nexport class CodeLlamaTokenizer extends LlamaTokenizer { } // NOTE: `LlamaTokenizer` to get the correct chat template\n\nexport class XLMRobertaTokenizer extends PreTrainedTokenizer { }\nexport class MPNetTokenizer extends PreTrainedTokenizer { }\n\nexport class FalconTokenizer extends PreTrainedTokenizer { }\n\nexport class GPTNeoXTokenizer extends PreTrainedTokenizer { }\n\nexport class EsmTokenizer extends PreTrainedTokenizer { }\n\nexport class Qwen2Tokenizer extends PreTrainedTokenizer { }\n\nexport class GemmaTokenizer extends PreTrainedTokenizer {\n _default_chat_template = \"{% if messages[0]['role'] == 'system' %}{{ raise_exception('System role not supported') }}{% endif %}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if (message['role'] == 'assistant') %}{% set role = 'model' %}{% else %}{% set role = message['role'] %}{% endif %}{{ '<start_of_turn>' + role + '\\n' + message['content'] | trim + '<end_of_turn>\\n' }}{% endfor %}{% if add_generation_prompt %}{{'<start_of_turn>model\\n'}}{% endif %}\"\n}\n\nexport class Grok1Tokenizer extends PreTrainedTokenizer { }\n\n/**\n * Helper function to build translation inputs for an `NllbTokenizer` or `M2M100Tokenizer`.\n * @param {PreTrainedTokenizer} self The tokenizer instance.\n * @param {string|string[]} raw_inputs The text to tokenize.\n * @param {Object} tokenizer_options Options to be sent to the tokenizer\n * @param {Object} generate_kwargs Generation options.\n * @returns {Object} Object to be passed to the model.\n * @private\n */\nfunction _build_translation_inputs(self, raw_inputs, tokenizer_options, generate_kwargs) {\n if (!('language_codes' in self) || !Array.isArray(self.language_codes)) {\n throw new Error('Tokenizer must have `language_codes` attribute set and it should be an array of language ids.')\n }\n if (!('languageRegex' in self) || !(self.languageRegex instanceof RegExp)) {\n throw new Error('Tokenizer must have `languageRegex` attribute set and it should be a regular expression.')\n }\n if (!('lang_to_token' in self) || typeof self.lang_to_token !== 'function') {\n throw new Error('Tokenizer must have `lang_to_token` attribute set and it should be a function.')\n }\n const src_lang_token = generate_kwargs.src_lang;\n const tgt_lang_token = generate_kwargs.tgt_lang;\n\n // Check that the target language is valid:\n if (!self.language_codes.includes(tgt_lang_token)) {\n throw new Error(`Target language code \"${tgt_lang_token}\" is not valid. Must be one of: {${self.language_codes.join(', ')}}`);\n }\n\n // Allow `src_lang` to be optional. If not set, we'll use the tokenizer's default.\n if (src_lang_token !== undefined) {\n // Check that the source language is valid:\n if (!self.language_codes.includes(src_lang_token)) {\n throw new Error(`Source language code \"${src_lang_token}\" is not valid. 
Must be one of: {${self.language_codes.join(', ')}}`);\n }\n\n // In the same way as the Python library, we override the post-processor\n // to force the source language to be first:\n for (const item of self.post_processor.config.single) {\n if ('SpecialToken' in item && self.languageRegex.test(item.SpecialToken.id)) {\n item.SpecialToken.id = self.lang_to_token(src_lang_token);\n break;\n }\n }\n // TODO: Do the same for pair?\n }\n\n // Override the `forced_bos_token_id` to force the correct language\n generate_kwargs.forced_bos_token_id = self.model.convert_tokens_to_ids([self.lang_to_token(tgt_lang_token)])[0];\n\n return self._call(raw_inputs, tokenizer_options);\n}\n\n/**\n * The NllbTokenizer class is used to tokenize text for NLLB (\"No Language Left Behind\") models.\n * \n * No Language Left Behind (NLLB) is a first-of-its-kind, AI breakthrough project\n * that open-sources models capable of delivering high-quality translations directly\n * between any pair of 200+ languages — including low-resource languages like Asturian,\n * Luganda, Urdu and more. It aims to help people communicate with anyone, anywhere,\n * regardless of their language preferences. For more information, check out their\n * [paper](https://arxiv.org/abs/2207.04672).\n * \n * For a list of supported languages (along with their language codes),\n * @see {@link https://github.com/facebookresearch/flores/blob/main/flores200/README.md#languages-in-flores-200}\n */\nexport class NllbTokenizer extends PreTrainedTokenizer {\n\n constructor(tokenizerJSON, tokenizerConfig) {\n super(tokenizerJSON, tokenizerConfig);\n\n this.languageRegex = /^[a-z]{3}_[A-Z][a-z]{3}$/;\n this.language_codes = this.special_tokens.filter(x => this.languageRegex.test(x));\n this.lang_to_token = x => x; // Identity function\n }\n\n /**\n * Helper function to build translation inputs for an `NllbTokenizer`.\n * @param {string|string[]} raw_inputs The text to tokenize.\n * @param {Object} tokenizer_options Options to be sent to the tokenizer\n * @param {Object} generate_kwargs Generation options.\n * @returns {Object} Object to be passed to the model.\n */\n _build_translation_inputs(raw_inputs, tokenizer_options, generate_kwargs) {\n return _build_translation_inputs(this, raw_inputs, tokenizer_options, generate_kwargs);\n }\n}\n\n/**\n * The M2M100Tokenizer class is used to tokenize text for M2M100 (\"Many-to-Many\") models.\n * \n * M2M100 is a multilingual encoder-decoder (seq-to-seq) model trained for Many-to-Many\n * multilingual translation. 
It was introduced in this [paper](https://arxiv.org/abs/2010.11125)\n * and first released in [this](https://github.com/pytorch/fairseq/tree/master/examples/m2m_100) repository.\n * \n * For a list of supported languages (along with their language codes),\n * @see {@link https://huggingface.co/facebook/m2m100_418M#languages-covered}\n */\nexport class M2M100Tokenizer extends PreTrainedTokenizer {\n constructor(tokenizerJSON, tokenizerConfig) {\n super(tokenizerJSON, tokenizerConfig);\n\n this.languageRegex = /^__[a-z]{2,3}__$/;\n this.language_codes = this.special_tokens\n .filter(x => this.languageRegex.test(x))\n .map(x => x.slice(2, -2));\n this.lang_to_token = x => `__${x}__`;\n }\n\n /**\n * Helper function to build translation inputs for an `M2M100Tokenizer`.\n * @param {string|string[]} raw_inputs The text to tokenize.\n * @param {Object} tokenizer_options Options to be sent to the tokenizer\n * @param {Object} generate_kwargs Generation options.\n * @returns {Object} Object to be passed to the model.\n */\n _build_translation_inputs(raw_inputs, tokenizer_options, generate_kwargs) {\n return _build_translation_inputs(this, raw_inputs, tokenizer_options, generate_kwargs);\n }\n}\n\n\nconst WHISPER_LANGUAGES = [\n [\"en\", \"english\"],\n [\"zh\", \"chinese\"],\n [\"de\", \"german\"],\n [\"es\", \"spanish\"],\n [\"ru\", \"russian\"],\n [\"ko\", \"korean\"],\n [\"fr\", \"french\"],\n [\"ja\", \"japanese\"],\n [\"pt\", \"portuguese\"],\n [\"tr\", \"turkish\"],\n [\"pl\", \"polish\"],\n [\"ca\", \"catalan\"],\n [\"nl\", \"dutch\"],\n [\"ar\", \"arabic\"],\n [\"sv\", \"swedish\"],\n [\"it\", \"italian\"],\n [\"id\", \"indonesian\"],\n [\"hi\", \"hindi\"],\n [\"fi\", \"finnish\"],\n [\"vi\", \"vietnamese\"],\n [\"he\", \"hebrew\"],\n [\"uk\", \"ukrainian\"],\n [\"el\", \"greek\"],\n [\"ms\", \"malay\"],\n [\"cs\", \"czech\"],\n [\"ro\", \"romanian\"],\n [\"da\", \"danish\"],\n [\"hu\", \"hungarian\"],\n [\"ta\", \"tamil\"],\n [\"no\", \"norwegian\"],\n [\"th\", \"thai\"],\n [\"ur\", \"urdu\"],\n [\"hr\", \"croatian\"],\n [\"bg\", \"bulgarian\"],\n [\"lt\", \"lithuanian\"],\n [\"la\", \"latin\"],\n [\"mi\", \"maori\"],\n [\"ml\", \"malayalam\"],\n [\"cy\", \"welsh\"],\n [\"sk\", \"slovak\"],\n [\"te\", \"telugu\"],\n [\"fa\", \"persian\"],\n [\"lv\", \"latvian\"],\n [\"bn\", \"bengali\"],\n [\"sr\", \"serbian\"],\n [\"az\", \"azerbaijani\"],\n [\"sl\", \"slovenian\"],\n [\"kn\", \"kannada\"],\n [\"et\", \"estonian\"],\n [\"mk\", \"macedonian\"],\n [\"br\", \"breton\"],\n [\"eu\", \"basque\"],\n [\"is\", \"icelandic\"],\n [\"hy\", \"armenian\"],\n [\"ne\", \"nepali\"],\n [\"mn\", \"mongolian\"],\n [\"bs\", \"bosnian\"],\n [\"kk\", \"kazakh\"],\n [\"sq\", \"albanian\"],\n [\"sw\", \"swahili\"],\n [\"gl\", \"galician\"],\n [\"mr\", \"marathi\"],\n [\"pa\", \"punjabi\"],\n [\"si\", \"sinhala\"],\n [\"km\", \"khmer\"],\n [\"sn\", \"shona\"],\n [\"yo\", \"yoruba\"],\n [\"so\", \"somali\"],\n [\"af\", \"afrikaans\"],\n [\"oc\", \"occitan\"],\n [\"ka\", \"georgian\"],\n [\"be\", \"belarusian\"],\n [\"tg\", \"tajik\"],\n [\"sd\", \"sindhi\"],\n [\"gu\", \"gujarati\"],\n [\"am\", \"amharic\"],\n [\"yi\", \"yiddish\"],\n [\"lo\", \"lao\"],\n [\"uz\", \"uzbek\"],\n [\"fo\", \"faroese\"],\n [\"ht\", \"haitian creole\"],\n [\"ps\", \"pashto\"],\n [\"tk\", \"turkmen\"],\n [\"nn\", \"nynorsk\"],\n [\"mt\", \"maltese\"],\n [\"sa\", \"sanskrit\"],\n [\"lb\", \"luxembourgish\"],\n [\"my\", \"myanmar\"],\n [\"bo\", \"tibetan\"],\n [\"tl\", \"tagalog\"],\n [\"mg\", \"malagasy\"],\n [\"as\", \"assamese\"],\n 
[\"tt\", \"tatar\"],\n [\"haw\", \"hawaiian\"],\n [\"ln\", \"lingala\"],\n [\"ha\", \"hausa\"],\n [\"ba\", \"bashkir\"],\n [\"jw\", \"javanese\"],\n [\"su\", \"sundanese\"],\n]\n\n// @ts-ignore\nconst WHISPER_LANGUAGE_MAPPING = new Map(WHISPER_LANGUAGES);\n// @ts-ignore\nconst WHISPER_TO_LANGUAGE_CODE_MAPPING = new Map([\n ...WHISPER_LANGUAGES.map(([k, v]) => [v, k]),\n ...[\n [\"burmese\", \"my\"],\n [\"valencian\", \"ca\"],\n [\"flemish\", \"nl\"],\n [\"haitian\", \"ht\"],\n [\"letzeburgesch\", \"lb\"],\n [\"pushto\", \"ps\"],\n [\"panjabi\", \"pa\"],\n [\"moldavian\", \"ro\"],\n [\"moldovan\", \"ro\"],\n [\"sinhalese\", \"si\"],\n [\"castilian\", \"es\"],\n ]\n]);\n\n/**\n * WhisperTokenizer tokenizer\n * @extends PreTrainedTokenizer\n */\nexport class WhisperTokenizer extends PreTrainedTokenizer {\n _default_chat_template = `{% for message in messages %}\" \"{{ message.content }}{{ eos_token }}\" \"{% endfor %}`;\n\n /**\n * Decodes automatic speech recognition (ASR) sequences.\n * @param {Array<{tokens: number[], token_timestamps?: number[], stride: number[]}>} sequences The sequences to decode.\n * @param {Object} options The options to use for decoding.\n * @returns {Array<string|{chunks?: undefined|Array<{language: string|null, timestamp: Array<number|null>, text: string}>}>} The decoded sequences.\n */\n _decode_asr(sequences, {\n return_timestamps = false,\n return_language = false,\n time_precision = null,\n force_full_sequences = true\n } = {}) {\n // Set force_full_sequences=false if you want streaming\n // TODO add support for `return_language`\n\n // Internal method meant to only be used by asr pipeline.\n // Handles all the little quirks specific to whisper to handle\n // the various options not allowed in other seq2seq models\n\n // =========== Overview ============\n // - iterate over all outputs\n // - all tokens within output\n // - Each token can be\n // - language token\n // - special token\n // - timestamp token\n // - text token\n // - We accumulate the text tokens.\n // - We split on end timestamps\n // - Lots of complexity comes from stride and timestamps\n\n if (time_precision === null) {\n throw Error(\"Must specify time_precision\")\n }\n let last_language = null;\n\n const returnWordTimestamps = return_timestamps === \"word\";\n\n function new_chunk() {\n return { \"language\": last_language, \"timestamp\": [null, null], \"text\": \"\" };\n }\n\n // Welcome to the state machine!\n const chunks = [];\n let chunk = new_chunk();\n let time_offset = 0.0;\n const timestamp_begin = this.model.convert_tokens_to_ids([\"<|notimestamps|>\"])[0] + 1;\n\n let previous_tokens = [];\n let previous_token_timestamps = [];\n\n let skip = false;\n let right_stride_start = null;\n\n\n const all_special_ids = new Set(this.all_special_ids);\n\n for (const output of sequences) {\n // NOTE: python version has batches, so it uses [0]\n const token_ids = output.tokens;\n const token_timestamps = returnWordTimestamps ? 
output.token_timestamps : null;\n\n // These keep track of timestamps within strides, which need\n // to be skipped and resolve all tokens in a single chunk.\n let last_timestamp = null;\n let first_timestamp = timestamp_begin;\n\n if (\"stride\" in output) {\n const [chunk_len, stride_left, stride_right] = output.stride;\n\n // Offset the timings to account for the other `model_outputs`.\n time_offset -= stride_left;\n right_stride_start = chunk_len - stride_right;\n\n // Keeping track of timestamps within strides\n // We're going to NOT split on those, and delay until we're\n // out of BOTH stride. Otherwise lots of issues occur and\n // corner cases\n if (stride_left) {\n first_timestamp = stride_left / time_precision + timestamp_begin;\n }\n\n if (stride_right) {\n for (let i = token_ids.length - 1; i >= 0; --i) {\n const token = token_ids[i];\n if (token >= timestamp_begin) {\n // There can be several token in the right stride\n // But the last one is ALWAYS going to be skipped\n if (last_timestamp !== null && (token - timestamp_begin) * time_precision < right_stride_start) {\n break;\n }\n last_timestamp = token;\n }\n }\n }\n }\n\n let current_tokens = [];\n let current_token_timestamps = [];\n\n // - all tokens within output\n for (let i = 0; i < token_ids.length; ++i) {\n const token = token_ids[i];\n // 4 possible states for each token\n // - 1/ Language code\n // - 2/ all other special tokens (which we ignore)\n // - 3/ Timestamp\n // - 4/ Regular text\n\n if (all_special_ids.has(token)) {\n const text = this.decode([token]);\n const language = WHISPER_LANGUAGE_MAPPING.get(text.slice(2, -2));\n\n if (language !== undefined) {\n // 1/ Indeed some language\n // TODO Handle when language is different from the previous\n // one, and we cannot use timestamped tokens to create chunks\n if (last_language !== null && language !== last_language && !return_timestamps) {\n previous_tokens.push(current_tokens);\n const resolved_tokens = this.findLongestCommonSequence(previous_tokens)[0];\n const resolved_text = this.decode(resolved_tokens);\n chunk.text = resolved_text;\n chunks.push(chunk);\n\n // Flush all our temporary context\n previous_tokens = [];\n current_tokens = [];\n chunk = new_chunk();\n }\n\n last_language = chunk.language = language;\n } else {\n // 2/ This is a regular special token, ignoring it\n }\n } else if (token >= timestamp_begin) {\n // 3/ Timestamp token\n const time = (token - timestamp_begin) * time_precision + time_offset;\n const rounded_time = round(time, 2);\n\n if (last_timestamp !== null && token >= last_timestamp) {\n // Whisper outputted a timestamp token, but it falls within\n // our stride, so we're going to skip it for the time being\n // and resolve this later\n // Skip is necessary because timestamp tokens always come\n // by pair, so we need to skip the next one too (which would mark the start of another chunk).\n skip = true;\n } else if (skip || (previous_tokens.length > 0 && token < first_timestamp)) {\n skip = false;\n } else if (chunk.timestamp[0] === null) {\n chunk.timestamp[0] = rounded_time;\n } else {\n // This is the end of the timestamp chunk\n if (rounded_time === chunk.timestamp[0]) {\n // This is a bug in timestamp token output\n // where we're taking the duplicate token\n // as a stop where it should be a start.\n // This is an issue in the underlying model output\n // Let's just skip it so it becomes de-factor a start agin\n } else {\n chunk.timestamp[1] = rounded_time;\n\n // Handling merges\n 
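// Closing a chunk at its end timestamp: the token runs accumulated in
// `previous_tokens` (which can overlap across strided outputs) are reconciled
// via `findLongestCommonSequence`, decoded into `chunk.text`, optionally
// aligned to word-level timings, and the temporary buffers are then flushed.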
previous_tokens.push(current_tokens)\n\n if (returnWordTimestamps) {\n previous_token_timestamps.push(current_token_timestamps);\n }\n const [resolved_tokens, resolved_token_timestamps] = this.findLongestCommonSequence(\n previous_tokens, previous_token_timestamps\n )\n\n const resolved_text = this.decode(resolved_tokens)\n chunk.text = resolved_text\n\n if (returnWordTimestamps) {\n chunk.words = this.collateWordTimestamps(\n resolved_tokens, resolved_token_timestamps, last_language,\n )\n }\n\n chunks.push(chunk)\n\n // Flush all our temporary context\n previous_tokens = []\n current_tokens = []\n previous_token_timestamps = []\n current_token_timestamps = []\n chunk = new_chunk()\n }\n }\n\n } else {\n // 4/ Regular token\n // We just append to the list of all tokens so we can handle\n // merges later and decode into text.\n current_tokens.push(token)\n\n if (returnWordTimestamps) {\n let start_time = round(token_timestamps[i] + time_offset, 2);\n\n let end_time;\n if (i + 1 < token_timestamps.length) {\n end_time = round(token_timestamps[i + 1] + time_offset, 2);\n } else {\n // should never happen\n end_time = null;\n }\n current_token_timestamps.push([start_time, end_time]);\n }\n\n }\n }\n\n if ('stride' in output) {\n const [chunk_len, stride_left, stride_right] = output.stride;\n time_offset += chunk_len - stride_right\n }\n\n // Leftover tokens\n if (current_tokens.length > 0) {\n previous_tokens.push(current_tokens)\n if (returnWordTimestamps) {\n previous_token_timestamps.push(current_token_timestamps);\n }\n } else if (previous_tokens.every(p => p.length === 0)) {\n // Flushing previous tokens (END)\"\n chunk = new_chunk()\n previous_tokens = []\n current_tokens = []\n previous_token_timestamps = [];\n current_token_timestamps = [];\n }\n\n }\n\n if (previous_tokens.length > 0) {\n if (force_full_sequences && return_timestamps) {\n // Last token should always be timestamps, so there shouldn't be\n // leftover\n throw new Error(\n \"Whisper did not predict an ending timestamp, which can happen if audio is cut off in the middle of a word. 
\" +\n \"Also make sure WhisperTimeStampLogitsProcessor was used during generation.\"\n );\n }\n\n // Happens when we don't use timestamps\n const [resolved_tokens, resolved_token_timestamps] = this.findLongestCommonSequence(previous_tokens, previous_token_timestamps);\n\n // Flushing previous tokens (FINAL)\n const resolved_text = this.decode(resolved_tokens);\n chunk.text = resolved_text;\n if (returnWordTimestamps) {\n chunk.words = this.collateWordTimestamps(\n resolved_tokens, resolved_token_timestamps, last_language,\n )\n }\n chunks.push(chunk);\n }\n\n let optional = Object.create(null);\n\n // Preparing and cleaning up the pipeline output\n const full_text = chunks.map(chunk => chunk.text).join('');\n if (return_timestamps || return_language) {\n for (let i = 0; i < chunks.length; ++i) {\n const chunk = chunks[i];\n if (!return_timestamps) {\n delete chunk[\"timestamp\"];\n }\n\n if (!return_language) {\n delete chunk[\"language\"];\n }\n }\n if (returnWordTimestamps) {\n const new_chunks = [];\n for (const chunk of chunks) {\n for (const word of chunk.words) {\n new_chunks.push(word);\n }\n }\n optional = { \"chunks\": new_chunks };\n } else {\n optional = { \"chunks\": chunks };\n }\n }\n return [full_text, optional];\n\n }\n\n /**\n * Finds the longest common sequence among the provided sequences.\n * @param {number[][]} sequences An array of sequences of token ids to compare.\n * @returns {number[][]} The longest common sequence found.\n * @throws {Error} If there is a bug within the function.\n * @private\n */\n findLongestCommonSequence(sequences, token_timestamp_sequences = null) {\n // It would be much harder to do O(n) because of fault tolerance.\n // We actually have a really good property which is that the total sequence\n // MUST be those subsequences in order.\n // If token_timestamp_sequences is provided, will split those sequences in\n // exactly the same way.\n let leftSequence = sequences[0];\n let leftLength = leftSequence.length;\n let totalSequence = [];\n\n const use_token_timestamp_sequences = Array.isArray(token_timestamp_sequences) && token_timestamp_sequences.length > 0;\n let total_token_timestamp_sequence = use_token_timestamp_sequences ? [] : null;\n let left_token_timestamp_sequence = use_token_timestamp_sequences ? token_timestamp_sequences[0] : null;\n for (let i = 1; i < sequences.length; ++i) {\n const rightSequence = sequences[i];\n let max = 0.0;\n let maxIndices = [leftLength, leftLength, 0, 0];\n // Here we're sliding matches\n // [a, b, c, d]\n // [c, d, f]\n // = [c] == [d]\n\n // [a, b, c, d]\n // [c, d, f]\n // = [c, d] == [c, d]\n\n\n // [a, b, c, d]\n // [c, d, f]\n\n // = [b, c, d] == [c, d, f]\n\n // [a, b, c, d]\n // [c, d, f]\n\n // [a, b, c] == [c, d, f]\n\n // [a, b, c, d]\n // [d, f]\n\n // [a, b] == [d, f]\n\n // [a, b, c, d]\n // [f]\n\n // [a] == [f]\n\n const rightLength = rightSequence.length;\n for (let j = 1; j < leftLength + rightLength; ++j) {\n const eps = j / 10000.0;\n const leftStart = Math.max(0, leftLength - j);\n const leftStop = Math.min(leftLength, leftLength + rightLength - j);\n const left = leftSequence.slice(leftStart, leftStop);\n const rightStart = Math.max(0, j - leftLength);\n const rightStop = Math.min(rightLength, j);\n const right = rightSequence.slice(rightStart, rightStop);\n if (left.length !== right.length) {\n throw new Error(\"There is a bug within whisper `decode_asr` function, please report it. 
Dropping to prevent bad inference.\");\n }\n const matches = left.filter((elem, idx) => elem === right[idx]).length;\n const matching = matches / j + eps;\n if (matches > 1 && matching > max) {\n max = matching;\n maxIndices = [leftStart, leftStop, rightStart, rightStop];\n }\n }\n const [leftStart, leftStop, rightStart, rightStop] = maxIndices;\n const leftMid = Math.floor((leftStop + leftStart) / 2);\n const rightMid = Math.floor((rightStop + rightStart) / 2);\n totalSequence.push(...leftSequence.slice(0, leftMid));\n leftSequence = rightSequence.slice(rightMid);\n leftLength = leftSequence.length;\n\n if (use_token_timestamp_sequences) {\n total_token_timestamp_sequence.push(...left_token_timestamp_sequence.slice(0, leftMid));\n left_token_timestamp_sequence = token_timestamp_sequences[i].slice(rightMid);\n }\n }\n totalSequence.push(...leftSequence);\n\n if (use_token_timestamp_sequences) {\n total_token_timestamp_sequence.push(...left_token_timestamp_sequence);\n return [totalSequence, total_token_timestamp_sequence];\n } else {\n return [totalSequence, []];\n }\n }\n\n /** @private */\n collateWordTimestamps(tokens, token_timestamps, language) {\n\n const [words, _, token_indices] = this.combineTokensIntoWords(tokens, language);\n\n const timings = [];\n for (let i = 0; i < words.length; ++i) {\n const indices = token_indices[i];\n timings.push({\n text: words[i],\n timestamp: [\n token_timestamps[indices.at(0)][0],\n token_timestamps[indices.at(-1)][1],\n ],\n });\n }\n return timings;\n }\n\n /**\n * Groups tokens by word. Returns a tuple containing a list of strings with the words,\n * and a list of `token_id` sequences with the tokens making up each word.\n * @param {number[]} tokens \n * @param {string} [language] \n * @param {string} prepend_punctionations \n * @param {string} append_punctuations \n * \n * @private\n */\n combineTokensIntoWords(tokens, language, prepend_punctionations = \"\\\"'“¡¿([{-\", append_punctuations = \"\\\"'.。,,!!??::”)]}、\") {\n language = language ?? 'english';\n\n let words, word_tokens, token_indices;\n\n if ([\"chinese\", \"japanese\", \"thai\", \"lao\", \"myanmar\"].includes(language)) {\n // These languages don't typically use spaces.\n [words, word_tokens, token_indices] = this.splitTokensOnUnicode(tokens)\n } else {\n [words, word_tokens, token_indices] = this.splitTokensOnSpaces(tokens)\n }\n\n return this.mergePunctuations(words, word_tokens, token_indices, prepend_punctionations, append_punctuations);\n }\n\n /** @type {PreTrainedTokenizer['decode']} */\n decode(\n token_ids,\n decode_args,\n ) {\n let text;\n // @ts-ignore\n if (decode_args && decode_args.decode_with_timestamps) {\n if (token_ids instanceof Tensor) {\n token_ids = prepareTensorForDecode(token_ids);\n }\n text = this.decodeWithTimestamps(token_ids, decode_args);\n } else {\n text = super.decode(token_ids, decode_args);\n }\n // TODO: implement offsets\n // if (decode_args.output_offsets) {\n // let offsets = this.computeOffsets\n // }\n return text;\n }\n\n /**\n * @param {number[]} token_ids List of token IDs to decode.\n * @param {Object} decode_args Optional arguments for decoding\n * @private\n */\n decodeWithTimestamps(token_ids, decode_args) {\n const time_precision = decode_args?.time_precision ?? 
0.02;\n\n const timestamp_begin = Array.from(this.all_special_ids).at(-1) + 1;\n /**@type {Array} */\n let outputs = [[]];\n for (const token of token_ids) {\n if (token >= timestamp_begin) {\n const timestamp = round((token - timestamp_begin) * time_precision, 2);\n outputs.push(`<|${timestamp}|>`);\n outputs.push([]);\n } else {\n outputs[outputs.length - 1].push(token);\n }\n }\n outputs = outputs.map(\n s => {\n if (typeof s === 'string') {\n return s;\n } else {\n return super.decode(s, decode_args);\n }\n }\n )\n\n return outputs.join('');\n }\n\n /**\n * Combine tokens into words by splitting at any position where the tokens are decoded as valid unicode points.\n * @param {number[]} tokens \n * @returns {*}\n * @private\n */\n splitTokensOnUnicode(tokens) {\n const decoded_full = this.decode(tokens, {\n // @ts-ignore\n decode_with_timestamps: true,\n });\n const replacement_char = '\\uFFFD';\n\n const words = []\n const word_tokens = []\n const token_indices = []\n let current_tokens = []\n let current_indices = []\n let unicode_offset = 0\n\n for (let token_idx = 0; token_idx < tokens.length; ++token_idx) {\n const token = tokens[token_idx];\n\n current_tokens.push(token);\n current_indices.push(token_idx);\n\n const decoded = this.decode(current_tokens, {\n // @ts-ignore\n decode_with_timestamps: true,\n });\n\n if (!decoded.includes(replacement_char) || decoded_full[unicode_offset + decoded.indexOf(replacement_char)] === replacement_char) {\n words.push(decoded)\n word_tokens.push(current_tokens)\n token_indices.push(current_indices)\n current_tokens = []\n current_indices = []\n unicode_offset += decoded.length;\n }\n\n }\n\n return [words, word_tokens, token_indices]\n }\n\n /**\n * Combine tokens into words by splitting at whitespace and punctuation tokens.\n * @param {number[]} tokens \n * @private\n */\n splitTokensOnSpaces(tokens) {\n\n const [subwords, subword_tokens_list, subword_indices_list] = this.splitTokensOnUnicode(tokens);\n\n const words = []\n const word_tokens = []\n const token_indices = []\n\n const punctuationRegex = new RegExp(`^[${PUNCTUATION_REGEX}]$`, 'gu');\n\n for (let i = 0; i < subwords.length; ++i) {\n\n const subword = subwords[i];\n const subword_tokens = subword_tokens_list[i];\n const subword_indices = subword_indices_list[i];\n\n // @ts-ignore\n const special = subword_tokens[0] >= this.model.tokens_to_ids.get('<|endoftext|>');\n const with_space = subword.startsWith(' ');\n const trimmed = subword.trim();\n const punctuation = punctuationRegex.test(trimmed);\n\n if (special || with_space || punctuation || words.length === 0) {\n words.push(subword);\n word_tokens.push(subword_tokens);\n token_indices.push(subword_indices);\n } else {\n const ix = words.length - 1;\n words[ix] += subword;\n word_tokens[ix].push(...subword_tokens);\n token_indices[ix].push(...subword_indices);\n }\n }\n\n return [words, word_tokens, token_indices];\n\n }\n\n /**\n * Merges punctuation tokens with neighboring words.\n * @param {string[]} words \n * @param {number[][]} tokens \n * @param {number[][]} indices \n * @param {string} prepended \n * @param {string} appended \n * @private\n */\n mergePunctuations(words, tokens, indices, prepended, appended) {\n\n const newWords = structuredClone(words);\n const newTokens = structuredClone(tokens);\n const newIndices = structuredClone(indices);\n\n\n // prepend punctuations\n let i = newWords.length - 2;\n let j = newWords.length - 1;\n\n while (i >= 0) {\n if (newWords[i].startsWith(' ') && 
prepended.includes(newWords[i].trim())) {\n newWords[j] = newWords[i] + newWords[j];\n newTokens[j] = mergeArrays(newTokens[i], newTokens[j]);\n newIndices[j] = mergeArrays(newIndices[i], newIndices[j]);\n newWords[i] = '';\n newTokens[i] = [];\n newIndices[i] = [];\n } else {\n j = i;\n }\n --i;\n }\n\n // append punctuations\n i = 0;\n j = 1;\n while (j < newWords.length) {\n if (!newWords[i].endsWith(' ') && appended.includes(newWords[j])) {\n newWords[i] += newWords[j];\n newTokens[i] = mergeArrays(newTokens[i], newTokens[j]);\n newIndices[i] = mergeArrays(newIndices[i], newIndices[j]);\n newWords[j] = '';\n newTokens[j] = [];\n newIndices[j] = [];\n } else {\n i = j;\n }\n ++j;\n }\n\n return [\n newWords.filter(x => x),\n newTokens.filter(x => x.length > 0),\n newIndices.filter(x => x.length > 0),\n ]\n }\n\n /**\n * Helper function to build translation inputs for a `WhisperTokenizer`,\n * depending on the language, task, and whether to predict timestamp tokens.\n * \n * Used to override the prefix tokens appended to the start of the label sequence.\n * \n * **Example: Get ids for a language**\n * ```javascript\n * // instantiate the tokenizer and set the prefix token to Spanish\n * const tokenizer = await WhisperTokenizer.from_pretrained('Xenova/whisper-tiny');\n * const forced_decoder_ids = tokenizer.get_decoder_prompt_ids({ language: 'spanish' });\n * // [(1, 50262), (2, 50363)]\n * ```\n * \n * @param {Object} options Options to generate the decoder prompt.\n * @param {string} [options.language] The language of the transcription text.\n * The corresponding language id token is appended to the start of the sequence for multilingual\n * speech recognition and speech translation tasks, e.g. for \"Spanish\" the token \"<|es|>\" is appended\n * to the start of sequence.\n * @param {string} [options.task] Task identifier to append at the start of sequence (if any).\n * This should be used for mulitlingual fine-tuning, with \"transcribe\" for speech recognition and\n * \"translate\" for speech translation.\n * @param {boolean} [options.no_timestamps] Whether to add the <|notimestamps|> token at the start of the sequence.\n * @returns {number[][]} The decoder prompt ids.\n */\n get_decoder_prompt_ids({\n language = null,\n task = null,\n no_timestamps = true,\n } = {}) {\n\n // <|lang_id|> <|task|> <|notimestamps|>\n\n const forced_decoder_ids = [];\n\n if (language) {\n // User wishes to specify the language\n language = language.toLowerCase();\n\n // Map to code from user-friendly name (e.g., \"english\" -> \"en\")\n let language_code = WHISPER_TO_LANGUAGE_CODE_MAPPING.get(language);\n\n if (language_code === undefined) {\n // User provided something that is not a language name\n\n if (WHISPER_LANGUAGE_MAPPING.has(language)) {\n // User provided the language code directly (e.g., \"en\")\n language_code = language;\n\n } else {\n // User provided something that is not a language code or name\n const is_language_code = language.length === 2;\n const langs = is_language_code ? WHISPER_LANGUAGE_MAPPING.keys() : WHISPER_LANGUAGE_MAPPING.values();\n\n throw new Error(`Language \"${language}\" is not supported. Must be one of: ${JSON.stringify(langs)}`);\n }\n }\n\n const language_token_id = this.model.tokens_to_ids.get(`<|${language_code}|>`);\n if (language_token_id === undefined) {\n throw new Error(`Unable to find language \"${language_code}\" in model vocabulary. 
Please report this issue at https://github.com/xenova/transformers.js/issues/new/choose.`)\n }\n\n forced_decoder_ids.push(language_token_id);\n } else {\n // No token will be forced, which leaves the model to predict the language\n forced_decoder_ids.push(null);\n }\n\n if (task) {\n task = task.toLowerCase();\n if (task !== 'transcribe' && task !== 'translate') {\n throw new Error(`Task \"${task}\" is not supported. Must be one of: [\"transcribe\", \"translate\"]`);\n }\n\n const task_token_id = this.model.tokens_to_ids.get(`<|${task}|>`);\n if (task_token_id === undefined) {\n throw new Error(`Unable to find task \"${task}\" in model vocabulary. Please report this issue at https://github.com/xenova/transformers.js/issues/new/choose.`)\n }\n\n forced_decoder_ids.push(task_token_id);\n } else {\n // No token will be forced, which leaves the model to predict the task\n forced_decoder_ids.push(null);\n }\n\n if (no_timestamps) {\n const no_timestamps_id = this.model.tokens_to_ids.get(`<|notimestamps|>`);\n if (no_timestamps_id === undefined) {\n throw new Error('Unable to find \"<|notimestamps|>\" in model vocabulary. Please report this issue at https://github.com/xenova/transformers.js/issues/new/choose.')\n }\n\n forced_decoder_ids.push(no_timestamps_id);\n }\n\n return forced_decoder_ids.map((x, i) => [i + 1, x]).filter(x => x[1] !== null);\n\n }\n}\nexport class CodeGenTokenizer extends PreTrainedTokenizer { }\nexport class CLIPTokenizer extends PreTrainedTokenizer { }\nexport class SiglipTokenizer extends PreTrainedTokenizer { }\n\n/**\n * @todo This model is not yet supported by Hugging Face's \"fast\" tokenizers library (https://github.com/huggingface/tokenizers).\n * Therefore, this implementation (which is based on fast tokenizers) may produce slightly inaccurate results.\n */\nexport class MarianTokenizer extends PreTrainedTokenizer {\n /**\n * Create a new MarianTokenizer instance.\n * @param {Object} tokenizerJSON The JSON of the tokenizer.\n * @param {Object} tokenizerConfig The config of the tokenizer.\n */\n constructor(tokenizerJSON, tokenizerConfig) {\n super(tokenizerJSON, tokenizerConfig);\n\n this.languageRegex = /^(>>\\w+<<)\\s*/g;\n\n this.supported_language_codes = this.model.vocab.filter(\n x => this.languageRegex.test(x)\n );\n\n console.warn('WARNING: `MarianTokenizer` is not yet supported by Hugging Face\\'s \"fast\" tokenizers library. Therefore, you may experience slightly inaccurate results.')\n }\n\n /**\n * Encodes a single text. Overriding this method is necessary since the language codes\n * must be removed before encoding with sentencepiece model.\n * @see https://github.com/huggingface/transformers/blob/12d51db243a00726a548a43cc333390ebae731e3/src/transformers/models/marian/tokenization_marian.py#L204-L213\n *\n * @param {string|null} text The text to encode.\n * @returns {Array} The encoded tokens.\n */\n _encode_text(text) {\n if (text === null) return null;\n\n // Check if text starts with language code:\n const [matchInfo, ...remainder] = text.trim().split(this.languageRegex);\n\n if (remainder.length === 0) {\n // No language code, encode normally\n return super._encode_text(matchInfo);\n\n } else if (remainder.length === 2) {\n // Text starts with language code, so we do not encode it with sentencepiece.\n const [language, text] = remainder;\n\n if (!this.supported_language_codes.includes(language)) {\n console.warn(`Unsupported language code \"${language}\" detected, which may lead to unexpected behavior. 
Should be one of: ${JSON.stringify(this.supported_language_codes)}`)\n }\n return mergeArrays([language], super._encode_text(text));\n }\n }\n\n}\n\nexport class Wav2Vec2CTCTokenizer extends PreTrainedTokenizer { }\n\nexport class BlenderbotTokenizer extends PreTrainedTokenizer {\n _default_chat_template = `{% for message in messages %}{% if message['role'] == 'user' %}{{ ' ' }}{% endif %}{{ message['content'] }}{% if not loop.last %}{{ ' ' }}{% endif %}{% endfor %}{{ eos_token }}`;\n}\nexport class BlenderbotSmallTokenizer extends BlenderbotTokenizer { } // NOTE `BlenderbotTokenizer` to get the correct chat template\n\nexport class SpeechT5Tokenizer extends PreTrainedTokenizer { }\n\nexport class NougatTokenizer extends PreTrainedTokenizer { }\n\nexport class VitsTokenizer extends PreTrainedTokenizer {\n\n constructor(tokenizerJSON, tokenizerConfig) {\n super(tokenizerJSON, tokenizerConfig);\n\n // Custom decoder function\n this.decoder = new VitsDecoder({});\n }\n}\n\nexport class CohereTokenizer extends PreTrainedTokenizer { }\n\n/**\n * Helper class which is used to instantiate pretrained tokenizers with the `from_pretrained` function.\n * The chosen tokenizer class is determined by the type specified in the tokenizer config.\n * \n * @example\n * const tokenizer = await AutoTokenizer.from_pretrained('Xenova/bert-base-uncased');\n */\nexport class AutoTokenizer {\n static TOKENIZER_CLASS_MAPPING = {\n T5Tokenizer,\n DistilBertTokenizer,\n CamembertTokenizer,\n DebertaTokenizer,\n DebertaV2Tokenizer,\n BertTokenizer,\n HerbertTokenizer,\n ConvBertTokenizer,\n RoFormerTokenizer,\n XLMTokenizer,\n ElectraTokenizer,\n MobileBertTokenizer,\n SqueezeBertTokenizer,\n AlbertTokenizer,\n GPT2Tokenizer,\n BartTokenizer,\n MBartTokenizer,\n MBart50Tokenizer,\n RobertaTokenizer,\n WhisperTokenizer,\n CodeGenTokenizer,\n CLIPTokenizer,\n SiglipTokenizer,\n MarianTokenizer,\n BloomTokenizer,\n NllbTokenizer,\n M2M100Tokenizer,\n LlamaTokenizer,\n CodeLlamaTokenizer,\n XLMRobertaTokenizer,\n MPNetTokenizer,\n FalconTokenizer,\n GPTNeoXTokenizer,\n EsmTokenizer,\n Wav2Vec2CTCTokenizer,\n BlenderbotTokenizer,\n BlenderbotSmallTokenizer,\n SpeechT5Tokenizer,\n NougatTokenizer,\n VitsTokenizer,\n Qwen2Tokenizer,\n GemmaTokenizer,\n Grok1Tokenizer,\n CohereTokenizer,\n\n // Base case:\n PreTrainedTokenizer,\n }\n\n\n /**\n * Instantiate one of the tokenizer classes of the library from a pretrained model.\n * \n * The tokenizer class to instantiate is selected based on the `tokenizer_class` property of the config object\n * (either passed as an argument or loaded from `pretrained_model_name_or_path` if possible)\n * \n * @param {string} pretrained_model_name_or_path The name or path of the pretrained model. 
Can be either:\n * - A string, the *model id* of a pretrained tokenizer hosted inside a model repo on huggingface.co.\n * Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a\n * user or organization name, like `dbmdz/bert-base-german-cased`.\n * - A path to a *directory* containing tokenizer files, e.g., `./my_model_directory/`.\n * @param {PretrainedTokenizerOptions} options Additional options for loading the tokenizer.\n * \n * @returns {Promise<PreTrainedTokenizer>} A new instance of the PreTrainedTokenizer class.\n */\n static async from_pretrained(pretrained_model_name_or_path, {\n quantized = true,\n progress_callback = null,\n config = null,\n cache_dir = null,\n local_files_only = false,\n revision = 'main',\n legacy = null,\n } = {}) {\n\n const [tokenizerJSON, tokenizerConfig] = await loadTokenizer(pretrained_model_name_or_path, {\n quantized,\n progress_callback,\n config,\n cache_dir,\n local_files_only,\n revision,\n legacy,\n })\n\n // Some tokenizers are saved with the \"Fast\" suffix, so we remove that if present.\n const tokenizerName = tokenizerConfig.tokenizer_class?.replace(/Fast$/, '') ?? 'PreTrainedTokenizer';\n\n let cls = this.TOKENIZER_CLASS_MAPPING[tokenizerName];\n if (!cls) {\n console.warn(`Unknown tokenizer class \"${tokenizerName}\", attempting to construct from base class.`);\n cls = PreTrainedTokenizer;\n }\n return new cls(tokenizerJSON, tokenizerConfig);\n }\n}\n"],"names":["n"],"mappings":";;;;;;AA4kBA,MAAM,oBAAoB,MAAM;AAK5B,QAAM,KAAK;AAAA,IACP,GAAG,MAAM,KAAK,EAAE,QAAQ,IAAI,WAAW,CAAC,IAAI,IAAI,WAAW,CAAC,IAAI,EAAC,GAAI,CAAC,GAAG,MAAM,IAAI,IAAI,WAAW,CAAC,CAAC;AAAA,IACpG,GAAG,MAAM,KAAK,EAAE,QAAQ,IAAI,WAAW,CAAC,IAAI,IAAI,WAAW,CAAC,IAAI,EAAC,GAAI,CAAC,GAAG,MAAM,IAAI,IAAI,WAAW,CAAC,CAAC;AAAA,IACpG,GAAG,MAAM,KAAK,EAAE,QAAQ,IAAI,WAAW,CAAC,IAAI,IAAI,WAAW,CAAC,IAAI,EAAC,GAAI,CAAC,GAAG,MAAM,IAAI,IAAI,WAAW,CAAC,CAAC;AAAA,EAC5G;AACI,QAAM,KAAK,GAAG,MAAK;AACnB,MAAI,IAAI;AACR,WAAS,IAAI,GAAG,IAAI,KAAK,EAAE,GAAG;AAC1B,QAAI,CAAC,GAAG,SAAS,CAAC,GAAG;AACjB,SAAG,KAAK,CAAC;AACT,SAAG,KAAK,MAAM,CAAC;AACf,WAAK;AAAA,IACT;AAAA,EACJ;AACA,QAAM,MAAM,GAAG,IAAI,CAAAA,OAAK,OAAO,aAAaA,EAAC,CAAC;AAC9C,SAAO,OAAO,YAAY,GAAG,IAAI,CAAC,GAAG,MAAM,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,CAAC;AAC3D,GAAC;AAEwB,kBAAkB,gBAAgB;AAywF3D,MAAM,oBAAoB;AAAA,EACtB,CAAC,MAAM,SAAS;AAAA,EAChB,CAAC,MAAM,SAAS;AAAA,EAChB,CAAC,MAAM,QAAQ;AAAA,EACf,CAAC,MAAM,SAAS;AAAA,EAChB,CAAC,MAAM,SAAS;AAAA,EAChB,CAAC,MAAM,QAAQ;AAAA,EACf,CAAC,MAAM,QAAQ;AAAA,EACf,CAAC,MAAM,UAAU;AAAA,EACjB,CAAC,MAAM,YAAY;AAAA,EACnB,CAAC,MAAM,SAAS;AAAA,EAChB,CAAC,MAAM,QAAQ;AAAA,EACf,CAAC,MAAM,SAAS;AAAA,EAChB,CAAC,MAAM,OAAO;AAAA,EACd,CAAC,MAAM,QAAQ;AAAA,EACf,CAAC,MAAM,SAAS;AAAA,EAChB,CAAC,MAAM,SAAS;AAAA,EAChB,CAAC,MAAM,YAAY;AAAA,EACnB,CAAC,MAAM,OAAO;AAAA,EACd,CAAC,MAAM,SAAS;AAAA,EAChB,CAAC,MAAM,YAAY;AAAA,EACnB,CAAC,MAAM,QAAQ;AAAA,EACf,CAAC,MAAM,WAAW;AAAA,EAClB,CAAC,MAAM,OAAO;AAAA,EACd,CAAC,MAAM,OAAO;AAAA,EACd,CAAC,MAAM,OAAO;AAAA,EACd,CAAC,MAAM,UAAU;AAAA,EACjB,CAAC,MAAM,QAAQ;AAAA,EACf,CAAC,MAAM,WAAW;AAAA,EAClB,CAAC,MAAM,OAAO;AAAA,EACd,CAAC,MAAM,WAAW;AAAA,EAClB,CAAC,MAAM,MAAM;AAAA,EACb,CAAC,MAAM,MAAM;AAAA,EACb,CAAC,MAAM,UAAU;AAAA,EACjB,CAAC,MAAM,WAAW;AAAA,EAClB,CAAC,MAAM,YAAY;AAAA,EACnB,CAAC,MAAM,OAAO;AAAA,EACd,CAAC,MAAM,OAAO;AAAA,EACd,CAAC,MAAM,WAAW;AAAA,EAClB,CAAC,MAAM,OAAO;AAAA,EACd,CAAC,MAAM,QAAQ;AAAA,EACf,CAAC,MAAM,QAAQ;AAAA,EACf,CAAC,MAAM,SAAS;AAAA,EAChB,CAAC,MAAM,SAAS;AAAA,EAChB,CAAC,MAAM,SAAS;AAAA,EAChB,CAAC,MAAM,SAAS;AAAA,EAChB,CAAC,MAAM,aAAa;AAAA,EACpB,CAAC,MAAM,WAAW;AAAA,EAClB,CAAC,MAAM,SAAS;AAAA,EAChB,CAAC,
MAAM,UAAU;AAAA,EACjB,CAAC,MAAM,YAAY;AAAA,EACnB,CAAC,MAAM,QAAQ;AAAA,EACf,CAAC,MAAM,QAAQ;AAAA,EACf,CAAC,MAAM,WAAW;AAAA,EAClB,CAAC,MAAM,UAAU;AAAA,EACjB,CAAC,MAAM,QAAQ;AAAA,EACf,CAAC,MAAM,WAAW;AAAA,EAClB,CAAC,MAAM,SAAS;AAAA,EAChB,CAAC,MAAM,QAAQ;AAAA,EACf,CAAC,MAAM,UAAU;AAAA,EACjB,CAAC,MAAM,SAAS;AAAA,EAChB,CAAC,MAAM,UAAU;AAAA,EACjB,CAAC,MAAM,SAAS;AAAA,EAChB,CAAC,MAAM,SAAS;AAAA,EAChB,CAAC,MAAM,SAAS;AAAA,EAChB,CAAC,MAAM,OAAO;AAAA,EACd,CAAC,MAAM,OAAO;AAAA,EACd,CAAC,MAAM,QAAQ;AAAA,EACf,CAAC,MAAM,QAAQ;AAAA,EACf,CAAC,MAAM,WAAW;AAAA,EAClB,CAAC,MAAM,SAAS;AAAA,EAChB,CAAC,MAAM,UAAU;AAAA,EACjB,CAAC,MAAM,YAAY;AAAA,EACnB,CAAC,MAAM,OAAO;AAAA,EACd,CAAC,MAAM,QAAQ;AAAA,EACf,CAAC,MAAM,UAAU;AAAA,EACjB,CAAC,MAAM,SAAS;AAAA,EAChB,CAAC,MAAM,SAAS;AAAA,EAChB,CAAC,MAAM,KAAK;AAAA,EACZ,CAAC,MAAM,OAAO;AAAA,EACd,CAAC,MAAM,SAAS;AAAA,EAChB,CAAC,MAAM,gBAAgB;AAAA,EACvB,CAAC,MAAM,QAAQ;AAAA,EACf,CAAC,MAAM,SAAS;AAAA,EAChB,CAAC,MAAM,SAAS;AAAA,EAChB,CAAC,MAAM,SAAS;AAAA,EAChB,CAAC,MAAM,UAAU;AAAA,EACjB,CAAC,MAAM,eAAe;AAAA,EACtB,CAAC,MAAM,SAAS;AAAA,EAChB,CAAC,MAAM,SAAS;AAAA,EAChB,CAAC,MAAM,SAAS;AAAA,EAChB,CAAC,MAAM,UAAU;AAAA,EACjB,CAAC,MAAM,UAAU;AAAA,EACjB,CAAC,MAAM,OAAO;AAAA,EACd,CAAC,OAAO,UAAU;AAAA,EAClB,CAAC,MAAM,SAAS;AAAA,EAChB,CAAC,MAAM,OAAO;AAAA,EACd,CAAC,MAAM,SAAS;AAAA,EAChB,CAAC,MAAM,UAAU;AAAA,EACjB,CAAC,MAAM,WAAW;AACtB;AAGiC,IAAI,IAAI,iBAAiB;AAEjB,IAAI,IAAI;AAAA,EAC7C,GAAG,kBAAkB,IAAI,CAAC,CAAC,GAAG,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC;AAAA,EAC3C,GAAG;AAAA,IACC,CAAC,WAAW,IAAI;AAAA,IAChB,CAAC,aAAa,IAAI;AAAA,IAClB,CAAC,WAAW,IAAI;AAAA,IAChB,CAAC,WAAW,IAAI;AAAA,IAChB,CAAC,iBAAiB,IAAI;AAAA,IACtB,CAAC,UAAU,IAAI;AAAA,IACf,CAAC,WAAW,IAAI;AAAA,IAChB,CAAC,aAAa,IAAI;AAAA,IAClB,CAAC,YAAY,IAAI;AAAA,IACjB,CAAC,aAAa,IAAI;AAAA,IAClB,CAAC,aAAa,IAAI;AAAA,EAC1B;AACA,CAAC;","x_google_ignoreList":[0]}
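A minimal usage sketch of the tokenizer API shown above, based on the documented `AutoTokenizer.from_pretrained` loader and the `WhisperTokenizer.get_decoder_prompt_ids` helper; the model id `'Xenova/whisper-tiny'` comes from the JSDoc example, while the callable-tokenizer return shape (`input_ids`) is assumed from `PreTrainedTokenizer`'s `_call` behaviour rather than confirmed here.

```javascript
import { AutoTokenizer } from '@xenova/transformers';

// The concrete subclass (here WhisperTokenizer) is selected from the
// `tokenizer_class` field of the tokenizer config, falling back to
// PreTrainedTokenizer for unknown classes.
const tokenizer = await AutoTokenizer.from_pretrained('Xenova/whisper-tiny');

// Tokenizer instances are callable; `input_ids` holds the encoded tokens.
const { input_ids } = tokenizer('hello world');

// Build the forced decoder prompt for Spanish transcription, as described in
// `get_decoder_prompt_ids` above. Each entry is a [position, token_id] pair,
// e.g. [1, 50262] forces the <|es|> language token.
const forced_decoder_ids = tokenizer.get_decoder_prompt_ids({ language: 'spanish' });
```

Unknown tokenizer classes fall back to `PreTrainedTokenizer` with a console warning, so the same loading path applies to any of the classes registered in `TOKENIZER_CLASS_MAPPING`.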