@weavelogic/knowledge-graph-agent 0.6.0 → 0.7.1

This diff shows the changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the package contents as they appear in the public registry.
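A comparable diff can be reproduced locally with npm's built-in diff command (assuming npm 7 or later; the output formatting will differ from the registry view):

    npm diff --diff=@weavelogic/knowledge-graph-agent@0.6.0 \
             --diff=@weavelogic/knowledge-graph-agent@0.7.1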
Files changed (219)
  1. package/LICENSE +21 -0
  2. package/README.md +70 -3
  3. package/dist/_virtual/__vite-browser-external.js +2 -2
  4. package/dist/_virtual/__vite-browser-external.js.map +1 -1
  5. package/dist/_virtual/index12.js +7 -0
  6. package/dist/_virtual/index12.js.map +1 -0
  7. package/dist/_virtual/ort-web.min.js +8 -0
  8. package/dist/_virtual/ort-web.min.js.map +1 -0
  9. package/dist/_virtual/ort-web.min2.js +5 -0
  10. package/dist/_virtual/ort-web.min2.js.map +1 -0
  11. package/dist/agents/base-agent.d.ts +63 -0
  12. package/dist/agents/base-agent.d.ts.map +1 -1
  13. package/dist/agents/base-agent.js +139 -0
  14. package/dist/agents/base-agent.js.map +1 -1
  15. package/dist/agents/coordinator-agent.d.ts +422 -0
  16. package/dist/agents/coordinator-agent.d.ts.map +1 -0
  17. package/dist/agents/documenter-agent.d.ts +298 -0
  18. package/dist/agents/documenter-agent.d.ts.map +1 -0
  19. package/dist/agents/index.d.ts +11 -1
  20. package/dist/agents/index.d.ts.map +1 -1
  21. package/dist/agents/index.js +4 -0
  22. package/dist/agents/index.js.map +1 -1
  23. package/dist/agents/mixins/index.d.ts +9 -0
  24. package/dist/agents/mixins/index.d.ts.map +1 -0
  25. package/dist/agents/mixins/trajectory-mixin.d.ts +112 -0
  26. package/dist/agents/mixins/trajectory-mixin.d.ts.map +1 -0
  27. package/dist/agents/optimizer-agent.d.ts +388 -0
  28. package/dist/agents/optimizer-agent.d.ts.map +1 -0
  29. package/dist/agents/planner-agent.d.ts +395 -0
  30. package/dist/agents/planner-agent.d.ts.map +1 -0
  31. package/dist/agents/registry.d.ts.map +1 -1
  32. package/dist/agents/registry.js +5 -0
  33. package/dist/agents/registry.js.map +1 -1
  34. package/dist/agents/reviewer-agent.d.ts +330 -0
  35. package/dist/agents/reviewer-agent.d.ts.map +1 -0
  36. package/dist/agents/types.d.ts +12 -1
  37. package/dist/agents/types.d.ts.map +1 -1
  38. package/dist/agents/types.js +1 -0
  39. package/dist/agents/types.js.map +1 -1
  40. package/dist/cli/commands/hive-mind/add-frontmatter.d.ts +102 -0
  41. package/dist/cli/commands/hive-mind/add-frontmatter.d.ts.map +1 -0
  42. package/dist/cli/commands/hive-mind/add-frontmatter.js +439 -0
  43. package/dist/cli/commands/hive-mind/add-frontmatter.js.map +1 -0
  44. package/dist/cli/commands/hive-mind/analyze-links.d.ts +80 -0
  45. package/dist/cli/commands/hive-mind/analyze-links.d.ts.map +1 -0
  46. package/dist/cli/commands/hive-mind/analyze-links.js +367 -0
  47. package/dist/cli/commands/hive-mind/analyze-links.js.map +1 -0
  48. package/dist/cli/commands/hive-mind/find-connections.d.ts +75 -0
  49. package/dist/cli/commands/hive-mind/find-connections.d.ts.map +1 -0
  50. package/dist/cli/commands/hive-mind/find-connections.js +347 -0
  51. package/dist/cli/commands/hive-mind/find-connections.js.map +1 -0
  52. package/dist/cli/commands/hive-mind/index.d.ts +37 -0
  53. package/dist/cli/commands/hive-mind/index.d.ts.map +1 -0
  54. package/dist/cli/commands/hive-mind/index.js +33 -0
  55. package/dist/cli/commands/hive-mind/index.js.map +1 -0
  56. package/dist/cli/commands/hive-mind/validate-names.d.ts +79 -0
  57. package/dist/cli/commands/hive-mind/validate-names.d.ts.map +1 -0
  58. package/dist/cli/commands/hive-mind/validate-names.js +353 -0
  59. package/dist/cli/commands/hive-mind/validate-names.js.map +1 -0
  60. package/dist/cli/commands/vector.js +2 -0
  61. package/dist/cli/commands/vector.js.map +1 -1
  62. package/dist/cli/index.d.ts.map +1 -1
  63. package/dist/cli/index.js +7 -0
  64. package/dist/cli/index.js.map +1 -1
  65. package/dist/equilibrium/agent-equilibrium.d.ts +194 -0
  66. package/dist/equilibrium/agent-equilibrium.d.ts.map +1 -0
  67. package/dist/equilibrium/agent-equilibrium.js +304 -0
  68. package/dist/equilibrium/agent-equilibrium.js.map +1 -0
  69. package/dist/equilibrium/graph-equilibrium.d.ts +177 -0
  70. package/dist/equilibrium/graph-equilibrium.d.ts.map +1 -0
  71. package/dist/equilibrium/index.d.ts +11 -0
  72. package/dist/equilibrium/index.d.ts.map +1 -0
  73. package/dist/equilibrium/memory-equilibrium.d.ts +153 -0
  74. package/dist/equilibrium/memory-equilibrium.d.ts.map +1 -0
  75. package/dist/graphql/resolvers/index.d.ts.map +1 -1
  76. package/dist/graphql/resolvers/queries.d.ts +11 -0
  77. package/dist/graphql/resolvers/queries.d.ts.map +1 -1
  78. package/dist/index.d.ts +2 -0
  79. package/dist/index.d.ts.map +1 -1
  80. package/dist/index.js +10 -4
  81. package/dist/index.js.map +1 -1
  82. package/dist/inference/index.d.ts +9 -0
  83. package/dist/inference/index.d.ts.map +1 -0
  84. package/dist/inference/model-selection.d.ts +131 -0
  85. package/dist/inference/model-selection.d.ts.map +1 -0
  86. package/dist/integrations/agentic-flow/adapters/agent-booster-adapter.d.ts +265 -0
  87. package/dist/integrations/agentic-flow/adapters/agent-booster-adapter.d.ts.map +1 -0
  88. package/dist/integrations/agentic-flow/adapters/agentdb-adapter.d.ts +197 -0
  89. package/dist/integrations/agentic-flow/adapters/agentdb-adapter.d.ts.map +1 -0
  90. package/dist/integrations/agentic-flow/adapters/agentdb-vector-store.d.ts +249 -0
  91. package/dist/integrations/agentic-flow/adapters/agentdb-vector-store.d.ts.map +1 -0
  92. package/dist/integrations/agentic-flow/adapters/base-adapter.d.ts +120 -0
  93. package/dist/integrations/agentic-flow/adapters/base-adapter.d.ts.map +1 -0
  94. package/dist/integrations/agentic-flow/adapters/federation-hub-adapter.d.ts +444 -0
  95. package/dist/integrations/agentic-flow/adapters/federation-hub-adapter.d.ts.map +1 -0
  96. package/dist/integrations/agentic-flow/adapters/index.d.ts +17 -0
  97. package/dist/integrations/agentic-flow/adapters/index.d.ts.map +1 -0
  98. package/dist/integrations/agentic-flow/adapters/model-router-adapter.d.ts +242 -0
  99. package/dist/integrations/agentic-flow/adapters/model-router-adapter.d.ts.map +1 -0
  100. package/dist/integrations/agentic-flow/adapters/quic-transport-adapter.d.ts +364 -0
  101. package/dist/integrations/agentic-flow/adapters/quic-transport-adapter.d.ts.map +1 -0
  102. package/dist/integrations/agentic-flow/adapters/reasoning-bank-adapter.d.ts +209 -0
  103. package/dist/integrations/agentic-flow/adapters/reasoning-bank-adapter.d.ts.map +1 -0
  104. package/dist/integrations/agentic-flow/benchmark/index.d.ts +9 -0
  105. package/dist/integrations/agentic-flow/benchmark/index.d.ts.map +1 -0
  106. package/dist/integrations/agentic-flow/benchmark/vector-benchmark.d.ts +253 -0
  107. package/dist/integrations/agentic-flow/benchmark/vector-benchmark.d.ts.map +1 -0
  108. package/dist/integrations/agentic-flow/config.d.ts +109 -0
  109. package/dist/integrations/agentic-flow/config.d.ts.map +1 -0
  110. package/dist/integrations/agentic-flow/feature-flags.d.ts +140 -0
  111. package/dist/integrations/agentic-flow/feature-flags.d.ts.map +1 -0
  112. package/dist/integrations/agentic-flow/index.d.ts +22 -0
  113. package/dist/integrations/agentic-flow/index.d.ts.map +1 -0
  114. package/dist/integrations/agentic-flow/migration/index.d.ts +9 -0
  115. package/dist/integrations/agentic-flow/migration/index.d.ts.map +1 -0
  116. package/dist/integrations/agentic-flow/migration/migrate-to-agentdb.d.ts +242 -0
  117. package/dist/integrations/agentic-flow/migration/migrate-to-agentdb.d.ts.map +1 -0
  118. package/dist/learning/index.d.ts +91 -0
  119. package/dist/learning/index.d.ts.map +1 -0
  120. package/dist/learning/learning-loop.d.ts +176 -0
  121. package/dist/learning/learning-loop.d.ts.map +1 -0
  122. package/dist/learning/services/ab-testing-framework.d.ts +135 -0
  123. package/dist/learning/services/ab-testing-framework.d.ts.map +1 -0
  124. package/dist/learning/services/agent-priming-service.d.ts +207 -0
  125. package/dist/learning/services/agent-priming-service.d.ts.map +1 -0
  126. package/dist/learning/services/daily-log-generator.d.ts +113 -0
  127. package/dist/learning/services/daily-log-generator.d.ts.map +1 -0
  128. package/dist/learning/services/index.d.ts +14 -0
  129. package/dist/learning/services/index.d.ts.map +1 -0
  130. package/dist/learning/services/memory-extraction-service.d.ts +87 -0
  131. package/dist/learning/services/memory-extraction-service.d.ts.map +1 -0
  132. package/dist/learning/services/task-completion-consumer.d.ts +162 -0
  133. package/dist/learning/services/task-completion-consumer.d.ts.map +1 -0
  134. package/dist/learning/services/trajectory-tracker.d.ts +174 -0
  135. package/dist/learning/services/trajectory-tracker.d.ts.map +1 -0
  136. package/dist/learning/types.d.ts +516 -0
  137. package/dist/learning/types.d.ts.map +1 -0
  138. package/dist/mcp/clients/claude-flow-memory-client.d.ts +259 -0
  139. package/dist/mcp/clients/claude-flow-memory-client.d.ts.map +1 -0
  140. package/dist/mcp/clients/claude-flow-memory-client.js +305 -0
  141. package/dist/mcp/clients/claude-flow-memory-client.js.map +1 -0
  142. package/dist/mcp/clients/index.d.ts +11 -0
  143. package/dist/mcp/clients/index.d.ts.map +1 -0
  144. package/dist/mcp/clients/mcp-client-adapter.d.ts +146 -0
  145. package/dist/mcp/clients/mcp-client-adapter.d.ts.map +1 -0
  146. package/dist/mcp/clients/mcp-client-adapter.js +372 -0
  147. package/dist/mcp/clients/mcp-client-adapter.js.map +1 -0
  148. package/dist/mcp/index.d.ts +10 -0
  149. package/dist/mcp/index.d.ts.map +1 -0
  150. package/dist/memory/vault-sync.d.ts +12 -0
  151. package/dist/memory/vault-sync.d.ts.map +1 -1
  152. package/dist/memory/vault-sync.js +94 -11
  153. package/dist/memory/vault-sync.js.map +1 -1
  154. package/dist/node_modules/@huggingface/jinja/dist/index.js +118 -0
  155. package/dist/node_modules/@huggingface/jinja/dist/index.js.map +1 -0
  156. package/dist/node_modules/@typescript-eslint/project-service/dist/index.js +1 -1
  157. package/dist/node_modules/@xenova/transformers/src/backends/onnx.js +24 -0
  158. package/dist/node_modules/@xenova/transformers/src/backends/onnx.js.map +1 -0
  159. package/dist/node_modules/@xenova/transformers/src/configs.js +52 -0
  160. package/dist/node_modules/@xenova/transformers/src/configs.js.map +1 -0
  161. package/dist/node_modules/@xenova/transformers/src/env.js +35 -0
  162. package/dist/node_modules/@xenova/transformers/src/env.js.map +1 -0
  163. package/dist/node_modules/@xenova/transformers/src/models.js +3852 -0
  164. package/dist/node_modules/@xenova/transformers/src/models.js.map +1 -0
  165. package/dist/node_modules/@xenova/transformers/src/tokenizers.js +144 -0
  166. package/dist/node_modules/@xenova/transformers/src/tokenizers.js.map +1 -0
  167. package/dist/node_modules/@xenova/transformers/src/utils/core.js +52 -0
  168. package/dist/node_modules/@xenova/transformers/src/utils/core.js.map +1 -0
  169. package/dist/node_modules/@xenova/transformers/src/utils/generation.js +623 -0
  170. package/dist/node_modules/@xenova/transformers/src/utils/generation.js.map +1 -0
  171. package/dist/node_modules/@xenova/transformers/src/utils/hub.js +395 -0
  172. package/dist/node_modules/@xenova/transformers/src/utils/hub.js.map +1 -0
  173. package/dist/node_modules/@xenova/transformers/src/utils/image.js +12 -0
  174. package/dist/node_modules/@xenova/transformers/src/utils/image.js.map +1 -0
  175. package/dist/node_modules/@xenova/transformers/src/utils/maths.js +89 -0
  176. package/dist/node_modules/@xenova/transformers/src/utils/maths.js.map +1 -0
  177. package/dist/node_modules/@xenova/transformers/src/utils/tensor.js +750 -0
  178. package/dist/node_modules/@xenova/transformers/src/utils/tensor.js.map +1 -0
  179. package/dist/node_modules/fdir/dist/index.js +13 -13
  180. package/dist/node_modules/fdir/dist/index.js.map +1 -1
  181. package/dist/node_modules/onnxruntime-common/dist/lib/backend-impl.js +67 -0
  182. package/dist/node_modules/onnxruntime-common/dist/lib/backend-impl.js.map +1 -0
  183. package/dist/node_modules/onnxruntime-common/dist/lib/env-impl.js +24 -0
  184. package/dist/node_modules/onnxruntime-common/dist/lib/env-impl.js.map +1 -0
  185. package/dist/node_modules/onnxruntime-common/dist/lib/env.js +6 -0
  186. package/dist/node_modules/onnxruntime-common/dist/lib/env.js.map +1 -0
  187. package/dist/node_modules/onnxruntime-common/dist/lib/index.js +11 -0
  188. package/dist/node_modules/onnxruntime-common/dist/lib/index.js.map +1 -0
  189. package/dist/node_modules/onnxruntime-common/dist/lib/inference-session-impl.js +162 -0
  190. package/dist/node_modules/onnxruntime-common/dist/lib/inference-session-impl.js.map +1 -0
  191. package/dist/node_modules/onnxruntime-common/dist/lib/inference-session.js +6 -0
  192. package/dist/node_modules/onnxruntime-common/dist/lib/inference-session.js.map +1 -0
  193. package/dist/node_modules/onnxruntime-common/dist/lib/tensor-impl.js +393 -0
  194. package/dist/node_modules/onnxruntime-common/dist/lib/tensor-impl.js.map +1 -0
  195. package/dist/node_modules/onnxruntime-common/dist/lib/tensor.js +6 -0
  196. package/dist/node_modules/onnxruntime-common/dist/lib/tensor.js.map +1 -0
  197. package/dist/node_modules/onnxruntime-web/dist/ort-web.min.js +12919 -0
  198. package/dist/node_modules/onnxruntime-web/dist/ort-web.min.js.map +1 -0
  199. package/dist/node_modules/tinyglobby/dist/index.js +14 -14
  200. package/dist/node_modules/tinyglobby/dist/index.js.map +1 -1
  201. package/dist/node_modules/typescript/lib/typescript.js +24 -24
  202. package/dist/node_modules/typescript/lib/typescript.js.map +1 -1
  203. package/dist/transport/agent-transport.d.ts +269 -0
  204. package/dist/transport/agent-transport.d.ts.map +1 -0
  205. package/dist/transport/index.d.ts +10 -0
  206. package/dist/transport/index.d.ts.map +1 -0
  207. package/dist/vector/index.d.ts +1 -1
  208. package/dist/vector/index.d.ts.map +1 -1
  209. package/dist/vector/services/embedding-service.d.ts +244 -0
  210. package/dist/vector/services/embedding-service.d.ts.map +1 -0
  211. package/dist/vector/services/embedding-service.js +10 -0
  212. package/dist/vector/services/embedding-service.js.map +1 -0
  213. package/dist/vector/services/hybrid-search.d.ts +320 -0
  214. package/dist/vector/services/hybrid-search.d.ts.map +1 -0
  215. package/dist/vector/services/hybrid-search.js +3 -0
  216. package/dist/vector/services/hybrid-search.js.map +1 -0
  217. package/dist/vector/services/index.d.ts +4 -0
  218. package/dist/vector/services/index.d.ts.map +1 -1
  219. package/package.json +10 -1
@@ -0,0 +1 @@
1
+ {"version":3,"file":"models.js","sources":["../../../../../node_modules/@xenova/transformers/src/models.js"],"sourcesContent":["\n/**\n * @file Definitions of all models available in Transformers.js.\n * \n * **Example:** Load and run an `AutoModel`.\n * \n * ```javascript\n * import { AutoModel, AutoTokenizer } from '@xenova/transformers';\n *\n * let tokenizer = await AutoTokenizer.from_pretrained('Xenova/bert-base-uncased');\n * let model = await AutoModel.from_pretrained('Xenova/bert-base-uncased');\n *\n * let inputs = await tokenizer('I love transformers!');\n * let { logits } = await model(inputs);\n * // Tensor {\n * // data: Float32Array(183132) [-7.117443084716797, -7.107812881469727, -7.092104911804199, ...]\n * // dims: (3) [1, 6, 30522],\n * // type: \"float32\",\n * // size: 183132,\n * // }\n * ```\n * \n * We also provide other `AutoModel`s (listed below), which you can use in the same way as the Python library. For example:\n * \n * **Example:** Load and run an `AutoModelForSeq2SeqLM`.\n * ```javascript\n * import { AutoModelForSeq2SeqLM, AutoTokenizer } from '@xenova/transformers';\n * \n * let tokenizer = await AutoTokenizer.from_pretrained('Xenova/t5-small');\n * let model = await AutoModelForSeq2SeqLM.from_pretrained('Xenova/t5-small');\n *\n * let { input_ids } = await tokenizer('translate English to German: I love transformers!');\n * let outputs = await model.generate(input_ids);\n * let decoded = tokenizer.decode(outputs[0], { skip_special_tokens: true });\n * // 'Ich liebe Transformatoren!'\n * ```\n * \n * @module models\n */\n\nimport {\n AutoConfig,\n} from './configs.js';\n\nimport {\n Callable,\n isIntegralNumber,\n isTypedArray,\n mergeArrays,\n} from './utils/core.js';\n\nimport {\n getModelFile,\n getModelJSON,\n} from './utils/hub.js';\n\nimport {\n LogitsProcessorList,\n GenerationConfig,\n ForceTokensLogitsProcessor,\n ForcedBOSTokenLogitsProcessor,\n ForcedEOSTokenLogitsProcessor,\n SuppressTokensAtBeginLogitsProcessor,\n WhisperTimeStampLogitsProcessor,\n NoRepeatNGramLogitsProcessor,\n RepetitionPenaltyLogitsProcessor,\n NoBadWordsLogitsProcessor,\n MinLengthLogitsProcessor,\n MinNewTokensLengthLogitsProcessor,\n\n Sampler,\n} from './utils/generation.js';\n\nimport {\n cat,\n dynamicTimeWarping,\n mean,\n ones_like,\n stack,\n std_mean,\n Tensor,\n} from './utils/tensor.js';\n\nimport { executionProviders, ONNX } from './backends/onnx.js';\nimport { medianFilter } from './transformers.js';\nconst { InferenceSession, Tensor: ONNXTensor, env } = ONNX;\n\n/** @typedef {import('onnxruntime-web').InferenceSession} InferenceSession */\n\n//////////////////////////////////////////////////\n// Model types: used internally\nconst MODEL_TYPES = {\n EncoderOnly: 0,\n EncoderDecoder: 1,\n Seq2Seq: 2,\n Vision2Seq: 3,\n DecoderOnly: 4,\n MaskGeneration: 5,\n}\n//////////////////////////////////////////////////\n\n\n//////////////////////////////////////////////////\n// Helper functions\n\n// NOTE: These will be populated fully later\nconst MODEL_TYPE_MAPPING = new Map();\nconst MODEL_NAME_TO_CLASS_MAPPING = new Map();\nconst MODEL_CLASS_TO_NAME_MAPPING = new Map();\n\n\n/**\n * Constructs an InferenceSession using a model file located at the specified path.\n * @param {string} pretrained_model_name_or_path The path to the directory containing the model file.\n * @param {string} fileName The name of the model file.\n * @param {import('./utils/hub.js').PretrainedOptions} options Additional options for loading the model.\n * @returns {Promise<InferenceSession>} A 
Promise that resolves to an InferenceSession object.\n * @private\n */\nasync function constructSession(pretrained_model_name_or_path, fileName, options) {\n // TODO add option for user to force specify their desired execution provider\n let modelFileName = `onnx/${fileName}${options.quantized ? '_quantized' : ''}.onnx`;\n let buffer = await getModelFile(pretrained_model_name_or_path, modelFileName, true, options);\n\n try {\n return await InferenceSession.create(buffer, {\n executionProviders,\n });\n } catch (err) {\n // If the execution provided was only wasm, throw the error\n if (executionProviders.length === 1 && executionProviders[0] === 'wasm') {\n throw err;\n }\n\n console.warn(err);\n console.warn(\n 'Something went wrong during model construction (most likely a missing operation). ' +\n 'Using `wasm` as a fallback. '\n )\n return await InferenceSession.create(buffer, {\n executionProviders: ['wasm']\n });\n }\n}\n\n/**\n * Validate model inputs\n * @param {InferenceSession} session The InferenceSession object that will be run.\n * @param {Record<string, Tensor>} inputs The inputs to check.\n * @returns {Record<string, Tensor>} The checked inputs.\n * @throws {Error} If any inputs are missing.\n * @private\n */\nfunction validateInputs(session, inputs) {\n /**\n * NOTE: Create either a shallow or deep copy based on `onnx.wasm.proxy`\n * @type {Record<string, Tensor>}\n */\n const checkedInputs = Object.create(null);\n const missingInputs = [];\n for (const inputName of session.inputNames) {\n const tensor = inputs[inputName];\n // Rare case where one of the model's input names corresponds to a built-in\n // object name (e.g., toString), which would cause a simple (!tensor) check to fail,\n // because it's not undefined but a function.\n if (!(tensor instanceof Tensor)) {\n missingInputs.push(inputName);\n continue;\n }\n // NOTE: When `env.wasm.proxy is true` the tensor is moved across the Worker\n // boundary, transferring ownership to the worker and invalidating the tensor.\n // So, in this case, we simply sacrifice a clone for it.\n checkedInputs[inputName] = env.wasm.proxy ? tensor.clone() : tensor;\n }\n if (missingInputs.length > 0) {\n throw new Error(\n `An error occurred during model execution: \"Missing the following inputs: ${missingInputs.join(', ')}.`);\n }\n\n const numInputsProvided = Object.keys(inputs).length;\n const numInputsNeeded = session.inputNames.length;\n if (numInputsProvided > numInputsNeeded) {\n // No missing inputs, but too many inputs were provided.\n // Warn the user and ignore the extra inputs.\n let ignored = Object.keys(inputs).filter(inputName => !session.inputNames.includes(inputName));\n console.warn(`WARNING: Too many inputs were provided (${numInputsProvided} > ${numInputsNeeded}). 
The following inputs will be ignored: \"${ignored.join(', ')}\".`);\n }\n\n return checkedInputs;\n}\n\n/**\n * Executes an InferenceSession using the specified inputs.\n * NOTE: `inputs` must contain at least the input names of the model.\n * - If additional inputs are passed, they will be ignored.\n * - If inputs are missing, an error will be thrown.\n * \n * @param {InferenceSession} session The InferenceSession object to run.\n * @param {Object} inputs An object that maps input names to input tensors.\n * @returns {Promise<Object>} A Promise that resolves to an object that maps output names to output tensors.\n * @private\n */\nasync function sessionRun(session, inputs) {\n const checkedInputs = validateInputs(session, inputs);\n try {\n // @ts-ignore\n let output = await session.run(checkedInputs);\n output = replaceTensors(output);\n return output;\n } catch (e) {\n // This usually occurs when the inputs are of the wrong type.\n console.error(`An error occurred during model execution: \"${e}\".`);\n console.error('Inputs given to model:', checkedInputs);\n throw e;\n }\n}\n\n/**\n * Replaces ONNX Tensor objects with custom Tensor objects to support additional functions.\n * @param {Object} obj The object to replace tensor objects in.\n * @returns {Object} The object with tensor objects replaced by custom Tensor objects.\n * @private\n */\nfunction replaceTensors(obj) {\n for (let prop in obj) {\n if (obj[prop] instanceof ONNXTensor) {\n obj[prop] = new Tensor(obj[prop]);\n } else if (typeof obj[prop] === 'object') {\n replaceTensors(obj[prop]);\n }\n }\n return obj;\n}\n\n\n/**\n * Converts an array or Tensor of integers to an int64 Tensor.\n * @param {Array|Tensor} items The input integers to be converted.\n * @returns {Tensor} The int64 Tensor with the converted values.\n * @throws {Error} If the input array is empty or the input is a batched Tensor and not all sequences have the same length.\n * @private\n */\nfunction toI64Tensor(items) {\n if (items instanceof Tensor) {\n return items;\n }\n // items is an array\n if (items.length === 0) {\n throw Error(\"items must be non-empty\");\n }\n\n if (Array.isArray(items[0])) {\n // batched\n if (items.some(x => x.length !== items[0].length)) {\n throw Error(\"Unable to create tensor, you should probably activate truncation and/or padding with 'padding=True' and/or 'truncation=True' to have batched tensors with the same length.\")\n }\n\n return new Tensor('int64',\n BigInt64Array.from(items.flat().map(x => BigInt(x))),\n [items.length, items[0].length]\n );\n } else {\n //flat\n return new Tensor('int64',\n BigInt64Array.from(items.map(x => BigInt(x))),\n [1, items.length]\n );\n }\n}\n\n/**\n * Prepares an attention mask for a sequence of tokens based on configuration options.\n * @param {Object} self The calling object instance.\n * @param {Tensor} tokens The input tokens.\n * @returns {Tensor} The attention mask tensor.\n * @private\n */\nfunction prepareAttentionMask(self, tokens) {\n\n // Prepare attention mask\n let pad_token_id = self.config.pad_token_id ?? null;\n let eos_token_id = self.config.eos_token_id ?? 
null;\n if (isIntegralNumber(eos_token_id)) {\n eos_token_id = [eos_token_id];\n }\n\n let is_pad_token_in_inputs = tokens.indexOf(pad_token_id) !== -1;\n let is_pad_token_not_equal_to_eos_token_id = (eos_token_id === null) || !eos_token_id.includes(pad_token_id)\n\n if (is_pad_token_in_inputs && is_pad_token_not_equal_to_eos_token_id) {\n let data = BigInt64Array.from(\n // Note: != so that int matches bigint\n // @ts-ignore\n tokens.data.map(x => x != pad_token_id)\n )\n return new Tensor('int64', data, tokens.dims)\n } else {\n return ones_like(tokens);\n }\n}\n\n/**\n * Add position IDs to the feeds object.\n * @param {Object} session The inference session.\n * @param {Object} feeds The input to the model.\n * @param {boolean} use_cache_branch Whether to use the cache branch of the model.\n * @returns {void}\n * @private\n */\nfunction preparePositionIds(session, feeds, use_cache_branch) {\n if (!session.inputNames.includes('position_ids')) return;\n\n const data = new BigInt64Array(feeds.attention_mask.data.length);\n\n // Compute cumulative sum of the attention mask along the sequence length dimension\n for (let i = 0; i < feeds.attention_mask.dims[0]; ++i) {\n let start = i * feeds.attention_mask.dims[1];\n let sum = BigInt(0);\n for (let j = 0; j < feeds.attention_mask.dims[1]; ++j) {\n const index = start + j;\n if (feeds.attention_mask.data[index] === 0n) {\n data[index] = BigInt(1);\n } else { // === 1n\n data[index] = sum;\n sum += feeds.attention_mask.data[index];\n }\n }\n }\n\n feeds.position_ids = new Tensor('int64', data, feeds.attention_mask.dims);\n\n if (use_cache_branch) {\n feeds.position_ids = feeds.position_ids.slice(null, -1).unsqueeze_(-1);\n }\n}\n\n/**\n * Creates a boolean tensor with a single value.\n * @param {boolean} value The value of the tensor.\n * @returns {Tensor} The boolean tensor.\n * @private\n */\nfunction boolTensor(value) {\n return new Tensor('bool', [value], [1]);\n}\n\n// JS doesn't support mixins, so we define some reused functions here, and allow \"this\" to be passed in\n/**\n * Perform forward pass on the seq2seq model (both encoder and decoder).\n * @param {Object} self The seq2seq model object.\n * @param {Object} model_inputs The input object for the model containing encoder and decoder inputs.\n * @returns {Promise<Seq2SeqLMOutput>} Promise that resolves with the output of the seq2seq model.\n * @private\n */\nasync function seq2seqForward(self, model_inputs) {\n\n let { encoder_outputs, past_key_values } = model_inputs;\n\n if (!encoder_outputs) {\n // Encoder outputs are not given, so we must compute them.\n encoder_outputs = (await encoderForward(self, model_inputs)).last_hidden_state;\n }\n let decoderFeeds = {\n input_ids: model_inputs.decoder_input_ids,\n encoder_hidden_states: encoder_outputs,\n };\n const use_cache_branch = !!past_key_values;\n\n if (self.decoder_merged_session.inputNames.includes('use_cache_branch')) {\n decoderFeeds.use_cache_branch = boolTensor(use_cache_branch);\n }\n\n if (self.decoder_merged_session.inputNames.includes('encoder_attention_mask')) {\n decoderFeeds.encoder_attention_mask = model_inputs.attention_mask\n }\n\n preparePositionIds(self.decoder_merged_session, decoderFeeds, use_cache_branch);\n self.addPastKeyValues(decoderFeeds, past_key_values);\n\n const decoderResults = await sessionRun(self.decoder_merged_session, decoderFeeds);\n let logits = decoderResults.logits;\n past_key_values = self.getPastKeyValues(decoderResults, past_key_values);\n\n // Get cross attention and/or decoder 
attentions if they are present\n const attns = self.getAttentions(decoderResults);\n\n return new Seq2SeqLMOutput({ logits, past_key_values, encoder_outputs, ...attns });\n}\n\n/**\n * Start the beam search process for the seq2seq model.\n * @param {PreTrainedModel} self The seq2seq model object.\n * @param {Tensor} inputTokenIds Array of input token ids for each input sequence.\n * @param {Object} generation_config The generation config.\n * @param {number} numOutputTokens The maximum number of output tokens for the model.\n * @returns {Object[]} Array of beam search objects.\n * @private\n */\nfunction seq2seqStartBeams(self, inputTokenIds, generation_config, numOutputTokens) {\n let beams = [];\n let beamId = 0;\n\n // @ts-ignore\n const requires_attention_mask = self.requires_attention_mask ?? true;\n\n // decoder_input_ids == output_token_ids\n let decoder_input_ids =\n generation_config.decoder_input_ids\n ?? generation_config.decoder_start_token_id\n ?? generation_config.bos_token_id\n ?? generation_config.eos_token_id;\n\n // Support input as tensor or list\n // TODO support batched decoder_input_ids\n if (decoder_input_ids instanceof Tensor) {\n decoder_input_ids = decoder_input_ids.tolist().flat();\n } else if (!Array.isArray(decoder_input_ids)) {\n decoder_input_ids = [decoder_input_ids];\n }\n\n for (let tokens of inputTokenIds) {\n // TODO: Improve\n // Currently, just add back batch dimension.\n // In future, allow for true parallel execution\n tokens.dims = [1, ...tokens.dims]\n\n // Create beam\n let start = {\n inputs: tokens,\n encoder_outputs: null,\n prev_model_outputs: null,\n\n output_token_ids: decoder_input_ids,\n done: false,\n score: 0,\n id: beamId++ // assign unique id to beams\n }\n\n if (requires_attention_mask) {\n start.attention_mask = prepareAttentionMask(self, tokens);\n }\n\n beams.push(start);\n }\n\n return beams;\n}\n\n/**\n * Run beam search on the seq2seq model for a single beam.\n * @param {PreTrainedModel} self The seq2seq model object.\n * @param {Object} beam The beam search object for which to run the model.\n * @param {Object} options options\n * @param {string} [options.input_name='input_ids'] The name of the input tensor for the encoder.\n * @returns {Promise<Object>} Promise that resolves with the output of the seq2seq model for the given beam.\n * @private\n */\nasync function seq2seqRunBeam(self, beam) {\n const input_name = self.main_input_name;\n\n let decoder_input_ids = beam.output_token_ids;\n if (beam.prev_model_outputs) {\n // After the first step, `prev_model_outputs` won't be null.\n // So, we cut decoder_input_ids if past is used\n decoder_input_ids = decoder_input_ids.slice(-1);\n }\n\n // 1. Prepare\n let model_inputs = {\n [input_name]: beam.inputs,\n decoder_input_ids: toI64Tensor(decoder_input_ids),\n encoder_outputs: beam.encoder_outputs,\n past_key_values: beam.prev_model_outputs?.past_key_values,\n }\n if (beam.attention_mask) {\n model_inputs.attention_mask = beam.attention_mask\n }\n\n // 2. Run\n let output = await self.forward(model_inputs);\n\n // 3. 
Update\n beam.prev_model_outputs = output;\n beam.encoder_outputs = output.encoder_outputs;\n\n return output;\n}\n\n/**\n * Update a beam with a new token ID.\n * @param {Object} beam The beam to update.\n * @param {number} newTokenId The new token ID to add to the beam's output.\n * @private\n */\nfunction seq2seqUpdatebeam(beam, newTokenId) {\n beam.output_token_ids = [...beam.output_token_ids, newTokenId];\n}\n\n/**\n * Forward pass of an encoder model.\n * @param {Object} self The encoder model.\n * @param {Object} model_inputs The input data to be used for the forward pass.\n * @returns {Promise<Object>} Promise that resolves with an object containing the model's outputs.\n * @private\n */\nasync function encoderForward(self, model_inputs) {\n const encoderFeeds = Object.create(null);\n for (const key of self.session.inputNames) {\n encoderFeeds[key] = model_inputs[key];\n }\n if (self.session.inputNames.includes('token_type_ids') && !encoderFeeds.token_type_ids) {\n // Assign default `token_type_ids` (all zeroes) to the `encoderFeeds` if the model expects it,\n // but they weren't created by the tokenizer.\n encoderFeeds.token_type_ids = new Tensor(\n 'int64',\n new BigInt64Array(encoderFeeds.input_ids.data.length),\n encoderFeeds.input_ids.dims\n )\n }\n return await sessionRun(self.session, encoderFeeds);\n}\n\n\n/**\n * Forward pass of a decoder model.\n * @param {Object} self The decoder model.\n * @param {Object} model_inputs The input data to be used for the forward pass.\n * @returns {Promise<Object>} Promise that resolves with an object containing the logits and past key values.\n * @private\n */\nasync function decoderForward(self, model_inputs) {\n let { input_ids, past_key_values, attention_mask } = model_inputs;\n let decoderFeeds = {\n input_ids: input_ids,\n attention_mask: attention_mask ?? 
prepareAttentionMask(self, input_ids),\n }\n const use_cache_branch = !!past_key_values;\n\n if (self.session.inputNames.includes('use_cache_branch')) {\n decoderFeeds.use_cache_branch = boolTensor(use_cache_branch);\n }\n\n preparePositionIds(self.session, decoderFeeds, use_cache_branch);\n\n self.addPastKeyValues(decoderFeeds, past_key_values);\n\n let decoderResults = await sessionRun(self.session, decoderFeeds);\n\n let logits = decoderResults.logits;\n\n past_key_values = self.getPastKeyValues(decoderResults, past_key_values);\n return { logits, past_key_values };\n}\n\n/**\n * Starts the generation of text by initializing the beams for the given input token IDs.\n * @param {Object} self The text generation model object.\n * @param {Tensor} inputTokenIds An tensor of input token IDs to generate text from.\n * @param {Object} generation_config The generation config.\n * @param {number} numOutputTokens The maximum number of tokens to generate for each beam.\n * @param {Tensor} [inputs_attention_mask] The attention mask tensor for the input token IDs.\n * @returns {Object[]} An array of beams initialized with the given inputs and parameters.\n * @private\n */\nfunction decoderStartBeams(self, inputTokenIds, generation_config, numOutputTokens, inputs_attention_mask) {\n let beams = [];\n\n let beamId = 0;\n for (let tokens of inputTokenIds) {\n let output_token_ids = tokens.tolist().map(Number);\n\n // TODO: Improve\n // Currently, just add back batch dimension.\n // In future, allow for true parallel execution\n tokens.dims = [1, ...tokens.dims]\n\n let attn_mask;\n if (inputs_attention_mask) {\n attn_mask = inputs_attention_mask[beamId];\n attn_mask.dims = [1, ...attn_mask.dims]\n\n } else {\n attn_mask = prepareAttentionMask(self, tokens)\n }\n\n let start = {\n input: tokens,\n model_input_ids: tokens,\n attention_mask: attn_mask,\n prev_model_outputs: null,\n\n output_token_ids: output_token_ids,\n num_output_tokens: numOutputTokens,\n\n done: false,\n score: 0,\n id: beamId++ // assign unique id to beams\n }\n\n beams.push(start);\n }\n return beams;\n}\n\n/**\n * Runs a single step of the text generation process for a given beam.\n *\n * @param {Object} self The decoder object.\n * @param {Object} beam The beam to run.\n * @param {Tensor} beam.input The input tensor.\n * @param {Tensor} beam.model_input_ids The input ids to the model.\n * @param {Tensor} beam.attention_mask The attention mask.\n * @param {Object} beam.prev_model_outputs The past key values.\n * @param {number[]} beam.output_token_ids The output token ids.\n * @returns {Promise<Object>} The output of the generation step.\n * @private\n */\nasync function decoderRunBeam(self, beam) {\n let attnMaskData = new BigInt64Array(beam.output_token_ids.length).fill(1n)\n\n // 1. Prepare\n let model_inputs = {\n input_ids: beam.model_input_ids,\n attention_mask: new Tensor(\n 'int64',\n attnMaskData,\n [1, attnMaskData.length]\n ),\n past_key_values: beam.prev_model_outputs?.past_key_values,\n }\n\n // 2. Run\n let output = await self.forward(model_inputs);\n\n // 3. 
Update\n beam.prev_model_outputs = output;\n\n return output;\n}\n\n/**\n * Update a beam with a new token ID.\n * @param {Object} beam The beam to update.\n * @param {number} newTokenId The new token ID to add to the beam's output.\n * @private\n */\nfunction decoderUpdatebeam(beam, newTokenId) {\n beam.output_token_ids = [...beam.output_token_ids, newTokenId];\n beam.model_input_ids = new Tensor('int64', [BigInt(newTokenId)], [1, 1]);\n}\n\n//////////////////////////////////////////////////\n\n//////////////////////////////////////////////////\n/**\n * A base class for pre-trained models that provides the model configuration and an ONNX session.\n */\nexport class PreTrainedModel extends Callable {\n main_input_name = 'input_ids';\n\n /**\n * Creates a new instance of the `PreTrainedModel` class.\n * @param {Object} config The model configuration.\n * @param {any} session session for the model.\n */\n constructor(config, session) {\n super();\n\n this.config = config;\n this.session = session;\n\n const modelName = MODEL_CLASS_TO_NAME_MAPPING.get(this.constructor);\n const modelType = MODEL_TYPE_MAPPING.get(modelName);\n\n this.can_generate = false;\n this._runBeam = null;\n this._getStartBeams = null;\n this._updateBeam = null;\n this._forward = null;\n if (modelType === MODEL_TYPES.DecoderOnly) {\n this.can_generate = true;\n\n this._runBeam = decoderRunBeam;\n this._getStartBeams = decoderStartBeams;\n this._updateBeam = decoderUpdatebeam;\n this._forward = decoderForward;\n\n } else if (modelType === MODEL_TYPES.Seq2Seq || modelType === MODEL_TYPES.Vision2Seq) {\n this.can_generate = true;\n\n this._runBeam = seq2seqRunBeam;\n this._getStartBeams = seq2seqStartBeams;\n this._updateBeam = seq2seqUpdatebeam;\n this._forward = seq2seqForward;\n\n } else if (modelType === MODEL_TYPES.EncoderDecoder) {\n this._forward = encoderForward;\n\n } else { // should be MODEL_TYPES.EncoderOnly\n this._forward = encoderForward;\n }\n }\n\n /**\n * Disposes of all the ONNX sessions that were created during inference.\n * @returns {Promise<unknown[]>} An array of promises, one for each ONNX session that is being disposed.\n * @todo Use https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/FinalizationRegistry\n */\n async dispose() {\n const promises = [];\n for (let key of Object.keys(this)) {\n const item = this[key];\n // @ts-ignore\n if (item instanceof InferenceSession) {\n promises.push(item.handler.dispose())\n }\n }\n return await Promise.all(promises);\n }\n\n /**\n * Instantiate one of the model classes of the library from a pretrained model.\n * \n * The model class to instantiate is selected based on the `model_type` property of the config object\n * (either passed as an argument or loaded from `pretrained_model_name_or_path` if possible)\n * \n * @param {string} pretrained_model_name_or_path The name or path of the pretrained model. 
Can be either:\n * - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.\n * Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a\n * user or organization name, like `dbmdz/bert-base-german-cased`.\n * - A path to a *directory* containing model weights, e.g., `./my_model_directory/`.\n * @param {import('./utils/hub.js').PretrainedOptions} options Additional options for loading the model.\n * \n * @returns {Promise<PreTrainedModel>} A new instance of the `PreTrainedModel` class.\n */\n static async from_pretrained(pretrained_model_name_or_path, {\n quantized = true,\n progress_callback = null,\n config = null,\n cache_dir = null,\n local_files_only = false,\n revision = 'main',\n model_file_name = null,\n } = {}) {\n\n let options = {\n quantized,\n progress_callback,\n config,\n cache_dir,\n local_files_only,\n revision,\n model_file_name,\n }\n\n const modelName = MODEL_CLASS_TO_NAME_MAPPING.get(this);\n const modelType = MODEL_TYPE_MAPPING.get(modelName);\n\n let info;\n if (modelType === MODEL_TYPES.DecoderOnly) {\n info = await Promise.all([\n AutoConfig.from_pretrained(pretrained_model_name_or_path, options),\n constructSession(pretrained_model_name_or_path, options.model_file_name ?? 'decoder_model_merged', options),\n getModelJSON(pretrained_model_name_or_path, 'generation_config.json', false, options),\n ]);\n\n } else if (modelType === MODEL_TYPES.Seq2Seq || modelType === MODEL_TYPES.Vision2Seq) {\n info = await Promise.all([\n AutoConfig.from_pretrained(pretrained_model_name_or_path, options),\n constructSession(pretrained_model_name_or_path, 'encoder_model', options),\n constructSession(pretrained_model_name_or_path, 'decoder_model_merged', options),\n getModelJSON(pretrained_model_name_or_path, 'generation_config.json', false, options),\n ]);\n\n } else if (modelType === MODEL_TYPES.MaskGeneration) {\n info = await Promise.all([\n AutoConfig.from_pretrained(pretrained_model_name_or_path, options),\n constructSession(pretrained_model_name_or_path, 'vision_encoder', options),\n constructSession(pretrained_model_name_or_path, 'prompt_encoder_mask_decoder', options),\n ]);\n\n } else if (modelType === MODEL_TYPES.EncoderDecoder) {\n info = await Promise.all([\n AutoConfig.from_pretrained(pretrained_model_name_or_path, options),\n constructSession(pretrained_model_name_or_path, 'encoder_model', options),\n constructSession(pretrained_model_name_or_path, 'decoder_model_merged', options),\n ]);\n\n } else { // should be MODEL_TYPES.EncoderOnly\n if (modelType !== MODEL_TYPES.EncoderOnly) {\n console.warn(`Model type for '${modelName ?? config?.model_type}' not found, assuming encoder-only architecture. Please report this at https://github.com/xenova/transformers.js/issues/new/choose.`)\n }\n info = await Promise.all([\n AutoConfig.from_pretrained(pretrained_model_name_or_path, options),\n constructSession(pretrained_model_name_or_path, options.model_file_name ?? 'model', options)\n ]);\n }\n\n // @ts-ignore\n return new this(...info);\n }\n\n /**\n * Runs the model with the provided inputs\n * @param {Object} model_inputs Object containing input tensors\n * @returns {Promise<Object>} Object containing output tensors\n */\n async _call(model_inputs) {\n return await this.forward(model_inputs);\n }\n\n /**\n * Forward method for a pretrained model. 
If not overridden by a subclass, the correct forward method\n * will be chosen based on the model type.\n * @param {Object} model_inputs The input data to the model in the format specified in the ONNX model.\n * @returns {Promise<Object>} The output data from the model in the format specified in the ONNX model.\n * @throws {Error} This method must be implemented in subclasses.\n */\n async forward(model_inputs) {\n return await this._forward(this, model_inputs);\n }\n\n /**\n * @param {import('./utils/generation.js').GenerationConfigType} generation_config \n * @param {number} input_ids_seq_length The starting sequence length for the input ids.\n * @returns {LogitsProcessorList}\n * @private\n */\n _get_logits_processor(\n generation_config,\n input_ids_seq_length,\n // encoder_input_ids, TODO\n // prefix_allowed_tokens_fn, TODO\n logits_processor = null\n ) {\n const processors = new LogitsProcessorList();\n\n // if (generation_config.diversity_penalty !== null && generation_config.diversity_penalty > 0.0) {\n // processors.push(new HammingDiversityLogitsProcessor(\n // generation_config.diversity_penalty,\n // generation_config.num_beams,\n // generation_config.num_beam_groups\n // ));\n // }\n\n // if (generation_config.encoder_repetition_penalty !== null && generation_config.encoder_repetition_penalty !== 1.0) {\n // processors.push(new EncoderRepetitionPenaltyLogitsProcessor(\n // generation_config.encoder_repetition_penalty,\n // encoder_input_ids\n // ));\n // }\n\n if (generation_config.repetition_penalty !== null && generation_config.repetition_penalty !== 1.0) {\n processors.push(new RepetitionPenaltyLogitsProcessor(generation_config.repetition_penalty));\n }\n\n if (generation_config.no_repeat_ngram_size !== null && generation_config.no_repeat_ngram_size > 0) {\n processors.push(new NoRepeatNGramLogitsProcessor(generation_config.no_repeat_ngram_size));\n }\n\n // if (generation_config.encoder_no_repeat_ngram_size !== null && generation_config.encoder_no_repeat_ngram_size > 0) {\n // if (this.config.is_encoder_decoder) {\n // processors.push(new EncoderNoRepeatNGramLogitsProcessor(\n // generation_config.encoder_no_repeat_ngram_size,\n // encoder_input_ids\n // ));\n // } else {\n // throw new Error(\"It's impossible to use `encoder_no_repeat_ngram_size` with decoder-only architecture\");\n // }\n // }\n\n if (generation_config.bad_words_ids !== null) {\n processors.push(new NoBadWordsLogitsProcessor(generation_config.bad_words_ids, generation_config.eos_token_id));\n }\n\n if (generation_config.min_length !== null && generation_config.eos_token_id !== null && generation_config.min_length > 0) {\n processors.push(new MinLengthLogitsProcessor(generation_config.min_length, generation_config.eos_token_id));\n }\n\n if (generation_config.min_new_tokens !== null && generation_config.eos_token_id !== null && generation_config.min_new_tokens > 0) {\n processors.push(new MinNewTokensLengthLogitsProcessor(\n input_ids_seq_length,\n generation_config.min_new_tokens,\n generation_config.eos_token_id\n ));\n }\n\n // if (prefix_allowed_tokens_fn !== null) {\n // processors.push(new PrefixConstrainedLogitsProcessor(\n // prefix_allowed_tokens_fn,\n // generation_config.num_beams / generation_config.num_beam_groups\n // ));\n // }\n\n\n if (generation_config.forced_bos_token_id !== null) {\n processors.push(new ForcedBOSTokenLogitsProcessor(generation_config.forced_bos_token_id));\n }\n\n if (generation_config.forced_eos_token_id !== null) {\n processors.push(new 
ForcedEOSTokenLogitsProcessor(\n generation_config.max_length,\n generation_config.forced_eos_token_id\n ));\n }\n\n // if (generation_config.remove_invalid_values === true) {\n // processors.push(new InfNanRemoveLogitsProcessor());\n // }\n\n // if (generation_config.exponential_decay_length_penalty !== null) {\n // processors.push(new ExponentialDecayLengthPenalty(\n // generation_config.exponential_decay_length_penalty,\n // generation_config.eos_token_id,\n // input_ids_seq_length\n // ));\n // }\n\n // if (generation_config.suppress_tokens !== null) {\n // processors.push(new SuppressTokensLogitsProcessor(generation_config.suppress_tokens));\n // }\n\n if (generation_config.begin_suppress_tokens !== null) {\n let begin_index = (input_ids_seq_length > 1 || generation_config.forced_bos_token_id === null)\n ? input_ids_seq_length\n : input_ids_seq_length + 1;\n\n if (generation_config.forced_decoder_ids !== null) {\n // generation starts after the last token that is forced\n begin_index += generation_config.forced_decoder_ids[generation_config.forced_decoder_ids.length - 1][0];\n }\n processors.push(new SuppressTokensAtBeginLogitsProcessor(generation_config.begin_suppress_tokens, begin_index));\n }\n\n if (generation_config.forced_decoder_ids !== null) {\n processors.push(new ForceTokensLogitsProcessor(generation_config.forced_decoder_ids));\n }\n\n if (logits_processor !== null) {\n processors.extend(logits_processor)\n }\n\n // `LogitNormalization` should always be the last logit processor, when present\n // if (generation_config.renormalize_logits === true) {\n // processors.push(new LogitNormalization());\n // }\n\n return processors;\n }\n\n /**\n * This function merges multiple generation configs together to form a final generation config to be used by the model for text generation.\n * It first creates an empty `GenerationConfig` object, then it applies the model's own `generation_config` property to it. Finally, if a `generation_config` object was passed in the arguments, it overwrites the corresponding properties in the final config with those of the passed config object.\n * @param {import('./utils/generation.js').GenerationConfigType} generation_config A `GenerationConfig` object containing generation parameters.\n * @returns {import('./utils/generation.js').GenerationConfigType} The final generation config object to be used by the model for text generation.\n */\n _get_generation_config(generation_config) {\n // Create empty generation config (contains defaults)\n // We pass `this.config` so that if `eos_token_id` or `bos_token_id` exist in the model's config, we will use them\n let gen_config = new GenerationConfig(this.config);\n\n // Apply model's generation config, if it exists\n if ('generation_config' in this) {\n Object.assign(gen_config, this.generation_config);\n }\n\n // Finally, use any generation config specified by the user\n // when calling `generate`\n if (generation_config !== null) {\n Object.assign(gen_config, generation_config);\n }\n return gen_config;\n }\n\n /**\n * @typedef {import('./utils/maths.js').TypedArray} TypedArray\n */\n\n /**\n * @typedef {{ sequences: Tensor, decoder_attentions: Tensor, cross_attentions: Tensor }} EncoderDecoderOutput\n * @typedef {Object} DecoderOutput\n * \n * Generates text based on the given inputs and generation configuration using the model.\n * @param {Tensor|Array|TypedArray} inputs An array of input token IDs.\n * @param {Object|GenerationConfig|null} generation_config The generation configuration to use. 
If null, default configuration will be used.\n * @param {Object|null} logits_processor An optional logits processor to use. If null, a new LogitsProcessorList instance will be created.\n * @param {Object} options options\n * @param {Object} [options.inputs_attention_mask=null] An optional attention mask for the inputs.\n * @returns {Promise<number[][]|EncoderDecoderOutput|DecoderOutput>} An array of generated output sequences, where each sequence is an array of token IDs.\n * @throws {Error} Throws an error if the inputs array is empty.\n */\n async generate(\n inputs,\n generation_config = null,\n logits_processor = null,\n {\n inputs_attention_mask = null\n } = {},\n ) {\n if (!this.can_generate) {\n const modelName = MODEL_CLASS_TO_NAME_MAPPING.get(this.constructor);\n let errorMessage = `The current model class (${modelName}) is not compatible with \\`.generate()\\`, as it doesn't have a language model head.`\n\n const modelType = this.config.model_type;\n const possibleInfo =\n MODEL_WITH_LM_HEAD_MAPPING_NAMES.get(modelType)\n ?? MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES.get(modelType)\n ?? MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES.get(modelType)\n // ?? MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING_NAMES.get(modelType) // TODO\n ?? MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES.get(modelType);\n\n if (possibleInfo) {\n // TODO: support multiple possible classes\n errorMessage += ` Please use the following class instead: '${possibleInfo[0]}'`;\n }\n throw Error(errorMessage);\n }\n\n if (!(inputs instanceof Tensor) && !isTypedArray(inputs) && !Array.isArray(inputs)) {\n throw Error(`\\`inputs\\` must be a Tensor, TypedArray, or Array, but is \"${inputs.constructor.name}\".`);\n }\n\n let input_ids_seq_length;\n\n // Prepare `input_ids` which will be used for auto-regressive generation\n // TODO: Update to align with HF transformers' implementation\n if (this.config.is_encoder_decoder) {\n // Generating from the encoder outputs\n input_ids_seq_length = 0;\n\n } else {\n input_ids_seq_length = inputs instanceof Tensor ? inputs.dims.at(-1) : inputs.length;\n\n // decoder-only\n if (input_ids_seq_length === 0) {\n throw Error(\"Must supply a non-empty array of input token ids.\")\n }\n }\n\n // Update generation config with defaults\n generation_config = this._get_generation_config(generation_config);\n\n logits_processor = logits_processor ?? new LogitsProcessorList()\n\n // Update logits processor\n logits_processor = this._get_logits_processor(\n generation_config,\n input_ids_seq_length,\n logits_processor\n )\n\n /** @type {number[]} */\n let eos_token_ids = generation_config.eos_token_id;\n if (eos_token_ids !== null && !Array.isArray(eos_token_ids)) {\n eos_token_ids = [eos_token_ids];\n }\n\n // TODO implement early_stopping\n // https://huggingface.co/blog/how-to-generate\n\n let numOutputTokens = 1;\n const maxOutputTokens = numOutputTokens + (generation_config.max_new_tokens ?? Infinity);\n\n // Only use max length if max_new_tokens is not provided\n const useMaxLength = Number.isInteger(generation_config.max_length) && (generation_config.max_new_tokens ?? 
null) === null;\n let sampler = Sampler.getSampler(generation_config);\n\n // @ts-ignore\n let beams = this.getStartBeams(inputs, generation_config, numOutputTokens, inputs_attention_mask);\n\n while (beams.some(x => !x.done) && numOutputTokens < maxOutputTokens) {\n let newest_beams = [];\n for (let beam of beams) {\n if (beam.done) {\n // Add this beam back into the pool\n newest_beams.push(beam);\n continue\n }\n if (useMaxLength && beam.output_token_ids.length >= generation_config.max_length) {\n // Set this beam to done and add it back into the pool\n beam.done = true;\n newest_beams.push(beam);\n continue\n }\n\n // @ts-ignore\n let output = await this.runBeam(beam);\n\n // add attentions/scores to beam only if user requested\n if (generation_config.output_attentions) {\n this.addAttentionsToBeam(beam, output);\n }\n if (generation_config.output_scores) {\n // TODO add\n }\n\n // Logits are of the form [batch_size, out_seq_length, vocab_size]\n // In most cases, this will be [batch_size, 1, vocab_size]\n // So, we select the last token's logits:\n // (equivalent to `logits = outputs.logits[:, -1, :]`)\n let logits = output.logits.slice(null, -1, null);\n\n // Apply logits processor\n logits_processor(beam.output_token_ids, logits);\n\n let sampledTokens = sampler(logits);\n for (let [newTokenId, logProb] of sampledTokens) {\n // use previous beam as a starting point\n let newBeam = { ...beam };\n\n // update new beam\n // @ts-ignore\n this.updateBeam(newBeam, newTokenId);\n\n newBeam.score += logProb;\n\n if (eos_token_ids && eos_token_ids.includes(newTokenId)) {\n newBeam.done = true;\n }\n\n newest_beams.push(newBeam);\n }\n }\n ++numOutputTokens;\n\n // Next, we get the best beams, per ID\n newest_beams = this.groupBeams(newest_beams).map(\n group => group\n .sort((a, b) => b.score - a.score) // sort by score\n .slice(0, generation_config.num_beams) // remove outside beam width\n );\n\n // Flatten beams\n beams = newest_beams.flat();\n\n // Run callback\n if (generation_config.callback_function) {\n generation_config.callback_function(beams);\n }\n }\n\n // TODO: Ensure that we can return non-batched outputs\n\n const groupedBeams = this.groupBeams(beams);\n\n const getFlattened = (key) => groupedBeams.map(\n batch => {\n if (generation_config.num_return_sequences > 1) {\n return batch.slice(0, generation_config.num_return_sequences).map(x => x[key]);\n } else {\n return [batch[0][key]];\n }\n }\n ).flat(); // Flatten across batches (depth=1)\n\n const sequences = getFlattened('output_token_ids'); // [1, seqLength]\n\n if (generation_config.return_dict_in_generate) {\n // NOTE: `decoder_attentions` and `cross_attentions` should be:\n // list (one element for each generated token)\n // of list (one element for each layer of the decoder)\n // of torch.FloatTensor of shape (batch_size, num_heads, generated_length, sequence_length)\n // However, since we are only generating one batch at a time, they are of the form:\n // list (batches)\n // of list (one element for each generated token)\n // of list (one element for each layer of the decoder)\n // of torch.FloatTensor of shape (1, num_heads, generated_length, sequence_length)\n // \n // TODO: In future (when true parallelism, we should be able to return the correct shape)\n\n const decoder_attentions = getFlattened('decoder_attentions');\n const cross_attentions = getFlattened('cross_attentions');\n\n return {\n sequences,\n\n decoder_attentions,\n cross_attentions,\n }\n } else {\n return sequences;\n }\n }\n\n /**\n * Helper 
function to add attentions to beam\n * @param {Object} beam \n * @param {Object} output\n * @private \n */\n addAttentionsToBeam(beam, output) {\n if (this.config.is_encoder_decoder) {\n if (!output.cross_attentions || output.cross_attentions.length === 0) {\n throw Error(\n \"`output_attentions` is true, but the model did not produce cross-attentions. \" +\n \"This is most likely because the model was not exported with `output_attentions=True`.\"\n )\n }\n if (!beam.cross_attentions) {\n beam.cross_attentions = [];\n }\n beam.cross_attentions.push(output.cross_attentions);\n }\n\n if (!output.decoder_attentions || output.decoder_attentions.length === 0) {\n throw Error(\n \"`output_attentions` is true, but the model did not produce decoder-attentions. \" +\n \"This is most likely because the model was not exported with `output_attentions=True`.\"\n )\n }\n if (!beam.decoder_attentions) {\n beam.decoder_attentions = [];\n }\n beam.decoder_attentions.push(output.decoder_attentions);\n }\n\n /**\n * Groups an array of beam objects by their ids.\n *\n * @param {Array} beams The array of beam objects to group.\n * @returns {Array} An array of arrays, where each inner array contains beam objects with the same id.\n */\n groupBeams(beams) {\n // Group beams by their ids\n const groups = Object.create(null);\n for (const obj of beams) {\n if (groups[obj.id] === undefined) {\n groups[obj.id] = [obj];\n } else {\n groups[obj.id].push(obj);\n }\n }\n\n return Object.values(groups);\n }\n\n /**\n * Returns an object containing past key values from the given decoder results object.\n *\n * @param {Object} decoderResults The decoder results object.\n * @param {Object} pastKeyValues The previous past key values.\n * @returns {Object} An object containing past key values.\n */\n getPastKeyValues(decoderResults, pastKeyValues) {\n\n const pkvs = Object.create(null);\n\n for (const name in decoderResults) {\n if (name.startsWith('present')) {\n let newName = name.replace('present', 'past_key_values');\n\n if (pastKeyValues && name.includes('encoder')) {\n // Optimization introduced by optimum to reuse past key values. So, we just replace the constant\n // outputs with the previous past key values.\n // https://github.com/huggingface/optimum/blob/0bf2c05fb7e1182b52d21b703cfc95fd9e4ea3dc/optimum/onnxruntime/base.py#L677-L704\n pkvs[newName] = pastKeyValues[newName];\n } else {\n pkvs[newName] = decoderResults[name];\n }\n }\n }\n return pkvs;\n }\n\n /**\n * Returns an object containing attentions from the given decoder results object.\n *\n * @param {Object} decoderResults The decoder results object.\n * @returns {Object} An object containing attentions.\n */\n getAttentions(decoderResults) {\n const attns = Object.create(null);\n\n for (const attnName of ['cross_attentions', 'decoder_attentions']) {\n const result = [];\n for (const name in decoderResults) {\n if (name.startsWith(attnName)) {\n const index = name.split('.').pop()\n result[index] = decoderResults[name];\n }\n }\n attns[attnName] = result;\n }\n return attns;\n }\n\n /**\n * Adds past key values to the decoder feeds object. 
If pastKeyValues is null, creates new tensors for past key values.\n *\n * @param {Object} decoderFeeds The decoder feeds object to add past key values to.\n * @param {Object} pastKeyValues An object containing past key values.\n */\n addPastKeyValues(decoderFeeds, pastKeyValues) {\n if (pastKeyValues) {\n Object.assign(decoderFeeds, pastKeyValues)\n } else {\n // TODO support batches (i.e., batch_size > 1)\n const batch_size = 1;\n\n // @ts-ignore\n if (this.config.is_encoder_decoder && (this.add_encoder_pkv ?? true)) {\n // @ts-ignore\n let encoder_dims = [batch_size, this.num_encoder_heads, 0, this.encoder_dim_kv];\n // @ts-ignore\n let decoder_dims = [batch_size, this.num_decoder_heads, 0, this.decoder_dim_kv];\n // @ts-ignore\n for (let i = 0; i < this.num_decoder_layers; ++i) {\n decoderFeeds[`past_key_values.${i}.encoder.key`] = new Tensor('float32', [], encoder_dims)\n decoderFeeds[`past_key_values.${i}.encoder.value`] = new Tensor('float32', [], encoder_dims)\n decoderFeeds[`past_key_values.${i}.decoder.key`] = new Tensor('float32', [], decoder_dims)\n decoderFeeds[`past_key_values.${i}.decoder.value`] = new Tensor('float32', [], decoder_dims)\n }\n } else if (this.config.model_type === 'falcon') {\n // NOTE: Custom implementation for Falcon\n // @ts-ignore\n let dims = [batch_size * this.num_heads, 0, this.dim_kv]\n // @ts-ignore\n for (let i = 0; i < this.num_layers; ++i) {\n decoderFeeds[`past_key_values.${i}.key`] = new Tensor('float32', [], dims)\n decoderFeeds[`past_key_values.${i}.value`] = new Tensor('float32', [], dims)\n }\n } else if (this.config.multi_query) { // e.g., for `gpt_bigcode`\n // @ts-ignore\n let dims = [batch_size * this.num_heads, 0, 2 * this.dim_kv]\n // @ts-ignore\n for (let i = 0; i < this.num_layers; ++i) {\n decoderFeeds[`past_key_values.${i}.key_value`] = new Tensor('float32', [], dims)\n }\n } else if (this.config.model_type === 'bloom') {\n // NOTE: Custom implementation for Bloom\n\n // @ts-ignore\n let keyDims = [batch_size * this.num_heads, this.dim_kv, 0] // [batch_size * num_heads, dim_kv, past_sequence_length]\n // @ts-ignore\n let valueDims = [batch_size * this.num_heads, 0, this.dim_kv] // [batch_size * num_heads, past_sequence_length, dim_kv]\n // @ts-ignore\n for (let i = 0; i < this.num_layers; ++i) {\n decoderFeeds[`past_key_values.${i}.key`] = new Tensor('float32', [], keyDims)\n decoderFeeds[`past_key_values.${i}.value`] = new Tensor('float32', [], valueDims)\n }\n } else { // Decoder-only\n // @ts-ignore\n let dims = [batch_size, this.num_heads, 0, this.dim_kv]\n // @ts-ignore\n for (let i = 0; i < this.num_layers; ++i) {\n decoderFeeds[`past_key_values.${i}.key`] = new Tensor('float32', [], dims)\n decoderFeeds[`past_key_values.${i}.value`] = new Tensor('float32', [], dims)\n }\n }\n }\n }\n\n /**\n * Initializes and returns the beam for text generation task\n * @param {Tensor} inputTokenIds The input token ids.\n * @param {Object} generation_config The generation config.\n * @param {number} numOutputTokens The number of tokens to be generated.\n * @param {Tensor} inputs_attention_mask Optional input attention mask.\n * @returns {any} A Beam object representing the initialized beam.\n * @private\n */\n getStartBeams(inputTokenIds, generation_config, numOutputTokens, inputs_attention_mask) {\n return this._getStartBeams(this, inputTokenIds, generation_config, numOutputTokens, inputs_attention_mask)\n }
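\n\n // NOTE (added explanatory comment; not part of the original file): each `runBeam` call below feeds\n // the model its newest token(s) together with the cached `past_key_values`, which start out as the\n // empty tensors built in `addPastKeyValues` (sequence axis of length 0, e.g. [1, num_heads, 0, dim_kv]\n // for a decoder-only model) and are then replaced after every step by `getPastKeyValues`, whose\n // sequence axis grows by one position per generated token.\n\n /**\n * Runs a single step of the beam search generation algorithm.\n * @param {any} beam The current beam being generated.\n * @returns 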
{Promise<any>} The updated beam after a single generation step.\n * @private\n */\n async runBeam(beam) {\n return await this._runBeam(this, beam);\n }\n\n /**\n * Update a beam with a new token ID.\n * @param {Object} beam The beam to update.\n * @param {number} newTokenId The new token ID to add to the beam's output.\n * @private\n */\n updateBeam(beam, newTokenId) {\n return this._updateBeam(beam, newTokenId);\n }\n}\n\n//////////////////////////////////////////////////\n// Base model output class\nexport class ModelOutput { }\n\n/**\n * Base class for model's outputs, with potential hidden states and attentions.\n */\nexport class BaseModelOutput extends ModelOutput {\n /**\n * @param {Object} output The output of the model.\n * @param {Tensor} output.last_hidden_state Sequence of hidden-states at the output of the last layer of the model.\n * @param {Tensor} [output.hidden_states] Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.\n * @param {Tensor} [output.attentions] Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.\n */\n constructor({ last_hidden_state, hidden_states = null, attentions = null }) {\n super();\n this.last_hidden_state = last_hidden_state;\n this.hidden_states = hidden_states;\n this.attentions = attentions;\n }\n}\n//////////////////////////////////////////////////\n// Bert models\nexport class BertPreTrainedModel extends PreTrainedModel { }\nexport class BertModel extends BertPreTrainedModel { }\n\n/**\n * BertForMaskedLM is a class representing a BERT model for masked language modeling.\n */\nexport class BertForMaskedLM extends BertPreTrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for masked language modeling.\n */\n async _call(model_inputs) {\n return new MaskedLMOutput(await super._call(model_inputs));\n }\n}\n\n/**\n * BertForSequenceClassification is a class representing a BERT model for sequence classification.\n */\nexport class BertForSequenceClassification extends BertPreTrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.\n */\n async _call(model_inputs) {\n return new SequenceClassifierOutput(await super._call(model_inputs));\n }\n}\n\n/**\n * BertForTokenClassification is a class representing a BERT model for token classification.\n */\nexport class BertForTokenClassification extends BertPreTrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification.\n */\n async _call(model_inputs) {\n return new TokenClassifierOutput(await super._call(model_inputs));\n }\n}\n\n/**\n * BertForQuestionAnswering is a class representing a BERT model for question answering.\n */\nexport class BertForQuestionAnswering extends BertPreTrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<QuestionAnsweringModelOutput>} An object containing the model's output logits for question answering.\n */\n async _call(model_inputs) {\n return new 
QuestionAnsweringModelOutput(await super._call(model_inputs));\n }\n}\n//////////////////////////////////////////////////\n\n//////////////////////////////////////////////////\n// NomicBert models\nexport class NomicBertPreTrainedModel extends PreTrainedModel { }\nexport class NomicBertModel extends NomicBertPreTrainedModel { }\n//////////////////////////////////////////////////\n\n//////////////////////////////////////////////////\n// RoFormer models\nexport class RoFormerPreTrainedModel extends PreTrainedModel { }\n\n/**\n * The bare RoFormer Model transformer outputting raw hidden-states without any specific head on top.\n */\nexport class RoFormerModel extends RoFormerPreTrainedModel { }\n\n/**\n * RoFormer Model with a `language modeling` head on top.\n */\nexport class RoFormerForMaskedLM extends RoFormerPreTrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for masked language modeling.\n */\n async _call(model_inputs) {\n return new MaskedLMOutput(await super._call(model_inputs));\n }\n}\n\n/**\n * RoFormer Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output)\n */\nexport class RoFormerForSequenceClassification extends RoFormerPreTrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.\n */\n async _call(model_inputs) {\n return new SequenceClassifierOutput(await super._call(model_inputs));\n }\n}\n\n/**\n * RoFormer Model with a token classification head on top (a linear layer on top of the hidden-states output)\n * e.g. 
for Named-Entity-Recognition (NER) tasks.\n */\nexport class RoFormerForTokenClassification extends RoFormerPreTrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification.\n */\n async _call(model_inputs) {\n return new TokenClassifierOutput(await super._call(model_inputs));\n }\n}\n\n/**\n * RoFormer Model with a span classification head on top for extractive question-answering tasks like SQuAD\n * (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).\n */\nexport class RoFormerForQuestionAnswering extends RoFormerPreTrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<QuestionAnsweringModelOutput>} An object containing the model's output logits for question answering.\n */\n async _call(model_inputs) {\n return new QuestionAnsweringModelOutput(await super._call(model_inputs));\n }\n}\n// TODO: Add RoFormerForCausalLM and RoFormerForMultipleChoice\n//////////////////////////////////////////////////\n\n//////////////////////////////////////////////////\n// ConvBert models\nexport class ConvBertPreTrainedModel extends PreTrainedModel { }\n\n/**\n * The bare ConvBERT Model transformer outputting raw hidden-states without any specific head on top.\n */\nexport class ConvBertModel extends ConvBertPreTrainedModel { }\n\n/**\n * ConvBERT Model with a language modeling head on top.\n */\nexport class ConvBertForMaskedLM extends ConvBertPreTrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for masked language modeling.\n */\n async _call(model_inputs) {\n return new MaskedLMOutput(await super._call(model_inputs));\n }\n}\n\n/**\n * ConvBERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output)\n */\nexport class ConvBertForSequenceClassification extends ConvBertPreTrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.\n */\n async _call(model_inputs) {\n return new SequenceClassifierOutput(await super._call(model_inputs));\n }\n}\n\n/**\n * ConvBERT Model with a token classification head on top (a linear layer on top of the hidden-states output)\n * e.g. 
for Named-Entity-Recognition (NER) tasks.\n */\nexport class ConvBertForTokenClassification extends ConvBertPreTrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification.\n */\n async _call(model_inputs) {\n return new TokenClassifierOutput(await super._call(model_inputs));\n }\n}\n\n/**\n * ConvBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD\n * (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`)\n */\nexport class ConvBertForQuestionAnswering extends ConvBertPreTrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<QuestionAnsweringModelOutput>} An object containing the model's output logits for question answering.\n */\n async _call(model_inputs) {\n return new QuestionAnsweringModelOutput(await super._call(model_inputs));\n }\n}\n//////////////////////////////////////////////////\n\n\n//////////////////////////////////////////////////\n// Electra models\nexport class ElectraPreTrainedModel extends PreTrainedModel { }\n\n/**\n * The bare Electra Model transformer outputting raw hidden-states without any specific head on top.\n * Identical to the BERT model except that it uses an additional linear layer between the embedding\n * layer and the encoder if the hidden size and embedding size are different.\n */\nexport class ElectraModel extends ElectraPreTrainedModel { }\n// TODO add ElectraForPreTraining\n/**\n * Electra model with a language modeling head on top.\n */\nexport class ElectraForMaskedLM extends ElectraPreTrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for masked language modeling.\n */\n async _call(model_inputs) {\n return new MaskedLMOutput(await super._call(model_inputs));\n }\n}\n\n/**\n * ELECTRA Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output)\n */\nexport class ElectraForSequenceClassification extends ElectraPreTrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.\n */\n async _call(model_inputs) {\n return new SequenceClassifierOutput(await super._call(model_inputs));\n }\n}\n\n/**\n * Electra model with a token classification head on top.\n */\nexport class ElectraForTokenClassification extends ElectraPreTrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification.\n */\n async _call(model_inputs) {\n return new TokenClassifierOutput(await super._call(model_inputs));\n }\n}\n\n/**\n * ELECTRA Model with a span classification head on top for extractive question-answering tasks like SQuAD\n * (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).\n */\nexport class ElectraForQuestionAnswering extends ElectraPreTrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param 
{Object} model_inputs The inputs to the model.\n * @returns {Promise<QuestionAnsweringModelOutput>} An object containing the model's output logits for question answering.\n */\n async _call(model_inputs) {\n return new QuestionAnsweringModelOutput(await super._call(model_inputs));\n }\n}\n//////////////////////////////////////////////////\n\n\n//////////////////////////////////////////////////\n// CamemBERT models\nexport class CamembertPreTrainedModel extends PreTrainedModel { }\n\n/**\n * The bare CamemBERT Model transformer outputting raw hidden-states without any specific head on top.\n */\nexport class CamembertModel extends CamembertPreTrainedModel { }\n\n/**\n * CamemBERT Model with a `language modeling` head on top.\n */\nexport class CamembertForMaskedLM extends CamembertPreTrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for masked language modeling.\n */\n async _call(model_inputs) {\n return new MaskedLMOutput(await super._call(model_inputs));\n }\n}\n\n/**\n * CamemBERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.\n */\nexport class CamembertForSequenceClassification extends CamembertPreTrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.\n */\n async _call(model_inputs) {\n return new SequenceClassifierOutput(await super._call(model_inputs));\n }\n}\n\n/**\n * CamemBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. 
for Named-Entity-Recognition (NER) tasks.\n */\nexport class CamembertForTokenClassification extends CamembertPreTrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification.\n */\n async _call(model_inputs) {\n return new TokenClassifierOutput(await super._call(model_inputs));\n }\n}\n\n/**\n * CamemBERT Model with a span classification head on top for extractive question-answering tasks\n */\nexport class CamembertForQuestionAnswering extends CamembertPreTrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<QuestionAnsweringModelOutput>} An object containing the model's output logits for question answering.\n */\n async _call(model_inputs) {\n return new QuestionAnsweringModelOutput(await super._call(model_inputs));\n }\n}\n//////////////////////////////////////////////////\n\n//////////////////////////////////////////////////\n// DeBERTa models\nexport class DebertaPreTrainedModel extends PreTrainedModel { }\n\n/**\n * The bare DeBERTa Model transformer outputting raw hidden-states without any specific head on top.\n */\nexport class DebertaModel extends DebertaPreTrainedModel { }\n\n/**\n * DeBERTa Model with a `language modeling` head on top.\n */\nexport class DebertaForMaskedLM extends DebertaPreTrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for masked language modeling.\n */\n async _call(model_inputs) {\n return new MaskedLMOutput(await super._call(model_inputs));\n }\n}\n\n/**\n * DeBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output)\n */\nexport class DebertaForSequenceClassification extends DebertaPreTrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.\n */\n async _call(model_inputs) {\n return new SequenceClassifierOutput(await super._call(model_inputs));\n }\n}\n\n/**\n * DeBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. 
for Named-Entity-Recognition (NER) tasks.\n */\nexport class DebertaForTokenClassification extends DebertaPreTrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification.\n */\n async _call(model_inputs) {\n return new TokenClassifierOutput(await super._call(model_inputs));\n }\n}\n\n/**\n * DeBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n * layer on top of the hidden-states output to compute `span start logits` and `span end logits`).\n */\nexport class DebertaForQuestionAnswering extends DebertaPreTrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<QuestionAnsweringModelOutput>} An object containing the model's output logits for question answering.\n */\n async _call(model_inputs) {\n return new QuestionAnsweringModelOutput(await super._call(model_inputs));\n }\n}\n//////////////////////////////////////////////////\n\n//////////////////////////////////////////////////\n// DeBERTa-v2 models\nexport class DebertaV2PreTrainedModel extends PreTrainedModel { }\n\n/**\n * The bare DeBERTa-V2 Model transformer outputting raw hidden-states without any specific head on top.\n */\nexport class DebertaV2Model extends DebertaV2PreTrainedModel { }\n\n/**\n * DeBERTa-V2 Model with a `language modeling` head on top.\n */\nexport class DebertaV2ForMaskedLM extends DebertaV2PreTrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for masked language modeling.\n */\n async _call(model_inputs) {\n return new MaskedLMOutput(await super._call(model_inputs));\n }\n}\n\n/**\n * DeBERTa-V2 Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output)\n */\nexport class DebertaV2ForSequenceClassification extends DebertaV2PreTrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.\n */\n async _call(model_inputs) {\n return new SequenceClassifierOutput(await super._call(model_inputs));\n }\n}\n\n/**\n * DeBERTa-V2 Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. 
for Named-Entity-Recognition (NER) tasks.\n */\nexport class DebertaV2ForTokenClassification extends DebertaV2PreTrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification.\n */\n async _call(model_inputs) {\n return new TokenClassifierOutput(await super._call(model_inputs));\n }\n}\n\n/**\n * DeBERTa-V2 Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n * layer on top of the hidden-states output to compute `span start logits` and `span end logits`).\n */\nexport class DebertaV2ForQuestionAnswering extends DebertaV2PreTrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<QuestionAnsweringModelOutput>} An object containing the model's output logits for question answering.\n */\n async _call(model_inputs) {\n return new QuestionAnsweringModelOutput(await super._call(model_inputs));\n }\n}\n//////////////////////////////////////////////////\n\n//////////////////////////////////////////////////\n// DistilBert models\nexport class DistilBertPreTrainedModel extends PreTrainedModel { }\nexport class DistilBertModel extends DistilBertPreTrainedModel { }\n\n/**\n * DistilBertForSequenceClassification is a class representing a DistilBERT model for sequence classification.\n */\nexport class DistilBertForSequenceClassification extends DistilBertPreTrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.\n */\n async _call(model_inputs) {\n return new SequenceClassifierOutput(await super._call(model_inputs));\n }\n}\n\n/**\n * DistilBertForTokenClassification is a class representing a DistilBERT model for token classification.\n */\nexport class DistilBertForTokenClassification extends DistilBertPreTrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification.\n */\n async _call(model_inputs) {\n return new TokenClassifierOutput(await super._call(model_inputs));\n }\n}\n\n\n/**\n * DistilBertForQuestionAnswering is a class representing a DistilBERT model for question answering.\n */\nexport class DistilBertForQuestionAnswering extends DistilBertPreTrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<QuestionAnsweringModelOutput>} An object containing the model's output logits for question answering.\n */\n async _call(model_inputs) {\n return new QuestionAnsweringModelOutput(await super._call(model_inputs));\n }\n}\n\n/**\n * DistilBertForMaskedLM is a class representing a DistilBERT model for masked language modeling.\n */\nexport class DistilBertForMaskedLM extends DistilBertPreTrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<MaskedLMOutput>} returned object\n */\n async _call(model_inputs) {\n return new MaskedLMOutput(await super._call(model_inputs));\n 
}\n}\n//////////////////////////////////////////////////\n\n\n//////////////////////////////////////////////////\n// ESM models\nexport class EsmPreTrainedModel extends PreTrainedModel { }\n\n/**\n * The bare ESM Model transformer outputting raw hidden-states without any specific head on top.\n */\nexport class EsmModel extends EsmPreTrainedModel { }\n\n/**\n * ESM Model with a `language modeling` head on top.\n */\nexport class EsmForMaskedLM extends EsmPreTrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for masked language modeling.\n */\n async _call(model_inputs) {\n return new MaskedLMOutput(await super._call(model_inputs));\n }\n}\n\n/**\n * ESM Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output)\n */\nexport class EsmForSequenceClassification extends EsmPreTrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.\n */\n async _call(model_inputs) {\n return new SequenceClassifierOutput(await super._call(model_inputs));\n }\n}\n\n/**\n * ESM Model with a token classification head on top (a linear layer on top of the hidden-states output)\n * e.g. for Named-Entity-Recognition (NER) tasks.\n */\nexport class EsmForTokenClassification extends EsmPreTrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification.\n */\n async _call(model_inputs) {\n return new TokenClassifierOutput(await super._call(model_inputs));\n }\n}\n//////////////////////////////////////////////////\n\n\n//////////////////////////////////////////////////\n// MobileBert models\nexport class MobileBertPreTrainedModel extends PreTrainedModel { }\nexport class MobileBertModel extends MobileBertPreTrainedModel { }\n\n/**\n * MobileBertForMaskedLM is a class representing a MobileBERT model for masked language modeling.\n */\nexport class MobileBertForMaskedLM extends MobileBertPreTrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<MaskedLMOutput>} returned object\n */\n async _call(model_inputs) {\n return new MaskedLMOutput(await super._call(model_inputs));\n }\n}\n\n/**\n * MobileBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output)\n */\nexport class MobileBertForSequenceClassification extends MobileBertPreTrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<SequenceClassifierOutput>} returned object\n */\n async _call(model_inputs) {\n return new SequenceClassifierOutput(await super._call(model_inputs));\n }\n}\n\n/**\n * MobileBert Model with a span classification head on top for extractive question-answering tasks\n */\nexport class MobileBertForQuestionAnswering extends MobileBertPreTrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<QuestionAnsweringModelOutput>} returned object\n */\n async _call(model_inputs) {\n return new 
QuestionAnsweringModelOutput(await super._call(model_inputs));\n }\n}\n//////////////////////////////////////////////////\n\n//////////////////////////////////////////////////\n// MPNet models\nexport class MPNetPreTrainedModel extends PreTrainedModel { }\n\n/**\n * The bare MPNet Model transformer outputting raw hidden-states without any specific head on top.\n */\nexport class MPNetModel extends MPNetPreTrainedModel { }\n\n/**\n * MPNetForMaskedLM is a class representing a MPNet model for masked language modeling.\n */\nexport class MPNetForMaskedLM extends MPNetPreTrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for masked language modeling.\n */\n async _call(model_inputs) {\n return new MaskedLMOutput(await super._call(model_inputs));\n }\n}\n\n/**\n * MPNetForSequenceClassification is a class representing a MPNet model for sequence classification.\n */\nexport class MPNetForSequenceClassification extends MPNetPreTrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.\n */\n async _call(model_inputs) {\n return new SequenceClassifierOutput(await super._call(model_inputs));\n }\n}\n\n/**\n * MPNetForTokenClassification is a class representing a MPNet model for token classification.\n */\nexport class MPNetForTokenClassification extends MPNetPreTrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification.\n */\n async _call(model_inputs) {\n return new TokenClassifierOutput(await super._call(model_inputs));\n }\n}\n\n/**\n * MPNetForQuestionAnswering is a class representing a MPNet model for question answering.\n */\nexport class MPNetForQuestionAnswering extends MPNetPreTrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<QuestionAnsweringModelOutput>} An object containing the model's output logits for question answering.\n */\n async _call(model_inputs) {\n return new QuestionAnsweringModelOutput(await super._call(model_inputs));\n }\n}\n//////////////////////////////////////////////////\n\n\n//////////////////////////////////////////////////\n// SqueezeBert models\nexport class SqueezeBertPreTrainedModel extends PreTrainedModel { }\nexport class SqueezeBertModel extends SqueezeBertPreTrainedModel { }\nexport class SqueezeBertForMaskedLM extends SqueezeBertPreTrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<MaskedLMOutput>} returned object\n */\n async _call(model_inputs) {\n return new MaskedLMOutput(await super._call(model_inputs));\n }\n}\nexport class SqueezeBertForSequenceClassification extends SqueezeBertPreTrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<SequenceClassifierOutput>} returned object\n */\n async _call(model_inputs) {\n return new SequenceClassifierOutput(await super._call(model_inputs));\n }\n}\nexport class SqueezeBertForQuestionAnswering extends SqueezeBertPreTrainedModel {\n /**\n * Calls 
the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<QuestionAnsweringModelOutput>} returned object\n */\n async _call(model_inputs) {\n return new QuestionAnsweringModelOutput(await super._call(model_inputs));\n }\n}\n//////////////////////////////////////////////////\n\n\n//////////////////////////////////////////////////\n// Albert models\nexport class AlbertPreTrainedModel extends PreTrainedModel { }\nexport class AlbertModel extends AlbertPreTrainedModel { }\nexport class AlbertForSequenceClassification extends AlbertPreTrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<SequenceClassifierOutput>} returned object\n */\n async _call(model_inputs) {\n return new SequenceClassifierOutput(await super._call(model_inputs));\n }\n}\nexport class AlbertForQuestionAnswering extends AlbertPreTrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<QuestionAnsweringModelOutput>} returned object\n */\n async _call(model_inputs) {\n return new QuestionAnsweringModelOutput(await super._call(model_inputs));\n }\n}\nexport class AlbertForMaskedLM extends AlbertPreTrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<MaskedLMOutput>} returned object\n */\n async _call(model_inputs) {\n return new MaskedLMOutput(await super._call(model_inputs));\n }\n}\n//////////////////////////////////////////////////\n\n\n//////////////////////////////////////////////////\n// T5 models\nexport class T5PreTrainedModel extends PreTrainedModel { };\n\nexport class T5Model extends T5PreTrainedModel { }\n\n/**\n * T5ForConditionalGeneration is a class representing a T5 model for conditional generation.\n */\nexport class T5ForConditionalGeneration extends T5PreTrainedModel {\n\n /**\n * Creates a new instance of the `T5ForConditionalGeneration` class.\n * @param {Object} config The model configuration.\n * @param {any} session The ONNX session for the model.\n * @param {any} decoder_merged_session The ONNX session for the merged decoder.\n * @param {GenerationConfig} generation_config The generation configuration.\n */\n constructor(config, session, decoder_merged_session, generation_config) {\n super(config, session);\n this.decoder_merged_session = decoder_merged_session;\n this.generation_config = generation_config;\n\n this.num_decoder_layers = this.config.num_decoder_layers;\n this.num_decoder_heads = this.config.num_heads;\n this.decoder_dim_kv = this.config.d_kv;\n\n this.num_encoder_layers = this.config.num_layers;\n this.num_encoder_heads = this.config.num_heads;\n this.encoder_dim_kv = this.config.d_kv;\n }\n}\n//////////////////////////////////////////////////\n\n\n//////////////////////////////////////////////////\n// LONGT5 models\n/**\n * An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models.\n */\nexport class LongT5PreTrainedModel extends PreTrainedModel { };\n\n/**\n * The bare LONGT5 Model transformer outputting raw hidden-states without any specific head on top.\n */\nexport class LongT5Model extends LongT5PreTrainedModel { }\n\n/**\n * LONGT5 Model with a `language modeling` head on top.\n */\nexport class LongT5ForConditionalGeneration extends LongT5PreTrainedModel {\n /**\n * Creates a new instance of the `LongT5ForConditionalGeneration` class.\n * @param {Object} config 
The model configuration.\n * @param {any} session session for the model.\n * @param {any} decoder_merged_session session for the decoder.\n * @param {GenerationConfig} generation_config The generation configuration.\n */\n constructor(config, session, decoder_merged_session, generation_config) {\n super(config, session);\n this.decoder_merged_session = decoder_merged_session;\n this.generation_config = generation_config;\n\n this.num_decoder_layers = this.config.num_decoder_layers;\n this.num_decoder_heads = this.config.num_heads;\n this.decoder_dim_kv = this.config.d_kv;\n\n this.num_encoder_layers = this.config.num_layers;\n this.num_encoder_heads = this.config.num_heads;\n this.encoder_dim_kv = this.config.d_kv;\n }\n}\n//////////////////////////////////////////////////\n\n\n//////////////////////////////////////////////////\n// MT5 models\nexport class MT5PreTrainedModel extends PreTrainedModel { };\n\nexport class MT5Model extends MT5PreTrainedModel { }\n\n/**\n * A class representing a conditional sequence-to-sequence model based on the MT5 architecture.\n */\nexport class MT5ForConditionalGeneration extends MT5PreTrainedModel {\n\n /**\n * Creates a new instance of the `MT5ForConditionalGeneration` class.\n * @param {any} config The model configuration.\n * @param {any} session The ONNX session containing the encoder weights.\n * @param {any} decoder_merged_session The ONNX session containing the merged decoder weights.\n * @param {GenerationConfig} generation_config The generation configuration.\n */\n constructor(config, session, decoder_merged_session, generation_config) {\n super(config, session);\n this.decoder_merged_session = decoder_merged_session;\n this.generation_config = generation_config;\n\n this.num_decoder_layers = this.config.num_decoder_layers;\n this.num_decoder_heads = this.config.num_heads;\n this.decoder_dim_kv = this.config.d_kv;\n\n this.num_encoder_layers = this.config.num_layers;\n this.num_encoder_heads = this.config.num_heads;\n this.encoder_dim_kv = this.config.d_kv;\n }\n}\n//////////////////////////////////////////////////\n\n//////////////////////////////////////////////////\n// Bart models\nexport class BartPretrainedModel extends PreTrainedModel { };\n\n/**\n * The bare BART Model outputting raw hidden-states without any specific head on top.\n */\nexport class BartModel extends BartPretrainedModel { }\n\n/**\n * The BART Model with a language modeling head. 
Can be used for summarization.\n */\nexport class BartForConditionalGeneration extends BartPretrainedModel {\n\n /**\n * Creates a new instance of the `BartForConditionalGeneration` class.\n * @param {Object} config The configuration object for the Bart model.\n * @param {Object} session The ONNX session used to execute the model.\n * @param {Object} decoder_merged_session The ONNX session used to execute the decoder.\n * @param {Object} generation_config The generation configuration object.\n */\n constructor(config, session, decoder_merged_session, generation_config) {\n super(config, session);\n this.decoder_merged_session = decoder_merged_session;\n this.generation_config = generation_config;\n\n this.num_decoder_layers = this.config.decoder_layers;\n this.num_decoder_heads = this.config.decoder_attention_heads;\n this.decoder_dim_kv = this.config.d_model / this.num_decoder_heads;\n\n this.num_encoder_layers = this.config.encoder_layers;\n this.num_encoder_heads = this.config.encoder_attention_heads;\n this.encoder_dim_kv = this.config.d_model / this.num_encoder_heads;\n }\n\n}\n\n/**\n * Bart model with a sequence classification head on top (a linear layer on top of the pooled output)\n */\nexport class BartForSequenceClassification extends BartPretrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.\n */\n async _call(model_inputs) {\n return new SequenceClassifierOutput(await super._call(model_inputs));\n }\n}\n\n//////////////////////////////////////////////////\n\n//////////////////////////////////////////////////\n// MBart models\nexport class MBartPreTrainedModel extends PreTrainedModel { };\n\n/**\n * The bare MBART Model outputting raw hidden-states without any specific head on top.\n */\nexport class MBartModel extends MBartPreTrainedModel { }\n\n/**\n * The MBART Model with a language modeling head. 
Can be used for summarization, after fine-tuning the pretrained models.\n */\nexport class MBartForConditionalGeneration extends MBartPreTrainedModel {\n\n /**\n * Creates a new instance of the `MBartForConditionalGeneration` class.\n * @param {Object} config The configuration object for the MBart model.\n * @param {Object} session The ONNX session used to execute the model.\n * @param {Object} decoder_merged_session The ONNX session used to execute the decoder.\n * @param {Object} generation_config The generation configuration object.\n */\n constructor(config, session, decoder_merged_session, generation_config) {\n super(config, session);\n this.decoder_merged_session = decoder_merged_session;\n this.generation_config = generation_config;\n\n this.num_decoder_layers = this.config.decoder_layers;\n this.num_decoder_heads = this.config.decoder_attention_heads;\n this.decoder_dim_kv = this.config.d_model / this.num_decoder_heads;\n\n this.num_encoder_layers = this.config.encoder_layers;\n this.num_encoder_heads = this.config.encoder_attention_heads;\n this.encoder_dim_kv = this.config.d_model / this.num_encoder_heads;\n }\n\n}\n\n/**\n * MBart model with a sequence classification head on top (a linear layer on top of the pooled output).\n */\nexport class MBartForSequenceClassification extends MBartPreTrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.\n */\n async _call(model_inputs) {\n return new SequenceClassifierOutput(await super._call(model_inputs));\n }\n}\n\n\nexport class MBartForCausalLM extends MBartPreTrainedModel {\n /**\n * Creates a new instance of the `MBartForCausalLM` class.\n * @param {Object} config Configuration object for the model.\n * @param {Object} decoder_merged_session ONNX Session object for the decoder.\n * @param {Object} generation_config Configuration object for the generation process.\n */\n constructor(config, decoder_merged_session, generation_config) {\n super(config, decoder_merged_session);\n this.generation_config = generation_config;\n\n this.num_decoder_layers = this.config.decoder_layers;\n this.num_decoder_heads = this.config.decoder_attention_heads;\n this.decoder_dim_kv = this.config.d_model / this.num_decoder_heads;\n\n this.num_encoder_layers = this.config.encoder_layers;\n this.num_encoder_heads = this.config.encoder_attention_heads;\n this.encoder_dim_kv = this.config.d_model / this.num_encoder_heads;\n }\n}\n//////////////////////////////////////////////////\n\n\n//////////////////////////////////////////////////\n// Blenderbot models\nexport class BlenderbotPreTrainedModel extends PreTrainedModel { };\n\n/**\n * The bare Blenderbot Model outputting raw hidden-states without any specific head on top.\n */\nexport class BlenderbotModel extends BlenderbotPreTrainedModel { }\n\n/**\n * The Blenderbot Model with a language modeling head. 
Can be used for summarization.\n */\nexport class BlenderbotForConditionalGeneration extends BlenderbotPreTrainedModel {\n\n /**\n * Creates a new instance of the `BlenderbotForConditionalGeneration` class.\n * @param {any} config The model configuration.\n * @param {any} session The ONNX session containing the encoder weights.\n * @param {any} decoder_merged_session The ONNX session containing the merged decoder weights.\n * @param {GenerationConfig} generation_config The generation configuration.\n */\n constructor(config, session, decoder_merged_session, generation_config) {\n super(config, session);\n this.decoder_merged_session = decoder_merged_session;\n this.generation_config = generation_config;\n\n this.num_decoder_layers = this.config.decoder_layers;\n this.num_decoder_heads = this.config.decoder_attention_heads;\n this.decoder_dim_kv = this.config.d_model / this.num_decoder_heads;\n\n this.num_encoder_layers = this.config.encoder_layers;\n this.num_encoder_heads = this.config.encoder_attention_heads;\n this.encoder_dim_kv = this.config.d_model / this.num_encoder_heads;\n }\n}\n//////////////////////////////////////////////////\n\n\n//////////////////////////////////////////////////\n// BlenderbotSmall models\nexport class BlenderbotSmallPreTrainedModel extends PreTrainedModel { };\n\n/**\n * The bare BlenderbotSmall Model outputting raw hidden-states without any specific head on top.\n */\nexport class BlenderbotSmallModel extends BlenderbotSmallPreTrainedModel { }\n\n/**\n * The BlenderbotSmall Model with a language modeling head. Can be used for summarization.\n */\nexport class BlenderbotSmallForConditionalGeneration extends BlenderbotSmallPreTrainedModel {\n\n /**\n * Creates a new instance of the `BlenderbotSmallForConditionalGeneration` class.\n * @param {any} config The model configuration.\n * @param {any} session The ONNX session containing the encoder weights.\n * @param {any} decoder_merged_session The ONNX session containing the merged decoder weights.\n * @param {GenerationConfig} generation_config The generation configuration.\n */\n constructor(config, session, decoder_merged_session, generation_config) {\n super(config, session);\n this.decoder_merged_session = decoder_merged_session;\n this.generation_config = generation_config;\n\n this.num_decoder_layers = this.config.decoder_layers;\n this.num_decoder_heads = this.config.decoder_attention_heads;\n this.decoder_dim_kv = this.config.d_model / this.num_decoder_heads;\n\n this.num_encoder_layers = this.config.encoder_layers;\n this.num_encoder_heads = this.config.encoder_attention_heads;\n this.encoder_dim_kv = this.config.d_model / this.num_encoder_heads;\n }\n}\n//////////////////////////////////////////////////\n\n\n//////////////////////////////////////////////////\n// Roberta models\nexport class RobertaPreTrainedModel extends PreTrainedModel { }\nexport class RobertaModel extends RobertaPreTrainedModel { }\n\n/**\n * RobertaForMaskedLM class for performing masked language modeling on Roberta models.\n */\nexport class RobertaForMaskedLM extends RobertaPreTrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<MaskedLMOutput>} returned object\n */\n async _call(model_inputs) {\n return new MaskedLMOutput(await super._call(model_inputs));\n }\n}
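\n\n/**\n * RobertaForSequenceClassification class for performing sequence classification on Roberta models.\n * \n * **Example:** (added illustration; not part of the original file) sequence classification with\n * placeholder checkpoint ids:\n * \n * ```javascript\n * import { AutoTokenizer, RobertaForSequenceClassification } from '@xenova/transformers';\n * \n * // Both ids below are placeholders; substitute a real converted checkpoint.\n * let tokenizer = await AutoTokenizer.from_pretrained('Xenova/roberta-sentiment');\n * let model = await RobertaForSequenceClassification.from_pretrained('Xenova/roberta-sentiment');\n * \n * let inputs = tokenizer('I loved this movie!');\n * let { logits } = await model(inputs); // SequenceClassifierOutput logits\n * ```\n */\nexport class RobertaForSequenceClassification extends RobertaPreTrainedModel {\n 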
/**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<SequenceClassifierOutput>} returned object\n */\n async _call(model_inputs) {\n return new SequenceClassifierOutput(await super._call(model_inputs));\n }\n}\n\n/**\n * RobertaForTokenClassification class for performing token classification on Roberta models.\n */\nexport class RobertaForTokenClassification extends RobertaPreTrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification.\n */\n async _call(model_inputs) {\n return new TokenClassifierOutput(await super._call(model_inputs));\n }\n}\n\n/**\n * RobertaForQuestionAnswering class for performing question answering on Roberta models.\n */\nexport class RobertaForQuestionAnswering extends RobertaPreTrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<QuestionAnsweringModelOutput>} returned object\n */\n async _call(model_inputs) {\n return new QuestionAnsweringModelOutput(await super._call(model_inputs));\n }\n}\n//////////////////////////////////////////////////\n\n\n//////////////////////////////////////////////////\n// XLM models\n/**\n * An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models.\n */\nexport class XLMPreTrainedModel extends PreTrainedModel { }\n\n/**\n * The bare XLM Model transformer outputting raw hidden-states without any specific head on top.\n */\nexport class XLMModel extends XLMPreTrainedModel { }\n\n/**\n * The XLM Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).\n */\nexport class XLMWithLMHeadModel extends XLMPreTrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<MaskedLMOutput>} returned object\n */\n async _call(model_inputs) {\n return new MaskedLMOutput(await super._call(model_inputs));\n }\n}\n\n/**\n * XLM Model with a sequence classification/regression head on top (a linear layer on top of the pooled output)\n */\nexport class XLMForSequenceClassification extends XLMPreTrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<SequenceClassifierOutput>} returned object\n */\n async _call(model_inputs) {\n return new SequenceClassifierOutput(await super._call(model_inputs));\n }\n}\n\n/**\n * XLM Model with a token classification head on top (a linear layer on top of the hidden-states output)\n */\nexport class XLMForTokenClassification extends XLMPreTrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification.\n */\n async _call(model_inputs) {\n return new TokenClassifierOutput(await super._call(model_inputs));\n }\n}\n\n/**\n * XLM Model with a span classification head on top for extractive question-answering tasks\n */\nexport class XLMForQuestionAnswering extends XLMPreTrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<QuestionAnsweringModelOutput>} returned object\n 
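*\n * **Example:** (added illustration; not part of the original file) extractive question answering\n * with placeholder checkpoint ids:\n *\n * ```javascript\n * import { AutoTokenizer, XLMForQuestionAnswering } from '@xenova/transformers';\n *\n * let tokenizer = await AutoTokenizer.from_pretrained('Xenova/xlm-qa'); // placeholder id\n * let model = await XLMForQuestionAnswering.from_pretrained('Xenova/xlm-qa');\n *\n * let inputs = tokenizer('Who wrote the book?', { text_pair: 'The book was written by Jane.' });\n * let { start_logits, end_logits } = await model(inputs);\n * ```\n 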
*/\n async _call(model_inputs) {\n return new QuestionAnsweringModelOutput(await super._call(model_inputs));\n }\n}\n//////////////////////////////////////////////////\n\n//////////////////////////////////////////////////\n// XLMRoberta models\nexport class XLMRobertaPreTrainedModel extends PreTrainedModel { }\nexport class XLMRobertaModel extends XLMRobertaPreTrainedModel { }\n\n/**\n * XLMRobertaForMaskedLM class for performing masked language modeling on XLMRoberta models.\n */\nexport class XLMRobertaForMaskedLM extends XLMRobertaPreTrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<MaskedLMOutput>} returned object\n */\n async _call(model_inputs) {\n return new MaskedLMOutput(await super._call(model_inputs));\n }\n}\n\n/**\n * XLMRobertaForSequenceClassification class for performing sequence classification on XLMRoberta models.\n */\nexport class XLMRobertaForSequenceClassification extends XLMRobertaPreTrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<SequenceClassifierOutput>} returned object\n */\n async _call(model_inputs) {\n return new SequenceClassifierOutput(await super._call(model_inputs));\n }\n}\n\n/**\n * XLMRobertaForTokenClassification class for performing token classification on XLMRoberta models.\n */\nexport class XLMRobertaForTokenClassification extends XLMRobertaPreTrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification.\n */\n async _call(model_inputs) {\n return new TokenClassifierOutput(await super._call(model_inputs));\n }\n}\n\n/**\n * XLMRobertaForQuestionAnswering class for performing question answering on XLMRoberta models.\n */\nexport class XLMRobertaForQuestionAnswering extends XLMRobertaPreTrainedModel {\n /**\n * Calls the model on new inputs.\n *\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<QuestionAnsweringModelOutput>} returned object\n */\n async _call(model_inputs) {\n return new QuestionAnsweringModelOutput(await super._call(model_inputs));\n }\n}\n//////////////////////////////////////////////////\n\n//////////////////////////////////////////////////\n// Audio Spectrogram Transformer (AST) models\nexport class ASTPreTrainedModel extends PreTrainedModel { };\n\n/**\n * The bare AST Model transformer outputting raw hidden-states without any specific head on top.\n */\nexport class ASTModel extends ASTPreTrainedModel { }\n\n/**\n * Audio Spectrogram Transformer model with an audio classification head on top\n * (a linear layer on top of the pooled output) e.g. 
for datasets like AudioSet, Speech Commands v2.\n */\nexport class ASTForAudioClassification extends ASTPreTrainedModel { }\n//////////////////////////////////////////////////\n\n//////////////////////////////////////////////////\n// Whisper models\nexport class WhisperPreTrainedModel extends PreTrainedModel { };\n\n/**\n * WhisperModel class for training Whisper models without a language model head.\n */\nexport class WhisperModel extends WhisperPreTrainedModel { }\n\n/**\n * WhisperForConditionalGeneration class for generating conditional outputs from Whisper models.\n */\nexport class WhisperForConditionalGeneration extends WhisperPreTrainedModel {\n\n requires_attention_mask = false;\n main_input_name = 'input_features';\n\n /**\n * Creates a new instance of the `WhisperForConditionalGeneration` class.\n * @param {Object} config Configuration object for the model.\n * @param {Object} session ONNX Session object for the model.\n * @param {Object} decoder_merged_session ONNX Session object for the decoder.\n * @param {Object} generation_config Configuration object for the generation process.\n */\n constructor(config, session, decoder_merged_session, generation_config) {\n super(config, session);\n this.decoder_merged_session = decoder_merged_session;\n this.generation_config = generation_config;\n\n this.num_decoder_layers = this.config.decoder_layers;\n this.num_decoder_heads = this.config.decoder_attention_heads;\n this.decoder_dim_kv = this.config.d_model / this.num_decoder_heads;\n\n this.num_encoder_layers = this.config.encoder_layers;\n this.num_encoder_heads = this.config.encoder_attention_heads;\n this.encoder_dim_kv = this.config.d_model / this.num_encoder_heads;\n }\n\n /**\n * @typedef {Object} WhisperGenerationConfig\n * @extends GenerationConfig\n * @property {boolean} [return_timestamps=null] Whether to return the timestamps with the text. This enables the `WhisperTimestampsLogitsProcessor`.\n * @property {boolean} [return_token_timestamps=null] Whether to return token-level timestamps\n * with the text. This can be used with or without the `return_timestamps` option. To get word-level\n * timestamps, use the tokenizer to group the tokens into words.\n * @property {number} [num_frames=null] The number of audio frames available in this chunk. 
This is only used when generating word-level timestamps.\n */\n\n /**\n * Generates outputs based on input and generation configuration.\n * @param {Object} inputs Input data for the model.\n * @param {WhisperGenerationConfig} generation_config Configuration object for the generation process.\n * @param {Object} logits_processor Optional logits processor object.\n * @returns {Promise<Object>} Promise object represents the generated outputs.\n */\n async generate(\n inputs,\n generation_config = null,\n logits_processor = null,\n // {\n // return_timestamps = null,\n // return_token_timestamps = null,\n // language = null,\n // task = null,\n // } = {},\n ) {\n // Create generation config object\n generation_config = this._get_generation_config(generation_config);\n\n\n // Whisper has additional options for returning timestamps\n generation_config.return_timestamps ??= false;\n\n // TODO add language and task\n\n if (generation_config.return_timestamps) {\n logits_processor = [new WhisperTimeStampLogitsProcessor(generation_config)]\n }\n\n if (generation_config.return_token_timestamps) {\n generation_config.output_attentions = true;\n generation_config.return_dict_in_generate = true;\n\n if (generation_config.task === 'translate') {\n console.warn(\"Token-level timestamps may not be reliable for task 'translate'.\")\n }\n\n if (!generation_config.alignment_heads) {\n throw new Error(\n \"Model generation config has no `alignment_heads`, token-level timestamps not available. \" +\n \"See https://gist.github.com/hollance/42e32852f24243b748ae6bc1f985b13a on how to add this property to the generation config.\"\n )\n }\n }\n\n const outputs = await super.generate(inputs, generation_config, logits_processor);\n\n if (generation_config.return_token_timestamps && generation_config.alignment_heads) {\n outputs[\"token_timestamps\"] = this._extract_token_timestamps(\n outputs,\n generation_config.alignment_heads,\n generation_config.num_frames,\n )\n }\n\n return outputs\n }\n\n /**\n * Calculates token-level timestamps using the encoder-decoder cross-attentions and\n * dynamic time-warping (DTW) to map each output token to a position in the input audio.\n * @param {Object} generate_outputs Outputs generated by the model\n * @param {Tensor[][][]} generate_outputs.cross_attentions The cross attentions output by the model\n * @param {Tensor[][][]} generate_outputs.decoder_attentions The decoder attentions output by the model\n * @param {number[][]} generate_outputs.sequences The sequences output by the model\n * @param {number[][]} alignment_heads Alignment heads of the model\n * @param {number} [num_frames=null] Number of frames in the input audio.\n * @param {number} [time_precision=0.02] Precision of the timestamps in seconds\n * @returns {Tensor} tensor containing the timestamps in seconds for each predicted token\n */\n _extract_token_timestamps(generate_outputs, alignment_heads, num_frames = null, time_precision = 0.02) {
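\n // NOTE (added example; not part of the original file): this helper is reached by enabling\n // token-level timestamps in `generate`, roughly as follows (values illustrative):\n //\n // const outputs = await model.generate(input_features, {\n // return_token_timestamps: true,\n // alignment_heads: [[5, 3], [5, 9]], // taken from the checkpoint's generation config\n // });\n // // outputs.token_timestamps is a [batch, sequence] tensor of times in seconds\n\n if (!generate_outputs.cross_attentions) {\n throw new Error(\n \"Model outputs must contain cross attentions to extract timestamps. 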
\" +\n \"This is most likely because the model was not exported with `output_attentions=True`.\"\n )\n }\n\n let median_filter_width = this.config.median_filter_width;\n if (median_filter_width === undefined) {\n console.warn(\"Model config has no `median_filter_width`, using default value of 7.\")\n median_filter_width = 7;\n }\n\n const batchedMatrices = generate_outputs.cross_attentions.map(batch => {\n // Create a list with `decoder_layers` elements, each a tensor of shape\n // (batch size, attention_heads, output length, input length).\n let cross_attentions = Array.from({ length: this.config.decoder_layers },\n (_, i) => cat(batch.map(x => x[i]), 2)\n );\n\n let weights = stack(alignment_heads.map(([l, h]) => {\n return num_frames\n ? cross_attentions[l].slice(null, h, null, [0, num_frames])\n : cross_attentions[l].slice(null, h);\n }));\n weights = weights.transpose(1, 0, 2, 3)\n\n let [std, calculatedMean] = std_mean(weights, -2, 0, true);\n\n // Normalize and smoothen the weights.\n let smoothedWeights = weights.clone(); // [1, 8, seqLength, 1500]\n\n for (let a = 0; a < smoothedWeights.dims[0]; ++a) {\n let aTensor = smoothedWeights[a]; // [8, seqLength, 1500]\n\n for (let b = 0; b < aTensor.dims[0]; ++b) {\n let bTensor = aTensor[b]; // [seqLength, 1500]\n\n const stdTensor = std[a][b][0]; // [1500]\n const meanTensor = calculatedMean[a][b][0]; // [1500]\n\n for (let c = 0; c < bTensor.dims[0]; ++c) {\n\n let cTensor = bTensor[c]; // [1500]\n for (let d = 0; d < cTensor.data.length; ++d) {\n cTensor.data[d] = (cTensor.data[d] - meanTensor.data[d]) / stdTensor.data[d]\n }\n\n // Apply median filter.\n cTensor.data.set(medianFilter(cTensor.data, median_filter_width))\n }\n }\n }\n\n // Average the different cross-attention heads.\n const matrix = mean(smoothedWeights, 1);\n return matrix;\n });\n\n const timestampsShape = [generate_outputs.sequences.length, generate_outputs.sequences[0].length];\n\n const timestamps = new Tensor(\n 'float32',\n new Float32Array(timestampsShape[0] * timestampsShape[1]),\n timestampsShape\n );\n\n // Perform dynamic time warping on each element of the batch.\n for (let batch_idx = 0; batch_idx < timestampsShape[0]; ++batch_idx) {\n // NOTE: Since we run only one batch at a time, we can squeeze to get the same dimensions\n // as the python implementation\n const matrix = batchedMatrices[batch_idx].neg().squeeze_(0);\n let [text_indices, time_indices] = dynamicTimeWarping(matrix);\n\n let diffs = Array.from({ length: text_indices.length - 1 }, (v, i) => text_indices[i + 1] - text_indices[i]);\n let jumps = mergeArrays([1], diffs).map(x => !!x); // convert to boolean\n\n let jump_times = [];\n for (let i = 0; i < jumps.length; ++i) {\n if (jumps[i]) {\n jump_times.push(time_indices[i] * time_precision);\n // NOTE: No point in rounding here, since we set to Float32Array later\n }\n }\n timestamps[batch_idx].data.set(jump_times, 1)\n }\n\n return timestamps;\n }\n}\n//////////////////////////////////////////////////\n\n//////////////////////////////////////////////////\n/**\n * Vision Encoder-Decoder model based on OpenAI's GPT architecture for image captioning and other vision tasks\n */\nexport class VisionEncoderDecoderModel extends PreTrainedModel {\n main_input_name = 'pixel_values';\n\n /**\n * Creates a new instance of the `VisionEncoderDecoderModel` class.\n * @param {Object} config The configuration object specifying the hyperparameters and other model settings.\n * @param {Object} session The ONNX session containing the encoder model.\n * 
@param {any} decoder_merged_session The ONNX session containing the merged decoder model.\n * @param {Object} generation_config Configuration object for the generation process.\n */\n constructor(config, session, decoder_merged_session, generation_config) {\n super(config, session);\n this.decoder_merged_session = decoder_merged_session;\n this.generation_config = generation_config;\n\n // Extract configs\n const encoderConfig = this.config.encoder;\n const decoderConfig = this.config.decoder;\n\n // Validate encoder\n const encoderModelType = encoderConfig.model_type;\n const encoderModel =\n MODEL_MAPPING_NAMES_ENCODER_ONLY.get(encoderModelType)\n ?? MODEL_MAPPING_NAMES_ENCODER_DECODER.get(encoderModelType);\n if (!encoderModel) {\n console.warn(\`Model type for encoder '${encoderModelType}' not found, assuming encoder-only architecture. Please report this at https://github.com/xenova/transformers.js/issues/new/choose.\`);\n }\n\n // Validate decoder\n const decoderModel = MODEL_WITH_LM_HEAD_MAPPING_NAMES.get(decoderConfig.model_type);\n if (!decoderModel) {\n throw new Error(\`Unable to construct \\\`VisionEncoderDecoder\\\` due to unsupported decoder: \"${this.config.decoder.model_type}\"\`);\n }\n\n // @ts-ignore\n const decoderModelClass = decoderModel[1];\n // @ts-ignore\n const decoder = new decoderModelClass(decoderConfig, decoder_merged_session, generation_config);\n\n this.add_encoder_pkv = 'num_decoder_layers' in decoder;\n if (this.add_encoder_pkv) {\n // Decoder is part of an encoder-decoder model\n this.num_decoder_layers = decoder.num_decoder_layers;\n this.num_decoder_heads = decoder.num_decoder_heads;\n this.decoder_dim_kv = decoder.decoder_dim_kv;\n\n this.num_encoder_layers = decoder.num_encoder_layers;\n this.num_encoder_heads = decoder.num_encoder_heads;\n this.encoder_dim_kv = decoder.encoder_dim_kv;\n\n } else {\n // Decoder is a decoder-only model\n this.num_layers = decoder.num_layers;\n this.num_heads = decoder.num_heads;\n this.dim_kv = decoder.dim_kv;\n }\n }\n}\n//////////////////////////////////////////////////\n\n//////////////////////////////////////////////////\n// CLIP models\nexport class CLIPPreTrainedModel extends PreTrainedModel { }\n\n/**\n * CLIP Text and Vision Model with projection layers on top\n * \n * **Example:** Perform zero-shot image classification with a `CLIPModel`.\n * \n * ```javascript\n * import { AutoTokenizer, AutoProcessor, CLIPModel, RawImage } from '@xenova/transformers';\n * \n * // Load tokenizer, processor, and model\n * let tokenizer = await AutoTokenizer.from_pretrained('Xenova/clip-vit-base-patch16');\n * let processor = await AutoProcessor.from_pretrained('Xenova/clip-vit-base-patch16');\n * let model = await CLIPModel.from_pretrained('Xenova/clip-vit-base-patch16');\n * \n * // Run tokenization\n * let texts = ['a photo of a car', 'a photo of a football match']\n * let text_inputs = tokenizer(texts, { padding: true, truncation: true });\n * \n * // Read image and run processor\n * let image = await RawImage.read('https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/football-match.jpg');\n * let image_inputs = await processor(image);\n * \n * // Run model with both text and pixel inputs\n * let output = await model({ ...text_inputs, ...image_inputs });\n * // {\n * // logits_per_image: Tensor {\n * // dims: [ 1, 2 ],\n * // data: Float32Array(2) [ 18.579734802246094, 24.31830596923828 ],\n * // },\n * // logits_per_text: Tensor {\n * // dims: [ 2, 1 ],\n * // data: Float32Array(2) [ 18.579734802246094, 
24.31830596923828 ],\n * // },\n * // text_embeds: Tensor {\n * // dims: [ 2, 512 ],\n * // data: Float32Array(1024) [ ... ],\n * // },\n * // image_embeds: Tensor {\n * // dims: [ 1, 512 ],\n * // data: Float32Array(512) [ ... ],\n * // }\n * // }\n * ```\n */\nexport class CLIPModel extends CLIPPreTrainedModel { }\n\n/**\n * CLIP Text Model with a projection layer on top (a linear layer on top of the pooled output)\n * \n * **Example:** Compute text embeddings with `CLIPTextModelWithProjection`.\n * \n * ```javascript\n * import { AutoTokenizer, CLIPTextModelWithProjection } from '@xenova/transformers';\n * \n * // Load tokenizer and text model\n * const tokenizer = await AutoTokenizer.from_pretrained('Xenova/clip-vit-base-patch16');\n * const text_model = await CLIPTextModelWithProjection.from_pretrained('Xenova/clip-vit-base-patch16');\n * \n * // Run tokenization\n * let texts = ['a photo of a car', 'a photo of a football match'];\n * let text_inputs = tokenizer(texts, { padding: true, truncation: true });\n * \n * // Compute embeddings\n * const { text_embeds } = await text_model(text_inputs);\n * // Tensor {\n * // dims: [ 2, 512 ],\n * // type: 'float32',\n * // data: Float32Array(1024) [ ... ],\n * // size: 1024\n * // }\n * ```\n */\nexport class CLIPTextModelWithProjection extends CLIPPreTrainedModel {\n\n /** @type {PreTrainedModel.from_pretrained} */\n static async from_pretrained(pretrained_model_name_or_path, options = {}) {\n // Update default model file name if not provided\n options.model_file_name ??= 'text_model';\n return super.from_pretrained(pretrained_model_name_or_path, options);\n }\n}\n\n/**\n * CLIP Vision Model with a projection layer on top (a linear layer on top of the pooled output)\n * \n * **Example:** Compute vision embeddings with `CLIPVisionModelWithProjection`.\n * \n * ```javascript\n * import { AutoProcessor, CLIPVisionModelWithProjection, RawImage} from '@xenova/transformers';\n * \n * // Load processor and vision model\n * const processor = await AutoProcessor.from_pretrained('Xenova/clip-vit-base-patch16');\n * const vision_model = await CLIPVisionModelWithProjection.from_pretrained('Xenova/clip-vit-base-patch16');\n * \n * // Read image and run processor\n * let image = await RawImage.read('https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/football-match.jpg');\n * let image_inputs = await processor(image);\n * \n * // Compute embeddings\n * const { image_embeds } = await vision_model(image_inputs);\n * // Tensor {\n * // dims: [ 1, 512 ],\n * // type: 'float32',\n * // data: Float32Array(512) [ ... 
],\n * // size: 512\n * // }\n * ```\n */\nexport class CLIPVisionModelWithProjection extends CLIPPreTrainedModel {\n /** @type {PreTrainedModel.from_pretrained} */\n static async from_pretrained(pretrained_model_name_or_path, options = {}) {\n // Update default model file name if not provided\n options.model_file_name ??= 'vision_model';\n return super.from_pretrained(pretrained_model_name_or_path, options);\n }\n}\n//////////////////////////////////////////////////\n\n\n//////////////////////////////////////////////////\n// SigLIP models\nexport class SiglipPreTrainedModel extends PreTrainedModel { }\n\n/**\n * SigLIP Text and Vision Model with projection layers on top\n * \n * **Example:** Perform zero-shot image classification with a `SiglipModel`.\n * \n * ```javascript\n * import { AutoTokenizer, AutoProcessor, SiglipModel, RawImage } from '@xenova/transformers';\n * \n * // Load tokenizer, processor, and model\n * const tokenizer = await AutoTokenizer.from_pretrained('Xenova/siglip-base-patch16-224');\n * const processor = await AutoProcessor.from_pretrained('Xenova/siglip-base-patch16-224');\n * const model = await SiglipModel.from_pretrained('Xenova/siglip-base-patch16-224');\n * \n * // Run tokenization\n * const texts = ['a photo of 2 cats', 'a photo of 2 dogs'];\n * const text_inputs = tokenizer(texts, { padding: 'max_length', truncation: true });\n * \n * // Read image and run processor\n * const image = await RawImage.read('http://images.cocodataset.org/val2017/000000039769.jpg');\n * const image_inputs = await processor(image);\n * \n * // Run model with both text and pixel inputs\n * const output = await model({ ...text_inputs, ...image_inputs });\n * // {\n * // logits_per_image: Tensor {\n * // dims: [ 1, 2 ],\n * // data: Float32Array(2) [ -1.6019744873046875, -10.720091819763184 ],\n * // },\n * // logits_per_text: Tensor {\n * // dims: [ 2, 1 ],\n * // data: Float32Array(2) [ -1.6019744873046875, -10.720091819763184 ],\n * // },\n * // text_embeds: Tensor {\n * // dims: [ 2, 768 ],\n * // data: Float32Array(1536) [ ... ],\n * // },\n * // image_embeds: Tensor {\n * // dims: [ 1, 768 ],\n * // data: Float32Array(768) [ ... ],\n * // }\n * // }\n * ```\n */\nexport class SiglipModel extends SiglipPreTrainedModel { }\n\n/**\n * The text model from SigLIP without any head or projection on top.\n * \n * **Example:** Compute text embeddings with `SiglipTextModel`.\n * \n * ```javascript\n * import { AutoTokenizer, SiglipTextModel } from '@xenova/transformers';\n * \n * // Load tokenizer and text model\n * const tokenizer = await AutoTokenizer.from_pretrained('Xenova/siglip-base-patch16-224');\n * const text_model = await SiglipTextModel.from_pretrained('Xenova/siglip-base-patch16-224');\n * \n * // Run tokenization\n * const texts = ['a photo of 2 cats', 'a photo of 2 dogs'];\n * const text_inputs = tokenizer(texts, { padding: 'max_length', truncation: true });\n * \n * // Compute embeddings\n * const { pooler_output } = await text_model(text_inputs);\n * // Tensor {\n * // dims: [ 2, 768 ],\n * // type: 'float32',\n * // data: Float32Array(1536) [ ... ],\n * // size: 1536\n * // }\n * ```\n */\nexport class SiglipTextModel extends SiglipPreTrainedModel {\n\n /** @type {PreTrainedModel.from_pretrained} */\n static async from_pretrained(pretrained_model_name_or_path, options = {}) {\n // Update default model file name if not provided\n options.model_file_name ??= 'text_model';\n return super.from_pretrained(pretrained_model_name_or_path, options);\n }\n}\n\n/**\n * The vision model from SigLIP without any head or projection on top.\n * \n * **Example:** Compute vision embeddings with `SiglipVisionModel`.\n * \n * ```javascript\n * import { AutoProcessor, SiglipVisionModel, RawImage} from '@xenova/transformers';\n * \n * // Load processor and vision model\n * const processor = await AutoProcessor.from_pretrained('Xenova/siglip-base-patch16-224');\n * const vision_model = await SiglipVisionModel.from_pretrained('Xenova/siglip-base-patch16-224');\n * \n * // Read image and run processor\n * const image = await RawImage.read('https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/football-match.jpg');\n * const image_inputs = await processor(image);\n * \n * // Compute embeddings\n * const { pooler_output } = await vision_model(image_inputs);\n * // Tensor {\n * // dims: [ 1, 768 ],\n * // type: 'float32',\n * // data: Float32Array(768) [ ... ],\n * // size: 768\n * // }\n * ```\n */\nexport class SiglipVisionModel extends SiglipPreTrainedModel {\n /** @type {PreTrainedModel.from_pretrained} */\n static async from_pretrained(pretrained_model_name_or_path, options = {}) {\n // Update default model file name if not provided\n options.model_file_name ??= 'vision_model';\n return super.from_pretrained(pretrained_model_name_or_path, options);\n }\n}\n//////////////////////////////////////////////////\n\n\n//////////////////////////////////////////////////\n// ChineseCLIP models\nexport class ChineseCLIPPreTrainedModel extends PreTrainedModel { }\n
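\n/**\n * Chinese-CLIP text and vision model, analogous to `CLIPModel` above.\n * \n * **Example (sketch):** Zero-shot image classification with `ChineseCLIPModel`, mirroring the `CLIPModel` example; the `Xenova/chinese-clip-vit-base-patch16` checkpoint is an assumption, not something this file defines.\n * \n * ```javascript\n * import { AutoTokenizer, AutoProcessor, ChineseCLIPModel, RawImage } from '@xenova/transformers';\n * \n * // Load tokenizer, processor, and model (model id assumed for illustration)\n * const model_id = 'Xenova/chinese-clip-vit-base-patch16';\n * const tokenizer = await AutoTokenizer.from_pretrained(model_id);\n * const processor = await AutoProcessor.from_pretrained(model_id);\n * const model = await ChineseCLIPModel.from_pretrained(model_id);\n * \n * // Run tokenization (candidate labels in Chinese)\n * const texts = ['一辆汽车', '一场足球比赛'];\n * const text_inputs = tokenizer(texts, { padding: true, truncation: true });\n * \n * // Read image and run processor\n * const image = await RawImage.read('https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/football-match.jpg');\n * const image_inputs = await processor(image);\n * \n * // Run model with both text and pixel inputs; the output includes `logits_per_image` and `logits_per_text`\n * const output = await model({ ...text_inputs, ...image_inputs });\n * ```\n */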
\nexport class ChineseCLIPModel extends ChineseCLIPPreTrainedModel { }\n//////////////////////////////////////////////////\n\n\n//////////////////////////////////////////////////\n// CLIPSeg models\nexport class CLIPSegPreTrainedModel extends PreTrainedModel { }\n\nexport class CLIPSegModel extends CLIPSegPreTrainedModel { }\n\n/**\n * CLIPSeg model with a Transformer-based decoder on top for zero-shot and one-shot image segmentation.\n * \n * **Example:** Perform zero-shot image segmentation with a `CLIPSegForImageSegmentation` model.\n * \n * ```javascript\n * import { AutoTokenizer, AutoProcessor, CLIPSegForImageSegmentation, RawImage } from '@xenova/transformers';\n * \n * // Load tokenizer, processor, and model\n * const tokenizer = await AutoTokenizer.from_pretrained('Xenova/clipseg-rd64-refined');\n * const processor = await AutoProcessor.from_pretrained('Xenova/clipseg-rd64-refined');\n * const model = await CLIPSegForImageSegmentation.from_pretrained('Xenova/clipseg-rd64-refined');\n * \n * // Run tokenization\n * const texts = ['a glass', 'something to fill', 'wood', 'a jar'];\n * const text_inputs = tokenizer(texts, { padding: true, truncation: true });\n * \n * // Read image and run processor\n * const image = await RawImage.read('https://github.com/timojl/clipseg/blob/master/example_image.jpg?raw=true');\n * const image_inputs = await processor(image);\n * \n * // Run model with both text and pixel inputs\n * const { logits } = await model({ ...text_inputs, ...image_inputs });\n * // logits: Tensor {\n * // dims: [4, 352, 352],\n * // type: 'float32',\n * // data: Float32Array(495616) [ ... 
],\n * // size: 495616\n * // }\n * ```\n * \n * You can visualize the predictions as follows:\n * ```javascript\n * const preds = logits\n * .unsqueeze_(1)\n * .sigmoid_()\n * .mul_(255)\n * .round_()\n * .to('uint8');\n * \n * for (let i = 0; i < preds.dims[0]; ++i) {\n * const img = RawImage.fromTensor(preds[i]);\n * img.save(`prediction_${i}.png`);\n * }\n * ```\n */\nexport class CLIPSegForImageSegmentation extends CLIPSegPreTrainedModel { }\n//////////////////////////////////////////////////\n\n\n//////////////////////////////////////////////////\n// GPT2 models\nexport class GPT2PreTrainedModel extends PreTrainedModel {\n /**\n * Creates a new instance of the `GPT2PreTrainedModel` class.\n * @param {Object} config The configuration of the model.\n * @param {any} session The ONNX session containing the model weights.\n * @param {GenerationConfig} generation_config The generation configuration.\n */\n constructor(config, session, generation_config) {\n super(config, session);\n this.generation_config = generation_config;\n\n // config doesn't contain pad_token_id, so we assume it is the eos_token_id\n this.config.pad_token_id = this.config.eos_token_id\n\n this.num_heads = this.config.n_head\n this.num_layers = this.config.n_layer\n this.dim_kv = this.config.n_embd / this.num_heads;\n }\n}\n\nexport class GPT2Model extends GPT2PreTrainedModel { }\n\n/**\n * GPT-2 language model head on top of the GPT-2 base model. This model is suitable for text generation tasks.\n */\nexport class GPT2LMHeadModel extends GPT2PreTrainedModel { }\n// export class GPT2ForSequenceClassification extends GPT2PreTrainedModel {\n// TODO\n// }\n//////////////////////////////////////////////////\n\n//////////////////////////////////////////////////\n// GPTNeo models\nexport class GPTNeoPreTrainedModel extends PreTrainedModel {\n /**\n * Creates a new instance of the `GPTNeoPreTrainedModel` class.\n * @param {Object} config The configuration of the model.\n * @param {any} session The ONNX session containing the model weights.\n * @param {GenerationConfig} generation_config The generation configuration.\n */\n constructor(config, session, generation_config) {\n super(config, session);\n this.generation_config = generation_config;\n\n // config doesn't contain pad_token_id, so we assume it is the eos_token_id\n this.config.pad_token_id = this.config.eos_token_id\n\n this.num_heads = this.config.num_heads;\n this.num_layers = this.config.num_layers;\n this.dim_kv = this.config.hidden_size / this.num_heads;\n }\n}\nexport class GPTNeoModel extends GPTNeoPreTrainedModel { }\n\nexport class GPTNeoForCausalLM extends GPTNeoPreTrainedModel { }\n//////////////////////////////////////////////////\n\n//////////////////////////////////////////////////\n// GPTNeoX models\nexport class GPTNeoXPreTrainedModel extends PreTrainedModel {\n /**\n * Creates a new instance of the `GPTNeoXPreTrainedModel` class.\n * @param {Object} config The configuration of the model.\n * @param {any} session The ONNX session containing the model weights.\n * @param {GenerationConfig} generation_config The generation configuration.\n */\n constructor(config, session, generation_config) {\n super(config, session);\n this.generation_config = generation_config;\n\n // config doesn't contain pad_token_id, so we assume it is the eos_token_id\n this.config.pad_token_id = this.config.eos_token_id\n\n this.num_heads = this.config.num_attention_heads;\n this.num_layers = this.config.num_hidden_layers;\n this.dim_kv = this.config.hidden_size / 
this.num_heads;\n }\n}\nexport class GPTNeoXModel extends GPTNeoXPreTrainedModel { }\n\nexport class GPTNeoXForCausalLM extends GPTNeoXPreTrainedModel { }\n//////////////////////////////////////////////////\n\n\n//////////////////////////////////////////////////\n// GPT-J models\nexport class GPTJPreTrainedModel extends PreTrainedModel {\n /**\n * Creates a new instance of the `GPTJPreTrainedModel` class.\n * @param {Object} config The configuration of the model.\n * @param {any} session The ONNX session containing the model weights.\n * @param {GenerationConfig} generation_config The generation configuration.\n */\n constructor(config, session, generation_config) {\n super(config, session);\n this.generation_config = generation_config;\n\n // config doesn't contain pad_token_id, so we assume it is the eos_token_id\n this.config.pad_token_id = this.config.eos_token_id\n\n this.num_heads = this.config.n_head\n this.num_layers = this.config.n_layer\n this.dim_kv = this.config.n_embd / this.num_heads;\n }\n}\n\nexport class GPTJModel extends GPTJPreTrainedModel { }\n\nexport class GPTJForCausalLM extends GPTJPreTrainedModel { }\n//////////////////////////////////////////////////\n\n\n//////////////////////////////////////////////////\n// GPTBigCode models\nexport class GPTBigCodePreTrainedModel extends PreTrainedModel {\n /**\n * Creates a new instance of the `GPTBigCodePreTrainedModel` class.\n * @param {Object} config The configuration of the model.\n * @param {any} session The ONNX session containing the model weights.\n * @param {GenerationConfig} generation_config The generation configuration.\n */\n constructor(config, session, generation_config) {\n super(config, session);\n this.generation_config = generation_config;\n\n // config doesn't contain pad_token_id, so we assume it is the eos_token_id\n this.config.pad_token_id = this.config.eos_token_id\n\n this.num_heads = this.config.n_head\n this.num_layers = this.config.n_layer\n this.dim_kv = this.config.n_embd / this.num_heads;\n }\n}\n\nexport class GPTBigCodeModel extends GPTBigCodePreTrainedModel { }\n\nexport class GPTBigCodeForCausalLM extends GPTBigCodePreTrainedModel { }\n//////////////////////////////////////////////////\n\n//////////////////////////////////////////////////\n// CodeGen models\nexport class CodeGenPreTrainedModel extends PreTrainedModel {\n /**\n * Creates a new instance of the `CodeGenPreTrainedModel` class.\n * @param {Object} config The model configuration object.\n * @param {Object} session The ONNX session object.\n * @param {GenerationConfig} generation_config The generation configuration.\n */\n constructor(config, session, generation_config) {\n super(config, session);\n this.generation_config = generation_config;\n\n // config doesn't contain pad_token_id, so we assume it is the eos_token_id\n this.config.pad_token_id = this.config.eos_token_id\n\n this.num_heads = this.config.n_head\n this.num_layers = this.config.n_layer\n this.dim_kv = this.config.n_embd / this.num_heads;\n }\n}\n/**\n * CodeGenModel is a class representing a code generation model without a language model head.\n */\nexport class CodeGenModel extends CodeGenPreTrainedModel { }\n\n/**\n * CodeGenForCausalLM is a class that represents a code generation model based on the GPT-2 architecture. 
It extends the `CodeGenPreTrainedModel` class.\n */\nexport class CodeGenForCausalLM extends CodeGenPreTrainedModel { }\n//////////////////////////////////////////////////\n\n\n//////////////////////////////////////////////////\n// LLaMA models\n\n/**\n * The bare LLaMA Model outputting raw hidden-states without any specific head on top.\n */\nexport class LlamaPreTrainedModel extends PreTrainedModel {\n /**\n * Creates a new instance of the `LlamaPreTrainedModel` class.\n * @param {Object} config The model configuration object.\n * @param {Object} session The ONNX session object.\n * @param {GenerationConfig} generation_config The generation configuration.\n */\n constructor(config, session, generation_config) {\n super(config, session);\n this.generation_config = generation_config;\n\n // config doesn't contain pad_token_id, so we assume it is the eos_token_id\n this.config.pad_token_id = this.config.eos_token_id\n\n this.num_heads = this.config.num_key_value_heads ?? this.config.num_attention_heads\n this.num_layers = this.config.num_hidden_layers\n this.dim_kv = this.config.hidden_size / this.config.num_attention_heads\n }\n}\n/**\n * The bare LLaMA Model outputting raw hidden-states without any specific head on top.\n */\nexport class LlamaModel extends LlamaPreTrainedModel { }\n
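\n/**\n * LLaMA model with a causal language modeling head, for text generation.\n * \n * **Example (sketch):** Greedy text generation with `LlamaForCausalLM` (a minimal sketch; the `Xenova/TinyLlama-1.1B-Chat-v1.0` checkpoint and the generation options are assumptions, not something this file defines).\n * \n * ```javascript\n * import { AutoTokenizer, LlamaForCausalLM } from '@xenova/transformers';\n * \n * // Load tokenizer and model (model id assumed for illustration)\n * const model_id = 'Xenova/TinyLlama-1.1B-Chat-v1.0';\n * const tokenizer = await AutoTokenizer.from_pretrained(model_id);\n * const model = await LlamaForCausalLM.from_pretrained(model_id);\n * \n * // Tokenize a prompt and generate a continuation\n * const { input_ids } = tokenizer('Once upon a time,');\n * const output = await model.generate(input_ids, { max_new_tokens: 20 });\n * \n * // Decode the generated token ids back to text\n * const decoded = tokenizer.batch_decode(output, { skip_special_tokens: true });\n * ```\n */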
\nexport class LlamaForCausalLM extends LlamaPreTrainedModel { }\n//////////////////////////////////////////////////\n\n//////////////////////////////////////////////////\n// Qwen2 models\n\n/**\n * The bare Qwen2 Model outputting raw hidden-states without any specific head on top.\n */\nexport class Qwen2PreTrainedModel extends PreTrainedModel {\n /**\n * Creates a new instance of the `Qwen2PreTrainedModel` class.\n * @param {Object} config The model configuration object.\n * @param {Object} session The ONNX session object.\n * @param {GenerationConfig} generation_config The generation configuration.\n */\n constructor(config, session, generation_config) {\n super(config, session);\n this.generation_config = generation_config;\n\n // config doesn't contain pad_token_id, so we assume it is the eos_token_id\n this.config.pad_token_id = this.config.eos_token_id\n\n this.num_heads = this.config.num_key_value_heads ?? this.config.num_attention_heads\n this.num_layers = this.config.num_hidden_layers\n this.dim_kv = this.config.hidden_size / this.config.num_attention_heads\n }\n}\n/**\n * The bare Qwen2 Model outputting raw hidden-states without any specific head on top.\n */\nexport class Qwen2Model extends Qwen2PreTrainedModel { }\n\nexport class Qwen2ForCausalLM extends Qwen2PreTrainedModel { }\n//////////////////////////////////////////////////\n\n\n//////////////////////////////////////////////////\n// Phi models\n\nexport class PhiPreTrainedModel extends PreTrainedModel {\n /**\n * Creates a new instance of the `PhiPreTrainedModel` class.\n * @param {Object} config The model configuration object.\n * @param {Object} session The ONNX session object.\n * @param {GenerationConfig} generation_config The generation configuration.\n */\n constructor(config, session, generation_config) {\n super(config, session);\n this.generation_config = generation_config;\n\n // config doesn't contain pad_token_id, so we assume it is the eos_token_id\n this.config.pad_token_id = this.config.eos_token_id;\n\n this.num_heads = this.config.num_attention_heads;\n this.num_layers = this.config.num_hidden_layers;\n this.dim_kv = this.config.hidden_size / this.num_heads;\n }\n}\n/**\n * The bare Phi Model outputting raw hidden-states without any specific head on top.\n */\nexport class PhiModel extends PhiPreTrainedModel { }\n\nexport class PhiForCausalLM extends PhiPreTrainedModel { }\n//////////////////////////////////////////////////\n\n\n//////////////////////////////////////////////////\n// Bloom models\n/**\n * An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models.\n */\nexport class BloomPreTrainedModel extends PreTrainedModel {\n /**\n * Creates a new instance of the `BloomPreTrainedModel` class.\n * @param {Object} config The configuration of the model.\n * @param {any} session The ONNX session containing the model weights.\n * @param {GenerationConfig} generation_config The generation configuration.\n */\n constructor(config, session, generation_config) {\n super(config, session);\n this.generation_config = generation_config;\n\n // config doesn't contain pad_token_id, so we assume it is the eos_token_id\n this.config.pad_token_id = this.config.eos_token_id\n\n this.num_heads = this.config.n_head\n this.num_layers = this.config.n_layer\n this.dim_kv = this.config.hidden_size / this.num_heads;\n }\n}\n\n/**\n * The bare Bloom Model transformer outputting raw hidden-states without any specific head on top.\n */\nexport class BloomModel extends BloomPreTrainedModel { }\n\n/**\n * The Bloom Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).\n */\nexport class BloomForCausalLM extends BloomPreTrainedModel { }\n//////////////////////////////////////////////////\n\n//////////////////////////////////////////////////\n// MPT models\nexport class MptPreTrainedModel extends PreTrainedModel {\n /**\n * Creates a new instance of the `MptPreTrainedModel` class.\n * @param {Object} config The model configuration object.\n * @param {Object} session The ONNX session object.\n * @param {GenerationConfig} generation_config The generation configuration.\n */\n constructor(config, session, generation_config) {\n super(config, session);\n this.generation_config = generation_config;\n\n // config doesn't contain pad_token_id, so we assume it is the eos_token_id\n this.config.pad_token_id = 
this.config.eos_token_id\n\n this.num_heads = this.config.n_heads\n this.num_layers = this.config.n_layers\n this.dim_kv = this.config.d_model / this.num_heads;\n }\n}\n\n/**\n * The bare Mpt Model transformer outputting raw hidden-states without any specific head on top.\n */\nexport class MptModel extends MptPreTrainedModel { }\n\n/**\n * The MPT Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).\n */\nexport class MptForCausalLM extends MptPreTrainedModel { }\n//////////////////////////////////////////////////\n\n\n//////////////////////////////////////////////////\n// OPT models\nexport class OPTPreTrainedModel extends PreTrainedModel {\n /**\n * Creates a new instance of the `OPTPreTrainedModel` class.\n * @param {Object} config The model configuration object.\n * @param {Object} session The ONNX session object.\n * @param {GenerationConfig} generation_config The generation configuration.\n */\n constructor(config, session, generation_config) {\n super(config, session);\n this.generation_config = generation_config;\n\n // config doesn't contain pad_token_id, so we assume it is the eos_token_id\n this.config.pad_token_id = this.config.eos_token_id\n\n this.num_heads = this.config.num_attention_heads;\n this.num_layers = this.config.num_hidden_layers;\n this.dim_kv = this.config.hidden_size / this.num_heads;\n }\n}\n\n/**\n * The bare OPT Model outputting raw hidden-states without any specific head on top.\n */\nexport class OPTModel extends OPTPreTrainedModel { }\n\n/**\n * The OPT Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).\n */\nexport class OPTForCausalLM extends OPTPreTrainedModel { }\n//////////////////////////////////////////////////\n\n//////////////////////////////////////////////////\nexport class ViTPreTrainedModel extends PreTrainedModel { }\nexport class ViTModel extends ViTPreTrainedModel { }\nexport class ViTForImageClassification extends ViTPreTrainedModel {\n /**\n * @param {any} model_inputs\n */\n async _call(model_inputs) {\n return new SequenceClassifierOutput(await super._call(model_inputs));\n }\n}\n//////////////////////////////////////////////////\n\n\n//////////////////////////////////////////////////\nexport class FastViTPreTrainedModel extends PreTrainedModel { }\nexport class FastViTModel extends FastViTPreTrainedModel { }\nexport class FastViTForImageClassification extends FastViTPreTrainedModel {\n /**\n * @param {any} model_inputs\n */\n async _call(model_inputs) {\n return new SequenceClassifierOutput(await super._call(model_inputs));\n }\n}\n//////////////////////////////////////////////////\n\n//////////////////////////////////////////////////\nexport class VitMattePreTrainedModel extends PreTrainedModel { }\n\n/**\n * ViTMatte framework leveraging any vision backbone e.g. 
for ADE20k, CityScapes.\n * \n * **Example:** Perform image matting with a `VitMatteForImageMatting` model.\n * ```javascript\n * import { AutoProcessor, VitMatteForImageMatting, RawImage } from '@xenova/transformers';\n * \n * // Load processor and model\n * const processor = await AutoProcessor.from_pretrained('Xenova/vitmatte-small-distinctions-646');\n * const model = await VitMatteForImageMatting.from_pretrained('Xenova/vitmatte-small-distinctions-646');\n * \n * // Load image and trimap\n * const image = await RawImage.fromURL('https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/vitmatte_image.png');\n * const trimap = await RawImage.fromURL('https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/vitmatte_trimap.png');\n * \n * // Prepare image + trimap for the model\n * const inputs = await processor(image, trimap);\n * \n * // Predict alpha matte\n * const { alphas } = await model(inputs);\n * // Tensor {\n * // dims: [ 1, 1, 640, 960 ],\n * // type: 'float32',\n * // size: 614400,\n * // data: Float32Array(614400) [ 0.9894027709960938, 0.9970508813858032, ... ]\n * // }\n * ```\n * \n * You can visualize the alpha matte as follows:\n * ```javascript\n * import { Tensor, cat } from '@xenova/transformers';\n * \n * // Visualize predicted alpha matte\n * const imageTensor = image.toTensor();\n * \n * // Convert float (0-1) alpha matte to uint8 (0-255)\n * const alphaChannel = alphas\n * .squeeze(0)\n * .mul_(255)\n * .clamp_(0, 255)\n * .round_()\n * .to('uint8');\n * \n * // Concatenate original image with predicted alpha\n * const imageData = cat([imageTensor, alphaChannel], 0);\n * \n * // Save output image\n * const outputImage = RawImage.fromTensor(imageData);\n * outputImage.save('output.png');\n * ```\n */\nexport class VitMatteForImageMatting extends VitMattePreTrainedModel {\n /**\n * @param {any} model_inputs\n */\n async _call(model_inputs) {\n return new ImageMattingOutput(await super._call(model_inputs));\n }\n}\n//////////////////////////////////////////////////\n\n//////////////////////////////////////////////////\nexport class MobileViTPreTrainedModel extends PreTrainedModel { }\nexport class MobileViTModel extends MobileViTPreTrainedModel { }\nexport class MobileViTForImageClassification extends MobileViTPreTrainedModel {\n /**\n * @param {any} model_inputs\n */\n async _call(model_inputs) {\n return new SequenceClassifierOutput(await super._call(model_inputs));\n }\n}\n// TODO: MobileViTForSemanticSegmentation\n\n//////////////////////////////////////////////////\n\n//////////////////////////////////////////////////\nexport class MobileViTV2PreTrainedModel extends PreTrainedModel { }\nexport class MobileViTV2Model extends MobileViTV2PreTrainedModel { }\nexport class MobileViTV2ForImageClassification extends MobileViTV2PreTrainedModel {\n /**\n * @param {any} model_inputs\n */\n async _call(model_inputs) {\n return new SequenceClassifierOutput(await super._call(model_inputs));\n }\n}\n// TODO: MobileViTV2ForSemanticSegmentation\n\n//////////////////////////////////////////////////\n\n//////////////////////////////////////////////////\nexport class OwlViTPreTrainedModel extends PreTrainedModel { }\nexport class OwlViTModel extends OwlViTPreTrainedModel { }\nexport class OwlViTForObjectDetection extends OwlViTPreTrainedModel { }\n//////////////////////////////////////////////////\n\n//////////////////////////////////////////////////\nexport class Owlv2PreTrainedModel extends PreTrainedModel { }\nexport class Owlv2Model extends 
Owlv2PreTrainedModel { }\nexport class Owlv2ForObjectDetection extends Owlv2PreTrainedModel { }\n//////////////////////////////////////////////////\n\n//////////////////////////////////////////////////\n// Beit Models\nexport class BeitPreTrainedModel extends PreTrainedModel { }\nexport class BeitModel extends BeitPreTrainedModel { }\nexport class BeitForImageClassification extends BeitPreTrainedModel {\n /**\n * @param {any} model_inputs\n */\n async _call(model_inputs) {\n return new SequenceClassifierOutput(await super._call(model_inputs));\n }\n}\n//////////////////////////////////////////////////\n\n\n//////////////////////////////////////////////////\nexport class DetrPreTrainedModel extends PreTrainedModel { }\nexport class DetrModel extends DetrPreTrainedModel { }\nexport class DetrForObjectDetection extends DetrPreTrainedModel {\n /**\n * @param {any} model_inputs\n */\n async _call(model_inputs) {\n return new DetrObjectDetectionOutput(await super._call(model_inputs));\n }\n}\n\nexport class DetrForSegmentation extends DetrPreTrainedModel {\n /**\n * Runs the model with the provided inputs\n * @param {Object} model_inputs Model inputs\n * @returns {Promise<DetrSegmentationOutput>} Object containing segmentation outputs\n */\n async _call(model_inputs) {\n return new DetrSegmentationOutput(await super._call(model_inputs));\n }\n}\n\nexport class DetrObjectDetectionOutput extends ModelOutput {\n /**\n * @param {Object} output The output of the model.\n * @param {Tensor} output.logits Classification logits (including no-object) for all queries.\n * @param {Tensor} output.pred_boxes Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height).\n * These values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding possible padding).\n */\n constructor({ logits, pred_boxes }) {\n super();\n this.logits = logits;\n this.pred_boxes = pred_boxes;\n }\n}\n\nexport class DetrSegmentationOutput extends ModelOutput {\n /**\n * @param {Object} output The output of the model.\n * @param {Tensor} output.logits The output logits of the model.\n * @param {Tensor} output.pred_boxes Predicted boxes.\n * @param {Tensor} output.pred_masks Predicted masks.\n */\n constructor({ logits, pred_boxes, pred_masks }) {\n super();\n this.logits = logits;\n this.pred_boxes = pred_boxes;\n this.pred_masks = pred_masks;\n }\n}\n//////////////////////////////////////////////////\n\n//////////////////////////////////////////////////\nexport class TableTransformerPreTrainedModel extends PreTrainedModel { }\n\n/**\n * The bare Table Transformer Model (consisting of a backbone and encoder-decoder Transformer)\n * outputting raw hidden-states without any specific head on top.\n */\nexport class TableTransformerModel extends TableTransformerPreTrainedModel { }\n\n/**\n * Table Transformer Model (consisting of a backbone and encoder-decoder Transformer)\n * with object detection heads on top, for tasks such as COCO detection.\n */\nexport class TableTransformerForObjectDetection extends TableTransformerPreTrainedModel {\n /**\n * @param {any} model_inputs\n */\n async _call(model_inputs) {\n return new TableTransformerObjectDetectionOutput(await super._call(model_inputs));\n }\n}\nexport class TableTransformerObjectDetectionOutput extends DetrObjectDetectionOutput { }\n//////////////////////////////////////////////////\n\n\n//////////////////////////////////////////////////\nexport class DeiTPreTrainedModel extends PreTrainedModel { 
}\nexport class DeiTModel extends DeiTPreTrainedModel { }\nexport class DeiTForImageClassification extends DeiTPreTrainedModel {\n /**\n * @param {any} model_inputs\n */\n async _call(model_inputs) {\n return new SequenceClassifierOutput(await super._call(model_inputs));\n }\n}\n//////////////////////////////////////////////////\n\n\n//////////////////////////////////////////////////\n/**\n * An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models.\n */\nexport class ResNetPreTrainedModel extends PreTrainedModel { }\n\n/**\n * The bare ResNet model outputting raw features without any specific head on top.\n */\nexport class ResNetModel extends ResNetPreTrainedModel { }\n\n/**\n * ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet.\n */\nexport class ResNetForImageClassification extends ResNetPreTrainedModel {\n /**\n * @param {any} model_inputs\n */\n async _call(model_inputs) {\n return new SequenceClassifierOutput(await super._call(model_inputs));\n }\n}\n//////////////////////////////////////////////////\n\n\n//////////////////////////////////////////////////\nexport class SwinPreTrainedModel extends PreTrainedModel { }\nexport class SwinModel extends SwinPreTrainedModel { }\nexport class SwinForImageClassification extends SwinPreTrainedModel {\n /**\n * @param {any} model_inputs\n */\n async _call(model_inputs) {\n return new SequenceClassifierOutput(await super._call(model_inputs));\n }\n}\n//////////////////////////////////////////////////\n\n//////////////////////////////////////////////////\nexport class Swin2SRPreTrainedModel extends PreTrainedModel { }\n\n/**\n * The bare Swin2SR Model transformer outputting raw hidden-states without any specific head on top.\n */\nexport class Swin2SRModel extends Swin2SRPreTrainedModel { }\n\n/**\n * Swin2SR Model transformer with an upsampler head on top for image super resolution and restoration.\n * \n * **Example:** Super-resolution w/ `Xenova/swin2SR-classical-sr-x2-64`.\n * \n * ```javascript\n * import { AutoProcessor, Swin2SRForImageSuperResolution, RawImage } from '@xenova/transformers';\n * \n * // Load processor and model\n * const model_id = 'Xenova/swin2SR-classical-sr-x2-64';\n * const processor = await AutoProcessor.from_pretrained(model_id);\n * const model = await Swin2SRForImageSuperResolution.from_pretrained(model_id);\n * \n * // Prepare model inputs\n * const url = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/butterfly.jpg';\n * const image = await RawImage.fromURL(url);\n * const inputs = await processor(image);\n * \n * // Run model\n * const outputs = await model(inputs);\n * \n * // Convert Tensor to RawImage\n * const output = outputs.reconstruction.squeeze().clamp_(0, 1).mul_(255).round_().to('uint8');\n * const outputImage = RawImage.fromTensor(output);\n * // RawImage {\n * // data: Uint8Array(786432) [ 41, 31, 24, ... 
],\n * // width: 512,\n * // height: 512,\n * // channels: 3\n * // }\n * ```\n */\nexport class Swin2SRForImageSuperResolution extends Swin2SRPreTrainedModel { }\n//////////////////////////////////////////////////\n\n//////////////////////////////////////////////////\nexport class DPTPreTrainedModel extends PreTrainedModel { }\n\n/**\n * The bare DPT Model transformer outputting raw hidden-states without any specific head on top.\n */\nexport class DPTModel extends DPTPreTrainedModel { }\n\n/**\n * DPT Model with a depth estimation head on top (consisting of 3 convolutional layers) e.g. for KITTI, NYUv2.\n * \n * **Example:** Depth estimation w/ `Xenova/dpt-hybrid-midas`.\n * ```javascript\n * import { DPTForDepthEstimation, AutoProcessor, RawImage, interpolate, max } from '@xenova/transformers';\n * \n * // Load model and processor\n * const model_id = 'Xenova/dpt-hybrid-midas';\n * const model = await DPTForDepthEstimation.from_pretrained(model_id);\n * const processor = await AutoProcessor.from_pretrained(model_id);\n * \n * // Load image from URL\n * const url = 'http://images.cocodataset.org/val2017/000000039769.jpg';\n * const image = await RawImage.fromURL(url);\n * \n * // Prepare image for the model\n * const inputs = await processor(image);\n * \n * // Run model\n * const { predicted_depth } = await model(inputs);\n * \n * // Interpolate to original size\n * const prediction = interpolate(predicted_depth, image.size.reverse(), 'bilinear', false);\n * \n * // Visualize the prediction\n * const formatted = prediction.mul_(255 / max(prediction.data)[0]).to('uint8');\n * const depth = RawImage.fromTensor(formatted);\n * // RawImage {\n * // data: Uint8Array(307200) [ 85, 85, 84, ... ],\n * // width: 640,\n * // height: 480,\n * // channels: 1\n * // }\n * ```\n */\nexport class DPTForDepthEstimation extends DPTPreTrainedModel { }\n//////////////////////////////////////////////////\n\n//////////////////////////////////////////////////\nexport class DepthAnythingPreTrainedModel extends PreTrainedModel { }\n\n/**\n * Depth Anything Model with a depth estimation head on top (consisting of 3 convolutional layers) e.g. for KITTI, NYUv2.\n */\nexport class DepthAnythingForDepthEstimation extends DepthAnythingPreTrainedModel { }\n//////////////////////////////////////////////////\n\n\n//////////////////////////////////////////////////\nexport class GLPNPreTrainedModel extends PreTrainedModel { }\n\n/**\n * The bare GLPN encoder (Mix-Transformer) outputting raw hidden-states without any specific head on top.\n */\nexport class GLPNModel extends GLPNPreTrainedModel { }\n\n/**\n * GLPN Model transformer with a lightweight depth estimation head on top e.g. 
for KITTI, NYUv2.\n * \n * **Example:** Depth estimation w/ `Xenova/glpn-kitti`.\n * ```javascript\n * import { GLPNForDepthEstimation, AutoProcessor, RawImage, interpolate, max } from '@xenova/transformers';\n * \n * // Load model and processor\n * const model_id = 'Xenova/glpn-kitti';\n * const model = await GLPNForDepthEstimation.from_pretrained(model_id);\n * const processor = await AutoProcessor.from_pretrained(model_id);\n * \n * // Load image from URL\n * const url = 'http://images.cocodataset.org/val2017/000000039769.jpg';\n * const image = await RawImage.fromURL(url);\n * \n * // Prepare image for the model\n * const inputs = await processor(image);\n * \n * // Run model\n * const { predicted_depth } = await model(inputs);\n * \n * // Interpolate to original size\n * const prediction = interpolate(predicted_depth, image.size.reverse(), 'bilinear', false);\n * \n * // Visualize the prediction\n * const formatted = prediction.mul_(255 / max(prediction.data)[0]).to('uint8');\n * const depth = RawImage.fromTensor(formatted);\n * // RawImage {\n * // data: Uint8Array(307200) [ 207, 169, 154, ... ],\n * // width: 640,\n * // height: 480,\n * // channels: 1\n * // }\n * ```\n */\nexport class GLPNForDepthEstimation extends GLPNPreTrainedModel { }\n//////////////////////////////////////////////////\n\n//////////////////////////////////////////////////\nexport class DonutSwinPreTrainedModel extends PreTrainedModel { }\n\n/**\n * The bare Donut Swin Model transformer outputting raw hidden-states without any specific head on top.\n * \n * **Example:** Step-by-step Document Parsing.\n * \n * ```javascript\n * import { AutoProcessor, AutoTokenizer, AutoModelForVision2Seq, RawImage } from '@xenova/transformers';\n * \n * // Choose model to use\n * const model_id = 'Xenova/donut-base-finetuned-cord-v2';\n * \n * // Prepare image inputs\n * const processor = await AutoProcessor.from_pretrained(model_id);\n * const url = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/receipt.png';\n * const image = await RawImage.read(url);\n * const image_inputs = await processor(image);\n * \n * // Prepare decoder inputs\n * const tokenizer = await AutoTokenizer.from_pretrained(model_id);\n * const task_prompt = '<s_cord-v2>';\n * const decoder_input_ids = tokenizer(task_prompt, {\n * add_special_tokens: false,\n * }).input_ids;\n * \n * // Create the model\n * const model = await AutoModelForVision2Seq.from_pretrained(model_id);\n * \n * // Run inference\n * const output = await model.generate(image_inputs.pixel_values, {\n * decoder_input_ids,\n * max_length: model.config.decoder.max_position_embeddings,\n * });\n * \n * // Decode output\n * const decoded = tokenizer.batch_decode(output)[0];\n * // <s_cord-v2><s_menu><s_nm> CINNAMON SUGAR</s_nm><s_unitprice> 17,000</s_unitprice><s_cnt> 1 x</s_cnt><s_price> 17,000</s_price></s_menu><s_sub_total><s_subtotal_price> 17,000</s_subtotal_price></s_sub_total><s_total><s_total_price> 17,000</s_total_price><s_cashprice> 20,000</s_cashprice><s_changeprice> 3,000</s_changeprice></s_total></s>\n * ```\n * \n * **Example:** Step-by-step Document Visual Question Answering (DocVQA)\n * \n * ```javascript\n * import { AutoProcessor, AutoTokenizer, AutoModelForVision2Seq, RawImage } from '@xenova/transformers';\n * \n * // Choose model to use\n * const model_id = 'Xenova/donut-base-finetuned-docvqa';\n * \n * // Prepare image inputs\n * const processor = await AutoProcessor.from_pretrained(model_id);\n * const url = 
'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/invoice.png';\n * const image = await RawImage.read(url);\n * const image_inputs = await processor(image);\n * \n * // Prepare decoder inputs\n * const tokenizer = await AutoTokenizer.from_pretrained(model_id);\n * const question = 'What is the invoice number?';\n * const task_prompt = `<s_docvqa><s_question>${question}</s_question><s_answer>`;\n * const decoder_input_ids = tokenizer(task_prompt, {\n * add_special_tokens: false,\n * }).input_ids;\n * \n * // Create the model\n * const model = await AutoModelForVision2Seq.from_pretrained(model_id);\n * \n * // Run inference\n * const output = await model.generate(image_inputs.pixel_values, {\n * decoder_input_ids,\n * max_length: model.config.decoder.max_position_embeddings,\n * });\n * \n * // Decode output\n * const decoded = tokenizer.batch_decode(output)[0];\n * // <s_docvqa><s_question> What is the invoice number?</s_question><s_answer> us-001</s_answer></s>\n * ```\n */\nexport class DonutSwinModel extends DonutSwinPreTrainedModel { }\n//////////////////////////////////////////////////\n\n\n//////////////////////////////////////////////////\nexport class ConvNextPreTrainedModel extends PreTrainedModel { }\n\n/**\n * The bare ConvNext model outputting raw features without any specific head on top.\n */\nexport class ConvNextModel extends ConvNextPreTrainedModel { }\n\n/**\n * ConvNext Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet.\n */\nexport class ConvNextForImageClassification extends ConvNextPreTrainedModel {\n /**\n * @param {any} model_inputs\n */\n async _call(model_inputs) {\n return new SequenceClassifierOutput(await super._call(model_inputs));\n }\n}\n//////////////////////////////////////////////////\n\n\n//////////////////////////////////////////////////\nexport class ConvNextV2PreTrainedModel extends PreTrainedModel { }\n\n/**\n * The bare ConvNextV2 model outputting raw features without any specific head on top.\n */\nexport class ConvNextV2Model extends ConvNextV2PreTrainedModel { }\n\n/**\n * ConvNextV2 Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet.\n */\nexport class ConvNextV2ForImageClassification extends ConvNextV2PreTrainedModel {\n /**\n * @param {any} model_inputs\n */\n async _call(model_inputs) {\n return new SequenceClassifierOutput(await super._call(model_inputs));\n }\n}\n//////////////////////////////////////////////////\n\n//////////////////////////////////////////////////\nexport class Dinov2PreTrainedModel extends PreTrainedModel { }\n\n/**\n * The bare DINOv2 Model transformer outputting raw hidden-states without any specific head on top.\n */\nexport class Dinov2Model extends Dinov2PreTrainedModel { }\n\n/**\n * Dinov2 Model transformer with an image classification head on top (a linear layer on top of the final hidden state of the [CLS] token) e.g. 
for ImageNet.\n */\nexport class Dinov2ForImageClassification extends Dinov2PreTrainedModel {\n /**\n * @param {any} model_inputs\n */\n async _call(model_inputs) {\n return new SequenceClassifierOutput(await super._call(model_inputs));\n }\n}\n//////////////////////////////////////////////////\n\n\n//////////////////////////////////////////////////\nexport class YolosPreTrainedModel extends PreTrainedModel { }\nexport class YolosModel extends YolosPreTrainedModel { }\nexport class YolosForObjectDetection extends YolosPreTrainedModel {\n /**\n * @param {any} model_inputs\n */\n async _call(model_inputs) {\n return new YolosObjectDetectionOutput(await super._call(model_inputs));\n }\n}\n\nexport class YolosObjectDetectionOutput extends ModelOutput {\n /**\n * @param {Object} output The output of the model.\n * @param {Tensor} output.logits Classification logits (including no-object) for all queries.\n * @param {Tensor} output.pred_boxes Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height).\n * These values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding possible padding).\n */\n constructor({ logits, pred_boxes }) {\n super();\n this.logits = logits;\n this.pred_boxes = pred_boxes;\n }\n}\n//////////////////////////////////////////////////\n\n\n//////////////////////////////////////////////////\nexport class SamPreTrainedModel extends PreTrainedModel { }\n\n/**\n * Segment Anything Model (SAM) for generating segmentation masks, given an input image\n * and optional 2D location and bounding boxes.\n * \n * **Example:** Perform mask generation w/ `Xenova/sam-vit-base`.\n * ```javascript\n * import { SamModel, AutoProcessor, RawImage } from '@xenova/transformers';\n * \n * const model = await SamModel.from_pretrained('Xenova/sam-vit-base');\n * const processor = await AutoProcessor.from_pretrained('Xenova/sam-vit-base');\n * \n * const img_url = 'https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png';\n * const raw_image = await RawImage.read(img_url);\n * const input_points = [[[450, 600]]] // 2D localization of a window\n * \n * const inputs = await processor(raw_image, input_points);\n * const outputs = await model(inputs);\n * \n * const masks = await processor.post_process_masks(outputs.pred_masks, inputs.original_sizes, inputs.reshaped_input_sizes);\n * // [\n * // Tensor {\n * // dims: [ 1, 3, 1764, 2646 ],\n * // type: 'bool',\n * // data: Uint8Array(14002632) [ ... 
],\n * // size: 14002632\n * // }\n * // ]\n * const scores = outputs.iou_scores;\n * // Tensor {\n * // dims: [ 1, 1, 3 ],\n * // type: 'float32',\n * // data: Float32Array(3) [\n * // 0.8892380595207214,\n * // 0.9311248064041138,\n * // 0.983696699142456\n * // ],\n * // size: 3\n * // }\n * ```\n */\nexport class SamModel extends SamPreTrainedModel {\n /**\n * Creates a new instance of the `SamModel` class.\n * @param {Object} config The configuration object specifying the hyperparameters and other model settings.\n * @param {Object} vision_encoder The ONNX session containing the vision encoder model.\n * @param {any} prompt_encoder_mask_decoder The ONNX session containing the prompt encoder and mask decoder model.\n */\n constructor(config, vision_encoder, prompt_encoder_mask_decoder) {\n super(config, vision_encoder);\n this.prompt_encoder_mask_decoder = prompt_encoder_mask_decoder;\n }\n\n /**\n * Compute image embeddings and positional image embeddings, given the pixel values of an image.\n * @param {Object} model_inputs Object containing the model inputs.\n * @param {Tensor} model_inputs.pixel_values Pixel values obtained using a `SamProcessor`.\n * @returns {Promise<{ image_embeddings: Tensor, image_positional_embeddings: Tensor }>} The image embeddings and positional image embeddings.\n */\n async get_image_embeddings({ pixel_values }) {\n // in:\n // - pixel_values: tensor.float32[batch_size,3,1024,1024]\n // \n // out:\n // - image_embeddings: tensor.float32[batch_size,256,64,64]\n // - image_positional_embeddings: tensor.float32[batch_size,256,64,64]\n return await encoderForward(this, { pixel_values })\n }\n\n /**\n * @typedef {Object} SamModelInputs Object containing the model inputs.\n * @property {Tensor} pixel_values Pixel values as a Tensor with shape `(batch_size, num_channels, height, width)`.\n * These can be obtained using a `SamProcessor`.\n * @property {Tensor} input_points Input 2D spatial points with shape `(batch_size, num_points, 2)`.\n * This is used by the prompt encoder to encode the prompt.\n * @property {Tensor} [input_labels] Input labels for the points, as a Tensor of shape `(batch_size, point_batch_size, num_points)`.\n * This is used by the prompt encoder to encode the prompt. 
There are 4 types of labels:\n * - `1`: the point is a point that contains the object of interest\n * - `0`: the point is a point that does not contain the object of interest\n * - `-1`: the point corresponds to the background\n * - `-10`: the point is a padding point, thus should be ignored by the prompt encoder\n * @property {Tensor} [image_embeddings] Image embeddings used by the mask decoder.\n * @property {Tensor} [image_positional_embeddings] Image positional embeddings used by the mask decoder.\n */\n\n /**\n * @param {SamModelInputs} model_inputs Object containing the model inputs.\n * @returns {Promise<Object>} The output of the model.\n */\n async forward(model_inputs) {\n if (!model_inputs.image_embeddings || !model_inputs.image_positional_embeddings) {\n // Compute the image embeddings if they are missing\n model_inputs = {\n ...model_inputs,\n ...(await this.get_image_embeddings(model_inputs))\n }\n }\n\n if (!model_inputs.input_labels) {\n // Set default input labels if they are missing\n const shape = model_inputs.input_points.dims.slice(0, -1);\n const numElements = shape.reduce((a, b) => a * b, 1);\n model_inputs.input_labels = new Tensor(\n 'int64',\n new BigInt64Array(numElements).fill(1n),\n shape\n );\n }\n\n // Returns:\n // - iou_scores: tensor.float32[batch_size,point_batch_size,3]\n // - pred_masks: tensor.float32[batch_size,point_batch_size,3,256,256]\n return await sessionRun(this.prompt_encoder_mask_decoder, {\n input_points: model_inputs.input_points,\n input_labels: model_inputs.input_labels,\n image_embeddings: model_inputs.image_embeddings,\n image_positional_embeddings: model_inputs.image_positional_embeddings,\n });\n }\n\n /**\n * Runs the model with the provided inputs\n * @param {Object} model_inputs Model inputs\n * @returns {Promise<SamImageSegmentationOutput>} Object containing segmentation outputs\n */\n async _call(model_inputs) {\n return new SamImageSegmentationOutput(await super._call(model_inputs));\n }\n}\n\n\n/**\n * Base class for Segment-Anything model's output.\n */\nexport class SamImageSegmentationOutput extends ModelOutput {\n /**\n * @param {Object} output The output of the model.\n * @param {Tensor} output.iou_scores The intersection-over-union (IoU) scores of the predicted masks.\n * @param {Tensor} output.pred_masks The predicted segmentation masks.\n */\n constructor({ iou_scores, pred_masks }) {\n super();\n this.iou_scores = iou_scores;\n this.pred_masks = pred_masks;\n }\n}\n//////////////////////////////////////////////////\n\n\n//////////////////////////////////////////////////\n// MarianMT models\nexport class MarianPreTrainedModel extends PreTrainedModel { };\n\nexport class MarianModel extends MarianPreTrainedModel { }\n
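\n/**\n * MarianMT model for machine translation (an encoder-decoder model with a language modeling head).\n * \n * **Example (sketch):** Translate text with `MarianMTModel` (a minimal sketch; the `Xenova/opus-mt-en-de` checkpoint is an assumption, not something this file defines).\n * \n * ```javascript\n * import { AutoTokenizer, MarianMTModel } from '@xenova/transformers';\n * \n * // Load tokenizer and model (model id assumed for illustration)\n * const model_id = 'Xenova/opus-mt-en-de';\n * const tokenizer = await AutoTokenizer.from_pretrained(model_id);\n * const model = await MarianMTModel.from_pretrained(model_id);\n * \n * // Tokenize the source text and generate the translation\n * const { input_ids } = tokenizer('Hello, how are you?');\n * const output = await model.generate(input_ids);\n * \n * // Decode the generated token ids back to text\n * const translation = tokenizer.batch_decode(output, { skip_special_tokens: true });\n * ```\n */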
this.config.d_model / this.num_encoder_heads;\n }\n}\n//////////////////////////////////////////////////\n\n//////////////////////////////////////////////////\n// M2M100 models\nexport class M2M100PreTrainedModel extends PreTrainedModel { };\n\nexport class M2M100Model extends M2M100PreTrainedModel { }\n\nexport class M2M100ForConditionalGeneration extends M2M100PreTrainedModel {\n\n /**\n * Creates a new instance of the `M2M100ForConditionalGeneration` class.\n * @param {Object} config The model configuration object.\n * @param {Object} session The ONNX session object.\n * @param {any} decoder_merged_session \n * @param {any} generation_config \n */\n constructor(config, session, decoder_merged_session, generation_config) {\n super(config, session);\n this.decoder_merged_session = decoder_merged_session;\n this.generation_config = generation_config;\n\n this.num_decoder_layers = this.config.decoder_layers;\n this.num_decoder_heads = this.config.decoder_attention_heads;\n this.decoder_dim_kv = this.config.d_model / this.num_decoder_heads;\n\n this.num_encoder_layers = this.config.encoder_layers;\n this.num_encoder_heads = this.config.encoder_attention_heads;\n this.encoder_dim_kv = this.config.d_model / this.num_encoder_heads;\n }\n\n}\n//////////////////////////////////////////////////\n\n//////////////////////////////////////////////////\n// Wav2Vec2 models\nexport class Wav2Vec2PreTrainedModel extends PreTrainedModel { };\n\n/**\n * The bare Wav2Vec2 Model transformer outputting raw hidden-states without any specific head on top.\n * \n * **Example:** Load and run a `Wav2Vec2Model` for feature extraction.\n * \n * ```javascript\n * import { AutoProcessor, AutoModel, read_audio } from '@xenova/transformers';\n * \n * // Read and preprocess audio\n * const processor = await AutoProcessor.from_pretrained('Xenova/mms-300m');\n * const audio = await read_audio('https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac', 16000);\n * const inputs = await processor(audio);\n * \n * // Run model with inputs\n * const model = await AutoModel.from_pretrained('Xenova/mms-300m');\n * const output = await model(inputs);\n * // {\n * // last_hidden_state: Tensor {\n * // dims: [ 1, 1144, 1024 ],\n * // type: 'float32',\n * // data: Float32Array(1171456) [ ... ],\n * // size: 1171456\n * // }\n * // }\n * ```\n */\nexport class Wav2Vec2Model extends Wav2Vec2PreTrainedModel { }\n\nexport class Wav2Vec2ForCTC extends Wav2Vec2PreTrainedModel {\n /**\n * @param {Object} model_inputs\n * @param {Tensor} model_inputs.input_values Float values of input raw speech waveform.\n * @param {Tensor} model_inputs.attention_mask Mask to avoid performing convolution and attention on padding token indices. 
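\n * \n * **Example:** A rough usage sketch (the checkpoint id and the greedy decoding note are illustrative assumptions, not part of this class):\n * ```javascript\n * import { AutoProcessor, Wav2Vec2ForCTC, read_audio } from '@xenova/transformers';\n * \n * const processor = await AutoProcessor.from_pretrained('Xenova/wav2vec2-base-960h');\n * const model = await Wav2Vec2ForCTC.from_pretrained('Xenova/wav2vec2-base-960h');\n * const audio = await read_audio('https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/jfk.wav', 16000);\n * const { logits } = await model(await processor(audio));\n * // logits has shape [1, num_frames, vocab_size]; a CTC decode (per-frame argmax,\n * // then collapsing repeats and blanks, as the ASR pipeline does) yields the text.\n * ```\n * 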
Mask values selected in [0, 1]\n */\n async _call(model_inputs) {\n return new CausalLMOutput(await super._call(model_inputs));\n }\n}\n\nexport class Wav2Vec2ForSequenceClassification extends Wav2Vec2PreTrainedModel {\n /**\n * Calls the model on new inputs.\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.\n */\n async _call(model_inputs) {\n return new SequenceClassifierOutput(await super._call(model_inputs));\n }\n}\n\n/**\n * Wav2Vec2 Model with a frame classification head on top for tasks like Speaker Diarization.\n */\nexport class Wav2Vec2ForAudioFrameClassification extends Wav2Vec2PreTrainedModel {\n /**\n * Calls the model on new inputs.\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for audio frame classification.\n */\n async _call(model_inputs) {\n return new TokenClassifierOutput(await super._call(model_inputs));\n }\n}\n//////////////////////////////////////////////////\n\n//////////////////////////////////////////////////\n// UniSpeech models\nexport class UniSpeechPreTrainedModel extends PreTrainedModel { };\n\n/**\n * The bare UniSpeech Model transformer outputting raw hidden-states without any specific head on top.\n */\nexport class UniSpeechModel extends UniSpeechPreTrainedModel { }\n\n/**\n * UniSpeech Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).\n */\nexport class UniSpeechForCTC extends UniSpeechPreTrainedModel {\n /**\n * @param {Object} model_inputs\n * @param {Tensor} model_inputs.input_values Float values of input raw speech waveform.\n * @param {Tensor} model_inputs.attention_mask Mask to avoid performing convolution and attention on padding token indices. Mask values selected in [0, 1]\n */\n async _call(model_inputs) {\n return new CausalLMOutput(await super._call(model_inputs));\n }\n}\n\n/**\n * UniSpeech Model with a sequence classification head on top (a linear layer over the pooled output).\n */\nexport class UniSpeechForSequenceClassification extends UniSpeechPreTrainedModel {\n /**\n * Calls the model on new inputs.\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.\n */\n async _call(model_inputs) {\n return new SequenceClassifierOutput(await super._call(model_inputs));\n }\n}\n//////////////////////////////////////////////////\n\n//////////////////////////////////////////////////\n// UniSpeechSat models\nexport class UniSpeechSatPreTrainedModel extends PreTrainedModel { };\n\n/**\n * The bare UniSpeechSat Model transformer outputting raw hidden-states without any specific head on top.\n */\nexport class UniSpeechSatModel extends UniSpeechSatPreTrainedModel { }\n\n/**\n * UniSpeechSat Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).\n */\nexport class UniSpeechSatForCTC extends UniSpeechSatPreTrainedModel {\n /**\n * @param {Object} model_inputs\n * @param {Tensor} model_inputs.input_values Float values of input raw speech waveform.\n * @param {Tensor} model_inputs.attention_mask Mask to avoid performing convolution and attention on padding token indices. 
Mask values selected in [0, 1]\n */\n async _call(model_inputs) {\n return new CausalLMOutput(await super._call(model_inputs));\n }\n}\n\n/**\n * UniSpeechSat Model with a sequence classification head on top (a linear layer over the pooled output).\n */\nexport class UniSpeechSatForSequenceClassification extends UniSpeechSatPreTrainedModel {\n /**\n * Calls the model on new inputs.\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.\n */\n async _call(model_inputs) {\n return new SequenceClassifierOutput(await super._call(model_inputs));\n }\n}\n\n/**\n * UniSpeechSat Model with a frame classification head on top for tasks like Speaker Diarization.\n */\nexport class UniSpeechSatForAudioFrameClassification extends UniSpeechSatPreTrainedModel {\n /**\n * Calls the model on new inputs.\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for audio frame classification.\n */\n async _call(model_inputs) {\n return new TokenClassifierOutput(await super._call(model_inputs));\n }\n}\n//////////////////////////////////////////////////\n\n//////////////////////////////////////////////////\n// Wav2Vec2Bert models\nexport class Wav2Vec2BertPreTrainedModel extends PreTrainedModel { };\n\n/**\n * The bare Wav2Vec2Bert Model transformer outputting raw hidden-states without any specific head on top.\n */\nexport class Wav2Vec2BertModel extends Wav2Vec2BertPreTrainedModel { }\n\n/**\n * Wav2Vec2Bert Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).\n */\nexport class Wav2Vec2BertForCTC extends Wav2Vec2BertPreTrainedModel {\n /**\n * @param {Object} model_inputs\n * @param {Tensor} model_inputs.input_features Float values of input mel-spectrogram.\n * @param {Tensor} model_inputs.attention_mask Mask to avoid performing convolution and attention on padding token indices. 
Mask values selected in [0, 1]\n */\n async _call(model_inputs) {\n return new CausalLMOutput(await super._call(model_inputs));\n }\n}\n\n/**\n * Wav2Vec2Bert Model with a sequence classification head on top (a linear layer over the pooled output).\n */\nexport class Wav2Vec2BertForSequenceClassification extends Wav2Vec2BertPreTrainedModel {\n /**\n * Calls the model on new inputs.\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.\n */\n async _call(model_inputs) {\n return new SequenceClassifierOutput(await super._call(model_inputs));\n }\n}\n//////////////////////////////////////////////////\n\n//////////////////////////////////////////////////\n// Hubert models\nexport class HubertPreTrainedModel extends PreTrainedModel { }\n\n/**\n * The bare Hubert Model transformer outputting raw hidden-states without any specific head on top.\n * \n * **Example:** Load and run a `HubertModel` for feature extraction.\n * \n * ```javascript\n * import { AutoProcessor, AutoModel, read_audio } from '@xenova/transformers';\n * \n * // Read and preprocess audio\n * const processor = await AutoProcessor.from_pretrained('Xenova/hubert-base-ls960');\n * const audio = await read_audio('https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/jfk.wav', 16000);\n * const inputs = await processor(audio);\n * \n * // Load and run model with inputs\n * const model = await AutoModel.from_pretrained('Xenova/hubert-base-ls960');\n * const output = await model(inputs);\n * // {\n * // last_hidden_state: Tensor {\n * // dims: [ 1, 549, 768 ],\n * // type: 'float32',\n * // data: Float32Array(421632) [0.0682469978928566, 0.08104046434164047, -0.4975186586380005, ...],\n * // size: 421632\n * // }\n * // }\n * ```\n */\nexport class HubertModel extends Wav2Vec2PreTrainedModel { }\n\n/**\n * Hubert Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).\n */\nexport class HubertForCTC extends Wav2Vec2PreTrainedModel {\n /**\n * @param {Object} model_inputs\n * @param {Tensor} model_inputs.input_values Float values of input raw speech waveform.\n * @param {Tensor} model_inputs.attention_mask Mask to avoid performing convolution and attention on padding token indices. 
Mask values selected in [0, 1]\n */\n async _call(model_inputs) {\n return new CausalLMOutput(await super._call(model_inputs));\n }\n}\n\n/**\n * Hubert Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like SUPERB Keyword Spotting.\n */\nexport class HubertForSequenceClassification extends Wav2Vec2PreTrainedModel {\n /**\n * Calls the model on new inputs.\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.\n */\n async _call(model_inputs) {\n return new SequenceClassifierOutput(await super._call(model_inputs));\n }\n}\n//////////////////////////////////////////////////\n\n//////////////////////////////////////////////////\n// WavLM models\n/**\n * An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models.\n */\nexport class WavLMPreTrainedModel extends PreTrainedModel { };\n\n/**\n * The bare WavLM Model transformer outputting raw hidden-states without any specific head on top.\n * \n * **Example:** Load and run a `WavLMModel` for feature extraction.\n * \n * ```javascript\n * import { AutoProcessor, AutoModel, read_audio } from '@xenova/transformers';\n * \n * // Read and preprocess audio\n * const processor = await AutoProcessor.from_pretrained('Xenova/wavlm-base');\n * const audio = await read_audio('https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/jfk.wav', 16000);\n * const inputs = await processor(audio);\n * \n * // Run model with inputs\n * const model = await AutoModel.from_pretrained('Xenova/wavlm-base');\n * const output = await model(inputs);\n * // {\n * // last_hidden_state: Tensor {\n * // dims: [ 1, 549, 768 ],\n * // type: 'float32',\n * // data: Float32Array(421632) [-0.349443256855011, -0.39341306686401367, 0.022836603224277496, ...],\n * // size: 421632\n * // }\n * // }\n * ```\n */\nexport class WavLMModel extends WavLMPreTrainedModel { }\n\n/**\n * WavLM Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).\n */\nexport class WavLMForCTC extends WavLMPreTrainedModel {\n /**\n * @param {Object} model_inputs\n * @param {Tensor} model_inputs.input_values Float values of input raw speech waveform.\n * @param {Tensor} model_inputs.attention_mask Mask to avoid performing convolution and attention on padding token indices. 
Mask values selected in [0, 1]\n */\n async _call(model_inputs) {\n return new CausalLMOutput(await super._call(model_inputs));\n }\n}\n\n/**\n * WavLM Model with a sequence classification head on top (a linear layer over the pooled output).\n */\nexport class WavLMForSequenceClassification extends WavLMPreTrainedModel {\n /**\n * Calls the model on new inputs.\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.\n */\n async _call(model_inputs) {\n return new SequenceClassifierOutput(await super._call(model_inputs));\n }\n}\n\n/**\n * WavLM Model with an XVector feature extraction head on top for tasks like Speaker Verification.\n * \n * **Example:** Extract speaker embeddings with `WavLMForXVector`.\n * ```javascript\n * import { AutoProcessor, AutoModel, read_audio } from '@xenova/transformers';\n * \n * // Read and preprocess audio\n * const processor = await AutoProcessor.from_pretrained('Xenova/wavlm-base-plus-sv');\n * const url = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/jfk.wav';\n * const audio = await read_audio(url, 16000);\n * const inputs = await processor(audio);\n * \n * // Run model with inputs\n * const model = await AutoModel.from_pretrained('Xenova/wavlm-base-plus-sv');\n * const outputs = await model(inputs);\n * // {\n * // logits: Tensor {\n * // dims: [ 1, 512 ],\n * // type: 'float32',\n * // data: Float32Array(512) [0.5847219228744507, ...],\n * // size: 512\n * // },\n * // embeddings: Tensor {\n * // dims: [ 1, 512 ],\n * // type: 'float32',\n * // data: Float32Array(512) [-0.09079201519489288, ...],\n * // size: 512\n * // }\n * // }\n * ```\n */\nexport class WavLMForXVector extends WavLMPreTrainedModel {\n /**\n * Calls the model on new inputs.\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<XVectorOutput>} An object containing the model's output logits and speaker embeddings.\n */\n async _call(model_inputs) {\n return new XVectorOutput(await super._call(model_inputs));\n }\n}\n\n/**\n * WavLM Model with a frame classification head on top for tasks like Speaker Diarization.\n * \n * **Example:** Perform speaker diarization with `WavLMForAudioFrameClassification`.\n * ```javascript\n * import { AutoProcessor, AutoModelForAudioFrameClassification, read_audio } from '@xenova/transformers';\n * \n * // Read and preprocess audio\n * const processor = await AutoProcessor.from_pretrained('Xenova/wavlm-base-plus-sd');\n * const url = 'https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/jfk.wav';\n * const audio = await read_audio(url, 16000);\n * const inputs = await processor(audio);\n * \n * // Run model with inputs\n * const model = await AutoModelForAudioFrameClassification.from_pretrained('Xenova/wavlm-base-plus-sd');\n * const { logits } = await model(inputs);\n * // {\n * // logits: Tensor {\n * // dims: [ 1, 549, 2 ], // [batch_size, num_frames, num_speakers]\n * // type: 'float32',\n * // data: Float32Array(1098) [-3.5301010608673096, ...],\n * // size: 1098\n * // }\n * // }\n * \n * const labels = logits[0].sigmoid().tolist().map(\n * frames => frames.map(speaker => speaker > 0.5 ? 
1 : 0)\n * );\n * console.log(labels); // labels is a binary array of shape (num_frames, num_speakers)\n * // [\n * // [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0],\n * // [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0],\n * // [0, 0], [0, 1], [0, 1], [0, 1], [0, 1], [0, 1],\n * // ...\n * // ]\n * ```\n */\nexport class WavLMForAudioFrameClassification extends WavLMPreTrainedModel {\n /**\n * Calls the model on new inputs.\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for audio frame classification.\n */\n async _call(model_inputs) {\n return new TokenClassifierOutput(await super._call(model_inputs));\n }\n}\n\n//////////////////////////////////////////////////\n// SpeechT5 models\n/**\n * An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models.\n */\nexport class SpeechT5PreTrainedModel extends PreTrainedModel { };\n\n/**\n * The bare SpeechT5 Encoder-Decoder Model outputting raw hidden-states without any specific pre- or post-nets.\n */\nexport class SpeechT5Model extends SpeechT5PreTrainedModel { };\n\n/**\n * SpeechT5 Model with a speech encoder and a text decoder.\n * \n * **Example:** Generate speech from text with `SpeechT5ForTextToSpeech`.\n * ```javascript\n * import { AutoTokenizer, AutoProcessor, SpeechT5ForTextToSpeech, SpeechT5HifiGan, Tensor } from '@xenova/transformers';\n * \n * // Load the tokenizer and processor\n * const tokenizer = await AutoTokenizer.from_pretrained('Xenova/speecht5_tts');\n * const processor = await AutoProcessor.from_pretrained('Xenova/speecht5_tts');\n * \n * // Load the models\n * // NOTE: We use the unquantized versions as they are more accurate\n * const model = await SpeechT5ForTextToSpeech.from_pretrained('Xenova/speecht5_tts', { quantized: false });\n * const vocoder = await SpeechT5HifiGan.from_pretrained('Xenova/speecht5_hifigan', { quantized: false });\n * \n * // Load speaker embeddings from URL\n * const speaker_embeddings_data = new Float32Array(\n * await (await fetch('https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/speaker_embeddings.bin')).arrayBuffer()\n * );\n * const speaker_embeddings = new Tensor(\n * 'float32',\n * speaker_embeddings_data,\n * [1, speaker_embeddings_data.length]\n * )\n * \n * // Run tokenization\n * const { input_ids } = tokenizer('Hello, my dog is cute');\n * \n * // Generate waveform\n * const { waveform } = await model.generate_speech(input_ids, speaker_embeddings, { vocoder });\n * console.log(waveform)\n * // Tensor {\n * // dims: [ 26112 ],\n * // type: 'float32',\n * // size: 26112,\n * // data: Float32Array(26112) [ -0.00043630177970044315, -0.00018082228780258447, ... 
],\n * // }\n * ```\n */\nexport class SpeechT5ForSpeechToText extends SpeechT5PreTrainedModel { }\n\n/**\n * SpeechT5 Model with a text encoder and a speech decoder.\n */\nexport class SpeechT5ForTextToSpeech extends SpeechT5PreTrainedModel {\n\n /**\n * Creates a new instance of the `SpeechT5ForTextToSpeech` class.\n * @param {Object} config The model configuration.\n * @param {any} session The ONNX session for the model.\n * @param {any} decoder_merged_session The ONNX session for the merged decoder.\n * @param {GenerationConfig} generation_config The generation configuration.\n */\n constructor(config, session, decoder_merged_session, generation_config) {\n super(config, session);\n this.decoder_merged_session = decoder_merged_session;\n this.generation_config = generation_config;\n\n this.num_decoder_layers = this.config.decoder_layers;\n this.num_decoder_heads = this.config.decoder_attention_heads;\n this.decoder_dim_kv = this.config.hidden_size / this.num_decoder_heads;\n\n this.num_encoder_layers = this.config.encoder_layers;\n this.num_encoder_heads = this.config.encoder_attention_heads;\n this.encoder_dim_kv = this.config.hidden_size / this.num_encoder_heads;\n }\n\n /**\n * @typedef {Object} SpeechOutput\n * @property {Tensor} [spectrogram] The predicted log-mel spectrogram of shape\n * `(output_sequence_length, config.num_mel_bins)`. Returned when no `vocoder` is provided.\n * @property {Tensor} [waveform] The predicted waveform of shape `(num_frames,)`. Returned when a `vocoder` is provided.\n * @property {Tensor} [cross_attentions] The outputs of the decoder's cross-attention layers of shape\n * `(config.decoder_layers, config.decoder_attention_heads, output_sequence_length, input_sequence_length)`. Returned when `output_cross_attentions` is `true`.\n */\n\n /**\n * Converts a sequence of input tokens into a sequence of mel spectrograms, which are subsequently turned into a speech waveform using a vocoder.\n * @param {Tensor} input_values Indices of input sequence tokens in the vocabulary.\n * @param {Tensor} speaker_embeddings Tensor containing the speaker embeddings.\n * @param {Object} options Optional parameters for generating speech.\n * @param {number} [options.threshold=0.5] The generated sequence ends when the predicted stop token probability exceeds this value.\n * @param {number} [options.minlenratio=0.0] Used to calculate the minimum required length for the output sequence.\n * @param {number} [options.maxlenratio=20.0] Used to calculate the maximum allowed length for the output sequence.\n * @param {Object} [options.vocoder=null] The vocoder that converts the mel spectrogram into a speech waveform. 
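\n * A quick sketch (re-using `model`, `input_ids`, and `speaker_embeddings` from the example above; the option values shown are just the defaults):\n * ```javascript\n * const { spectrogram } = await model.generate_speech(input_ids, speaker_embeddings, {\n *   threshold: 0.5,    // stop once a predicted stop-probability exceeds this\n *   maxlenratio: 20.0, // caps output length relative to the encoded input length\n * });\n * ```\n * 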
If `null`, the output is the mel spectrogram.\n * @param {boolean} [options.output_cross_attentions=false] Whether or not to return the attentions tensors of the decoder's cross-attention layers.\n * @returns {Promise<SpeechOutput>} A promise which resolves to an object containing the spectrogram, waveform, and cross-attention tensors.\n */\n async generate_speech(input_values, speaker_embeddings, {\n threshold = 0.5,\n minlenratio = 0.0,\n maxlenratio = 20.0,\n vocoder = null,\n // output_cross_attentions = false, // TODO add\n } = {}) {\n\n const model_inputs = {\n input_ids: input_values\n }\n\n const { encoder_outputs, encoder_attention_mask } = await encoderForward(this, model_inputs);\n\n const r = encoder_outputs.dims[1] / this.config.reduction_factor;\n const maxlen = Math.floor(r * maxlenratio);\n const minlen = Math.floor(r * minlenratio);\n\n const num_mel_bins = this.config.num_mel_bins;\n\n let spectrogramParts = [];\n let past_key_values = null;\n let decoder_outputs = null;\n let idx = 0;\n\n while (true) {\n ++idx;\n\n const use_cache_branch = boolTensor(!!decoder_outputs);\n let output_sequence;\n if (decoder_outputs) {\n output_sequence = decoder_outputs.output_sequence_out;\n } else {\n output_sequence = new Tensor(\n 'float32',\n new Float32Array(num_mel_bins),\n [1, 1, num_mel_bins],\n )\n }\n let decoderFeeds = {\n use_cache_branch,\n output_sequence,\n encoder_attention_mask: encoder_attention_mask,\n speaker_embeddings: speaker_embeddings,\n encoder_hidden_states: encoder_outputs,\n };\n\n this.addPastKeyValues(decoderFeeds, past_key_values);\n decoder_outputs = await sessionRun(this.decoder_merged_session, decoderFeeds);\n past_key_values = this.getPastKeyValues(decoder_outputs, past_key_values);\n\n const { prob, spectrum } = decoder_outputs;\n spectrogramParts.push(spectrum);\n\n if (idx >= minlen && (\n // Finished when stop token or maximum length is reached.\n Array.from(prob.data).filter(p => p >= threshold).length > 0 || idx >= maxlen\n )) {\n break;\n }\n }\n\n const spectrogram = cat(spectrogramParts);\n\n // Only run the vocoder when one was provided; otherwise return just the mel\n // spectrogram, as documented for the `vocoder` option above.\n let waveform = null;\n if (vocoder) {\n ({ waveform } = await sessionRun(vocoder.session, { spectrogram }));\n }\n\n return {\n spectrogram,\n waveform,\n // cross_attentions: null, // TODO add\n }\n }\n}\n\n/**\n * HiFi-GAN vocoder.\n * \n * See [SpeechT5ForSpeechToText](./models#module_models.SpeechT5ForSpeechToText) for example usage.\n */\nexport class SpeechT5HifiGan extends PreTrainedModel {\n main_input_name = 'spectrogram';\n}\n//////////////////////////////////////////////////\n\n\n//////////////////////////////////////////////////\n// TrOCR models\nexport class TrOCRPreTrainedModel extends PreTrainedModel {\n /**\n * Creates a new instance of the `TrOCRPreTrainedModel` class.\n * @param {Object} config The configuration of the model.\n * @param {any} session The ONNX session containing the model weights.\n * @param {GenerationConfig} generation_config The generation configuration.\n */\n constructor(config, session, generation_config) {\n super(config, session);\n this.generation_config = generation_config;\n\n // config doesn't contain pad_token_id, so we assume it is the eos_token_id\n this.config.pad_token_id = this.config.eos_token_id;\n\n this.num_encoder_layers = this.num_decoder_layers = this.config.decoder_layers;\n this.num_encoder_heads = this.num_decoder_heads = this.config.decoder_attention_heads;\n this.encoder_dim_kv = this.decoder_dim_kv = this.config.d_model / this.num_decoder_heads;\n }\n}\n\n/**\n * The TrOCR Decoder with a language modeling head.\n */\nexport class 
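/* Registered under 'trocr' in MODEL_WITH_LM_HEAD_MAPPING_NAMES below, which is how AutoModelForCausalLM resolves this class from a config's model_type. */ 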
TrOCRForCausalLM extends TrOCRPreTrainedModel { }\n\n//////////////////////////////////////////////////\n\n\n//////////////////////////////////////////////////\n// Mistral models\n/**\n * The bare Mistral Model outputting raw hidden-states without any specific head on top.\n */\nexport class MistralPreTrainedModel extends PreTrainedModel {\n /**\n * Creates a new instance of the `MistralPreTrainedModel` class.\n * @param {Object} config The configuration of the model.\n * @param {any} session The ONNX session containing the model weights.\n * @param {GenerationConfig} generation_config The generation configuration.\n */\n constructor(config, session, generation_config) {\n super(config, session);\n this.generation_config = generation_config;\n\n // config doesn't contain pad_token_id, so we assume it is the eos_token_id\n this.config.pad_token_id = this.config.eos_token_id\n\n this.num_heads = this.config.num_key_value_heads;\n this.num_layers = this.config.num_hidden_layers;\n this.dim_kv = this.config.hidden_size / this.config.num_attention_heads;\n }\n}\n\nexport class MistralModel extends MistralPreTrainedModel { }\n\nexport class MistralForCausalLM extends MistralPreTrainedModel { }\n//////////////////////////////////////////////////\n\n\n//////////////////////////////////////////////////\n// Starcoder2 models\n/**\n * The bare Starcoder2 Model outputting raw hidden-states without any specific head on top.\n */\nexport class Starcoder2PreTrainedModel extends PreTrainedModel {\n /**\n * Creates a new instance of the `Starcoder2PreTrainedModel` class.\n * @param {Object} config The configuration of the model.\n * @param {any} session The ONNX session containing the model weights.\n * @param {GenerationConfig} generation_config The generation configuration.\n */\n constructor(config, session, generation_config) {\n super(config, session);\n this.generation_config = generation_config;\n\n // config doesn't contain pad_token_id, so we assume it is the eos_token_id\n this.config.pad_token_id = this.config.eos_token_id\n\n this.num_heads = this.config.num_key_value_heads;\n this.num_layers = this.config.num_hidden_layers;\n this.dim_kv = this.config.hidden_size / this.config.num_attention_heads;\n }\n}\n\nexport class Starcoder2Model extends Starcoder2PreTrainedModel { }\n\nexport class Starcoder2ForCausalLM extends Starcoder2PreTrainedModel { }\n//////////////////////////////////////////////////\n\n\n//////////////////////////////////////////////////\n// Falcon models\n/**\n * The bare Falcon Model outputting raw hidden-states without any specific head on top.\n */\nexport class FalconPreTrainedModel extends PreTrainedModel {\n /**\n * Creates a new instance of the `FalconPreTrainedModel` class.\n * @param {Object} config The configuration of the model.\n * @param {any} session The ONNX session containing the model weights.\n * @param {GenerationConfig} generation_config The generation configuration.\n */\n constructor(config, session, generation_config) {\n super(config, session);\n this.generation_config = generation_config;\n\n // config doesn't contain pad_token_id, so we assume it is the eos_token_id\n this.config.pad_token_id = this.config.eos_token_id\n\n this.num_heads = this.config.num_attention_heads;\n this.num_layers = this.config.num_hidden_layers;\n this.dim_kv = this.config.hidden_size / this.config.num_attention_heads;\n }\n}\n\nexport class FalconModel extends FalconPreTrainedModel { }\n\nexport class FalconForCausalLM extends FalconPreTrainedModel { 
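\n // Usage sketch (the model id is a placeholder; generation is inherited from\n // PreTrainedModel.generate, as with the other *ForCausalLM classes):\n //   const model = await FalconForCausalLM.from_pretrained('<model-id>');\n //   const output = await model.generate(input_ids, { max_new_tokens: 20 });\n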
}\n//////////////////////////////////////////////////\n\n\n//////////////////////////////////////////////////\n// CLAP models\nexport class ClapPreTrainedModel extends PreTrainedModel { }\n\nexport class ClapModel extends ClapPreTrainedModel { }\n\n/**\n * CLAP Text Model with a projection layer on top (a linear layer on top of the pooled output).\n * \n * **Example:** Compute text embeddings with `ClapTextModelWithProjection`.\n * \n * ```javascript\n * import { AutoTokenizer, ClapTextModelWithProjection } from '@xenova/transformers';\n * \n * // Load tokenizer and text model\n * const tokenizer = await AutoTokenizer.from_pretrained('Xenova/clap-htsat-unfused');\n * const text_model = await ClapTextModelWithProjection.from_pretrained('Xenova/clap-htsat-unfused');\n * \n * // Run tokenization\n * const texts = ['a sound of a cat', 'a sound of a dog'];\n * const text_inputs = tokenizer(texts, { padding: true, truncation: true });\n * \n * // Compute embeddings\n * const { text_embeds } = await text_model(text_inputs);\n * // Tensor {\n * // dims: [ 2, 512 ],\n * // type: 'float32',\n * // data: Float32Array(1024) [ ... ],\n * // size: 1024\n * // }\n * ```\n */\nexport class ClapTextModelWithProjection extends ClapPreTrainedModel {\n\n /** @type {PreTrainedModel.from_pretrained} */\n static async from_pretrained(pretrained_model_name_or_path, options = {}) {\n // Update default model file name if not provided\n options.model_file_name ??= 'text_model';\n return super.from_pretrained(pretrained_model_name_or_path, options);\n }\n}\n\n/**\n * CLAP Audio Model with a projection layer on top (a linear layer on top of the pooled output).\n * \n * **Example:** Compute audio embeddings with `ClapAudioModelWithProjection`.\n * \n * ```javascript\n * import { AutoProcessor, ClapAudioModelWithProjection, read_audio } from '@xenova/transformers';\n * \n * // Load processor and audio model\n * const processor = await AutoProcessor.from_pretrained('Xenova/clap-htsat-unfused');\n * const audio_model = await ClapAudioModelWithProjection.from_pretrained('Xenova/clap-htsat-unfused');\n * \n * // Read audio and run processor\n * const audio = await read_audio('https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/cat_meow.wav');\n * const audio_inputs = await processor(audio);\n * \n * // Compute embeddings\n * const { audio_embeds } = await audio_model(audio_inputs);\n * // Tensor {\n * // dims: [ 1, 512 ],\n * // type: 'float32',\n * // data: Float32Array(512) [ ... 
],\n * // size: 512\n * // }\n * ```\n */\nexport class ClapAudioModelWithProjection extends ClapPreTrainedModel {\n /** @type {PreTrainedModel.from_pretrained} */\n static async from_pretrained(pretrained_model_name_or_path, options = {}) {\n // Update default model file name if not provided\n options.model_file_name ??= 'audio_model';\n return super.from_pretrained(pretrained_model_name_or_path, options);\n }\n}\n//////////////////////////////////////////////////\n\n\n//////////////////////////////////////////////////\n// VITS models\nexport class VitsPreTrainedModel extends PreTrainedModel { }\n\n/**\n * The complete VITS model, for text-to-speech synthesis.\n * \n * **Example:** Generate speech from text with `VitsModel`.\n * ```javascript\n * import { AutoTokenizer, VitsModel } from '@xenova/transformers';\n * \n * // Load the tokenizer and model\n * const tokenizer = await AutoTokenizer.from_pretrained('Xenova/mms-tts-eng');\n * const model = await VitsModel.from_pretrained('Xenova/mms-tts-eng');\n * \n * // Run tokenization\n * const inputs = tokenizer('I love transformers');\n * \n * // Generate waveform\n * const { waveform } = await model(inputs);\n * // Tensor {\n * // dims: [ 1, 35328 ],\n * // type: 'float32',\n * // data: Float32Array(35328) [ ... ],\n * // size: 35328,\n * // }\n * ```\n */\nexport class VitsModel extends VitsPreTrainedModel {\n /**\n * Calls the model on new inputs.\n * @param {Object} model_inputs The inputs to the model.\n * @returns {Promise<VitsModelOutput>} The outputs for the VITS model.\n */\n async _call(model_inputs) {\n return new VitsModelOutput(await super._call(model_inputs));\n }\n}\n//////////////////////////////////////////////////\n\n//////////////////////////////////////////////////\n// Segformer models\nexport class SegformerPreTrainedModel extends PreTrainedModel { }\n\n/**\n * The bare SegFormer encoder (Mix-Transformer) outputting raw hidden-states without any specific head on top.\n */\nexport class SegformerModel extends SegformerPreTrainedModel { }\n\n/**\n * SegFormer Model transformer with an image classification head on top (a linear layer on top of the final hidden states) e.g. for ImageNet.\n */\nexport class SegformerForImageClassification extends SegformerPreTrainedModel { }\n\n/**\n * SegFormer Model transformer with an all-MLP decode head on top e.g. 
for ADE20k, CityScapes.\n */\nexport class SegformerForSemanticSegmentation extends SegformerPreTrainedModel { }\n\n//////////////////////////////////////////////////\n\n//////////////////////////////////////////////////\n// StableLm models\nexport class StableLmPreTrainedModel extends PreTrainedModel {\n /**\n * Creates a new instance of the `StableLmPreTrainedModel` class.\n * @param {Object} config The configuration of the model.\n * @param {any} session The ONNX session containing the model weights.\n * @param {GenerationConfig} generation_config The generation configuration.\n */\n constructor(config, session, generation_config) {\n super(config, session);\n this.generation_config = generation_config;\n\n // config doesn't contain pad_token_id, so we assume it is the eos_token_id\n this.config.pad_token_id = this.config.eos_token_id\n\n this.num_heads = this.config.num_attention_heads;\n this.num_layers = this.config.num_hidden_layers;\n this.dim_kv = this.config.hidden_size / this.num_heads;\n }\n}\n\n/**\n * The bare StableLm Model transformer outputting raw hidden-states without any specific head on top.\n */\nexport class StableLmModel extends StableLmPreTrainedModel { }\n\n/**\n * StableLm Model with a `language modeling` head on top for Causal Language Modeling (with past).\n */\nexport class StableLmForCausalLM extends StableLmPreTrainedModel { }\n//////////////////////////////////////////////////\n\n\n//////////////////////////////////////////////////\nexport class EfficientNetPreTrainedModel extends PreTrainedModel { }\n\n/**\n * The bare EfficientNet model outputting raw features without any specific head on top.\n */\nexport class EfficientNetModel extends EfficientNetPreTrainedModel { }\n\n/**\n * EfficientNet Model with an image classification head on top (a linear layer on top of the pooled features).\n */\nexport class EfficientNetForImageClassification extends EfficientNetPreTrainedModel {\n /**\n * @param {any} model_inputs\n */\n async _call(model_inputs) {\n return new SequenceClassifierOutput(await super._call(model_inputs));\n }\n}\n//////////////////////////////////////////////////\n\n\n//////////////////////////////////////////////////\n// AutoModels, used to simplify construction of PreTrainedModels\n// (uses config to instantiate correct class)\n\n/**\n * Base class of all AutoModels. 
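The concrete Auto* classes below differ only in which mapping tables they search. For example, with dispatch driven by `config.model_type`:\n * ```javascript\n * // A hedged sketch: 'bert' is looked up in MODEL_MAPPING_NAMES_ENCODER_ONLY and resolves to BertModel.\n * const model = await AutoModel.from_pretrained('Xenova/bert-base-uncased');\n * ```\n * 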
It contains the `from_pretrained` function\n * which is used to instantiate pretrained models.\n */\nexport class PretrainedMixin {\n /**\n * Mapping from model type to model class.\n * @type {Map<string, Object>[]}\n */\n static MODEL_CLASS_MAPPINGS = null;\n\n /**\n * Whether to attempt to instantiate the base class (`PreTrainedModel`) if \n * the model type is not found in any of the mappings.\n */\n static BASE_IF_FAIL = false;\n\n\n /** @type {PreTrainedModel.from_pretrained} */\n static async from_pretrained(pretrained_model_name_or_path, {\n quantized = true,\n progress_callback = null,\n config = null,\n cache_dir = null,\n local_files_only = false,\n revision = 'main',\n model_file_name = null,\n } = {}) {\n\n let options = {\n quantized,\n progress_callback,\n config,\n cache_dir,\n local_files_only,\n revision,\n model_file_name,\n }\n config = await AutoConfig.from_pretrained(pretrained_model_name_or_path, options);\n if (!options.config) {\n // If no config was passed, reuse this config for future processing\n options.config = config;\n }\n\n if (!this.MODEL_CLASS_MAPPINGS) {\n throw new Error(\"`MODEL_CLASS_MAPPINGS` not implemented for this type of `AutoClass`: \" + this.name);\n }\n\n for (let MODEL_CLASS_MAPPING of this.MODEL_CLASS_MAPPINGS) {\n const modelInfo = MODEL_CLASS_MAPPING.get(config.model_type);\n if (!modelInfo) {\n continue; // Item not found in this mapping\n }\n return await modelInfo[1].from_pretrained(pretrained_model_name_or_path, options);\n }\n\n if (this.BASE_IF_FAIL) {\n console.warn(`Unknown model class \"${config.model_type}\", attempting to construct from base class.`);\n return await PreTrainedModel.from_pretrained(pretrained_model_name_or_path, options);\n } else {\n throw Error(`Unsupported model type: ${config.model_type}`)\n }\n }\n}\n\nconst MODEL_MAPPING_NAMES_ENCODER_ONLY = new Map([\n ['bert', ['BertModel', BertModel]],\n ['nomic_bert', ['NomicBertModel', NomicBertModel]],\n ['roformer', ['RoFormerModel', RoFormerModel]],\n ['electra', ['ElectraModel', ElectraModel]],\n ['esm', ['EsmModel', EsmModel]],\n ['convbert', ['ConvBertModel', ConvBertModel]],\n ['camembert', ['CamembertModel', CamembertModel]],\n ['deberta', ['DebertaModel', DebertaModel]],\n ['deberta-v2', ['DebertaV2Model', DebertaV2Model]],\n ['mpnet', ['MPNetModel', MPNetModel]],\n ['albert', ['AlbertModel', AlbertModel]],\n ['distilbert', ['DistilBertModel', DistilBertModel]],\n ['roberta', ['RobertaModel', RobertaModel]],\n ['xlm', ['XLMModel', XLMModel]],\n ['xlm-roberta', ['XLMRobertaModel', XLMRobertaModel]],\n ['clap', ['ClapModel', ClapModel]],\n ['clip', ['CLIPModel', CLIPModel]],\n ['clipseg', ['CLIPSegModel', CLIPSegModel]],\n ['chinese_clip', ['ChineseCLIPModel', ChineseCLIPModel]],\n ['siglip', ['SiglipModel', SiglipModel]],\n ['mobilebert', ['MobileBertModel', MobileBertModel]],\n ['squeezebert', ['SqueezeBertModel', SqueezeBertModel]],\n ['wav2vec2', ['Wav2Vec2Model', Wav2Vec2Model]],\n ['wav2vec2-bert', ['Wav2Vec2BertModel', Wav2Vec2BertModel]],\n ['unispeech', ['UniSpeechModel', UniSpeechModel]],\n ['unispeech-sat', ['UniSpeechSatModel', UniSpeechSatModel]],\n ['hubert', ['HubertModel', HubertModel]],\n ['wavlm', ['WavLMModel', WavLMModel]],\n ['audio-spectrogram-transformer', ['ASTModel', ASTModel]],\n ['vits', ['VitsModel', VitsModel]],\n\n ['detr', ['DetrModel', DetrModel]],\n ['table-transformer', ['TableTransformerModel', TableTransformerModel]],\n ['vit', ['ViTModel', ViTModel]],\n ['fastvit', ['FastViTModel', FastViTModel]],\n ['mobilevit', ['MobileViTModel', 
MobileViTModel]],\n ['mobilevitv2', ['MobileViTV2Model', MobileViTV2Model]],\n ['owlvit', ['OwlViTModel', OwlViTModel]],\n ['owlv2', ['Owlv2Model', Owlv2Model]],\n ['beit', ['BeitModel', BeitModel]],\n ['deit', ['DeiTModel', DeiTModel]],\n ['convnext', ['ConvNextModel', ConvNextModel]],\n ['convnextv2', ['ConvNextV2Model', ConvNextV2Model]],\n ['dinov2', ['Dinov2Model', Dinov2Model]],\n ['resnet', ['ResNetModel', ResNetModel]],\n ['swin', ['SwinModel', SwinModel]],\n ['swin2sr', ['Swin2SRModel', Swin2SRModel]],\n ['donut-swin', ['DonutSwinModel', DonutSwinModel]],\n ['yolos', ['YolosModel', YolosModel]],\n ['dpt', ['DPTModel', DPTModel]],\n ['glpn', ['GLPNModel', GLPNModel]],\n\n ['hifigan', ['SpeechT5HifiGan', SpeechT5HifiGan]],\n ['efficientnet', ['EfficientNetModel', EfficientNetModel]],\n\n]);\n\nconst MODEL_MAPPING_NAMES_ENCODER_DECODER = new Map([\n ['t5', ['T5Model', T5Model]],\n ['longt5', ['LongT5Model', LongT5Model]],\n ['mt5', ['MT5Model', MT5Model]],\n ['bart', ['BartModel', BartModel]],\n ['mbart', ['MBartModel', MBartModel]],\n ['marian', ['MarianModel', MarianModel]],\n ['whisper', ['WhisperModel', WhisperModel]],\n ['m2m_100', ['M2M100Model', M2M100Model]],\n ['blenderbot', ['BlenderbotModel', BlenderbotModel]],\n ['blenderbot-small', ['BlenderbotSmallModel', BlenderbotSmallModel]],\n]);\n\n\nconst MODEL_MAPPING_NAMES_DECODER_ONLY = new Map([\n ['bloom', ['BloomModel', BloomModel]],\n ['gpt2', ['GPT2Model', GPT2Model]],\n ['gptj', ['GPTJModel', GPTJModel]],\n ['gpt_bigcode', ['GPTBigCodeModel', GPTBigCodeModel]],\n ['gpt_neo', ['GPTNeoModel', GPTNeoModel]],\n ['gpt_neox', ['GPTNeoXModel', GPTNeoXModel]],\n ['codegen', ['CodeGenModel', CodeGenModel]],\n ['llama', ['LlamaModel', LlamaModel]],\n ['qwen2', ['Qwen2Model', Qwen2Model]],\n ['phi', ['PhiModel', PhiModel]],\n ['mpt', ['MptModel', MptModel]],\n ['opt', ['OPTModel', OPTModel]],\n ['mistral', ['MistralModel', MistralModel]],\n ['starcoder2', ['Starcoder2Model', Starcoder2Model]],\n ['falcon', ['FalconModel', FalconModel]],\n]);\n\nconst MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = new Map([\n ['speecht5', ['SpeechT5ForSpeechToText', SpeechT5ForSpeechToText]],\n ['whisper', ['WhisperForConditionalGeneration', WhisperForConditionalGeneration]],\n]);\n\nconst MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING_NAMES = new Map([\n ['speecht5', ['SpeechT5ForTextToSpeech', SpeechT5ForTextToSpeech]],\n]);\n\nconst MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING_NAMES = new Map([\n ['vits', ['VitsModel', VitsModel]],\n]);\n\nconst MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = new Map([\n ['bert', ['BertForSequenceClassification', BertForSequenceClassification]],\n ['roformer', ['RoFormerForSequenceClassification', RoFormerForSequenceClassification]],\n ['electra', ['ElectraForSequenceClassification', ElectraForSequenceClassification]],\n ['esm', ['EsmForSequenceClassification', EsmForSequenceClassification]],\n ['convbert', ['ConvBertForSequenceClassification', ConvBertForSequenceClassification]],\n ['camembert', ['CamembertForSequenceClassification', CamembertForSequenceClassification]],\n ['deberta', ['DebertaForSequenceClassification', DebertaForSequenceClassification]],\n ['deberta-v2', ['DebertaV2ForSequenceClassification', DebertaV2ForSequenceClassification]],\n ['mpnet', ['MPNetForSequenceClassification', MPNetForSequenceClassification]],\n ['albert', ['AlbertForSequenceClassification', AlbertForSequenceClassification]],\n ['distilbert', ['DistilBertForSequenceClassification', DistilBertForSequenceClassification]],\n ['roberta', 
['RobertaForSequenceClassification', RobertaForSequenceClassification]],\n ['xlm', ['XLMForSequenceClassification', XLMForSequenceClassification]],\n ['xlm-roberta', ['XLMRobertaForSequenceClassification', XLMRobertaForSequenceClassification]],\n ['bart', ['BartForSequenceClassification', BartForSequenceClassification]],\n ['mbart', ['MBartForSequenceClassification', MBartForSequenceClassification]],\n ['mobilebert', ['MobileBertForSequenceClassification', MobileBertForSequenceClassification]],\n ['squeezebert', ['SqueezeBertForSequenceClassification', SqueezeBertForSequenceClassification]],\n]);\n\nconst MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = new Map([\n ['bert', ['BertForTokenClassification', BertForTokenClassification]],\n ['roformer', ['RoFormerForTokenClassification', RoFormerForTokenClassification]],\n ['electra', ['ElectraForTokenClassification', ElectraForTokenClassification]],\n ['esm', ['EsmForTokenClassification', EsmForTokenClassification]],\n ['convbert', ['ConvBertForTokenClassification', ConvBertForTokenClassification]],\n ['camembert', ['CamembertForTokenClassification', CamembertForTokenClassification]],\n ['deberta', ['DebertaForTokenClassification', DebertaForTokenClassification]],\n ['deberta-v2', ['DebertaV2ForTokenClassification', DebertaV2ForTokenClassification]],\n ['mpnet', ['MPNetForTokenClassification', MPNetForTokenClassification]],\n ['distilbert', ['DistilBertForTokenClassification', DistilBertForTokenClassification]],\n ['roberta', ['RobertaForTokenClassification', RobertaForTokenClassification]],\n ['xlm', ['XLMForTokenClassification', XLMForTokenClassification]],\n ['xlm-roberta', ['XLMRobertaForTokenClassification', XLMRobertaForTokenClassification]],\n]);\n\nconst MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = new Map([\n ['t5', ['T5ForConditionalGeneration', T5ForConditionalGeneration]],\n ['longt5', ['LongT5ForConditionalGeneration', LongT5ForConditionalGeneration]],\n ['mt5', ['MT5ForConditionalGeneration', MT5ForConditionalGeneration]],\n ['bart', ['BartForConditionalGeneration', BartForConditionalGeneration]],\n ['mbart', ['MBartForConditionalGeneration', MBartForConditionalGeneration]],\n ['marian', ['MarianMTModel', MarianMTModel]],\n ['m2m_100', ['M2M100ForConditionalGeneration', M2M100ForConditionalGeneration]],\n ['blenderbot', ['BlenderbotForConditionalGeneration', BlenderbotForConditionalGeneration]],\n ['blenderbot-small', ['BlenderbotSmallForConditionalGeneration', BlenderbotSmallForConditionalGeneration]],\n]);\n\nconst MODEL_WITH_LM_HEAD_MAPPING_NAMES = new Map([\n ['bloom', ['BloomForCausalLM', BloomForCausalLM]],\n ['gpt2', ['GPT2LMHeadModel', GPT2LMHeadModel]],\n ['gptj', ['GPTJForCausalLM', GPTJForCausalLM]],\n ['gpt_bigcode', ['GPTBigCodeForCausalLM', GPTBigCodeForCausalLM]],\n ['gpt_neo', ['GPTNeoForCausalLM', GPTNeoForCausalLM]],\n ['gpt_neox', ['GPTNeoXForCausalLM', GPTNeoXForCausalLM]],\n ['codegen', ['CodeGenForCausalLM', CodeGenForCausalLM]],\n ['llama', ['LlamaForCausalLM', LlamaForCausalLM]],\n ['qwen2', ['Qwen2ForCausalLM', Qwen2ForCausalLM]],\n ['phi', ['PhiForCausalLM', PhiForCausalLM]],\n ['mpt', ['MptForCausalLM', MptForCausalLM]],\n ['opt', ['OPTForCausalLM', OPTForCausalLM]],\n ['mbart', ['MBartForCausalLM', MBartForCausalLM]],\n ['mistral', ['MistralForCausalLM', MistralForCausalLM]],\n ['starcoder2', ['Starcoder2ForCausalLM', Starcoder2ForCausalLM]],\n ['falcon', ['FalconForCausalLM', FalconForCausalLM]],\n ['trocr', ['TrOCRForCausalLM', TrOCRForCausalLM]],\n ['stablelm', ['StableLmForCausalLM', 
StableLmForCausalLM]],\n]);\n\nconst MODEL_FOR_MASKED_LM_MAPPING_NAMES = new Map([\n ['bert', ['BertForMaskedLM', BertForMaskedLM]],\n ['roformer', ['RoFormerForMaskedLM', RoFormerForMaskedLM]],\n ['electra', ['ElectraForMaskedLM', ElectraForMaskedLM]],\n ['esm', ['EsmForMaskedLM', EsmForMaskedLM]],\n ['convbert', ['ConvBertForMaskedLM', ConvBertForMaskedLM]],\n ['camembert', ['CamembertForMaskedLM', CamembertForMaskedLM]],\n ['deberta', ['DebertaForMaskedLM', DebertaForMaskedLM]],\n ['deberta-v2', ['DebertaV2ForMaskedLM', DebertaV2ForMaskedLM]],\n ['mpnet', ['MPNetForMaskedLM', MPNetForMaskedLM]],\n ['albert', ['AlbertForMaskedLM', AlbertForMaskedLM]],\n ['distilbert', ['DistilBertForMaskedLM', DistilBertForMaskedLM]],\n ['roberta', ['RobertaForMaskedLM', RobertaForMaskedLM]],\n ['xlm', ['XLMWithLMHeadModel', XLMWithLMHeadModel]],\n ['xlm-roberta', ['XLMRobertaForMaskedLM', XLMRobertaForMaskedLM]],\n ['mobilebert', ['MobileBertForMaskedLM', MobileBertForMaskedLM]],\n ['squeezebert', ['SqueezeBertForMaskedLM', SqueezeBertForMaskedLM]],\n]);\n\nconst MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = new Map([\n ['bert', ['BertForQuestionAnswering', BertForQuestionAnswering]],\n ['roformer', ['RoFormerForQuestionAnswering', RoFormerForQuestionAnswering]],\n ['electra', ['ElectraForQuestionAnswering', ElectraForQuestionAnswering]],\n ['convbert', ['ConvBertForQuestionAnswering', ConvBertForQuestionAnswering]],\n ['camembert', ['CamembertForQuestionAnswering', CamembertForQuestionAnswering]],\n ['deberta', ['DebertaForQuestionAnswering', DebertaForQuestionAnswering]],\n ['deberta-v2', ['DebertaV2ForQuestionAnswering', DebertaV2ForQuestionAnswering]],\n ['mpnet', ['MPNetForQuestionAnswering', MPNetForQuestionAnswering]],\n ['albert', ['AlbertForQuestionAnswering', AlbertForQuestionAnswering]],\n ['distilbert', ['DistilBertForQuestionAnswering', DistilBertForQuestionAnswering]],\n ['roberta', ['RobertaForQuestionAnswering', RobertaForQuestionAnswering]],\n ['xlm', ['XLMForQuestionAnswering', XLMForQuestionAnswering]],\n ['xlm-roberta', ['XLMRobertaForQuestionAnswering', XLMRobertaForQuestionAnswering]],\n ['mobilebert', ['MobileBertForQuestionAnswering', MobileBertForQuestionAnswering]],\n ['squeezebert', ['SqueezeBertForQuestionAnswering', SqueezeBertForQuestionAnswering]],\n]);\n\nconst MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = new Map([\n ['vision-encoder-decoder', ['VisionEncoderDecoderModel', VisionEncoderDecoderModel]],\n]);\n\nconst MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES = new Map([\n ['vision-encoder-decoder', ['VisionEncoderDecoderModel', VisionEncoderDecoderModel]],\n]);\n\nconst MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = new Map([\n ['vit', ['ViTForImageClassification', ViTForImageClassification]],\n ['fastvit', ['FastViTForImageClassification', FastViTForImageClassification]],\n ['mobilevit', ['MobileViTForImageClassification', MobileViTForImageClassification]],\n ['mobilevitv2', ['MobileViTV2ForImageClassification', MobileViTV2ForImageClassification]],\n ['beit', ['BeitForImageClassification', BeitForImageClassification]],\n ['deit', ['DeiTForImageClassification', DeiTForImageClassification]],\n ['convnext', ['ConvNextForImageClassification', ConvNextForImageClassification]],\n ['convnextv2', ['ConvNextV2ForImageClassification', ConvNextV2ForImageClassification]],\n ['dinov2', ['Dinov2ForImageClassification', Dinov2ForImageClassification]],\n ['resnet', ['ResNetForImageClassification', ResNetForImageClassification]],\n ['swin', ['SwinForImageClassification', 
SwinForImageClassification]],\n ['segformer', ['SegformerForImageClassification', SegformerForImageClassification]],\n ['efficientnet', ['EfficientNetForImageClassification', EfficientNetForImageClassification]],\n]);\n\nconst MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES = new Map([\n ['detr', ['DetrForObjectDetection', DetrForObjectDetection]],\n ['table-transformer', ['TableTransformerForObjectDetection', TableTransformerForObjectDetection]],\n ['yolos', ['YolosForObjectDetection', YolosForObjectDetection]],\n]);\n\nconst MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES = new Map([\n ['owlvit', ['OwlViTForObjectDetection', OwlViTForObjectDetection]],\n ['owlv2', ['Owlv2ForObjectDetection', Owlv2ForObjectDetection]],\n]);\n\nconst MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES = new Map([\n ['detr', ['DetrForSegmentation', DetrForSegmentation]],\n ['clipseg', ['CLIPSegForImageSegmentation', CLIPSegForImageSegmentation]],\n]);\n\nconst MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES = new Map([\n ['segformer', ['SegformerForSemanticSegmentation', SegformerForSemanticSegmentation]],\n]);\n\nconst MODEL_FOR_MASK_GENERATION_MAPPING_NAMES = new Map([\n ['sam', ['SamModel', SamModel]],\n]);\n\nconst MODEL_FOR_CTC_MAPPING_NAMES = new Map([\n ['wav2vec2', ['Wav2Vec2ForCTC', Wav2Vec2ForCTC]],\n ['wav2vec2-bert', ['Wav2Vec2BertForCTC', Wav2Vec2BertForCTC]],\n ['unispeech', ['UniSpeechForCTC', UniSpeechForCTC]],\n ['unispeech-sat', ['UniSpeechSatForCTC', UniSpeechSatForCTC]],\n ['wavlm', ['WavLMForCTC', WavLMForCTC]],\n ['hubert', ['HubertForCTC', HubertForCTC]],\n]);\n\nconst MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = new Map([\n ['wav2vec2', ['Wav2Vec2ForSequenceClassification', Wav2Vec2ForSequenceClassification]],\n ['wav2vec2-bert', ['Wav2Vec2BertForSequenceClassification', Wav2Vec2BertForSequenceClassification]],\n ['unispeech', ['UniSpeechForSequenceClassification', UniSpeechForSequenceClassification]],\n ['unispeech-sat', ['UniSpeechSatForSequenceClassification', UniSpeechSatForSequenceClassification]],\n ['wavlm', ['WavLMForSequenceClassification', WavLMForSequenceClassification]],\n ['hubert', ['HubertForSequenceClassification', HubertForSequenceClassification]],\n ['audio-spectrogram-transformer', ['ASTForAudioClassification', ASTForAudioClassification]],\n]);\n\nconst MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES = new Map([\n ['wavlm', ['WavLMForXVector', WavLMForXVector]],\n]);\n\nconst MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES = new Map([\n ['unispeech-sat', ['UniSpeechSatForAudioFrameClassification', UniSpeechSatForAudioFrameClassification]],\n ['wavlm', ['WavLMForAudioFrameClassification', WavLMForAudioFrameClassification]],\n ['wav2vec2', ['Wav2Vec2ForAudioFrameClassification', Wav2Vec2ForAudioFrameClassification]],\n]);\n\nconst MODEL_FOR_IMAGE_MATTING_MAPPING_NAMES = new Map([\n ['vitmatte', ['VitMatteForImageMatting', VitMatteForImageMatting]],\n]);\n\nconst MODEL_FOR_IMAGE_TO_IMAGE_MAPPING_NAMES = new Map([\n ['swin2sr', ['Swin2SRForImageSuperResolution', Swin2SRForImageSuperResolution]],\n])\n\nconst MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES = new Map([\n ['dpt', ['DPTForDepthEstimation', DPTForDepthEstimation]],\n ['depth_anything', ['DepthAnythingForDepthEstimation', DepthAnythingForDepthEstimation]],\n ['glpn', ['GLPNForDepthEstimation', GLPNForDepthEstimation]],\n])\n\n// NOTE: This is custom to Transformers.js, and is necessary because certain models\n// (e.g., CLIP) are split into vision and text components\nconst MODEL_FOR_IMAGE_FEATURE_EXTRACTION_MAPPING_NAMES = new Map([\n 
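// Vision towers only; the matching text components (e.g. CLIPTextModelWithProjection, SiglipTextModel) are registered separately via CUSTOM_MAPPING below.\n 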
['clip', ['CLIPVisionModelWithProjection', CLIPVisionModelWithProjection]],\n ['siglip', ['SiglipVisionModel', SiglipVisionModel]],\n])\n\nconst MODEL_CLASS_TYPE_MAPPING = [\n [MODEL_MAPPING_NAMES_ENCODER_ONLY, MODEL_TYPES.EncoderOnly],\n [MODEL_MAPPING_NAMES_ENCODER_DECODER, MODEL_TYPES.EncoderDecoder],\n [MODEL_MAPPING_NAMES_DECODER_ONLY, MODEL_TYPES.DecoderOnly],\n [MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, MODEL_TYPES.EncoderOnly],\n [MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, MODEL_TYPES.EncoderOnly],\n [MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, MODEL_TYPES.Seq2Seq],\n [MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES, MODEL_TYPES.Seq2Seq],\n [MODEL_WITH_LM_HEAD_MAPPING_NAMES, MODEL_TYPES.DecoderOnly],\n [MODEL_FOR_MASKED_LM_MAPPING_NAMES, MODEL_TYPES.EncoderOnly],\n [MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_TYPES.EncoderOnly],\n [MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES, MODEL_TYPES.Vision2Seq],\n [MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, MODEL_TYPES.EncoderOnly],\n [MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES, MODEL_TYPES.EncoderOnly],\n [MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, MODEL_TYPES.EncoderOnly],\n [MODEL_FOR_IMAGE_MATTING_MAPPING_NAMES, MODEL_TYPES.EncoderOnly],\n [MODEL_FOR_IMAGE_TO_IMAGE_MAPPING_NAMES, MODEL_TYPES.EncoderOnly],\n [MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES, MODEL_TYPES.EncoderOnly],\n [MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES, MODEL_TYPES.EncoderOnly],\n [MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES, MODEL_TYPES.EncoderOnly],\n [MODEL_FOR_MASK_GENERATION_MAPPING_NAMES, MODEL_TYPES.MaskGeneration],\n [MODEL_FOR_CTC_MAPPING_NAMES, MODEL_TYPES.EncoderOnly],\n [MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, MODEL_TYPES.EncoderOnly],\n [MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING_NAMES, MODEL_TYPES.Seq2Seq],\n [MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING_NAMES, MODEL_TYPES.EncoderOnly],\n [MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES, MODEL_TYPES.EncoderOnly],\n [MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES, MODEL_TYPES.EncoderOnly],\n\n // Custom:\n [MODEL_FOR_IMAGE_FEATURE_EXTRACTION_MAPPING_NAMES, MODEL_TYPES.EncoderOnly],\n];\n\nfor (const [mappings, type] of MODEL_CLASS_TYPE_MAPPING) {\n // @ts-ignore\n for (const [name, model] of mappings.values()) {\n MODEL_TYPE_MAPPING.set(name, type);\n MODEL_CLASS_TO_NAME_MAPPING.set(model, name);\n MODEL_NAME_TO_CLASS_MAPPING.set(name, model);\n }\n}\n\nconst CUSTOM_MAPPING = [\n ['CLIPTextModelWithProjection', CLIPTextModelWithProjection, MODEL_TYPES.EncoderOnly],\n ['SiglipTextModel', SiglipTextModel, MODEL_TYPES.EncoderOnly],\n ['ClapTextModelWithProjection', ClapTextModelWithProjection, MODEL_TYPES.EncoderOnly],\n ['ClapAudioModelWithProjection', ClapAudioModelWithProjection, MODEL_TYPES.EncoderOnly],\n]\nfor (const [name, model, type] of CUSTOM_MAPPING) {\n MODEL_TYPE_MAPPING.set(name, type);\n MODEL_CLASS_TO_NAME_MAPPING.set(model, name);\n MODEL_NAME_TO_CLASS_MAPPING.set(name, model);\n}\n\n\n/**\n * Helper class which is used to instantiate pretrained models with the `from_pretrained` function.\n * The chosen model class is determined by the type specified in the model config.\n * \n * @example\n * let model = await AutoModel.from_pretrained('bert-base-uncased');\n */\nexport class AutoModel extends PretrainedMixin {\n /** @type {Map<string, Object>[]} */\n // @ts-ignore\n static MODEL_CLASS_MAPPINGS = MODEL_CLASS_TYPE_MAPPING.map(x => x[0]);\n static BASE_IF_FAIL = true;\n}\n\n/**\n * Helper class which is used to instantiate pretrained sequence classification models with the `from_pretrained` 
function.\n * The chosen model class is determined by the type specified in the model config.\n * \n * @example\n * let model = await AutoModelForSequenceClassification.from_pretrained('distilbert-base-uncased-finetuned-sst-2-english');\n */\nexport class AutoModelForSequenceClassification extends PretrainedMixin {\n static MODEL_CLASS_MAPPINGS = [MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES];\n}\n\n/**\n * Helper class which is used to instantiate pretrained token classification models with the `from_pretrained` function.\n * The chosen model class is determined by the type specified in the model config.\n * \n * @example\n * let model = await AutoModelForTokenClassification.from_pretrained('Davlan/distilbert-base-multilingual-cased-ner-hrl');\n */\nexport class AutoModelForTokenClassification extends PretrainedMixin {\n static MODEL_CLASS_MAPPINGS = [MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES];\n}\n\n/**\n * Helper class which is used to instantiate pretrained sequence-to-sequence models with the `from_pretrained` function.\n * The chosen model class is determined by the type specified in the model config.\n * \n * @example\n * let model = await AutoModelForSeq2SeqLM.from_pretrained('t5-small');\n */\nexport class AutoModelForSeq2SeqLM extends PretrainedMixin {\n static MODEL_CLASS_MAPPINGS = [MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES];\n}\n\n/**\n * Helper class which is used to instantiate pretrained sequence-to-sequence speech-to-text models with the `from_pretrained` function.\n * The chosen model class is determined by the type specified in the model config.\n * \n * @example\n * let model = await AutoModelForSpeechSeq2Seq.from_pretrained('openai/whisper-tiny.en');\n */\nexport class AutoModelForSpeechSeq2Seq extends PretrainedMixin {\n static MODEL_CLASS_MAPPINGS = [MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES];\n}\n\n/**\n * Helper class which is used to instantiate pretrained sequence-to-sequence text-to-spectrogram models with the `from_pretrained` function.\n * The chosen model class is determined by the type specified in the model config.\n * \n * @example\n * let model = await AutoModelForTextToSpectrogram.from_pretrained('microsoft/speecht5_tts');\n */\nexport class AutoModelForTextToSpectrogram extends PretrainedMixin {\n static MODEL_CLASS_MAPPINGS = [MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING_NAMES];\n}\n\n/**\n * Helper class which is used to instantiate pretrained text-to-waveform models with the `from_pretrained` function.\n * The chosen model class is determined by the type specified in the model config.\n * \n * @example\n * let model = await AutoModelForTextToWaveform.from_pretrained('facebook/mms-tts-eng');\n */\nexport class AutoModelForTextToWaveform extends PretrainedMixin {\n static MODEL_CLASS_MAPPINGS = [MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING_NAMES];\n}\n\n/**\n * Helper class which is used to instantiate pretrained causal language models with the `from_pretrained` function.\n * The chosen model class is determined by the type specified in the model config.\n * \n * @example\n * let model = await AutoModelForCausalLM.from_pretrained('gpt2');\n */\nexport class AutoModelForCausalLM extends PretrainedMixin {\n static MODEL_CLASS_MAPPINGS = [MODEL_WITH_LM_HEAD_MAPPING_NAMES];\n}\n\n/**\n * Helper class which is used to instantiate pretrained masked language models with the `from_pretrained` function.\n * The chosen model class is determined by the type specified in the model config.\n * \n * @example\n * let model = await 
AutoModelForMaskedLM.from_pretrained('bert-base-uncased');\n */\nexport class AutoModelForMaskedLM extends PretrainedMixin {\n static MODEL_CLASS_MAPPINGS = [MODEL_FOR_MASKED_LM_MAPPING_NAMES];\n}\n\n/**\n * Helper class which is used to instantiate pretrained question answering models with the `from_pretrained` function.\n * The chosen model class is determined by the type specified in the model config.\n * \n * @example\n * let model = await AutoModelForQuestionAnswering.from_pretrained('distilbert-base-cased-distilled-squad');\n */\nexport class AutoModelForQuestionAnswering extends PretrainedMixin {\n static MODEL_CLASS_MAPPINGS = [MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES];\n}\n\n/**\n * Helper class which is used to instantiate pretrained vision-to-sequence models with the `from_pretrained` function.\n * The chosen model class is determined by the type specified in the model config.\n * \n * @example\n * let model = await AutoModelForVision2Seq.from_pretrained('nlpconnect/vit-gpt2-image-captioning');\n */\nexport class AutoModelForVision2Seq extends PretrainedMixin {\n static MODEL_CLASS_MAPPINGS = [MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES];\n}\n\n/**\n * Helper class which is used to instantiate pretrained image classification models with the `from_pretrained` function.\n * The chosen model class is determined by the type specified in the model config.\n * \n * @example\n * let model = await AutoModelForImageClassification.from_pretrained('google/vit-base-patch16-224');\n */\nexport class AutoModelForImageClassification extends PretrainedMixin {\n static MODEL_CLASS_MAPPINGS = [MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES];\n}\n\n/**\n * Helper class which is used to instantiate pretrained image segmentation models with the `from_pretrained` function.\n * The chosen model class is determined by the type specified in the model config.\n * \n * @example\n * let model = await AutoModelForImageSegmentation.from_pretrained('facebook/detr-resnet-50-panoptic');\n */\nexport class AutoModelForImageSegmentation extends PretrainedMixin {\n static MODEL_CLASS_MAPPINGS = [MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES];\n}\n\n/**\n * Helper class which is used to instantiate pretrained semantic segmentation models with the `from_pretrained` function.\n * The chosen model class is determined by the type specified in the model config.\n * \n * @example\n * let model = await AutoModelForSemanticSegmentation.from_pretrained('nvidia/segformer-b3-finetuned-cityscapes-1024-1024');\n */\nexport class AutoModelForSemanticSegmentation extends PretrainedMixin {\n static MODEL_CLASS_MAPPINGS = [MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES];\n}\n\n/**\n * Helper class which is used to instantiate pretrained object detection models with the `from_pretrained` function.\n * The chosen model class is determined by the type specified in the model config.\n * \n * @example\n * let model = await AutoModelForObjectDetection.from_pretrained('facebook/detr-resnet-50');\n */\nexport class AutoModelForObjectDetection extends PretrainedMixin {\n static MODEL_CLASS_MAPPINGS = [MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES];\n}\n\nexport class AutoModelForZeroShotObjectDetection extends PretrainedMixin {\n static MODEL_CLASS_MAPPINGS = [MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES];\n}\n\n\n/**\n * Helper class which is used to instantiate pretrained mask generation models with the `from_pretrained` function.\n * The chosen model class is determined by the type specified in the model config.\n * \n * @example\n * let model = 
await AutoModelForMaskGeneration.from_pretrained('Xenova/sam-vit-base');\n */\nexport class AutoModelForMaskGeneration extends PretrainedMixin {\n static MODEL_CLASS_MAPPINGS = [MODEL_FOR_MASK_GENERATION_MAPPING_NAMES];\n}\n\nexport class AutoModelForCTC extends PretrainedMixin {\n static MODEL_CLASS_MAPPINGS = [MODEL_FOR_CTC_MAPPING_NAMES];\n}\n\nexport class AutoModelForAudioClassification extends PretrainedMixin {\n static MODEL_CLASS_MAPPINGS = [MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES];\n}\n\nexport class AutoModelForXVector extends PretrainedMixin {\n static MODEL_CLASS_MAPPINGS = [MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES];\n}\n\nexport class AutoModelForAudioFrameClassification extends PretrainedMixin {\n static MODEL_CLASS_MAPPINGS = [MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES];\n}\n\nexport class AutoModelForDocumentQuestionAnswering extends PretrainedMixin {\n static MODEL_CLASS_MAPPINGS = [MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES];\n}\n\nexport class AutoModelForImageMatting extends PretrainedMixin {\n static MODEL_CLASS_MAPPINGS = [MODEL_FOR_IMAGE_MATTING_MAPPING_NAMES];\n}\n\nexport class AutoModelForImageToImage extends PretrainedMixin {\n static MODEL_CLASS_MAPPINGS = [MODEL_FOR_IMAGE_TO_IMAGE_MAPPING_NAMES];\n}\n\nexport class AutoModelForDepthEstimation extends PretrainedMixin {\n static MODEL_CLASS_MAPPINGS = [MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES];\n}\n\nexport class AutoModelForImageFeatureExtraction extends PretrainedMixin {\n static MODEL_CLASS_MAPPINGS = [MODEL_FOR_IMAGE_FEATURE_EXTRACTION_MAPPING_NAMES];\n}\n\n//////////////////////////////////////////////////\n\n//////////////////////////////////////////////////\nexport class Seq2SeqLMOutput extends ModelOutput {\n /**\n * @param {Object} output The output of the model.\n * @param {Tensor} output.logits The output logits of the model.\n * @param {Tensor} output.past_key_values A tensor of key/value pairs that represent the previous state of the model.\n * @param {Tensor} output.encoder_outputs The output of the encoder in a sequence-to-sequence model.\n * @param {Tensor} [output.decoder_attentions] Attention weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.\n * @param {Tensor} [output.cross_attentions] Attention weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads.\n */\n constructor({ logits, past_key_values, encoder_outputs, decoder_attentions = null, cross_attentions = null }) {\n super();\n this.logits = logits;\n this.past_key_values = past_key_values;\n this.encoder_outputs = encoder_outputs;\n this.decoder_attentions = decoder_attentions;\n this.cross_attentions = cross_attentions;\n }\n}\n\n/**\n * Base class for outputs of sentence classification models.\n */\nexport class SequenceClassifierOutput extends ModelOutput {\n /**\n * @param {Object} output The output of the model.\n * @param {Tensor} output.logits Classification (or regression if config.num_labels==1) scores (before SoftMax).\n */\n constructor({ logits }) {\n super();\n this.logits = logits;\n }\n}\n\n/**\n * Base class for outputs of XVector models.\n */\nexport class XVectorOutput extends ModelOutput {\n /**\n * @param {Object} output The output of the model.\n * @param {Tensor} output.logits Classification hidden states before AMSoftmax, of shape `(batch_size, config.xvector_output_dim)`.\n * @param {Tensor} output.embeddings Utterance embeddings used for 
vector similarity-based retrieval, of shape `(batch_size, config.xvector_output_dim)`.\n */\n constructor({ logits, embeddings }) {\n super();\n this.logits = logits;\n this.embeddings = embeddings;\n }\n}\n\n/**\n * Base class for outputs of token classification models.\n */\nexport class TokenClassifierOutput extends ModelOutput {\n /**\n * @param {Object} output The output of the model.\n * @param {Tensor} output.logits Classification scores (before SoftMax).\n */\n constructor({ logits }) {\n super();\n this.logits = logits;\n }\n}\n\n/**\n * Base class for masked language models outputs.\n */\nexport class MaskedLMOutput extends ModelOutput {\n /**\n * @param {Object} output The output of the model.\n * @param {Tensor} output.logits Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\n */\n constructor({ logits }) {\n super();\n this.logits = logits;\n }\n}\n\n/**\n * Base class for outputs of question answering models.\n */\nexport class QuestionAnsweringModelOutput extends ModelOutput {\n /**\n * @param {Object} output The output of the model.\n * @param {Tensor} output.start_logits Span-start scores (before SoftMax).\n * @param {Tensor} output.end_logits Span-end scores (before SoftMax).\n */\n constructor({ start_logits, end_logits }) {\n super();\n this.start_logits = start_logits;\n this.end_logits = end_logits;\n }\n}\n\n\n/**\n * Base class for causal language model (or autoregressive) outputs.\n */\nexport class CausalLMOutput extends ModelOutput {\n /**\n * @param {Object} output The output of the model.\n * @param {Tensor} output.logits Prediction scores of the language modeling head (scores for each vocabulary token before softmax).\n */\n constructor({ logits }) {\n super();\n this.logits = logits;\n }\n}\n\n/**\n * Base class for causal language model (or autoregressive) outputs.\n */\nexport class CausalLMOutputWithPast extends ModelOutput {\n /**\n * @param {Object} output The output of the model.\n * @param {Tensor} output.logits Prediction scores of the language modeling head (scores for each vocabulary token before softmax).\n * @param {Tensor} output.past_key_values Contains pre-computed hidden-states (key and values in the self-attention blocks)\n * that can be used (see `past_key_values` input) to speed up sequential decoding.\n */\n constructor({ logits, past_key_values }) {\n super();\n this.logits = logits;\n this.past_key_values = past_key_values;\n }\n}\n\nexport class ImageMattingOutput extends ModelOutput {\n /**\n * @param {Object} output The output of the model.\n * @param {Tensor} output.alphas Estimated alpha values, of shape `(batch_size, num_channels, height, width)`.\n */\n constructor({ alphas }) {\n super();\n this.alphas = alphas;\n }\n}\n\n/**\n * Describes the outputs for the VITS model.\n */\nexport class VitsModelOutput extends ModelOutput {\n /**\n * @param {Object} output The output of the model.\n * @param {Tensor} output.waveform The final audio waveform predicted by the model, of shape `(batch_size, sequence_length)`.\n * @param {Tensor} output.spectrogram The log-mel spectrogram predicted at the output of the flow model.\n * This spectrogram is passed to the Hi-Fi GAN decoder model to obtain the final audio waveform.\n */\n constructor({ waveform, spectrogram }) {\n super();\n this.waveform = waveform;\n this.spectrogram = spectrogram;\n 
}\n}\n"],"names":[],"mappings":";;;;;;;;;;AAqFA,MAAM,EAAE,kBAAkB,QAAQ,YAAY,IAAG,IAAK;AAMtD,MAAM,cAAc;AAAA,EAChB,aAAa;AAAA,EACb,gBAAgB;AAAA,EAChB,SAAS;AAAA,EACT,YAAY;AAAA,EACZ,aAAa;AAAA,EACb,gBAAgB;AACpB;AAQA,MAAM,qBAAqB,oBAAI,IAAG;AAClC,MAAM,8BAA8B,oBAAI,IAAG;AAC3C,MAAM,8BAA8B,oBAAI,IAAG;AAW3C,eAAe,iBAAiB,+BAA+B,UAAU,SAAS;AAE9E,MAAI,gBAAgB,QAAQ,QAAQ,GAAG,QAAQ,YAAY,eAAe,EAAE;AAC5E,MAAI,SAAS,MAAM,aAAa,+BAA+B,eAAe,MAAM,OAAO;AAE3F,MAAI;AACA,WAAO,MAAM,iBAAiB,OAAO,QAAQ;AAAA,MACzC;AAAA,IACZ,CAAS;AAAA,EACL,SAAS,KAAK;AAEV,QAAI,mBAAmB,WAAW,KAAK,mBAAmB,CAAC,MAAM,QAAQ;AACrE,YAAM;AAAA,IACV;AAEA,YAAQ,KAAK,GAAG;AAChB,YAAQ;AAAA,MACJ;AAAA,IAEZ;AACQ,WAAO,MAAM,iBAAiB,OAAO,QAAQ;AAAA,MACzC,oBAAoB,CAAC,MAAM;AAAA,IACvC,CAAS;AAAA,EACL;AACJ;AAUA,SAAS,eAAe,SAAS,QAAQ;AAKrC,QAAM,gBAAgB,uBAAO,OAAO,IAAI;AACxC,QAAM,gBAAgB,CAAA;AACtB,aAAW,aAAa,QAAQ,YAAY;AACxC,UAAM,SAAS,OAAO,SAAS;AAI/B,QAAI,EAAE,kBAAkB,SAAS;AAC7B,oBAAc,KAAK,SAAS;AAC5B;AAAA,IACJ;AAIA,kBAAc,SAAS,IAAI,IAAI,KAAK,QAAQ,OAAO,MAAK,IAAK;AAAA,EACjE;AACA,MAAI,cAAc,SAAS,GAAG;AAC1B,UAAM,IAAI;AAAA,MACN,4EAA4E,cAAc,KAAK,IAAI,CAAC;AAAA,IAAG;AAAA,EAC/G;AAEA,QAAM,oBAAoB,OAAO,KAAK,MAAM,EAAE;AAC9C,QAAM,kBAAkB,QAAQ,WAAW;AAC3C,MAAI,oBAAoB,iBAAiB;AAGrC,QAAI,UAAU,OAAO,KAAK,MAAM,EAAE,OAAO,eAAa,CAAC,QAAQ,WAAW,SAAS,SAAS,CAAC;AAC7F,YAAQ,KAAK,2CAA2C,iBAAiB,MAAM,eAAe,6CAA6C,QAAQ,KAAK,IAAI,CAAC,IAAI;AAAA,EACrK;AAEA,SAAO;AACX;AAaA,eAAe,WAAW,SAAS,QAAQ;AACvC,QAAM,gBAAgB,eAAe,SAAS,MAAM;AACpD,MAAI;AAEA,QAAI,SAAS,MAAM,QAAQ,IAAI,aAAa;AAC5C,aAAS,eAAe,MAAM;AAC9B,WAAO;AAAA,EACX,SAAS,GAAG;AAER,YAAQ,MAAM,8CAA8C,CAAC,IAAI;AACjE,YAAQ,MAAM,0BAA0B,aAAa;AACrD,UAAM;AAAA,EACV;AACJ;AAQA,SAAS,eAAe,KAAK;AACzB,WAAS,QAAQ,KAAK;AAClB,QAAI,IAAI,IAAI,aAAa,YAAY;AACjC,UAAI,IAAI,IAAI,IAAI,OAAO,IAAI,IAAI,CAAC;AAAA,IACpC,WAAW,OAAO,IAAI,IAAI,MAAM,UAAU;AACtC,qBAAe,IAAI,IAAI,CAAC;AAAA,IAC5B;AAAA,EACJ;AACA,SAAO;AACX;AAUA,SAAS,YAAY,OAAO;AACxB,MAAI,iBAAiB,QAAQ;AACzB,WAAO;AAAA,EACX;AAEA,MAAI,MAAM,WAAW,GAAG;AACpB,UAAM,MAAM,yBAAyB;AAAA,EACzC;AAEA,MAAI,MAAM,QAAQ,MAAM,CAAC,CAAC,GAAG;AAEzB,QAAI,MAAM,KAAK,OAAK,EAAE,WAAW,MAAM,CAAC,EAAE,MAAM,GAAG;AAC/C,YAAM,MAAM,4KAA4K;AAAA,IAC5L;AAEA,WAAO,IAAI;AAAA,MAAO;AAAA,MACd,cAAc,KAAK,MAAM,KAAI,EAAG,IAAI,OAAK,OAAO,CAAC,CAAC,CAAC;AAAA,MACnD,CAAC,MAAM,QAAQ,MAAM,CAAC,EAAE,MAAM;AAAA,IAC1C;AAAA,EACI,OAAO;AAEH,WAAO,IAAI;AAAA,MAAO;AAAA,MACd,cAAc,KAAK,MAAM,IAAI,OAAK,OAAO,CAAC,CAAC,CAAC;AAAA,MAC5C,CAAC,GAAG,MAAM,MAAM;AAAA,IAC5B;AAAA,EACI;AACJ;AASA,SAAS,qBAAqB,MAAM,QAAQ;AAGxC,MAAI,eAAe,KAAK,OAAO,gBAAgB;AAC/C,MAAI,eAAe,KAAK,OAAO,gBAAgB;AAC/C,MAAI,iBAAiB,YAAY,GAAG;AAChC,mBAAe,CAAC,YAAY;AAAA,EAChC;AAEA,MAAI,yBAAyB,OAAO,QAAQ,YAAY,MAAM;AAC9D,MAAI,yCAA0C,iBAAiB,QAAS,CAAC,aAAa,SAAS,YAAY;AAE3G,MAAI,0BAA0B,wCAAwC;AAClE,QAAI,OAAO,cAAc;AAAA;AAAA;AAAA,MAGrB,OAAO,KAAK,IAAI,OAAK,KAAK,YAAY;AAAA,IAClD;AACQ,WAAO,IAAI,OAAO,SAAS,MAAM,OAAO,IAAI;AAAA,EAChD,OAAO;AACH,WAAO,UAAU,MAAM;AAAA,EAC3B;AACJ;AAUA,SAAS,mBAAmB,SAAS,OAAO,kBAAkB;AAC1D,MAAI,CAAC,QAAQ,WAAW,SAAS,cAAc,EAAG;AAElD,QAAM,OAAO,IAAI,cAAc,MAAM,eAAe,KAAK,MAAM;AAG/D,WAAS,IAAI,GAAG,IAAI,MAAM,eAAe,KAAK,CAAC,GAAG,EAAE,GAAG;AACnD,QAAI,QAAQ,IAAI,MAAM,eAAe,KAAK,CAAC;AAC3C,QAAI,MAAM,OAAO,CAAC;AAClB,aAAS,IAAI,GAAG,IAAI,MAAM,eAAe,KAAK,CAAC,GAAG,EAAE,GAAG;AACnD,YAAM,QAAQ,QAAQ;AACtB,UAAI,MAAM,eAAe,KAAK,KAAK,MAAM,IAAI;AACzC,aAAK,KAAK,IAAI,OAAO,CAAC;AAAA,MAC1B,OAAO;AACH,aAAK,KAAK,IAAI;AACd,eAAO,MAAM,eAAe,KAAK,KAAK;AAAA,MAC1C;AAAA,IACJ;AAAA,EACJ;AAEA,QAAM,eAAe,IAAI,OAAO,SAAS,MAAM,MAAM,eAAe,IAAI;AAExE,MAAI,kBAAkB;AAClB,UAAM,eAAe,MAAM,aAAa,MAAM,MAAM,EAAE,EAAE,WAAW,EAAE;AAAA,EACzE;AACJ;AAQA,SAAS,WAAW,OAAO;AACvB,SAAO,IAAI,OAAO,QAAQ,CAAC,KAAK,GAAG,CAAC,CAAC,CAAC;AAC1C;AAUA,eAAe,eAAe,MAAM,
cAAc;AAE9C,MAAI,EAAE,iBAAiB,gBAAe,IAAK;AAE3C,MAAI,CAAC,iBAAiB;AAElB,uBAAmB,MAAM,eAAe,MAAM,YAAY,GAAG;AAAA,EACjE;AACA,MAAI,eAAe;AAAA,IACf,WAAW,aAAa;AAAA,IACxB,uBAAuB;AAAA,EAC/B;AACI,QAAM,mBAAmB,CAAC,CAAC;AAE3B,MAAI,KAAK,uBAAuB,WAAW,SAAS,kBAAkB,GAAG;AACrE,iBAAa,mBAAmB,WAAW,gBAAgB;AAAA,EAC/D;AAEA,MAAI,KAAK,uBAAuB,WAAW,SAAS,wBAAwB,GAAG;AAC3E,iBAAa,yBAAyB,aAAa;AAAA,EACvD;AAEA,qBAAmB,KAAK,wBAAwB,cAAc,gBAAgB;AAC9E,OAAK,iBAAiB,cAAc,eAAe;AAEnD,QAAM,iBAAiB,MAAM,WAAW,KAAK,wBAAwB,YAAY;AACjF,MAAI,SAAS,eAAe;AAC5B,oBAAkB,KAAK,iBAAiB,gBAAgB,eAAe;AAGvE,QAAM,QAAQ,KAAK,cAAc,cAAc;AAE/C,SAAO,IAAI,gBAAgB,EAAE,QAAQ,iBAAiB,iBAAiB,GAAG,OAAO;AACrF;AAWA,SAAS,kBAAkB,MAAM,eAAe,mBAAmB,iBAAiB;AAChF,MAAI,QAAQ,CAAA;AACZ,MAAI,SAAS;AAGb,QAAM,0BAA0B,KAAK,2BAA2B;AAGhE,MAAI,oBACA,kBAAkB,qBACf,kBAAkB,0BAClB,kBAAkB,gBAClB,kBAAkB;AAIzB,MAAI,6BAA6B,QAAQ;AACrC,wBAAoB,kBAAkB,OAAM,EAAG,KAAI;AAAA,EACvD,WAAW,CAAC,MAAM,QAAQ,iBAAiB,GAAG;AAC1C,wBAAoB,CAAC,iBAAiB;AAAA,EAC1C;AAEA,WAAS,UAAU,eAAe;AAI9B,WAAO,OAAO,CAAC,GAAG,GAAG,OAAO,IAAI;AAGhC,QAAI,QAAQ;AAAA,MACR,QAAQ;AAAA,MACR,iBAAiB;AAAA,MACjB,oBAAoB;AAAA,MAEpB,kBAAkB;AAAA,MAClB,MAAM;AAAA,MACN,OAAO;AAAA,MACP,IAAI;AAAA;AAAA,IAChB;AAEQ,QAAI,yBAAyB;AACzB,YAAM,iBAAiB,qBAAqB,MAAM,MAAM;AAAA,IAC5D;AAEA,UAAM,KAAK,KAAK;AAAA,EACpB;AAEA,SAAO;AACX;AAWA,eAAe,eAAe,MAAM,MAAM;AACtC,QAAM,aAAa,KAAK;AAExB,MAAI,oBAAoB,KAAK;AAC7B,MAAI,KAAK,oBAAoB;AAGzB,wBAAoB,kBAAkB,MAAM,EAAE;AAAA,EAClD;AAGA,MAAI,eAAe;AAAA,IACf,CAAC,UAAU,GAAG,KAAK;AAAA,IACnB,mBAAmB,YAAY,iBAAiB;AAAA,IAChD,iBAAiB,KAAK;AAAA,IACtB,iBAAiB,KAAK,oBAAoB;AAAA,EAClD;AACI,MAAI,KAAK,gBAAgB;AACrB,iBAAa,iBAAiB,KAAK;AAAA,EACvC;AAGA,MAAI,SAAS,MAAM,KAAK,QAAQ,YAAY;AAG5C,OAAK,qBAAqB;AAC1B,OAAK,kBAAkB,OAAO;AAE9B,SAAO;AACX;AAQA,SAAS,kBAAkB,MAAM,YAAY;AACzC,OAAK,mBAAmB,CAAC,GAAG,KAAK,kBAAkB,UAAU;AACjE;AASA,eAAe,eAAe,MAAM,cAAc;AAC9C,QAAM,eAAe,uBAAO,OAAO,IAAI;AACvC,aAAW,OAAO,KAAK,QAAQ,YAAY;AACvC,iBAAa,GAAG,IAAI,aAAa,GAAG;AAAA,EACxC;AACA,MAAI,KAAK,QAAQ,WAAW,SAAS,gBAAgB,KAAK,CAAC,aAAa,gBAAgB;AAGpF,iBAAa,iBAAiB,IAAI;AAAA,MAC9B;AAAA,MACA,IAAI,cAAc,aAAa,UAAU,KAAK,MAAM;AAAA,MACpD,aAAa,UAAU;AAAA,IACnC;AAAA,EACI;AACA,SAAO,MAAM,WAAW,KAAK,SAAS,YAAY;AACtD;AAUA,eAAe,eAAe,MAAM,cAAc;AAC9C,MAAI,EAAE,WAAW,iBAAiB,eAAc,IAAK;AACrD,MAAI,eAAe;AAAA,IACf;AAAA,IACA,gBAAgB,kBAAkB,qBAAqB,MAAM,SAAS;AAAA,EAC9E;AACI,QAAM,mBAAmB,CAAC,CAAC;AAE3B,MAAI,KAAK,QAAQ,WAAW,SAAS,kBAAkB,GAAG;AACtD,iBAAa,mBAAmB,WAAW,gBAAgB;AAAA,EAC/D;AAEA,qBAAmB,KAAK,SAAS,cAAc,gBAAgB;AAE/D,OAAK,iBAAiB,cAAc,eAAe;AAEnD,MAAI,iBAAiB,MAAM,WAAW,KAAK,SAAS,YAAY;AAEhE,MAAI,SAAS,eAAe;AAE5B,oBAAkB,KAAK,iBAAiB,gBAAgB,eAAe;AACvE,SAAO,EAAE,QAAQ,gBAAe;AACpC;AAYA,SAAS,kBAAkB,MAAM,eAAe,mBAAmB,iBAAiB,uBAAuB;AACvG,MAAI,QAAQ,CAAA;AAEZ,MAAI,SAAS;AACb,WAAS,UAAU,eAAe;AAC9B,QAAI,mBAAmB,OAAO,OAAM,EAAG,IAAI,MAAM;AAKjD,WAAO,OAAO,CAAC,GAAG,GAAG,OAAO,IAAI;AAEhC,QAAI;AACJ,QAAI,uBAAuB;AACvB,kBAAY,sBAAsB,MAAM;AACxC,gBAAU,OAAO,CAAC,GAAG,GAAG,UAAU,IAAI;AAAA,IAE1C,OAAO;AACH,kBAAY,qBAAqB,MAAM,MAAM;AAAA,IACjD;AAEA,QAAI,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,iBAAiB;AAAA,MACjB,gBAAgB;AAAA,MAChB,oBAAoB;AAAA,MAEpB;AAAA,MACA,mBAAmB;AAAA,MAEnB,MAAM;AAAA,MACN,OAAO;AAAA,MACP,IAAI;AAAA;AAAA,IAChB;AAEQ,UAAM,KAAK,KAAK;AAAA,EACpB;AACA,SAAO;AACX;AAeA,eAAe,eAAe,MAAM,MAAM;AACtC,MAAI,eAAe,IAAI,cAAc,KAAK,iBAAiB,MAAM,EAAE,KAAK,EAAE;AAG1E,MAAI,eAAe;AAAA,IACf,WAAW,KAAK;AAAA,IAChB,gBAAgB,IAAI;AAAA,MAChB;AAAA,MACA;AAAA,MACA,CAAC,GAAG,aAAa,MAAM;AAAA,IACnC;AAAA,IACQ,iBAAiB,KAAK,oBAAoB;AAAA,EAClD;AAGI,MAAI,SAAS,MAAM,KAAK,QAAQ,YAAY;AAG5C,OAAK,qBAAqB;AAE1B,SAAO;AACX;AAQA,SAAS,kBAAkB,MAAM,YAAY;AACzC,OAAK,mBAAmB,CAAC,GAAG,KAAK,kBAAkB,UAAU;AAC7D,OAAK,kBAAkB,IAAI,OAAO,SAAS,CAAC,OAAO,UAAU,CAAC,GAAG,CAAC,GA
AG,CAAC,CAAC;AAC3E;AAQO,MAAM,wBAAwB,SAAS;AAAA,EAC1C,kBAAkB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOlB,YAAY,QAAQ,SAAS;AACzB,UAAK;AAEL,SAAK,SAAS;AACd,SAAK,UAAU;AAEf,UAAM,YAAY,4BAA4B,IAAI,KAAK,WAAW;AAClE,UAAM,YAAY,mBAAmB,IAAI,SAAS;AAElD,SAAK,eAAe;AACpB,SAAK,WAAW;AAChB,SAAK,iBAAiB;AACtB,SAAK,cAAc;AACnB,SAAK,WAAW;AAChB,QAAI,cAAc,YAAY,aAAa;AACvC,WAAK,eAAe;AAEpB,WAAK,WAAW;AAChB,WAAK,iBAAiB;AACtB,WAAK,cAAc;AACnB,WAAK,WAAW;AAAA,IAEpB,WAAW,cAAc,YAAY,WAAW,cAAc,YAAY,YAAY;AAClF,WAAK,eAAe;AAEpB,WAAK,WAAW;AAChB,WAAK,iBAAiB;AACtB,WAAK,cAAc;AACnB,WAAK,WAAW;AAAA,IAEpB,WAAW,cAAc,YAAY,gBAAgB;AACjD,WAAK,WAAW;AAAA,IAEpB,OAAO;AACH,WAAK,WAAW;AAAA,IACpB;AAAA,EACJ;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,MAAM,UAAU;AACZ,UAAM,WAAW,CAAA;AACjB,aAAS,OAAO,OAAO,KAAK,IAAI,GAAG;AAC/B,YAAM,OAAO,KAAK,GAAG;AAErB,UAAI,gBAAgB,kBAAkB;AAClC,iBAAS,KAAK,KAAK,QAAQ,QAAO,CAAE;AAAA,MACxC;AAAA,IACJ;AACA,WAAO,MAAM,QAAQ,IAAI,QAAQ;AAAA,EACrC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAiBA,aAAa,gBAAgB,+BAA+B;AAAA,IACxD,YAAY;AAAA,IACZ,oBAAoB;AAAA,IACpB,SAAS;AAAA,IACT,YAAY;AAAA,IACZ,mBAAmB;AAAA,IACnB,WAAW;AAAA,IACX,kBAAkB;AAAA,EAC1B,IAAQ,IAAI;AAEJ,QAAI,UAAU;AAAA,MACV;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACZ;AAEQ,UAAM,YAAY,4BAA4B,IAAI,IAAI;AACtD,UAAM,YAAY,mBAAmB,IAAI,SAAS;AAElD,QAAI;AACJ,QAAI,cAAc,YAAY,aAAa;AACvC,aAAO,MAAM,QAAQ,IAAI;AAAA,QACrB,WAAW,gBAAgB,+BAA+B,OAAO;AAAA,QACjE,iBAAiB,+BAA+B,QAAQ,mBAAmB,wBAAwB,OAAO;AAAA,QAC1G,aAAa,+BAA+B,0BAA0B,OAAO,OAAO;AAAA,MACpG,CAAa;AAAA,IAEL,WAAW,cAAc,YAAY,WAAW,cAAc,YAAY,YAAY;AAClF,aAAO,MAAM,QAAQ,IAAI;AAAA,QACrB,WAAW,gBAAgB,+BAA+B,OAAO;AAAA,QACjE,iBAAiB,+BAA+B,iBAAiB,OAAO;AAAA,QACxE,iBAAiB,+BAA+B,wBAAwB,OAAO;AAAA,QAC/E,aAAa,+BAA+B,0BAA0B,OAAO,OAAO;AAAA,MACpG,CAAa;AAAA,IAEL,WAAW,cAAc,YAAY,gBAAgB;AACjD,aAAO,MAAM,QAAQ,IAAI;AAAA,QACrB,WAAW,gBAAgB,+BAA+B,OAAO;AAAA,QACjE,iBAAiB,+BAA+B,kBAAkB,OAAO;AAAA,QACzE,iBAAiB,+BAA+B,+BAA+B,OAAO;AAAA,MACtG,CAAa;AAAA,IAEL,WAAW,cAAc,YAAY,gBAAgB;AACjD,aAAO,MAAM,QAAQ,IAAI;AAAA,QACrB,WAAW,gBAAgB,+BAA+B,OAAO;AAAA,QACjE,iBAAiB,+BAA+B,iBAAiB,OAAO;AAAA,QACxE,iBAAiB,+BAA+B,wBAAwB,OAAO;AAAA,MAC/F,CAAa;AAAA,IAEL,OAAO;AACH,UAAI,cAAc,YAAY,aAAa;AACvC,gBAAQ,KAAK,mBAAmB,aAAa,QAAQ,UAAU,qIAAqI;AAAA,MACxM;AACA,aAAO,MAAM,QAAQ,IAAI;AAAA,QACrB,WAAW,gBAAgB,+BAA+B,OAAO;AAAA,QACjE,iBAAiB,+BAA+B,QAAQ,mBAAmB,SAAS,OAAO;AAAA,MAC3G,CAAa;AAAA,IACL;AAGA,WAAO,IAAI,KAAK,GAAG,IAAI;AAAA,EAC3B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,MAAM,MAAM,cAAc;AACtB,WAAO,MAAM,KAAK,QAAQ,YAAY;AAAA,EAC1C;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,MAAM,QAAQ,cAAc;AACxB,WAAO,MAAM,KAAK,SAAS,MAAM,YAAY;AAAA,EACjD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,sBACI,mBACA,sBAGA,mBAAmB,MACrB;AACE,UAAM,aAAa,IAAI,oBAAmB;AAiB1C,QAAI,kBAAkB,uBAAuB,QAAQ,kBAAkB,uBAAuB,GAAK;AAC/F,iBAAW,KAAK,IAAI,iCAAiC,kBAAkB,kBAAkB,CAAC;AAAA,IAC9F;AAEA,QAAI,kBAAkB,yBAAyB,QAAQ,kBAAkB,uBAAuB,GAAG;AAC/F,iBAAW,KAAK,IAAI,6BAA6B,kBAAkB,oBAAoB,CAAC;AAAA,IAC5F;AAaA,QAAI,kBAAkB,kBAAkB,MAAM;AAC1C,iBAAW,KAAK,IAAI,0BAA0B,kBAAkB,eAAe,kBAAkB,YAAY,CAAC;AAAA,IAClH;AAEA,QAAI,kBAAkB,eAAe,QAAQ,kBAAkB,iBAAiB,QAAQ,kBAAkB,aAAa,GAAG;AACtH,iBAAW,KAAK,IAAI,yBAAyB,kBAAkB,YAAY,kBAAkB,YAAY,CAAC;AAAA,IAC9G;AAEA,QAAI,kBAAkB,mBAAmB,QAAQ,kBAAkB,iBAAiB,QAAQ,kBAAkB,iBAAiB,GAAG;AAC9H,iBAAW,KAAK,IAAI;AAAA,QAChB;AAAA,QACA,kBAAkB;AAAA,QAClB,kBAAkB;AAAA,MAClC,CAAa;AAAA,IACL;AAUA,QAAI,kBAAkB,wBAAwB,MAAM;AAChD,iBAAW,KAAK,IAAI,8BAA8B,kBAAkB,mBAAmB,CAAC;AAAA,IAC5F;AAEA,QAAI,kBAAkB,wBAAwB,MAAM;AAChD,iBAAW,KAAK,IAAI;AAAA,QAChB,kBAAkB;AAAA,QAClB,kBAAkB;AAAA,MAClC,CAAa;AAAA,IACL;AAkBA,QAAI,kBAAkB,0BAA0B,MAAM;AAClD,UAAI,cAAe,uBAAuB,KAAK,kBAAkB,wBAAwB,OACnF,uBACA,
uBAAuB;AAE7B,UAAI,kBAAkB,uBAAuB,MAAM;AAE/C,uBAAe,kBAAkB,mBAAmB,kBAAkB,mBAAmB,SAAS,CAAC,EAAE,CAAC;AAAA,MAC1G;AACA,iBAAW,KAAK,IAAI,qCAAqC,kBAAkB,uBAAuB,WAAW,CAAC;AAAA,IAClH;AAEA,QAAI,kBAAkB,uBAAuB,MAAM;AAC/C,iBAAW,KAAK,IAAI,2BAA2B,kBAAkB,kBAAkB,CAAC;AAAA,IACxF;AAEA,QAAI,qBAAqB,MAAM;AAC3B,iBAAW,OAAO,gBAAgB;AAAA,IACtC;AAOA,WAAO;AAAA,EACX;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,uBAAuB,mBAAmB;AAGtC,QAAI,aAAa,IAAI,iBAAiB,KAAK,MAAM;AAGjD,QAAI,uBAAuB,MAAM;AAC7B,aAAO,OAAO,YAAY,KAAK,iBAAiB;AAAA,IACpD;AAIA,QAAI,sBAAsB,MAAM;AAC5B,aAAO,OAAO,YAAY,iBAAiB;AAAA,IAC/C;AACA,WAAO;AAAA,EACX;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAmBA,MAAM,SACF,QACA,oBAAoB,MACpB,mBAAmB,MACnB;AAAA,IACI,wBAAwB;AAAA,EACpC,IAAY,CAAA,GACN;AACE,QAAI,CAAC,KAAK,cAAc;AACpB,YAAM,YAAY,4BAA4B,IAAI,KAAK,WAAW;AAClE,UAAI,eAAe,4BAA4B,SAAS;AAExD,YAAM,YAAY,KAAK,OAAO;AAC9B,YAAM,eACF,iCAAiC,IAAI,SAAS,KAC3C,6CAA6C,IAAI,SAAS,KAC1D,yCAAyC,IAAI,SAAS,KAEtD,qCAAqC,IAAI,SAAS;AAEzD,UAAI,cAAc;AAEd,wBAAgB,6CAA6C,aAAa,CAAC,CAAC;AAAA,MAChF;AACA,YAAM,MAAM,YAAY;AAAA,IAC5B;AAEA,QAAI,EAAE,kBAAkB,WAAW,CAAC,aAAa,MAAM,KAAK,CAAC,MAAM,QAAQ,MAAM,GAAG;AAChF,YAAM,MAAM,8DAA8D,OAAO,YAAY,IAAI,IAAI;AAAA,IACzG;AAEA,QAAI;AAIJ,QAAI,KAAK,OAAO,oBAAoB;AAEhC,6BAAuB;AAAA,IAE3B,OAAO;AACH,6BAAuB,kBAAkB,SAAS,OAAO,KAAK,GAAG,EAAE,IAAI,OAAO;AAG9E,UAAI,yBAAyB,GAAG;AAC5B,cAAM,MAAM,mDAAmD;AAAA,MACnE;AAAA,IACJ;AAGA,wBAAoB,KAAK,uBAAuB,iBAAiB;AAEjE,uBAAmB,oBAAoB,IAAI,oBAAmB;AAG9D,uBAAmB,KAAK;AAAA,MACpB;AAAA,MACA;AAAA,MACA;AAAA,IACZ;AAGQ,QAAI,gBAAgB,kBAAkB;AACtC,QAAI,kBAAkB,QAAQ,CAAC,MAAM,QAAQ,aAAa,GAAG;AACzD,sBAAgB,CAAC,aAAa;AAAA,IAClC;AAKA,QAAI,kBAAkB;AACtB,UAAM,kBAAkB,mBAAmB,kBAAkB,kBAAkB;AAG/E,UAAM,eAAe,OAAO,UAAU,kBAAkB,UAAU,MAAM,kBAAkB,kBAAkB,UAAU;AACtH,QAAI,UAAU,QAAQ,WAAW,iBAAiB;AAGlD,QAAI,QAAQ,KAAK,cAAc,QAAQ,mBAAmB,iBAAiB,qBAAqB;AAEhG,WAAO,MAAM,KAAK,OAAK,CAAC,EAAE,IAAI,KAAK,kBAAkB,iBAAiB;AAClE,UAAI,eAAe,CAAA;AACnB,eAAS,QAAQ,OAAO;AACpB,YAAI,KAAK,MAAM;AAEX,uBAAa,KAAK,IAAI;AACtB;AAAA,QACJ;AACA,YAAI,gBAAgB,KAAK,iBAAiB,UAAU,kBAAkB,YAAY;AAE9E,eAAK,OAAO;AACZ,uBAAa,KAAK,IAAI;AACtB;AAAA,QACJ;AAGA,YAAI,SAAS,MAAM,KAAK,QAAQ,IAAI;AAGpC,YAAI,kBAAkB,mBAAmB;AACrC,eAAK,oBAAoB,MAAM,MAAM;AAAA,QACzC;AACA,YAAI,kBAAkB,cAAe;AAQrC,YAAI,SAAS,OAAO,OAAO,MAAM,MAAM,IAAI,IAAI;AAG/C,yBAAiB,KAAK,kBAAkB,MAAM;AAE9C,YAAI,gBAAgB,QAAQ,MAAM;AAClC,iBAAS,CAAC,YAAY,OAAO,KAAK,eAAe;AAE7C,cAAI,UAAU,EAAE,GAAG,KAAI;AAIvB,eAAK,WAAW,SAAS,UAAU;AAEnC,kBAAQ,SAAS;AAEjB,cAAI,iBAAiB,cAAc,SAAS,UAAU,GAAG;AACrD,oBAAQ,OAAO;AAAA,UACnB;AAEA,uBAAa,KAAK,OAAO;AAAA,QAC7B;AAAA,MACJ;AACA,QAAE;AAGF,qBAAe,KAAK,WAAW,YAAY,EAAE;AAAA,QACzC,WAAS,MACJ,KAAK,CAAC,GAAG,MAAM,EAAE,QAAQ,EAAE,KAAK,EAChC,MAAM,GAAG,kBAAkB,SAAS;AAAA;AAAA,MACzD;AAGY,cAAQ,aAAa,KAAI;AAGzB,UAAI,kBAAkB,mBAAmB;AACrC,0BAAkB,kBAAkB,KAAK;AAAA,MAC7C;AAAA,IACJ;AAIA,UAAM,eAAe,KAAK,WAAW,KAAK;AAE1C,UAAM,eAAe,CAAC,QAAQ,aAAa;AAAA,MACvC,WAAS;AACL,YAAI,kBAAkB,uBAAuB,GAAG;AAC5C,iBAAO,MAAM,MAAM,GAAG,kBAAkB,oBAAoB,EAAE,IAAI,OAAK,EAAE,GAAG,CAAC;AAAA,QACjF,OAAO;AACH,iBAAO,CAAC,MAAM,CAAC,EAAE,GAAG,CAAC;AAAA,QACzB;AAAA,MACJ;AAAA,IACZ,EAAU,KAAI;AAEN,UAAM,YAAY,aAAa,kBAAkB;AAEjD,QAAI,kBAAkB,yBAAyB;AAa3C,YAAM,qBAAqB,aAAa,oBAAoB;AAC5D,YAAM,mBAAmB,aAAa,kBAAkB;AAExD,aAAO;AAAA,QACH;AAAA,QAEA;AAAA,QACA;AAAA,MAChB;AAAA,IACQ,OAAO;AACH,aAAO;AAAA,IACX;AAAA,EACJ;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,oBAAoB,MAAM,QAAQ;AAC9B,QAAI,KAAK,OAAO,oBAAoB;AAChC,UAAI,CAAC,OAAO,oBAAoB,OAAO,iBAAiB,WAAW,GAAG;AAClE,cAAM;AAAA,UACF;AAAA,QAEpB;AAAA,MACY;AACA,UAAI,CAAC,KAAK,kBAAkB;AACxB,aAAK,mBAAmB,CAAA;AAAA,MAC5B;AACA,WAAK,iBAAiB,KAAK,OAAO,gBAAgB;AAAA,IACtD;AAEA,QAAI,CAAC,OAAO,sBAAsB,OAAO,mBAAmB,WAA
W,GAAG;AACtE,YAAM;AAAA,QACF;AAAA,MAEhB;AAAA,IACQ;AACA,QAAI,CAAC,KAAK,oBAAoB;AAC1B,WAAK,qBAAqB,CAAA;AAAA,IAC9B;AACA,SAAK,mBAAmB,KAAK,OAAO,kBAAkB;AAAA,EAC1D;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,WAAW,OAAO;AAEd,UAAM,SAAS,uBAAO,OAAO,IAAI;AACjC,eAAW,OAAO,OAAO;AACrB,UAAI,OAAO,IAAI,EAAE,MAAM,QAAW;AAC9B,eAAO,IAAI,EAAE,IAAI,CAAC,GAAG;AAAA,MACzB,OAAO;AACH,eAAO,IAAI,EAAE,EAAE,KAAK,GAAG;AAAA,MAC3B;AAAA,IACJ;AAEA,WAAO,OAAO,OAAO,MAAM;AAAA,EAC/B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,iBAAiB,gBAAgB,eAAe;AAE5C,UAAM,OAAO,uBAAO,OAAO,IAAI;AAE/B,eAAW,QAAQ,gBAAgB;AAC/B,UAAI,KAAK,WAAW,SAAS,GAAG;AAC5B,YAAI,UAAU,KAAK,QAAQ,WAAW,iBAAiB;AAEvD,YAAI,iBAAiB,KAAK,SAAS,SAAS,GAAG;AAI3C,eAAK,OAAO,IAAI,cAAc,OAAO;AAAA,QACzC,OAAO;AACH,eAAK,OAAO,IAAI,eAAe,IAAI;AAAA,QACvC;AAAA,MACJ;AAAA,IACJ;AACA,WAAO;AAAA,EACX;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,cAAc,gBAAgB;AAC1B,UAAM,QAAQ,uBAAO,OAAO,IAAI;AAEhC,eAAW,YAAY,CAAC,oBAAoB,oBAAoB,GAAG;AAC/D,YAAM,SAAS,CAAA;AACf,iBAAW,QAAQ,gBAAgB;AAC/B,YAAI,KAAK,WAAW,QAAQ,GAAG;AAC3B,gBAAM,QAAQ,KAAK,MAAM,GAAG,EAAE,IAAG;AACjC,iBAAO,KAAK,IAAI,eAAe,IAAI;AAAA,QACvC;AAAA,MACJ;AACA,YAAM,QAAQ,IAAI;AAAA,IACtB;AACA,WAAO;AAAA,EACX;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,iBAAiB,cAAc,eAAe;AAC1C,QAAI,eAAe;AACf,aAAO,OAAO,cAAc,aAAa;AAAA,IAC7C,OAAO;AAEH,YAAM,aAAa;AAGnB,UAAI,KAAK,OAAO,uBAAuB,KAAK,mBAAmB,OAAO;AAElE,YAAI,eAAe,CAAC,YAAY,KAAK,mBAAmB,GAAG,KAAK,cAAc;AAE9E,YAAI,eAAe,CAAC,YAAY,KAAK,mBAAmB,GAAG,KAAK,cAAc;AAE9E,iBAAS,IAAI,GAAG,IAAI,KAAK,oBAAoB,EAAE,GAAG;AAC9C,uBAAa,mBAAmB,CAAC,cAAc,IAAI,IAAI,OAAO,WAAW,CAAA,GAAI,YAAY;AACzF,uBAAa,mBAAmB,CAAC,gBAAgB,IAAI,IAAI,OAAO,WAAW,CAAA,GAAI,YAAY;AAC3F,uBAAa,mBAAmB,CAAC,cAAc,IAAI,IAAI,OAAO,WAAW,CAAA,GAAI,YAAY;AACzF,uBAAa,mBAAmB,CAAC,gBAAgB,IAAI,IAAI,OAAO,WAAW,CAAA,GAAI,YAAY;AAAA,QAC/F;AAAA,MACJ,WAAW,KAAK,OAAO,eAAe,UAAU;AAG5C,YAAI,OAAO,CAAC,aAAa,KAAK,WAAW,GAAG,KAAK,MAAM;AAEvD,iBAAS,IAAI,GAAG,IAAI,KAAK,YAAY,EAAE,GAAG;AACtC,uBAAa,mBAAmB,CAAC,MAAM,IAAI,IAAI,OAAO,WAAW,CAAA,GAAI,IAAI;AACzE,uBAAa,mBAAmB,CAAC,QAAQ,IAAI,IAAI,OAAO,WAAW,CAAA,GAAI,IAAI;AAAA,QAC/E;AAAA,MACJ,WAAW,KAAK,OAAO,aAAa;AAEhC,YAAI,OAAO,CAAC,aAAa,KAAK,WAAW,GAAG,IAAI,KAAK,MAAM;AAE3D,iBAAS,IAAI,GAAG,IAAI,KAAK,YAAY,EAAE,GAAG;AACtC,uBAAa,mBAAmB,CAAC,YAAY,IAAI,IAAI,OAAO,WAAW,CAAA,GAAI,IAAI;AAAA,QACnF;AAAA,MACJ,WAAW,KAAK,OAAO,eAAe,SAAS;AAI3C,YAAI,UAAU,CAAC,aAAa,KAAK,WAAW,KAAK,QAAQ,CAAC;AAE1D,YAAI,YAAY,CAAC,aAAa,KAAK,WAAW,GAAG,KAAK,MAAM;AAE5D,iBAAS,IAAI,GAAG,IAAI,KAAK,YAAY,EAAE,GAAG;AACtC,uBAAa,mBAAmB,CAAC,MAAM,IAAI,IAAI,OAAO,WAAW,CAAA,GAAI,OAAO;AAC5E,uBAAa,mBAAmB,CAAC,QAAQ,IAAI,IAAI,OAAO,WAAW,CAAA,GAAI,SAAS;AAAA,QACpF;AAAA,MACJ,OAAO;AAEH,YAAI,OAAO,CAAC,YAAY,KAAK,WAAW,GAAG,KAAK,MAAM;AAEtD,iBAAS,IAAI,GAAG,IAAI,KAAK,YAAY,EAAE,GAAG;AACtC,uBAAa,mBAAmB,CAAC,MAAM,IAAI,IAAI,OAAO,WAAW,CAAA,GAAI,IAAI;AACzE,uBAAa,mBAAmB,CAAC,QAAQ,IAAI,IAAI,OAAO,WAAW,CAAA,GAAI,IAAI;AAAA,QAC/E;AAAA,MACJ;AAAA,IACJ;AAAA,EACJ;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWA,cAAc,eAAe,mBAAmB,iBAAiB,uBAAuB;AACpF,WAAO,KAAK,eAAe,MAAM,eAAe,mBAAmB,iBAAiB,qBAAqB;AAAA,EAC7G;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,MAAM,QAAQ,MAAM;AAChB,WAAO,MAAM,KAAK,SAAS,MAAM,IAAI;AAAA,EACzC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,WAAW,MAAM,YAAY;AACzB,WAAO,KAAK,YAAY,MAAM,UAAU;AAAA,EAC5C;AACJ;AAIO,MAAM,YAAY;AAAA;AAqBlB,MAAM,4BAA4B,gBAAgB;AAAA;AAClD,MAAM,kBAAkB,oBAAoB;AAAA;AAK5C,MAAM,wBAAwB,oBAAoB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOrD,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,eAAe,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EAC7D;AACJ;AAKO,MAAM,sCAAsC,oBAAoB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOnE,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,yBAAyB,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EACvE;AACJ;AAKO,MAAM,mCAA
mC,oBAAoB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOhE,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,sBAAsB,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EACpE;AACJ;AAKO,MAAM,iCAAiC,oBAAoB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAO9D,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,6BAA6B,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EAC3E;AACJ;AAKO,MAAM,iCAAiC,gBAAgB;AAAA;AACvD,MAAM,uBAAuB,yBAAyB;AAAA;AAKtD,MAAM,gCAAgC,gBAAgB;AAAA;AAKtD,MAAM,sBAAsB,wBAAwB;AAAA;AAKpD,MAAM,4BAA4B,wBAAwB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAO7D,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,eAAe,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EAC7D;AACJ;AAKO,MAAM,0CAA0C,wBAAwB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAO3E,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,yBAAyB,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EACvE;AACJ;AAMO,MAAM,uCAAuC,wBAAwB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOxE,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,sBAAsB,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EACpE;AACJ;AAMO,MAAM,qCAAqC,wBAAwB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOtE,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,6BAA6B,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EAC3E;AACJ;AAMO,MAAM,gCAAgC,gBAAgB;AAAA;AAKtD,MAAM,sBAAsB,wBAAwB;AAAA;AAKpD,MAAM,4BAA4B,wBAAwB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAO7D,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,eAAe,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EAC7D;AACJ;AAKO,MAAM,0CAA0C,wBAAwB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAO3E,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,yBAAyB,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EACvE;AACJ;AAMO,MAAM,uCAAuC,wBAAwB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOxE,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,sBAAsB,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EACpE;AACJ;AAMO,MAAM,qCAAqC,wBAAwB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOtE,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,6BAA6B,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EAC3E;AACJ;AAMO,MAAM,+BAA+B,gBAAgB;AAAA;AAOrD,MAAM,qBAAqB,uBAAuB;AAAA;AAKlD,MAAM,2BAA2B,uBAAuB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAO3D,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,eAAe,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EAC7D;AACJ;AAKO,MAAM,yCAAyC,uBAAuB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOzE,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,yBAAyB,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EACvE;AACJ;AAKO,MAAM,sCAAsC,uBAAuB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOtE,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,sBAAsB,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EACpE;AACJ;AAMO,MAAM,oCAAoC,uBAAuB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOpE,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,6BAA6B,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EAC3E;AACJ;AAMO,MAAM,iCAAiC,gBAAgB;AAAA;AAKvD,MAAM,uBAAuB,yBAAyB;AAAA;AAKtD,MAAM,6BAA6B,yBAAyB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAO/D,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,eAAe,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EAC7D;AACJ;AAKO,MAAM,2CAA2C,yBAAyB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAO7E,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,yBAAyB,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EACvE;AACJ;AAKO,MAAM,wCAAwC,yBAAyB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAO1E,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,sBAAsB,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EACpE;AACJ;AAKO,MAAM,sCAAsC,yBAAyB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOxE,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,6BAA6B,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EAC3E;AACJ;AAKO,MAAM,+BAA+B,gBAAgB;AAAA;AAKrD,MAAM,qBAAqB,uBAAuB;AAAA;AAKlD,MAAM,2BAA2B,uBAAuB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAO3D,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,eAAe,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EAC7D;AACJ;AAKO,MAAM,yCAAyC,uBAAuB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOzE,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,yBAAyB,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EACvE;AACJ;AAKO,MAAM,sCAAsC,uBAAuB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOtE,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,sBAAsB,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EACpE;AACJ;AAMO,MAAM,oCAAoC,uBAAuB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOpE,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,6BAA6B,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EAC3E;AACJ;AAKO,MAAM,iCAAiC,gBAAgB;AAAA;AAKvD,MAAM,uBAAuB,yBAA
yB;AAAA;AAKtD,MAAM,6BAA6B,yBAAyB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAO/D,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,eAAe,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EAC7D;AACJ;AAKO,MAAM,2CAA2C,yBAAyB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAO7E,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,yBAAyB,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EACvE;AACJ;AAKO,MAAM,wCAAwC,yBAAyB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAO1E,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,sBAAsB,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EACpE;AACJ;AAMO,MAAM,sCAAsC,yBAAyB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOxE,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,6BAA6B,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EAC3E;AACJ;AAKO,MAAM,kCAAkC,gBAAgB;AAAA;AACxD,MAAM,wBAAwB,0BAA0B;AAAA;AAKxD,MAAM,4CAA4C,0BAA0B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAO/E,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,yBAAyB,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EACvE;AACJ;AAKO,MAAM,yCAAyC,0BAA0B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAO5E,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,sBAAsB,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EACpE;AACJ;AAMO,MAAM,uCAAuC,0BAA0B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAO1E,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,6BAA6B,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EAC3E;AACJ;AAKO,MAAM,8BAA8B,0BAA0B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOjE,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,eAAe,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EAC7D;AACJ;AAMO,MAAM,2BAA2B,gBAAgB;AAAA;AAKjD,MAAM,iBAAiB,mBAAmB;AAAA;AAK1C,MAAM,uBAAuB,mBAAmB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOnD,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,eAAe,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EAC7D;AACJ;AAKO,MAAM,qCAAqC,mBAAmB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOjE,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,yBAAyB,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EACvE;AACJ;AAMO,MAAM,kCAAkC,mBAAmB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAO9D,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,sBAAsB,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EACpE;AACJ;AAMO,MAAM,kCAAkC,gBAAgB;AAAA;AACxD,MAAM,wBAAwB,0BAA0B;AAAA;AAKxD,MAAM,8BAA8B,0BAA0B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOjE,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,eAAe,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EAC7D;AACJ;AAKO,MAAM,4CAA4C,0BAA0B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAO/E,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,yBAAyB,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EACvE;AACJ;AAKO,MAAM,uCAAuC,0BAA0B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAO1E,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,6BAA6B,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EAC3E;AACJ;AAKO,MAAM,6BAA6B,gBAAgB;AAAA;AAKnD,MAAM,mBAAmB,qBAAqB;AAAA;AAK9C,MAAM,yBAAyB,qBAAqB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOvD,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,eAAe,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EAC7D;AACJ;AAKO,MAAM,uCAAuC,qBAAqB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOrE,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,yBAAyB,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EACvE;AACJ;AAKO,MAAM,oCAAoC,qBAAqB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOlE,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,sBAAsB,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EACpE;AACJ;AAKO,MAAM,kCAAkC,qBAAqB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOhE,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,6BAA6B,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EAC3E;AACJ;AAMO,MAAM,mCAAmC,gBAAgB;AAAA;AACzD,MAAM,yBAAyB,2BAA2B;AAAA;AAC1D,MAAM,+BAA+B,2BAA2B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOnE,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,eAAe,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EAC7D;AACJ;AACO,MAAM,6CAA6C,2BAA2B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOjF,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,yBAAyB,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EACvE;AACJ;AACO,MAAM,wCAAwC,2BAA2B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAO5E,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,6BAA6B,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EAC3E;AACJ;AAMO,MAAM,8BAA8B,gBAAgB;AAAA;AACpD,MAAM,oBAAoB,sBAAsB;AAAA;AAChD,MAAM,wCAAwC,sBAAsB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOvE,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,yBAAyB,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EACvE;AACJ;AACO,MAAM,mCAAmC,sBAAsB;AAAA;A
AAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOlE,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,6BAA6B,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EAC3E;AACJ;AACO,MAAM,0BAA0B,sBAAsB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOzD,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,eAAe,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EAC7D;AACJ;AAMO,MAAM,0BAA0B,gBAAgB;AAAA;AAEhD,MAAM,gBAAgB,kBAAkB;AAAA;AAKxC,MAAM,mCAAmC,kBAAkB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAS9D,YAAY,QAAQ,SAAS,wBAAwB,mBAAmB;AACpE,UAAM,QAAQ,OAAO;AACrB,SAAK,yBAAyB;AAC9B,SAAK,oBAAoB;AAEzB,SAAK,qBAAqB,KAAK,OAAO;AACtC,SAAK,oBAAoB,KAAK,OAAO;AACrC,SAAK,iBAAiB,KAAK,OAAO;AAElC,SAAK,qBAAqB,KAAK,OAAO;AACtC,SAAK,oBAAoB,KAAK,OAAO;AACrC,SAAK,iBAAiB,KAAK,OAAO;AAAA,EACtC;AACJ;AASO,MAAM,8BAA8B,gBAAgB;AAAA;AAKpD,MAAM,oBAAoB,sBAAsB;AAAA;AAKhD,MAAM,uCAAuC,sBAAsB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQtE,YAAY,QAAQ,SAAS,wBAAwB,mBAAmB;AACpE,UAAM,QAAQ,OAAO;AACrB,SAAK,yBAAyB;AAC9B,SAAK,oBAAoB;AAEzB,SAAK,qBAAqB,KAAK,OAAO;AACtC,SAAK,oBAAoB,KAAK,OAAO;AACrC,SAAK,iBAAiB,KAAK,OAAO;AAElC,SAAK,qBAAqB,KAAK,OAAO;AACtC,SAAK,oBAAoB,KAAK,OAAO;AACrC,SAAK,iBAAiB,KAAK,OAAO;AAAA,EACtC;AACJ;AAMO,MAAM,2BAA2B,gBAAgB;AAAA;AAEjD,MAAM,iBAAiB,mBAAmB;AAAA;AAK1C,MAAM,oCAAoC,mBAAmB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAShE,YAAY,QAAQ,SAAS,wBAAwB,mBAAmB;AACpE,UAAM,QAAQ,OAAO;AACrB,SAAK,yBAAyB;AAC9B,SAAK,oBAAoB;AAEzB,SAAK,qBAAqB,KAAK,OAAO;AACtC,SAAK,oBAAoB,KAAK,OAAO;AACrC,SAAK,iBAAiB,KAAK,OAAO;AAElC,SAAK,qBAAqB,KAAK,OAAO;AACtC,SAAK,oBAAoB,KAAK,OAAO;AACrC,SAAK,iBAAiB,KAAK,OAAO;AAAA,EACtC;AACJ;AAKO,MAAM,4BAA4B,gBAAgB;AAAA;AAKlD,MAAM,kBAAkB,oBAAoB;AAAA;AAK5C,MAAM,qCAAqC,oBAAoB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASlE,YAAY,QAAQ,SAAS,wBAAwB,mBAAmB;AACpE,UAAM,QAAQ,OAAO;AACrB,SAAK,yBAAyB;AAC9B,SAAK,oBAAoB;AAEzB,SAAK,qBAAqB,KAAK,OAAO;AACtC,SAAK,oBAAoB,KAAK,OAAO;AACrC,SAAK,iBAAiB,KAAK,OAAO,UAAU,KAAK;AAEjD,SAAK,qBAAqB,KAAK,OAAO;AACtC,SAAK,oBAAoB,KAAK,OAAO;AACrC,SAAK,iBAAiB,KAAK,OAAO,UAAU,KAAK;AAAA,EACrD;AAEJ;AAKO,MAAM,sCAAsC,oBAAoB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOnE,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,yBAAyB,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EACvE;AACJ;AAMO,MAAM,6BAA6B,gBAAgB;AAAA;AAKnD,MAAM,mBAAmB,qBAAqB;AAAA;AAK9C,MAAM,sCAAsC,qBAAqB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASpE,YAAY,QAAQ,SAAS,wBAAwB,mBAAmB;AACpE,UAAM,QAAQ,OAAO;AACrB,SAAK,yBAAyB;AAC9B,SAAK,oBAAoB;AAEzB,SAAK,qBAAqB,KAAK,OAAO;AACtC,SAAK,oBAAoB,KAAK,OAAO;AACrC,SAAK,iBAAiB,KAAK,OAAO,UAAU,KAAK;AAEjD,SAAK,qBAAqB,KAAK,OAAO;AACtC,SAAK,oBAAoB,KAAK,OAAO;AACrC,SAAK,iBAAiB,KAAK,OAAO,UAAU,KAAK;AAAA,EACrD;AAEJ;AAKO,MAAM,uCAAuC,qBAAqB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOrE,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,yBAAyB,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EACvE;AACJ;AAGO,MAAM,yBAAyB,qBAAqB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOvD,YAAY,QAAQ,wBAAwB,mBAAmB;AAC3D,UAAM,QAAQ,sBAAsB;AACpC,SAAK,oBAAoB;AAEzB,SAAK,qBAAqB,KAAK,OAAO;AACtC,SAAK,oBAAoB,KAAK,OAAO;AACrC,SAAK,iBAAiB,KAAK,OAAO,UAAU,KAAK;AAEjD,SAAK,qBAAqB,KAAK,OAAO;AACtC,SAAK,oBAAoB,KAAK,OAAO;AACrC,SAAK,iBAAiB,KAAK,OAAO,UAAU,KAAK;AAAA,EACrD;AACJ;AAMO,MAAM,kCAAkC,gBAAgB;AAAA;AAKxD,MAAM,wBAAwB,0BAA0B;AAAA;AAKxD,MAAM,2CAA2C,0BAA0B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAS9E,YAAY,QAAQ,SAAS,wBAAwB,mBAAmB;AACpE,UAAM,QAAQ,OAAO;AACrB,SAAK,yBAAyB;AAC9B,SAAK,oBAAoB;AAEzB,SAAK,qBAAqB,KAAK,OAAO;AACtC,SAAK,oBAAoB,KAAK,OAAO;AACrC,SAAK,iBAAiB,KAAK,OAAO,UAAU,KAAK;AAEjD,SAAK,qBAAqB,KAAK,OAAO;AACtC,SAAK,oBAAoB,KAAK,OAAO;AACrC,SAAK,iBAAiB,KAAK,OAAO,UAAU,KAAK;AAAA,EACrD;AACJ;AAMO,MAAM,uCAAuC,gBAAgB;AAAA;AAK7D,MAAM,6BAA6B,+BAA+B;AAAA;AAKlE,MAAM,gDAAgD,+BAA+B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASxF,YAAY,QAAQ,SAAS,wBAAwB,mBAAmB;AACpE,UAAM,QAAQ,OAAO;AACrB,SAAK,yBAAyB;AAC9B,SAAK,oBAAoB;A
AEzB,SAAK,qBAAqB,KAAK,OAAO;AACtC,SAAK,oBAAoB,KAAK,OAAO;AACrC,SAAK,iBAAiB,KAAK,OAAO,UAAU,KAAK;AAEjD,SAAK,qBAAqB,KAAK,OAAO;AACtC,SAAK,oBAAoB,KAAK,OAAO;AACrC,SAAK,iBAAiB,KAAK,OAAO,UAAU,KAAK;AAAA,EACrD;AACJ;AAMO,MAAM,+BAA+B,gBAAgB;AAAA;AACrD,MAAM,qBAAqB,uBAAuB;AAAA;AAKlD,MAAM,2BAA2B,uBAAuB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAO3D,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,eAAe,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EAC7D;AACJ;AAKO,MAAM,yCAAyC,uBAAuB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOzE,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,yBAAyB,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EACvE;AACJ;AAKO,MAAM,sCAAsC,uBAAuB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOtE,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,sBAAsB,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EACpE;AACJ;AAKO,MAAM,oCAAoC,uBAAuB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOpE,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,6BAA6B,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EAC3E;AACJ;AASO,MAAM,2BAA2B,gBAAgB;AAAA;AAKjD,MAAM,iBAAiB,mBAAmB;AAAA;AAK1C,MAAM,2BAA2B,mBAAmB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOvD,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,eAAe,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EAC7D;AACJ;AAKO,MAAM,qCAAqC,mBAAmB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOjE,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,yBAAyB,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EACvE;AACJ;AAKO,MAAM,kCAAkC,mBAAmB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAO9D,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,sBAAsB,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EACpE;AACJ;AAKO,MAAM,gCAAgC,mBAAmB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAO5D,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,6BAA6B,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EAC3E;AACJ;AAKO,MAAM,kCAAkC,gBAAgB;AAAA;AACxD,MAAM,wBAAwB,0BAA0B;AAAA;AAKxD,MAAM,8BAA8B,0BAA0B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOjE,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,eAAe,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EAC7D;AACJ;AAKO,MAAM,4CAA4C,0BAA0B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAO/E,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,yBAAyB,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EACvE;AACJ;AAKO,MAAM,yCAAyC,0BAA0B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAO5E,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,sBAAsB,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EACpE;AACJ;AAKO,MAAM,uCAAuC,0BAA0B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAO1E,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,6BAA6B,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EAC3E;AACJ;AAKO,MAAM,2BAA2B,gBAAgB;AAAA;AAKjD,MAAM,iBAAiB,mBAAmB;AAAA;AAM1C,MAAM,kCAAkC,mBAAmB;AAAA;AAK3D,MAAM,+BAA+B,gBAAgB;AAAA;AAKrD,MAAM,qBAAqB,uBAAuB;AAAA;AAKlD,MAAM,wCAAwC,uBAAuB;AAAA,EAExE,0BAA0B;AAAA,EAC1B,kBAAkB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASlB,YAAY,QAAQ,SAAS,wBAAwB,mBAAmB;AACpE,UAAM,QAAQ,OAAO;AACrB,SAAK,yBAAyB;AAC9B,SAAK,oBAAoB;AAEzB,SAAK,qBAAqB,KAAK,OAAO;AACtC,SAAK,oBAAoB,KAAK,OAAO;AACrC,SAAK,iBAAiB,KAAK,OAAO,UAAU,KAAK;AAEjD,SAAK,qBAAqB,KAAK,OAAO;AACtC,SAAK,oBAAoB,KAAK,OAAO;AACrC,SAAK,iBAAiB,KAAK,OAAO,UAAU,KAAK;AAAA,EACrD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAmBA,MAAM,SACF,QACA,oBAAoB,MACpB,mBAAmB,MAOrB;AAEE,wBAAoB,KAAK,uBAAuB,iBAAiB;AAIjE,sBAAkB,sBAAsB;AAIxC,QAAI,kBAAkB,mBAAmB;AACrC,yBAAmB,CAAC,IAAI,gCAAgC,iBAAiB,CAAC;AAAA,IAC9E;AAEA,QAAI,kBAAkB,yBAAyB;AAC3C,wBAAkB,oBAAoB;AACtC,wBAAkB,0BAA0B;AAE5C,UAAI,kBAAkB,SAAS,aAAa;AACxC,gBAAQ,KAAK,kEAAkE;AAAA,MACnF;AAEA,UAAI,CAAC,kBAAkB,iBAAiB;AACpC,cAAM,IAAI;AAAA,UACN;AAAA,QAEpB;AAAA,MACY;AAAA,IACJ;AAEA,UAAM,UAAU,MAAM,MAAM,SAAS,QAAQ,mBAAmB,gBAAgB;AAEhF,QAAI,kBAAkB,2BAA2B,kBAAkB,iBAAiB;AAChF,cAAQ,kBAAkB,IAAI,KAAK;AAAA,QAC/B;AAAA,QACA,kBAAkB;AAAA,QAClB,kBAAkB;AAAA,MAClC;AAAA,IACQ;AAEA,WAAO;AAAA,EACX;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAcA,0BAA0B,kBAAkB,iBAAiB,aAAa,MAAM,iBAAiB,MAAM;AACnG,QAAI,CAAC,iBAAiB,kBAAkB;AACpC,YAAM,IAAI;AAAA,QACN;AAAA,MAEhB;AAAA,IACQ;AAEA,QAAI,sBAAsB,KAAK,OAAO;AACtC,QAAI,wBAA
wB,QAAW;AACnC,cAAQ,KAAK,sEAAsE;AACnF,4BAAsB;AAAA,IAC1B;AAEA,UAAM,kBAAkB,iBAAiB,iBAAiB,IAAI,WAAS;AAGnE,UAAI,mBAAmB,MAAM;AAAA,QAAK,EAAE,QAAQ,KAAK,OAAO,eAAc;AAAA,QAClE,CAAC,GAAG,MAAM,IAAI,MAAM,IAAI,OAAK,EAAE,CAAC,CAAC,GAAG,CAAC;AAAA,MACrD;AAEY,UAAI,UAAU,MAAM,gBAAgB,IAAI,CAAC,CAAC,GAAG,CAAC,MAAM;AAChD,eAAO,aACD,iBAAiB,CAAC,EAAE,MAAM,MAAM,GAAG,MAAM,CAAC,GAAG,UAAU,CAAC,IACxD,iBAAiB,CAAC,EAAE,MAAM,MAAM,CAAC;AAAA,MAC3C,CAAC,CAAC;AACF,gBAAU,QAAQ,UAAU,GAAG,GAAG,GAAG,CAAC;AAEtC,UAAI,CAAC,KAAK,cAAc,IAAI,SAAS,SAAS,IAAI,GAAG,IAAI;AAGzD,UAAI,kBAAkB,QAAQ;AAE9B,eAAS,IAAI,GAAG,IAAI,gBAAgB,KAAK,CAAC,GAAG,EAAE,GAAG;AAC9C,YAAI,UAAU,gBAAgB,CAAC;AAE/B,iBAAS,IAAI,GAAG,IAAI,QAAQ,KAAK,CAAC,GAAG,EAAE,GAAG;AACtC,cAAI,UAAU,QAAQ,CAAC;AAEvB,gBAAM,YAAY,IAAI,CAAC,EAAE,CAAC,EAAE,CAAC;AAC7B,gBAAM,aAAa,eAAe,CAAC,EAAE,CAAC,EAAE,CAAC;AAEzC,mBAAS,IAAI,GAAG,IAAI,QAAQ,KAAK,CAAC,GAAG,EAAE,GAAG;AAEtC,gBAAI,UAAU,QAAQ,CAAC;AACvB,qBAAS,IAAI,GAAG,IAAI,QAAQ,KAAK,QAAQ,EAAE,GAAG;AAC1C,sBAAQ,KAAK,CAAC,KAAK,QAAQ,KAAK,CAAC,IAAI,WAAW,KAAK,CAAC,KAAK,UAAU,KAAK,CAAC;AAAA,YAC/E;AAGA,oBAAQ,KAAK,IAAI,aAAa,QAAQ,MAAM,mBAAmB,CAAC;AAAA,UACpE;AAAA,QACJ;AAAA,MACJ;AAGA,YAAM,SAAS,KAAK,iBAAiB,CAAC;AACtC,aAAO;AAAA,IACX,CAAC;AAED,UAAM,kBAAkB,CAAC,iBAAiB,UAAU,QAAQ,iBAAiB,UAAU,CAAC,EAAE,MAAM;AAEhG,UAAM,aAAa,IAAI;AAAA,MACnB;AAAA,MACA,IAAI,aAAa,gBAAgB,CAAC,IAAI,gBAAgB,CAAC,CAAC;AAAA,MACxD;AAAA,IACZ;AAGQ,aAAS,YAAY,GAAG,YAAY,gBAAgB,CAAC,GAAG,EAAE,WAAW;AAGjE,YAAM,SAAS,gBAAgB,SAAS,EAAE,IAAG,EAAG,SAAS,CAAC;AAC1D,UAAI,CAAC,cAAc,YAAY,IAAI,mBAAmB,MAAM;AAE5D,UAAI,QAAQ,MAAM,KAAK,EAAE,QAAQ,aAAa,SAAS,EAAC,GAAI,CAAC,GAAG,MAAM,aAAa,IAAI,CAAC,IAAI,aAAa,CAAC,CAAC;AAC3G,UAAI,QAAQ,YAAY,CAAC,CAAC,GAAG,KAAK,EAAE,IAAI,OAAK,CAAC,CAAC,CAAC;AAEhD,UAAI,aAAa,CAAA;AACjB,eAAS,IAAI,GAAG,IAAI,MAAM,QAAQ,EAAE,GAAG;AACnC,YAAI,MAAM,CAAC,GAAG;AACV,qBAAW,KAAK,aAAa,CAAC,IAAI,cAAc;AAAA,QAEpD;AAAA,MACJ;AACA,iBAAW,SAAS,EAAE,KAAK,IAAI,YAAY,CAAC;AAAA,IAChD;AAEA,WAAO;AAAA,EACX;AACJ;AAOO,MAAM,kCAAkC,gBAAgB;AAAA,EAC3D,kBAAkB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASlB,YAAY,QAAQ,SAAS,wBAAwB,mBAAmB;AACpE,UAAM,QAAQ,OAAO;AACrB,SAAK,yBAAyB;AAC9B,SAAK,oBAAoB;AAGzB,UAAM,gBAAgB,KAAK,OAAO;AAClC,UAAM,gBAAgB,KAAK,OAAO;AAGlC,UAAM,mBAAmB,cAAc;AACvC,UAAM,eACF,iCAAiC,IAAI,gBAAgB,KAClD,oCAAoC,IAAI,gBAAgB;AAC/D,QAAI,CAAC,cAAc;AACf,cAAQ,KAAK,2BAA2B,gBAAgB,qIAAqI;AAAA,IACjM;AAGA,UAAM,eAAe,iCAAiC,IAAI,cAAc,UAAU;AAClF,QAAI,CAAC,cAAc;AACf,YAAM,IAAI,MAAM,6EAA6E,KAAK,OAAO,QAAQ,UAAU,GAAG;AAAA,IAClI;AAGA,UAAM,oBAAoB,aAAa,CAAC;AAExC,UAAM,UAAU,IAAI,kBAAkB,eAAe,wBAAwB,iBAAiB;AAE9F,SAAK,kBAAkB,wBAAwB;AAC/C,QAAI,KAAK,iBAAiB;AAEtB,WAAK,qBAAqB,QAAQ;AAClC,WAAK,oBAAoB,QAAQ;AACjC,WAAK,iBAAiB,QAAQ;AAE9B,WAAK,qBAAqB,QAAQ;AAClC,WAAK,oBAAoB,QAAQ;AACjC,WAAK,iBAAiB,QAAQ;AAAA,IAElC,OAAO;AAEH,WAAK,aAAa,QAAQ;AAC1B,WAAK,YAAY,QAAQ;AACzB,WAAK,SAAS,QAAQ;AAAA,IAC1B;AAAA,EACJ;AACJ;AAKO,MAAM,4BAA4B,gBAAgB;AAAA;AA6ClD,MAAM,kBAAkB,oBAAoB;AAAA;AA4B5C,MAAM,oCAAoC,oBAAoB;AAAA;AAAA,EAGjE,aAAa,gBAAgB,+BAA+B,UAAU,IAAI;AAEtE,YAAQ,oBAAoB;AAC5B,WAAO,MAAM,gBAAgB,+BAA+B,OAAO;AAAA,EACvE;AACJ;AA4BO,MAAM,sCAAsC,oBAAoB;AAAA;AAAA,EAEnE,aAAa,gBAAgB,+BAA+B,UAAU,IAAI;AAEtE,YAAQ,oBAAoB;AAC5B,WAAO,MAAM,gBAAgB,+BAA+B,OAAO;AAAA,EACvE;AACJ;AAMO,MAAM,8BAA8B,gBAAgB;AAAA;AA6CpD,MAAM,oBAAoB,sBAAsB;AAAA;AA4BhD,MAAM,wBAAwB,sBAAsB;AAAA;AAAA,EAGvD,aAAa,gBAAgB,+BAA+B,UAAU,IAAI;AAEtE,YAAQ,oBAAoB;AAC5B,WAAO,MAAM,gBAAgB,+BAA+B,OAAO;AAAA,EACvE;AACJ;AA4BO,MAAM,0BAA0B,oBAAoB;AAAA;AAAA,EAEvD,aAAa,gBAAgB,+BAA+B,UAAU,IAAI;AAEtE,YAAQ,oBAAoB;AAC5B,WAAO,MAAM,gBAAgB,+BAA+B,OAAO;AAAA,EACvE;AACJ;AAGO,MAAM,mCAAmC,gBAAgB;AAAA;AAEzD,MAAM,yBAAyB,2BAA2B;AAAA;AAM1D,MAAM,+BAA+B,gBAAgB;AAAA;AAErD,MAAM,qBAAqB,uBAAu
B;AAAA;AAgDlD,MAAM,oCAAoC,uBAAuB;AAAA;AAMjE,MAAM,4BAA4B,gBAAgB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOrD,YAAY,QAAQ,SAAS,mBAAmB;AAC5C,UAAM,QAAQ,OAAO;AACrB,SAAK,oBAAoB;AAGzB,SAAK,OAAO,eAAe,KAAK,OAAO;AAEvC,SAAK,YAAY,KAAK,OAAO;AAC7B,SAAK,aAAa,KAAK,OAAO;AAC9B,SAAK,SAAS,KAAK,OAAO,SAAS,KAAK;AAAA,EAC5C;AACJ;AAEO,MAAM,kBAAkB,oBAAoB;AAAA;AAK5C,MAAM,wBAAwB,oBAAoB;AAAA;AAQlD,MAAM,8BAA8B,gBAAgB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOvD,YAAY,QAAQ,SAAS,mBAAmB;AAC5C,UAAM,QAAQ,OAAO;AACrB,SAAK,oBAAoB;AAGzB,SAAK,OAAO,eAAe,KAAK,OAAO;AAEvC,SAAK,YAAY,KAAK,OAAO;AAC7B,SAAK,aAAa,KAAK,OAAO;AAC9B,SAAK,SAAS,KAAK,OAAO,cAAc,KAAK;AAAA,EACjD;AACJ;AACO,MAAM,oBAAoB,sBAAsB;AAAA;AAEhD,MAAM,0BAA0B,sBAAsB;AAAA;AAKtD,MAAM,+BAA+B,gBAAgB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOxD,YAAY,QAAQ,SAAS,mBAAmB;AAC5C,UAAM,QAAQ,OAAO;AACrB,SAAK,oBAAoB;AAGzB,SAAK,OAAO,eAAe,KAAK,OAAO;AAEvC,SAAK,YAAY,KAAK,OAAO;AAC7B,SAAK,aAAa,KAAK,OAAO;AAC9B,SAAK,SAAS,KAAK,OAAO,cAAc,KAAK;AAAA,EACjD;AACJ;AACO,MAAM,qBAAqB,uBAAuB;AAAA;AAElD,MAAM,2BAA2B,uBAAuB;AAAA;AAMxD,MAAM,4BAA4B,gBAAgB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOrD,YAAY,QAAQ,SAAS,mBAAmB;AAC5C,UAAM,QAAQ,OAAO;AACrB,SAAK,oBAAoB;AAGzB,SAAK,OAAO,eAAe,KAAK,OAAO;AAEvC,SAAK,YAAY,KAAK,OAAO;AAC7B,SAAK,aAAa,KAAK,OAAO;AAC9B,SAAK,SAAS,KAAK,OAAO,SAAS,KAAK;AAAA,EAC5C;AACJ;AAEO,MAAM,kBAAkB,oBAAoB;AAAA;AAE5C,MAAM,wBAAwB,oBAAoB;AAAA;AAMlD,MAAM,kCAAkC,gBAAgB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAO3D,YAAY,QAAQ,SAAS,mBAAmB;AAC5C,UAAM,QAAQ,OAAO;AACrB,SAAK,oBAAoB;AAGzB,SAAK,OAAO,eAAe,KAAK,OAAO;AAEvC,SAAK,YAAY,KAAK,OAAO;AAC7B,SAAK,aAAa,KAAK,OAAO;AAC9B,SAAK,SAAS,KAAK,OAAO,SAAS,KAAK;AAAA,EAC5C;AACJ;AAEO,MAAM,wBAAwB,0BAA0B;AAAA;AAExD,MAAM,8BAA8B,0BAA0B;AAAA;AAK9D,MAAM,+BAA+B,gBAAgB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOxD,YAAY,QAAQ,SAAS,mBAAmB;AAC5C,UAAM,QAAQ,OAAO;AACrB,SAAK,oBAAoB;AAGzB,SAAK,OAAO,eAAe,KAAK,OAAO;AAEvC,SAAK,YAAY,KAAK,OAAO;AAC7B,SAAK,aAAa,KAAK,OAAO;AAC9B,SAAK,SAAS,KAAK,OAAO,SAAS,KAAK;AAAA,EAC5C;AACJ;AAIO,MAAM,qBAAqB,uBAAuB;AAAA;AAKlD,MAAM,2BAA2B,uBAAuB;AAAA;AAUxD,MAAM,6BAA6B,gBAAgB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOtD,YAAY,QAAQ,SAAS,mBAAmB;AAC5C,UAAM,QAAQ,OAAO;AACrB,SAAK,oBAAoB;AAGzB,SAAK,OAAO,eAAe,KAAK,OAAO;AAEvC,SAAK,YAAY,KAAK,OAAO,uBAAuB,KAAK,OAAO;AAChE,SAAK,aAAa,KAAK,OAAO;AAC9B,SAAK,SAAS,KAAK,OAAO,cAAc,KAAK,OAAO;AAAA,EACxD;AACJ;AAIO,MAAM,mBAAmB,qBAAqB;AAAA;AAE9C,MAAM,yBAAyB,qBAAqB;AAAA;AASpD,MAAM,6BAA6B,gBAAgB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOtD,YAAY,QAAQ,SAAS,mBAAmB;AAC5C,UAAM,QAAQ,OAAO;AACrB,SAAK,oBAAoB;AAGzB,SAAK,OAAO,eAAe,KAAK,OAAO;AAEvC,SAAK,YAAY,KAAK,OAAO,uBAAuB,KAAK,OAAO;AAChE,SAAK,aAAa,KAAK,OAAO;AAC9B,SAAK,SAAS,KAAK,OAAO,cAAc,KAAK,OAAO;AAAA,EACxD;AACJ;AAIO,MAAM,mBAAmB,qBAAqB;AAAA;AAE9C,MAAM,yBAAyB,qBAAqB;AAAA;AAOpD,MAAM,2BAA2B,gBAAgB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOpD,YAAY,QAAQ,SAAS,mBAAmB;AAC5C,UAAM,QAAQ,OAAO;AACrB,SAAK,oBAAoB;AAGzB,SAAK,OAAO,eAAe,KAAK,OAAO;AAEvC,SAAK,YAAY,KAAK,OAAO;AAC7B,SAAK,aAAa,KAAK,OAAO;AAC9B,SAAK,SAAS,KAAK,OAAO,cAAc,KAAK;AAAA,EACjD;AACJ;AAIO,MAAM,iBAAiB,mBAAmB;AAAA;AAE1C,MAAM,uBAAuB,mBAAmB;AAAA;AAShD,MAAM,6BAA6B,gBAAgB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOtD,YAAY,QAAQ,SAAS,mBAAmB;AAC5C,UAAM,QAAQ,OAAO;AACrB,SAAK,oBAAoB;AAGzB,SAAK,OAAO,eAAe,KAAK,OAAO;AAEvC,SAAK,YAAY,KAAK,OAAO;AAC7B,SAAK,aAAa,KAAK,OAAO;AAC9B,SAAK,SAAS,KAAK,OAAO,cAAc,KAAK;AAAA,EACjD;AACJ;AAKO,MAAM,mBAAmB,qBAAqB;AAAA;AAK9C,MAAM,yBAAyB,qBAAqB;AAAA;AAKpD,MAAM,2BAA2B,gBAAgB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOpD,YAAY,QAAQ,SAAS,mBAAmB;AAC5C,UAAM,QAAQ,OAAO;AACrB,SAAK,oBAAoB;AAGzB,SAAK,OAAO,eAAe,KAAK,OAAO;AAEvC,SAAK,YAAY,KAAK,OAAO;AAC7B,SAAK,aAAa,KAAK,OAAO;AAC9B,SAAK,SAAS,KAAK,OAAO,UAAU,KAAK;AAAA,EAC7C;AACJ;A
AKO,MAAM,iBAAiB,mBAAmB;AAAA;AAK1C,MAAM,uBAAuB,mBAAmB;AAAA;AAMhD,MAAM,2BAA2B,gBAAgB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOpD,YAAY,QAAQ,SAAS,mBAAmB;AAC5C,UAAM,QAAQ,OAAO;AACrB,SAAK,oBAAoB;AAGzB,SAAK,OAAO,eAAe,KAAK,OAAO;AAEvC,SAAK,YAAY,KAAK,OAAO;AAC7B,SAAK,aAAa,KAAK,OAAO;AAC9B,SAAK,SAAS,KAAK,OAAO,cAAc,KAAK;AAAA,EACjD;AACJ;AAKO,MAAM,iBAAiB,mBAAmB;AAAA;AAK1C,MAAM,uBAAuB,mBAAmB;AAAA;AAIhD,MAAM,2BAA2B,gBAAgB;AAAA;AACjD,MAAM,iBAAiB,mBAAmB;AAAA;AAC1C,MAAM,kCAAkC,mBAAmB;AAAA;AAAA;AAAA;AAAA,EAI9D,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,yBAAyB,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EACvE;AACJ;AAKO,MAAM,+BAA+B,gBAAgB;AAAA;AACrD,MAAM,qBAAqB,uBAAuB;AAAA;AAClD,MAAM,sCAAsC,uBAAuB;AAAA;AAAA;AAAA;AAAA,EAItE,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,yBAAyB,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EACvE;AACJ;AAIO,MAAM,gCAAgC,gBAAgB;AAAA;AAqDtD,MAAM,gCAAgC,wBAAwB;AAAA;AAAA;AAAA;AAAA,EAIjE,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,mBAAmB,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EACjE;AACJ;AAIO,MAAM,iCAAiC,gBAAgB;AAAA;AACvD,MAAM,uBAAuB,yBAAyB;AAAA;AACtD,MAAM,wCAAwC,yBAAyB;AAAA;AAAA;AAAA;AAAA,EAI1E,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,yBAAyB,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EACvE;AACJ;AAMO,MAAM,mCAAmC,gBAAgB;AAAA;AACzD,MAAM,yBAAyB,2BAA2B;AAAA;AAC1D,MAAM,0CAA0C,2BAA2B;AAAA;AAAA;AAAA;AAAA,EAI9E,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,yBAAyB,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EACvE;AACJ;AAMO,MAAM,8BAA8B,gBAAgB;AAAA;AACpD,MAAM,oBAAoB,sBAAsB;AAAA;AAChD,MAAM,iCAAiC,sBAAsB;AAAA;AAI7D,MAAM,6BAA6B,gBAAgB;AAAA;AACnD,MAAM,mBAAmB,qBAAqB;AAAA;AAC9C,MAAM,gCAAgC,qBAAqB;AAAA;AAK3D,MAAM,4BAA4B,gBAAgB;AAAA;AAClD,MAAM,kBAAkB,oBAAoB;AAAA;AAC5C,MAAM,mCAAmC,oBAAoB;AAAA;AAAA;AAAA;AAAA,EAIhE,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,yBAAyB,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EACvE;AACJ;AAKO,MAAM,4BAA4B,gBAAgB;AAAA;AAClD,MAAM,kBAAkB,oBAAoB;AAAA;AAC5C,MAAM,+BAA+B,oBAAoB;AAAA;AAAA;AAAA;AAAA,EAI5D,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,0BAA0B,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EACxE;AACJ;AAEO,MAAM,4BAA4B,oBAAoB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAMzD,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,uBAAuB,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EACrE;AACJ;AAEO,MAAM,kCAAkC,YAAY;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOvD,YAAY,EAAE,QAAQ,cAAc;AAChC,UAAK;AACL,SAAK,SAAS;AACd,SAAK,aAAa;AAAA,EACtB;AACJ;AAEO,MAAM,+BAA+B,YAAY;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOpD,YAAY,EAAE,QAAQ,YAAY,WAAU,GAAI;AAC5C,UAAK;AACL,SAAK,SAAS;AACd,SAAK,aAAa;AAClB,SAAK,aAAa;AAAA,EACtB;AACJ;AAIO,MAAM,wCAAwC,gBAAgB;AAAA;AAM9D,MAAM,8BAA8B,gCAAgC;AAAA;AAMpE,MAAM,2CAA2C,gCAAgC;AAAA;AAAA;AAAA;AAAA,EAIpF,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,sCAAsC,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EACpF;AACJ;AACO,MAAM,8CAA8C,0BAA0B;AAAA;AAK9E,MAAM,4BAA4B,gBAAgB;AAAA;AAClD,MAAM,kBAAkB,oBAAoB;AAAA;AAC5C,MAAM,mCAAmC,oBAAoB;AAAA;AAAA;AAAA;AAAA,EAIhE,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,yBAAyB,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EACvE;AACJ;AAQO,MAAM,8BAA8B,gBAAgB;AAAA;AAKpD,MAAM,oBAAoB,sBAAsB;AAAA;AAKhD,MAAM,qCAAqC,sBAAsB;AAAA;AAAA;AAAA;AAAA,EAIpE,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,yBAAyB,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EACvE;AACJ;AAKO,MAAM,4BAA4B,gBAAgB;AAAA;AAClD,MAAM,kBAAkB,oBAAoB;AAAA;AAC5C,MAAM,mCAAmC,oBAAoB;AAAA;AAAA;AAAA;AAAA,EAIhE,MAAM,MAAM,cAAc;AACtB,WAAO,IAAI,yBAAyB,MAAM,MAAM,MAAM,YAAY,CAAC;AAAA,EACvE;AACJ;AAIO,MAAM,+BAA+B,gBAAgB;AAAA;AAKrD,MAAM,qBAAqB,uBAAuB;AAAA;AAkClD,MAAM,uCAAuC,uBAAuB;AAAA;AAIpE,MAAM,2BAA2B,gBAAgB;AAAA;AAKjD,MAAM,iBAAiB,mBAAmB;AAAA;AAsC1C,MAAM,8BAA8B,mBAAmB;AAAA;AAIvD,MAAM,qCAAqC,gBAAgB;AAAA;AAK3D,MAAM,wCAAwC,6BAA6B;AAAA;AAK3E,MAAM,4BAA4B,gBAAgB;AAAA;AAKlD,MAAM,kBAAkB,oBAAoB;AAAA;AAsC5C,MAAM,+BAA+B,oBAAoB;AAAA;AAIzD,MAAM,iCAAiC,gBAAgB;AAAA;AA4EvD,MAAM,uBAAuB,yBAAyB;AAAA;AAKtD,MAAM,gCAAgC,gBAAgB;AAAA;AAKtD,MAAM,sBAAsB,wBAAwB;AAAA;AAKpD,MAAM,uCAAuC
[… remainder of generated `.js.map` source map elided: base64-VLQ `"mappings"` string (machine-generated, not human-reviewable), ending with `"x_google_ignoreList":[0]}` …]