@huggingface/transformers 3.0.2 → 3.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (258)
  1. package/README.md +13 -4
  2. package/dist/ort-wasm-simd-threaded.jsep.wasm +0 -0
  3. package/dist/transformers.cjs +16655 -13040
  4. package/dist/transformers.cjs.map +1 -1
  5. package/dist/transformers.js +17095 -13468
  6. package/dist/transformers.js.map +1 -1
  7. package/dist/transformers.min.cjs +244 -52
  8. package/dist/transformers.min.cjs.map +1 -1
  9. package/dist/transformers.min.js +235 -43
  10. package/dist/transformers.min.js.map +1 -1
  11. package/dist/transformers.min.mjs +246 -54
  12. package/dist/transformers.min.mjs.map +1 -1
  13. package/dist/transformers.mjs +16818 -13202
  14. package/dist/transformers.mjs.map +1 -1
  15. package/package.json +4 -4
  16. package/src/base/feature_extraction_utils.js +54 -0
  17. package/src/base/image_processors_utils.js +1089 -0
  18. package/src/base/processing_utils.js +145 -0
  19. package/src/configs.js +15 -4
  20. package/src/env.js +6 -6
  21. package/src/generation/configuration_utils.js +7 -0
  22. package/src/generation/logits_process.js +22 -16
  23. package/src/generation/streamers.js +7 -2
  24. package/src/models/audio_spectrogram_transformer/feature_extraction_audio_spectrogram_transformer.js +90 -0
  25. package/src/models/auto/feature_extraction_auto.js +41 -0
  26. package/src/models/auto/image_processing_auto.js +29 -0
  27. package/src/models/auto/processing_auto.js +100 -0
  28. package/src/models/beit/image_processing_beit.js +5 -0
  29. package/src/models/bit/image_processing_bit.js +5 -0
  30. package/src/models/chinese_clip/image_processing_chinese_clip.js +5 -0
  31. package/src/models/clap/feature_extraction_clap.js +159 -0
  32. package/src/models/clip/image_processing_clip.js +6 -0
  33. package/src/models/convnext/image_processing_convnext.js +45 -0
  34. package/src/models/deit/image_processing_deit.js +6 -0
  35. package/src/models/detr/image_processing_detr.js +52 -0
  36. package/src/models/donut/image_processing_donut.js +31 -0
  37. package/src/models/dpt/image_processing_dpt.js +6 -0
  38. package/src/models/efficientnet/image_processing_efficientnet.js +13 -0
  39. package/src/models/feature_extractors.js +12 -0
  40. package/src/models/florence2/processing_florence2.js +128 -0
  41. package/src/models/glpn/image_processing_glpn.js +5 -0
  42. package/src/models/idefics3/image_processing_idefics3.js +219 -0
  43. package/src/models/idefics3/processing_idefics3.js +136 -0
  44. package/src/models/image_processors.js +37 -0
  45. package/src/models/janus/image_processing_janus.js +26 -0
  46. package/src/models/janus/processing_janus.js +123 -0
  47. package/src/models/jina_clip/image_processing_jina_clip.js +26 -0
  48. package/src/models/jina_clip/processing_jina_clip.js +24 -0
  49. package/src/models/llava_onevision/image_processing_llava_onevision.js +5 -0
  50. package/src/models/mask2former/image_processing_mask2former.js +5 -0
  51. package/src/models/maskformer/image_processing_maskformer.js +18 -0
  52. package/src/models/mgp_str/processing_mgp_str.js +170 -0
  53. package/src/models/mobilenet_v1/image_processing_mobilenet_v1.js +7 -0
  54. package/src/models/mobilenet_v2/image_processing_mobilenet_v2.js +7 -0
  55. package/src/models/mobilenet_v3/image_processing_mobilenet_v3.js +7 -0
  56. package/src/models/mobilenet_v4/image_processing_mobilenet_v4.js +7 -0
  57. package/src/models/mobilevit/image_processing_mobilevit.js +6 -0
  58. package/src/models/nougat/image_processing_nougat.js +5 -0
  59. package/src/models/owlv2/image_processing_owlv2.js +5 -0
  60. package/src/models/owlvit/image_processing_owlvit.js +12 -0
  61. package/src/models/owlvit/processing_owlvit.js +7 -0
  62. package/src/models/processors.js +12 -0
  63. package/src/models/pvt/image_processing_pvt.js +5 -0
  64. package/src/models/pyannote/feature_extraction_pyannote.js +28 -0
  65. package/src/models/pyannote/processing_pyannote.js +71 -0
  66. package/src/models/qwen2_vl/image_processing_qwen2_vl.js +52 -0
  67. package/src/models/qwen2_vl/processing_qwen2_vl.js +52 -0
  68. package/src/models/rt_detr/image_processing_rt_detr.js +12 -0
  69. package/src/models/sam/image_processing_sam.js +242 -0
  70. package/src/models/sam/processing_sam.js +20 -0
  71. package/src/models/sapiens/image_processing_sapiens.js +13 -0
  72. package/src/models/seamless_m4t/feature_extraction_seamless_m4t.js +180 -0
  73. package/src/models/segformer/image_processing_segformer.js +13 -0
  74. package/src/models/siglip/image_processing_siglip.js +5 -0
  75. package/src/models/speecht5/feature_extraction_speecht5.js +4 -0
  76. package/src/models/speecht5/processing_speecht5.js +17 -0
  77. package/src/models/swin2sr/image_processing_swin2sr.js +24 -0
  78. package/src/models/vit/image_processing_vit.js +7 -0
  79. package/src/models/vitmatte/image_processing_vitmatte.js +50 -0
  80. package/src/models/vitpose/image_processing_vitpose.js +89 -0
  81. package/src/models/wav2vec2/feature_extraction_wav2vec2.js +44 -0
  82. package/src/models/wav2vec2/processing_wav2vec2.js +15 -0
  83. package/src/models/wespeaker/feature_extraction_wespeaker.js +100 -0
  84. package/src/models/whisper/feature_extraction_whisper.js +84 -0
  85. package/src/models/whisper/processing_whisper.js +21 -0
  86. package/src/models/yolos/image_processing_yolos.js +12 -0
  87. package/src/models.js +755 -34
  88. package/src/pipelines.js +8 -8
  89. package/src/tokenizers.js +5 -0
  90. package/src/transformers.js +15 -2
  91. package/src/utils/constants.js +8 -1
  92. package/src/utils/core.js +51 -9
  93. package/src/utils/dtypes.js +2 -1
  94. package/src/utils/hub.js +2 -1
  95. package/src/utils/image.js +87 -33
  96. package/src/utils/tensor.js +39 -2
  97. package/types/base/feature_extraction_utils.d.ts +41 -0
  98. package/types/base/feature_extraction_utils.d.ts.map +1 -0
  99. package/types/base/image_processors_utils.d.ts +323 -0
  100. package/types/base/image_processors_utils.d.ts.map +1 -0
  101. package/types/base/processing_utils.d.ts +80 -0
  102. package/types/base/processing_utils.d.ts.map +1 -0
  103. package/types/configs.d.ts +5 -2
  104. package/types/configs.d.ts.map +1 -1
  105. package/types/env.d.ts +1 -1
  106. package/types/env.d.ts.map +1 -1
  107. package/types/generation/configuration_utils.d.ts +6 -0
  108. package/types/generation/configuration_utils.d.ts.map +1 -1
  109. package/types/generation/logits_process.d.ts +30 -20
  110. package/types/generation/logits_process.d.ts.map +1 -1
  111. package/types/generation/streamers.d.ts +13 -8
  112. package/types/generation/streamers.d.ts.map +1 -1
  113. package/types/models/audio_spectrogram_transformer/feature_extraction_audio_spectrogram_transformer.d.ts +25 -0
  114. package/types/models/audio_spectrogram_transformer/feature_extraction_audio_spectrogram_transformer.d.ts.map +1 -0
  115. package/types/models/auto/feature_extraction_auto.d.ts +5 -0
  116. package/types/models/auto/feature_extraction_auto.d.ts.map +1 -0
  117. package/types/models/auto/image_processing_auto.d.ts +5 -0
  118. package/types/models/auto/image_processing_auto.d.ts.map +1 -0
  119. package/types/models/auto/processing_auto.d.ts +35 -0
  120. package/types/models/auto/processing_auto.d.ts.map +1 -0
  121. package/types/models/beit/image_processing_beit.d.ts +4 -0
  122. package/types/models/beit/image_processing_beit.d.ts.map +1 -0
  123. package/types/models/bit/image_processing_bit.d.ts +4 -0
  124. package/types/models/bit/image_processing_bit.d.ts.map +1 -0
  125. package/types/models/chinese_clip/image_processing_chinese_clip.d.ts +4 -0
  126. package/types/models/chinese_clip/image_processing_chinese_clip.d.ts.map +1 -0
  127. package/types/models/clap/feature_extraction_clap.d.ts +57 -0
  128. package/types/models/clap/feature_extraction_clap.d.ts.map +1 -0
  129. package/types/models/clip/image_processing_clip.d.ts +6 -0
  130. package/types/models/clip/image_processing_clip.d.ts.map +1 -0
  131. package/types/models/convnext/image_processing_convnext.d.ts +12 -0
  132. package/types/models/convnext/image_processing_convnext.d.ts.map +1 -0
  133. package/types/models/deit/image_processing_deit.d.ts +6 -0
  134. package/types/models/deit/image_processing_deit.d.ts.map +1 -0
  135. package/types/models/detr/image_processing_detr.d.ts +42 -0
  136. package/types/models/detr/image_processing_detr.d.ts.map +1 -0
  137. package/types/models/donut/image_processing_donut.d.ts +7 -0
  138. package/types/models/donut/image_processing_donut.d.ts.map +1 -0
  139. package/types/models/dpt/image_processing_dpt.d.ts +6 -0
  140. package/types/models/dpt/image_processing_dpt.d.ts.map +1 -0
  141. package/types/models/efficientnet/image_processing_efficientnet.d.ts +6 -0
  142. package/types/models/efficientnet/image_processing_efficientnet.d.ts.map +1 -0
  143. package/types/models/feature_extractors.d.ts +10 -0
  144. package/types/models/feature_extractors.d.ts.map +1 -0
  145. package/types/models/florence2/processing_florence2.d.ts +39 -0
  146. package/types/models/florence2/processing_florence2.d.ts.map +1 -0
  147. package/types/models/glpn/image_processing_glpn.d.ts +4 -0
  148. package/types/models/glpn/image_processing_glpn.d.ts.map +1 -0
  149. package/types/models/idefics3/image_processing_idefics3.d.ts +40 -0
  150. package/types/models/idefics3/image_processing_idefics3.d.ts.map +1 -0
  151. package/types/models/idefics3/processing_idefics3.d.ts +19 -0
  152. package/types/models/idefics3/processing_idefics3.d.ts.map +1 -0
  153. package/types/models/image_processors.d.ts +37 -0
  154. package/types/models/image_processors.d.ts.map +1 -0
  155. package/types/models/janus/image_processing_janus.d.ts +7 -0
  156. package/types/models/janus/image_processing_janus.d.ts.map +1 -0
  157. package/types/models/janus/processing_janus.d.ts +77 -0
  158. package/types/models/janus/processing_janus.d.ts.map +1 -0
  159. package/types/models/jina_clip/image_processing_jina_clip.d.ts +5 -0
  160. package/types/models/jina_clip/image_processing_jina_clip.d.ts.map +1 -0
  161. package/types/models/jina_clip/processing_jina_clip.d.ts +9 -0
  162. package/types/models/jina_clip/processing_jina_clip.d.ts.map +1 -0
  163. package/types/models/llava_onevision/image_processing_llava_onevision.d.ts +4 -0
  164. package/types/models/llava_onevision/image_processing_llava_onevision.d.ts.map +1 -0
  165. package/types/models/mask2former/image_processing_mask2former.d.ts +4 -0
  166. package/types/models/mask2former/image_processing_mask2former.d.ts.map +1 -0
  167. package/types/models/maskformer/image_processing_maskformer.d.ts +22 -0
  168. package/types/models/maskformer/image_processing_maskformer.d.ts.map +1 -0
  169. package/types/models/mgp_str/processing_mgp_str.d.ts +64 -0
  170. package/types/models/mgp_str/processing_mgp_str.d.ts.map +1 -0
  171. package/types/models/mobilenet_v1/image_processing_mobilenet_v1.d.ts +6 -0
  172. package/types/models/mobilenet_v1/image_processing_mobilenet_v1.d.ts.map +1 -0
  173. package/types/models/mobilenet_v2/image_processing_mobilenet_v2.d.ts +6 -0
  174. package/types/models/mobilenet_v2/image_processing_mobilenet_v2.d.ts.map +1 -0
  175. package/types/models/mobilenet_v3/image_processing_mobilenet_v3.d.ts +6 -0
  176. package/types/models/mobilenet_v3/image_processing_mobilenet_v3.d.ts.map +1 -0
  177. package/types/models/mobilenet_v4/image_processing_mobilenet_v4.d.ts +6 -0
  178. package/types/models/mobilenet_v4/image_processing_mobilenet_v4.d.ts.map +1 -0
  179. package/types/models/mobilevit/image_processing_mobilevit.d.ts +6 -0
  180. package/types/models/mobilevit/image_processing_mobilevit.d.ts.map +1 -0
  181. package/types/models/nougat/image_processing_nougat.d.ts +4 -0
  182. package/types/models/nougat/image_processing_nougat.d.ts.map +1 -0
  183. package/types/models/owlv2/image_processing_owlv2.d.ts +4 -0
  184. package/types/models/owlv2/image_processing_owlv2.d.ts.map +1 -0
  185. package/types/models/owlvit/image_processing_owlvit.d.ts +10 -0
  186. package/types/models/owlvit/image_processing_owlvit.d.ts.map +1 -0
  187. package/types/models/owlvit/processing_owlvit.d.ts +8 -0
  188. package/types/models/owlvit/processing_owlvit.d.ts.map +1 -0
  189. package/types/models/processors.d.ts +13 -0
  190. package/types/models/processors.d.ts.map +1 -0
  191. package/types/models/pvt/image_processing_pvt.d.ts +4 -0
  192. package/types/models/pvt/image_processing_pvt.d.ts.map +1 -0
  193. package/types/models/pyannote/feature_extraction_pyannote.d.ts +13 -0
  194. package/types/models/pyannote/feature_extraction_pyannote.d.ts.map +1 -0
  195. package/types/models/pyannote/processing_pyannote.d.ts +30 -0
  196. package/types/models/pyannote/processing_pyannote.d.ts.map +1 -0
  197. package/types/models/qwen2_vl/image_processing_qwen2_vl.d.ts +11 -0
  198. package/types/models/qwen2_vl/image_processing_qwen2_vl.d.ts.map +1 -0
  199. package/types/models/qwen2_vl/processing_qwen2_vl.d.ts +17 -0
  200. package/types/models/qwen2_vl/processing_qwen2_vl.d.ts.map +1 -0
  201. package/types/models/rt_detr/image_processing_rt_detr.d.ts +8 -0
  202. package/types/models/rt_detr/image_processing_rt_detr.d.ts.map +1 -0
  203. package/types/models/sam/image_processing_sam.d.ts +103 -0
  204. package/types/models/sam/image_processing_sam.d.ts.map +1 -0
  205. package/types/models/sam/processing_sam.d.ts +9 -0
  206. package/types/models/sam/processing_sam.d.ts.map +1 -0
  207. package/types/models/seamless_m4t/feature_extraction_seamless_m4t.d.ts +34 -0
  208. package/types/models/seamless_m4t/feature_extraction_seamless_m4t.d.ts.map +1 -0
  209. package/types/models/segformer/image_processing_segformer.d.ts +10 -0
  210. package/types/models/segformer/image_processing_segformer.d.ts.map +1 -0
  211. package/types/models/siglip/image_processing_siglip.d.ts +4 -0
  212. package/types/models/siglip/image_processing_siglip.d.ts.map +1 -0
  213. package/types/models/speecht5/feature_extraction_speecht5.d.ts +4 -0
  214. package/types/models/speecht5/feature_extraction_speecht5.d.ts.map +1 -0
  215. package/types/models/speecht5/processing_speecht5.d.ts +14 -0
  216. package/types/models/speecht5/processing_speecht5.d.ts.map +1 -0
  217. package/types/models/swin2sr/image_processing_swin2sr.d.ts +5 -0
  218. package/types/models/swin2sr/image_processing_swin2sr.d.ts.map +1 -0
  219. package/types/models/vit/image_processing_vit.d.ts +6 -0
  220. package/types/models/vit/image_processing_vit.d.ts.map +1 -0
  221. package/types/models/vitmatte/image_processing_vitmatte.d.ts +12 -0
  222. package/types/models/vitmatte/image_processing_vitmatte.d.ts.map +1 -0
  223. package/types/models/vitpose/image_processing_vitpose.d.ts +26 -0
  224. package/types/models/vitpose/image_processing_vitpose.d.ts.map +1 -0
  225. package/types/models/wav2vec2/feature_extraction_wav2vec2.d.ts +19 -0
  226. package/types/models/wav2vec2/feature_extraction_wav2vec2.d.ts.map +1 -0
  227. package/types/models/wav2vec2/processing_wav2vec2.d.ts +12 -0
  228. package/types/models/wav2vec2/processing_wav2vec2.d.ts.map +1 -0
  229. package/types/models/wespeaker/feature_extraction_wespeaker.d.ts +23 -0
  230. package/types/models/wespeaker/feature_extraction_wespeaker.d.ts.map +1 -0
  231. package/types/models/whisper/feature_extraction_whisper.d.ts +21 -0
  232. package/types/models/whisper/feature_extraction_whisper.d.ts.map +1 -0
  233. package/types/models/whisper/processing_whisper.d.ts +17 -0
  234. package/types/models/whisper/processing_whisper.d.ts.map +1 -0
  235. package/types/models/yolos/image_processing_yolos.d.ts +10 -0
  236. package/types/models/yolos/image_processing_yolos.d.ts.map +1 -0
  237. package/types/models.d.ts +150 -0
  238. package/types/models.d.ts.map +1 -1
  239. package/types/pipelines.d.ts +2 -3
  240. package/types/pipelines.d.ts.map +1 -1
  241. package/types/tokenizers.d.ts +3 -0
  242. package/types/tokenizers.d.ts.map +1 -1
  243. package/types/transformers.d.ts +10 -1
  244. package/types/utils/constants.d.ts +6 -0
  245. package/types/utils/constants.d.ts.map +1 -1
  246. package/types/utils/core.d.ts +65 -3
  247. package/types/utils/core.d.ts.map +1 -1
  248. package/types/utils/dtypes.d.ts +3 -2
  249. package/types/utils/dtypes.d.ts.map +1 -1
  250. package/types/utils/hub.d.ts +1 -1
  251. package/types/utils/hub.d.ts.map +1 -1
  252. package/types/utils/image.d.ts +14 -2
  253. package/types/utils/image.d.ts.map +1 -1
  254. package/types/utils/tensor.d.ts +39 -4
  255. package/types/utils/tensor.d.ts.map +1 -1
  256. package/src/processors.js +0 -2655
  257. package/types/processors.d.ts +0 -924
  258. package/types/processors.d.ts.map +0 -1
package/src/pipelines.js CHANGED
@@ -45,8 +45,10 @@ import {
45
45
  } from './models.js';
46
46
  import {
47
47
  AutoProcessor,
48
- Processor
49
- } from './processors.js';
48
+ } from './models/auto/processing_auto.js';
49
+ import {
50
+ Processor,
51
+ } from './base/processing_utils.js';
50
52
 
51
53
  import {
52
54
  Callable,
@@ -54,7 +56,6 @@ import {
54
56
 
55
57
  import {
56
58
  dispatchCallback,
57
- pop,
58
59
  product,
59
60
  } from './utils/core.js';
60
61
  import {
@@ -158,7 +159,6 @@ function get_bounding_box(box, asInteger) {
158
159
  /**
159
160
  * The Pipeline class is the class from which all pipelines inherit.
160
161
  * Refer to this class for methods shared across different pipelines.
161
- * @extends Callable
162
162
  */
163
163
  export class Pipeline extends Callable {
164
164
  /**
@@ -2131,8 +2131,8 @@ export class ImageSegmentationPipeline extends (/** @type {new (options: ImagePi
2131
2131
  fn = this.subtasks_mapping[subtask];
2132
2132
  } else {
2133
2133
  for (let [task, func] of Object.entries(this.subtasks_mapping)) {
2134
- if (func in this.processor.feature_extractor) {
2135
- fn = this.processor.feature_extractor[func].bind(this.processor.feature_extractor);
2134
+ if (func in this.processor.image_processor) {
2135
+ fn = this.processor.image_processor[func].bind(this.processor.image_processor);
2136
2136
  subtask = task;
2137
2137
  break;
2138
2138
  }
@@ -2362,7 +2362,7 @@ export class ObjectDetectionPipeline extends (/** @type {new (options: ImagePipe
2362
2362
  const output = await this.model({ pixel_values, pixel_mask });
2363
2363
 
2364
2364
  // @ts-ignore
2365
- const processed = this.processor.feature_extractor.post_process_object_detection(output, threshold, imageSizes);
2365
+ const processed = this.processor.image_processor.post_process_object_detection(output, threshold, imageSizes);
2366
2366
 
2367
2367
  // Add labels
2368
2368
  const id2label = this.model.config.id2label;
@@ -2510,7 +2510,7 @@ export class ZeroShotObjectDetectionPipeline extends (/** @type {new (options: T
2510
2510
  const output = await this.model({ ...text_inputs, pixel_values });
2511
2511
 
2512
2512
  // @ts-ignore
2513
- const processed = this.processor.feature_extractor.post_process_object_detection(output, threshold, imageSize, true)[0];
2513
+ const processed = this.processor.image_processor.post_process_object_detection(output, threshold, imageSize, true)[0];
2514
2514
  let result = processed.boxes.map((box, i) => ({
2515
2515
  score: processed.scores[i],
2516
2516
  label: candidate_labels[processed.classes[i]],
package/src/tokenizers.js CHANGED
@@ -1518,6 +1518,8 @@ class SplitPreTokenizer extends PreTokenizer {
1518
1518
 
1519
1519
  if (this.config.invert) {
1520
1520
  return text.match(this.pattern) || [];
1521
+ } else if (this.config.behavior?.toLowerCase() === 'removed') {
1522
+ return text.split(this.pattern).filter(x => x);
1521
1523
  } else {
1522
1524
  return regexSplit(text, this.pattern);
1523
1525
  }
@@ -4255,6 +4257,8 @@ export class VitsTokenizer extends PreTrainedTokenizer {
4255
4257
 
4256
4258
  export class CohereTokenizer extends PreTrainedTokenizer { }
4257
4259
 
4260
+ export class MgpstrTokenizer extends PreTrainedTokenizer { }
4261
+
4258
4262
  /**
4259
4263
  * Helper class which is used to instantiate pretrained tokenizers with the `from_pretrained` function.
4260
4264
  * The chosen tokenizer class is determined by the type specified in the tokenizer config.
@@ -4308,6 +4312,7 @@ export class AutoTokenizer {
4308
4312
  GemmaTokenizer,
4309
4313
  Grok1Tokenizer,
4310
4314
  CohereTokenizer,
4315
+ MgpstrTokenizer,
4311
4316
 
4312
4317
  // Base case:
4313
4318
  PreTrainedTokenizer,
@@ -12,10 +12,10 @@
12
12
  */
13
13
 
14
14
  export { env } from './env.js';
15
+
15
16
  export * from './pipelines.js';
16
17
  export * from './models.js';
17
18
  export * from './tokenizers.js';
18
- export * from './processors.js';
19
19
  export * from './configs.js';
20
20
 
21
21
  export * from './utils/audio.js';
@@ -23,6 +23,19 @@ export * from './utils/image.js';
23
23
  export * from './utils/tensor.js';
24
24
  export * from './utils/maths.js';
25
25
 
26
+
27
+ export { FeatureExtractor } from './base/feature_extraction_utils.js';
28
+ export * from './models/feature_extractors.js';
29
+ export * from './models/auto/feature_extraction_auto.js';
30
+
31
+ export { ImageProcessor } from './base/image_processors_utils.js';
32
+ export * from './models/image_processors.js';
33
+ export * from './models/auto/image_processing_auto.js';
34
+
35
+ export { Processor } from './base/processing_utils.js';
36
+ export * from './models/processors.js';
37
+ export * from './models/auto/processing_auto.js';
38
+
26
39
  export * from './generation/streamers.js';
27
40
  export * from './generation/stopping_criteria.js';
28
-
41
+ export * from './generation/logits_process.js';
@@ -1,2 +1,9 @@
1
1
 
2
- export const GITHUB_ISSUE_URL = 'https://github.com/huggingface/transformers.js/issues/new/choose';
2
+ export const GITHUB_ISSUE_URL = 'https://github.com/huggingface/transformers.js/issues/new/choose';
3
+
4
+ export const CONFIG_NAME = "config.json"
5
+ export const FEATURE_EXTRACTOR_NAME = "preprocessor_config.json"
6
+ export const IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME
7
+ export const PROCESSOR_NAME = "processor_config.json"
8
+ export const CHAT_TEMPLATE_NAME = "chat_template.json"
9
+ export const GENERATION_CONFIG_NAME = "generation_config.json"
package/src/utils/core.js CHANGED
@@ -1,18 +1,37 @@
1
1
 
2
2
  /**
3
3
  * @file Core utility functions/classes for Transformers.js.
4
- *
4
+ *
5
5
  * These are only used internally, meaning an end-user shouldn't
6
6
  * need to access anything here.
7
- *
7
+ *
8
8
  * @module utils/core
9
9
  */
10
10
 
11
+ /**
12
+ * @typedef {Object} ProgressInfo
13
+ * @property {'initiate' | 'download' | 'progress' | 'done'} status The status of the progress item.
14
+ * @property {string} name This can be either:
15
+ * - a string, the *model id* of a model repo on huggingface.co.
16
+ * - a path to a *directory* potentially containing the file.
17
+ * @property {string} file The name of the file
18
+ * @property {number} [progress] A number between 0 and 100. Only available for the 'progress' status.
19
+ * @property {number} [loaded] The number of bytes loaded. Only available for the 'progress' status.
20
+ * @property {number} [total] The total number of bytes to be loaded. Only available for the 'progress' status.
21
+ */
22
+
23
+ /**
24
+ * A callback function that is called with progress information.
25
+ * @callback ProgressCallback
26
+ * @param {ProgressInfo} progressInfo
27
+ * @returns {void}
28
+ */
29
+
11
30
  /**
12
31
  * Helper function to dispatch progress callbacks.
13
32
  *
14
- * @param {Function} progress_callback The progress callback function to dispatch.
15
- * @param {any} data The data to pass to the progress callback function.
33
+ * @param {ProgressCallback | null | undefined} progress_callback The progress callback function to dispatch.
34
+ * @param {ProgressInfo} data The data to pass to the progress callback function.
16
35
  * @returns {void}
17
36
  * @private
18
37
  */
@@ -46,7 +65,7 @@ export function escapeRegExp(string) {
46
65
  * Check if a value is a typed array.
47
66
  * @param {*} val The value to check.
48
67
  * @returns {boolean} True if the value is a `TypedArray`, false otherwise.
49
- *
68
+ *
50
69
  * Adapted from https://stackoverflow.com/a/71091338/13989043
51
70
  */
52
71
  export function isTypedArray(val) {
@@ -63,6 +82,15 @@ export function isIntegralNumber(x) {
63
82
  return Number.isInteger(x) || typeof x === 'bigint'
64
83
  }
65
84
 
85
+ /**
86
+ * Determine if a provided width or height is nullish.
87
+ * @param {*} x The value to check.
88
+ * @returns {boolean} True if the value is `null`, `undefined` or `-1`, false otherwise.
89
+ */
90
+ export function isNullishDimension(x) {
91
+ return x === null || x === undefined || x === -1;
92
+ }
93
+
66
94
  /**
67
95
  * Calculates the dimensions of a nested array.
68
96
  *
@@ -132,9 +160,9 @@ export function calculateReflectOffset(i, w) {
132
160
  }
133
161
 
134
162
  /**
135
- *
136
- * @param {Object} o
137
- * @param {string[]} props
163
+ *
164
+ * @param {Object} o
165
+ * @param {string[]} props
138
166
  * @returns {Object}
139
167
  */
140
168
  export function pick(o, props) {
@@ -151,7 +179,7 @@ export function pick(o, props) {
151
179
  /**
152
180
  * Calculate the length of a string, taking multi-byte characters into account.
153
181
  * This mimics the behavior of Python's `len` function.
154
- * @param {string} s The string to calculate the length of.
182
+ * @param {string} s The string to calculate the length of.
155
183
  * @returns {number} The length of the string.
156
184
  */
157
185
  export function len(s) {
@@ -159,3 +187,17 @@ export function len(s) {
159
187
  for (const c of s) ++length;
160
188
  return length;
161
189
  }
190
+
191
+ /**
192
+ * Count the occurrences of a value in an array or string.
193
+ * This mimics the behavior of Python's `count` method.
194
+ * @param {any[]|string} arr The array or string to search.
195
+ * @param {any} value The value to count.
196
+ */
197
+ export function count(arr, value) {
198
+ let count = 0;
199
+ for (const v of arr) {
200
+ if (v === value) ++count;
201
+ }
202
+ return count;
203
+ }
@@ -31,6 +31,7 @@ export const isWebGpuFp16Supported = (function () {
31
31
  })();
32
32
 
33
33
  export const DATA_TYPES = Object.freeze({
34
+ auto: 'auto', // Auto-detect based on environment
34
35
  fp32: 'fp32',
35
36
  fp16: 'fp16',
36
37
  q8: 'q8',
@@ -47,7 +48,7 @@ export const DEFAULT_DEVICE_DTYPE_MAPPING = Object.freeze({
47
48
  [DEVICE_TYPES.wasm]: DATA_TYPES.q8,
48
49
  });
49
50
 
50
- /** @type {Record<DataType, string>} */
51
+ /** @type {Record<Exclude<DataType, "auto">, string>} */
51
52
  export const DEFAULT_DTYPE_SUFFIX_MAPPING = Object.freeze({
52
53
  [DATA_TYPES.fp32]: '',
53
54
  [DATA_TYPES.fp16]: '_fp16',
package/src/utils/hub.js CHANGED
@@ -13,7 +13,7 @@ import { dispatchCallback } from './core.js';
13
13
 
14
14
  /**
15
15
  * @typedef {Object} PretrainedOptions Options for loading a pretrained model.
16
- * @property {function} [progress_callback=null] If specified, this function will be called during model construction, to provide the user with progress updates.
16
+ * @property {import('./core.js').ProgressCallback} [progress_callback=null] If specified, this function will be called during model construction, to provide the user with progress updates.
17
17
  * @property {import('../configs.js').PretrainedConfig} [config=null] Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:
18
18
  * - The model is a model provided by the library (loaded with the *model id* string of a pretrained model).
19
19
  * - The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a configuration JSON file named *config.json* is found in the directory.
@@ -504,6 +504,7 @@ export async function getModelFile(path_or_repo_id, filename, fatal = true, opti
504
504
  file: filename
505
505
  })
506
506
 
507
+ /** @type {import('./core.js').ProgressInfo} */
507
508
  const progressInfo = {
508
509
  status: 'progress',
509
510
  name: path_or_repo_id,
@@ -1,27 +1,26 @@
1
1
 
2
2
  /**
3
- * @file Helper module for image processing.
4
- *
5
- * These functions and classes are only used internally,
3
+ * @file Helper module for image processing.
4
+ *
5
+ * These functions and classes are only used internally,
6
6
  * meaning an end-user shouldn't need to access anything here.
7
- *
7
+ *
8
8
  * @module utils/image
9
9
  */
10
10
 
11
+ import { isNullishDimension } from './core.js';
11
12
  import { getFile } from './hub.js';
12
- import { env } from '../env.js';
13
+ import { env, apis } from '../env.js';
13
14
  import { Tensor } from './tensor.js';
14
15
 
15
16
  // Will be empty (or not used) if running in browser or web-worker
16
17
  import sharp from 'sharp';
17
18
 
18
- const BROWSER_ENV = typeof self !== 'undefined';
19
- const WEBWORKER_ENV = BROWSER_ENV && self.constructor.name === 'DedicatedWorkerGlobalScope';
20
-
21
19
  let createCanvasFunction;
22
20
  let ImageDataClass;
23
21
  let loadImageFunction;
24
- if (BROWSER_ENV) {
22
+ const IS_BROWSER_OR_WEBWORKER = apis.IS_BROWSER_ENV || apis.IS_WEBWORKER_ENV;
23
+ if (IS_BROWSER_OR_WEBWORKER) {
25
24
  // Running in browser or web-worker
26
25
  createCanvasFunction = (/** @type {number} */ width, /** @type {number} */ height) => {
27
26
  if (!self.OffscreenCanvas) {
@@ -91,7 +90,7 @@ export class RawImage {
91
90
  this.channels = channels;
92
91
  }
93
92
 
94
- /**
93
+ /**
95
94
  * Returns the size of the image (width, height).
96
95
  * @returns {[number, number]} The size of the image (width, height).
97
96
  */
@@ -101,9 +100,9 @@ export class RawImage {
101
100
 
102
101
  /**
103
102
  * Helper method for reading an image from a variety of input types.
104
- * @param {RawImage|string|URL} input
103
+ * @param {RawImage|string|URL} input
105
104
  * @returns The image object.
106
- *
105
+ *
107
106
  * **Example:** Read image from a URL.
108
107
  * ```javascript
109
108
  * let image = await RawImage.read('https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/football-match.jpg');
@@ -131,7 +130,7 @@ export class RawImage {
131
130
  * @returns {RawImage} The image object.
132
131
  */
133
132
  static fromCanvas(canvas) {
134
- if (!BROWSER_ENV) {
133
+ if (!IS_BROWSER_OR_WEBWORKER) {
135
134
  throw new Error('fromCanvas() is only supported in browser environments.')
136
135
  }
137
136
 
@@ -160,7 +159,7 @@ export class RawImage {
160
159
  * @returns {Promise<RawImage>} The image object.
161
160
  */
162
161
  static async fromBlob(blob) {
163
- if (BROWSER_ENV) {
162
+ if (IS_BROWSER_OR_WEBWORKER) {
164
163
  // Running in environment with canvas
165
164
  const img = await loadImageFunction(blob);
166
165
 
@@ -181,7 +180,7 @@ export class RawImage {
181
180
 
182
181
  /**
183
182
  * Helper method to create a new Image from a tensor
184
- * @param {Tensor} tensor
183
+ * @param {Tensor} tensor
185
184
  */
186
185
  static fromTensor(tensor, channel_format = 'CHW') {
187
186
  if (tensor.dims.length !== 3) {
@@ -306,8 +305,8 @@ export class RawImage {
306
305
 
307
306
  /**
308
307
  * Resize the image to the given dimensions. This method uses the canvas API to perform the resizing.
309
- * @param {number} width The width of the new image.
310
- * @param {number} height The height of the new image.
308
+ * @param {number} width The width of the new image. `null` or `-1` will preserve the aspect ratio.
309
+ * @param {number} height The height of the new image. `null` or `-1` will preserve the aspect ratio.
311
310
  * @param {Object} options Additional options for resizing.
312
311
  * @param {0|1|2|3|4|5|string} [options.resample] The resampling method to use.
313
312
  * @returns {Promise<RawImage>} `this` to support chaining.
@@ -316,10 +315,29 @@ export class RawImage {
316
315
  resample = 2,
317
316
  } = {}) {
318
317
 
318
+ // Do nothing if the image already has the desired size
319
+ if (this.width === width && this.height === height) {
320
+ return this;
321
+ }
322
+
319
323
  // Ensure resample method is a string
320
324
  let resampleMethod = RESAMPLING_MAPPING[resample] ?? resample;
321
325
 
322
- if (BROWSER_ENV) {
326
+ // Calculate width / height to maintain aspect ratio, in the event that
327
+ // the user passed a null value in.
328
+ // This allows users to pass in something like `resize(320, null)` to
329
+ // resize to 320 width, but maintain aspect ratio.
330
+ const nullish_width = isNullishDimension(width);
331
+ const nullish_height = isNullishDimension(height);
332
+ if (nullish_width && nullish_height) {
333
+ return this;
334
+ } else if (nullish_width) {
335
+ width = (height / this.height) * this.width;
336
+ } else if (nullish_height) {
337
+ height = (width / this.width) * this.height;
338
+ }
339
+
340
+ if (IS_BROWSER_OR_WEBWORKER) {
323
341
  // TODO use `resample` in browser environment
324
342
 
325
343
  // Store number of channels before resizing
@@ -355,7 +373,7 @@ export class RawImage {
355
373
  case 'nearest':
356
374
  case 'bilinear':
357
375
  case 'bicubic':
358
- // Perform resizing using affine transform.
376
+ // Perform resizing using affine transform.
359
377
  // This matches how the python Pillow library does it.
360
378
  img = img.affine([width / this.width, 0, 0, height / this.height], {
361
379
  interpolator: resampleMethod
@@ -368,7 +386,7 @@ export class RawImage {
368
386
  img = img.resize({
369
387
  width, height,
370
388
  fit: 'fill',
371
- kernel: 'lanczos3', // PIL Lanczos uses a kernel size of 3
389
+ kernel: 'lanczos3', // PIL Lanczos uses a kernel size of 3
372
390
  });
373
391
  break;
374
392
 
@@ -392,7 +410,7 @@ export class RawImage {
392
410
  return this;
393
411
  }
394
412
 
395
- if (BROWSER_ENV) {
413
+ if (IS_BROWSER_OR_WEBWORKER) {
396
414
  // Store number of channels before padding
397
415
  const numChannels = this.channels;
398
416
 
@@ -408,13 +426,14 @@ export class RawImage {
408
426
  // Draw image to context, padding in the process
409
427
  ctx.drawImage(canvas,
410
428
  0, 0, this.width, this.height,
411
- left, top, newWidth, newHeight
429
+ left, top, this.width, this.height
412
430
  );
413
431
 
414
432
  // Create image from the padded data
415
433
  const paddedImage = new RawImage(
416
434
  ctx.getImageData(0, 0, newWidth, newHeight).data,
417
- newWidth, newHeight, 4);
435
+ newWidth, newHeight, 4
436
+ );
418
437
 
419
438
  // Convert back so that image has the same number of channels as before
420
439
  return paddedImage.convert(numChannels);
@@ -440,14 +459,14 @@ export class RawImage {
440
459
  const crop_width = x_max - x_min + 1;
441
460
  const crop_height = y_max - y_min + 1;
442
461
 
443
- if (BROWSER_ENV) {
462
+ if (IS_BROWSER_OR_WEBWORKER) {
444
463
  // Store number of channels before resizing
445
464
  const numChannels = this.channels;
446
465
 
447
466
  // Create canvas object for this image
448
467
  const canvas = this.toCanvas();
449
468
 
450
- // Create a new canvas of the desired size. This is needed since if the
469
+ // Create a new canvas of the desired size. This is needed since if the
451
470
  // image is too small, we need to pad it with black pixels.
452
471
  const ctx = createCanvasFunction(crop_width, crop_height).getContext('2d');
453
472
 
@@ -488,14 +507,14 @@ export class RawImage {
488
507
  const height_offset = (this.height - crop_height) / 2;
489
508
 
490
509
 
491
- if (BROWSER_ENV) {
510
+ if (IS_BROWSER_OR_WEBWORKER) {
492
511
  // Store number of channels before resizing
493
512
  const numChannels = this.channels;
494
513
 
495
514
  // Create canvas object for this image
496
515
  const canvas = this.toCanvas();
497
516
 
498
- // Create a new canvas of the desired size. This is needed since if the
517
+ // Create a new canvas of the desired size. This is needed since if the
499
518
  // image is too small, we need to pad it with black pixels.
500
519
  const ctx = createCanvasFunction(crop_width, crop_height).getContext('2d');
501
520
 
@@ -593,7 +612,7 @@ export class RawImage {
593
612
  }
594
613
 
595
614
  async toBlob(type = 'image/png', quality = 1) {
596
- if (!BROWSER_ENV) {
615
+ if (!IS_BROWSER_OR_WEBWORKER) {
597
616
  throw new Error('toBlob() is only supported in browser environments.')
598
617
  }
599
618
 
@@ -619,7 +638,7 @@ export class RawImage {
619
638
  }
620
639
 
621
640
  toCanvas() {
622
- if (!BROWSER_ENV) {
641
+ if (!IS_BROWSER_OR_WEBWORKER) {
623
642
  throw new Error('toCanvas() is only supported in browser environments.')
624
643
  }
625
644
 
@@ -637,6 +656,36 @@ export class RawImage {
637
656
  return clonedCanvas;
638
657
  }
639
658
 
659
+ /**
660
+ * Split this image into individual bands. This method returns an array of individual image bands from an image.
661
+ * For example, splitting an "RGB" image creates three new images each containing a copy of one of the original bands (red, green, blue).
662
+ *
663
+ * Inspired by PIL's `Image.split()` [function](https://pillow.readthedocs.io/en/latest/reference/Image.html#PIL.Image.Image.split).
664
+ * @returns {RawImage[]} An array containing bands.
665
+ */
666
+ split() {
667
+ const { data, width, height, channels } = this;
668
+
669
+ /** @type {typeof Uint8Array | typeof Uint8ClampedArray} */
670
+ const data_type = /** @type {any} */(data.constructor);
671
+ const per_channel_length = data.length / channels;
672
+
673
+ // Pre-allocate buffers for each channel
674
+ const split_data = Array.from(
675
+ { length: channels },
676
+ () => new data_type(per_channel_length),
677
+ );
678
+
679
+ // Write pixel data
680
+ for (let i = 0; i < per_channel_length; ++i) {
681
+ const data_offset = channels * i;
682
+ for (let j = 0; j < channels; ++j) {
683
+ split_data[j][i] = data[data_offset + j];
684
+ }
685
+ }
686
+ return split_data.map((data) => new RawImage(data, width, height, 1));
687
+ }
688
+
640
689
  /**
641
690
  * Helper method to update the image data.
642
691
  * @param {Uint8ClampedArray} data The new image data.
@@ -693,8 +742,8 @@ export class RawImage {
693
742
  */
694
743
  async save(path) {
695
744
 
696
- if (BROWSER_ENV) {
697
- if (WEBWORKER_ENV) {
745
+ if (IS_BROWSER_OR_WEBWORKER) {
746
+ if (apis.IS_WEBWORKER_ENV) {
698
747
  throw new Error('Unable to save an image from a Web Worker.')
699
748
  }
700
749
 
@@ -730,7 +779,7 @@ export class RawImage {
730
779
  }
731
780
 
732
781
  toSharp() {
733
- if (BROWSER_ENV) {
782
+ if (IS_BROWSER_OR_WEBWORKER) {
734
783
  throw new Error('toSharp() is only supported in server-side environments.')
735
784
  }
736
785
 
@@ -742,4 +791,9 @@ export class RawImage {
742
791
  }
743
792
  });
744
793
  }
745
- }
794
+ }
795
+
796
+ /**
797
+ * Helper function to load an image from a URL, path, etc.
798
+ */
799
+ export const load_image = RawImage.read.bind(RawImage);
@@ -32,6 +32,8 @@ const DataTypeMap = Object.freeze({
32
32
  int64: BigInt64Array,
33
33
  uint64: BigUint64Array,
34
34
  bool: Uint8Array,
35
+ uint4: Uint8Array,
36
+ int4: Int8Array,
35
37
  });
36
38
 
37
39
  /**
@@ -340,10 +342,43 @@ export class Tensor {
340
342
  return this;
341
343
  }
342
344
 
345
+ /**
346
+ * Creates a deep copy of the current Tensor.
347
+ * @returns {Tensor} A new Tensor with the same type, data, and dimensions as the original.
348
+ */
343
349
  clone() {
344
350
  return new Tensor(this.type, this.data.slice(), this.dims.slice());
345
351
  }
346
352
 
353
+ /**
354
+ * Performs a slice operation on the Tensor along specified dimensions.
355
+ *
356
+ * Consider a Tensor that has a dimension of [4, 7]:
357
+ * ```
358
+ * [ 1, 2, 3, 4, 5, 6, 7]
359
+ * [ 8, 9, 10, 11, 12, 13, 14]
360
+ * [15, 16, 17, 18, 19, 20, 21]
361
+ * [22, 23, 24, 25, 26, 27, 28]
362
+ * ```
363
+ * We can slice against the two dims of row and column, for instance in this
364
+ * case we can start at the second element, and return to the second last,
365
+ * like this:
366
+ * ```
367
+ * tensor.slice([1, -1], [1, -1]);
368
+ * ```
369
+ * which would return:
370
+ * ```
371
+ * [ 9, 10, 11, 12, 13 ]
372
+ * [ 16, 17, 18, 19, 20 ]
373
+ * ```
374
+ *
375
+ * @param {...(number|number[]|null)} slices The slice specifications for each dimension.
376
+ * - If a number is given, then a single element is selected.
377
+ * - If an array of two numbers is given, then a range of elements [start, end (exclusive)] is selected.
378
+ * - If null is given, then the entire dimension is selected.
379
+ * @returns {Tensor} A new Tensor containing the selected elements.
380
+ * @throws {Error} If the slice input is invalid.
381
+ */
347
382
  slice(...slices) {
348
383
  // This allows for slicing with ranges and numbers
349
384
  const newTensorDims = [];
@@ -413,7 +448,6 @@ export class Tensor {
413
448
  data[i] = this_data[originalIndex];
414
449
  }
415
450
  return new Tensor(this.type, data, newTensorDims);
416
-
417
451
  }
418
452
 
419
453
  /**
@@ -1321,7 +1355,7 @@ function fullHelper(size, fill_value, dtype, cls) {
1321
1355
  /**
1322
1356
  * Creates a tensor of size size filled with fill_value. The tensor's dtype is inferred from fill_value.
1323
1357
  * @param {number[]} size A sequence of integers defining the shape of the output tensor.
1324
- * @param {number|bigint} fill_value The value to fill the output tensor with.
1358
+ * @param {number|bigint|boolean} fill_value The value to fill the output tensor with.
1325
1359
  * @returns {Tensor} The filled tensor.
1326
1360
  */
1327
1361
  export function full(size, fill_value) {
@@ -1333,6 +1367,9 @@ export function full(size, fill_value) {
1333
1367
  } else if (typeof fill_value === 'bigint') {
1334
1368
  dtype = 'int64';
1335
1369
  typedArrayCls = BigInt64Array;
1370
+ } else if (typeof fill_value === 'boolean') {
1371
+ dtype = 'bool';
1372
+ typedArrayCls = Uint8Array;
1336
1373
  } else {
1337
1374
  // TODO: support other dtypes
1338
1375
  throw new Error(`Unsupported data type: ${typeof fill_value}`);
@@ -0,0 +1,41 @@
1
+ /**
2
+ * Helper function to validate audio inputs.
3
+ * @param {any} audio The audio data.
4
+ * @param {string} feature_extractor The name of the feature extractor.
5
+ * @private
6
+ */
7
+ export function validate_audio_inputs(audio: any, feature_extractor: string): void;
8
+ declare const FeatureExtractor_base: new () => {
9
+ (...args: any[]): any;
10
+ _call(...args: any[]): any;
11
+ };
12
+ /**
13
+ * Base class for feature extractors.
14
+ */
15
+ export class FeatureExtractor extends FeatureExtractor_base {
16
+ /**
17
+ * Instantiate one of the processor classes of the library from a pretrained model.
18
+ *
19
+ * The processor class to instantiate is selected based on the `image_processor_type` (or `feature_extractor_type`; legacy)
20
+ * property of the config object (either passed as an argument or loaded from `pretrained_model_name_or_path` if possible)
21
+ *
22
+ * @param {string} pretrained_model_name_or_path The name or path of the pretrained model. Can be either:
23
+ * - A string, the *model id* of a pretrained processor hosted inside a model repo on huggingface.co.
24
+ * Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a
25
+ * user or organization name, like `dbmdz/bert-base-german-cased`.
26
+ * - A path to a *directory* containing processor files, e.g., `./my_model_directory/`.
27
+ * @param {import('../utils/hub.js').PretrainedOptions} options Additional options for loading the processor.
28
+ *
29
+ * @returns {Promise<FeatureExtractor>} A new instance of the Processor class.
30
+ */
31
+ static from_pretrained(pretrained_model_name_or_path: string, options: import('../utils/hub.js').PretrainedOptions): Promise<FeatureExtractor>;
32
+ /**
33
+ * Constructs a new FeatureExtractor instance.
34
+ *
35
+ * @param {Object} config The configuration for the feature extractor.
36
+ */
37
+ constructor(config: any);
38
+ config: any;
39
+ }
40
+ export {};
41
+ //# sourceMappingURL=feature_extraction_utils.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"feature_extraction_utils.d.ts","sourceRoot":"","sources":["../../src/base/feature_extraction_utils.js"],"names":[],"mappings":"AAwCA;;;;;GAKG;AACH,6CAJW,GAAG,qBACH,MAAM,QAUhB;;;;;AAjDD;;GAEG;AACH;IAWI;;;;;;;;;;;;;;OAcG;IACH,sDATW,MAAM,WAKN,OAAO,iBAAiB,EAAE,iBAAiB,GAEzC,QAAQ,gBAAgB,CAAC,CAKrC;IA5BD;;;;OAIG;IACH,yBAGC;IADG,YAAoB;CAsB3B"}