@huggingface/transformers 3.4.0 → 3.4.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. package/README.md +8 -2
  2. package/dist/transformers.js +528 -201
  3. package/dist/transformers.js.map +1 -1
  4. package/dist/transformers.min.js +1 -1
  5. package/dist/transformers.min.js.map +1 -1
  6. package/dist/transformers.node.cjs +508 -200
  7. package/dist/transformers.node.cjs.map +1 -1
  8. package/dist/transformers.node.min.cjs +1 -1
  9. package/dist/transformers.node.min.cjs.map +1 -1
  10. package/dist/transformers.node.min.mjs +1 -1
  11. package/dist/transformers.node.min.mjs.map +1 -1
  12. package/dist/transformers.node.mjs +528 -201
  13. package/dist/transformers.node.mjs.map +1 -1
  14. package/dist/transformers.web.js +528 -201
  15. package/dist/transformers.web.js.map +1 -1
  16. package/dist/transformers.web.min.js +1 -1
  17. package/dist/transformers.web.min.js.map +1 -1
  18. package/package.json +1 -1
  19. package/src/configs.js +2 -0
  20. package/src/env.js +1 -1
  21. package/src/models/feature_extractors.js +1 -0
  22. package/src/models/snac/feature_extraction_snac.js +3 -0
  23. package/src/models.js +125 -2
  24. package/src/pipelines.js +140 -135
  25. package/src/tokenizers.js +44 -34
  26. package/src/utils/data-structures.js +74 -0
  27. package/src/utils/hub.js +36 -15
  28. package/src/utils/image.js +9 -1
  29. package/src/utils/tensor.js +6 -2
  30. package/types/configs.d.ts.map +1 -1
  31. package/types/models/feature_extractors.d.ts +1 -0
  32. package/types/models/snac/feature_extraction_snac.d.ts +4 -0
  33. package/types/models/snac/feature_extraction_snac.d.ts.map +1 -0
  34. package/types/models.d.ts +72 -0
  35. package/types/models.d.ts.map +1 -1
  36. package/types/pipelines.d.ts +2 -2
  37. package/types/pipelines.d.ts.map +1 -1
  38. package/types/tokenizers.d.ts +4 -1
  39. package/types/tokenizers.d.ts.map +1 -1
  40. package/types/tsconfig.tsbuildinfo +1 -1
  41. package/types/utils/data-structures.d.ts +26 -0
  42. package/types/utils/data-structures.d.ts.map +1 -1
  43. package/types/utils/hub.d.ts.map +1 -1
  44. package/types/utils/image.d.ts +2 -2
  45. package/types/utils/image.d.ts.map +1 -1
  46. package/types/utils/tensor.d.ts.map +1 -1
@@ -3692,6 +3692,7 @@ function getNormalizedConfig(config) {
3692
3692
  // Sub-configs
3693
3693
  case 'llava':
3694
3694
  case 'paligemma':
3695
+ case 'gemma3':
3695
3696
  case 'florence2':
3696
3697
  case 'llava_onevision':
3697
3698
  case 'idefics3':
@@ -3751,6 +3752,7 @@ function getNormalizedConfig(config) {
3751
3752
  break;
3752
3753
  case 'gemma':
3753
3754
  case 'gemma2':
3755
+ case 'gemma3_text':
3754
3756
  case 'glm':
3755
3757
  case 'helium':
3756
3758
  mapping['num_heads'] = 'num_key_value_heads';
@@ -4081,7 +4083,7 @@ __webpack_require__.r(__webpack_exports__);
4081
4083
 
4082
4084
 
4083
4085
 
4084
- const VERSION = '3.4.0';
4086
+ const VERSION = '3.4.2';
4085
4087
 
4086
4088
  // Check if various APIs are available (depends on environment)
4087
4089
  const IS_BROWSER_ENV = typeof window !== "undefined" && typeof window.document !== "undefined";
@@ -6219,6 +6221,9 @@ __webpack_require__.r(__webpack_exports__);
6219
6221
  /* harmony export */ Gemma2ForCausalLM: () => (/* binding */ Gemma2ForCausalLM),
6220
6222
  /* harmony export */ Gemma2Model: () => (/* binding */ Gemma2Model),
6221
6223
  /* harmony export */ Gemma2PreTrainedModel: () => (/* binding */ Gemma2PreTrainedModel),
6224
+ /* harmony export */ Gemma3ForCausalLM: () => (/* binding */ Gemma3ForCausalLM),
6225
+ /* harmony export */ Gemma3Model: () => (/* binding */ Gemma3Model),
6226
+ /* harmony export */ Gemma3PreTrainedModel: () => (/* binding */ Gemma3PreTrainedModel),
6222
6227
  /* harmony export */ GemmaForCausalLM: () => (/* binding */ GemmaForCausalLM),
6223
6228
  /* harmony export */ GemmaModel: () => (/* binding */ GemmaModel),
6224
6229
  /* harmony export */ GemmaPreTrainedModel: () => (/* binding */ GemmaPreTrainedModel),
@@ -6289,6 +6294,10 @@ __webpack_require__.r(__webpack_exports__);
6289
6294
  /* harmony export */ MaskFormerModel: () => (/* binding */ MaskFormerModel),
6290
6295
  /* harmony export */ MaskFormerPreTrainedModel: () => (/* binding */ MaskFormerPreTrainedModel),
6291
6296
  /* harmony export */ MaskedLMOutput: () => (/* binding */ MaskedLMOutput),
6297
+ /* harmony export */ Metric3DForDepthEstimation: () => (/* binding */ Metric3DForDepthEstimation),
6298
+ /* harmony export */ Metric3DPreTrainedModel: () => (/* binding */ Metric3DPreTrainedModel),
6299
+ /* harmony export */ Metric3Dv2ForDepthEstimation: () => (/* binding */ Metric3Dv2ForDepthEstimation),
6300
+ /* harmony export */ Metric3Dv2PreTrainedModel: () => (/* binding */ Metric3Dv2PreTrainedModel),
6292
6301
  /* harmony export */ MgpstrForSceneTextRecognition: () => (/* binding */ MgpstrForSceneTextRecognition),
6293
6302
  /* harmony export */ MgpstrModelOutput: () => (/* binding */ MgpstrModelOutput),
6294
6303
  /* harmony export */ MgpstrPreTrainedModel: () => (/* binding */ MgpstrPreTrainedModel),
@@ -6400,10 +6409,18 @@ __webpack_require__.r(__webpack_exports__);
6400
6409
  /* harmony export */ Qwen2PreTrainedModel: () => (/* binding */ Qwen2PreTrainedModel),
6401
6410
  /* harmony export */ Qwen2VLForConditionalGeneration: () => (/* binding */ Qwen2VLForConditionalGeneration),
6402
6411
  /* harmony export */ Qwen2VLPreTrainedModel: () => (/* binding */ Qwen2VLPreTrainedModel),
6412
+ /* harmony export */ RFDetrForObjectDetection: () => (/* binding */ RFDetrForObjectDetection),
6413
+ /* harmony export */ RFDetrModel: () => (/* binding */ RFDetrModel),
6414
+ /* harmony export */ RFDetrObjectDetectionOutput: () => (/* binding */ RFDetrObjectDetectionOutput),
6415
+ /* harmony export */ RFDetrPreTrainedModel: () => (/* binding */ RFDetrPreTrainedModel),
6403
6416
  /* harmony export */ RTDetrForObjectDetection: () => (/* binding */ RTDetrForObjectDetection),
6404
6417
  /* harmony export */ RTDetrModel: () => (/* binding */ RTDetrModel),
6405
6418
  /* harmony export */ RTDetrObjectDetectionOutput: () => (/* binding */ RTDetrObjectDetectionOutput),
6406
6419
  /* harmony export */ RTDetrPreTrainedModel: () => (/* binding */ RTDetrPreTrainedModel),
6420
+ /* harmony export */ RTDetrV2ForObjectDetection: () => (/* binding */ RTDetrV2ForObjectDetection),
6421
+ /* harmony export */ RTDetrV2Model: () => (/* binding */ RTDetrV2Model),
6422
+ /* harmony export */ RTDetrV2ObjectDetectionOutput: () => (/* binding */ RTDetrV2ObjectDetectionOutput),
6423
+ /* harmony export */ RTDetrV2PreTrainedModel: () => (/* binding */ RTDetrV2PreTrainedModel),
6407
6424
  /* harmony export */ ResNetForImageClassification: () => (/* binding */ ResNetForImageClassification),
6408
6425
  /* harmony export */ ResNetModel: () => (/* binding */ ResNetModel),
6409
6426
  /* harmony export */ ResNetPreTrainedModel: () => (/* binding */ ResNetPreTrainedModel),
@@ -6437,6 +6454,10 @@ __webpack_require__.r(__webpack_exports__);
6437
6454
  /* harmony export */ SiglipTextModel: () => (/* binding */ SiglipTextModel),
6438
6455
  /* harmony export */ SiglipVisionModel: () => (/* binding */ SiglipVisionModel),
6439
6456
  /* harmony export */ SmolVLMForConditionalGeneration: () => (/* binding */ SmolVLMForConditionalGeneration),
6457
+ /* harmony export */ SnacDecoderModel: () => (/* binding */ SnacDecoderModel),
6458
+ /* harmony export */ SnacEncoderModel: () => (/* binding */ SnacEncoderModel),
6459
+ /* harmony export */ SnacModel: () => (/* binding */ SnacModel),
6460
+ /* harmony export */ SnacPreTrainedModel: () => (/* binding */ SnacPreTrainedModel),
6440
6461
  /* harmony export */ SpeechT5ForSpeechToText: () => (/* binding */ SpeechT5ForSpeechToText),
6441
6462
  /* harmony export */ SpeechT5ForTextToSpeech: () => (/* binding */ SpeechT5ForTextToSpeech),
6442
6463
  /* harmony export */ SpeechT5HifiGan: () => (/* binding */ SpeechT5HifiGan),
@@ -7095,8 +7116,8 @@ async function decoderForward(self, model_inputs, is_encoder_decoder = false) {
7095
7116
  new_model_inputs.use_cache_branch = boolTensor(!!past_key_values);
7096
7117
  }
7097
7118
  if (session.inputNames.includes('position_ids') && new_model_inputs.attention_mask && !new_model_inputs.position_ids) {
7098
- // NOTE: Handle a special case for paligemma models, where positions are 1-indexed
7099
- const start_index = self.config.model_type === 'paligemma' ? 1 : 0;
7119
+ // NOTE: Handle a special case for paligemma/gemma3 models, where positions are 1-indexed
7120
+ const start_index = ['paligemma', 'gemma3_text', 'gemma3'].includes(self.config.model_type) ? 1 : 0;
7100
7121
  new_model_inputs.position_ids = createPositionIds(new_model_inputs, past_key_values, start_index);
7101
7122
  }
7102
7123
 
@@ -11021,6 +11042,23 @@ class Gemma2Model extends Gemma2PreTrainedModel { }
11021
11042
  class Gemma2ForCausalLM extends Gemma2PreTrainedModel { }
11022
11043
  //////////////////////////////////////////////////
11023
11044
 
11045
+
11046
+ //////////////////////////////////////////////////
11047
+ // Gemma3 models
11048
+
11049
+ /**
11050
+ * The bare Gemma3 Model outputting raw hidden-states without any specific head on top.
11051
+ */
11052
+ class Gemma3PreTrainedModel extends PreTrainedModel { }
11053
+ /**
11054
+ * The bare Gemma3 Model outputting raw hidden-states without any specific head on top.
11055
+ */
11056
+ class Gemma3Model extends Gemma3PreTrainedModel { }
11057
+
11058
+ class Gemma3ForCausalLM extends Gemma3PreTrainedModel { }
11059
+ //////////////////////////////////////////////////
11060
+
11061
+
11024
11062
  //////////////////////////////////////////////////
11025
11063
  class OpenELMPreTrainedModel extends PreTrainedModel { }
11026
11064
  class OpenELMModel extends OpenELMPreTrainedModel { }
@@ -11665,6 +11703,37 @@ class RTDetrObjectDetectionOutput extends ModelOutput {
11665
11703
  }
11666
11704
  //////////////////////////////////////////////////
11667
11705
 
11706
+
11707
+ //////////////////////////////////////////////////
11708
+ class RTDetrV2PreTrainedModel extends PreTrainedModel { }
11709
+ class RTDetrV2Model extends RTDetrV2PreTrainedModel { }
11710
+ class RTDetrV2ForObjectDetection extends RTDetrV2PreTrainedModel {
11711
+ /**
11712
+ * @param {any} model_inputs
11713
+ */
11714
+ async _call(model_inputs) {
11715
+ return new RTDetrV2ObjectDetectionOutput(await super._call(model_inputs));
11716
+ }
11717
+ }
11718
+
11719
+ class RTDetrV2ObjectDetectionOutput extends RTDetrObjectDetectionOutput {}
11720
+ //////////////////////////////////////////////////
11721
+
11722
+ //////////////////////////////////////////////////
11723
+ class RFDetrPreTrainedModel extends PreTrainedModel { }
11724
+ class RFDetrModel extends RFDetrPreTrainedModel { }
11725
+ class RFDetrForObjectDetection extends RFDetrPreTrainedModel {
11726
+ /**
11727
+ * @param {any} model_inputs
11728
+ */
11729
+ async _call(model_inputs) {
11730
+ return new RFDetrObjectDetectionOutput(await super._call(model_inputs));
11731
+ }
11732
+ }
11733
+
11734
+ class RFDetrObjectDetectionOutput extends RTDetrObjectDetectionOutput {}
11735
+ //////////////////////////////////////////////////
11736
+
11668
11737
  //////////////////////////////////////////////////
11669
11738
  class TableTransformerPreTrainedModel extends PreTrainedModel { }
11670
11739
 
@@ -11873,6 +11942,16 @@ class DepthProPreTrainedModel extends PreTrainedModel { }
11873
11942
  class DepthProForDepthEstimation extends DepthProPreTrainedModel { }
11874
11943
  //////////////////////////////////////////////////
11875
11944
 
11945
+ //////////////////////////////////////////////////
11946
+ class Metric3DPreTrainedModel extends PreTrainedModel { }
11947
+ class Metric3DForDepthEstimation extends Metric3DPreTrainedModel { }
11948
+ //////////////////////////////////////////////////
11949
+
11950
+ //////////////////////////////////////////////////
11951
+ class Metric3Dv2PreTrainedModel extends PreTrainedModel { }
11952
+ class Metric3Dv2ForDepthEstimation extends Metric3Dv2PreTrainedModel { }
11953
+ //////////////////////////////////////////////////
11954
+
11876
11955
  //////////////////////////////////////////////////
11877
11956
  class MaskFormerPreTrainedModel extends PreTrainedModel { }
11878
11957
  class MaskFormerModel extends MaskFormerPreTrainedModel { }
@@ -13788,6 +13867,60 @@ class DacDecoderModel extends DacPreTrainedModel {
13788
13867
  }
13789
13868
  //////////////////////////////////////////////////
13790
13869
 
13870
+
13871
+ //////////////////////////////////////////////////
13872
+ // Snac models
13873
+ class SnacPreTrainedModel extends PreTrainedModel {
13874
+ main_input_name = 'input_values';
13875
+ forward_params = ['input_values'];
13876
+ }
13877
+
13878
+ /**
13879
+ * The SNAC (Multi-Scale Neural Audio Codec) model.
13880
+ */
13881
+ class SnacModel extends SnacPreTrainedModel {
13882
+ /**
13883
+ * Encodes the input audio waveform into discrete codes.
13884
+ * @param {Object} inputs Model inputs
13885
+ * @param {Tensor} [inputs.input_values] Float values of the input audio waveform, of shape `(batch_size, channels, sequence_length)`).
13886
+ * @returns {Promise<Record<string, Tensor>>} The output tensors of shape `(batch_size, num_codebooks, sequence_length)`.
13887
+ */
13888
+ async encode(inputs) {
13889
+ return await sessionRun(this.sessions['encoder_model'], inputs);
13890
+ }
13891
+
13892
+ /**
13893
+ * Decodes the given frames into an output audio waveform.
13894
+ * @param {Record<string, Tensor>} inputs The encoded audio codes.
13895
+ * @returns {Promise<{audio_values: Tensor}>} The output tensor of shape `(batch_size, num_channels, sequence_length)`.
13896
+ */
13897
+ async decode(inputs) {
13898
+ return await sessionRun(this.sessions['decoder_model'], inputs);
13899
+ }
13900
+ }
13901
+
13902
+ class SnacEncoderModel extends SnacPreTrainedModel {
13903
+ /** @type {typeof PreTrainedModel.from_pretrained} */
13904
+ static async from_pretrained(pretrained_model_name_or_path, options = {}) {
13905
+ return super.from_pretrained(pretrained_model_name_or_path, {
13906
+ ...options,
13907
+ // Update default model file name if not provided
13908
+ model_file_name: options.model_file_name ?? 'encoder_model',
13909
+ });
13910
+ }
13911
+ }
13912
+ class SnacDecoderModel extends SnacPreTrainedModel {
13913
+ /** @type {typeof PreTrainedModel.from_pretrained} */
13914
+ static async from_pretrained(pretrained_model_name_or_path, options = {}) {
13915
+ return super.from_pretrained(pretrained_model_name_or_path, {
13916
+ ...options,
13917
+ // Update default model file name if not provided
13918
+ model_file_name: options.model_file_name ?? 'decoder_model',
13919
+ });
13920
+ }
13921
+ }
13922
+ //////////////////////////////////////////////////
13923
+
13791
13924
  //////////////////////////////////////////////////
13792
13925
  // AutoModels, used to simplify construction of PreTrainedModels
13793
13926
  // (uses config to instantiate correct class)
@@ -13908,6 +14041,8 @@ const MODEL_MAPPING_NAMES_ENCODER_ONLY = new Map([
13908
14041
 
13909
14042
  ['detr', ['DetrModel', DetrModel]],
13910
14043
  ['rt_detr', ['RTDetrModel', RTDetrModel]],
14044
+ ['rt_detr_v2', ['RTDetrV2Model', RTDetrV2Model]],
14045
+ ['rf_detr', ['RFDetrModel', RFDetrModel]],
13911
14046
  ['table-transformer', ['TableTransformerModel', TableTransformerModel]],
13912
14047
  ['vit', ['ViTModel', ViTModel]],
13913
14048
  ['ijepa', ['IJepaModel', IJepaModel]],
@@ -13969,6 +14104,7 @@ const MODEL_MAPPING_NAMES_ENCODER_DECODER = new Map([
13969
14104
  const MODEL_MAPPING_NAMES_AUTO_ENCODER = new Map([
13970
14105
  ['mimi', ['MimiModel', MimiModel]],
13971
14106
  ['dac', ['DacModel', DacModel]],
14107
+ ['snac', ['SnacModel', SnacModel]],
13972
14108
  ]);
13973
14109
 
13974
14110
  const MODEL_MAPPING_NAMES_DECODER_ONLY = new Map([
@@ -13989,6 +14125,7 @@ const MODEL_MAPPING_NAMES_DECODER_ONLY = new Map([
13989
14125
  ['cohere', ['CohereModel', CohereModel]],
13990
14126
  ['gemma', ['GemmaModel', GemmaModel]],
13991
14127
  ['gemma2', ['Gemma2Model', Gemma2Model]],
14128
+ ['gemma3_text', ['Gemma3Model', Gemma3Model]],
13992
14129
  ['helium', ['HeliumModel', HeliumModel]],
13993
14130
  ['glm', ['GlmModel', GlmModel]],
13994
14131
  ['openelm', ['OpenELMModel', OpenELMModel]],
@@ -14088,6 +14225,7 @@ const MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = new Map([
14088
14225
  ['cohere', ['CohereForCausalLM', CohereForCausalLM]],
14089
14226
  ['gemma', ['GemmaForCausalLM', GemmaForCausalLM]],
14090
14227
  ['gemma2', ['Gemma2ForCausalLM', Gemma2ForCausalLM]],
14228
+ ['gemma3_text', ['Gemma3ForCausalLM', Gemma3ForCausalLM]],
14091
14229
  ['helium', ['HeliumForCausalLM', HeliumForCausalLM]],
14092
14230
  ['glm', ['GlmForCausalLM', GlmForCausalLM]],
14093
14231
  ['openelm', ['OpenELMForCausalLM', OpenELMForCausalLM]],
@@ -14204,6 +14342,8 @@ const MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = new Map([
14204
14342
  const MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES = new Map([
14205
14343
  ['detr', ['DetrForObjectDetection', DetrForObjectDetection]],
14206
14344
  ['rt_detr', ['RTDetrForObjectDetection', RTDetrForObjectDetection]],
14345
+ ['rt_detr_v2', ['RTDetrV2ForObjectDetection', RTDetrV2ForObjectDetection]],
14346
+ ['rf_detr', ['RFDetrForObjectDetection', RFDetrForObjectDetection]],
14207
14347
  ['table-transformer', ['TableTransformerForObjectDetection', TableTransformerForObjectDetection]],
14208
14348
  ['yolos', ['YolosForObjectDetection', YolosForObjectDetection]],
14209
14349
  ]);
@@ -14289,6 +14429,8 @@ const MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES = new Map([
14289
14429
  ['glpn', ['GLPNForDepthEstimation', GLPNForDepthEstimation]],
14290
14430
  ['sapiens', ['SapiensForDepthEstimation', SapiensForDepthEstimation]],
14291
14431
  ['depth_pro', ['DepthProForDepthEstimation', DepthProForDepthEstimation]],
14432
+ ['metric3d', ['Metric3DForDepthEstimation', Metric3DForDepthEstimation]],
14433
+ ['metric3dv2', ['Metric3Dv2ForDepthEstimation', Metric3Dv2ForDepthEstimation]],
14292
14434
  ])
14293
14435
 
14294
14436
  const MODEL_FOR_NORMAL_ESTIMATION_MAPPING_NAMES = new Map([
@@ -14374,6 +14516,8 @@ const CUSTOM_MAPPING = [
14374
14516
  ['DacDecoderModel', DacDecoderModel, MODEL_TYPES.EncoderOnly],
14375
14517
  ['MimiEncoderModel', MimiEncoderModel, MODEL_TYPES.EncoderOnly],
14376
14518
  ['MimiDecoderModel', MimiDecoderModel, MODEL_TYPES.EncoderOnly],
14519
+ ['SnacEncoderModel', SnacEncoderModel, MODEL_TYPES.EncoderOnly],
14520
+ ['SnacDecoderModel', SnacDecoderModel, MODEL_TYPES.EncoderOnly],
14377
14521
  ]
14378
14522
  for (const [name, model, type] of CUSTOM_MAPPING) {
14379
14523
  MODEL_TYPE_MAPPING.set(name, type);
@@ -15672,14 +15816,15 @@ __webpack_require__.r(__webpack_exports__);
15672
15816
  /* harmony export */ ClapFeatureExtractor: () => (/* reexport safe */ _clap_feature_extraction_clap_js__WEBPACK_IMPORTED_MODULE_2__.ClapFeatureExtractor),
15673
15817
  /* harmony export */ DacFeatureExtractor: () => (/* reexport safe */ _dac_feature_extraction_dac_js__WEBPACK_IMPORTED_MODULE_3__.DacFeatureExtractor),
15674
15818
  /* harmony export */ EncodecFeatureExtractor: () => (/* reexport safe */ _encodec_feature_extraction_encodec_js__WEBPACK_IMPORTED_MODULE_1__.EncodecFeatureExtractor),
15675
- /* harmony export */ ImageFeatureExtractor: () => (/* reexport safe */ _base_image_processors_utils_js__WEBPACK_IMPORTED_MODULE_11__.ImageProcessor),
15819
+ /* harmony export */ ImageFeatureExtractor: () => (/* reexport safe */ _base_image_processors_utils_js__WEBPACK_IMPORTED_MODULE_12__.ImageProcessor),
15676
15820
  /* harmony export */ MoonshineFeatureExtractor: () => (/* reexport safe */ _moonshine_feature_extraction_moonshine_js__WEBPACK_IMPORTED_MODULE_4__.MoonshineFeatureExtractor),
15677
15821
  /* harmony export */ PyAnnoteFeatureExtractor: () => (/* reexport safe */ _pyannote_feature_extraction_pyannote_js__WEBPACK_IMPORTED_MODULE_5__.PyAnnoteFeatureExtractor),
15678
15822
  /* harmony export */ SeamlessM4TFeatureExtractor: () => (/* reexport safe */ _seamless_m4t_feature_extraction_seamless_m4t_js__WEBPACK_IMPORTED_MODULE_6__.SeamlessM4TFeatureExtractor),
15679
- /* harmony export */ SpeechT5FeatureExtractor: () => (/* reexport safe */ _speecht5_feature_extraction_speecht5_js__WEBPACK_IMPORTED_MODULE_7__.SpeechT5FeatureExtractor),
15680
- /* harmony export */ Wav2Vec2FeatureExtractor: () => (/* reexport safe */ _wav2vec2_feature_extraction_wav2vec2_js__WEBPACK_IMPORTED_MODULE_8__.Wav2Vec2FeatureExtractor),
15681
- /* harmony export */ WeSpeakerFeatureExtractor: () => (/* reexport safe */ _wespeaker_feature_extraction_wespeaker_js__WEBPACK_IMPORTED_MODULE_9__.WeSpeakerFeatureExtractor),
15682
- /* harmony export */ WhisperFeatureExtractor: () => (/* reexport safe */ _whisper_feature_extraction_whisper_js__WEBPACK_IMPORTED_MODULE_10__.WhisperFeatureExtractor)
15823
+ /* harmony export */ SnacFeatureExtractor: () => (/* reexport safe */ _snac_feature_extraction_snac_js__WEBPACK_IMPORTED_MODULE_7__.SnacFeatureExtractor),
15824
+ /* harmony export */ SpeechT5FeatureExtractor: () => (/* reexport safe */ _speecht5_feature_extraction_speecht5_js__WEBPACK_IMPORTED_MODULE_8__.SpeechT5FeatureExtractor),
15825
+ /* harmony export */ Wav2Vec2FeatureExtractor: () => (/* reexport safe */ _wav2vec2_feature_extraction_wav2vec2_js__WEBPACK_IMPORTED_MODULE_9__.Wav2Vec2FeatureExtractor),
15826
+ /* harmony export */ WeSpeakerFeatureExtractor: () => (/* reexport safe */ _wespeaker_feature_extraction_wespeaker_js__WEBPACK_IMPORTED_MODULE_10__.WeSpeakerFeatureExtractor),
15827
+ /* harmony export */ WhisperFeatureExtractor: () => (/* reexport safe */ _whisper_feature_extraction_whisper_js__WEBPACK_IMPORTED_MODULE_11__.WhisperFeatureExtractor)
15683
15828
  /* harmony export */ });
15684
15829
  /* harmony import */ var _audio_spectrogram_transformer_feature_extraction_audio_spectrogram_transformer_js__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! ./audio_spectrogram_transformer/feature_extraction_audio_spectrogram_transformer.js */ "./src/models/audio_spectrogram_transformer/feature_extraction_audio_spectrogram_transformer.js");
15685
15830
  /* harmony import */ var _encodec_feature_extraction_encodec_js__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(/*! ./encodec/feature_extraction_encodec.js */ "./src/models/encodec/feature_extraction_encodec.js");
@@ -15688,11 +15833,13 @@ __webpack_require__.r(__webpack_exports__);
15688
15833
  /* harmony import */ var _moonshine_feature_extraction_moonshine_js__WEBPACK_IMPORTED_MODULE_4__ = __webpack_require__(/*! ./moonshine/feature_extraction_moonshine.js */ "./src/models/moonshine/feature_extraction_moonshine.js");
15689
15834
  /* harmony import */ var _pyannote_feature_extraction_pyannote_js__WEBPACK_IMPORTED_MODULE_5__ = __webpack_require__(/*! ./pyannote/feature_extraction_pyannote.js */ "./src/models/pyannote/feature_extraction_pyannote.js");
15690
15835
  /* harmony import */ var _seamless_m4t_feature_extraction_seamless_m4t_js__WEBPACK_IMPORTED_MODULE_6__ = __webpack_require__(/*! ./seamless_m4t/feature_extraction_seamless_m4t.js */ "./src/models/seamless_m4t/feature_extraction_seamless_m4t.js");
15691
- /* harmony import */ var _speecht5_feature_extraction_speecht5_js__WEBPACK_IMPORTED_MODULE_7__ = __webpack_require__(/*! ./speecht5/feature_extraction_speecht5.js */ "./src/models/speecht5/feature_extraction_speecht5.js");
15692
- /* harmony import */ var _wav2vec2_feature_extraction_wav2vec2_js__WEBPACK_IMPORTED_MODULE_8__ = __webpack_require__(/*! ./wav2vec2/feature_extraction_wav2vec2.js */ "./src/models/wav2vec2/feature_extraction_wav2vec2.js");
15693
- /* harmony import */ var _wespeaker_feature_extraction_wespeaker_js__WEBPACK_IMPORTED_MODULE_9__ = __webpack_require__(/*! ./wespeaker/feature_extraction_wespeaker.js */ "./src/models/wespeaker/feature_extraction_wespeaker.js");
15694
- /* harmony import */ var _whisper_feature_extraction_whisper_js__WEBPACK_IMPORTED_MODULE_10__ = __webpack_require__(/*! ./whisper/feature_extraction_whisper.js */ "./src/models/whisper/feature_extraction_whisper.js");
15695
- /* harmony import */ var _base_image_processors_utils_js__WEBPACK_IMPORTED_MODULE_11__ = __webpack_require__(/*! ../base/image_processors_utils.js */ "./src/base/image_processors_utils.js");
15836
+ /* harmony import */ var _snac_feature_extraction_snac_js__WEBPACK_IMPORTED_MODULE_7__ = __webpack_require__(/*! ./snac/feature_extraction_snac.js */ "./src/models/snac/feature_extraction_snac.js");
15837
+ /* harmony import */ var _speecht5_feature_extraction_speecht5_js__WEBPACK_IMPORTED_MODULE_8__ = __webpack_require__(/*! ./speecht5/feature_extraction_speecht5.js */ "./src/models/speecht5/feature_extraction_speecht5.js");
15838
+ /* harmony import */ var _wav2vec2_feature_extraction_wav2vec2_js__WEBPACK_IMPORTED_MODULE_9__ = __webpack_require__(/*! ./wav2vec2/feature_extraction_wav2vec2.js */ "./src/models/wav2vec2/feature_extraction_wav2vec2.js");
15839
+ /* harmony import */ var _wespeaker_feature_extraction_wespeaker_js__WEBPACK_IMPORTED_MODULE_10__ = __webpack_require__(/*! ./wespeaker/feature_extraction_wespeaker.js */ "./src/models/wespeaker/feature_extraction_wespeaker.js");
15840
+ /* harmony import */ var _whisper_feature_extraction_whisper_js__WEBPACK_IMPORTED_MODULE_11__ = __webpack_require__(/*! ./whisper/feature_extraction_whisper.js */ "./src/models/whisper/feature_extraction_whisper.js");
15841
+ /* harmony import */ var _base_image_processors_utils_js__WEBPACK_IMPORTED_MODULE_12__ = __webpack_require__(/*! ../base/image_processors_utils.js */ "./src/base/image_processors_utils.js");
15842
+
15696
15843
 
15697
15844
 
15698
15845
 
@@ -18682,6 +18829,24 @@ __webpack_require__.r(__webpack_exports__);
18682
18829
 
18683
18830
 
18684
18831
 
18832
+ /***/ }),
18833
+
18834
+ /***/ "./src/models/snac/feature_extraction_snac.js":
18835
+ /*!****************************************************!*\
18836
+ !*** ./src/models/snac/feature_extraction_snac.js ***!
18837
+ \****************************************************/
18838
+ /***/ ((__unused_webpack___webpack_module__, __webpack_exports__, __webpack_require__) => {
18839
+
18840
+ __webpack_require__.r(__webpack_exports__);
18841
+ /* harmony export */ __webpack_require__.d(__webpack_exports__, {
18842
+ /* harmony export */ SnacFeatureExtractor: () => (/* binding */ SnacFeatureExtractor)
18843
+ /* harmony export */ });
18844
+ /* harmony import */ var _dac_feature_extraction_dac_js__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! ../dac/feature_extraction_dac.js */ "./src/models/dac/feature_extraction_dac.js");
18845
+
18846
+
18847
+ class SnacFeatureExtractor extends _dac_feature_extraction_dac_js__WEBPACK_IMPORTED_MODULE_0__.DacFeatureExtractor { }
18848
+
18849
+
18685
18850
  /***/ }),
18686
18851
 
18687
18852
  /***/ "./src/models/speecht5/feature_extraction_speecht5.js":
@@ -19921,16 +20086,16 @@ __webpack_require__.r(__webpack_exports__);
19921
20086
  /* harmony import */ var _utils_image_js__WEBPACK_IMPORTED_MODULE_9__ = __webpack_require__(/*! ./utils/image.js */ "./src/utils/image.js");
19922
20087
  /**
19923
20088
  * @file Pipelines provide a high-level, easy to use, API for running machine learning models.
19924
- *
20089
+ *
19925
20090
  * **Example:** Instantiate pipeline using the `pipeline` function.
19926
20091
  * ```javascript
19927
20092
  * import { pipeline } from '@huggingface/transformers';
19928
- *
20093
+ *
19929
20094
  * const classifier = await pipeline('sentiment-analysis');
19930
20095
  * const output = await classifier('I love transformers!');
19931
20096
  * // [{'label': 'POSITIVE', 'score': 0.999817686}]
19932
20097
  * ```
19933
- *
20098
+ *
19934
20099
  * @module pipelines
19935
20100
  */
19936
20101
 
@@ -19949,7 +20114,7 @@ __webpack_require__.r(__webpack_exports__);
19949
20114
 
19950
20115
 
19951
20116
  /**
19952
- * @typedef {string | RawImage | URL} ImageInput
20117
+ * @typedef {string | RawImage | URL | Blob | HTMLCanvasElement | OffscreenCanvas} ImageInput
19953
20118
  * @typedef {ImageInput|ImageInput[]} ImagePipelineInputs
19954
20119
  */
19955
20120
 
@@ -20023,7 +20188,7 @@ function get_bounding_box(box, asInteger) {
20023
20188
  /**
20024
20189
  * @callback DisposeType Disposes the item.
20025
20190
  * @returns {Promise<void>} A promise that resolves when the item has been disposed.
20026
- *
20191
+ *
20027
20192
  * @typedef {Object} Disposable
20028
20193
  * @property {DisposeType} dispose A promise that resolves when the pipeline has been disposed.
20029
20194
  */
@@ -20060,7 +20225,7 @@ class Pipeline extends _utils_generic_js__WEBPACK_IMPORTED_MODULE_4__.Callable {
20060
20225
  * @property {string} task The task of the pipeline. Useful for specifying subtasks.
20061
20226
  * @property {PreTrainedModel} model The model used by the pipeline.
20062
20227
  * @property {PreTrainedTokenizer} tokenizer The tokenizer used by the pipeline.
20063
- *
20228
+ *
20064
20229
  * @typedef {ModelTokenizerConstructorArgs} TextPipelineConstructorArgs An object used to instantiate a text-based pipeline.
20065
20230
  */
20066
20231
 
@@ -20069,7 +20234,7 @@ class Pipeline extends _utils_generic_js__WEBPACK_IMPORTED_MODULE_4__.Callable {
20069
20234
  * @property {string} task The task of the pipeline. Useful for specifying subtasks.
20070
20235
  * @property {PreTrainedModel} model The model used by the pipeline.
20071
20236
  * @property {Processor} processor The processor used by the pipeline.
20072
- *
20237
+ *
20073
20238
  * @typedef {ModelProcessorConstructorArgs} AudioPipelineConstructorArgs An object used to instantiate an audio-based pipeline.
20074
20239
  * @typedef {ModelProcessorConstructorArgs} ImagePipelineConstructorArgs An object used to instantiate an image-based pipeline.
20075
20240
  */
@@ -20081,7 +20246,7 @@ class Pipeline extends _utils_generic_js__WEBPACK_IMPORTED_MODULE_4__.Callable {
20081
20246
  * @property {PreTrainedModel} model The model used by the pipeline.
20082
20247
  * @property {PreTrainedTokenizer} tokenizer The tokenizer used by the pipeline.
20083
20248
  * @property {Processor} processor The processor used by the pipeline.
20084
- *
20249
+ *
20085
20250
  * @typedef {ModelTokenizerProcessorConstructorArgs} TextAudioPipelineConstructorArgs An object used to instantiate a text- and audio-based pipeline.
20086
20251
  * @typedef {ModelTokenizerProcessorConstructorArgs} TextImagePipelineConstructorArgs An object used to instantiate a text- and image-based pipeline.
20087
20252
  */
@@ -20091,15 +20256,15 @@ class Pipeline extends _utils_generic_js__WEBPACK_IMPORTED_MODULE_4__.Callable {
20091
20256
  * @property {string} label The label predicted.
20092
20257
  * @property {number} score The corresponding probability.
20093
20258
  * @typedef {TextClassificationSingle[]} TextClassificationOutput
20094
- *
20259
+ *
20095
20260
  * @typedef {Object} TextClassificationPipelineOptions Parameters specific to text classification pipelines.
20096
20261
  * @property {number} [top_k=1] The number of top predictions to be returned.
20097
- *
20262
+ *
20098
20263
  * @callback TextClassificationPipelineCallback Classify the text(s) given as inputs.
20099
20264
  * @param {string|string[]} texts The input text(s) to be classified.
20100
20265
  * @param {TextClassificationPipelineOptions} [options] The options to use for text classification.
20101
20266
  * @returns {Promise<TextClassificationOutput|TextClassificationOutput[]>} An array or object containing the predicted labels and scores.
20102
- *
20267
+ *
20103
20268
  * @typedef {TextPipelineConstructorArgs & TextClassificationPipelineCallback & Disposable} TextClassificationPipelineType
20104
20269
  */
20105
20270
 
@@ -20112,7 +20277,7 @@ class Pipeline extends _utils_generic_js__WEBPACK_IMPORTED_MODULE_4__.Callable {
20112
20277
  * const output = await classifier('I love transformers!');
20113
20278
  * // [{ label: 'POSITIVE', score: 0.999788761138916 }]
20114
20279
  * ```
20115
- *
20280
+ *
20116
20281
  * **Example:** Multilingual sentiment-analysis w/ `Xenova/bert-base-multilingual-uncased-sentiment` (and return top 5 classes).
20117
20282
  * ```javascript
20118
20283
  * const classifier = await pipeline('sentiment-analysis', 'Xenova/bert-base-multilingual-uncased-sentiment');
@@ -20125,7 +20290,7 @@ class Pipeline extends _utils_generic_js__WEBPACK_IMPORTED_MODULE_4__.Callable {
20125
20290
  * // { label: '2 stars', score: 0.0009423971059732139 }
20126
20291
  * // ]
20127
20292
  * ```
20128
- *
20293
+ *
20129
20294
  * **Example:** Toxic comment classification w/ `Xenova/toxic-bert` (and return all classes).
20130
20295
  * ```javascript
20131
20296
  * const classifier = await pipeline('text-classification', 'Xenova/toxic-bert');
@@ -20210,21 +20375,21 @@ class TextClassificationPipeline extends (/** @type {new (options: TextPipelineC
20210
20375
  * @property {number} [start] The index of the start of the corresponding entity in the sentence.
20211
20376
  * @property {number} [end] The index of the end of the corresponding entity in the sentence.
20212
20377
  * @typedef {TokenClassificationSingle[]} TokenClassificationOutput
20213
- *
20378
+ *
20214
20379
  * @typedef {Object} TokenClassificationPipelineOptions Parameters specific to token classification pipelines.
20215
20380
  * @property {string[]} [ignore_labels] A list of labels to ignore.
20216
- *
20381
+ *
20217
20382
  * @callback TokenClassificationPipelineCallback Classify each token of the text(s) given as inputs.
20218
20383
  * @param {string|string[]} texts One or several texts (or one list of texts) for token classification.
20219
20384
  * @param {TokenClassificationPipelineOptions} [options] The options to use for token classification.
20220
20385
  * @returns {Promise<TokenClassificationOutput|TokenClassificationOutput[]>} The result.
20221
- *
20386
+ *
20222
20387
  * @typedef {TextPipelineConstructorArgs & TokenClassificationPipelineCallback & Disposable} TokenClassificationPipelineType
20223
20388
  */
20224
20389
 
20225
20390
  /**
20226
20391
  * Named Entity Recognition pipeline using any `ModelForTokenClassification`.
20227
- *
20392
+ *
20228
20393
  * **Example:** Perform named entity recognition with `Xenova/bert-base-NER`.
20229
20394
  * ```javascript
20230
20395
  * const classifier = await pipeline('token-classification', 'Xenova/bert-base-NER');
@@ -20234,7 +20399,7 @@ class TextClassificationPipeline extends (/** @type {new (options: TextPipelineC
20234
20399
  * // { entity: 'B-LOC', score: 0.9994474053382874, index: 9, word: 'London' }
20235
20400
  * // ]
20236
20401
  * ```
20237
- *
20402
+ *
20238
20403
  * **Example:** Perform named entity recognition with `Xenova/bert-base-NER` (and return all labels).
20239
20404
  * ```javascript
20240
20405
  * const classifier = await pipeline('token-classification', 'Xenova/bert-base-NER');
@@ -20330,22 +20495,22 @@ class TokenClassificationPipeline extends (/** @type {new (options: TextPipeline
20330
20495
  * @property {number} [start] The character start index of the answer (in the tokenized version of the input).
20331
20496
  * @property {number} [end] The character end index of the answer (in the tokenized version of the input).
20332
20497
  * @property {string} answer The answer to the question.
20333
- *
20498
+ *
20334
20499
  * @typedef {Object} QuestionAnsweringPipelineOptions Parameters specific to question answering pipelines.
20335
20500
  * @property {number} [top_k=1] The number of top answer predictions to be returned.
20336
- *
20501
+ *
20337
20502
  * @callback QuestionAnsweringPipelineCallback Answer the question(s) given as inputs by using the context(s).
20338
20503
  * @param {string|string[]} question One or several question(s) (must be used in conjunction with the `context` argument).
20339
20504
  * @param {string|string[]} context One or several context(s) associated with the question(s) (must be used in conjunction with the `question` argument).
20340
20505
  * @param {QuestionAnsweringPipelineOptions} [options] The options to use for question answering.
20341
20506
  * @returns {Promise<QuestionAnsweringOutput|QuestionAnsweringOutput[]>} An array or object containing the predicted answers and scores.
20342
- *
20507
+ *
20343
20508
  * @typedef {TextPipelineConstructorArgs & QuestionAnsweringPipelineCallback & Disposable} QuestionAnsweringPipelineType
20344
20509
  */
20345
20510
 
20346
20511
  /**
20347
20512
  * Question Answering pipeline using any `ModelForQuestionAnswering`.
20348
- *
20513
+ *
20349
20514
  * **Example:** Run question answering with `Xenova/distilbert-base-uncased-distilled-squad`.
20350
20515
  * ```javascript
20351
20516
  * const answerer = await pipeline('question-answering', 'Xenova/distilbert-base-uncased-distilled-squad');
@@ -20470,10 +20635,10 @@ class QuestionAnsweringPipeline extends (/** @type {new (options: TextPipelineCo
20470
20635
  * @property {number} token The predicted token id (to replace the masked one).
20471
20636
  * @property {string} token_str The predicted token (to replace the masked one).
20472
20637
  * @typedef {FillMaskSingle[]} FillMaskOutput
20473
- *
20638
+ *
20474
20639
  * @typedef {Object} FillMaskPipelineOptions Parameters specific to fill mask pipelines.
20475
20640
  * @property {number} [top_k=5] When passed, overrides the number of predictions to return.
20476
- *
20641
+ *
20477
20642
  * @callback FillMaskPipelineCallback Fill the masked token in the text(s) given as inputs.
20478
20643
  * @param {string|string[]} texts One or several texts (or one list of prompts) with masked tokens.
20479
20644
  * @param {FillMaskPipelineOptions} [options] The options to use for masked language modelling.
@@ -20481,13 +20646,13 @@ class QuestionAnsweringPipeline extends (/** @type {new (options: TextPipelineCo
20481
20646
  * and the sequence with the predicted token filled in, or an array of such arrays (one for each input text).
20482
20647
  * If only one input text is given, the output will be an array of objects.
20483
20648
  * @throws {Error} When the mask token is not found in the input text.
20484
- *
20649
+ *
20485
20650
  * @typedef {TextPipelineConstructorArgs & FillMaskPipelineCallback & Disposable} FillMaskPipelineType
20486
20651
  */
20487
20652
 
20488
20653
  /**
20489
20654
  * Masked language modeling prediction pipeline using any `ModelWithLMHead`.
20490
- *
20655
+ *
20491
20656
  * **Example:** Perform masked language modelling (a.k.a. "fill-mask") with `Xenova/bert-base-uncased`.
20492
20657
  * ```javascript
20493
20658
  * const unmasker = await pipeline('fill-mask', 'Xenova/bert-base-cased');
@@ -20500,7 +20665,7 @@ class QuestionAnsweringPipeline extends (/** @type {new (options: TextPipelineCo
20500
20665
  * // { token_str: 'life', score: 0.01859794743359089, token: 1297, sequence: 'The goal of life is life.' }
20501
20666
  * // ]
20502
20667
  * ```
20503
- *
20668
+ *
20504
20669
  * **Example:** Perform masked language modelling (a.k.a. "fill-mask") with `Xenova/bert-base-cased` (and return top result).
20505
20670
  * ```javascript
20506
20671
  * const unmasker = await pipeline('fill-mask', 'Xenova/bert-base-cased');
@@ -20577,18 +20742,18 @@ class FillMaskPipeline extends (/** @type {new (options: TextPipelineConstructor
20577
20742
  * @typedef {Object} Text2TextGenerationSingle
20578
20743
  * @property {string} generated_text The generated text.
20579
20744
  * @typedef {Text2TextGenerationSingle[]} Text2TextGenerationOutput
20580
- *
20745
+ *
20581
20746
  * @callback Text2TextGenerationPipelineCallback Generate the output text(s) using text(s) given as inputs.
20582
20747
  * @param {string|string[]} texts Input text for the encoder.
20583
20748
  * @param {Partial<import('./generation/configuration_utils.js').GenerationConfig>} [options] Additional keyword arguments to pass along to the generate method of the model.
20584
20749
  * @returns {Promise<Text2TextGenerationOutput|Text2TextGenerationOutput[]>}
20585
- *
20750
+ *
20586
20751
  * @typedef {TextPipelineConstructorArgs & Text2TextGenerationPipelineCallback & Disposable} Text2TextGenerationPipelineType
20587
20752
  */
20588
20753
 
20589
20754
  /**
20590
20755
  * Text2TextGenerationPipeline class for generating text using a model that performs text-to-text generation tasks.
20591
- *
20756
+ *
20592
20757
  * **Example:** Text-to-text generation w/ `Xenova/LaMini-Flan-T5-783M`.
20593
20758
  * ```javascript
20594
20759
  * const generator = await pipeline('text2text-generation', 'Xenova/LaMini-Flan-T5-783M');
@@ -20664,18 +20829,18 @@ class Text2TextGenerationPipeline extends (/** @type {new (options: TextPipeline
20664
20829
  * @typedef {Object} SummarizationSingle
20665
20830
  * @property {string} summary_text The summary text.
20666
20831
  * @typedef {SummarizationSingle[]} SummarizationOutput
20667
- *
20832
+ *
20668
20833
  * @callback SummarizationPipelineCallback Summarize the text(s) given as inputs.
20669
20834
  * @param {string|string[]} texts One or several articles (or one list of articles) to summarize.
20670
20835
  * @param {import('./generation/configuration_utils.js').GenerationConfig} [options] Additional keyword arguments to pass along to the generate method of the model.
20671
20836
  * @returns {Promise<SummarizationOutput|SummarizationOutput[]>}
20672
- *
20837
+ *
20673
20838
  * @typedef {TextPipelineConstructorArgs & SummarizationPipelineCallback & Disposable} SummarizationPipelineType
20674
20839
  */
20675
20840
 
20676
20841
  /**
20677
20842
  * A pipeline for summarization tasks, inheriting from Text2TextGenerationPipeline.
20678
- *
20843
+ *
20679
20844
  * **Example:** Summarization w/ `Xenova/distilbart-cnn-6-6`.
20680
20845
  * ```javascript
20681
20846
  * const generator = await pipeline('summarization', 'Xenova/distilbart-cnn-6-6');
@@ -20711,23 +20876,23 @@ class SummarizationPipeline extends (/** @type {new (options: TextPipelineConstr
20711
20876
  * @typedef {Object} TranslationSingle
20712
20877
  * @property {string} translation_text The translated text.
20713
20878
  * @typedef {TranslationSingle[]} TranslationOutput
20714
- *
20879
+ *
20715
20880
  * @callback TranslationPipelineCallback Translate the text(s) given as inputs.
20716
20881
  * @param {string|string[]} texts Texts to be translated.
20717
20882
  * @param {import('./generation/configuration_utils.js').GenerationConfig} [options] Additional keyword arguments to pass along to the generate method of the model.
20718
20883
  * @returns {Promise<TranslationOutput|TranslationOutput[]>}
20719
- *
20884
+ *
20720
20885
  * @typedef {TextPipelineConstructorArgs & TranslationPipelineCallback & Disposable} TranslationPipelineType
20721
20886
  */
20722
20887
 
20723
20888
  /**
20724
20889
  * Translates text from one language to another.
20725
- *
20890
+ *
20726
20891
  * **Example:** Multilingual translation w/ `Xenova/nllb-200-distilled-600M`.
20727
- *
20892
+ *
20728
20893
  * See [here](https://github.com/facebookresearch/flores/blob/main/flores200/README.md#languages-in-flores-200)
20729
20894
  * for the full list of languages and their corresponding codes.
20730
- *
20895
+ *
20731
20896
  * ```javascript
20732
20897
  * const translator = await pipeline('translation', 'Xenova/nllb-200-distilled-600M');
20733
20898
  * const output = await translator('जीवन एक चॉकलेट बॉक्स की तरह है।', {
@@ -20736,12 +20901,12 @@ class SummarizationPipeline extends (/** @type {new (options: TextPipelineConstr
20736
20901
  * });
20737
20902
  * // [{ translation_text: 'La vie est comme une boîte à chocolat.' }]
20738
20903
  * ```
20739
- *
20904
+ *
20740
20905
  * **Example:** Multilingual translation w/ `Xenova/m2m100_418M`.
20741
- *
20906
+ *
20742
20907
  * See [here](https://huggingface.co/facebook/m2m100_418M#languages-covered)
20743
20908
  * for the full list of languages and their corresponding codes.
20744
- *
20909
+ *
20745
20910
  * ```javascript
20746
20911
  * const translator = await pipeline('translation', 'Xenova/m2m100_418M');
20747
20912
  * const output = await translator('生活就像一盒巧克力。', {
@@ -20750,12 +20915,12 @@ class SummarizationPipeline extends (/** @type {new (options: TextPipelineConstr
20750
20915
  * });
20751
20916
  * // [{ translation_text: 'Life is like a box of chocolate.' }]
20752
20917
  * ```
20753
- *
20918
+ *
20754
20919
  * **Example:** Multilingual translation w/ `Xenova/mbart-large-50-many-to-many-mmt`.
20755
- *
20920
+ *
20756
20921
  * See [here](https://huggingface.co/facebook/mbart-large-50-many-to-many-mmt#languages-covered)
20757
20922
  * for the full list of languages and their corresponding codes.
20758
- *
20923
+ *
20759
20924
  * ```javascript
20760
20925
  * const translator = await pipeline('translation', 'Xenova/mbart-large-50-many-to-many-mmt');
20761
20926
  * const output = await translator('संयुक्त राष्ट्र के प्रमुख का कहना है कि सीरिया में कोई सैन्य समाधान नहीं है', {
@@ -20784,21 +20949,21 @@ function isChat(x) {
20784
20949
 
20785
20950
  /**
20786
20951
  * @typedef {import('./tokenizers.js').Message[]} Chat
20787
- *
20952
+ *
20788
20953
  * @typedef {Object} TextGenerationSingle
20789
20954
  * @property {string|Chat} generated_text The generated text.
20790
20955
  * @typedef {TextGenerationSingle[]} TextGenerationOutput
20791
- *
20956
+ *
20792
20957
  * @typedef {Object} TextGenerationSpecificParams Parameters specific to text-generation pipelines.
20793
20958
  * @property {boolean} [add_special_tokens] Whether or not to add special tokens when tokenizing the sequences.
20794
20959
  * @property {boolean} [return_full_text=true] If set to `false` only added text is returned, otherwise the full text is returned.
20795
20960
  * @typedef {import('./generation/configuration_utils.js').GenerationConfig & TextGenerationSpecificParams} TextGenerationConfig
20796
- *
20961
+ *
20797
20962
  * @callback TextGenerationPipelineCallback Complete the prompt(s) given as inputs.
20798
20963
  * @param {string|string[]|Chat|Chat[]} texts One or several prompts (or one list of prompts) to complete.
20799
20964
  * @param {Partial<TextGenerationConfig>} [options] Additional keyword arguments to pass along to the generate method of the model.
20800
20965
  * @returns {Promise<TextGenerationOutput|TextGenerationOutput[]>} An array or object containing the generated texts.
20801
- *
20966
+ *
20802
20967
  * @typedef {TextPipelineConstructorArgs & TextGenerationPipelineCallback & Disposable} TextGenerationPipelineType
20803
20968
  */
20804
20969
 
@@ -20806,7 +20971,7 @@ function isChat(x) {
20806
20971
  * Language generation pipeline using any `ModelWithLMHead` or `ModelForCausalLM`.
20807
20972
  * This pipeline predicts the words that will follow a specified text prompt.
20808
20973
  * NOTE: For the full list of generation parameters, see [`GenerationConfig`](./utils/generation#module_utils/generation.GenerationConfig).
20809
- *
20974
+ *
20810
20975
  * **Example:** Text generation with `Xenova/distilgpt2` (default settings).
20811
20976
  * ```javascript
20812
20977
  * const generator = await pipeline('text-generation', 'Xenova/distilgpt2');
@@ -20814,7 +20979,7 @@ function isChat(x) {
20814
20979
  * const output = await generator(text);
20815
20980
  * // [{ generated_text: "I enjoy walking with my cute dog, and I love to play with the other dogs." }]
20816
20981
  * ```
20817
- *
20982
+ *
20818
20983
  * **Example:** Text generation with `Xenova/distilgpt2` (custom settings).
20819
20984
  * ```javascript
20820
20985
  * const generator = await pipeline('text-generation', 'Xenova/distilgpt2');
@@ -20833,7 +20998,7 @@ function isChat(x) {
20833
20998
  * // "generated_text": "Once upon a time, there was an abundance of information about the most important and influential"
20834
20999
  * // }]
20835
21000
  * ```
20836
- *
21001
+ *
20837
21002
  * **Example:** Run code generation with `Xenova/codegen-350M-mono`.
20838
21003
  * ```javascript
20839
21004
  * const generator = await pipeline('text-generation', 'Xenova/codegen-350M-mono');
@@ -20952,7 +21117,7 @@ class TextGenerationPipeline extends (/** @type {new (options: TextPipelineConst
20952
21117
  * @property {string} sequence The sequence for which this is the output.
20953
21118
  * @property {string[]} labels The labels sorted by order of likelihood.
20954
21119
  * @property {number[]} scores The probabilities for each of the labels.
20955
- *
21120
+ *
20956
21121
  * @typedef {Object} ZeroShotClassificationPipelineOptions Parameters specific to zero-shot classification pipelines.
20957
21122
  * @property {string} [hypothesis_template="This example is {}."] The template used to turn each
20958
21123
  * candidate label into an NLI-style hypothesis. The candidate label will replace the {} placeholder.
@@ -20960,14 +21125,14 @@ class TextGenerationPipeline extends (/** @type {new (options: TextPipelineConst
20960
21125
  * If `false`, the scores are normalized such that the sum of the label likelihoods for each sequence
20961
21126
  * is 1. If `true`, the labels are considered independent and probabilities are normalized for each
20962
21127
  * candidate by doing a softmax of the entailment score vs. the contradiction score.
20963
- *
21128
+ *
20964
21129
  * @callback ZeroShotClassificationPipelineCallback Classify the sequence(s) given as inputs.
20965
21130
  * @param {string|string[]} texts The sequence(s) to classify, will be truncated if the model input is too large.
20966
21131
  * @param {string|string[]} candidate_labels The set of possible class labels to classify each sequence into.
20967
21132
  * Can be a single label, a string of comma-separated labels, or a list of labels.
20968
21133
  * @param {ZeroShotClassificationPipelineOptions} [options] The options to use for zero-shot classification.
20969
21134
  * @returns {Promise<ZeroShotClassificationOutput|ZeroShotClassificationOutput[]>} An array or object containing the predicted labels and scores.
20970
- *
21135
+ *
20971
21136
  * @typedef {TextPipelineConstructorArgs & ZeroShotClassificationPipelineCallback & Disposable} ZeroShotClassificationPipelineType
20972
21137
  */
20973
21138
 
@@ -20976,7 +21141,7 @@ class TextGenerationPipeline extends (/** @type {new (options: TextPipelineConst
20976
21141
  * trained on NLI (natural language inference) tasks. Equivalent of `text-classification`
20977
21142
  * pipelines, but these models don't require a hardcoded number of potential classes, they
20978
21143
  * can be chosen at runtime. It usually means it's slower but it is **much** more flexible.
20979
- *
21144
+ *
20980
21145
  * **Example:** Zero shot classification with `Xenova/mobilebert-uncased-mnli`.
20981
21146
  * ```javascript
20982
21147
  * const classifier = await pipeline('zero-shot-classification', 'Xenova/mobilebert-uncased-mnli');
@@ -20989,7 +21154,7 @@ class TextGenerationPipeline extends (/** @type {new (options: TextPipelineConst
20989
21154
  * // scores: [ 0.5562091040482018, 0.1843621307860853, 0.13942646639336376, 0.12000229877234923 ]
20990
21155
  * // }
20991
21156
  * ```
20992
- *
21157
+ *
20993
21158
  * **Example:** Zero shot classification with `Xenova/nli-deberta-v3-xsmall` (multi-label).
20994
21159
  * ```javascript
20995
21160
  * const classifier = await pipeline('zero-shot-classification', 'Xenova/nli-deberta-v3-xsmall');
@@ -21103,20 +21268,20 @@ class ZeroShotClassificationPipeline extends (/** @type {new (options: TextPipel
21103
21268
  * @property {'none'|'mean'|'cls'} [pooling="none"] The pooling method to use.
21104
21269
  * @property {boolean} [normalize=false] Whether or not to normalize the embeddings in the last dimension.
21105
21270
  * @property {boolean} [quantize=false] Whether or not to quantize the embeddings.
21106
- * @property {'binary'|'ubinary'} [precision='binary'] The precision to use for quantization.
21107
- *
21271
+ * @property {'binary'|'ubinary'} [precision='binary'] The precision to use for quantization.
21272
+ *
21108
21273
  * @callback FeatureExtractionPipelineCallback Extract the features of the input(s).
21109
21274
  * @param {string|string[]} texts One or several texts (or one list of texts) to get the features of.
21110
21275
  * @param {FeatureExtractionPipelineOptions} [options] The options to use for feature extraction.
21111
21276
  * @returns {Promise<Tensor>} The features computed by the model.
21112
- *
21277
+ *
21113
21278
  * @typedef {TextPipelineConstructorArgs & FeatureExtractionPipelineCallback & Disposable} FeatureExtractionPipelineType
21114
21279
  */
21115
21280
 
21116
21281
  /**
21117
21282
  * Feature extraction pipeline using no model head. This pipeline extracts the hidden
21118
21283
  * states from the base transformer, which can be used as features in downstream tasks.
21119
- *
21284
+ *
21120
21285
  * **Example:** Run feature extraction with `bert-base-uncased` (without pooling/normalization).
21121
21286
  * ```javascript
21122
21287
  * const extractor = await pipeline('feature-extraction', 'Xenova/bert-base-uncased', { revision: 'default' });
@@ -21127,7 +21292,7 @@ class ZeroShotClassificationPipeline extends (/** @type {new (options: TextPipel
21127
21292
  * // dims: [1, 8, 768]
21128
21293
  * // }
21129
21294
  * ```
21130
- *
21295
+ *
21131
21296
  * **Example:** Run feature extraction with `bert-base-uncased` (with pooling/normalization).
21132
21297
  * ```javascript
21133
21298
  * const extractor = await pipeline('feature-extraction', 'Xenova/bert-base-uncased', { revision: 'default' });
@@ -21138,7 +21303,7 @@ class ZeroShotClassificationPipeline extends (/** @type {new (options: TextPipel
21138
21303
  * // dims: [1, 768]
21139
21304
  * // }
21140
21305
  * ```
21141
- *
21306
+ *
21142
21307
  * **Example:** Calculating embeddings with `sentence-transformers` models.
21143
21308
  * ```javascript
21144
21309
  * const extractor = await pipeline('feature-extraction', 'Xenova/all-MiniLM-L6-v2');
@@ -21219,19 +21384,19 @@ class FeatureExtractionPipeline extends (/** @type {new (options: TextPipelineCo
21219
21384
  /**
21220
21385
  * @typedef {Object} ImageFeatureExtractionPipelineOptions Parameters specific to image feature extraction pipelines.
21221
21386
  * @property {boolean} [pool=null] Whether or not to return the pooled output. If set to `false`, the model will return the raw hidden states.
21222
- *
21387
+ *
21223
21388
  * @callback ImageFeatureExtractionPipelineCallback Extract the features of the input(s).
21224
21389
  * @param {ImagePipelineInputs} images One or several images (or one list of images) to get the features of.
21225
21390
  * @param {ImageFeatureExtractionPipelineOptions} [options] The options to use for image feature extraction.
21226
21391
  * @returns {Promise<Tensor>} The image features computed by the model.
21227
- *
21392
+ *
21228
21393
  * @typedef {ImagePipelineConstructorArgs & ImageFeatureExtractionPipelineCallback & Disposable} ImageFeatureExtractionPipelineType
21229
21394
  */
21230
21395
 
21231
21396
  /**
21232
21397
  * Image feature extraction pipeline using no model head. This pipeline extracts the hidden
21233
21398
  * states from the base transformer, which can be used as features in downstream tasks.
21234
- *
21399
+ *
21235
21400
  * **Example:** Perform image feature extraction with `Xenova/vit-base-patch16-224-in21k`.
21236
21401
  * ```javascript
21237
21402
  * const image_feature_extractor = await pipeline('image-feature-extraction', 'Xenova/vit-base-patch16-224-in21k');
@@ -21244,7 +21409,7 @@ class FeatureExtractionPipeline extends (/** @type {new (options: TextPipelineCo
21244
21409
  * // size: 151296
21245
21410
  * // }
21246
21411
  * ```
21247
- *
21412
+ *
21248
21413
  * **Example:** Compute image embeddings with `Xenova/clip-vit-base-patch32`.
21249
21414
  * ```javascript
21250
21415
  * const image_feature_extractor = await pipeline('image-feature-extraction', 'Xenova/clip-vit-base-patch32');
@@ -21300,12 +21465,12 @@ class ImageFeatureExtractionPipeline extends (/** @type {new (options: ImagePipe
21300
21465
  * @property {string} label The label predicted.
21301
21466
  * @property {number} score The corresponding probability.
21302
21467
  * @typedef {AudioClassificationSingle[]} AudioClassificationOutput
21303
- *
21468
+ *
21304
21469
  * @typedef {Object} AudioClassificationPipelineOptions Parameters specific to audio classification pipelines.
21305
21470
  * @property {number} [top_k=5] The number of top labels that will be returned by the pipeline.
21306
21471
  * If the provided number is `null` or higher than the number of labels available in the model configuration,
21307
21472
  * it will default to the number of labels.
21308
- *
21473
+ *
21309
21474
  * @callback AudioClassificationPipelineCallback Classify the sequence(s) given as inputs.
21310
21475
  * @param {AudioPipelineInputs} audio The input audio file(s) to be classified. The input is either:
21311
21476
  * - `string` or `URL` that is the filename/URL of the audio file, the file will be read at the processor's sampling rate
@@ -21314,14 +21479,14 @@ class ImageFeatureExtractionPipeline extends (/** @type {new (options: ImagePipe
21314
21479
  * - `Float32Array` or `Float64Array` of shape `(n, )`, representing the raw audio at the correct sampling rate (no further check will be done).
21315
21480
  * @param {AudioClassificationPipelineOptions} [options] The options to use for audio classification.
21316
21481
  * @returns {Promise<AudioClassificationOutput|AudioClassificationOutput[]>} An array or object containing the predicted labels and scores.
21317
- *
21482
+ *
21318
21483
  * @typedef {AudioPipelineConstructorArgs & AudioClassificationPipelineCallback & Disposable} AudioClassificationPipelineType
21319
21484
  */
21320
21485
 
21321
21486
  /**
21322
21487
  * Audio classification pipeline using any `AutoModelForAudioClassification`.
21323
21488
  * This pipeline predicts the class of a raw waveform or an audio file.
21324
- *
21489
+ *
21325
21490
  * **Example:** Perform audio classification with `Xenova/wav2vec2-large-xlsr-53-gender-recognition-librispeech`.
21326
21491
  * ```javascript
21327
21492
  * const classifier = await pipeline('audio-classification', 'Xenova/wav2vec2-large-xlsr-53-gender-recognition-librispeech');
@@ -21332,7 +21497,7 @@ class ImageFeatureExtractionPipeline extends (/** @type {new (options: ImagePipe
21332
21497
  * // { label: 'female', score: 0.001845747814513743 }
21333
21498
  * // ]
21334
21499
  * ```
21335
- *
21500
+ *
21336
21501
  * **Example:** Perform audio classification with `Xenova/ast-finetuned-audioset-10-10-0.4593` and return top 4 results.
21337
21502
  * ```javascript
21338
21503
  * const classifier = await pipeline('audio-classification', 'Xenova/ast-finetuned-audioset-10-10-0.4593');
@@ -21397,12 +21562,12 @@ class AudioClassificationPipeline extends (/** @type {new (options: AudioPipelin
21397
21562
  * @typedef {Object} ZeroShotAudioClassificationOutput
21398
21563
  * @property {string} label The label identified by the model. It is one of the suggested `candidate_label`.
21399
21564
  * @property {number} score The score attributed by the model for that label (between 0 and 1).
21400
- *
21565
+ *
21401
21566
  * @typedef {Object} ZeroShotAudioClassificationPipelineOptions Parameters specific to zero-shot audio classification pipelines.
21402
21567
  * @property {string} [hypothesis_template="This is a sound of {}."] The sentence used in conjunction with `candidate_labels`
21403
21568
  * to attempt the audio classification by replacing the placeholder with the candidate_labels.
21404
21569
  * Then likelihood is estimated by using `logits_per_audio`.
21405
- *
21570
+ *
21406
21571
  * @callback ZeroShotAudioClassificationPipelineCallback Classify the sequence(s) given as inputs.
21407
21572
  * @param {AudioPipelineInputs} audio The input audio file(s) to be classified. The input is either:
21408
21573
  * - `string` or `URL` that is the filename/URL of the audio file, the file will be read at the processor's sampling rate
@@ -21412,14 +21577,14 @@ class AudioClassificationPipeline extends (/** @type {new (options: AudioPipelin
21412
21577
  * @param {string[]} candidate_labels The candidate labels for this audio.
21413
21578
  * @param {ZeroShotAudioClassificationPipelineOptions} [options] The options to use for zero-shot audio classification.
21414
21579
  * @returns {Promise<ZeroShotAudioClassificationOutput[]|ZeroShotAudioClassificationOutput[][]>} An array of objects containing the predicted labels and scores.
21415
- *
21580
+ *
21416
21581
  * @typedef {TextAudioPipelineConstructorArgs & ZeroShotAudioClassificationPipelineCallback & Disposable} ZeroShotAudioClassificationPipelineType
21417
21582
  */
21418
21583
 
21419
21584
  /**
21420
21585
  * Zero shot audio classification pipeline using `ClapModel`. This pipeline predicts the class of an audio when you
21421
21586
  * provide an audio and a set of `candidate_labels`.
21422
- *
21587
+ *
21423
21588
  * **Example**: Perform zero-shot audio classification with `Xenova/clap-htsat-unfused`.
21424
21589
  * ```javascript
21425
21590
  * const classifier = await pipeline('zero-shot-audio-classification', 'Xenova/clap-htsat-unfused');
@@ -21452,7 +21617,7 @@ class ZeroShotAudioClassificationPipeline extends (/** @type {new (options: Text
21452
21617
  audio = [/** @type {AudioInput} */ (audio)];
21453
21618
  }
21454
21619
 
21455
- // Insert label into hypothesis template
21620
+ // Insert label into hypothesis template
21456
21621
  const texts = candidate_labels.map(
21457
21622
  x => hypothesis_template.replace('{}', x)
21458
21623
  );
@@ -21496,7 +21661,7 @@ class ZeroShotAudioClassificationPipeline extends (/** @type {new (options: Text
21496
21661
  * @property {string} text The recognized text.
21497
21662
  * @property {Chunk[]} [chunks] When using `return_timestamps`, the `chunks` will become a list
21498
21663
  * containing all the various text chunks identified by the model.
21499
- *
21664
+ *
21500
21665
  * @typedef {Object} AutomaticSpeechRecognitionSpecificParams Parameters specific to automatic-speech-recognition pipelines.
21501
21666
  * @property {boolean|'word'} [return_timestamps] Whether to return timestamps or not. Default is `false`.
21502
21667
  * @property {number} [chunk_length_s] The length of audio chunks to process in seconds. Default is 0 (no chunking).
@@ -21506,7 +21671,7 @@ class ZeroShotAudioClassificationPipeline extends (/** @type {new (options: Text
21506
21671
  * @property {string} [task] The task to perform. Default is `null`, meaning it should be auto-detected.
21507
21672
  * @property {number} [num_frames] The number of frames in the input audio.
21508
21673
  * @typedef {import('./generation/configuration_utils.js').GenerationConfig & AutomaticSpeechRecognitionSpecificParams} AutomaticSpeechRecognitionConfig
21509
- *
21674
+ *
21510
21675
  * @callback AutomaticSpeechRecognitionPipelineCallback Transcribe the audio sequence(s) given as inputs to text.
21511
21676
  * @param {AudioPipelineInputs} audio The input audio file(s) to be transcribed. The input is either:
21512
21677
  * - `string` or `URL` that is the filename/URL of the audio file, the file will be read at the processor's sampling rate
@@ -21515,7 +21680,7 @@ class ZeroShotAudioClassificationPipeline extends (/** @type {new (options: Text
21515
21680
  * - `Float32Array` or `Float64Array` of shape `(n, )`, representing the raw audio at the correct sampling rate (no further check will be done).
21516
21681
  * @param {Partial<AutomaticSpeechRecognitionConfig>} [options] Additional keyword arguments to pass along to the generate method of the model.
21517
21682
  * @returns {Promise<AutomaticSpeechRecognitionOutput|AutomaticSpeechRecognitionOutput[]>} An object containing the transcription text and optionally timestamps if `return_timestamps` is `true`.
21518
- *
21683
+ *
21519
21684
  * @typedef {TextAudioPipelineConstructorArgs & AutomaticSpeechRecognitionPipelineCallback & Disposable} AutomaticSpeechRecognitionPipelineType
21520
21685
  */
21521
21686
 
@@ -21529,7 +21694,7 @@ class ZeroShotAudioClassificationPipeline extends (/** @type {new (options: Text
21529
21694
  * const output = await transcriber(url);
21530
21695
  * // { text: " And so my fellow Americans ask not what your country can do for you, ask what you can do for your country." }
21531
21696
  * ```
21532
- *
21697
+ *
21533
21698
  * **Example:** Transcribe English w/ timestamps.
21534
21699
  * ```javascript
21535
21700
  * const transcriber = await pipeline('automatic-speech-recognition', 'Xenova/whisper-tiny.en');
@@ -21543,7 +21708,7 @@ class ZeroShotAudioClassificationPipeline extends (/** @type {new (options: Text
21543
21708
  * // ]
21544
21709
  * // }
21545
21710
  * ```
21546
- *
21711
+ *
21547
21712
  * **Example:** Transcribe English w/ word-level timestamps.
21548
21713
  * ```javascript
21549
21714
  * const transcriber = await pipeline('automatic-speech-recognition', 'Xenova/whisper-tiny.en');
@@ -21562,7 +21727,7 @@ class ZeroShotAudioClassificationPipeline extends (/** @type {new (options: Text
21562
21727
  * // ]
21563
21728
  * // }
21564
21729
  * ```
21565
- *
21730
+ *
21566
21731
  * **Example:** Transcribe French.
21567
21732
  * ```javascript
21568
21733
  * const transcriber = await pipeline('automatic-speech-recognition', 'Xenova/whisper-small');
@@ -21570,7 +21735,7 @@ class ZeroShotAudioClassificationPipeline extends (/** @type {new (options: Text
21570
21735
  * const output = await transcriber(url, { language: 'french', task: 'transcribe' });
21571
21736
  * // { text: " J'adore, j'aime, je n'aime pas, je déteste." }
21572
21737
  * ```
21573
- *
21738
+ *
21574
21739
  * **Example:** Translate French to English.
21575
21740
  * ```javascript
21576
21741
  * const transcriber = await pipeline('automatic-speech-recognition', 'Xenova/whisper-small');
@@ -21578,7 +21743,7 @@ class ZeroShotAudioClassificationPipeline extends (/** @type {new (options: Text
21578
21743
  * const output = await transcriber(url, { language: 'french', task: 'translate' });
21579
21744
  * // { text: " I love, I like, I don't like, I hate." }
21580
21745
  * ```
21581
- *
21746
+ *
21582
21747
  * **Example:** Transcribe/translate audio longer than 30 seconds.
21583
21748
  * ```javascript
21584
21749
  * const transcriber = await pipeline('automatic-speech-recognition', 'Xenova/whisper-tiny.en');
@@ -21801,18 +21966,18 @@ class AutomaticSpeechRecognitionPipeline extends (/** @type {new (options: TextA
21801
21966
  * @typedef {Object} ImageToTextSingle
21802
21967
  * @property {string} generated_text The generated text.
21803
21968
  * @typedef {ImageToTextSingle[]} ImageToTextOutput
21804
- *
21969
+ *
21805
21970
  * @callback ImageToTextPipelineCallback Assign labels to the image(s) passed as inputs.
21806
21971
  * @param {ImagePipelineInputs} texts The images to be captioned.
21807
21972
  * @param {Partial<import('./generation/configuration_utils.js').GenerationConfig>} [options] Additional keyword arguments to pass along to the generate method of the model.
21808
21973
  * @returns {Promise<ImageToTextOutput|ImageToTextOutput[]>} An object (or array of objects) containing the generated text(s).
21809
- *
21974
+ *
21810
21975
  * @typedef {TextImagePipelineConstructorArgs & ImageToTextPipelineCallback & Disposable} ImageToTextPipelineType
21811
21976
  */
21812
21977
 
21813
21978
  /**
21814
21979
  * Image To Text pipeline using a `AutoModelForVision2Seq`. This pipeline predicts a caption for a given image.
21815
- *
21980
+ *
21816
21981
  * **Example:** Generate a caption for an image w/ `Xenova/vit-gpt2-image-captioning`.
21817
21982
  * ```javascript
21818
21983
  * const captioner = await pipeline('image-to-text', 'Xenova/vit-gpt2-image-captioning');
@@ -21820,7 +21985,7 @@ class AutomaticSpeechRecognitionPipeline extends (/** @type {new (options: TextA
21820
21985
  * const output = await captioner(url);
21821
21986
  * // [{ generated_text: 'a cat laying on a couch with another cat' }]
21822
21987
  * ```
21823
- *
21988
+ *
21824
21989
  * **Example:** Optical Character Recognition (OCR) w/ `Xenova/trocr-small-handwritten`.
21825
21990
  * ```javascript
21826
21991
  * const captioner = await pipeline('image-to-text', 'Xenova/trocr-small-handwritten');
@@ -21866,22 +22031,22 @@ class ImageToTextPipeline extends (/** @type {new (options: TextImagePipelineCon
21866
22031
  * @property {string} label The label identified by the model.
21867
22032
  * @property {number} score The score attributed by the model for that label.
21868
22033
  * @typedef {ImageClassificationSingle[]} ImageClassificationOutput
21869
- *
22034
+ *
21870
22035
  * @typedef {Object} ImageClassificationPipelineOptions Parameters specific to image classification pipelines.
21871
- * @property {number} [top_k=1] The number of top labels that will be returned by the pipeline.
21872
- *
22036
+ * @property {number} [top_k=1] The number of top labels that will be returned by the pipeline.
22037
+ *
21873
22038
  * @callback ImageClassificationPipelineCallback Assign labels to the image(s) passed as inputs.
21874
22039
  * @param {ImagePipelineInputs} images The input images(s) to be classified.
21875
22040
  * @param {ImageClassificationPipelineOptions} [options] The options to use for image classification.
21876
22041
  * @returns {Promise<ImageClassificationOutput|ImageClassificationOutput[]>} An array or object containing the predicted labels and scores.
21877
- *
22042
+ *
21878
22043
  * @typedef {ImagePipelineConstructorArgs & ImageClassificationPipelineCallback & Disposable} ImageClassificationPipelineType
21879
22044
  */
21880
22045
 
21881
22046
  /**
21882
22047
  * Image classification pipeline using any `AutoModelForImageClassification`.
21883
22048
  * This pipeline predicts the class of an image.
21884
- *
22049
+ *
21885
22050
  * **Example:** Classify an image.
21886
22051
  * ```javascript
21887
22052
  * const classifier = await pipeline('image-classification', 'Xenova/vit-base-patch16-224');
@@ -21891,7 +22056,7 @@ class ImageToTextPipeline extends (/** @type {new (options: TextImagePipelineCon
21891
22056
  * // { label: 'tiger, Panthera tigris', score: 0.632695734500885 },
21892
22057
  * // ]
21893
22058
  * ```
21894
- *
22059
+ *
21895
22060
  * **Example:** Classify an image and return top `n` classes.
21896
22061
  * ```javascript
21897
22062
  * const classifier = await pipeline('image-classification', 'Xenova/vit-base-patch16-224');
@@ -21903,7 +22068,7 @@ class ImageToTextPipeline extends (/** @type {new (options: TextImagePipelineCon
21903
22068
  * // { label: 'lion, king of beasts, Panthera leo', score: 0.00045060308184474707 },
21904
22069
  * // ]
21905
22070
  * ```
21906
- *
22071
+ *
21907
22072
  * **Example:** Classify an image and return all classes.
21908
22073
  * ```javascript
21909
22074
  * const classifier = await pipeline('image-classification', 'Xenova/vit-base-patch16-224');
@@ -21970,7 +22135,7 @@ class ImageClassificationPipeline extends (/** @type {new (options: ImagePipelin
21970
22135
  * @property {string|null} label The label of the segment.
21971
22136
  * @property {number|null} score The score of the segment.
21972
22137
  * @property {RawImage} mask The mask of the segment.
21973
- *
22138
+ *
21974
22139
  * @typedef {Object} ImageSegmentationPipelineOptions Parameters specific to image segmentation pipelines.
21975
22140
  * @property {number} [threshold=0.5] Probability threshold to filter out predicted masks.
21976
22141
  * @property {number} [mask_threshold=0.5] Threshold to use when turning the predicted masks into binary values.
@@ -21979,19 +22144,19 @@ class ImageClassificationPipeline extends (/** @type {new (options: ImagePipelin
21979
22144
  * depending on model capabilities. If not set, the pipeline will attempt to resolve (in that order).
21980
22145
  * @property {number[]} [label_ids_to_fuse=null] List of label ids to fuse. If not set, do not fuse any labels.
21981
22146
  * @property {number[][]} [target_sizes=null] List of target sizes for the input images. If not set, use the original image sizes.
21982
- *
22147
+ *
21983
22148
  * @callback ImageSegmentationPipelineCallback Segment the input images.
21984
22149
  * @param {ImagePipelineInputs} images The input images.
21985
22150
  * @param {ImageSegmentationPipelineOptions} [options] The options to use for image segmentation.
21986
22151
  * @returns {Promise<ImageSegmentationPipelineOutput[]>} The annotated segments.
21987
- *
22152
+ *
21988
22153
  * @typedef {ImagePipelineConstructorArgs & ImageSegmentationPipelineCallback & Disposable} ImageSegmentationPipelineType
21989
22154
  */
21990
22155
 
21991
22156
  /**
21992
22157
  * Image segmentation pipeline using any `AutoModelForXXXSegmentation`.
21993
22158
  * This pipeline predicts masks of objects and their classes.
21994
- *
22159
+ *
21995
22160
  * **Example:** Perform image segmentation with `Xenova/detr-resnet-50-panoptic`.
21996
22161
  * ```javascript
21997
22162
  * const segmenter = await pipeline('image-segmentation', 'Xenova/detr-resnet-50-panoptic');
@@ -22075,12 +22240,17 @@ class ImageSegmentationPipeline extends (/** @type {new (options: ImagePipelineC
22075
22240
  /** @type {ImageSegmentationPipelineOutput[]} */
22076
22241
  const annotation = [];
22077
22242
  if (!subtask) {
22243
+ // We define an epsilon to safeguard against numerical/precision issues when detecting
22244
+ // the normalization mode of the output (i.e., sigmoid already applied, or not).
22245
+ // See https://github.com/microsoft/onnxruntime/issues/23943 for more information.
22246
+ const epsilon = 1e-5;
22247
+
22078
22248
  // Perform standard image segmentation
22079
22249
  const result = output[outputNames[0]];
22080
22250
  for (let i = 0; i < imageSizes.length; ++i) {
22081
22251
  const size = imageSizes[i];
22082
22252
  const item = result[i];
22083
- if (item.data.some(x => x < 0 || x > 1)) {
22253
+ if (item.data.some(x => x < -epsilon || x > 1 + epsilon)) {
22084
22254
  item.sigmoid_();
22085
22255
  }
22086
22256
  const mask = await _utils_image_js__WEBPACK_IMPORTED_MODULE_9__.RawImage.fromTensor(item.mul_(255).to('uint8')).resize(size[1], size[0]);
@@ -22149,19 +22319,19 @@ class ImageSegmentationPipeline extends (/** @type {new (options: ImagePipelineC
22149
22319
 
22150
22320
  /**
22151
22321
  * @typedef {Object} BackgroundRemovalPipelineOptions Parameters specific to image segmentation pipelines.
22152
- *
22322
+ *
22153
22323
  * @callback BackgroundRemovalPipelineCallback Segment the input images.
22154
22324
  * @param {ImagePipelineInputs} images The input images.
22155
22325
  * @param {BackgroundRemovalPipelineOptions} [options] The options to use for image segmentation.
22156
22326
  * @returns {Promise<RawImage[]>} The images with the background removed.
22157
- *
22327
+ *
22158
22328
  * @typedef {ImagePipelineConstructorArgs & BackgroundRemovalPipelineCallback & Disposable} BackgroundRemovalPipelineType
22159
22329
  */
22160
22330
 
22161
22331
  /**
22162
22332
  * Background removal pipeline using certain `AutoModelForXXXSegmentation`.
22163
22333
  * This pipeline removes the backgrounds of images.
22164
- *
22334
+ *
22165
22335
  * **Example:** Perform background removal with `Xenova/modnet`.
22166
22336
  * ```javascript
22167
22337
  * const segmenter = await pipeline('background-removal', 'Xenova/modnet');
@@ -22172,7 +22342,7 @@ class ImageSegmentationPipeline extends (/** @type {new (options: ImagePipelineC
22172
22342
  * // ]
22173
22343
  * ```
22174
22344
  */
22175
- class BackgroundRemovalPipeline extends (/** @type {new (options: ImagePipelineConstructorArgs) => ImageSegmentationPipelineType} */ (ImageSegmentationPipeline)) {
22345
+ class BackgroundRemovalPipeline extends (/** @type {new (options: ImagePipelineConstructorArgs) => BackgroundRemovalPipelineType} */ (/** @type {any} */(ImageSegmentationPipeline))) {
22176
22346
  /**
22177
22347
  * Create a new BackgroundRemovalPipeline.
22178
22348
  * @param {ImagePipelineConstructorArgs} options An object used to instantiate the pipeline.
@@ -22207,25 +22377,25 @@ class BackgroundRemovalPipeline extends (/** @type {new (options: ImagePipelineC
22207
22377
  * @typedef {Object} ZeroShotImageClassificationOutput
22208
22378
  * @property {string} label The label identified by the model. It is one of the suggested `candidate_label`.
22209
22379
  * @property {number} score The score attributed by the model for that label (between 0 and 1).
22210
- *
22380
+ *
22211
22381
  * @typedef {Object} ZeroShotImageClassificationPipelineOptions Parameters specific to zero-shot image classification pipelines.
22212
22382
  * @property {string} [hypothesis_template="This is a photo of {}"] The sentence used in conjunction with `candidate_labels`
22213
22383
  * to attempt the image classification by replacing the placeholder with the candidate_labels.
22214
22384
  * Then likelihood is estimated by using `logits_per_image`.
22215
- *
22385
+ *
22216
22386
  * @callback ZeroShotImageClassificationPipelineCallback Assign labels to the image(s) passed as inputs.
22217
22387
  * @param {ImagePipelineInputs} images The input images.
22218
22388
  * @param {string[]} candidate_labels The candidate labels for this image.
22219
22389
  * @param {ZeroShotImageClassificationPipelineOptions} [options] The options to use for zero-shot image classification.
22220
22390
  * @returns {Promise<ZeroShotImageClassificationOutput[]|ZeroShotImageClassificationOutput[][]>} An array of objects containing the predicted labels and scores.
22221
- *
22391
+ *
22222
22392
  * @typedef {TextImagePipelineConstructorArgs & ZeroShotImageClassificationPipelineCallback & Disposable} ZeroShotImageClassificationPipelineType
22223
22393
  */
22224
22394
 
22225
22395
  /**
22226
22396
  * Zero shot image classification pipeline. This pipeline predicts the class of
22227
22397
  * an image when you provide an image and a set of `candidate_labels`.
22228
- *
22398
+ *
22229
22399
  * **Example:** Zero shot image classification w/ `Xenova/clip-vit-base-patch32`.
22230
22400
  * ```javascript
22231
22401
  * const classifier = await pipeline('zero-shot-image-classification', 'Xenova/clip-vit-base-patch32');
@@ -22255,7 +22425,7 @@ class ZeroShotImageClassificationPipeline extends (/** @type {new (options: Text
22255
22425
  const isBatched = Array.isArray(images);
22256
22426
  const preparedImages = await prepareImages(images);
22257
22427
 
22258
- // Insert label into hypothesis template
22428
+ // Insert label into hypothesis template
22259
22429
  const texts = candidate_labels.map(
22260
22430
  x => hypothesis_template.replace('{}', x)
22261
22431
  );
@@ -22302,23 +22472,23 @@ class ZeroShotImageClassificationPipeline extends (/** @type {new (options: Text
22302
22472
  * @property {number} score The score attributed by the model for that label.
22303
22473
  * @property {BoundingBox} box The bounding box of detected object in image's original size, or as a percentage if `percentage` is set to true.
22304
22474
  * @typedef {ObjectDetectionPipelineSingle[]} ObjectDetectionPipelineOutput
22305
- *
22475
+ *
22306
22476
  * @typedef {Object} ObjectDetectionPipelineOptions Parameters specific to object detection pipelines.
22307
22477
  * @property {number} [threshold=0.9] The threshold used to filter boxes by score.
22308
22478
  * @property {boolean} [percentage=false] Whether to return the boxes coordinates in percentage (true) or in pixels (false).
22309
- *
22479
+ *
22310
22480
  * @callback ObjectDetectionPipelineCallback Detect objects (bounding boxes & classes) in the image(s) passed as inputs.
22311
22481
  * @param {ImagePipelineInputs} images The input images.
22312
22482
  * @param {ObjectDetectionPipelineOptions} [options] The options to use for object detection.
22313
- * @returns {Promise<ObjectDetectionPipelineOutput|ObjectDetectionPipelineOutput[]>} A list of objects or a list of list of objects.
22314
- *
22483
+ * @returns {Promise<ObjectDetectionPipelineOutput|ObjectDetectionPipelineOutput[]>} A list of objects or a list of list of objects.
22484
+ *
22315
22485
  * @typedef {ImagePipelineConstructorArgs & ObjectDetectionPipelineCallback & Disposable} ObjectDetectionPipelineType
22316
22486
  */
22317
22487
 
22318
22488
  /**
22319
22489
  * Object detection pipeline using any `AutoModelForObjectDetection`.
22320
22490
  * This pipeline predicts bounding boxes of objects and their classes.
22321
- *
22491
+ *
22322
22492
  * **Example:** Run object-detection with `Xenova/detr-resnet-50`.
22323
22493
  * ```javascript
22324
22494
  * const detector = await pipeline('object-detection', 'Xenova/detr-resnet-50');
@@ -22392,27 +22562,27 @@ class ObjectDetectionPipeline extends (/** @type {new (options: ImagePipelineCon
22392
22562
  * @property {string} label Text query corresponding to the found object.
22393
22563
  * @property {number} score Score corresponding to the object (between 0 and 1).
22394
22564
  * @property {BoundingBox} box Bounding box of the detected object in image's original size, or as a percentage if `percentage` is set to true.
22395
- *
22565
+ *
22396
22566
  * @typedef {Object} ZeroShotObjectDetectionPipelineOptions Parameters specific to zero-shot object detection pipelines.
22397
22567
  * @property {number} [threshold=0.1] The probability necessary to make a prediction.
22398
22568
  * @property {number} [top_k=null] The number of top predictions that will be returned by the pipeline.
22399
22569
  * If the provided number is `null` or higher than the number of predictions available, it will default
22400
22570
  * to the number of predictions.
22401
22571
  * @property {boolean} [percentage=false] Whether to return the boxes coordinates in percentage (true) or in pixels (false).
22402
- *
22572
+ *
22403
22573
  * @callback ZeroShotObjectDetectionPipelineCallback Detect objects (bounding boxes & classes) in the image(s) passed as inputs.
22404
22574
  * @param {ImagePipelineInputs} images The input images.
22405
22575
  * @param {string[]} candidate_labels What the model should recognize in the image.
22406
22576
  * @param {ZeroShotObjectDetectionPipelineOptions} [options] The options to use for zero-shot object detection.
22407
22577
  * @returns {Promise<ZeroShotObjectDetectionOutput[]|ZeroShotObjectDetectionOutput[][]>} An array of objects containing the predicted labels, scores, and bounding boxes.
22408
- *
22578
+ *
22409
22579
  * @typedef {TextImagePipelineConstructorArgs & ZeroShotObjectDetectionPipelineCallback & Disposable} ZeroShotObjectDetectionPipelineType
22410
22580
  */
22411
22581
 
22412
22582
  /**
22413
22583
  * Zero-shot object detection pipeline. This pipeline predicts bounding boxes of
22414
22584
  * objects when you provide an image and a set of `candidate_labels`.
22415
- *
22585
+ *
22416
22586
  * **Example:** Zero-shot object detection w/ `Xenova/owlvit-base-patch32`.
22417
22587
  * ```javascript
22418
22588
  * const detector = await pipeline('zero-shot-object-detection', 'Xenova/owlvit-base-patch32');
@@ -22442,7 +22612,7 @@ class ObjectDetectionPipeline extends (/** @type {new (options: ImagePipelineCon
22442
22612
  * // }
22443
22613
  * // ]
22444
22614
  * ```
22445
- *
22615
+ *
22446
22616
  * **Example:** Zero-shot object detection w/ `Xenova/owlvit-base-patch32` (returning top 4 matches and setting a threshold).
22447
22617
  * ```javascript
22448
22618
  * const detector = await pipeline('zero-shot-object-detection', 'Xenova/owlvit-base-patch32');
@@ -22557,13 +22727,13 @@ class ZeroShotObjectDetectionPipeline extends (/** @type {new (options: TextImag
22557
22727
  * @typedef {Object} DocumentQuestionAnsweringSingle
22558
22728
  * @property {string} answer The generated text.
22559
22729
  * @typedef {DocumentQuestionAnsweringSingle[]} DocumentQuestionAnsweringOutput
22560
- *
22730
+ *
22561
22731
  * @callback DocumentQuestionAnsweringPipelineCallback Answer the question given as input by using the document.
22562
22732
  * @param {ImageInput} image The image of the document to use.
22563
22733
  * @param {string} question A question to ask of the document.
22564
22734
  * @param {Partial<import('./generation/configuration_utils.js').GenerationConfig>} [options] Additional keyword arguments to pass along to the generate method of the model.
22565
22735
  * @returns {Promise<DocumentQuestionAnsweringOutput|DocumentQuestionAnsweringOutput[]>} An object (or array of objects) containing the answer(s).
22566
- *
22736
+ *
22567
22737
  * @typedef {TextImagePipelineConstructorArgs & DocumentQuestionAnsweringPipelineCallback & Disposable} DocumentQuestionAnsweringPipelineType
22568
22738
  */
22569
22739
 
@@ -22571,7 +22741,7 @@ class ZeroShotObjectDetectionPipeline extends (/** @type {new (options: TextImag
22571
22741
  * Document Question Answering pipeline using any `AutoModelForDocumentQuestionAnswering`.
22572
22742
  * The inputs/outputs are similar to the (extractive) question answering pipeline; however,
22573
22743
  * the pipeline takes an image (and optional OCR'd words/boxes) as input instead of text context.
22574
- *
22744
+ *
22575
22745
  * **Example:** Answer questions about a document with `Xenova/donut-base-finetuned-docvqa`.
22576
22746
  * ```javascript
22577
22747
  * const qa_pipeline = await pipeline('document-question-answering', 'Xenova/donut-base-finetuned-docvqa');
@@ -22641,22 +22811,22 @@ class DocumentQuestionAnsweringPipeline extends (/** @type {new (options: TextIm
22641
22811
  * @typedef {Object} TextToAudioOutput
22642
22812
  * @property {Float32Array} audio The generated audio waveform.
22643
22813
  * @property {number} sampling_rate The sampling rate of the generated audio waveform.
22644
- *
22814
+ *
22645
22815
  * @typedef {Object} TextToAudioPipelineOptions Parameters specific to text-to-audio pipelines.
22646
22816
  * @property {Tensor|Float32Array|string|URL} [speaker_embeddings=null] The speaker embeddings (if the model requires it).
22647
- *
22817
+ *
22648
22818
  * @callback TextToAudioPipelineCallback Generates speech/audio from the inputs.
22649
22819
  * @param {string|string[]} texts The text(s) to generate.
22650
22820
  * @param {TextToAudioPipelineOptions} options Parameters passed to the model generation/forward method.
22651
22821
  * @returns {Promise<TextToAudioOutput>} An object containing the generated audio and sampling rate.
22652
- *
22822
+ *
22653
22823
  * @typedef {TextToAudioPipelineConstructorArgs & TextToAudioPipelineCallback & Disposable} TextToAudioPipelineType
22654
22824
  */
22655
22825
 
22656
22826
  /**
22657
22827
  * Text-to-audio generation pipeline using any `AutoModelForTextToWaveform` or `AutoModelForTextToSpectrogram`.
22658
22828
  * This pipeline generates an audio file from an input text and optional other conditional inputs.
22659
- *
22829
+ *
22660
22830
  * **Example:** Generate audio from text with `Xenova/speecht5_tts`.
22661
22831
  * ```javascript
22662
22832
  * const synthesizer = await pipeline('text-to-speech', 'Xenova/speecht5_tts', { quantized: false });
@@ -22667,17 +22837,17 @@ class DocumentQuestionAnsweringPipeline extends (/** @type {new (options: TextIm
22667
22837
  * // sampling_rate: 16000
22668
22838
  * // }
22669
22839
  * ```
22670
- *
22840
+ *
22671
22841
  * You can then save the audio to a .wav file with the `wavefile` package:
22672
22842
  * ```javascript
22673
22843
  * import wavefile from 'wavefile';
22674
22844
  * import fs from 'fs';
22675
- *
22845
+ *
22676
22846
  * const wav = new wavefile.WaveFile();
22677
22847
  * wav.fromScratch(1, out.sampling_rate, '32f', out.audio);
22678
22848
  * fs.writeFileSync('out.wav', wav.toBuffer());
22679
22849
  * ```
22680
- *
22850
+ *
22681
22851
  * **Example:** Multilingual speech generation with `Xenova/mms-tts-fra`. See [here](https://huggingface.co/models?pipeline_tag=text-to-speech&other=vits&sort=trending) for the full list of available languages (1107).
22682
22852
  * ```javascript
22683
22853
  * const synthesizer = await pipeline('text-to-speech', 'Xenova/mms-tts-fra');
@@ -22783,13 +22953,13 @@ class TextToAudioPipeline extends (/** @type {new (options: TextToAudioPipelineC
22783
22953
  * @callback ImageToImagePipelineCallback Transform the image(s) passed as inputs.
22784
22954
  * @param {ImagePipelineInputs} images The images to transform.
22785
22955
  * @returns {Promise<RawImage|RawImage[]>} The transformed image or list of images.
22786
- *
22956
+ *
22787
22957
  * @typedef {ImagePipelineConstructorArgs & ImageToImagePipelineCallback & Disposable} ImageToImagePipelineType
22788
22958
  */
22789
22959
 
22790
22960
  /**
22791
22961
  * Image to Image pipeline using any `AutoModelForImageToImage`. This pipeline generates an image based on a previous image input.
22792
- *
22962
+ *
22793
22963
  * **Example:** Super-resolution w/ `Xenova/swin2SR-classical-sr-x2-64`
22794
22964
  * ```javascript
22795
22965
  * const upscaler = await pipeline('image-to-image', 'Xenova/swin2SR-classical-sr-x2-64');
@@ -22834,17 +23004,17 @@ class ImageToImagePipeline extends (/** @type {new (options: ImagePipelineConstr
22834
23004
  * @typedef {Object} DepthEstimationPipelineOutput
22835
23005
  * @property {Tensor} predicted_depth The raw depth map predicted by the model.
22836
23006
  * @property {RawImage} depth The processed depth map as an image (with the same size as the input image).
22837
- *
23007
+ *
22838
23008
  * @callback DepthEstimationPipelineCallback Predicts the depth for the image(s) passed as inputs.
22839
23009
  * @param {ImagePipelineInputs} images The images to compute depth for.
22840
23010
  * @returns {Promise<DepthEstimationPipelineOutput|DepthEstimationPipelineOutput[]>} An image or a list of images containing result(s).
22841
- *
23011
+ *
22842
23012
  * @typedef {ImagePipelineConstructorArgs & DepthEstimationPipelineCallback & Disposable} DepthEstimationPipelineType
22843
23013
  */
22844
23014
 
22845
23015
  /**
22846
23016
  * Depth estimation pipeline using any `AutoModelForDepthEstimation`. This pipeline predicts the depth of an image.
22847
- *
23017
+ *
22848
23018
  * **Example:** Depth estimation w/ `Xenova/dpt-hybrid-midas`
22849
23019
  * ```javascript
22850
23020
  * const depth_estimator = await pipeline('depth-estimation', 'Xenova/dpt-hybrid-midas');
@@ -23229,7 +23399,7 @@ const TASK_ALIASES = Object.freeze({
23229
23399
 
23230
23400
  /**
23231
23401
  * Utility factory method to build a `Pipeline` object.
23232
- *
23402
+ *
23233
23403
  * @template {PipelineType} T The type of pipeline to return.
23234
23404
  * @param {T} task The task defining which pipeline will be returned. Currently accepted tasks are:
23235
23405
  * - `"audio-classification"`: will return a `AudioClassificationPipeline`.
@@ -26047,13 +26217,12 @@ class PreTrainedTokenizer extends _utils_generic_js__WEBPACK_IMPORTED_MODULE_0__
26047
26217
  this.decoder.end_of_word_suffix = this.model.end_of_word_suffix;
26048
26218
  }
26049
26219
 
26050
- this.added_tokens_regex = this.added_tokens.length > 0 ? new RegExp(
26051
- this.added_tokens.slice()
26052
- // Sort by length (desc) to avoid early partial matches
26053
- .sort((a, b) => b.content.length - a.content.length)
26054
- .map(x => `${x.lstrip ? '\\s*' : ''}(${(0,_utils_core_js__WEBPACK_IMPORTED_MODULE_1__.escapeRegExp)(x.content)})${x.rstrip ? '\\s*' : ''}`)
26055
- .join('|')
26056
- ) : null;
26220
+ this.added_tokens_splitter = new _utils_data_structures_js__WEBPACK_IMPORTED_MODULE_5__.DictionarySplitter(
26221
+ this.added_tokens.map(x => x.content),
26222
+ );
26223
+
26224
+ /** @type {Map<string, AddedToken>} */
26225
+ this.added_tokens_map = new Map(this.added_tokens.map(x => [x.content, x]))
26057
26226
 
26058
26227
  // Set mask token if present (otherwise will be undefined, which is fine)
26059
26228
  this.mask_token = this.getToken('mask_token');
@@ -26348,40 +26517,50 @@ class PreTrainedTokenizer extends _utils_generic_js__WEBPACK_IMPORTED_MODULE_0__
26348
26517
  // Actual function which does encoding, for a single text
26349
26518
  // First, we take care of special tokens. Needed to avoid issues arising from
26350
26519
  // normalization and/or pretokenization (which may not preserve special tokens)
26351
- const sections = this.added_tokens_regex ? text.split(this.added_tokens_regex).filter(x => x) : [text];
26352
-
26353
- const tokens = sections.map((x, section_index) => {
26354
- const addedToken = this.added_tokens.find(t => t.content === x);
26355
- if (addedToken !== undefined) {
26356
- // Ignore added tokens
26357
- return x
26358
- } else {
26359
- if (this.remove_space === true) {
26360
- x = x.trim().split(/\s+/).join(' ');
26361
- }
26362
- if (this.do_lowercase_and_remove_accent) {
26363
- x = lowercase_and_remove_accent(x);
26520
+ const sections = this.added_tokens_splitter.split(text);
26521
+
26522
+ // Process left/right stripping of added tokens
26523
+ for (let i = 0; i < sections.length; ++i) {
26524
+ const addedToken = this.added_tokens_map.get(sections[i]);
26525
+ if (addedToken) {
26526
+ if (addedToken.lstrip && i > 0) {
26527
+ sections[i - 1] = sections[i - 1].trimEnd();
26364
26528
  }
26365
-
26366
- if (this.normalizer !== null) {
26367
- x = this.normalizer(x);
26529
+ if (addedToken.rstrip && i < sections.length - 1) {
26530
+ sections[i + 1] = sections[i + 1].trimStart();
26368
26531
  }
26532
+ }
26533
+ }
26369
26534
 
26370
- // If, after normalization, this section is empty (e.g., trimming whitespace),
26371
- // we return an empty array
26372
- if (x.length === 0) {
26373
- return [];
26374
- }
26535
+ const tokens = sections.flatMap((x, section_index) => {
26536
+ if (x.length === 0) return [];
26537
+ if (this.added_tokens_map.has(x)) return [x]; // Return added tokens unchanged
26375
26538
 
26376
- const sectionTokens = (this.pre_tokenizer !== null) ? this.pre_tokenizer(x, {
26377
- section_index,
26378
- }) : [x];
26539
+ if (this.remove_space === true) {
26540
+ x = x.trim().split(/\s+/).join(' ');
26541
+ }
26542
+ if (this.do_lowercase_and_remove_accent) {
26543
+ x = lowercase_and_remove_accent(x);
26544
+ }
26379
26545
 
26380
- const tokens = this.model(sectionTokens);
26546
+ if (this.normalizer !== null) {
26547
+ x = this.normalizer(x);
26548
+ }
26381
26549
 
26382
- return tokens;
26550
+ // If, after normalization, this section is empty (e.g., trimming whitespace),
26551
+ // we return an empty array
26552
+ if (x.length === 0) {
26553
+ return [];
26383
26554
  }
26384
- }).flat();
26555
+
26556
+ const sectionTokens = (this.pre_tokenizer !== null) ? this.pre_tokenizer(x, {
26557
+ section_index,
26558
+ }) : [x];
26559
+
26560
+ const tokens = this.model(sectionTokens);
26561
+
26562
+ return tokens;
26563
+ });
26385
26564
 
26386
26565
  return tokens;
26387
26566
  }
@@ -29006,6 +29185,7 @@ function count(arr, value) {
29006
29185
  __webpack_require__.r(__webpack_exports__);
29007
29186
  /* harmony export */ __webpack_require__.d(__webpack_exports__, {
29008
29187
  /* harmony export */ CharTrie: () => (/* binding */ CharTrie),
29188
+ /* harmony export */ DictionarySplitter: () => (/* binding */ DictionarySplitter),
29009
29189
  /* harmony export */ PriorityQueue: () => (/* binding */ PriorityQueue),
29010
29190
  /* harmony export */ TokenLattice: () => (/* binding */ TokenLattice)
29011
29191
  /* harmony export */ });
@@ -29457,6 +29637,80 @@ class TokenLatticeNode {
29457
29637
  }
29458
29638
  }
29459
29639
 
29640
+ /**
29641
+ * A data structure which uses a trie to split a string into tokens based on a dictionary.
29642
+ * It can also use a regular expression to preprocess the input text before splitting.
29643
+ *
29644
+ * NOTE: To ensure multi-byte characters are handled correctly, we operate at byte-level instead of character-level.
29645
+ */
29646
+ class DictionarySplitter {
29647
+ /**
29648
+ * @param {string[]} dictionary The dictionary of words to use for splitting.
29649
+ */
29650
+ constructor(dictionary) {
29651
+ this.trie = this._buildTrie(dictionary);
29652
+ }
29653
+
29654
+ /**
29655
+ * Builds a trie from the given dictionary.
29656
+ * @param {string[]} dictionary The dictionary of words to build the trie from.
29657
+ * @returns {Object} The root node of the trie.
29658
+ * @private
29659
+ */
29660
+ _buildTrie(dictionary) {
29661
+ const trie = Object.create(null);
29662
+ for (const word of dictionary) {
29663
+ let node = trie;
29664
+ for (let i = 0; i < word.length; ++i) {
29665
+ node = (node[word[i]] ??= Object.create(null));
29666
+ }
29667
+ node.end = word;
29668
+ }
29669
+ return trie;
29670
+ }
29671
+
29672
+ /**
29673
+ * Splits the input text into tokens based on the dictionary.
29674
+ * @param {string} text The input text to split.
29675
+ * @returns {string[]} An array of tokens.
29676
+ */
29677
+ split(text) {
29678
+ const result = [];
29679
+ const n = text.length;
29680
+ let start = 0;
29681
+ let i = 0;
29682
+
29683
+ while (i < n) {
29684
+ let node = this.trie;
29685
+ let match = null;
29686
+ let j = i;
29687
+
29688
+ while (j < n && (node = node[text[j]])) {
29689
+ if (node.end) {
29690
+ // Always keep the last (i.e., longest) match.
29691
+ match = node.end;
29692
+ }
29693
+ ++j;
29694
+ }
29695
+
29696
+ if (match) {
29697
+ if (i > start) {
29698
+ result.push(text.slice(start, i));
29699
+ }
29700
+ result.push(match);
29701
+ i += match.length;
29702
+ start = i;
29703
+ } else {
29704
+ ++i;
29705
+ }
29706
+ }
29707
+ if (start < n) {
29708
+ result.push(text.slice(start));
29709
+ }
29710
+ return result;
29711
+ }
29712
+ }
29713
+
29460
29714
 
29461
29715
  /***/ }),
29462
29716
 
@@ -29836,6 +30090,22 @@ function isValidUrl(string, protocols = null, validHosts = null) {
29836
30090
  return true;
29837
30091
  }
29838
30092
 
30093
+ const REPO_ID_REGEX = /^(\b[\w\-.]+\b\/)?\b[\w\-.]{1,96}\b$/;
30094
+
30095
+ /**
30096
+ * Tests whether a string is a valid Hugging Face model ID or not.
30097
+ * Adapted from https://github.com/huggingface/huggingface_hub/blob/6378820ebb03f071988a96c7f3268f5bdf8f9449/src/huggingface_hub/utils/_validators.py#L119-L170
30098
+ *
30099
+ * @param {string} string The string to test
30100
+ * @returns {boolean} True if the string is a valid model ID, false otherwise.
30101
+ */
30102
+ function isValidHfModelId(string) {
30103
+ if (!REPO_ID_REGEX.test(string)) return false;
30104
+ if (string.includes("..") || string.includes("--")) return false;
30105
+ if (string.endsWith(".git") || string.endsWith(".ipynb")) return false;
30106
+ return true;
30107
+ }
30108
+
29839
30109
  /**
29840
30110
  * Helper function to get a file, using either the Fetch API or FileSystem API.
29841
30111
  *
@@ -30088,12 +30358,13 @@ async function getModelFile(path_or_repo_id, filename, fatal = true, options = {
30088
30358
  }
30089
30359
 
30090
30360
  const revision = options.revision ?? 'main';
30361
+ const requestURL = pathJoin(path_or_repo_id, filename);
30091
30362
 
30092
- let requestURL = pathJoin(path_or_repo_id, filename);
30093
- let cachePath = pathJoin(_env_js__WEBPACK_IMPORTED_MODULE_2__.env.localModelPath, requestURL);
30094
-
30095
- let localPath = requestURL;
30096
- let remoteURL = pathJoin(
30363
+ const validModelId = isValidHfModelId(path_or_repo_id);
30364
+ const localPath = validModelId
30365
+ ? pathJoin(_env_js__WEBPACK_IMPORTED_MODULE_2__.env.localModelPath, requestURL)
30366
+ : requestURL;
30367
+ const remoteURL = pathJoin(
30097
30368
  _env_js__WEBPACK_IMPORTED_MODULE_2__.env.remoteHost,
30098
30369
  _env_js__WEBPACK_IMPORTED_MODULE_2__.env.remotePathTemplate
30099
30370
  .replaceAll('{model}', path_or_repo_id)
@@ -30101,14 +30372,14 @@ async function getModelFile(path_or_repo_id, filename, fatal = true, options = {
30101
30372
  filename
30102
30373
  );
30103
30374
 
30104
- // Choose cache key for filesystem cache
30105
- // When using the main revision (default), we use the request URL as the cache key.
30106
- // If a specific revision is requested, we account for this in the cache key.
30107
- let fsCacheKey = revision === 'main' ? requestURL : pathJoin(path_or_repo_id, revision, filename);
30108
-
30109
30375
  /** @type {string} */
30110
30376
  let cacheKey;
30111
- let proposedCacheKey = cache instanceof FileCache ? fsCacheKey : remoteURL;
30377
+ const proposedCacheKey = cache instanceof FileCache
30378
+ // Choose cache key for filesystem cache
30379
+ // When using the main revision (default), we use the request URL as the cache key.
30380
+ // If a specific revision is requested, we account for this in the cache key.
30381
+ ? revision === 'main' ? requestURL : pathJoin(path_or_repo_id, revision, filename)
30382
+ : remoteURL;
30112
30383
 
30113
30384
  // Whether to cache the final response in the end.
30114
30385
  let toCacheResponse = false;
@@ -30121,11 +30392,10 @@ async function getModelFile(path_or_repo_id, filename, fatal = true, options = {
30121
30392
  // 1. We first try to get from cache using the local path. In some environments (like deno),
30122
30393
  // non-URL cache keys are not allowed. In these cases, `response` will be undefined.
30123
30394
  // 2. If no response is found, we try to get from cache using the remote URL or file system cache.
30124
- response = await tryCache(cache, cachePath, proposedCacheKey);
30395
+ response = await tryCache(cache, localPath, proposedCacheKey);
30125
30396
  }
30126
30397
 
30127
30398
  const cacheHit = response !== undefined;
30128
-
30129
30399
  if (response === undefined) {
30130
30400
  // Caching not available, or file is not cached, so we perform the request
30131
30401
 
@@ -30143,9 +30413,9 @@ async function getModelFile(path_or_repo_id, filename, fatal = true, options = {
30143
30413
  console.warn(`Unable to load from local path "${localPath}": "${e}"`);
30144
30414
  }
30145
30415
  } else if (options.local_files_only) {
30146
- throw new Error(`\`local_files_only=true\`, but attempted to load a remote file from: ${localPath}.`);
30416
+ throw new Error(`\`local_files_only=true\`, but attempted to load a remote file from: ${requestURL}.`);
30147
30417
  } else if (!_env_js__WEBPACK_IMPORTED_MODULE_2__.env.allowRemoteModels) {
30148
- throw new Error(`\`env.allowRemoteModels=false\`, but attempted to load a remote file from: ${localPath}.`);
30418
+ throw new Error(`\`env.allowRemoteModels=false\`, but attempted to load a remote file from: ${requestURL}.`);
30149
30419
  }
30150
30420
  }
30151
30421
 
@@ -30165,6 +30435,11 @@ async function getModelFile(path_or_repo_id, filename, fatal = true, options = {
30165
30435
  return null;
30166
30436
  }
30167
30437
  }
30438
+ if (!validModelId) {
30439
+ // Before making any requests to the remote server, we check if the model ID is valid.
30440
+ // This prevents unnecessary network requests for invalid model IDs.
30441
+ throw Error(`Local file missing at "${localPath}" and download aborted due to invalid model ID "${path_or_repo_id}".`);
30442
+ }
30168
30443
 
30169
30444
  // File not found locally, so we try to download it from the remote server
30170
30445
  response = await getFile(remoteURL);
@@ -30495,7 +30770,7 @@ class RawImage {
30495
30770
 
30496
30771
  /**
30497
30772
  * Helper method for reading an image from a variety of input types.
30498
- * @param {RawImage|string|URL} input
30773
+ * @param {RawImage|string|URL|Blob|HTMLCanvasElement|OffscreenCanvas} input
30499
30774
  * @returns The image object.
30500
30775
  *
30501
30776
  * **Example:** Read image from a URL.
@@ -30514,6 +30789,14 @@ class RawImage {
30514
30789
  return input;
30515
30790
  } else if (typeof input === 'string' || input instanceof URL) {
30516
30791
  return await this.fromURL(input);
30792
+ } else if (input instanceof Blob) {
30793
+ return await this.fromBlob(input);
30794
+ } else if (
30795
+ (typeof HTMLCanvasElement !== "undefined" && input instanceof HTMLCanvasElement)
30796
+ ||
30797
+ (typeof OffscreenCanvas !== "undefined" && input instanceof OffscreenCanvas)
30798
+ ) {
30799
+ return this.fromCanvas(input);
30517
30800
  } else {
30518
30801
  throw new Error(`Unsupported input type: ${typeof input}`);
30519
30802
  }
@@ -33523,8 +33806,12 @@ function calc_unsqueeze_dims(dims, dim) {
33523
33806
  * @private
33524
33807
  */
33525
33808
  function safeIndex(index, size, dimension = null, boundsCheck = true) {
33526
- if (boundsCheck && (index < -size || index >= size)) {
33527
- throw new Error(`IndexError: index ${index} is out of bounds for dimension${dimension === null ? '' : ' ' + dimension} with size ${size}`);
33809
+ if (index < -size || index >= size) {
33810
+ if (boundsCheck) {
33811
+ throw new Error(`IndexError: index ${index} is out of bounds for dimension${dimension === null ? '' : ' ' + dimension} with size ${size}`);
33812
+ } else {
33813
+ return index < -size ? 0 : size;
33814
+ }
33528
33815
  }
33529
33816
 
33530
33817
  if (index < 0) {
@@ -34379,6 +34666,9 @@ __webpack_require__.r(__webpack_exports__);
34379
34666
  /* harmony export */ Gemma2ForCausalLM: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.Gemma2ForCausalLM),
34380
34667
  /* harmony export */ Gemma2Model: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.Gemma2Model),
34381
34668
  /* harmony export */ Gemma2PreTrainedModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.Gemma2PreTrainedModel),
34669
+ /* harmony export */ Gemma3ForCausalLM: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.Gemma3ForCausalLM),
34670
+ /* harmony export */ Gemma3Model: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.Gemma3Model),
34671
+ /* harmony export */ Gemma3PreTrainedModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.Gemma3PreTrainedModel),
34382
34672
  /* harmony export */ GemmaForCausalLM: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.GemmaForCausalLM),
34383
34673
  /* harmony export */ GemmaModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.GemmaModel),
34384
34674
  /* harmony export */ GemmaPreTrainedModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.GemmaPreTrainedModel),
@@ -34480,6 +34770,10 @@ __webpack_require__.r(__webpack_exports__);
34480
34770
  /* harmony export */ MaskFormerPreTrainedModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.MaskFormerPreTrainedModel),
34481
34771
  /* harmony export */ MaskedLMOutput: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.MaskedLMOutput),
34482
34772
  /* harmony export */ MaxLengthCriteria: () => (/* reexport safe */ _generation_stopping_criteria_js__WEBPACK_IMPORTED_MODULE_20__.MaxLengthCriteria),
34773
+ /* harmony export */ Metric3DForDepthEstimation: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.Metric3DForDepthEstimation),
34774
+ /* harmony export */ Metric3DPreTrainedModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.Metric3DPreTrainedModel),
34775
+ /* harmony export */ Metric3Dv2ForDepthEstimation: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.Metric3Dv2ForDepthEstimation),
34776
+ /* harmony export */ Metric3Dv2PreTrainedModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.Metric3Dv2PreTrainedModel),
34483
34777
  /* harmony export */ MgpstrForSceneTextRecognition: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.MgpstrForSceneTextRecognition),
34484
34778
  /* harmony export */ MgpstrModelOutput: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.MgpstrModelOutput),
34485
34779
  /* harmony export */ MgpstrPreTrainedModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.MgpstrPreTrainedModel),
@@ -34632,11 +34926,19 @@ __webpack_require__.r(__webpack_exports__);
34632
34926
  /* harmony export */ Qwen2VLImageProcessor: () => (/* reexport safe */ _models_image_processors_js__WEBPACK_IMPORTED_MODULE_14__.Qwen2VLImageProcessor),
34633
34927
  /* harmony export */ Qwen2VLPreTrainedModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.Qwen2VLPreTrainedModel),
34634
34928
  /* harmony export */ Qwen2VLProcessor: () => (/* reexport safe */ _models_processors_js__WEBPACK_IMPORTED_MODULE_17__.Qwen2VLProcessor),
34929
+ /* harmony export */ RFDetrForObjectDetection: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.RFDetrForObjectDetection),
34930
+ /* harmony export */ RFDetrModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.RFDetrModel),
34931
+ /* harmony export */ RFDetrObjectDetectionOutput: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.RFDetrObjectDetectionOutput),
34932
+ /* harmony export */ RFDetrPreTrainedModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.RFDetrPreTrainedModel),
34635
34933
  /* harmony export */ RTDetrForObjectDetection: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.RTDetrForObjectDetection),
34636
34934
  /* harmony export */ RTDetrImageProcessor: () => (/* reexport safe */ _models_image_processors_js__WEBPACK_IMPORTED_MODULE_14__.RTDetrImageProcessor),
34637
34935
  /* harmony export */ RTDetrModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.RTDetrModel),
34638
34936
  /* harmony export */ RTDetrObjectDetectionOutput: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.RTDetrObjectDetectionOutput),
34639
34937
  /* harmony export */ RTDetrPreTrainedModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.RTDetrPreTrainedModel),
34938
+ /* harmony export */ RTDetrV2ForObjectDetection: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.RTDetrV2ForObjectDetection),
34939
+ /* harmony export */ RTDetrV2Model: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.RTDetrV2Model),
34940
+ /* harmony export */ RTDetrV2ObjectDetectionOutput: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.RTDetrV2ObjectDetectionOutput),
34941
+ /* harmony export */ RTDetrV2PreTrainedModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.RTDetrV2PreTrainedModel),
34640
34942
  /* harmony export */ RawAudio: () => (/* reexport safe */ _utils_audio_js__WEBPACK_IMPORTED_MODULE_5__.RawAudio),
34641
34943
  /* harmony export */ RawImage: () => (/* reexport safe */ _utils_image_js__WEBPACK_IMPORTED_MODULE_6__.RawImage),
34642
34944
  /* harmony export */ RawVideo: () => (/* reexport safe */ _utils_video_js__WEBPACK_IMPORTED_MODULE_7__.RawVideo),
@@ -34686,6 +34988,11 @@ __webpack_require__.r(__webpack_exports__);
34686
34988
  /* harmony export */ SmolVLMForConditionalGeneration: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.SmolVLMForConditionalGeneration),
34687
34989
  /* harmony export */ SmolVLMImageProcessor: () => (/* reexport safe */ _models_image_processors_js__WEBPACK_IMPORTED_MODULE_14__.SmolVLMImageProcessor),
34688
34990
  /* harmony export */ SmolVLMProcessor: () => (/* reexport safe */ _models_processors_js__WEBPACK_IMPORTED_MODULE_17__.SmolVLMProcessor),
34991
+ /* harmony export */ SnacDecoderModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.SnacDecoderModel),
34992
+ /* harmony export */ SnacEncoderModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.SnacEncoderModel),
34993
+ /* harmony export */ SnacFeatureExtractor: () => (/* reexport safe */ _models_feature_extractors_js__WEBPACK_IMPORTED_MODULE_11__.SnacFeatureExtractor),
34994
+ /* harmony export */ SnacModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.SnacModel),
34995
+ /* harmony export */ SnacPreTrainedModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.SnacPreTrainedModel),
34689
34996
  /* harmony export */ SpeechT5FeatureExtractor: () => (/* reexport safe */ _models_feature_extractors_js__WEBPACK_IMPORTED_MODULE_11__.SpeechT5FeatureExtractor),
34690
34997
  /* harmony export */ SpeechT5ForSpeechToText: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.SpeechT5ForSpeechToText),
34691
34998
  /* harmony export */ SpeechT5ForTextToSpeech: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.SpeechT5ForTextToSpeech),
@@ -35208,6 +35515,9 @@ var __webpack_exports__GPTNeoXTokenizer = __webpack_exports__.GPTNeoXTokenizer;
35208
35515
  var __webpack_exports__Gemma2ForCausalLM = __webpack_exports__.Gemma2ForCausalLM;
35209
35516
  var __webpack_exports__Gemma2Model = __webpack_exports__.Gemma2Model;
35210
35517
  var __webpack_exports__Gemma2PreTrainedModel = __webpack_exports__.Gemma2PreTrainedModel;
35518
+ var __webpack_exports__Gemma3ForCausalLM = __webpack_exports__.Gemma3ForCausalLM;
35519
+ var __webpack_exports__Gemma3Model = __webpack_exports__.Gemma3Model;
35520
+ var __webpack_exports__Gemma3PreTrainedModel = __webpack_exports__.Gemma3PreTrainedModel;
35211
35521
  var __webpack_exports__GemmaForCausalLM = __webpack_exports__.GemmaForCausalLM;
35212
35522
  var __webpack_exports__GemmaModel = __webpack_exports__.GemmaModel;
35213
35523
  var __webpack_exports__GemmaPreTrainedModel = __webpack_exports__.GemmaPreTrainedModel;
@@ -35309,6 +35619,10 @@ var __webpack_exports__MaskFormerModel = __webpack_exports__.MaskFormerModel;
35309
35619
  var __webpack_exports__MaskFormerPreTrainedModel = __webpack_exports__.MaskFormerPreTrainedModel;
35310
35620
  var __webpack_exports__MaskedLMOutput = __webpack_exports__.MaskedLMOutput;
35311
35621
  var __webpack_exports__MaxLengthCriteria = __webpack_exports__.MaxLengthCriteria;
35622
+ var __webpack_exports__Metric3DForDepthEstimation = __webpack_exports__.Metric3DForDepthEstimation;
35623
+ var __webpack_exports__Metric3DPreTrainedModel = __webpack_exports__.Metric3DPreTrainedModel;
35624
+ var __webpack_exports__Metric3Dv2ForDepthEstimation = __webpack_exports__.Metric3Dv2ForDepthEstimation;
35625
+ var __webpack_exports__Metric3Dv2PreTrainedModel = __webpack_exports__.Metric3Dv2PreTrainedModel;
35312
35626
  var __webpack_exports__MgpstrForSceneTextRecognition = __webpack_exports__.MgpstrForSceneTextRecognition;
35313
35627
  var __webpack_exports__MgpstrModelOutput = __webpack_exports__.MgpstrModelOutput;
35314
35628
  var __webpack_exports__MgpstrPreTrainedModel = __webpack_exports__.MgpstrPreTrainedModel;
@@ -35461,11 +35775,19 @@ var __webpack_exports__Qwen2VLForConditionalGeneration = __webpack_exports__.Qwe
35461
35775
  var __webpack_exports__Qwen2VLImageProcessor = __webpack_exports__.Qwen2VLImageProcessor;
35462
35776
  var __webpack_exports__Qwen2VLPreTrainedModel = __webpack_exports__.Qwen2VLPreTrainedModel;
35463
35777
  var __webpack_exports__Qwen2VLProcessor = __webpack_exports__.Qwen2VLProcessor;
35778
+ var __webpack_exports__RFDetrForObjectDetection = __webpack_exports__.RFDetrForObjectDetection;
35779
+ var __webpack_exports__RFDetrModel = __webpack_exports__.RFDetrModel;
35780
+ var __webpack_exports__RFDetrObjectDetectionOutput = __webpack_exports__.RFDetrObjectDetectionOutput;
35781
+ var __webpack_exports__RFDetrPreTrainedModel = __webpack_exports__.RFDetrPreTrainedModel;
35464
35782
  var __webpack_exports__RTDetrForObjectDetection = __webpack_exports__.RTDetrForObjectDetection;
35465
35783
  var __webpack_exports__RTDetrImageProcessor = __webpack_exports__.RTDetrImageProcessor;
35466
35784
  var __webpack_exports__RTDetrModel = __webpack_exports__.RTDetrModel;
35467
35785
  var __webpack_exports__RTDetrObjectDetectionOutput = __webpack_exports__.RTDetrObjectDetectionOutput;
35468
35786
  var __webpack_exports__RTDetrPreTrainedModel = __webpack_exports__.RTDetrPreTrainedModel;
35787
+ var __webpack_exports__RTDetrV2ForObjectDetection = __webpack_exports__.RTDetrV2ForObjectDetection;
35788
+ var __webpack_exports__RTDetrV2Model = __webpack_exports__.RTDetrV2Model;
35789
+ var __webpack_exports__RTDetrV2ObjectDetectionOutput = __webpack_exports__.RTDetrV2ObjectDetectionOutput;
35790
+ var __webpack_exports__RTDetrV2PreTrainedModel = __webpack_exports__.RTDetrV2PreTrainedModel;
35469
35791
  var __webpack_exports__RawAudio = __webpack_exports__.RawAudio;
35470
35792
  var __webpack_exports__RawImage = __webpack_exports__.RawImage;
35471
35793
  var __webpack_exports__RawVideo = __webpack_exports__.RawVideo;
@@ -35515,6 +35837,11 @@ var __webpack_exports__SiglipVisionModel = __webpack_exports__.SiglipVisionModel
35515
35837
  var __webpack_exports__SmolVLMForConditionalGeneration = __webpack_exports__.SmolVLMForConditionalGeneration;
35516
35838
  var __webpack_exports__SmolVLMImageProcessor = __webpack_exports__.SmolVLMImageProcessor;
35517
35839
  var __webpack_exports__SmolVLMProcessor = __webpack_exports__.SmolVLMProcessor;
35840
+ var __webpack_exports__SnacDecoderModel = __webpack_exports__.SnacDecoderModel;
35841
+ var __webpack_exports__SnacEncoderModel = __webpack_exports__.SnacEncoderModel;
35842
+ var __webpack_exports__SnacFeatureExtractor = __webpack_exports__.SnacFeatureExtractor;
35843
+ var __webpack_exports__SnacModel = __webpack_exports__.SnacModel;
35844
+ var __webpack_exports__SnacPreTrainedModel = __webpack_exports__.SnacPreTrainedModel;
35518
35845
  var __webpack_exports__SpeechT5FeatureExtractor = __webpack_exports__.SpeechT5FeatureExtractor;
35519
35846
  var __webpack_exports__SpeechT5ForSpeechToText = __webpack_exports__.SpeechT5ForSpeechToText;
35520
35847
  var __webpack_exports__SpeechT5ForTextToSpeech = __webpack_exports__.SpeechT5ForTextToSpeech;
@@ -35708,6 +36035,6 @@ var __webpack_exports__topk = __webpack_exports__.topk;
35708
36035
  var __webpack_exports__window_function = __webpack_exports__.window_function;
35709
36036
  var __webpack_exports__zeros = __webpack_exports__.zeros;
35710
36037
  var __webpack_exports__zeros_like = __webpack_exports__.zeros_like;
35711
- export { __webpack_exports__ASTFeatureExtractor as ASTFeatureExtractor, __webpack_exports__ASTForAudioClassification as ASTForAudioClassification, __webpack_exports__ASTModel as ASTModel, __webpack_exports__ASTPreTrainedModel as ASTPreTrainedModel, __webpack_exports__AlbertForMaskedLM as AlbertForMaskedLM, __webpack_exports__AlbertForQuestionAnswering as AlbertForQuestionAnswering, __webpack_exports__AlbertForSequenceClassification as AlbertForSequenceClassification, __webpack_exports__AlbertModel as AlbertModel, __webpack_exports__AlbertPreTrainedModel as AlbertPreTrainedModel, __webpack_exports__AlbertTokenizer as AlbertTokenizer, __webpack_exports__AudioClassificationPipeline as AudioClassificationPipeline, __webpack_exports__AutoConfig as AutoConfig, __webpack_exports__AutoFeatureExtractor as AutoFeatureExtractor, __webpack_exports__AutoImageProcessor as AutoImageProcessor, __webpack_exports__AutoModel as AutoModel, __webpack_exports__AutoModelForAudioClassification as AutoModelForAudioClassification, __webpack_exports__AutoModelForAudioFrameClassification as AutoModelForAudioFrameClassification, __webpack_exports__AutoModelForAudioTextToText as AutoModelForAudioTextToText, __webpack_exports__AutoModelForCTC as AutoModelForCTC, __webpack_exports__AutoModelForCausalLM as AutoModelForCausalLM, __webpack_exports__AutoModelForDepthEstimation as AutoModelForDepthEstimation, __webpack_exports__AutoModelForDocumentQuestionAnswering as AutoModelForDocumentQuestionAnswering, __webpack_exports__AutoModelForImageClassification as AutoModelForImageClassification, __webpack_exports__AutoModelForImageFeatureExtraction as AutoModelForImageFeatureExtraction, __webpack_exports__AutoModelForImageMatting as AutoModelForImageMatting, __webpack_exports__AutoModelForImageSegmentation as AutoModelForImageSegmentation, __webpack_exports__AutoModelForImageTextToText as AutoModelForImageTextToText, __webpack_exports__AutoModelForImageToImage as AutoModelForImageToImage, 
__webpack_exports__AutoModelForMaskGeneration as AutoModelForMaskGeneration, __webpack_exports__AutoModelForMaskedLM as AutoModelForMaskedLM, __webpack_exports__AutoModelForNormalEstimation as AutoModelForNormalEstimation, __webpack_exports__AutoModelForObjectDetection as AutoModelForObjectDetection, __webpack_exports__AutoModelForPoseEstimation as AutoModelForPoseEstimation, __webpack_exports__AutoModelForQuestionAnswering as AutoModelForQuestionAnswering, __webpack_exports__AutoModelForSemanticSegmentation as AutoModelForSemanticSegmentation, __webpack_exports__AutoModelForSeq2SeqLM as AutoModelForSeq2SeqLM, __webpack_exports__AutoModelForSequenceClassification as AutoModelForSequenceClassification, __webpack_exports__AutoModelForSpeechSeq2Seq as AutoModelForSpeechSeq2Seq, __webpack_exports__AutoModelForTextToSpectrogram as AutoModelForTextToSpectrogram, __webpack_exports__AutoModelForTextToWaveform as AutoModelForTextToWaveform, __webpack_exports__AutoModelForTokenClassification as AutoModelForTokenClassification, __webpack_exports__AutoModelForUniversalSegmentation as AutoModelForUniversalSegmentation, __webpack_exports__AutoModelForVision2Seq as AutoModelForVision2Seq, __webpack_exports__AutoModelForXVector as AutoModelForXVector, __webpack_exports__AutoModelForZeroShotObjectDetection as AutoModelForZeroShotObjectDetection, __webpack_exports__AutoProcessor as AutoProcessor, __webpack_exports__AutoTokenizer as AutoTokenizer, __webpack_exports__AutomaticSpeechRecognitionPipeline as AutomaticSpeechRecognitionPipeline, __webpack_exports__BackgroundRemovalPipeline as BackgroundRemovalPipeline, __webpack_exports__BartForConditionalGeneration as BartForConditionalGeneration, __webpack_exports__BartForSequenceClassification as BartForSequenceClassification, __webpack_exports__BartModel as BartModel, __webpack_exports__BartPretrainedModel as BartPretrainedModel, __webpack_exports__BartTokenizer as BartTokenizer, __webpack_exports__BaseModelOutput as BaseModelOutput, 
__webpack_exports__BaseStreamer as BaseStreamer, __webpack_exports__BeitFeatureExtractor as BeitFeatureExtractor, __webpack_exports__BeitForImageClassification as BeitForImageClassification, __webpack_exports__BeitModel as BeitModel, __webpack_exports__BeitPreTrainedModel as BeitPreTrainedModel, __webpack_exports__BertForMaskedLM as BertForMaskedLM, __webpack_exports__BertForQuestionAnswering as BertForQuestionAnswering, __webpack_exports__BertForSequenceClassification as BertForSequenceClassification, __webpack_exports__BertForTokenClassification as BertForTokenClassification, __webpack_exports__BertModel as BertModel, __webpack_exports__BertPreTrainedModel as BertPreTrainedModel, __webpack_exports__BertTokenizer as BertTokenizer, __webpack_exports__BitImageProcessor as BitImageProcessor, __webpack_exports__BlenderbotForConditionalGeneration as BlenderbotForConditionalGeneration, __webpack_exports__BlenderbotModel as BlenderbotModel, __webpack_exports__BlenderbotPreTrainedModel as BlenderbotPreTrainedModel, __webpack_exports__BlenderbotSmallForConditionalGeneration as BlenderbotSmallForConditionalGeneration, __webpack_exports__BlenderbotSmallModel as BlenderbotSmallModel, __webpack_exports__BlenderbotSmallPreTrainedModel as BlenderbotSmallPreTrainedModel, __webpack_exports__BlenderbotSmallTokenizer as BlenderbotSmallTokenizer, __webpack_exports__BlenderbotTokenizer as BlenderbotTokenizer, __webpack_exports__BloomForCausalLM as BloomForCausalLM, __webpack_exports__BloomModel as BloomModel, __webpack_exports__BloomPreTrainedModel as BloomPreTrainedModel, __webpack_exports__BloomTokenizer as BloomTokenizer, __webpack_exports__CLIPFeatureExtractor as CLIPFeatureExtractor, __webpack_exports__CLIPImageProcessor as CLIPImageProcessor, __webpack_exports__CLIPModel as CLIPModel, __webpack_exports__CLIPPreTrainedModel as CLIPPreTrainedModel, __webpack_exports__CLIPSegForImageSegmentation as CLIPSegForImageSegmentation, __webpack_exports__CLIPSegModel as CLIPSegModel, 
__webpack_exports__CLIPSegPreTrainedModel as CLIPSegPreTrainedModel, __webpack_exports__CLIPTextModel as CLIPTextModel, __webpack_exports__CLIPTextModelWithProjection as CLIPTextModelWithProjection, __webpack_exports__CLIPTokenizer as CLIPTokenizer, __webpack_exports__CLIPVisionModel as CLIPVisionModel, __webpack_exports__CLIPVisionModelWithProjection as CLIPVisionModelWithProjection, __webpack_exports__CamembertForMaskedLM as CamembertForMaskedLM, __webpack_exports__CamembertForQuestionAnswering as CamembertForQuestionAnswering, __webpack_exports__CamembertForSequenceClassification as CamembertForSequenceClassification, __webpack_exports__CamembertForTokenClassification as CamembertForTokenClassification, __webpack_exports__CamembertModel as CamembertModel, __webpack_exports__CamembertPreTrainedModel as CamembertPreTrainedModel, __webpack_exports__CamembertTokenizer as CamembertTokenizer, __webpack_exports__CausalLMOutput as CausalLMOutput, __webpack_exports__CausalLMOutputWithPast as CausalLMOutputWithPast, __webpack_exports__ChineseCLIPFeatureExtractor as ChineseCLIPFeatureExtractor, __webpack_exports__ChineseCLIPModel as ChineseCLIPModel, __webpack_exports__ChineseCLIPPreTrainedModel as ChineseCLIPPreTrainedModel, __webpack_exports__ClapAudioModelWithProjection as ClapAudioModelWithProjection, __webpack_exports__ClapFeatureExtractor as ClapFeatureExtractor, __webpack_exports__ClapModel as ClapModel, __webpack_exports__ClapPreTrainedModel as ClapPreTrainedModel, __webpack_exports__ClapTextModelWithProjection as ClapTextModelWithProjection, __webpack_exports__ClassifierFreeGuidanceLogitsProcessor as ClassifierFreeGuidanceLogitsProcessor, __webpack_exports__CodeGenForCausalLM as CodeGenForCausalLM, __webpack_exports__CodeGenModel as CodeGenModel, __webpack_exports__CodeGenPreTrainedModel as CodeGenPreTrainedModel, __webpack_exports__CodeGenTokenizer as CodeGenTokenizer, __webpack_exports__CodeLlamaTokenizer as CodeLlamaTokenizer, 
__webpack_exports__CohereForCausalLM as CohereForCausalLM, __webpack_exports__CohereModel as CohereModel, __webpack_exports__CoherePreTrainedModel as CoherePreTrainedModel, __webpack_exports__CohereTokenizer as CohereTokenizer, __webpack_exports__ConvBertForMaskedLM as ConvBertForMaskedLM, __webpack_exports__ConvBertForQuestionAnswering as ConvBertForQuestionAnswering, __webpack_exports__ConvBertForSequenceClassification as ConvBertForSequenceClassification, __webpack_exports__ConvBertForTokenClassification as ConvBertForTokenClassification, __webpack_exports__ConvBertModel as ConvBertModel, __webpack_exports__ConvBertPreTrainedModel as ConvBertPreTrainedModel, __webpack_exports__ConvBertTokenizer as ConvBertTokenizer, __webpack_exports__ConvNextFeatureExtractor as ConvNextFeatureExtractor, __webpack_exports__ConvNextForImageClassification as ConvNextForImageClassification, __webpack_exports__ConvNextImageProcessor as ConvNextImageProcessor, __webpack_exports__ConvNextModel as ConvNextModel, __webpack_exports__ConvNextPreTrainedModel as ConvNextPreTrainedModel, __webpack_exports__ConvNextV2ForImageClassification as ConvNextV2ForImageClassification, __webpack_exports__ConvNextV2Model as ConvNextV2Model, __webpack_exports__ConvNextV2PreTrainedModel as ConvNextV2PreTrainedModel, __webpack_exports__DPTFeatureExtractor as DPTFeatureExtractor, __webpack_exports__DPTForDepthEstimation as DPTForDepthEstimation, __webpack_exports__DPTImageProcessor as DPTImageProcessor, __webpack_exports__DPTModel as DPTModel, __webpack_exports__DPTPreTrainedModel as DPTPreTrainedModel, __webpack_exports__DacDecoderModel as DacDecoderModel, __webpack_exports__DacDecoderOutput as DacDecoderOutput, __webpack_exports__DacEncoderModel as DacEncoderModel, __webpack_exports__DacEncoderOutput as DacEncoderOutput, __webpack_exports__DacFeatureExtractor as DacFeatureExtractor, __webpack_exports__DacModel as DacModel, __webpack_exports__DacPreTrainedModel as DacPreTrainedModel, 
__webpack_exports__DataTypeMap as DataTypeMap, __webpack_exports__DebertaForMaskedLM as DebertaForMaskedLM, __webpack_exports__DebertaForQuestionAnswering as DebertaForQuestionAnswering, __webpack_exports__DebertaForSequenceClassification as DebertaForSequenceClassification, __webpack_exports__DebertaForTokenClassification as DebertaForTokenClassification, __webpack_exports__DebertaModel as DebertaModel, __webpack_exports__DebertaPreTrainedModel as DebertaPreTrainedModel, __webpack_exports__DebertaTokenizer as DebertaTokenizer, __webpack_exports__DebertaV2ForMaskedLM as DebertaV2ForMaskedLM, __webpack_exports__DebertaV2ForQuestionAnswering as DebertaV2ForQuestionAnswering, __webpack_exports__DebertaV2ForSequenceClassification as DebertaV2ForSequenceClassification, __webpack_exports__DebertaV2ForTokenClassification as DebertaV2ForTokenClassification, __webpack_exports__DebertaV2Model as DebertaV2Model, __webpack_exports__DebertaV2PreTrainedModel as DebertaV2PreTrainedModel, __webpack_exports__DebertaV2Tokenizer as DebertaV2Tokenizer, __webpack_exports__DecisionTransformerModel as DecisionTransformerModel, __webpack_exports__DecisionTransformerPreTrainedModel as DecisionTransformerPreTrainedModel, __webpack_exports__DeiTFeatureExtractor as DeiTFeatureExtractor, __webpack_exports__DeiTForImageClassification as DeiTForImageClassification, __webpack_exports__DeiTImageProcessor as DeiTImageProcessor, __webpack_exports__DeiTModel as DeiTModel, __webpack_exports__DeiTPreTrainedModel as DeiTPreTrainedModel, __webpack_exports__DepthAnythingForDepthEstimation as DepthAnythingForDepthEstimation, __webpack_exports__DepthAnythingPreTrainedModel as DepthAnythingPreTrainedModel, __webpack_exports__DepthEstimationPipeline as DepthEstimationPipeline, __webpack_exports__DepthProForDepthEstimation as DepthProForDepthEstimation, __webpack_exports__DepthProPreTrainedModel as DepthProPreTrainedModel, __webpack_exports__DetrFeatureExtractor as DetrFeatureExtractor, 
__webpack_exports__DetrForObjectDetection as DetrForObjectDetection, __webpack_exports__DetrForSegmentation as DetrForSegmentation, __webpack_exports__DetrImageProcessor as DetrImageProcessor, __webpack_exports__DetrModel as DetrModel, __webpack_exports__DetrObjectDetectionOutput as DetrObjectDetectionOutput, __webpack_exports__DetrPreTrainedModel as DetrPreTrainedModel, __webpack_exports__DetrSegmentationOutput as DetrSegmentationOutput, __webpack_exports__Dinov2ForImageClassification as Dinov2ForImageClassification, __webpack_exports__Dinov2Model as Dinov2Model, __webpack_exports__Dinov2PreTrainedModel as Dinov2PreTrainedModel, __webpack_exports__Dinov2WithRegistersForImageClassification as Dinov2WithRegistersForImageClassification, __webpack_exports__Dinov2WithRegistersModel as Dinov2WithRegistersModel, __webpack_exports__Dinov2WithRegistersPreTrainedModel as Dinov2WithRegistersPreTrainedModel, __webpack_exports__DistilBertForMaskedLM as DistilBertForMaskedLM, __webpack_exports__DistilBertForQuestionAnswering as DistilBertForQuestionAnswering, __webpack_exports__DistilBertForSequenceClassification as DistilBertForSequenceClassification, __webpack_exports__DistilBertForTokenClassification as DistilBertForTokenClassification, __webpack_exports__DistilBertModel as DistilBertModel, __webpack_exports__DistilBertPreTrainedModel as DistilBertPreTrainedModel, __webpack_exports__DistilBertTokenizer as DistilBertTokenizer, __webpack_exports__DocumentQuestionAnsweringPipeline as DocumentQuestionAnsweringPipeline, __webpack_exports__DonutFeatureExtractor as DonutFeatureExtractor, __webpack_exports__DonutImageProcessor as DonutImageProcessor, __webpack_exports__DonutSwinModel as DonutSwinModel, __webpack_exports__DonutSwinPreTrainedModel as DonutSwinPreTrainedModel, __webpack_exports__EfficientNetForImageClassification as EfficientNetForImageClassification, __webpack_exports__EfficientNetImageProcessor as EfficientNetImageProcessor, __webpack_exports__EfficientNetModel as 
EfficientNetModel, __webpack_exports__EfficientNetPreTrainedModel as EfficientNetPreTrainedModel, __webpack_exports__ElectraForMaskedLM as ElectraForMaskedLM, __webpack_exports__ElectraForQuestionAnswering as ElectraForQuestionAnswering, __webpack_exports__ElectraForSequenceClassification as ElectraForSequenceClassification, __webpack_exports__ElectraForTokenClassification as ElectraForTokenClassification, __webpack_exports__ElectraModel as ElectraModel, __webpack_exports__ElectraPreTrainedModel as ElectraPreTrainedModel, __webpack_exports__ElectraTokenizer as ElectraTokenizer, __webpack_exports__EncodecFeatureExtractor as EncodecFeatureExtractor, __webpack_exports__EosTokenCriteria as EosTokenCriteria, __webpack_exports__EsmForMaskedLM as EsmForMaskedLM, __webpack_exports__EsmForSequenceClassification as EsmForSequenceClassification, __webpack_exports__EsmForTokenClassification as EsmForTokenClassification, __webpack_exports__EsmModel as EsmModel, __webpack_exports__EsmPreTrainedModel as EsmPreTrainedModel, __webpack_exports__EsmTokenizer as EsmTokenizer, __webpack_exports__ExaoneForCausalLM as ExaoneForCausalLM, __webpack_exports__ExaoneModel as ExaoneModel, __webpack_exports__ExaonePreTrainedModel as ExaonePreTrainedModel, __webpack_exports__FFT as FFT, __webpack_exports__FalconForCausalLM as FalconForCausalLM, __webpack_exports__FalconModel as FalconModel, __webpack_exports__FalconPreTrainedModel as FalconPreTrainedModel, __webpack_exports__FalconTokenizer as FalconTokenizer, __webpack_exports__FastViTForImageClassification as FastViTForImageClassification, __webpack_exports__FastViTModel as FastViTModel, __webpack_exports__FastViTPreTrainedModel as FastViTPreTrainedModel, __webpack_exports__FeatureExtractionPipeline as FeatureExtractionPipeline, __webpack_exports__FeatureExtractor as FeatureExtractor, __webpack_exports__FillMaskPipeline as FillMaskPipeline, __webpack_exports__Florence2ForConditionalGeneration as Florence2ForConditionalGeneration, 
__webpack_exports__Florence2PreTrainedModel as Florence2PreTrainedModel, __webpack_exports__Florence2Processor as Florence2Processor, __webpack_exports__ForcedBOSTokenLogitsProcessor as ForcedBOSTokenLogitsProcessor, __webpack_exports__ForcedEOSTokenLogitsProcessor as ForcedEOSTokenLogitsProcessor, __webpack_exports__GLPNFeatureExtractor as GLPNFeatureExtractor, __webpack_exports__GLPNForDepthEstimation as GLPNForDepthEstimation, __webpack_exports__GLPNModel as GLPNModel, __webpack_exports__GLPNPreTrainedModel as GLPNPreTrainedModel, __webpack_exports__GPT2LMHeadModel as GPT2LMHeadModel, __webpack_exports__GPT2Model as GPT2Model, __webpack_exports__GPT2PreTrainedModel as GPT2PreTrainedModel, __webpack_exports__GPT2Tokenizer as GPT2Tokenizer, __webpack_exports__GPTBigCodeForCausalLM as GPTBigCodeForCausalLM, __webpack_exports__GPTBigCodeModel as GPTBigCodeModel, __webpack_exports__GPTBigCodePreTrainedModel as GPTBigCodePreTrainedModel, __webpack_exports__GPTJForCausalLM as GPTJForCausalLM, __webpack_exports__GPTJModel as GPTJModel, __webpack_exports__GPTJPreTrainedModel as GPTJPreTrainedModel, __webpack_exports__GPTNeoForCausalLM as GPTNeoForCausalLM, __webpack_exports__GPTNeoModel as GPTNeoModel, __webpack_exports__GPTNeoPreTrainedModel as GPTNeoPreTrainedModel, __webpack_exports__GPTNeoXForCausalLM as GPTNeoXForCausalLM, __webpack_exports__GPTNeoXModel as GPTNeoXModel, __webpack_exports__GPTNeoXPreTrainedModel as GPTNeoXPreTrainedModel, __webpack_exports__GPTNeoXTokenizer as GPTNeoXTokenizer, __webpack_exports__Gemma2ForCausalLM as Gemma2ForCausalLM, __webpack_exports__Gemma2Model as Gemma2Model, __webpack_exports__Gemma2PreTrainedModel as Gemma2PreTrainedModel, __webpack_exports__GemmaForCausalLM as GemmaForCausalLM, __webpack_exports__GemmaModel as GemmaModel, __webpack_exports__GemmaPreTrainedModel as GemmaPreTrainedModel, __webpack_exports__GemmaTokenizer as GemmaTokenizer, __webpack_exports__GlmForCausalLM as GlmForCausalLM, __webpack_exports__GlmModel as 
GlmModel, __webpack_exports__GlmPreTrainedModel as GlmPreTrainedModel, __webpack_exports__GraniteForCausalLM as GraniteForCausalLM, __webpack_exports__GraniteModel as GraniteModel, __webpack_exports__GranitePreTrainedModel as GranitePreTrainedModel, __webpack_exports__Grok1Tokenizer as Grok1Tokenizer, __webpack_exports__GroundingDinoForObjectDetection as GroundingDinoForObjectDetection, __webpack_exports__GroundingDinoImageProcessor as GroundingDinoImageProcessor, __webpack_exports__GroundingDinoPreTrainedModel as GroundingDinoPreTrainedModel, __webpack_exports__GroundingDinoProcessor as GroundingDinoProcessor, __webpack_exports__GroupViTModel as GroupViTModel, __webpack_exports__GroupViTPreTrainedModel as GroupViTPreTrainedModel, __webpack_exports__HeliumForCausalLM as HeliumForCausalLM, __webpack_exports__HeliumModel as HeliumModel, __webpack_exports__HeliumPreTrainedModel as HeliumPreTrainedModel, __webpack_exports__HerbertTokenizer as HerbertTokenizer, __webpack_exports__HieraForImageClassification as HieraForImageClassification, __webpack_exports__HieraModel as HieraModel, __webpack_exports__HieraPreTrainedModel as HieraPreTrainedModel, __webpack_exports__HubertForCTC as HubertForCTC, __webpack_exports__HubertForSequenceClassification as HubertForSequenceClassification, __webpack_exports__HubertModel as HubertModel, __webpack_exports__HubertPreTrainedModel as HubertPreTrainedModel, __webpack_exports__IJepaForImageClassification as IJepaForImageClassification, __webpack_exports__IJepaModel as IJepaModel, __webpack_exports__IJepaPreTrainedModel as IJepaPreTrainedModel, __webpack_exports__Idefics3ForConditionalGeneration as Idefics3ForConditionalGeneration, __webpack_exports__Idefics3ImageProcessor as Idefics3ImageProcessor, __webpack_exports__Idefics3PreTrainedModel as Idefics3PreTrainedModel, __webpack_exports__Idefics3Processor as Idefics3Processor, __webpack_exports__ImageClassificationPipeline as ImageClassificationPipeline, 
__webpack_exports__ImageFeatureExtractionPipeline as ImageFeatureExtractionPipeline, __webpack_exports__ImageFeatureExtractor as ImageFeatureExtractor, __webpack_exports__ImageMattingOutput as ImageMattingOutput, __webpack_exports__ImageProcessor as ImageProcessor, __webpack_exports__ImageSegmentationPipeline as ImageSegmentationPipeline, __webpack_exports__ImageToImagePipeline as ImageToImagePipeline, __webpack_exports__ImageToTextPipeline as ImageToTextPipeline, __webpack_exports__InterruptableStoppingCriteria as InterruptableStoppingCriteria, __webpack_exports__JAISLMHeadModel as JAISLMHeadModel, __webpack_exports__JAISModel as JAISModel, __webpack_exports__JAISPreTrainedModel as JAISPreTrainedModel, __webpack_exports__JinaCLIPImageProcessor as JinaCLIPImageProcessor, __webpack_exports__JinaCLIPModel as JinaCLIPModel, __webpack_exports__JinaCLIPPreTrainedModel as JinaCLIPPreTrainedModel, __webpack_exports__JinaCLIPProcessor as JinaCLIPProcessor, __webpack_exports__JinaCLIPTextModel as JinaCLIPTextModel, __webpack_exports__JinaCLIPVisionModel as JinaCLIPVisionModel, __webpack_exports__LiteWhisperForConditionalGeneration as LiteWhisperForConditionalGeneration, __webpack_exports__LlamaForCausalLM as LlamaForCausalLM, __webpack_exports__LlamaModel as LlamaModel, __webpack_exports__LlamaPreTrainedModel as LlamaPreTrainedModel, __webpack_exports__LlamaTokenizer as LlamaTokenizer, __webpack_exports__LlavaForConditionalGeneration as LlavaForConditionalGeneration, __webpack_exports__LlavaOnevisionForConditionalGeneration as LlavaOnevisionForConditionalGeneration, __webpack_exports__LlavaOnevisionImageProcessor as LlavaOnevisionImageProcessor, __webpack_exports__LlavaPreTrainedModel as LlavaPreTrainedModel, __webpack_exports__LogitsProcessor as LogitsProcessor, __webpack_exports__LogitsProcessorList as LogitsProcessorList, __webpack_exports__LogitsWarper as LogitsWarper, __webpack_exports__LongT5ForConditionalGeneration as LongT5ForConditionalGeneration, 
__webpack_exports__LongT5Model as LongT5Model, __webpack_exports__LongT5PreTrainedModel as LongT5PreTrainedModel, __webpack_exports__M2M100ForConditionalGeneration as M2M100ForConditionalGeneration, __webpack_exports__M2M100Model as M2M100Model, __webpack_exports__M2M100PreTrainedModel as M2M100PreTrainedModel, __webpack_exports__M2M100Tokenizer as M2M100Tokenizer, __webpack_exports__MBart50Tokenizer as MBart50Tokenizer, __webpack_exports__MBartForCausalLM as MBartForCausalLM, __webpack_exports__MBartForConditionalGeneration as MBartForConditionalGeneration, __webpack_exports__MBartForSequenceClassification as MBartForSequenceClassification, __webpack_exports__MBartModel as MBartModel, __webpack_exports__MBartPreTrainedModel as MBartPreTrainedModel, __webpack_exports__MBartTokenizer as MBartTokenizer, __webpack_exports__MPNetForMaskedLM as MPNetForMaskedLM, __webpack_exports__MPNetForQuestionAnswering as MPNetForQuestionAnswering, __webpack_exports__MPNetForSequenceClassification as MPNetForSequenceClassification, __webpack_exports__MPNetForTokenClassification as MPNetForTokenClassification, __webpack_exports__MPNetModel as MPNetModel, __webpack_exports__MPNetPreTrainedModel as MPNetPreTrainedModel, __webpack_exports__MPNetTokenizer as MPNetTokenizer, __webpack_exports__MT5ForConditionalGeneration as MT5ForConditionalGeneration, __webpack_exports__MT5Model as MT5Model, __webpack_exports__MT5PreTrainedModel as MT5PreTrainedModel, __webpack_exports__MarianMTModel as MarianMTModel, __webpack_exports__MarianModel as MarianModel, __webpack_exports__MarianPreTrainedModel as MarianPreTrainedModel, __webpack_exports__MarianTokenizer as MarianTokenizer, __webpack_exports__Mask2FormerImageProcessor as Mask2FormerImageProcessor, __webpack_exports__MaskFormerFeatureExtractor as MaskFormerFeatureExtractor, __webpack_exports__MaskFormerForInstanceSegmentation as MaskFormerForInstanceSegmentation, __webpack_exports__MaskFormerImageProcessor as MaskFormerImageProcessor, 
__webpack_exports__MaskFormerModel as MaskFormerModel, __webpack_exports__MaskFormerPreTrainedModel as MaskFormerPreTrainedModel, __webpack_exports__MaskedLMOutput as MaskedLMOutput, __webpack_exports__MaxLengthCriteria as MaxLengthCriteria, __webpack_exports__MgpstrForSceneTextRecognition as MgpstrForSceneTextRecognition, __webpack_exports__MgpstrModelOutput as MgpstrModelOutput, __webpack_exports__MgpstrPreTrainedModel as MgpstrPreTrainedModel, __webpack_exports__MgpstrProcessor as MgpstrProcessor, __webpack_exports__MgpstrTokenizer as MgpstrTokenizer, __webpack_exports__MimiDecoderModel as MimiDecoderModel, __webpack_exports__MimiDecoderOutput as MimiDecoderOutput, __webpack_exports__MimiEncoderModel as MimiEncoderModel, __webpack_exports__MimiEncoderOutput as MimiEncoderOutput, __webpack_exports__MimiModel as MimiModel, __webpack_exports__MimiPreTrainedModel as MimiPreTrainedModel, __webpack_exports__MinLengthLogitsProcessor as MinLengthLogitsProcessor, __webpack_exports__MinNewTokensLengthLogitsProcessor as MinNewTokensLengthLogitsProcessor, __webpack_exports__MistralForCausalLM as MistralForCausalLM, __webpack_exports__MistralModel as MistralModel, __webpack_exports__MistralPreTrainedModel as MistralPreTrainedModel, __webpack_exports__MobileBertForMaskedLM as MobileBertForMaskedLM, __webpack_exports__MobileBertForQuestionAnswering as MobileBertForQuestionAnswering, __webpack_exports__MobileBertForSequenceClassification as MobileBertForSequenceClassification, __webpack_exports__MobileBertModel as MobileBertModel, __webpack_exports__MobileBertPreTrainedModel as MobileBertPreTrainedModel, __webpack_exports__MobileBertTokenizer as MobileBertTokenizer, __webpack_exports__MobileLLMForCausalLM as MobileLLMForCausalLM, __webpack_exports__MobileLLMModel as MobileLLMModel, __webpack_exports__MobileLLMPreTrainedModel as MobileLLMPreTrainedModel, __webpack_exports__MobileNetV1FeatureExtractor as MobileNetV1FeatureExtractor, 
__webpack_exports__MobileNetV1ForImageClassification as MobileNetV1ForImageClassification, __webpack_exports__MobileNetV1ForSemanticSegmentation as MobileNetV1ForSemanticSegmentation, __webpack_exports__MobileNetV1ImageProcessor as MobileNetV1ImageProcessor, __webpack_exports__MobileNetV1Model as MobileNetV1Model, __webpack_exports__MobileNetV1PreTrainedModel as MobileNetV1PreTrainedModel, __webpack_exports__MobileNetV2FeatureExtractor as MobileNetV2FeatureExtractor, __webpack_exports__MobileNetV2ForImageClassification as MobileNetV2ForImageClassification, __webpack_exports__MobileNetV2ForSemanticSegmentation as MobileNetV2ForSemanticSegmentation, __webpack_exports__MobileNetV2ImageProcessor as MobileNetV2ImageProcessor, __webpack_exports__MobileNetV2Model as MobileNetV2Model, __webpack_exports__MobileNetV2PreTrainedModel as MobileNetV2PreTrainedModel, __webpack_exports__MobileNetV3FeatureExtractor as MobileNetV3FeatureExtractor, __webpack_exports__MobileNetV3ForImageClassification as MobileNetV3ForImageClassification, __webpack_exports__MobileNetV3ForSemanticSegmentation as MobileNetV3ForSemanticSegmentation, __webpack_exports__MobileNetV3ImageProcessor as MobileNetV3ImageProcessor, __webpack_exports__MobileNetV3Model as MobileNetV3Model, __webpack_exports__MobileNetV3PreTrainedModel as MobileNetV3PreTrainedModel, __webpack_exports__MobileNetV4FeatureExtractor as MobileNetV4FeatureExtractor, __webpack_exports__MobileNetV4ForImageClassification as MobileNetV4ForImageClassification, __webpack_exports__MobileNetV4ForSemanticSegmentation as MobileNetV4ForSemanticSegmentation, __webpack_exports__MobileNetV4ImageProcessor as MobileNetV4ImageProcessor, __webpack_exports__MobileNetV4Model as MobileNetV4Model, __webpack_exports__MobileNetV4PreTrainedModel as MobileNetV4PreTrainedModel, __webpack_exports__MobileViTFeatureExtractor as MobileViTFeatureExtractor, __webpack_exports__MobileViTForImageClassification as MobileViTForImageClassification, 
__webpack_exports__MobileViTImageProcessor as MobileViTImageProcessor, __webpack_exports__MobileViTModel as MobileViTModel, __webpack_exports__MobileViTPreTrainedModel as MobileViTPreTrainedModel, __webpack_exports__MobileViTV2ForImageClassification as MobileViTV2ForImageClassification, __webpack_exports__MobileViTV2Model as MobileViTV2Model, __webpack_exports__MobileViTV2PreTrainedModel as MobileViTV2PreTrainedModel, __webpack_exports__ModelOutput as ModelOutput, __webpack_exports__ModernBertForMaskedLM as ModernBertForMaskedLM, __webpack_exports__ModernBertForSequenceClassification as ModernBertForSequenceClassification, __webpack_exports__ModernBertForTokenClassification as ModernBertForTokenClassification, __webpack_exports__ModernBertModel as ModernBertModel, __webpack_exports__ModernBertPreTrainedModel as ModernBertPreTrainedModel, __webpack_exports__Moondream1ForConditionalGeneration as Moondream1ForConditionalGeneration, __webpack_exports__MoonshineFeatureExtractor as MoonshineFeatureExtractor, __webpack_exports__MoonshineForConditionalGeneration as MoonshineForConditionalGeneration, __webpack_exports__MoonshineModel as MoonshineModel, __webpack_exports__MoonshinePreTrainedModel as MoonshinePreTrainedModel, __webpack_exports__MoonshineProcessor as MoonshineProcessor, __webpack_exports__MptForCausalLM as MptForCausalLM, __webpack_exports__MptModel as MptModel, __webpack_exports__MptPreTrainedModel as MptPreTrainedModel, __webpack_exports__MultiModalityCausalLM as MultiModalityCausalLM, __webpack_exports__MultiModalityPreTrainedModel as MultiModalityPreTrainedModel, __webpack_exports__MusicgenForCausalLM as MusicgenForCausalLM, __webpack_exports__MusicgenForConditionalGeneration as MusicgenForConditionalGeneration, __webpack_exports__MusicgenModel as MusicgenModel, __webpack_exports__MusicgenPreTrainedModel as MusicgenPreTrainedModel, __webpack_exports__NllbTokenizer as NllbTokenizer, __webpack_exports__NoBadWordsLogitsProcessor as NoBadWordsLogitsProcessor, 
__webpack_exports__NoRepeatNGramLogitsProcessor as NoRepeatNGramLogitsProcessor, __webpack_exports__NomicBertModel as NomicBertModel, __webpack_exports__NomicBertPreTrainedModel as NomicBertPreTrainedModel, __webpack_exports__NougatImageProcessor as NougatImageProcessor, __webpack_exports__NougatTokenizer as NougatTokenizer, __webpack_exports__OPTForCausalLM as OPTForCausalLM, __webpack_exports__OPTModel as OPTModel, __webpack_exports__OPTPreTrainedModel as OPTPreTrainedModel, __webpack_exports__ObjectDetectionPipeline as ObjectDetectionPipeline, __webpack_exports__Olmo2ForCausalLM as Olmo2ForCausalLM, __webpack_exports__Olmo2Model as Olmo2Model, __webpack_exports__Olmo2PreTrainedModel as Olmo2PreTrainedModel, __webpack_exports__OlmoForCausalLM as OlmoForCausalLM, __webpack_exports__OlmoModel as OlmoModel, __webpack_exports__OlmoPreTrainedModel as OlmoPreTrainedModel, __webpack_exports__OpenELMForCausalLM as OpenELMForCausalLM, __webpack_exports__OpenELMModel as OpenELMModel, __webpack_exports__OpenELMPreTrainedModel as OpenELMPreTrainedModel, __webpack_exports__OwlViTFeatureExtractor as OwlViTFeatureExtractor, __webpack_exports__OwlViTForObjectDetection as OwlViTForObjectDetection, __webpack_exports__OwlViTImageProcessor as OwlViTImageProcessor, __webpack_exports__OwlViTModel as OwlViTModel, __webpack_exports__OwlViTPreTrainedModel as OwlViTPreTrainedModel, __webpack_exports__OwlViTProcessor as OwlViTProcessor, __webpack_exports__Owlv2ForObjectDetection as Owlv2ForObjectDetection, __webpack_exports__Owlv2ImageProcessor as Owlv2ImageProcessor, __webpack_exports__Owlv2Model as Owlv2Model, __webpack_exports__Owlv2PreTrainedModel as Owlv2PreTrainedModel, __webpack_exports__PaliGemmaForConditionalGeneration as PaliGemmaForConditionalGeneration, __webpack_exports__PaliGemmaPreTrainedModel as PaliGemmaPreTrainedModel, __webpack_exports__PaliGemmaProcessor as PaliGemmaProcessor, __webpack_exports__PatchTSMixerForPrediction as PatchTSMixerForPrediction, 
__webpack_exports__PatchTSMixerModel as PatchTSMixerModel, __webpack_exports__PatchTSMixerPreTrainedModel as PatchTSMixerPreTrainedModel, __webpack_exports__PatchTSTForPrediction as PatchTSTForPrediction, __webpack_exports__PatchTSTModel as PatchTSTModel, __webpack_exports__PatchTSTPreTrainedModel as PatchTSTPreTrainedModel, __webpack_exports__Phi3ForCausalLM as Phi3ForCausalLM, __webpack_exports__Phi3Model as Phi3Model, __webpack_exports__Phi3PreTrainedModel as Phi3PreTrainedModel, __webpack_exports__Phi3VForCausalLM as Phi3VForCausalLM, __webpack_exports__Phi3VImageProcessor as Phi3VImageProcessor, __webpack_exports__Phi3VPreTrainedModel as Phi3VPreTrainedModel, __webpack_exports__Phi3VProcessor as Phi3VProcessor, __webpack_exports__PhiForCausalLM as PhiForCausalLM, __webpack_exports__PhiModel as PhiModel, __webpack_exports__PhiPreTrainedModel as PhiPreTrainedModel, __webpack_exports__Pipeline as Pipeline, __webpack_exports__PreTrainedModel as PreTrainedModel, __webpack_exports__PreTrainedTokenizer as PreTrainedTokenizer, __webpack_exports__PretrainedConfig as PretrainedConfig, __webpack_exports__PretrainedMixin as PretrainedMixin, __webpack_exports__Processor as Processor, __webpack_exports__PvtForImageClassification as PvtForImageClassification, __webpack_exports__PvtImageProcessor as PvtImageProcessor, __webpack_exports__PvtModel as PvtModel, __webpack_exports__PvtPreTrainedModel as PvtPreTrainedModel, __webpack_exports__PyAnnoteFeatureExtractor as PyAnnoteFeatureExtractor, __webpack_exports__PyAnnoteForAudioFrameClassification as PyAnnoteForAudioFrameClassification, __webpack_exports__PyAnnoteModel as PyAnnoteModel, __webpack_exports__PyAnnotePreTrainedModel as PyAnnotePreTrainedModel, __webpack_exports__PyAnnoteProcessor as PyAnnoteProcessor, __webpack_exports__QuestionAnsweringModelOutput as QuestionAnsweringModelOutput, __webpack_exports__QuestionAnsweringPipeline as QuestionAnsweringPipeline, __webpack_exports__Qwen2ForCausalLM as Qwen2ForCausalLM, 
__webpack_exports__Qwen2Model as Qwen2Model, __webpack_exports__Qwen2PreTrainedModel as Qwen2PreTrainedModel, __webpack_exports__Qwen2Tokenizer as Qwen2Tokenizer, __webpack_exports__Qwen2VLForConditionalGeneration as Qwen2VLForConditionalGeneration, __webpack_exports__Qwen2VLImageProcessor as Qwen2VLImageProcessor, __webpack_exports__Qwen2VLPreTrainedModel as Qwen2VLPreTrainedModel, __webpack_exports__Qwen2VLProcessor as Qwen2VLProcessor, __webpack_exports__RTDetrForObjectDetection as RTDetrForObjectDetection, __webpack_exports__RTDetrImageProcessor as RTDetrImageProcessor, __webpack_exports__RTDetrModel as RTDetrModel, __webpack_exports__RTDetrObjectDetectionOutput as RTDetrObjectDetectionOutput, __webpack_exports__RTDetrPreTrainedModel as RTDetrPreTrainedModel, __webpack_exports__RawAudio as RawAudio, __webpack_exports__RawImage as RawImage, __webpack_exports__RawVideo as RawVideo, __webpack_exports__RawVideoFrame as RawVideoFrame, __webpack_exports__RepetitionPenaltyLogitsProcessor as RepetitionPenaltyLogitsProcessor, __webpack_exports__ResNetForImageClassification as ResNetForImageClassification, __webpack_exports__ResNetModel as ResNetModel, __webpack_exports__ResNetPreTrainedModel as ResNetPreTrainedModel, __webpack_exports__RoFormerForMaskedLM as RoFormerForMaskedLM, __webpack_exports__RoFormerForQuestionAnswering as RoFormerForQuestionAnswering, __webpack_exports__RoFormerForSequenceClassification as RoFormerForSequenceClassification, __webpack_exports__RoFormerForTokenClassification as RoFormerForTokenClassification, __webpack_exports__RoFormerModel as RoFormerModel, __webpack_exports__RoFormerPreTrainedModel as RoFormerPreTrainedModel, __webpack_exports__RoFormerTokenizer as RoFormerTokenizer, __webpack_exports__RobertaForMaskedLM as RobertaForMaskedLM, __webpack_exports__RobertaForQuestionAnswering as RobertaForQuestionAnswering, __webpack_exports__RobertaForSequenceClassification as RobertaForSequenceClassification, 
__webpack_exports__RobertaForTokenClassification as RobertaForTokenClassification, __webpack_exports__RobertaModel as RobertaModel, __webpack_exports__RobertaPreTrainedModel as RobertaPreTrainedModel, __webpack_exports__RobertaTokenizer as RobertaTokenizer, __webpack_exports__SamImageProcessor as SamImageProcessor, __webpack_exports__SamImageSegmentationOutput as SamImageSegmentationOutput, __webpack_exports__SamModel as SamModel, __webpack_exports__SamPreTrainedModel as SamPreTrainedModel, __webpack_exports__SamProcessor as SamProcessor, __webpack_exports__SapiensForDepthEstimation as SapiensForDepthEstimation, __webpack_exports__SapiensForNormalEstimation as SapiensForNormalEstimation, __webpack_exports__SapiensForSemanticSegmentation as SapiensForSemanticSegmentation, __webpack_exports__SapiensPreTrainedModel as SapiensPreTrainedModel, __webpack_exports__SeamlessM4TFeatureExtractor as SeamlessM4TFeatureExtractor, __webpack_exports__SegformerFeatureExtractor as SegformerFeatureExtractor, __webpack_exports__SegformerForImageClassification as SegformerForImageClassification, __webpack_exports__SegformerForSemanticSegmentation as SegformerForSemanticSegmentation, __webpack_exports__SegformerImageProcessor as SegformerImageProcessor, __webpack_exports__SegformerModel as SegformerModel, __webpack_exports__SegformerPreTrainedModel as SegformerPreTrainedModel, __webpack_exports__Seq2SeqLMOutput as Seq2SeqLMOutput, __webpack_exports__SequenceClassifierOutput as SequenceClassifierOutput, __webpack_exports__SiglipImageProcessor as SiglipImageProcessor, __webpack_exports__SiglipModel as SiglipModel, __webpack_exports__SiglipPreTrainedModel as SiglipPreTrainedModel, __webpack_exports__SiglipTextModel as SiglipTextModel, __webpack_exports__SiglipTokenizer as SiglipTokenizer, __webpack_exports__SiglipVisionModel as SiglipVisionModel, __webpack_exports__SmolVLMForConditionalGeneration as SmolVLMForConditionalGeneration, __webpack_exports__SmolVLMImageProcessor as 
SmolVLMImageProcessor, __webpack_exports__SmolVLMProcessor as SmolVLMProcessor, __webpack_exports__SpeechT5FeatureExtractor as SpeechT5FeatureExtractor, __webpack_exports__SpeechT5ForSpeechToText as SpeechT5ForSpeechToText, __webpack_exports__SpeechT5ForTextToSpeech as SpeechT5ForTextToSpeech, __webpack_exports__SpeechT5HifiGan as SpeechT5HifiGan, __webpack_exports__SpeechT5Model as SpeechT5Model, __webpack_exports__SpeechT5PreTrainedModel as SpeechT5PreTrainedModel, __webpack_exports__SpeechT5Processor as SpeechT5Processor, __webpack_exports__SpeechT5Tokenizer as SpeechT5Tokenizer, __webpack_exports__SqueezeBertForMaskedLM as SqueezeBertForMaskedLM, __webpack_exports__SqueezeBertForQuestionAnswering as SqueezeBertForQuestionAnswering, __webpack_exports__SqueezeBertForSequenceClassification as SqueezeBertForSequenceClassification, __webpack_exports__SqueezeBertModel as SqueezeBertModel, __webpack_exports__SqueezeBertPreTrainedModel as SqueezeBertPreTrainedModel, __webpack_exports__SqueezeBertTokenizer as SqueezeBertTokenizer, __webpack_exports__StableLmForCausalLM as StableLmForCausalLM, __webpack_exports__StableLmModel as StableLmModel, __webpack_exports__StableLmPreTrainedModel as StableLmPreTrainedModel, __webpack_exports__Starcoder2ForCausalLM as Starcoder2ForCausalLM, __webpack_exports__Starcoder2Model as Starcoder2Model, __webpack_exports__Starcoder2PreTrainedModel as Starcoder2PreTrainedModel, __webpack_exports__StoppingCriteria as StoppingCriteria, __webpack_exports__StoppingCriteriaList as StoppingCriteriaList, __webpack_exports__StyleTextToSpeech2Model as StyleTextToSpeech2Model, __webpack_exports__StyleTextToSpeech2PreTrainedModel as StyleTextToSpeech2PreTrainedModel, __webpack_exports__SummarizationPipeline as SummarizationPipeline, __webpack_exports__SuppressTokensAtBeginLogitsProcessor as SuppressTokensAtBeginLogitsProcessor, __webpack_exports__Swin2SRForImageSuperResolution as Swin2SRForImageSuperResolution, __webpack_exports__Swin2SRImageProcessor 
as Swin2SRImageProcessor, __webpack_exports__Swin2SRModel as Swin2SRModel, __webpack_exports__Swin2SRPreTrainedModel as Swin2SRPreTrainedModel, __webpack_exports__SwinForImageClassification as SwinForImageClassification, __webpack_exports__SwinForSemanticSegmentation as SwinForSemanticSegmentation, __webpack_exports__SwinModel as SwinModel, __webpack_exports__SwinPreTrainedModel as SwinPreTrainedModel, __webpack_exports__T5ForConditionalGeneration as T5ForConditionalGeneration, __webpack_exports__T5Model as T5Model, __webpack_exports__T5PreTrainedModel as T5PreTrainedModel, __webpack_exports__T5Tokenizer as T5Tokenizer, __webpack_exports__TableTransformerForObjectDetection as TableTransformerForObjectDetection, __webpack_exports__TableTransformerModel as TableTransformerModel, __webpack_exports__TableTransformerObjectDetectionOutput as TableTransformerObjectDetectionOutput, __webpack_exports__TableTransformerPreTrainedModel as TableTransformerPreTrainedModel, __webpack_exports__TemperatureLogitsWarper as TemperatureLogitsWarper, __webpack_exports__Tensor as Tensor, __webpack_exports__Text2TextGenerationPipeline as Text2TextGenerationPipeline, __webpack_exports__TextClassificationPipeline as TextClassificationPipeline, __webpack_exports__TextGenerationPipeline as TextGenerationPipeline, __webpack_exports__TextStreamer as TextStreamer, __webpack_exports__TextToAudioPipeline as TextToAudioPipeline, __webpack_exports__TokenClassificationPipeline as TokenClassificationPipeline, __webpack_exports__TokenClassifierOutput as TokenClassifierOutput, __webpack_exports__TokenizerModel as TokenizerModel, __webpack_exports__TopKLogitsWarper as TopKLogitsWarper, __webpack_exports__TopPLogitsWarper as TopPLogitsWarper, __webpack_exports__TrOCRForCausalLM as TrOCRForCausalLM, __webpack_exports__TrOCRPreTrainedModel as TrOCRPreTrainedModel, __webpack_exports__TranslationPipeline as TranslationPipeline, __webpack_exports__UltravoxModel as UltravoxModel, 
__webpack_exports__UltravoxPreTrainedModel as UltravoxPreTrainedModel, __webpack_exports__UltravoxProcessor as UltravoxProcessor, __webpack_exports__UniSpeechForCTC as UniSpeechForCTC, __webpack_exports__UniSpeechForSequenceClassification as UniSpeechForSequenceClassification, __webpack_exports__UniSpeechModel as UniSpeechModel, __webpack_exports__UniSpeechPreTrainedModel as UniSpeechPreTrainedModel, __webpack_exports__UniSpeechSatForAudioFrameClassification as UniSpeechSatForAudioFrameClassification, __webpack_exports__UniSpeechSatForCTC as UniSpeechSatForCTC, __webpack_exports__UniSpeechSatForSequenceClassification as UniSpeechSatForSequenceClassification, __webpack_exports__UniSpeechSatModel as UniSpeechSatModel, __webpack_exports__UniSpeechSatPreTrainedModel as UniSpeechSatPreTrainedModel, __webpack_exports__VLChatProcessor as VLChatProcessor, __webpack_exports__VLMImageProcessor as VLMImageProcessor, __webpack_exports__ViTFeatureExtractor as ViTFeatureExtractor, __webpack_exports__ViTForImageClassification as ViTForImageClassification, __webpack_exports__ViTImageProcessor as ViTImageProcessor, __webpack_exports__ViTMAEModel as ViTMAEModel, __webpack_exports__ViTMAEPreTrainedModel as ViTMAEPreTrainedModel, __webpack_exports__ViTMSNForImageClassification as ViTMSNForImageClassification, __webpack_exports__ViTMSNModel as ViTMSNModel, __webpack_exports__ViTMSNPreTrainedModel as ViTMSNPreTrainedModel, __webpack_exports__ViTModel as ViTModel, __webpack_exports__ViTPreTrainedModel as ViTPreTrainedModel, __webpack_exports__VisionEncoderDecoderModel as VisionEncoderDecoderModel, __webpack_exports__VitMatteForImageMatting as VitMatteForImageMatting, __webpack_exports__VitMatteImageProcessor as VitMatteImageProcessor, __webpack_exports__VitMattePreTrainedModel as VitMattePreTrainedModel, __webpack_exports__VitPoseForPoseEstimation as VitPoseForPoseEstimation, __webpack_exports__VitPoseImageProcessor as VitPoseImageProcessor, __webpack_exports__VitPosePreTrainedModel as 
VitPosePreTrainedModel, __webpack_exports__VitsModel as VitsModel, __webpack_exports__VitsModelOutput as VitsModelOutput, __webpack_exports__VitsPreTrainedModel as VitsPreTrainedModel, __webpack_exports__VitsTokenizer as VitsTokenizer, __webpack_exports__Wav2Vec2BertForCTC as Wav2Vec2BertForCTC, __webpack_exports__Wav2Vec2BertForSequenceClassification as Wav2Vec2BertForSequenceClassification, __webpack_exports__Wav2Vec2BertModel as Wav2Vec2BertModel, __webpack_exports__Wav2Vec2BertPreTrainedModel as Wav2Vec2BertPreTrainedModel, __webpack_exports__Wav2Vec2CTCTokenizer as Wav2Vec2CTCTokenizer, __webpack_exports__Wav2Vec2FeatureExtractor as Wav2Vec2FeatureExtractor, __webpack_exports__Wav2Vec2ForAudioFrameClassification as Wav2Vec2ForAudioFrameClassification, __webpack_exports__Wav2Vec2ForCTC as Wav2Vec2ForCTC, __webpack_exports__Wav2Vec2ForSequenceClassification as Wav2Vec2ForSequenceClassification, __webpack_exports__Wav2Vec2Model as Wav2Vec2Model, __webpack_exports__Wav2Vec2PreTrainedModel as Wav2Vec2PreTrainedModel, __webpack_exports__Wav2Vec2Processor as Wav2Vec2Processor, __webpack_exports__Wav2Vec2ProcessorWithLM as Wav2Vec2ProcessorWithLM, __webpack_exports__WavLMForAudioFrameClassification as WavLMForAudioFrameClassification, __webpack_exports__WavLMForCTC as WavLMForCTC, __webpack_exports__WavLMForSequenceClassification as WavLMForSequenceClassification, __webpack_exports__WavLMForXVector as WavLMForXVector, __webpack_exports__WavLMModel as WavLMModel, __webpack_exports__WavLMPreTrainedModel as WavLMPreTrainedModel, __webpack_exports__WeSpeakerFeatureExtractor as WeSpeakerFeatureExtractor, __webpack_exports__WeSpeakerResNetModel as WeSpeakerResNetModel, __webpack_exports__WeSpeakerResNetPreTrainedModel as WeSpeakerResNetPreTrainedModel, __webpack_exports__WhisperFeatureExtractor as WhisperFeatureExtractor, __webpack_exports__WhisperForConditionalGeneration as WhisperForConditionalGeneration, __webpack_exports__WhisperModel as WhisperModel, 
__webpack_exports__WhisperPreTrainedModel as WhisperPreTrainedModel, __webpack_exports__WhisperProcessor as WhisperProcessor, __webpack_exports__WhisperTextStreamer as WhisperTextStreamer, __webpack_exports__WhisperTimeStampLogitsProcessor as WhisperTimeStampLogitsProcessor, __webpack_exports__WhisperTokenizer as WhisperTokenizer, __webpack_exports__XLMForQuestionAnswering as XLMForQuestionAnswering, __webpack_exports__XLMForSequenceClassification as XLMForSequenceClassification, __webpack_exports__XLMForTokenClassification as XLMForTokenClassification, __webpack_exports__XLMModel as XLMModel, __webpack_exports__XLMPreTrainedModel as XLMPreTrainedModel, __webpack_exports__XLMRobertaForMaskedLM as XLMRobertaForMaskedLM, __webpack_exports__XLMRobertaForQuestionAnswering as XLMRobertaForQuestionAnswering, __webpack_exports__XLMRobertaForSequenceClassification as XLMRobertaForSequenceClassification, __webpack_exports__XLMRobertaForTokenClassification as XLMRobertaForTokenClassification, __webpack_exports__XLMRobertaModel as XLMRobertaModel, __webpack_exports__XLMRobertaPreTrainedModel as XLMRobertaPreTrainedModel, __webpack_exports__XLMRobertaTokenizer as XLMRobertaTokenizer, __webpack_exports__XLMTokenizer as XLMTokenizer, __webpack_exports__XLMWithLMHeadModel as XLMWithLMHeadModel, __webpack_exports__XVectorOutput as XVectorOutput, __webpack_exports__YolosFeatureExtractor as YolosFeatureExtractor, __webpack_exports__YolosForObjectDetection as YolosForObjectDetection, __webpack_exports__YolosImageProcessor as YolosImageProcessor, __webpack_exports__YolosModel as YolosModel, __webpack_exports__YolosObjectDetectionOutput as YolosObjectDetectionOutput, __webpack_exports__YolosPreTrainedModel as YolosPreTrainedModel, __webpack_exports__ZeroShotAudioClassificationPipeline as ZeroShotAudioClassificationPipeline, __webpack_exports__ZeroShotClassificationPipeline as ZeroShotClassificationPipeline, __webpack_exports__ZeroShotImageClassificationPipeline as 
ZeroShotImageClassificationPipeline, __webpack_exports__ZeroShotObjectDetectionPipeline as ZeroShotObjectDetectionPipeline, __webpack_exports__bankers_round as bankers_round, __webpack_exports__cat as cat, __webpack_exports__cos_sim as cos_sim, __webpack_exports__dot as dot, __webpack_exports__dynamic_time_warping as dynamic_time_warping, __webpack_exports__env as env, __webpack_exports__full as full, __webpack_exports__full_like as full_like, __webpack_exports__getKeyValueShapes as getKeyValueShapes, __webpack_exports__hamming as hamming, __webpack_exports__hanning as hanning, __webpack_exports__interpolate as interpolate, __webpack_exports__interpolate_4d as interpolate_4d, __webpack_exports__interpolate_data as interpolate_data, __webpack_exports__is_chinese_char as is_chinese_char, __webpack_exports__layer_norm as layer_norm, __webpack_exports__load_image as load_image, __webpack_exports__load_video as load_video, __webpack_exports__log_softmax as log_softmax, __webpack_exports__magnitude as magnitude, __webpack_exports__matmul as matmul, __webpack_exports__max as max, __webpack_exports__mean as mean, __webpack_exports__mean_pooling as mean_pooling, __webpack_exports__medianFilter as medianFilter, __webpack_exports__mel_filter_bank as mel_filter_bank, __webpack_exports__min as min, __webpack_exports__ones as ones, __webpack_exports__ones_like as ones_like, __webpack_exports__permute as permute, __webpack_exports__permute_data as permute_data, __webpack_exports__pipeline as pipeline, __webpack_exports__quantize_embeddings as quantize_embeddings, __webpack_exports__rand as rand, __webpack_exports__read_audio as read_audio, __webpack_exports__rfft as rfft, __webpack_exports__round as round, __webpack_exports__slice as slice, __webpack_exports__softmax as softmax, __webpack_exports__spectrogram as spectrogram, __webpack_exports__stack as stack, __webpack_exports__std_mean as std_mean, __webpack_exports__topk as topk, __webpack_exports__window_function as 
window_function, __webpack_exports__zeros as zeros, __webpack_exports__zeros_like as zeros_like };
36038
+ export { __webpack_exports__ASTFeatureExtractor as ASTFeatureExtractor, __webpack_exports__ASTForAudioClassification as ASTForAudioClassification, __webpack_exports__ASTModel as ASTModel, __webpack_exports__ASTPreTrainedModel as ASTPreTrainedModel, __webpack_exports__AlbertForMaskedLM as AlbertForMaskedLM, __webpack_exports__AlbertForQuestionAnswering as AlbertForQuestionAnswering, __webpack_exports__AlbertForSequenceClassification as AlbertForSequenceClassification, __webpack_exports__AlbertModel as AlbertModel, __webpack_exports__AlbertPreTrainedModel as AlbertPreTrainedModel, __webpack_exports__AlbertTokenizer as AlbertTokenizer, __webpack_exports__AudioClassificationPipeline as AudioClassificationPipeline, __webpack_exports__AutoConfig as AutoConfig, __webpack_exports__AutoFeatureExtractor as AutoFeatureExtractor, __webpack_exports__AutoImageProcessor as AutoImageProcessor, __webpack_exports__AutoModel as AutoModel, __webpack_exports__AutoModelForAudioClassification as AutoModelForAudioClassification, __webpack_exports__AutoModelForAudioFrameClassification as AutoModelForAudioFrameClassification, __webpack_exports__AutoModelForAudioTextToText as AutoModelForAudioTextToText, __webpack_exports__AutoModelForCTC as AutoModelForCTC, __webpack_exports__AutoModelForCausalLM as AutoModelForCausalLM, __webpack_exports__AutoModelForDepthEstimation as AutoModelForDepthEstimation, __webpack_exports__AutoModelForDocumentQuestionAnswering as AutoModelForDocumentQuestionAnswering, __webpack_exports__AutoModelForImageClassification as AutoModelForImageClassification, __webpack_exports__AutoModelForImageFeatureExtraction as AutoModelForImageFeatureExtraction, __webpack_exports__AutoModelForImageMatting as AutoModelForImageMatting, __webpack_exports__AutoModelForImageSegmentation as AutoModelForImageSegmentation, __webpack_exports__AutoModelForImageTextToText as AutoModelForImageTextToText, __webpack_exports__AutoModelForImageToImage as AutoModelForImageToImage, 
__webpack_exports__AutoModelForMaskGeneration as AutoModelForMaskGeneration, __webpack_exports__AutoModelForMaskedLM as AutoModelForMaskedLM, __webpack_exports__AutoModelForNormalEstimation as AutoModelForNormalEstimation, __webpack_exports__AutoModelForObjectDetection as AutoModelForObjectDetection, __webpack_exports__AutoModelForPoseEstimation as AutoModelForPoseEstimation, __webpack_exports__AutoModelForQuestionAnswering as AutoModelForQuestionAnswering, __webpack_exports__AutoModelForSemanticSegmentation as AutoModelForSemanticSegmentation, __webpack_exports__AutoModelForSeq2SeqLM as AutoModelForSeq2SeqLM, __webpack_exports__AutoModelForSequenceClassification as AutoModelForSequenceClassification, __webpack_exports__AutoModelForSpeechSeq2Seq as AutoModelForSpeechSeq2Seq, __webpack_exports__AutoModelForTextToSpectrogram as AutoModelForTextToSpectrogram, __webpack_exports__AutoModelForTextToWaveform as AutoModelForTextToWaveform, __webpack_exports__AutoModelForTokenClassification as AutoModelForTokenClassification, __webpack_exports__AutoModelForUniversalSegmentation as AutoModelForUniversalSegmentation, __webpack_exports__AutoModelForVision2Seq as AutoModelForVision2Seq, __webpack_exports__AutoModelForXVector as AutoModelForXVector, __webpack_exports__AutoModelForZeroShotObjectDetection as AutoModelForZeroShotObjectDetection, __webpack_exports__AutoProcessor as AutoProcessor, __webpack_exports__AutoTokenizer as AutoTokenizer, __webpack_exports__AutomaticSpeechRecognitionPipeline as AutomaticSpeechRecognitionPipeline, __webpack_exports__BackgroundRemovalPipeline as BackgroundRemovalPipeline, __webpack_exports__BartForConditionalGeneration as BartForConditionalGeneration, __webpack_exports__BartForSequenceClassification as BartForSequenceClassification, __webpack_exports__BartModel as BartModel, __webpack_exports__BartPretrainedModel as BartPretrainedModel, __webpack_exports__BartTokenizer as BartTokenizer, __webpack_exports__BaseModelOutput as BaseModelOutput, 
__webpack_exports__BaseStreamer as BaseStreamer, __webpack_exports__BeitFeatureExtractor as BeitFeatureExtractor, __webpack_exports__BeitForImageClassification as BeitForImageClassification, __webpack_exports__BeitModel as BeitModel, __webpack_exports__BeitPreTrainedModel as BeitPreTrainedModel, __webpack_exports__BertForMaskedLM as BertForMaskedLM, __webpack_exports__BertForQuestionAnswering as BertForQuestionAnswering, __webpack_exports__BertForSequenceClassification as BertForSequenceClassification, __webpack_exports__BertForTokenClassification as BertForTokenClassification, __webpack_exports__BertModel as BertModel, __webpack_exports__BertPreTrainedModel as BertPreTrainedModel, __webpack_exports__BertTokenizer as BertTokenizer, __webpack_exports__BitImageProcessor as BitImageProcessor, __webpack_exports__BlenderbotForConditionalGeneration as BlenderbotForConditionalGeneration, __webpack_exports__BlenderbotModel as BlenderbotModel, __webpack_exports__BlenderbotPreTrainedModel as BlenderbotPreTrainedModel, __webpack_exports__BlenderbotSmallForConditionalGeneration as BlenderbotSmallForConditionalGeneration, __webpack_exports__BlenderbotSmallModel as BlenderbotSmallModel, __webpack_exports__BlenderbotSmallPreTrainedModel as BlenderbotSmallPreTrainedModel, __webpack_exports__BlenderbotSmallTokenizer as BlenderbotSmallTokenizer, __webpack_exports__BlenderbotTokenizer as BlenderbotTokenizer, __webpack_exports__BloomForCausalLM as BloomForCausalLM, __webpack_exports__BloomModel as BloomModel, __webpack_exports__BloomPreTrainedModel as BloomPreTrainedModel, __webpack_exports__BloomTokenizer as BloomTokenizer, __webpack_exports__CLIPFeatureExtractor as CLIPFeatureExtractor, __webpack_exports__CLIPImageProcessor as CLIPImageProcessor, __webpack_exports__CLIPModel as CLIPModel, __webpack_exports__CLIPPreTrainedModel as CLIPPreTrainedModel, __webpack_exports__CLIPSegForImageSegmentation as CLIPSegForImageSegmentation, __webpack_exports__CLIPSegModel as CLIPSegModel, 
__webpack_exports__CLIPSegPreTrainedModel as CLIPSegPreTrainedModel, __webpack_exports__CLIPTextModel as CLIPTextModel, __webpack_exports__CLIPTextModelWithProjection as CLIPTextModelWithProjection, __webpack_exports__CLIPTokenizer as CLIPTokenizer, __webpack_exports__CLIPVisionModel as CLIPVisionModel, __webpack_exports__CLIPVisionModelWithProjection as CLIPVisionModelWithProjection, __webpack_exports__CamembertForMaskedLM as CamembertForMaskedLM, __webpack_exports__CamembertForQuestionAnswering as CamembertForQuestionAnswering, __webpack_exports__CamembertForSequenceClassification as CamembertForSequenceClassification, __webpack_exports__CamembertForTokenClassification as CamembertForTokenClassification, __webpack_exports__CamembertModel as CamembertModel, __webpack_exports__CamembertPreTrainedModel as CamembertPreTrainedModel, __webpack_exports__CamembertTokenizer as CamembertTokenizer, __webpack_exports__CausalLMOutput as CausalLMOutput, __webpack_exports__CausalLMOutputWithPast as CausalLMOutputWithPast, __webpack_exports__ChineseCLIPFeatureExtractor as ChineseCLIPFeatureExtractor, __webpack_exports__ChineseCLIPModel as ChineseCLIPModel, __webpack_exports__ChineseCLIPPreTrainedModel as ChineseCLIPPreTrainedModel, __webpack_exports__ClapAudioModelWithProjection as ClapAudioModelWithProjection, __webpack_exports__ClapFeatureExtractor as ClapFeatureExtractor, __webpack_exports__ClapModel as ClapModel, __webpack_exports__ClapPreTrainedModel as ClapPreTrainedModel, __webpack_exports__ClapTextModelWithProjection as ClapTextModelWithProjection, __webpack_exports__ClassifierFreeGuidanceLogitsProcessor as ClassifierFreeGuidanceLogitsProcessor, __webpack_exports__CodeGenForCausalLM as CodeGenForCausalLM, __webpack_exports__CodeGenModel as CodeGenModel, __webpack_exports__CodeGenPreTrainedModel as CodeGenPreTrainedModel, __webpack_exports__CodeGenTokenizer as CodeGenTokenizer, __webpack_exports__CodeLlamaTokenizer as CodeLlamaTokenizer, 
__webpack_exports__CohereForCausalLM as CohereForCausalLM, __webpack_exports__CohereModel as CohereModel, __webpack_exports__CoherePreTrainedModel as CoherePreTrainedModel, __webpack_exports__CohereTokenizer as CohereTokenizer, __webpack_exports__ConvBertForMaskedLM as ConvBertForMaskedLM, __webpack_exports__ConvBertForQuestionAnswering as ConvBertForQuestionAnswering, __webpack_exports__ConvBertForSequenceClassification as ConvBertForSequenceClassification, __webpack_exports__ConvBertForTokenClassification as ConvBertForTokenClassification, __webpack_exports__ConvBertModel as ConvBertModel, __webpack_exports__ConvBertPreTrainedModel as ConvBertPreTrainedModel, __webpack_exports__ConvBertTokenizer as ConvBertTokenizer, __webpack_exports__ConvNextFeatureExtractor as ConvNextFeatureExtractor, __webpack_exports__ConvNextForImageClassification as ConvNextForImageClassification, __webpack_exports__ConvNextImageProcessor as ConvNextImageProcessor, __webpack_exports__ConvNextModel as ConvNextModel, __webpack_exports__ConvNextPreTrainedModel as ConvNextPreTrainedModel, __webpack_exports__ConvNextV2ForImageClassification as ConvNextV2ForImageClassification, __webpack_exports__ConvNextV2Model as ConvNextV2Model, __webpack_exports__ConvNextV2PreTrainedModel as ConvNextV2PreTrainedModel, __webpack_exports__DPTFeatureExtractor as DPTFeatureExtractor, __webpack_exports__DPTForDepthEstimation as DPTForDepthEstimation, __webpack_exports__DPTImageProcessor as DPTImageProcessor, __webpack_exports__DPTModel as DPTModel, __webpack_exports__DPTPreTrainedModel as DPTPreTrainedModel, __webpack_exports__DacDecoderModel as DacDecoderModel, __webpack_exports__DacDecoderOutput as DacDecoderOutput, __webpack_exports__DacEncoderModel as DacEncoderModel, __webpack_exports__DacEncoderOutput as DacEncoderOutput, __webpack_exports__DacFeatureExtractor as DacFeatureExtractor, __webpack_exports__DacModel as DacModel, __webpack_exports__DacPreTrainedModel as DacPreTrainedModel, 
__webpack_exports__DataTypeMap as DataTypeMap, __webpack_exports__DebertaForMaskedLM as DebertaForMaskedLM, __webpack_exports__DebertaForQuestionAnswering as DebertaForQuestionAnswering, __webpack_exports__DebertaForSequenceClassification as DebertaForSequenceClassification, __webpack_exports__DebertaForTokenClassification as DebertaForTokenClassification, __webpack_exports__DebertaModel as DebertaModel, __webpack_exports__DebertaPreTrainedModel as DebertaPreTrainedModel, __webpack_exports__DebertaTokenizer as DebertaTokenizer, __webpack_exports__DebertaV2ForMaskedLM as DebertaV2ForMaskedLM, __webpack_exports__DebertaV2ForQuestionAnswering as DebertaV2ForQuestionAnswering, __webpack_exports__DebertaV2ForSequenceClassification as DebertaV2ForSequenceClassification, __webpack_exports__DebertaV2ForTokenClassification as DebertaV2ForTokenClassification, __webpack_exports__DebertaV2Model as DebertaV2Model, __webpack_exports__DebertaV2PreTrainedModel as DebertaV2PreTrainedModel, __webpack_exports__DebertaV2Tokenizer as DebertaV2Tokenizer, __webpack_exports__DecisionTransformerModel as DecisionTransformerModel, __webpack_exports__DecisionTransformerPreTrainedModel as DecisionTransformerPreTrainedModel, __webpack_exports__DeiTFeatureExtractor as DeiTFeatureExtractor, __webpack_exports__DeiTForImageClassification as DeiTForImageClassification, __webpack_exports__DeiTImageProcessor as DeiTImageProcessor, __webpack_exports__DeiTModel as DeiTModel, __webpack_exports__DeiTPreTrainedModel as DeiTPreTrainedModel, __webpack_exports__DepthAnythingForDepthEstimation as DepthAnythingForDepthEstimation, __webpack_exports__DepthAnythingPreTrainedModel as DepthAnythingPreTrainedModel, __webpack_exports__DepthEstimationPipeline as DepthEstimationPipeline, __webpack_exports__DepthProForDepthEstimation as DepthProForDepthEstimation, __webpack_exports__DepthProPreTrainedModel as DepthProPreTrainedModel, __webpack_exports__DetrFeatureExtractor as DetrFeatureExtractor, 
__webpack_exports__DetrForObjectDetection as DetrForObjectDetection, __webpack_exports__DetrForSegmentation as DetrForSegmentation, __webpack_exports__DetrImageProcessor as DetrImageProcessor, __webpack_exports__DetrModel as DetrModel, __webpack_exports__DetrObjectDetectionOutput as DetrObjectDetectionOutput, __webpack_exports__DetrPreTrainedModel as DetrPreTrainedModel, __webpack_exports__DetrSegmentationOutput as DetrSegmentationOutput, __webpack_exports__Dinov2ForImageClassification as Dinov2ForImageClassification, __webpack_exports__Dinov2Model as Dinov2Model, __webpack_exports__Dinov2PreTrainedModel as Dinov2PreTrainedModel, __webpack_exports__Dinov2WithRegistersForImageClassification as Dinov2WithRegistersForImageClassification, __webpack_exports__Dinov2WithRegistersModel as Dinov2WithRegistersModel, __webpack_exports__Dinov2WithRegistersPreTrainedModel as Dinov2WithRegistersPreTrainedModel, __webpack_exports__DistilBertForMaskedLM as DistilBertForMaskedLM, __webpack_exports__DistilBertForQuestionAnswering as DistilBertForQuestionAnswering, __webpack_exports__DistilBertForSequenceClassification as DistilBertForSequenceClassification, __webpack_exports__DistilBertForTokenClassification as DistilBertForTokenClassification, __webpack_exports__DistilBertModel as DistilBertModel, __webpack_exports__DistilBertPreTrainedModel as DistilBertPreTrainedModel, __webpack_exports__DistilBertTokenizer as DistilBertTokenizer, __webpack_exports__DocumentQuestionAnsweringPipeline as DocumentQuestionAnsweringPipeline, __webpack_exports__DonutFeatureExtractor as DonutFeatureExtractor, __webpack_exports__DonutImageProcessor as DonutImageProcessor, __webpack_exports__DonutSwinModel as DonutSwinModel, __webpack_exports__DonutSwinPreTrainedModel as DonutSwinPreTrainedModel, __webpack_exports__EfficientNetForImageClassification as EfficientNetForImageClassification, __webpack_exports__EfficientNetImageProcessor as EfficientNetImageProcessor, __webpack_exports__EfficientNetModel as 
EfficientNetModel, __webpack_exports__EfficientNetPreTrainedModel as EfficientNetPreTrainedModel, __webpack_exports__ElectraForMaskedLM as ElectraForMaskedLM, __webpack_exports__ElectraForQuestionAnswering as ElectraForQuestionAnswering, __webpack_exports__ElectraForSequenceClassification as ElectraForSequenceClassification, __webpack_exports__ElectraForTokenClassification as ElectraForTokenClassification, __webpack_exports__ElectraModel as ElectraModel, __webpack_exports__ElectraPreTrainedModel as ElectraPreTrainedModel, __webpack_exports__ElectraTokenizer as ElectraTokenizer, __webpack_exports__EncodecFeatureExtractor as EncodecFeatureExtractor, __webpack_exports__EosTokenCriteria as EosTokenCriteria, __webpack_exports__EsmForMaskedLM as EsmForMaskedLM, __webpack_exports__EsmForSequenceClassification as EsmForSequenceClassification, __webpack_exports__EsmForTokenClassification as EsmForTokenClassification, __webpack_exports__EsmModel as EsmModel, __webpack_exports__EsmPreTrainedModel as EsmPreTrainedModel, __webpack_exports__EsmTokenizer as EsmTokenizer, __webpack_exports__ExaoneForCausalLM as ExaoneForCausalLM, __webpack_exports__ExaoneModel as ExaoneModel, __webpack_exports__ExaonePreTrainedModel as ExaonePreTrainedModel, __webpack_exports__FFT as FFT, __webpack_exports__FalconForCausalLM as FalconForCausalLM, __webpack_exports__FalconModel as FalconModel, __webpack_exports__FalconPreTrainedModel as FalconPreTrainedModel, __webpack_exports__FalconTokenizer as FalconTokenizer, __webpack_exports__FastViTForImageClassification as FastViTForImageClassification, __webpack_exports__FastViTModel as FastViTModel, __webpack_exports__FastViTPreTrainedModel as FastViTPreTrainedModel, __webpack_exports__FeatureExtractionPipeline as FeatureExtractionPipeline, __webpack_exports__FeatureExtractor as FeatureExtractor, __webpack_exports__FillMaskPipeline as FillMaskPipeline, __webpack_exports__Florence2ForConditionalGeneration as Florence2ForConditionalGeneration, 
__webpack_exports__Florence2PreTrainedModel as Florence2PreTrainedModel, __webpack_exports__Florence2Processor as Florence2Processor, __webpack_exports__ForcedBOSTokenLogitsProcessor as ForcedBOSTokenLogitsProcessor, __webpack_exports__ForcedEOSTokenLogitsProcessor as ForcedEOSTokenLogitsProcessor, __webpack_exports__GLPNFeatureExtractor as GLPNFeatureExtractor, __webpack_exports__GLPNForDepthEstimation as GLPNForDepthEstimation, __webpack_exports__GLPNModel as GLPNModel, __webpack_exports__GLPNPreTrainedModel as GLPNPreTrainedModel, __webpack_exports__GPT2LMHeadModel as GPT2LMHeadModel, __webpack_exports__GPT2Model as GPT2Model, __webpack_exports__GPT2PreTrainedModel as GPT2PreTrainedModel, __webpack_exports__GPT2Tokenizer as GPT2Tokenizer, __webpack_exports__GPTBigCodeForCausalLM as GPTBigCodeForCausalLM, __webpack_exports__GPTBigCodeModel as GPTBigCodeModel, __webpack_exports__GPTBigCodePreTrainedModel as GPTBigCodePreTrainedModel, __webpack_exports__GPTJForCausalLM as GPTJForCausalLM, __webpack_exports__GPTJModel as GPTJModel, __webpack_exports__GPTJPreTrainedModel as GPTJPreTrainedModel, __webpack_exports__GPTNeoForCausalLM as GPTNeoForCausalLM, __webpack_exports__GPTNeoModel as GPTNeoModel, __webpack_exports__GPTNeoPreTrainedModel as GPTNeoPreTrainedModel, __webpack_exports__GPTNeoXForCausalLM as GPTNeoXForCausalLM, __webpack_exports__GPTNeoXModel as GPTNeoXModel, __webpack_exports__GPTNeoXPreTrainedModel as GPTNeoXPreTrainedModel, __webpack_exports__GPTNeoXTokenizer as GPTNeoXTokenizer, __webpack_exports__Gemma2ForCausalLM as Gemma2ForCausalLM, __webpack_exports__Gemma2Model as Gemma2Model, __webpack_exports__Gemma2PreTrainedModel as Gemma2PreTrainedModel, __webpack_exports__Gemma3ForCausalLM as Gemma3ForCausalLM, __webpack_exports__Gemma3Model as Gemma3Model, __webpack_exports__Gemma3PreTrainedModel as Gemma3PreTrainedModel, __webpack_exports__GemmaForCausalLM as GemmaForCausalLM, __webpack_exports__GemmaModel as GemmaModel, 
__webpack_exports__GemmaPreTrainedModel as GemmaPreTrainedModel, __webpack_exports__GemmaTokenizer as GemmaTokenizer, __webpack_exports__GlmForCausalLM as GlmForCausalLM, __webpack_exports__GlmModel as GlmModel, __webpack_exports__GlmPreTrainedModel as GlmPreTrainedModel, __webpack_exports__GraniteForCausalLM as GraniteForCausalLM, __webpack_exports__GraniteModel as GraniteModel, __webpack_exports__GranitePreTrainedModel as GranitePreTrainedModel, __webpack_exports__Grok1Tokenizer as Grok1Tokenizer, __webpack_exports__GroundingDinoForObjectDetection as GroundingDinoForObjectDetection, __webpack_exports__GroundingDinoImageProcessor as GroundingDinoImageProcessor, __webpack_exports__GroundingDinoPreTrainedModel as GroundingDinoPreTrainedModel, __webpack_exports__GroundingDinoProcessor as GroundingDinoProcessor, __webpack_exports__GroupViTModel as GroupViTModel, __webpack_exports__GroupViTPreTrainedModel as GroupViTPreTrainedModel, __webpack_exports__HeliumForCausalLM as HeliumForCausalLM, __webpack_exports__HeliumModel as HeliumModel, __webpack_exports__HeliumPreTrainedModel as HeliumPreTrainedModel, __webpack_exports__HerbertTokenizer as HerbertTokenizer, __webpack_exports__HieraForImageClassification as HieraForImageClassification, __webpack_exports__HieraModel as HieraModel, __webpack_exports__HieraPreTrainedModel as HieraPreTrainedModel, __webpack_exports__HubertForCTC as HubertForCTC, __webpack_exports__HubertForSequenceClassification as HubertForSequenceClassification, __webpack_exports__HubertModel as HubertModel, __webpack_exports__HubertPreTrainedModel as HubertPreTrainedModel, __webpack_exports__IJepaForImageClassification as IJepaForImageClassification, __webpack_exports__IJepaModel as IJepaModel, __webpack_exports__IJepaPreTrainedModel as IJepaPreTrainedModel, __webpack_exports__Idefics3ForConditionalGeneration as Idefics3ForConditionalGeneration, __webpack_exports__Idefics3ImageProcessor as Idefics3ImageProcessor, 
__webpack_exports__Idefics3PreTrainedModel as Idefics3PreTrainedModel, __webpack_exports__Idefics3Processor as Idefics3Processor, __webpack_exports__ImageClassificationPipeline as ImageClassificationPipeline, __webpack_exports__ImageFeatureExtractionPipeline as ImageFeatureExtractionPipeline, __webpack_exports__ImageFeatureExtractor as ImageFeatureExtractor, __webpack_exports__ImageMattingOutput as ImageMattingOutput, __webpack_exports__ImageProcessor as ImageProcessor, __webpack_exports__ImageSegmentationPipeline as ImageSegmentationPipeline, __webpack_exports__ImageToImagePipeline as ImageToImagePipeline, __webpack_exports__ImageToTextPipeline as ImageToTextPipeline, __webpack_exports__InterruptableStoppingCriteria as InterruptableStoppingCriteria, __webpack_exports__JAISLMHeadModel as JAISLMHeadModel, __webpack_exports__JAISModel as JAISModel, __webpack_exports__JAISPreTrainedModel as JAISPreTrainedModel, __webpack_exports__JinaCLIPImageProcessor as JinaCLIPImageProcessor, __webpack_exports__JinaCLIPModel as JinaCLIPModel, __webpack_exports__JinaCLIPPreTrainedModel as JinaCLIPPreTrainedModel, __webpack_exports__JinaCLIPProcessor as JinaCLIPProcessor, __webpack_exports__JinaCLIPTextModel as JinaCLIPTextModel, __webpack_exports__JinaCLIPVisionModel as JinaCLIPVisionModel, __webpack_exports__LiteWhisperForConditionalGeneration as LiteWhisperForConditionalGeneration, __webpack_exports__LlamaForCausalLM as LlamaForCausalLM, __webpack_exports__LlamaModel as LlamaModel, __webpack_exports__LlamaPreTrainedModel as LlamaPreTrainedModel, __webpack_exports__LlamaTokenizer as LlamaTokenizer, __webpack_exports__LlavaForConditionalGeneration as LlavaForConditionalGeneration, __webpack_exports__LlavaOnevisionForConditionalGeneration as LlavaOnevisionForConditionalGeneration, __webpack_exports__LlavaOnevisionImageProcessor as LlavaOnevisionImageProcessor, __webpack_exports__LlavaPreTrainedModel as LlavaPreTrainedModel, __webpack_exports__LogitsProcessor as LogitsProcessor, 
__webpack_exports__LogitsProcessorList as LogitsProcessorList, __webpack_exports__LogitsWarper as LogitsWarper, __webpack_exports__LongT5ForConditionalGeneration as LongT5ForConditionalGeneration, __webpack_exports__LongT5Model as LongT5Model, __webpack_exports__LongT5PreTrainedModel as LongT5PreTrainedModel, __webpack_exports__M2M100ForConditionalGeneration as M2M100ForConditionalGeneration, __webpack_exports__M2M100Model as M2M100Model, __webpack_exports__M2M100PreTrainedModel as M2M100PreTrainedModel, __webpack_exports__M2M100Tokenizer as M2M100Tokenizer, __webpack_exports__MBart50Tokenizer as MBart50Tokenizer, __webpack_exports__MBartForCausalLM as MBartForCausalLM, __webpack_exports__MBartForConditionalGeneration as MBartForConditionalGeneration, __webpack_exports__MBartForSequenceClassification as MBartForSequenceClassification, __webpack_exports__MBartModel as MBartModel, __webpack_exports__MBartPreTrainedModel as MBartPreTrainedModel, __webpack_exports__MBartTokenizer as MBartTokenizer, __webpack_exports__MPNetForMaskedLM as MPNetForMaskedLM, __webpack_exports__MPNetForQuestionAnswering as MPNetForQuestionAnswering, __webpack_exports__MPNetForSequenceClassification as MPNetForSequenceClassification, __webpack_exports__MPNetForTokenClassification as MPNetForTokenClassification, __webpack_exports__MPNetModel as MPNetModel, __webpack_exports__MPNetPreTrainedModel as MPNetPreTrainedModel, __webpack_exports__MPNetTokenizer as MPNetTokenizer, __webpack_exports__MT5ForConditionalGeneration as MT5ForConditionalGeneration, __webpack_exports__MT5Model as MT5Model, __webpack_exports__MT5PreTrainedModel as MT5PreTrainedModel, __webpack_exports__MarianMTModel as MarianMTModel, __webpack_exports__MarianModel as MarianModel, __webpack_exports__MarianPreTrainedModel as MarianPreTrainedModel, __webpack_exports__MarianTokenizer as MarianTokenizer, __webpack_exports__Mask2FormerImageProcessor as Mask2FormerImageProcessor, __webpack_exports__MaskFormerFeatureExtractor as 
MaskFormerFeatureExtractor, __webpack_exports__MaskFormerForInstanceSegmentation as MaskFormerForInstanceSegmentation, __webpack_exports__MaskFormerImageProcessor as MaskFormerImageProcessor, __webpack_exports__MaskFormerModel as MaskFormerModel, __webpack_exports__MaskFormerPreTrainedModel as MaskFormerPreTrainedModel, __webpack_exports__MaskedLMOutput as MaskedLMOutput, __webpack_exports__MaxLengthCriteria as MaxLengthCriteria, __webpack_exports__Metric3DForDepthEstimation as Metric3DForDepthEstimation, __webpack_exports__Metric3DPreTrainedModel as Metric3DPreTrainedModel, __webpack_exports__Metric3Dv2ForDepthEstimation as Metric3Dv2ForDepthEstimation, __webpack_exports__Metric3Dv2PreTrainedModel as Metric3Dv2PreTrainedModel, __webpack_exports__MgpstrForSceneTextRecognition as MgpstrForSceneTextRecognition, __webpack_exports__MgpstrModelOutput as MgpstrModelOutput, __webpack_exports__MgpstrPreTrainedModel as MgpstrPreTrainedModel, __webpack_exports__MgpstrProcessor as MgpstrProcessor, __webpack_exports__MgpstrTokenizer as MgpstrTokenizer, __webpack_exports__MimiDecoderModel as MimiDecoderModel, __webpack_exports__MimiDecoderOutput as MimiDecoderOutput, __webpack_exports__MimiEncoderModel as MimiEncoderModel, __webpack_exports__MimiEncoderOutput as MimiEncoderOutput, __webpack_exports__MimiModel as MimiModel, __webpack_exports__MimiPreTrainedModel as MimiPreTrainedModel, __webpack_exports__MinLengthLogitsProcessor as MinLengthLogitsProcessor, __webpack_exports__MinNewTokensLengthLogitsProcessor as MinNewTokensLengthLogitsProcessor, __webpack_exports__MistralForCausalLM as MistralForCausalLM, __webpack_exports__MistralModel as MistralModel, __webpack_exports__MistralPreTrainedModel as MistralPreTrainedModel, __webpack_exports__MobileBertForMaskedLM as MobileBertForMaskedLM, __webpack_exports__MobileBertForQuestionAnswering as MobileBertForQuestionAnswering, __webpack_exports__MobileBertForSequenceClassification as MobileBertForSequenceClassification, 
__webpack_exports__MobileBertModel as MobileBertModel, __webpack_exports__MobileBertPreTrainedModel as MobileBertPreTrainedModel, __webpack_exports__MobileBertTokenizer as MobileBertTokenizer, __webpack_exports__MobileLLMForCausalLM as MobileLLMForCausalLM, __webpack_exports__MobileLLMModel as MobileLLMModel, __webpack_exports__MobileLLMPreTrainedModel as MobileLLMPreTrainedModel, __webpack_exports__MobileNetV1FeatureExtractor as MobileNetV1FeatureExtractor, __webpack_exports__MobileNetV1ForImageClassification as MobileNetV1ForImageClassification, __webpack_exports__MobileNetV1ForSemanticSegmentation as MobileNetV1ForSemanticSegmentation, __webpack_exports__MobileNetV1ImageProcessor as MobileNetV1ImageProcessor, __webpack_exports__MobileNetV1Model as MobileNetV1Model, __webpack_exports__MobileNetV1PreTrainedModel as MobileNetV1PreTrainedModel, __webpack_exports__MobileNetV2FeatureExtractor as MobileNetV2FeatureExtractor, __webpack_exports__MobileNetV2ForImageClassification as MobileNetV2ForImageClassification, __webpack_exports__MobileNetV2ForSemanticSegmentation as MobileNetV2ForSemanticSegmentation, __webpack_exports__MobileNetV2ImageProcessor as MobileNetV2ImageProcessor, __webpack_exports__MobileNetV2Model as MobileNetV2Model, __webpack_exports__MobileNetV2PreTrainedModel as MobileNetV2PreTrainedModel, __webpack_exports__MobileNetV3FeatureExtractor as MobileNetV3FeatureExtractor, __webpack_exports__MobileNetV3ForImageClassification as MobileNetV3ForImageClassification, __webpack_exports__MobileNetV3ForSemanticSegmentation as MobileNetV3ForSemanticSegmentation, __webpack_exports__MobileNetV3ImageProcessor as MobileNetV3ImageProcessor, __webpack_exports__MobileNetV3Model as MobileNetV3Model, __webpack_exports__MobileNetV3PreTrainedModel as MobileNetV3PreTrainedModel, __webpack_exports__MobileNetV4FeatureExtractor as MobileNetV4FeatureExtractor, __webpack_exports__MobileNetV4ForImageClassification as MobileNetV4ForImageClassification, 
__webpack_exports__MobileNetV4ForSemanticSegmentation as MobileNetV4ForSemanticSegmentation, __webpack_exports__MobileNetV4ImageProcessor as MobileNetV4ImageProcessor, __webpack_exports__MobileNetV4Model as MobileNetV4Model, __webpack_exports__MobileNetV4PreTrainedModel as MobileNetV4PreTrainedModel, __webpack_exports__MobileViTFeatureExtractor as MobileViTFeatureExtractor, __webpack_exports__MobileViTForImageClassification as MobileViTForImageClassification, __webpack_exports__MobileViTImageProcessor as MobileViTImageProcessor, __webpack_exports__MobileViTModel as MobileViTModel, __webpack_exports__MobileViTPreTrainedModel as MobileViTPreTrainedModel, __webpack_exports__MobileViTV2ForImageClassification as MobileViTV2ForImageClassification, __webpack_exports__MobileViTV2Model as MobileViTV2Model, __webpack_exports__MobileViTV2PreTrainedModel as MobileViTV2PreTrainedModel, __webpack_exports__ModelOutput as ModelOutput, __webpack_exports__ModernBertForMaskedLM as ModernBertForMaskedLM, __webpack_exports__ModernBertForSequenceClassification as ModernBertForSequenceClassification, __webpack_exports__ModernBertForTokenClassification as ModernBertForTokenClassification, __webpack_exports__ModernBertModel as ModernBertModel, __webpack_exports__ModernBertPreTrainedModel as ModernBertPreTrainedModel, __webpack_exports__Moondream1ForConditionalGeneration as Moondream1ForConditionalGeneration, __webpack_exports__MoonshineFeatureExtractor as MoonshineFeatureExtractor, __webpack_exports__MoonshineForConditionalGeneration as MoonshineForConditionalGeneration, __webpack_exports__MoonshineModel as MoonshineModel, __webpack_exports__MoonshinePreTrainedModel as MoonshinePreTrainedModel, __webpack_exports__MoonshineProcessor as MoonshineProcessor, __webpack_exports__MptForCausalLM as MptForCausalLM, __webpack_exports__MptModel as MptModel, __webpack_exports__MptPreTrainedModel as MptPreTrainedModel, __webpack_exports__MultiModalityCausalLM as MultiModalityCausalLM, 
__webpack_exports__MultiModalityPreTrainedModel as MultiModalityPreTrainedModel, __webpack_exports__MusicgenForCausalLM as MusicgenForCausalLM, __webpack_exports__MusicgenForConditionalGeneration as MusicgenForConditionalGeneration, __webpack_exports__MusicgenModel as MusicgenModel, __webpack_exports__MusicgenPreTrainedModel as MusicgenPreTrainedModel, __webpack_exports__NllbTokenizer as NllbTokenizer, __webpack_exports__NoBadWordsLogitsProcessor as NoBadWordsLogitsProcessor, __webpack_exports__NoRepeatNGramLogitsProcessor as NoRepeatNGramLogitsProcessor, __webpack_exports__NomicBertModel as NomicBertModel, __webpack_exports__NomicBertPreTrainedModel as NomicBertPreTrainedModel, __webpack_exports__NougatImageProcessor as NougatImageProcessor, __webpack_exports__NougatTokenizer as NougatTokenizer, __webpack_exports__OPTForCausalLM as OPTForCausalLM, __webpack_exports__OPTModel as OPTModel, __webpack_exports__OPTPreTrainedModel as OPTPreTrainedModel, __webpack_exports__ObjectDetectionPipeline as ObjectDetectionPipeline, __webpack_exports__Olmo2ForCausalLM as Olmo2ForCausalLM, __webpack_exports__Olmo2Model as Olmo2Model, __webpack_exports__Olmo2PreTrainedModel as Olmo2PreTrainedModel, __webpack_exports__OlmoForCausalLM as OlmoForCausalLM, __webpack_exports__OlmoModel as OlmoModel, __webpack_exports__OlmoPreTrainedModel as OlmoPreTrainedModel, __webpack_exports__OpenELMForCausalLM as OpenELMForCausalLM, __webpack_exports__OpenELMModel as OpenELMModel, __webpack_exports__OpenELMPreTrainedModel as OpenELMPreTrainedModel, __webpack_exports__OwlViTFeatureExtractor as OwlViTFeatureExtractor, __webpack_exports__OwlViTForObjectDetection as OwlViTForObjectDetection, __webpack_exports__OwlViTImageProcessor as OwlViTImageProcessor, __webpack_exports__OwlViTModel as OwlViTModel, __webpack_exports__OwlViTPreTrainedModel as OwlViTPreTrainedModel, __webpack_exports__OwlViTProcessor as OwlViTProcessor, __webpack_exports__Owlv2ForObjectDetection as Owlv2ForObjectDetection, 
__webpack_exports__Owlv2ImageProcessor as Owlv2ImageProcessor, __webpack_exports__Owlv2Model as Owlv2Model, __webpack_exports__Owlv2PreTrainedModel as Owlv2PreTrainedModel, __webpack_exports__PaliGemmaForConditionalGeneration as PaliGemmaForConditionalGeneration, __webpack_exports__PaliGemmaPreTrainedModel as PaliGemmaPreTrainedModel, __webpack_exports__PaliGemmaProcessor as PaliGemmaProcessor, __webpack_exports__PatchTSMixerForPrediction as PatchTSMixerForPrediction, __webpack_exports__PatchTSMixerModel as PatchTSMixerModel, __webpack_exports__PatchTSMixerPreTrainedModel as PatchTSMixerPreTrainedModel, __webpack_exports__PatchTSTForPrediction as PatchTSTForPrediction, __webpack_exports__PatchTSTModel as PatchTSTModel, __webpack_exports__PatchTSTPreTrainedModel as PatchTSTPreTrainedModel, __webpack_exports__Phi3ForCausalLM as Phi3ForCausalLM, __webpack_exports__Phi3Model as Phi3Model, __webpack_exports__Phi3PreTrainedModel as Phi3PreTrainedModel, __webpack_exports__Phi3VForCausalLM as Phi3VForCausalLM, __webpack_exports__Phi3VImageProcessor as Phi3VImageProcessor, __webpack_exports__Phi3VPreTrainedModel as Phi3VPreTrainedModel, __webpack_exports__Phi3VProcessor as Phi3VProcessor, __webpack_exports__PhiForCausalLM as PhiForCausalLM, __webpack_exports__PhiModel as PhiModel, __webpack_exports__PhiPreTrainedModel as PhiPreTrainedModel, __webpack_exports__Pipeline as Pipeline, __webpack_exports__PreTrainedModel as PreTrainedModel, __webpack_exports__PreTrainedTokenizer as PreTrainedTokenizer, __webpack_exports__PretrainedConfig as PretrainedConfig, __webpack_exports__PretrainedMixin as PretrainedMixin, __webpack_exports__Processor as Processor, __webpack_exports__PvtForImageClassification as PvtForImageClassification, __webpack_exports__PvtImageProcessor as PvtImageProcessor, __webpack_exports__PvtModel as PvtModel, __webpack_exports__PvtPreTrainedModel as PvtPreTrainedModel, __webpack_exports__PyAnnoteFeatureExtractor as PyAnnoteFeatureExtractor, 
__webpack_exports__PyAnnoteForAudioFrameClassification as PyAnnoteForAudioFrameClassification, __webpack_exports__PyAnnoteModel as PyAnnoteModel, __webpack_exports__PyAnnotePreTrainedModel as PyAnnotePreTrainedModel, __webpack_exports__PyAnnoteProcessor as PyAnnoteProcessor, __webpack_exports__QuestionAnsweringModelOutput as QuestionAnsweringModelOutput, __webpack_exports__QuestionAnsweringPipeline as QuestionAnsweringPipeline, __webpack_exports__Qwen2ForCausalLM as Qwen2ForCausalLM, __webpack_exports__Qwen2Model as Qwen2Model, __webpack_exports__Qwen2PreTrainedModel as Qwen2PreTrainedModel, __webpack_exports__Qwen2Tokenizer as Qwen2Tokenizer, __webpack_exports__Qwen2VLForConditionalGeneration as Qwen2VLForConditionalGeneration, __webpack_exports__Qwen2VLImageProcessor as Qwen2VLImageProcessor, __webpack_exports__Qwen2VLPreTrainedModel as Qwen2VLPreTrainedModel, __webpack_exports__Qwen2VLProcessor as Qwen2VLProcessor, __webpack_exports__RFDetrForObjectDetection as RFDetrForObjectDetection, __webpack_exports__RFDetrModel as RFDetrModel, __webpack_exports__RFDetrObjectDetectionOutput as RFDetrObjectDetectionOutput, __webpack_exports__RFDetrPreTrainedModel as RFDetrPreTrainedModel, __webpack_exports__RTDetrForObjectDetection as RTDetrForObjectDetection, __webpack_exports__RTDetrImageProcessor as RTDetrImageProcessor, __webpack_exports__RTDetrModel as RTDetrModel, __webpack_exports__RTDetrObjectDetectionOutput as RTDetrObjectDetectionOutput, __webpack_exports__RTDetrPreTrainedModel as RTDetrPreTrainedModel, __webpack_exports__RTDetrV2ForObjectDetection as RTDetrV2ForObjectDetection, __webpack_exports__RTDetrV2Model as RTDetrV2Model, __webpack_exports__RTDetrV2ObjectDetectionOutput as RTDetrV2ObjectDetectionOutput, __webpack_exports__RTDetrV2PreTrainedModel as RTDetrV2PreTrainedModel, __webpack_exports__RawAudio as RawAudio, __webpack_exports__RawImage as RawImage, __webpack_exports__RawVideo as RawVideo, __webpack_exports__RawVideoFrame as RawVideoFrame, 
__webpack_exports__RepetitionPenaltyLogitsProcessor as RepetitionPenaltyLogitsProcessor, __webpack_exports__ResNetForImageClassification as ResNetForImageClassification, __webpack_exports__ResNetModel as ResNetModel, __webpack_exports__ResNetPreTrainedModel as ResNetPreTrainedModel, __webpack_exports__RoFormerForMaskedLM as RoFormerForMaskedLM, __webpack_exports__RoFormerForQuestionAnswering as RoFormerForQuestionAnswering, __webpack_exports__RoFormerForSequenceClassification as RoFormerForSequenceClassification, __webpack_exports__RoFormerForTokenClassification as RoFormerForTokenClassification, __webpack_exports__RoFormerModel as RoFormerModel, __webpack_exports__RoFormerPreTrainedModel as RoFormerPreTrainedModel, __webpack_exports__RoFormerTokenizer as RoFormerTokenizer, __webpack_exports__RobertaForMaskedLM as RobertaForMaskedLM, __webpack_exports__RobertaForQuestionAnswering as RobertaForQuestionAnswering, __webpack_exports__RobertaForSequenceClassification as RobertaForSequenceClassification, __webpack_exports__RobertaForTokenClassification as RobertaForTokenClassification, __webpack_exports__RobertaModel as RobertaModel, __webpack_exports__RobertaPreTrainedModel as RobertaPreTrainedModel, __webpack_exports__RobertaTokenizer as RobertaTokenizer, __webpack_exports__SamImageProcessor as SamImageProcessor, __webpack_exports__SamImageSegmentationOutput as SamImageSegmentationOutput, __webpack_exports__SamModel as SamModel, __webpack_exports__SamPreTrainedModel as SamPreTrainedModel, __webpack_exports__SamProcessor as SamProcessor, __webpack_exports__SapiensForDepthEstimation as SapiensForDepthEstimation, __webpack_exports__SapiensForNormalEstimation as SapiensForNormalEstimation, __webpack_exports__SapiensForSemanticSegmentation as SapiensForSemanticSegmentation, __webpack_exports__SapiensPreTrainedModel as SapiensPreTrainedModel, __webpack_exports__SeamlessM4TFeatureExtractor as SeamlessM4TFeatureExtractor, __webpack_exports__SegformerFeatureExtractor as 
SegformerFeatureExtractor, __webpack_exports__SegformerForImageClassification as SegformerForImageClassification, __webpack_exports__SegformerForSemanticSegmentation as SegformerForSemanticSegmentation, __webpack_exports__SegformerImageProcessor as SegformerImageProcessor, __webpack_exports__SegformerModel as SegformerModel, __webpack_exports__SegformerPreTrainedModel as SegformerPreTrainedModel, __webpack_exports__Seq2SeqLMOutput as Seq2SeqLMOutput, __webpack_exports__SequenceClassifierOutput as SequenceClassifierOutput, __webpack_exports__SiglipImageProcessor as SiglipImageProcessor, __webpack_exports__SiglipModel as SiglipModel, __webpack_exports__SiglipPreTrainedModel as SiglipPreTrainedModel, __webpack_exports__SiglipTextModel as SiglipTextModel, __webpack_exports__SiglipTokenizer as SiglipTokenizer, __webpack_exports__SiglipVisionModel as SiglipVisionModel, __webpack_exports__SmolVLMForConditionalGeneration as SmolVLMForConditionalGeneration, __webpack_exports__SmolVLMImageProcessor as SmolVLMImageProcessor, __webpack_exports__SmolVLMProcessor as SmolVLMProcessor, __webpack_exports__SnacDecoderModel as SnacDecoderModel, __webpack_exports__SnacEncoderModel as SnacEncoderModel, __webpack_exports__SnacFeatureExtractor as SnacFeatureExtractor, __webpack_exports__SnacModel as SnacModel, __webpack_exports__SnacPreTrainedModel as SnacPreTrainedModel, __webpack_exports__SpeechT5FeatureExtractor as SpeechT5FeatureExtractor, __webpack_exports__SpeechT5ForSpeechToText as SpeechT5ForSpeechToText, __webpack_exports__SpeechT5ForTextToSpeech as SpeechT5ForTextToSpeech, __webpack_exports__SpeechT5HifiGan as SpeechT5HifiGan, __webpack_exports__SpeechT5Model as SpeechT5Model, __webpack_exports__SpeechT5PreTrainedModel as SpeechT5PreTrainedModel, __webpack_exports__SpeechT5Processor as SpeechT5Processor, __webpack_exports__SpeechT5Tokenizer as SpeechT5Tokenizer, __webpack_exports__SqueezeBertForMaskedLM as SqueezeBertForMaskedLM, 
__webpack_exports__SqueezeBertForQuestionAnswering as SqueezeBertForQuestionAnswering, __webpack_exports__SqueezeBertForSequenceClassification as SqueezeBertForSequenceClassification, __webpack_exports__SqueezeBertModel as SqueezeBertModel, __webpack_exports__SqueezeBertPreTrainedModel as SqueezeBertPreTrainedModel, __webpack_exports__SqueezeBertTokenizer as SqueezeBertTokenizer, __webpack_exports__StableLmForCausalLM as StableLmForCausalLM, __webpack_exports__StableLmModel as StableLmModel, __webpack_exports__StableLmPreTrainedModel as StableLmPreTrainedModel, __webpack_exports__Starcoder2ForCausalLM as Starcoder2ForCausalLM, __webpack_exports__Starcoder2Model as Starcoder2Model, __webpack_exports__Starcoder2PreTrainedModel as Starcoder2PreTrainedModel, __webpack_exports__StoppingCriteria as StoppingCriteria, __webpack_exports__StoppingCriteriaList as StoppingCriteriaList, __webpack_exports__StyleTextToSpeech2Model as StyleTextToSpeech2Model, __webpack_exports__StyleTextToSpeech2PreTrainedModel as StyleTextToSpeech2PreTrainedModel, __webpack_exports__SummarizationPipeline as SummarizationPipeline, __webpack_exports__SuppressTokensAtBeginLogitsProcessor as SuppressTokensAtBeginLogitsProcessor, __webpack_exports__Swin2SRForImageSuperResolution as Swin2SRForImageSuperResolution, __webpack_exports__Swin2SRImageProcessor as Swin2SRImageProcessor, __webpack_exports__Swin2SRModel as Swin2SRModel, __webpack_exports__Swin2SRPreTrainedModel as Swin2SRPreTrainedModel, __webpack_exports__SwinForImageClassification as SwinForImageClassification, __webpack_exports__SwinForSemanticSegmentation as SwinForSemanticSegmentation, __webpack_exports__SwinModel as SwinModel, __webpack_exports__SwinPreTrainedModel as SwinPreTrainedModel, __webpack_exports__T5ForConditionalGeneration as T5ForConditionalGeneration, __webpack_exports__T5Model as T5Model, __webpack_exports__T5PreTrainedModel as T5PreTrainedModel, __webpack_exports__T5Tokenizer as T5Tokenizer, 
__webpack_exports__TableTransformerForObjectDetection as TableTransformerForObjectDetection, __webpack_exports__TableTransformerModel as TableTransformerModel, __webpack_exports__TableTransformerObjectDetectionOutput as TableTransformerObjectDetectionOutput, __webpack_exports__TableTransformerPreTrainedModel as TableTransformerPreTrainedModel, __webpack_exports__TemperatureLogitsWarper as TemperatureLogitsWarper, __webpack_exports__Tensor as Tensor, __webpack_exports__Text2TextGenerationPipeline as Text2TextGenerationPipeline, __webpack_exports__TextClassificationPipeline as TextClassificationPipeline, __webpack_exports__TextGenerationPipeline as TextGenerationPipeline, __webpack_exports__TextStreamer as TextStreamer, __webpack_exports__TextToAudioPipeline as TextToAudioPipeline, __webpack_exports__TokenClassificationPipeline as TokenClassificationPipeline, __webpack_exports__TokenClassifierOutput as TokenClassifierOutput, __webpack_exports__TokenizerModel as TokenizerModel, __webpack_exports__TopKLogitsWarper as TopKLogitsWarper, __webpack_exports__TopPLogitsWarper as TopPLogitsWarper, __webpack_exports__TrOCRForCausalLM as TrOCRForCausalLM, __webpack_exports__TrOCRPreTrainedModel as TrOCRPreTrainedModel, __webpack_exports__TranslationPipeline as TranslationPipeline, __webpack_exports__UltravoxModel as UltravoxModel, __webpack_exports__UltravoxPreTrainedModel as UltravoxPreTrainedModel, __webpack_exports__UltravoxProcessor as UltravoxProcessor, __webpack_exports__UniSpeechForCTC as UniSpeechForCTC, __webpack_exports__UniSpeechForSequenceClassification as UniSpeechForSequenceClassification, __webpack_exports__UniSpeechModel as UniSpeechModel, __webpack_exports__UniSpeechPreTrainedModel as UniSpeechPreTrainedModel, __webpack_exports__UniSpeechSatForAudioFrameClassification as UniSpeechSatForAudioFrameClassification, __webpack_exports__UniSpeechSatForCTC as UniSpeechSatForCTC, __webpack_exports__UniSpeechSatForSequenceClassification as 
UniSpeechSatForSequenceClassification, __webpack_exports__UniSpeechSatModel as UniSpeechSatModel, __webpack_exports__UniSpeechSatPreTrainedModel as UniSpeechSatPreTrainedModel, __webpack_exports__VLChatProcessor as VLChatProcessor, __webpack_exports__VLMImageProcessor as VLMImageProcessor, __webpack_exports__ViTFeatureExtractor as ViTFeatureExtractor, __webpack_exports__ViTForImageClassification as ViTForImageClassification, __webpack_exports__ViTImageProcessor as ViTImageProcessor, __webpack_exports__ViTMAEModel as ViTMAEModel, __webpack_exports__ViTMAEPreTrainedModel as ViTMAEPreTrainedModel, __webpack_exports__ViTMSNForImageClassification as ViTMSNForImageClassification, __webpack_exports__ViTMSNModel as ViTMSNModel, __webpack_exports__ViTMSNPreTrainedModel as ViTMSNPreTrainedModel, __webpack_exports__ViTModel as ViTModel, __webpack_exports__ViTPreTrainedModel as ViTPreTrainedModel, __webpack_exports__VisionEncoderDecoderModel as VisionEncoderDecoderModel, __webpack_exports__VitMatteForImageMatting as VitMatteForImageMatting, __webpack_exports__VitMatteImageProcessor as VitMatteImageProcessor, __webpack_exports__VitMattePreTrainedModel as VitMattePreTrainedModel, __webpack_exports__VitPoseForPoseEstimation as VitPoseForPoseEstimation, __webpack_exports__VitPoseImageProcessor as VitPoseImageProcessor, __webpack_exports__VitPosePreTrainedModel as VitPosePreTrainedModel, __webpack_exports__VitsModel as VitsModel, __webpack_exports__VitsModelOutput as VitsModelOutput, __webpack_exports__VitsPreTrainedModel as VitsPreTrainedModel, __webpack_exports__VitsTokenizer as VitsTokenizer, __webpack_exports__Wav2Vec2BertForCTC as Wav2Vec2BertForCTC, __webpack_exports__Wav2Vec2BertForSequenceClassification as Wav2Vec2BertForSequenceClassification, __webpack_exports__Wav2Vec2BertModel as Wav2Vec2BertModel, __webpack_exports__Wav2Vec2BertPreTrainedModel as Wav2Vec2BertPreTrainedModel, __webpack_exports__Wav2Vec2CTCTokenizer as Wav2Vec2CTCTokenizer, 
__webpack_exports__Wav2Vec2FeatureExtractor as Wav2Vec2FeatureExtractor, __webpack_exports__Wav2Vec2ForAudioFrameClassification as Wav2Vec2ForAudioFrameClassification, __webpack_exports__Wav2Vec2ForCTC as Wav2Vec2ForCTC, __webpack_exports__Wav2Vec2ForSequenceClassification as Wav2Vec2ForSequenceClassification, __webpack_exports__Wav2Vec2Model as Wav2Vec2Model, __webpack_exports__Wav2Vec2PreTrainedModel as Wav2Vec2PreTrainedModel, __webpack_exports__Wav2Vec2Processor as Wav2Vec2Processor, __webpack_exports__Wav2Vec2ProcessorWithLM as Wav2Vec2ProcessorWithLM, __webpack_exports__WavLMForAudioFrameClassification as WavLMForAudioFrameClassification, __webpack_exports__WavLMForCTC as WavLMForCTC, __webpack_exports__WavLMForSequenceClassification as WavLMForSequenceClassification, __webpack_exports__WavLMForXVector as WavLMForXVector, __webpack_exports__WavLMModel as WavLMModel, __webpack_exports__WavLMPreTrainedModel as WavLMPreTrainedModel, __webpack_exports__WeSpeakerFeatureExtractor as WeSpeakerFeatureExtractor, __webpack_exports__WeSpeakerResNetModel as WeSpeakerResNetModel, __webpack_exports__WeSpeakerResNetPreTrainedModel as WeSpeakerResNetPreTrainedModel, __webpack_exports__WhisperFeatureExtractor as WhisperFeatureExtractor, __webpack_exports__WhisperForConditionalGeneration as WhisperForConditionalGeneration, __webpack_exports__WhisperModel as WhisperModel, __webpack_exports__WhisperPreTrainedModel as WhisperPreTrainedModel, __webpack_exports__WhisperProcessor as WhisperProcessor, __webpack_exports__WhisperTextStreamer as WhisperTextStreamer, __webpack_exports__WhisperTimeStampLogitsProcessor as WhisperTimeStampLogitsProcessor, __webpack_exports__WhisperTokenizer as WhisperTokenizer, __webpack_exports__XLMForQuestionAnswering as XLMForQuestionAnswering, __webpack_exports__XLMForSequenceClassification as XLMForSequenceClassification, __webpack_exports__XLMForTokenClassification as XLMForTokenClassification, __webpack_exports__XLMModel as XLMModel, 
__webpack_exports__XLMPreTrainedModel as XLMPreTrainedModel, __webpack_exports__XLMRobertaForMaskedLM as XLMRobertaForMaskedLM, __webpack_exports__XLMRobertaForQuestionAnswering as XLMRobertaForQuestionAnswering, __webpack_exports__XLMRobertaForSequenceClassification as XLMRobertaForSequenceClassification, __webpack_exports__XLMRobertaForTokenClassification as XLMRobertaForTokenClassification, __webpack_exports__XLMRobertaModel as XLMRobertaModel, __webpack_exports__XLMRobertaPreTrainedModel as XLMRobertaPreTrainedModel, __webpack_exports__XLMRobertaTokenizer as XLMRobertaTokenizer, __webpack_exports__XLMTokenizer as XLMTokenizer, __webpack_exports__XLMWithLMHeadModel as XLMWithLMHeadModel, __webpack_exports__XVectorOutput as XVectorOutput, __webpack_exports__YolosFeatureExtractor as YolosFeatureExtractor, __webpack_exports__YolosForObjectDetection as YolosForObjectDetection, __webpack_exports__YolosImageProcessor as YolosImageProcessor, __webpack_exports__YolosModel as YolosModel, __webpack_exports__YolosObjectDetectionOutput as YolosObjectDetectionOutput, __webpack_exports__YolosPreTrainedModel as YolosPreTrainedModel, __webpack_exports__ZeroShotAudioClassificationPipeline as ZeroShotAudioClassificationPipeline, __webpack_exports__ZeroShotClassificationPipeline as ZeroShotClassificationPipeline, __webpack_exports__ZeroShotImageClassificationPipeline as ZeroShotImageClassificationPipeline, __webpack_exports__ZeroShotObjectDetectionPipeline as ZeroShotObjectDetectionPipeline, __webpack_exports__bankers_round as bankers_round, __webpack_exports__cat as cat, __webpack_exports__cos_sim as cos_sim, __webpack_exports__dot as dot, __webpack_exports__dynamic_time_warping as dynamic_time_warping, __webpack_exports__env as env, __webpack_exports__full as full, __webpack_exports__full_like as full_like, __webpack_exports__getKeyValueShapes as getKeyValueShapes, __webpack_exports__hamming as hamming, __webpack_exports__hanning as hanning, __webpack_exports__interpolate as 
interpolate, __webpack_exports__interpolate_4d as interpolate_4d, __webpack_exports__interpolate_data as interpolate_data, __webpack_exports__is_chinese_char as is_chinese_char, __webpack_exports__layer_norm as layer_norm, __webpack_exports__load_image as load_image, __webpack_exports__load_video as load_video, __webpack_exports__log_softmax as log_softmax, __webpack_exports__magnitude as magnitude, __webpack_exports__matmul as matmul, __webpack_exports__max as max, __webpack_exports__mean as mean, __webpack_exports__mean_pooling as mean_pooling, __webpack_exports__medianFilter as medianFilter, __webpack_exports__mel_filter_bank as mel_filter_bank, __webpack_exports__min as min, __webpack_exports__ones as ones, __webpack_exports__ones_like as ones_like, __webpack_exports__permute as permute, __webpack_exports__permute_data as permute_data, __webpack_exports__pipeline as pipeline, __webpack_exports__quantize_embeddings as quantize_embeddings, __webpack_exports__rand as rand, __webpack_exports__read_audio as read_audio, __webpack_exports__rfft as rfft, __webpack_exports__round as round, __webpack_exports__slice as slice, __webpack_exports__softmax as softmax, __webpack_exports__spectrogram as spectrogram, __webpack_exports__stack as stack, __webpack_exports__std_mean as std_mean, __webpack_exports__topk as topk, __webpack_exports__window_function as window_function, __webpack_exports__zeros as zeros, __webpack_exports__zeros_like as zeros_like };
35712
36039
 
35713
36040
  //# sourceMappingURL=transformers.web.js.map