@huggingface/transformers 3.4.0 → 3.4.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. package/README.md +6 -2
  2. package/dist/transformers.js +315 -152
  3. package/dist/transformers.js.map +1 -1
  4. package/dist/transformers.min.js +1 -1
  5. package/dist/transformers.min.js.map +1 -1
  6. package/dist/transformers.node.cjs +303 -151
  7. package/dist/transformers.node.cjs.map +1 -1
  8. package/dist/transformers.node.min.cjs +1 -1
  9. package/dist/transformers.node.min.cjs.map +1 -1
  10. package/dist/transformers.node.min.mjs +1 -1
  11. package/dist/transformers.node.min.mjs.map +1 -1
  12. package/dist/transformers.node.mjs +315 -152
  13. package/dist/transformers.node.mjs.map +1 -1
  14. package/dist/transformers.web.js +315 -152
  15. package/dist/transformers.web.js.map +1 -1
  16. package/dist/transformers.web.min.js +1 -1
  17. package/dist/transformers.web.min.js.map +1 -1
  18. package/package.json +1 -1
  19. package/src/configs.js +2 -0
  20. package/src/env.js +1 -1
  21. package/src/models/feature_extractors.js +1 -0
  22. package/src/models/snac/feature_extraction_snac.js +3 -0
  23. package/src/models.js +90 -2
  24. package/src/pipelines.js +140 -135
  25. package/src/utils/image.js +9 -1
  26. package/src/utils/tensor.js +6 -2
  27. package/types/configs.d.ts.map +1 -1
  28. package/types/models/feature_extractors.d.ts +1 -0
  29. package/types/models/snac/feature_extraction_snac.d.ts +4 -0
  30. package/types/models/snac/feature_extraction_snac.d.ts.map +1 -0
  31. package/types/models.d.ts +48 -0
  32. package/types/models.d.ts.map +1 -1
  33. package/types/pipelines.d.ts +2 -2
  34. package/types/pipelines.d.ts.map +1 -1
  35. package/types/tsconfig.tsbuildinfo +1 -1
  36. package/types/utils/image.d.ts +2 -2
  37. package/types/utils/image.d.ts.map +1 -1
  38. package/types/utils/tensor.d.ts.map +1 -1
@@ -3692,6 +3692,7 @@ function getNormalizedConfig(config) {
3692
3692
  // Sub-configs
3693
3693
  case 'llava':
3694
3694
  case 'paligemma':
3695
+ case 'gemma3':
3695
3696
  case 'florence2':
3696
3697
  case 'llava_onevision':
3697
3698
  case 'idefics3':
@@ -3751,6 +3752,7 @@ function getNormalizedConfig(config) {
3751
3752
  break;
3752
3753
  case 'gemma':
3753
3754
  case 'gemma2':
3755
+ case 'gemma3_text':
3754
3756
  case 'glm':
3755
3757
  case 'helium':
3756
3758
  mapping['num_heads'] = 'num_key_value_heads';
@@ -4081,7 +4083,7 @@ __webpack_require__.r(__webpack_exports__);
4081
4083
 
4082
4084
 
4083
4085
 
4084
- const VERSION = '3.4.0';
4086
+ const VERSION = '3.4.1';
4085
4087
 
4086
4088
  // Check if various APIs are available (depends on environment)
4087
4089
  const IS_BROWSER_ENV = typeof window !== "undefined" && typeof window.document !== "undefined";
@@ -6219,6 +6221,9 @@ __webpack_require__.r(__webpack_exports__);
6219
6221
  /* harmony export */ Gemma2ForCausalLM: () => (/* binding */ Gemma2ForCausalLM),
6220
6222
  /* harmony export */ Gemma2Model: () => (/* binding */ Gemma2Model),
6221
6223
  /* harmony export */ Gemma2PreTrainedModel: () => (/* binding */ Gemma2PreTrainedModel),
6224
+ /* harmony export */ Gemma3ForCausalLM: () => (/* binding */ Gemma3ForCausalLM),
6225
+ /* harmony export */ Gemma3Model: () => (/* binding */ Gemma3Model),
6226
+ /* harmony export */ Gemma3PreTrainedModel: () => (/* binding */ Gemma3PreTrainedModel),
6222
6227
  /* harmony export */ GemmaForCausalLM: () => (/* binding */ GemmaForCausalLM),
6223
6228
  /* harmony export */ GemmaModel: () => (/* binding */ GemmaModel),
6224
6229
  /* harmony export */ GemmaPreTrainedModel: () => (/* binding */ GemmaPreTrainedModel),
@@ -6289,6 +6294,10 @@ __webpack_require__.r(__webpack_exports__);
6289
6294
  /* harmony export */ MaskFormerModel: () => (/* binding */ MaskFormerModel),
6290
6295
  /* harmony export */ MaskFormerPreTrainedModel: () => (/* binding */ MaskFormerPreTrainedModel),
6291
6296
  /* harmony export */ MaskedLMOutput: () => (/* binding */ MaskedLMOutput),
6297
+ /* harmony export */ Metric3DForDepthEstimation: () => (/* binding */ Metric3DForDepthEstimation),
6298
+ /* harmony export */ Metric3DPreTrainedModel: () => (/* binding */ Metric3DPreTrainedModel),
6299
+ /* harmony export */ Metric3Dv2ForDepthEstimation: () => (/* binding */ Metric3Dv2ForDepthEstimation),
6300
+ /* harmony export */ Metric3Dv2PreTrainedModel: () => (/* binding */ Metric3Dv2PreTrainedModel),
6292
6301
  /* harmony export */ MgpstrForSceneTextRecognition: () => (/* binding */ MgpstrForSceneTextRecognition),
6293
6302
  /* harmony export */ MgpstrModelOutput: () => (/* binding */ MgpstrModelOutput),
6294
6303
  /* harmony export */ MgpstrPreTrainedModel: () => (/* binding */ MgpstrPreTrainedModel),
@@ -6437,6 +6446,10 @@ __webpack_require__.r(__webpack_exports__);
6437
6446
  /* harmony export */ SiglipTextModel: () => (/* binding */ SiglipTextModel),
6438
6447
  /* harmony export */ SiglipVisionModel: () => (/* binding */ SiglipVisionModel),
6439
6448
  /* harmony export */ SmolVLMForConditionalGeneration: () => (/* binding */ SmolVLMForConditionalGeneration),
6449
+ /* harmony export */ SnacDecoderModel: () => (/* binding */ SnacDecoderModel),
6450
+ /* harmony export */ SnacEncoderModel: () => (/* binding */ SnacEncoderModel),
6451
+ /* harmony export */ SnacModel: () => (/* binding */ SnacModel),
6452
+ /* harmony export */ SnacPreTrainedModel: () => (/* binding */ SnacPreTrainedModel),
6440
6453
  /* harmony export */ SpeechT5ForSpeechToText: () => (/* binding */ SpeechT5ForSpeechToText),
6441
6454
  /* harmony export */ SpeechT5ForTextToSpeech: () => (/* binding */ SpeechT5ForTextToSpeech),
6442
6455
  /* harmony export */ SpeechT5HifiGan: () => (/* binding */ SpeechT5HifiGan),
@@ -7095,8 +7108,8 @@ async function decoderForward(self, model_inputs, is_encoder_decoder = false) {
7095
7108
  new_model_inputs.use_cache_branch = boolTensor(!!past_key_values);
7096
7109
  }
7097
7110
  if (session.inputNames.includes('position_ids') && new_model_inputs.attention_mask && !new_model_inputs.position_ids) {
7098
- // NOTE: Handle a special case for paligemma models, where positions are 1-indexed
7099
- const start_index = self.config.model_type === 'paligemma' ? 1 : 0;
7111
+ // NOTE: Handle a special case for paligemma/gemma3 models, where positions are 1-indexed
7112
+ const start_index = ['paligemma', 'gemma3_text', 'gemma3'].includes(self.config.model_type) ? 1 : 0;
7100
7113
  new_model_inputs.position_ids = createPositionIds(new_model_inputs, past_key_values, start_index);
7101
7114
  }
7102
7115
 
@@ -11021,6 +11034,23 @@ class Gemma2Model extends Gemma2PreTrainedModel { }
11021
11034
  class Gemma2ForCausalLM extends Gemma2PreTrainedModel { }
11022
11035
  //////////////////////////////////////////////////
11023
11036
 
11037
+
11038
+ //////////////////////////////////////////////////
11039
+ // Gemma3 models
11040
+
11041
+ /**
11042
+ * The bare Gemma3 Model outputting raw hidden-states without any specific head on top.
11043
+ */
11044
+ class Gemma3PreTrainedModel extends PreTrainedModel { }
11045
+ /**
11046
+ * The bare Gemma3 Model outputting raw hidden-states without any specific head on top.
11047
+ */
11048
+ class Gemma3Model extends Gemma3PreTrainedModel { }
11049
+
11050
+ class Gemma3ForCausalLM extends Gemma3PreTrainedModel { }
11051
+ //////////////////////////////////////////////////
11052
+
11053
+
11024
11054
  //////////////////////////////////////////////////
11025
11055
  class OpenELMPreTrainedModel extends PreTrainedModel { }
11026
11056
  class OpenELMModel extends OpenELMPreTrainedModel { }
@@ -11873,6 +11903,16 @@ class DepthProPreTrainedModel extends PreTrainedModel { }
11873
11903
  class DepthProForDepthEstimation extends DepthProPreTrainedModel { }
11874
11904
  //////////////////////////////////////////////////
11875
11905
 
11906
+ //////////////////////////////////////////////////
11907
+ class Metric3DPreTrainedModel extends PreTrainedModel { }
11908
+ class Metric3DForDepthEstimation extends Metric3DPreTrainedModel { }
11909
+ //////////////////////////////////////////////////
11910
+
11911
+ //////////////////////////////////////////////////
11912
+ class Metric3Dv2PreTrainedModel extends PreTrainedModel { }
11913
+ class Metric3Dv2ForDepthEstimation extends Metric3Dv2PreTrainedModel { }
11914
+ //////////////////////////////////////////////////
11915
+
11876
11916
  //////////////////////////////////////////////////
11877
11917
  class MaskFormerPreTrainedModel extends PreTrainedModel { }
11878
11918
  class MaskFormerModel extends MaskFormerPreTrainedModel { }
@@ -13788,6 +13828,60 @@ class DacDecoderModel extends DacPreTrainedModel {
13788
13828
  }
13789
13829
  //////////////////////////////////////////////////
13790
13830
 
13831
+
13832
+ //////////////////////////////////////////////////
13833
+ // Snac models
13834
+ class SnacPreTrainedModel extends PreTrainedModel {
13835
+ main_input_name = 'input_values';
13836
+ forward_params = ['input_values'];
13837
+ }
13838
+
13839
+ /**
13840
+ * The SNAC (Multi-Scale Neural Audio Codec) model.
13841
+ */
13842
+ class SnacModel extends SnacPreTrainedModel {
13843
+ /**
13844
+ * Encodes the input audio waveform into discrete codes.
13845
+ * @param {Object} inputs Model inputs
13846
+ * @param {Tensor} [inputs.input_values] Float values of the input audio waveform, of shape `(batch_size, channels, sequence_length)`).
13847
+ * @returns {Promise<Record<string, Tensor>>} The output tensors of shape `(batch_size, num_codebooks, sequence_length)`.
13848
+ */
13849
+ async encode(inputs) {
13850
+ return await sessionRun(this.sessions['encoder_model'], inputs);
13851
+ }
13852
+
13853
+ /**
13854
+ * Decodes the given frames into an output audio waveform.
13855
+ * @param {Record<string, Tensor>} inputs The encoded audio codes.
13856
+ * @returns {Promise<{audio_values: Tensor}>} The output tensor of shape `(batch_size, num_channels, sequence_length)`.
13857
+ */
13858
+ async decode(inputs) {
13859
+ return await sessionRun(this.sessions['decoder_model'], inputs);
13860
+ }
13861
+ }
13862
+
13863
+ class SnacEncoderModel extends SnacPreTrainedModel {
13864
+ /** @type {typeof PreTrainedModel.from_pretrained} */
13865
+ static async from_pretrained(pretrained_model_name_or_path, options = {}) {
13866
+ return super.from_pretrained(pretrained_model_name_or_path, {
13867
+ ...options,
13868
+ // Update default model file name if not provided
13869
+ model_file_name: options.model_file_name ?? 'encoder_model',
13870
+ });
13871
+ }
13872
+ }
13873
+ class SnacDecoderModel extends SnacPreTrainedModel {
13874
+ /** @type {typeof PreTrainedModel.from_pretrained} */
13875
+ static async from_pretrained(pretrained_model_name_or_path, options = {}) {
13876
+ return super.from_pretrained(pretrained_model_name_or_path, {
13877
+ ...options,
13878
+ // Update default model file name if not provided
13879
+ model_file_name: options.model_file_name ?? 'decoder_model',
13880
+ });
13881
+ }
13882
+ }
13883
+ //////////////////////////////////////////////////
13884
+
13791
13885
  //////////////////////////////////////////////////
13792
13886
  // AutoModels, used to simplify construction of PreTrainedModels
13793
13887
  // (uses config to instantiate correct class)
@@ -13969,6 +14063,7 @@ const MODEL_MAPPING_NAMES_ENCODER_DECODER = new Map([
13969
14063
  const MODEL_MAPPING_NAMES_AUTO_ENCODER = new Map([
13970
14064
  ['mimi', ['MimiModel', MimiModel]],
13971
14065
  ['dac', ['DacModel', DacModel]],
14066
+ ['snac', ['SnacModel', SnacModel]],
13972
14067
  ]);
13973
14068
 
13974
14069
  const MODEL_MAPPING_NAMES_DECODER_ONLY = new Map([
@@ -13989,6 +14084,7 @@ const MODEL_MAPPING_NAMES_DECODER_ONLY = new Map([
13989
14084
  ['cohere', ['CohereModel', CohereModel]],
13990
14085
  ['gemma', ['GemmaModel', GemmaModel]],
13991
14086
  ['gemma2', ['Gemma2Model', Gemma2Model]],
14087
+ ['gemma3_text', ['Gemma3Model', Gemma3Model]],
13992
14088
  ['helium', ['HeliumModel', HeliumModel]],
13993
14089
  ['glm', ['GlmModel', GlmModel]],
13994
14090
  ['openelm', ['OpenELMModel', OpenELMModel]],
@@ -14088,6 +14184,7 @@ const MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = new Map([
14088
14184
  ['cohere', ['CohereForCausalLM', CohereForCausalLM]],
14089
14185
  ['gemma', ['GemmaForCausalLM', GemmaForCausalLM]],
14090
14186
  ['gemma2', ['Gemma2ForCausalLM', Gemma2ForCausalLM]],
14187
+ ['gemma3_text', ['Gemma3ForCausalLM', Gemma3ForCausalLM]],
14091
14188
  ['helium', ['HeliumForCausalLM', HeliumForCausalLM]],
14092
14189
  ['glm', ['GlmForCausalLM', GlmForCausalLM]],
14093
14190
  ['openelm', ['OpenELMForCausalLM', OpenELMForCausalLM]],
@@ -14289,6 +14386,8 @@ const MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES = new Map([
14289
14386
  ['glpn', ['GLPNForDepthEstimation', GLPNForDepthEstimation]],
14290
14387
  ['sapiens', ['SapiensForDepthEstimation', SapiensForDepthEstimation]],
14291
14388
  ['depth_pro', ['DepthProForDepthEstimation', DepthProForDepthEstimation]],
14389
+ ['metric3d', ['Metric3DForDepthEstimation', Metric3DForDepthEstimation]],
14390
+ ['metric3dv2', ['Metric3Dv2ForDepthEstimation', Metric3Dv2ForDepthEstimation]],
14292
14391
  ])
14293
14392
 
14294
14393
  const MODEL_FOR_NORMAL_ESTIMATION_MAPPING_NAMES = new Map([
@@ -14374,6 +14473,8 @@ const CUSTOM_MAPPING = [
14374
14473
  ['DacDecoderModel', DacDecoderModel, MODEL_TYPES.EncoderOnly],
14375
14474
  ['MimiEncoderModel', MimiEncoderModel, MODEL_TYPES.EncoderOnly],
14376
14475
  ['MimiDecoderModel', MimiDecoderModel, MODEL_TYPES.EncoderOnly],
14476
+ ['SnacEncoderModel', SnacEncoderModel, MODEL_TYPES.EncoderOnly],
14477
+ ['SnacDecoderModel', SnacDecoderModel, MODEL_TYPES.EncoderOnly],
14377
14478
  ]
14378
14479
  for (const [name, model, type] of CUSTOM_MAPPING) {
14379
14480
  MODEL_TYPE_MAPPING.set(name, type);
@@ -15672,14 +15773,15 @@ __webpack_require__.r(__webpack_exports__);
15672
15773
  /* harmony export */ ClapFeatureExtractor: () => (/* reexport safe */ _clap_feature_extraction_clap_js__WEBPACK_IMPORTED_MODULE_2__.ClapFeatureExtractor),
15673
15774
  /* harmony export */ DacFeatureExtractor: () => (/* reexport safe */ _dac_feature_extraction_dac_js__WEBPACK_IMPORTED_MODULE_3__.DacFeatureExtractor),
15674
15775
  /* harmony export */ EncodecFeatureExtractor: () => (/* reexport safe */ _encodec_feature_extraction_encodec_js__WEBPACK_IMPORTED_MODULE_1__.EncodecFeatureExtractor),
15675
- /* harmony export */ ImageFeatureExtractor: () => (/* reexport safe */ _base_image_processors_utils_js__WEBPACK_IMPORTED_MODULE_11__.ImageProcessor),
15776
+ /* harmony export */ ImageFeatureExtractor: () => (/* reexport safe */ _base_image_processors_utils_js__WEBPACK_IMPORTED_MODULE_12__.ImageProcessor),
15676
15777
  /* harmony export */ MoonshineFeatureExtractor: () => (/* reexport safe */ _moonshine_feature_extraction_moonshine_js__WEBPACK_IMPORTED_MODULE_4__.MoonshineFeatureExtractor),
15677
15778
  /* harmony export */ PyAnnoteFeatureExtractor: () => (/* reexport safe */ _pyannote_feature_extraction_pyannote_js__WEBPACK_IMPORTED_MODULE_5__.PyAnnoteFeatureExtractor),
15678
15779
  /* harmony export */ SeamlessM4TFeatureExtractor: () => (/* reexport safe */ _seamless_m4t_feature_extraction_seamless_m4t_js__WEBPACK_IMPORTED_MODULE_6__.SeamlessM4TFeatureExtractor),
15679
- /* harmony export */ SpeechT5FeatureExtractor: () => (/* reexport safe */ _speecht5_feature_extraction_speecht5_js__WEBPACK_IMPORTED_MODULE_7__.SpeechT5FeatureExtractor),
15680
- /* harmony export */ Wav2Vec2FeatureExtractor: () => (/* reexport safe */ _wav2vec2_feature_extraction_wav2vec2_js__WEBPACK_IMPORTED_MODULE_8__.Wav2Vec2FeatureExtractor),
15681
- /* harmony export */ WeSpeakerFeatureExtractor: () => (/* reexport safe */ _wespeaker_feature_extraction_wespeaker_js__WEBPACK_IMPORTED_MODULE_9__.WeSpeakerFeatureExtractor),
15682
- /* harmony export */ WhisperFeatureExtractor: () => (/* reexport safe */ _whisper_feature_extraction_whisper_js__WEBPACK_IMPORTED_MODULE_10__.WhisperFeatureExtractor)
15780
+ /* harmony export */ SnacFeatureExtractor: () => (/* reexport safe */ _snac_feature_extraction_snac_js__WEBPACK_IMPORTED_MODULE_7__.SnacFeatureExtractor),
15781
+ /* harmony export */ SpeechT5FeatureExtractor: () => (/* reexport safe */ _speecht5_feature_extraction_speecht5_js__WEBPACK_IMPORTED_MODULE_8__.SpeechT5FeatureExtractor),
15782
+ /* harmony export */ Wav2Vec2FeatureExtractor: () => (/* reexport safe */ _wav2vec2_feature_extraction_wav2vec2_js__WEBPACK_IMPORTED_MODULE_9__.Wav2Vec2FeatureExtractor),
15783
+ /* harmony export */ WeSpeakerFeatureExtractor: () => (/* reexport safe */ _wespeaker_feature_extraction_wespeaker_js__WEBPACK_IMPORTED_MODULE_10__.WeSpeakerFeatureExtractor),
15784
+ /* harmony export */ WhisperFeatureExtractor: () => (/* reexport safe */ _whisper_feature_extraction_whisper_js__WEBPACK_IMPORTED_MODULE_11__.WhisperFeatureExtractor)
15683
15785
  /* harmony export */ });
15684
15786
  /* harmony import */ var _audio_spectrogram_transformer_feature_extraction_audio_spectrogram_transformer_js__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! ./audio_spectrogram_transformer/feature_extraction_audio_spectrogram_transformer.js */ "./src/models/audio_spectrogram_transformer/feature_extraction_audio_spectrogram_transformer.js");
15685
15787
  /* harmony import */ var _encodec_feature_extraction_encodec_js__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(/*! ./encodec/feature_extraction_encodec.js */ "./src/models/encodec/feature_extraction_encodec.js");
@@ -15688,11 +15790,13 @@ __webpack_require__.r(__webpack_exports__);
15688
15790
  /* harmony import */ var _moonshine_feature_extraction_moonshine_js__WEBPACK_IMPORTED_MODULE_4__ = __webpack_require__(/*! ./moonshine/feature_extraction_moonshine.js */ "./src/models/moonshine/feature_extraction_moonshine.js");
15689
15791
  /* harmony import */ var _pyannote_feature_extraction_pyannote_js__WEBPACK_IMPORTED_MODULE_5__ = __webpack_require__(/*! ./pyannote/feature_extraction_pyannote.js */ "./src/models/pyannote/feature_extraction_pyannote.js");
15690
15792
  /* harmony import */ var _seamless_m4t_feature_extraction_seamless_m4t_js__WEBPACK_IMPORTED_MODULE_6__ = __webpack_require__(/*! ./seamless_m4t/feature_extraction_seamless_m4t.js */ "./src/models/seamless_m4t/feature_extraction_seamless_m4t.js");
15691
- /* harmony import */ var _speecht5_feature_extraction_speecht5_js__WEBPACK_IMPORTED_MODULE_7__ = __webpack_require__(/*! ./speecht5/feature_extraction_speecht5.js */ "./src/models/speecht5/feature_extraction_speecht5.js");
15692
- /* harmony import */ var _wav2vec2_feature_extraction_wav2vec2_js__WEBPACK_IMPORTED_MODULE_8__ = __webpack_require__(/*! ./wav2vec2/feature_extraction_wav2vec2.js */ "./src/models/wav2vec2/feature_extraction_wav2vec2.js");
15693
- /* harmony import */ var _wespeaker_feature_extraction_wespeaker_js__WEBPACK_IMPORTED_MODULE_9__ = __webpack_require__(/*! ./wespeaker/feature_extraction_wespeaker.js */ "./src/models/wespeaker/feature_extraction_wespeaker.js");
15694
- /* harmony import */ var _whisper_feature_extraction_whisper_js__WEBPACK_IMPORTED_MODULE_10__ = __webpack_require__(/*! ./whisper/feature_extraction_whisper.js */ "./src/models/whisper/feature_extraction_whisper.js");
15695
- /* harmony import */ var _base_image_processors_utils_js__WEBPACK_IMPORTED_MODULE_11__ = __webpack_require__(/*! ../base/image_processors_utils.js */ "./src/base/image_processors_utils.js");
15793
+ /* harmony import */ var _snac_feature_extraction_snac_js__WEBPACK_IMPORTED_MODULE_7__ = __webpack_require__(/*! ./snac/feature_extraction_snac.js */ "./src/models/snac/feature_extraction_snac.js");
15794
+ /* harmony import */ var _speecht5_feature_extraction_speecht5_js__WEBPACK_IMPORTED_MODULE_8__ = __webpack_require__(/*! ./speecht5/feature_extraction_speecht5.js */ "./src/models/speecht5/feature_extraction_speecht5.js");
15795
+ /* harmony import */ var _wav2vec2_feature_extraction_wav2vec2_js__WEBPACK_IMPORTED_MODULE_9__ = __webpack_require__(/*! ./wav2vec2/feature_extraction_wav2vec2.js */ "./src/models/wav2vec2/feature_extraction_wav2vec2.js");
15796
+ /* harmony import */ var _wespeaker_feature_extraction_wespeaker_js__WEBPACK_IMPORTED_MODULE_10__ = __webpack_require__(/*! ./wespeaker/feature_extraction_wespeaker.js */ "./src/models/wespeaker/feature_extraction_wespeaker.js");
15797
+ /* harmony import */ var _whisper_feature_extraction_whisper_js__WEBPACK_IMPORTED_MODULE_11__ = __webpack_require__(/*! ./whisper/feature_extraction_whisper.js */ "./src/models/whisper/feature_extraction_whisper.js");
15798
+ /* harmony import */ var _base_image_processors_utils_js__WEBPACK_IMPORTED_MODULE_12__ = __webpack_require__(/*! ../base/image_processors_utils.js */ "./src/base/image_processors_utils.js");
15799
+
15696
15800
 
15697
15801
 
15698
15802
 
@@ -18682,6 +18786,24 @@ __webpack_require__.r(__webpack_exports__);
18682
18786
 
18683
18787
 
18684
18788
 
18789
+ /***/ }),
18790
+
18791
+ /***/ "./src/models/snac/feature_extraction_snac.js":
18792
+ /*!****************************************************!*\
18793
+ !*** ./src/models/snac/feature_extraction_snac.js ***!
18794
+ \****************************************************/
18795
+ /***/ ((__unused_webpack___webpack_module__, __webpack_exports__, __webpack_require__) => {
18796
+
18797
+ __webpack_require__.r(__webpack_exports__);
18798
+ /* harmony export */ __webpack_require__.d(__webpack_exports__, {
18799
+ /* harmony export */ SnacFeatureExtractor: () => (/* binding */ SnacFeatureExtractor)
18800
+ /* harmony export */ });
18801
+ /* harmony import */ var _dac_feature_extraction_dac_js__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! ../dac/feature_extraction_dac.js */ "./src/models/dac/feature_extraction_dac.js");
18802
+
18803
+
18804
+ class SnacFeatureExtractor extends _dac_feature_extraction_dac_js__WEBPACK_IMPORTED_MODULE_0__.DacFeatureExtractor { }
18805
+
18806
+
18685
18807
  /***/ }),
18686
18808
 
18687
18809
  /***/ "./src/models/speecht5/feature_extraction_speecht5.js":
@@ -19921,16 +20043,16 @@ __webpack_require__.r(__webpack_exports__);
19921
20043
  /* harmony import */ var _utils_image_js__WEBPACK_IMPORTED_MODULE_9__ = __webpack_require__(/*! ./utils/image.js */ "./src/utils/image.js");
19922
20044
  /**
19923
20045
  * @file Pipelines provide a high-level, easy to use, API for running machine learning models.
19924
- *
20046
+ *
19925
20047
  * **Example:** Instantiate pipeline using the `pipeline` function.
19926
20048
  * ```javascript
19927
20049
  * import { pipeline } from '@huggingface/transformers';
19928
- *
20050
+ *
19929
20051
  * const classifier = await pipeline('sentiment-analysis');
19930
20052
  * const output = await classifier('I love transformers!');
19931
20053
  * // [{'label': 'POSITIVE', 'score': 0.999817686}]
19932
20054
  * ```
19933
- *
20055
+ *
19934
20056
  * @module pipelines
19935
20057
  */
19936
20058
 
@@ -19949,7 +20071,7 @@ __webpack_require__.r(__webpack_exports__);
19949
20071
 
19950
20072
 
19951
20073
  /**
19952
- * @typedef {string | RawImage | URL} ImageInput
20074
+ * @typedef {string | RawImage | URL | Blob | HTMLCanvasElement | OffscreenCanvas} ImageInput
19953
20075
  * @typedef {ImageInput|ImageInput[]} ImagePipelineInputs
19954
20076
  */
19955
20077
 
@@ -20023,7 +20145,7 @@ function get_bounding_box(box, asInteger) {
20023
20145
  /**
20024
20146
  * @callback DisposeType Disposes the item.
20025
20147
  * @returns {Promise<void>} A promise that resolves when the item has been disposed.
20026
- *
20148
+ *
20027
20149
  * @typedef {Object} Disposable
20028
20150
  * @property {DisposeType} dispose A promise that resolves when the pipeline has been disposed.
20029
20151
  */
@@ -20060,7 +20182,7 @@ class Pipeline extends _utils_generic_js__WEBPACK_IMPORTED_MODULE_4__.Callable {
20060
20182
  * @property {string} task The task of the pipeline. Useful for specifying subtasks.
20061
20183
  * @property {PreTrainedModel} model The model used by the pipeline.
20062
20184
  * @property {PreTrainedTokenizer} tokenizer The tokenizer used by the pipeline.
20063
- *
20185
+ *
20064
20186
  * @typedef {ModelTokenizerConstructorArgs} TextPipelineConstructorArgs An object used to instantiate a text-based pipeline.
20065
20187
  */
20066
20188
 
@@ -20069,7 +20191,7 @@ class Pipeline extends _utils_generic_js__WEBPACK_IMPORTED_MODULE_4__.Callable {
20069
20191
  * @property {string} task The task of the pipeline. Useful for specifying subtasks.
20070
20192
  * @property {PreTrainedModel} model The model used by the pipeline.
20071
20193
  * @property {Processor} processor The processor used by the pipeline.
20072
- *
20194
+ *
20073
20195
  * @typedef {ModelProcessorConstructorArgs} AudioPipelineConstructorArgs An object used to instantiate an audio-based pipeline.
20074
20196
  * @typedef {ModelProcessorConstructorArgs} ImagePipelineConstructorArgs An object used to instantiate an image-based pipeline.
20075
20197
  */
@@ -20081,7 +20203,7 @@ class Pipeline extends _utils_generic_js__WEBPACK_IMPORTED_MODULE_4__.Callable {
20081
20203
  * @property {PreTrainedModel} model The model used by the pipeline.
20082
20204
  * @property {PreTrainedTokenizer} tokenizer The tokenizer used by the pipeline.
20083
20205
  * @property {Processor} processor The processor used by the pipeline.
20084
- *
20206
+ *
20085
20207
  * @typedef {ModelTokenizerProcessorConstructorArgs} TextAudioPipelineConstructorArgs An object used to instantiate a text- and audio-based pipeline.
20086
20208
  * @typedef {ModelTokenizerProcessorConstructorArgs} TextImagePipelineConstructorArgs An object used to instantiate a text- and image-based pipeline.
20087
20209
  */
@@ -20091,15 +20213,15 @@ class Pipeline extends _utils_generic_js__WEBPACK_IMPORTED_MODULE_4__.Callable {
20091
20213
  * @property {string} label The label predicted.
20092
20214
  * @property {number} score The corresponding probability.
20093
20215
  * @typedef {TextClassificationSingle[]} TextClassificationOutput
20094
- *
20216
+ *
20095
20217
  * @typedef {Object} TextClassificationPipelineOptions Parameters specific to text classification pipelines.
20096
20218
  * @property {number} [top_k=1] The number of top predictions to be returned.
20097
- *
20219
+ *
20098
20220
  * @callback TextClassificationPipelineCallback Classify the text(s) given as inputs.
20099
20221
  * @param {string|string[]} texts The input text(s) to be classified.
20100
20222
  * @param {TextClassificationPipelineOptions} [options] The options to use for text classification.
20101
20223
  * @returns {Promise<TextClassificationOutput|TextClassificationOutput[]>} An array or object containing the predicted labels and scores.
20102
- *
20224
+ *
20103
20225
  * @typedef {TextPipelineConstructorArgs & TextClassificationPipelineCallback & Disposable} TextClassificationPipelineType
20104
20226
  */
20105
20227
 
@@ -20112,7 +20234,7 @@ class Pipeline extends _utils_generic_js__WEBPACK_IMPORTED_MODULE_4__.Callable {
20112
20234
  * const output = await classifier('I love transformers!');
20113
20235
  * // [{ label: 'POSITIVE', score: 0.999788761138916 }]
20114
20236
  * ```
20115
- *
20237
+ *
20116
20238
  * **Example:** Multilingual sentiment-analysis w/ `Xenova/bert-base-multilingual-uncased-sentiment` (and return top 5 classes).
20117
20239
  * ```javascript
20118
20240
  * const classifier = await pipeline('sentiment-analysis', 'Xenova/bert-base-multilingual-uncased-sentiment');
@@ -20125,7 +20247,7 @@ class Pipeline extends _utils_generic_js__WEBPACK_IMPORTED_MODULE_4__.Callable {
20125
20247
  * // { label: '2 stars', score: 0.0009423971059732139 }
20126
20248
  * // ]
20127
20249
  * ```
20128
- *
20250
+ *
20129
20251
  * **Example:** Toxic comment classification w/ `Xenova/toxic-bert` (and return all classes).
20130
20252
  * ```javascript
20131
20253
  * const classifier = await pipeline('text-classification', 'Xenova/toxic-bert');
@@ -20210,21 +20332,21 @@ class TextClassificationPipeline extends (/** @type {new (options: TextPipelineC
20210
20332
  * @property {number} [start] The index of the start of the corresponding entity in the sentence.
20211
20333
  * @property {number} [end] The index of the end of the corresponding entity in the sentence.
20212
20334
  * @typedef {TokenClassificationSingle[]} TokenClassificationOutput
20213
- *
20335
+ *
20214
20336
  * @typedef {Object} TokenClassificationPipelineOptions Parameters specific to token classification pipelines.
20215
20337
  * @property {string[]} [ignore_labels] A list of labels to ignore.
20216
- *
20338
+ *
20217
20339
  * @callback TokenClassificationPipelineCallback Classify each token of the text(s) given as inputs.
20218
20340
  * @param {string|string[]} texts One or several texts (or one list of texts) for token classification.
20219
20341
  * @param {TokenClassificationPipelineOptions} [options] The options to use for token classification.
20220
20342
  * @returns {Promise<TokenClassificationOutput|TokenClassificationOutput[]>} The result.
20221
- *
20343
+ *
20222
20344
  * @typedef {TextPipelineConstructorArgs & TokenClassificationPipelineCallback & Disposable} TokenClassificationPipelineType
20223
20345
  */
20224
20346
 
20225
20347
  /**
20226
20348
  * Named Entity Recognition pipeline using any `ModelForTokenClassification`.
20227
- *
20349
+ *
20228
20350
  * **Example:** Perform named entity recognition with `Xenova/bert-base-NER`.
20229
20351
  * ```javascript
20230
20352
  * const classifier = await pipeline('token-classification', 'Xenova/bert-base-NER');
@@ -20234,7 +20356,7 @@ class TextClassificationPipeline extends (/** @type {new (options: TextPipelineC
20234
20356
  * // { entity: 'B-LOC', score: 0.9994474053382874, index: 9, word: 'London' }
20235
20357
  * // ]
20236
20358
  * ```
20237
- *
20359
+ *
20238
20360
  * **Example:** Perform named entity recognition with `Xenova/bert-base-NER` (and return all labels).
20239
20361
  * ```javascript
20240
20362
  * const classifier = await pipeline('token-classification', 'Xenova/bert-base-NER');
@@ -20330,22 +20452,22 @@ class TokenClassificationPipeline extends (/** @type {new (options: TextPipeline
20330
20452
  * @property {number} [start] The character start index of the answer (in the tokenized version of the input).
20331
20453
  * @property {number} [end] The character end index of the answer (in the tokenized version of the input).
20332
20454
  * @property {string} answer The answer to the question.
20333
- *
20455
+ *
20334
20456
  * @typedef {Object} QuestionAnsweringPipelineOptions Parameters specific to question answering pipelines.
20335
20457
  * @property {number} [top_k=1] The number of top answer predictions to be returned.
20336
- *
20458
+ *
20337
20459
  * @callback QuestionAnsweringPipelineCallback Answer the question(s) given as inputs by using the context(s).
20338
20460
  * @param {string|string[]} question One or several question(s) (must be used in conjunction with the `context` argument).
20339
20461
  * @param {string|string[]} context One or several context(s) associated with the question(s) (must be used in conjunction with the `question` argument).
20340
20462
  * @param {QuestionAnsweringPipelineOptions} [options] The options to use for question answering.
20341
20463
  * @returns {Promise<QuestionAnsweringOutput|QuestionAnsweringOutput[]>} An array or object containing the predicted answers and scores.
20342
- *
20464
+ *
20343
20465
  * @typedef {TextPipelineConstructorArgs & QuestionAnsweringPipelineCallback & Disposable} QuestionAnsweringPipelineType
20344
20466
  */
20345
20467
 
20346
20468
  /**
20347
20469
  * Question Answering pipeline using any `ModelForQuestionAnswering`.
20348
- *
20470
+ *
20349
20471
  * **Example:** Run question answering with `Xenova/distilbert-base-uncased-distilled-squad`.
20350
20472
  * ```javascript
20351
20473
  * const answerer = await pipeline('question-answering', 'Xenova/distilbert-base-uncased-distilled-squad');
@@ -20470,10 +20592,10 @@ class QuestionAnsweringPipeline extends (/** @type {new (options: TextPipelineCo
20470
20592
  * @property {number} token The predicted token id (to replace the masked one).
20471
20593
  * @property {string} token_str The predicted token (to replace the masked one).
20472
20594
  * @typedef {FillMaskSingle[]} FillMaskOutput
20473
- *
20595
+ *
20474
20596
  * @typedef {Object} FillMaskPipelineOptions Parameters specific to fill mask pipelines.
20475
20597
  * @property {number} [top_k=5] When passed, overrides the number of predictions to return.
20476
- *
20598
+ *
20477
20599
  * @callback FillMaskPipelineCallback Fill the masked token in the text(s) given as inputs.
20478
20600
  * @param {string|string[]} texts One or several texts (or one list of prompts) with masked tokens.
20479
20601
  * @param {FillMaskPipelineOptions} [options] The options to use for masked language modelling.
@@ -20481,13 +20603,13 @@ class QuestionAnsweringPipeline extends (/** @type {new (options: TextPipelineCo
20481
20603
  * and the sequence with the predicted token filled in, or an array of such arrays (one for each input text).
20482
20604
  * If only one input text is given, the output will be an array of objects.
20483
20605
  * @throws {Error} When the mask token is not found in the input text.
20484
- *
20606
+ *
20485
20607
  * @typedef {TextPipelineConstructorArgs & FillMaskPipelineCallback & Disposable} FillMaskPipelineType
20486
20608
  */
20487
20609
 
20488
20610
  /**
20489
20611
  * Masked language modeling prediction pipeline using any `ModelWithLMHead`.
20490
- *
20612
+ *
20491
20613
  * **Example:** Perform masked language modelling (a.k.a. "fill-mask") with `Xenova/bert-base-uncased`.
20492
20614
  * ```javascript
20493
20615
  * const unmasker = await pipeline('fill-mask', 'Xenova/bert-base-cased');
@@ -20500,7 +20622,7 @@ class QuestionAnsweringPipeline extends (/** @type {new (options: TextPipelineCo
20500
20622
  * // { token_str: 'life', score: 0.01859794743359089, token: 1297, sequence: 'The goal of life is life.' }
20501
20623
  * // ]
20502
20624
  * ```
20503
- *
20625
+ *
20504
20626
  * **Example:** Perform masked language modelling (a.k.a. "fill-mask") with `Xenova/bert-base-cased` (and return top result).
20505
20627
  * ```javascript
20506
20628
  * const unmasker = await pipeline('fill-mask', 'Xenova/bert-base-cased');
@@ -20577,18 +20699,18 @@ class FillMaskPipeline extends (/** @type {new (options: TextPipelineConstructor
20577
20699
  * @typedef {Object} Text2TextGenerationSingle
20578
20700
  * @property {string} generated_text The generated text.
20579
20701
  * @typedef {Text2TextGenerationSingle[]} Text2TextGenerationOutput
20580
- *
20702
+ *
20581
20703
  * @callback Text2TextGenerationPipelineCallback Generate the output text(s) using text(s) given as inputs.
20582
20704
  * @param {string|string[]} texts Input text for the encoder.
20583
20705
  * @param {Partial<import('./generation/configuration_utils.js').GenerationConfig>} [options] Additional keyword arguments to pass along to the generate method of the model.
20584
20706
  * @returns {Promise<Text2TextGenerationOutput|Text2TextGenerationOutput[]>}
20585
- *
20707
+ *
20586
20708
  * @typedef {TextPipelineConstructorArgs & Text2TextGenerationPipelineCallback & Disposable} Text2TextGenerationPipelineType
20587
20709
  */
20588
20710
 
20589
20711
  /**
20590
20712
  * Text2TextGenerationPipeline class for generating text using a model that performs text-to-text generation tasks.
20591
- *
20713
+ *
20592
20714
  * **Example:** Text-to-text generation w/ `Xenova/LaMini-Flan-T5-783M`.
20593
20715
  * ```javascript
20594
20716
  * const generator = await pipeline('text2text-generation', 'Xenova/LaMini-Flan-T5-783M');
@@ -20664,18 +20786,18 @@ class Text2TextGenerationPipeline extends (/** @type {new (options: TextPipeline
20664
20786
  * @typedef {Object} SummarizationSingle
20665
20787
  * @property {string} summary_text The summary text.
20666
20788
  * @typedef {SummarizationSingle[]} SummarizationOutput
20667
- *
20789
+ *
20668
20790
  * @callback SummarizationPipelineCallback Summarize the text(s) given as inputs.
20669
20791
  * @param {string|string[]} texts One or several articles (or one list of articles) to summarize.
20670
20792
  * @param {import('./generation/configuration_utils.js').GenerationConfig} [options] Additional keyword arguments to pass along to the generate method of the model.
20671
20793
  * @returns {Promise<SummarizationOutput|SummarizationOutput[]>}
20672
- *
20794
+ *
20673
20795
  * @typedef {TextPipelineConstructorArgs & SummarizationPipelineCallback & Disposable} SummarizationPipelineType
20674
20796
  */
20675
20797
 
20676
20798
  /**
20677
20799
  * A pipeline for summarization tasks, inheriting from Text2TextGenerationPipeline.
20678
- *
20800
+ *
20679
20801
  * **Example:** Summarization w/ `Xenova/distilbart-cnn-6-6`.
20680
20802
  * ```javascript
20681
20803
  * const generator = await pipeline('summarization', 'Xenova/distilbart-cnn-6-6');
@@ -20711,23 +20833,23 @@ class SummarizationPipeline extends (/** @type {new (options: TextPipelineConstr
20711
20833
  * @typedef {Object} TranslationSingle
20712
20834
  * @property {string} translation_text The translated text.
20713
20835
  * @typedef {TranslationSingle[]} TranslationOutput
20714
- *
20836
+ *
20715
20837
  * @callback TranslationPipelineCallback Translate the text(s) given as inputs.
20716
20838
  * @param {string|string[]} texts Texts to be translated.
20717
20839
  * @param {import('./generation/configuration_utils.js').GenerationConfig} [options] Additional keyword arguments to pass along to the generate method of the model.
20718
20840
  * @returns {Promise<TranslationOutput|TranslationOutput[]>}
20719
- *
20841
+ *
20720
20842
  * @typedef {TextPipelineConstructorArgs & TranslationPipelineCallback & Disposable} TranslationPipelineType
20721
20843
  */
20722
20844
 
20723
20845
  /**
20724
20846
  * Translates text from one language to another.
20725
- *
20847
+ *
20726
20848
  * **Example:** Multilingual translation w/ `Xenova/nllb-200-distilled-600M`.
20727
- *
20849
+ *
20728
20850
  * See [here](https://github.com/facebookresearch/flores/blob/main/flores200/README.md#languages-in-flores-200)
20729
20851
  * for the full list of languages and their corresponding codes.
20730
- *
20852
+ *
20731
20853
  * ```javascript
20732
20854
  * const translator = await pipeline('translation', 'Xenova/nllb-200-distilled-600M');
20733
20855
  * const output = await translator('जीवन एक चॉकलेट बॉक्स की तरह है।', {
@@ -20736,12 +20858,12 @@ class SummarizationPipeline extends (/** @type {new (options: TextPipelineConstr
20736
20858
  * });
20737
20859
  * // [{ translation_text: 'La vie est comme une boîte à chocolat.' }]
20738
20860
  * ```
20739
- *
20861
+ *
20740
20862
  * **Example:** Multilingual translation w/ `Xenova/m2m100_418M`.
20741
- *
20863
+ *
20742
20864
  * See [here](https://huggingface.co/facebook/m2m100_418M#languages-covered)
20743
20865
  * for the full list of languages and their corresponding codes.
20744
- *
20866
+ *
20745
20867
  * ```javascript
20746
20868
  * const translator = await pipeline('translation', 'Xenova/m2m100_418M');
20747
20869
  * const output = await translator('生活就像一盒巧克力。', {
@@ -20750,12 +20872,12 @@ class SummarizationPipeline extends (/** @type {new (options: TextPipelineConstr
20750
20872
  * });
20751
20873
  * // [{ translation_text: 'Life is like a box of chocolate.' }]
20752
20874
  * ```
20753
- *
20875
+ *
20754
20876
  * **Example:** Multilingual translation w/ `Xenova/mbart-large-50-many-to-many-mmt`.
20755
- *
20877
+ *
20756
20878
  * See [here](https://huggingface.co/facebook/mbart-large-50-many-to-many-mmt#languages-covered)
20757
20879
  * for the full list of languages and their corresponding codes.
20758
- *
20880
+ *
20759
20881
  * ```javascript
20760
20882
  * const translator = await pipeline('translation', 'Xenova/mbart-large-50-many-to-many-mmt');
20761
20883
  * const output = await translator('संयुक्त राष्ट्र के प्रमुख का कहना है कि सीरिया में कोई सैन्य समाधान नहीं है', {
@@ -20784,21 +20906,21 @@ function isChat(x) {
20784
20906
 
20785
20907
  /**
20786
20908
  * @typedef {import('./tokenizers.js').Message[]} Chat
20787
- *
20909
+ *
20788
20910
  * @typedef {Object} TextGenerationSingle
20789
20911
  * @property {string|Chat} generated_text The generated text.
20790
20912
  * @typedef {TextGenerationSingle[]} TextGenerationOutput
20791
- *
20913
+ *
20792
20914
  * @typedef {Object} TextGenerationSpecificParams Parameters specific to text-generation pipelines.
20793
20915
  * @property {boolean} [add_special_tokens] Whether or not to add special tokens when tokenizing the sequences.
20794
20916
  * @property {boolean} [return_full_text=true] If set to `false` only added text is returned, otherwise the full text is returned.
20795
20917
  * @typedef {import('./generation/configuration_utils.js').GenerationConfig & TextGenerationSpecificParams} TextGenerationConfig
20796
- *
20918
+ *
20797
20919
  * @callback TextGenerationPipelineCallback Complete the prompt(s) given as inputs.
20798
20920
  * @param {string|string[]|Chat|Chat[]} texts One or several prompts (or one list of prompts) to complete.
20799
20921
  * @param {Partial<TextGenerationConfig>} [options] Additional keyword arguments to pass along to the generate method of the model.
20800
20922
  * @returns {Promise<TextGenerationOutput|TextGenerationOutput[]>} An array or object containing the generated texts.
20801
- *
20923
+ *
20802
20924
  * @typedef {TextPipelineConstructorArgs & TextGenerationPipelineCallback & Disposable} TextGenerationPipelineType
20803
20925
  */
20804
20926
 
@@ -20806,7 +20928,7 @@ function isChat(x) {
20806
20928
  * Language generation pipeline using any `ModelWithLMHead` or `ModelForCausalLM`.
20807
20929
  * This pipeline predicts the words that will follow a specified text prompt.
20808
20930
  * NOTE: For the full list of generation parameters, see [`GenerationConfig`](./utils/generation#module_utils/generation.GenerationConfig).
20809
- *
20931
+ *
20810
20932
  * **Example:** Text generation with `Xenova/distilgpt2` (default settings).
20811
20933
  * ```javascript
20812
20934
  * const generator = await pipeline('text-generation', 'Xenova/distilgpt2');
@@ -20814,7 +20936,7 @@ function isChat(x) {
20814
20936
  * const output = await generator(text);
20815
20937
  * // [{ generated_text: "I enjoy walking with my cute dog, and I love to play with the other dogs." }]
20816
20938
  * ```
20817
- *
20939
+ *
20818
20940
  * **Example:** Text generation with `Xenova/distilgpt2` (custom settings).
20819
20941
  * ```javascript
20820
20942
  * const generator = await pipeline('text-generation', 'Xenova/distilgpt2');
@@ -20833,7 +20955,7 @@ function isChat(x) {
20833
20955
  * // "generated_text": "Once upon a time, there was an abundance of information about the most important and influential"
20834
20956
  * // }]
20835
20957
  * ```
20836
- *
20958
+ *
20837
20959
  * **Example:** Run code generation with `Xenova/codegen-350M-mono`.
20838
20960
  * ```javascript
20839
20961
  * const generator = await pipeline('text-generation', 'Xenova/codegen-350M-mono');
@@ -20952,7 +21074,7 @@ class TextGenerationPipeline extends (/** @type {new (options: TextPipelineConst
20952
21074
  * @property {string} sequence The sequence for which this is the output.
20953
21075
  * @property {string[]} labels The labels sorted by order of likelihood.
20954
21076
  * @property {number[]} scores The probabilities for each of the labels.
20955
- *
21077
+ *
20956
21078
  * @typedef {Object} ZeroShotClassificationPipelineOptions Parameters specific to zero-shot classification pipelines.
20957
21079
  * @property {string} [hypothesis_template="This example is {}."] The template used to turn each
20958
21080
  * candidate label into an NLI-style hypothesis. The candidate label will replace the {} placeholder.
@@ -20960,14 +21082,14 @@ class TextGenerationPipeline extends (/** @type {new (options: TextPipelineConst
20960
21082
  * If `false`, the scores are normalized such that the sum of the label likelihoods for each sequence
20961
21083
  * is 1. If `true`, the labels are considered independent and probabilities are normalized for each
20962
21084
  * candidate by doing a softmax of the entailment score vs. the contradiction score.
20963
- *
21085
+ *
20964
21086
  * @callback ZeroShotClassificationPipelineCallback Classify the sequence(s) given as inputs.
20965
21087
  * @param {string|string[]} texts The sequence(s) to classify, will be truncated if the model input is too large.
20966
21088
  * @param {string|string[]} candidate_labels The set of possible class labels to classify each sequence into.
20967
21089
  * Can be a single label, a string of comma-separated labels, or a list of labels.
20968
21090
  * @param {ZeroShotClassificationPipelineOptions} [options] The options to use for zero-shot classification.
20969
21091
  * @returns {Promise<ZeroShotClassificationOutput|ZeroShotClassificationOutput[]>} An array or object containing the predicted labels and scores.
20970
- *
21092
+ *
20971
21093
  * @typedef {TextPipelineConstructorArgs & ZeroShotClassificationPipelineCallback & Disposable} ZeroShotClassificationPipelineType
20972
21094
  */
20973
21095
 
@@ -20976,7 +21098,7 @@ class TextGenerationPipeline extends (/** @type {new (options: TextPipelineConst
20976
21098
  * trained on NLI (natural language inference) tasks. Equivalent of `text-classification`
20977
21099
  * pipelines, but these models don't require a hardcoded number of potential classes, they
20978
21100
  * can be chosen at runtime. It usually means it's slower but it is **much** more flexible.
20979
- *
21101
+ *
20980
21102
  * **Example:** Zero shot classification with `Xenova/mobilebert-uncased-mnli`.
20981
21103
  * ```javascript
20982
21104
  * const classifier = await pipeline('zero-shot-classification', 'Xenova/mobilebert-uncased-mnli');
@@ -20989,7 +21111,7 @@ class TextGenerationPipeline extends (/** @type {new (options: TextPipelineConst
20989
21111
  * // scores: [ 0.5562091040482018, 0.1843621307860853, 0.13942646639336376, 0.12000229877234923 ]
20990
21112
  * // }
20991
21113
  * ```
20992
- *
21114
+ *
20993
21115
  * **Example:** Zero shot classification with `Xenova/nli-deberta-v3-xsmall` (multi-label).
20994
21116
  * ```javascript
20995
21117
  * const classifier = await pipeline('zero-shot-classification', 'Xenova/nli-deberta-v3-xsmall');
@@ -21103,20 +21225,20 @@ class ZeroShotClassificationPipeline extends (/** @type {new (options: TextPipel
21103
21225
  * @property {'none'|'mean'|'cls'} [pooling="none"] The pooling method to use.
21104
21226
  * @property {boolean} [normalize=false] Whether or not to normalize the embeddings in the last dimension.
21105
21227
  * @property {boolean} [quantize=false] Whether or not to quantize the embeddings.
21106
- * @property {'binary'|'ubinary'} [precision='binary'] The precision to use for quantization.
21107
- *
21228
+ * @property {'binary'|'ubinary'} [precision='binary'] The precision to use for quantization.
21229
+ *
21108
21230
  * @callback FeatureExtractionPipelineCallback Extract the features of the input(s).
21109
21231
  * @param {string|string[]} texts One or several texts (or one list of texts) to get the features of.
21110
21232
  * @param {FeatureExtractionPipelineOptions} [options] The options to use for feature extraction.
21111
21233
  * @returns {Promise<Tensor>} The features computed by the model.
21112
- *
21234
+ *
21113
21235
  * @typedef {TextPipelineConstructorArgs & FeatureExtractionPipelineCallback & Disposable} FeatureExtractionPipelineType
21114
21236
  */
21115
21237
 
21116
21238
  /**
21117
21239
  * Feature extraction pipeline using no model head. This pipeline extracts the hidden
21118
21240
  * states from the base transformer, which can be used as features in downstream tasks.
21119
- *
21241
+ *
21120
21242
  * **Example:** Run feature extraction with `bert-base-uncased` (without pooling/normalization).
21121
21243
  * ```javascript
21122
21244
  * const extractor = await pipeline('feature-extraction', 'Xenova/bert-base-uncased', { revision: 'default' });
@@ -21127,7 +21249,7 @@ class ZeroShotClassificationPipeline extends (/** @type {new (options: TextPipel
21127
21249
  * // dims: [1, 8, 768]
21128
21250
  * // }
21129
21251
  * ```
21130
- *
21252
+ *
21131
21253
  * **Example:** Run feature extraction with `bert-base-uncased` (with pooling/normalization).
21132
21254
  * ```javascript
21133
21255
  * const extractor = await pipeline('feature-extraction', 'Xenova/bert-base-uncased', { revision: 'default' });
@@ -21138,7 +21260,7 @@ class ZeroShotClassificationPipeline extends (/** @type {new (options: TextPipel
21138
21260
  * // dims: [1, 768]
21139
21261
  * // }
21140
21262
  * ```
21141
- *
21263
+ *
21142
21264
  * **Example:** Calculating embeddings with `sentence-transformers` models.
21143
21265
  * ```javascript
21144
21266
  * const extractor = await pipeline('feature-extraction', 'Xenova/all-MiniLM-L6-v2');
@@ -21219,19 +21341,19 @@ class FeatureExtractionPipeline extends (/** @type {new (options: TextPipelineCo
21219
21341
  /**
21220
21342
  * @typedef {Object} ImageFeatureExtractionPipelineOptions Parameters specific to image feature extraction pipelines.
21221
21343
  * @property {boolean} [pool=null] Whether or not to return the pooled output. If set to `false`, the model will return the raw hidden states.
21222
- *
21344
+ *
21223
21345
  * @callback ImageFeatureExtractionPipelineCallback Extract the features of the input(s).
21224
21346
  * @param {ImagePipelineInputs} images One or several images (or one list of images) to get the features of.
21225
21347
  * @param {ImageFeatureExtractionPipelineOptions} [options] The options to use for image feature extraction.
21226
21348
  * @returns {Promise<Tensor>} The image features computed by the model.
21227
- *
21349
+ *
21228
21350
  * @typedef {ImagePipelineConstructorArgs & ImageFeatureExtractionPipelineCallback & Disposable} ImageFeatureExtractionPipelineType
21229
21351
  */
21230
21352
 
21231
21353
  /**
21232
21354
  * Image feature extraction pipeline using no model head. This pipeline extracts the hidden
21233
21355
  * states from the base transformer, which can be used as features in downstream tasks.
21234
- *
21356
+ *
21235
21357
  * **Example:** Perform image feature extraction with `Xenova/vit-base-patch16-224-in21k`.
21236
21358
  * ```javascript
21237
21359
  * const image_feature_extractor = await pipeline('image-feature-extraction', 'Xenova/vit-base-patch16-224-in21k');
@@ -21244,7 +21366,7 @@ class FeatureExtractionPipeline extends (/** @type {new (options: TextPipelineCo
21244
21366
  * // size: 151296
21245
21367
  * // }
21246
21368
  * ```
21247
- *
21369
+ *
21248
21370
  * **Example:** Compute image embeddings with `Xenova/clip-vit-base-patch32`.
21249
21371
  * ```javascript
21250
21372
  * const image_feature_extractor = await pipeline('image-feature-extraction', 'Xenova/clip-vit-base-patch32');
@@ -21300,12 +21422,12 @@ class ImageFeatureExtractionPipeline extends (/** @type {new (options: ImagePipe
21300
21422
  * @property {string} label The label predicted.
21301
21423
  * @property {number} score The corresponding probability.
21302
21424
  * @typedef {AudioClassificationSingle[]} AudioClassificationOutput
21303
- *
21425
+ *
21304
21426
  * @typedef {Object} AudioClassificationPipelineOptions Parameters specific to audio classification pipelines.
21305
21427
  * @property {number} [top_k=5] The number of top labels that will be returned by the pipeline.
21306
21428
  * If the provided number is `null` or higher than the number of labels available in the model configuration,
21307
21429
  * it will default to the number of labels.
21308
- *
21430
+ *
21309
21431
  * @callback AudioClassificationPipelineCallback Classify the sequence(s) given as inputs.
21310
21432
  * @param {AudioPipelineInputs} audio The input audio file(s) to be classified. The input is either:
21311
21433
  * - `string` or `URL` that is the filename/URL of the audio file, the file will be read at the processor's sampling rate
@@ -21314,14 +21436,14 @@ class ImageFeatureExtractionPipeline extends (/** @type {new (options: ImagePipe
21314
21436
  * - `Float32Array` or `Float64Array` of shape `(n, )`, representing the raw audio at the correct sampling rate (no further check will be done).
21315
21437
  * @param {AudioClassificationPipelineOptions} [options] The options to use for audio classification.
21316
21438
  * @returns {Promise<AudioClassificationOutput|AudioClassificationOutput[]>} An array or object containing the predicted labels and scores.
21317
- *
21439
+ *
21318
21440
  * @typedef {AudioPipelineConstructorArgs & AudioClassificationPipelineCallback & Disposable} AudioClassificationPipelineType
21319
21441
  */
21320
21442
 
21321
21443
  /**
21322
21444
  * Audio classification pipeline using any `AutoModelForAudioClassification`.
21323
21445
  * This pipeline predicts the class of a raw waveform or an audio file.
21324
- *
21446
+ *
21325
21447
  * **Example:** Perform audio classification with `Xenova/wav2vec2-large-xlsr-53-gender-recognition-librispeech`.
21326
21448
  * ```javascript
21327
21449
  * const classifier = await pipeline('audio-classification', 'Xenova/wav2vec2-large-xlsr-53-gender-recognition-librispeech');
@@ -21332,7 +21454,7 @@ class ImageFeatureExtractionPipeline extends (/** @type {new (options: ImagePipe
21332
21454
  * // { label: 'female', score: 0.001845747814513743 }
21333
21455
  * // ]
21334
21456
  * ```
21335
- *
21457
+ *
21336
21458
  * **Example:** Perform audio classification with `Xenova/ast-finetuned-audioset-10-10-0.4593` and return top 4 results.
21337
21459
  * ```javascript
21338
21460
  * const classifier = await pipeline('audio-classification', 'Xenova/ast-finetuned-audioset-10-10-0.4593');
@@ -21397,12 +21519,12 @@ class AudioClassificationPipeline extends (/** @type {new (options: AudioPipelin
21397
21519
  * @typedef {Object} ZeroShotAudioClassificationOutput
21398
21520
  * @property {string} label The label identified by the model. It is one of the suggested `candidate_label`.
21399
21521
  * @property {number} score The score attributed by the model for that label (between 0 and 1).
21400
- *
21522
+ *
21401
21523
  * @typedef {Object} ZeroShotAudioClassificationPipelineOptions Parameters specific to zero-shot audio classification pipelines.
21402
21524
  * @property {string} [hypothesis_template="This is a sound of {}."] The sentence used in conjunction with `candidate_labels`
21403
21525
  * to attempt the audio classification by replacing the placeholder with the candidate_labels.
21404
21526
  * Then likelihood is estimated by using `logits_per_audio`.
21405
- *
21527
+ *
21406
21528
  * @callback ZeroShotAudioClassificationPipelineCallback Classify the sequence(s) given as inputs.
21407
21529
  * @param {AudioPipelineInputs} audio The input audio file(s) to be classified. The input is either:
21408
21530
  * - `string` or `URL` that is the filename/URL of the audio file, the file will be read at the processor's sampling rate
@@ -21412,14 +21534,14 @@ class AudioClassificationPipeline extends (/** @type {new (options: AudioPipelin
21412
21534
  * @param {string[]} candidate_labels The candidate labels for this audio.
21413
21535
  * @param {ZeroShotAudioClassificationPipelineOptions} [options] The options to use for zero-shot audio classification.
21414
21536
  * @returns {Promise<ZeroShotAudioClassificationOutput[]|ZeroShotAudioClassificationOutput[][]>} An array of objects containing the predicted labels and scores.
21415
- *
21537
+ *
21416
21538
  * @typedef {TextAudioPipelineConstructorArgs & ZeroShotAudioClassificationPipelineCallback & Disposable} ZeroShotAudioClassificationPipelineType
21417
21539
  */
21418
21540
 
21419
21541
  /**
21420
21542
  * Zero shot audio classification pipeline using `ClapModel`. This pipeline predicts the class of an audio when you
21421
21543
  * provide an audio and a set of `candidate_labels`.
21422
- *
21544
+ *
21423
21545
  * **Example**: Perform zero-shot audio classification with `Xenova/clap-htsat-unfused`.
21424
21546
  * ```javascript
21425
21547
  * const classifier = await pipeline('zero-shot-audio-classification', 'Xenova/clap-htsat-unfused');
@@ -21452,7 +21574,7 @@ class ZeroShotAudioClassificationPipeline extends (/** @type {new (options: Text
21452
21574
  audio = [/** @type {AudioInput} */ (audio)];
21453
21575
  }
21454
21576
 
21455
- // Insert label into hypothesis template
21577
+ // Insert label into hypothesis template
21456
21578
  const texts = candidate_labels.map(
21457
21579
  x => hypothesis_template.replace('{}', x)
21458
21580
  );
@@ -21496,7 +21618,7 @@ class ZeroShotAudioClassificationPipeline extends (/** @type {new (options: Text
21496
21618
  * @property {string} text The recognized text.
21497
21619
  * @property {Chunk[]} [chunks] When using `return_timestamps`, the `chunks` will become a list
21498
21620
  * containing all the various text chunks identified by the model.
21499
- *
21621
+ *
21500
21622
  * @typedef {Object} AutomaticSpeechRecognitionSpecificParams Parameters specific to automatic-speech-recognition pipelines.
21501
21623
  * @property {boolean|'word'} [return_timestamps] Whether to return timestamps or not. Default is `false`.
21502
21624
  * @property {number} [chunk_length_s] The length of audio chunks to process in seconds. Default is 0 (no chunking).
@@ -21506,7 +21628,7 @@ class ZeroShotAudioClassificationPipeline extends (/** @type {new (options: Text
21506
21628
  * @property {string} [task] The task to perform. Default is `null`, meaning it should be auto-detected.
21507
21629
  * @property {number} [num_frames] The number of frames in the input audio.
21508
21630
  * @typedef {import('./generation/configuration_utils.js').GenerationConfig & AutomaticSpeechRecognitionSpecificParams} AutomaticSpeechRecognitionConfig
21509
- *
21631
+ *
21510
21632
  * @callback AutomaticSpeechRecognitionPipelineCallback Transcribe the audio sequence(s) given as inputs to text.
21511
21633
  * @param {AudioPipelineInputs} audio The input audio file(s) to be transcribed. The input is either:
21512
21634
  * - `string` or `URL` that is the filename/URL of the audio file, the file will be read at the processor's sampling rate
@@ -21515,7 +21637,7 @@ class ZeroShotAudioClassificationPipeline extends (/** @type {new (options: Text
21515
21637
  * - `Float32Array` or `Float64Array` of shape `(n, )`, representing the raw audio at the correct sampling rate (no further check will be done).
21516
21638
  * @param {Partial<AutomaticSpeechRecognitionConfig>} [options] Additional keyword arguments to pass along to the generate method of the model.
21517
21639
  * @returns {Promise<AutomaticSpeechRecognitionOutput|AutomaticSpeechRecognitionOutput[]>} An object containing the transcription text and optionally timestamps if `return_timestamps` is `true`.
21518
- *
21640
+ *
21519
21641
  * @typedef {TextAudioPipelineConstructorArgs & AutomaticSpeechRecognitionPipelineCallback & Disposable} AutomaticSpeechRecognitionPipelineType
21520
21642
  */
21521
21643
 
@@ -21529,7 +21651,7 @@ class ZeroShotAudioClassificationPipeline extends (/** @type {new (options: Text
21529
21651
  * const output = await transcriber(url);
21530
21652
  * // { text: " And so my fellow Americans ask not what your country can do for you, ask what you can do for your country." }
21531
21653
  * ```
21532
- *
21654
+ *
21533
21655
  * **Example:** Transcribe English w/ timestamps.
21534
21656
  * ```javascript
21535
21657
  * const transcriber = await pipeline('automatic-speech-recognition', 'Xenova/whisper-tiny.en');
@@ -21543,7 +21665,7 @@ class ZeroShotAudioClassificationPipeline extends (/** @type {new (options: Text
21543
21665
  * // ]
21544
21666
  * // }
21545
21667
  * ```
21546
- *
21668
+ *
21547
21669
  * **Example:** Transcribe English w/ word-level timestamps.
21548
21670
  * ```javascript
21549
21671
  * const transcriber = await pipeline('automatic-speech-recognition', 'Xenova/whisper-tiny.en');
@@ -21562,7 +21684,7 @@ class ZeroShotAudioClassificationPipeline extends (/** @type {new (options: Text
21562
21684
  * // ]
21563
21685
  * // }
21564
21686
  * ```
21565
- *
21687
+ *
21566
21688
  * **Example:** Transcribe French.
21567
21689
  * ```javascript
21568
21690
  * const transcriber = await pipeline('automatic-speech-recognition', 'Xenova/whisper-small');
@@ -21570,7 +21692,7 @@ class ZeroShotAudioClassificationPipeline extends (/** @type {new (options: Text
21570
21692
  * const output = await transcriber(url, { language: 'french', task: 'transcribe' });
21571
21693
  * // { text: " J'adore, j'aime, je n'aime pas, je déteste." }
21572
21694
  * ```
21573
- *
21695
+ *
21574
21696
  * **Example:** Translate French to English.
21575
21697
  * ```javascript
21576
21698
  * const transcriber = await pipeline('automatic-speech-recognition', 'Xenova/whisper-small');
@@ -21578,7 +21700,7 @@ class ZeroShotAudioClassificationPipeline extends (/** @type {new (options: Text
21578
21700
  * const output = await transcriber(url, { language: 'french', task: 'translate' });
21579
21701
  * // { text: " I love, I like, I don't like, I hate." }
21580
21702
  * ```
21581
- *
21703
+ *
21582
21704
  * **Example:** Transcribe/translate audio longer than 30 seconds.
21583
21705
  * ```javascript
21584
21706
  * const transcriber = await pipeline('automatic-speech-recognition', 'Xenova/whisper-tiny.en');
@@ -21801,18 +21923,18 @@ class AutomaticSpeechRecognitionPipeline extends (/** @type {new (options: TextA
21801
21923
  * @typedef {Object} ImageToTextSingle
21802
21924
  * @property {string} generated_text The generated text.
21803
21925
  * @typedef {ImageToTextSingle[]} ImageToTextOutput
21804
- *
21926
+ *
21805
21927
  * @callback ImageToTextPipelineCallback Assign labels to the image(s) passed as inputs.
21806
21928
  * @param {ImagePipelineInputs} texts The images to be captioned.
21807
21929
  * @param {Partial<import('./generation/configuration_utils.js').GenerationConfig>} [options] Additional keyword arguments to pass along to the generate method of the model.
21808
21930
  * @returns {Promise<ImageToTextOutput|ImageToTextOutput[]>} An object (or array of objects) containing the generated text(s).
21809
- *
21931
+ *
21810
21932
  * @typedef {TextImagePipelineConstructorArgs & ImageToTextPipelineCallback & Disposable} ImageToTextPipelineType
21811
21933
  */
21812
21934
 
21813
21935
  /**
21814
21936
  * Image To Text pipeline using a `AutoModelForVision2Seq`. This pipeline predicts a caption for a given image.
21815
- *
21937
+ *
21816
21938
  * **Example:** Generate a caption for an image w/ `Xenova/vit-gpt2-image-captioning`.
21817
21939
  * ```javascript
21818
21940
  * const captioner = await pipeline('image-to-text', 'Xenova/vit-gpt2-image-captioning');
@@ -21820,7 +21942,7 @@ class AutomaticSpeechRecognitionPipeline extends (/** @type {new (options: TextA
21820
21942
  * const output = await captioner(url);
21821
21943
  * // [{ generated_text: 'a cat laying on a couch with another cat' }]
21822
21944
  * ```
21823
- *
21945
+ *
21824
21946
  * **Example:** Optical Character Recognition (OCR) w/ `Xenova/trocr-small-handwritten`.
21825
21947
  * ```javascript
21826
21948
  * const captioner = await pipeline('image-to-text', 'Xenova/trocr-small-handwritten');
@@ -21866,22 +21988,22 @@ class ImageToTextPipeline extends (/** @type {new (options: TextImagePipelineCon
21866
21988
  * @property {string} label The label identified by the model.
21867
21989
  * @property {number} score The score attributed by the model for that label.
21868
21990
  * @typedef {ImageClassificationSingle[]} ImageClassificationOutput
21869
- *
21991
+ *
21870
21992
  * @typedef {Object} ImageClassificationPipelineOptions Parameters specific to image classification pipelines.
21871
- * @property {number} [top_k=1] The number of top labels that will be returned by the pipeline.
21872
- *
21993
+ * @property {number} [top_k=1] The number of top labels that will be returned by the pipeline.
21994
+ *
21873
21995
  * @callback ImageClassificationPipelineCallback Assign labels to the image(s) passed as inputs.
21874
21996
  * @param {ImagePipelineInputs} images The input images(s) to be classified.
21875
21997
  * @param {ImageClassificationPipelineOptions} [options] The options to use for image classification.
21876
21998
  * @returns {Promise<ImageClassificationOutput|ImageClassificationOutput[]>} An array or object containing the predicted labels and scores.
21877
- *
21999
+ *
21878
22000
  * @typedef {ImagePipelineConstructorArgs & ImageClassificationPipelineCallback & Disposable} ImageClassificationPipelineType
21879
22001
  */
21880
22002
 
21881
22003
  /**
21882
22004
  * Image classification pipeline using any `AutoModelForImageClassification`.
21883
22005
  * This pipeline predicts the class of an image.
21884
- *
22006
+ *
21885
22007
  * **Example:** Classify an image.
21886
22008
  * ```javascript
21887
22009
  * const classifier = await pipeline('image-classification', 'Xenova/vit-base-patch16-224');
@@ -21891,7 +22013,7 @@ class ImageToTextPipeline extends (/** @type {new (options: TextImagePipelineCon
21891
22013
  * // { label: 'tiger, Panthera tigris', score: 0.632695734500885 },
21892
22014
  * // ]
21893
22015
  * ```
21894
- *
22016
+ *
21895
22017
  * **Example:** Classify an image and return top `n` classes.
21896
22018
  * ```javascript
21897
22019
  * const classifier = await pipeline('image-classification', 'Xenova/vit-base-patch16-224');
@@ -21903,7 +22025,7 @@ class ImageToTextPipeline extends (/** @type {new (options: TextImagePipelineCon
21903
22025
  * // { label: 'lion, king of beasts, Panthera leo', score: 0.00045060308184474707 },
21904
22026
  * // ]
21905
22027
  * ```
21906
- *
22028
+ *
21907
22029
  * **Example:** Classify an image and return all classes.
21908
22030
  * ```javascript
21909
22031
  * const classifier = await pipeline('image-classification', 'Xenova/vit-base-patch16-224');
@@ -21970,7 +22092,7 @@ class ImageClassificationPipeline extends (/** @type {new (options: ImagePipelin
21970
22092
  * @property {string|null} label The label of the segment.
21971
22093
  * @property {number|null} score The score of the segment.
21972
22094
  * @property {RawImage} mask The mask of the segment.
21973
- *
22095
+ *
21974
22096
  * @typedef {Object} ImageSegmentationPipelineOptions Parameters specific to image segmentation pipelines.
21975
22097
  * @property {number} [threshold=0.5] Probability threshold to filter out predicted masks.
21976
22098
  * @property {number} [mask_threshold=0.5] Threshold to use when turning the predicted masks into binary values.
@@ -21979,19 +22101,19 @@ class ImageClassificationPipeline extends (/** @type {new (options: ImagePipelin
21979
22101
  * depending on model capabilities. If not set, the pipeline will attempt to resolve (in that order).
21980
22102
  * @property {number[]} [label_ids_to_fuse=null] List of label ids to fuse. If not set, do not fuse any labels.
21981
22103
  * @property {number[][]} [target_sizes=null] List of target sizes for the input images. If not set, use the original image sizes.
21982
- *
22104
+ *
21983
22105
  * @callback ImageSegmentationPipelineCallback Segment the input images.
21984
22106
  * @param {ImagePipelineInputs} images The input images.
21985
22107
  * @param {ImageSegmentationPipelineOptions} [options] The options to use for image segmentation.
21986
22108
  * @returns {Promise<ImageSegmentationPipelineOutput[]>} The annotated segments.
21987
- *
22109
+ *
21988
22110
  * @typedef {ImagePipelineConstructorArgs & ImageSegmentationPipelineCallback & Disposable} ImageSegmentationPipelineType
21989
22111
  */
21990
22112
 
21991
22113
  /**
21992
22114
  * Image segmentation pipeline using any `AutoModelForXXXSegmentation`.
21993
22115
  * This pipeline predicts masks of objects and their classes.
21994
- *
22116
+ *
21995
22117
  * **Example:** Perform image segmentation with `Xenova/detr-resnet-50-panoptic`.
21996
22118
  * ```javascript
21997
22119
  * const segmenter = await pipeline('image-segmentation', 'Xenova/detr-resnet-50-panoptic');
@@ -22075,12 +22197,17 @@ class ImageSegmentationPipeline extends (/** @type {new (options: ImagePipelineC
22075
22197
  /** @type {ImageSegmentationPipelineOutput[]} */
22076
22198
  const annotation = [];
22077
22199
  if (!subtask) {
22200
+ // We define an epsilon to safeguard against numerical/precision issues when detecting
22201
+ // the normalization mode of the output (i.e., sigmoid already applied, or not).
22202
+ // See https://github.com/microsoft/onnxruntime/issues/23943 for more information.
22203
+ const epsilon = 1e-5;
22204
+
22078
22205
  // Perform standard image segmentation
22079
22206
  const result = output[outputNames[0]];
22080
22207
  for (let i = 0; i < imageSizes.length; ++i) {
22081
22208
  const size = imageSizes[i];
22082
22209
  const item = result[i];
22083
- if (item.data.some(x => x < 0 || x > 1)) {
22210
+ if (item.data.some(x => x < -epsilon || x > 1 + epsilon)) {
22084
22211
  item.sigmoid_();
22085
22212
  }
22086
22213
  const mask = await _utils_image_js__WEBPACK_IMPORTED_MODULE_9__.RawImage.fromTensor(item.mul_(255).to('uint8')).resize(size[1], size[0]);
@@ -22149,19 +22276,19 @@ class ImageSegmentationPipeline extends (/** @type {new (options: ImagePipelineC
22149
22276
 
22150
22277
  /**
22151
22278
  * @typedef {Object} BackgroundRemovalPipelineOptions Parameters specific to image segmentation pipelines.
22152
- *
22279
+ *
22153
22280
  * @callback BackgroundRemovalPipelineCallback Segment the input images.
22154
22281
  * @param {ImagePipelineInputs} images The input images.
22155
22282
  * @param {BackgroundRemovalPipelineOptions} [options] The options to use for image segmentation.
22156
22283
  * @returns {Promise<RawImage[]>} The images with the background removed.
22157
- *
22284
+ *
22158
22285
  * @typedef {ImagePipelineConstructorArgs & BackgroundRemovalPipelineCallback & Disposable} BackgroundRemovalPipelineType
22159
22286
  */
22160
22287
 
22161
22288
  /**
22162
22289
  * Background removal pipeline using certain `AutoModelForXXXSegmentation`.
22163
22290
  * This pipeline removes the backgrounds of images.
22164
- *
22291
+ *
22165
22292
  * **Example:** Perform background removal with `Xenova/modnet`.
22166
22293
  * ```javascript
22167
22294
  * const segmenter = await pipeline('background-removal', 'Xenova/modnet');
@@ -22172,7 +22299,7 @@ class ImageSegmentationPipeline extends (/** @type {new (options: ImagePipelineC
22172
22299
  * // ]
22173
22300
  * ```
22174
22301
  */
22175
- class BackgroundRemovalPipeline extends (/** @type {new (options: ImagePipelineConstructorArgs) => ImageSegmentationPipelineType} */ (ImageSegmentationPipeline)) {
22302
+ class BackgroundRemovalPipeline extends (/** @type {new (options: ImagePipelineConstructorArgs) => BackgroundRemovalPipelineType} */ (/** @type {any} */(ImageSegmentationPipeline))) {
22176
22303
  /**
22177
22304
  * Create a new BackgroundRemovalPipeline.
22178
22305
  * @param {ImagePipelineConstructorArgs} options An object used to instantiate the pipeline.
@@ -22207,25 +22334,25 @@ class BackgroundRemovalPipeline extends (/** @type {new (options: ImagePipelineC
22207
22334
  * @typedef {Object} ZeroShotImageClassificationOutput
22208
22335
  * @property {string} label The label identified by the model. It is one of the suggested `candidate_label`.
22209
22336
  * @property {number} score The score attributed by the model for that label (between 0 and 1).
22210
- *
22337
+ *
22211
22338
  * @typedef {Object} ZeroShotImageClassificationPipelineOptions Parameters specific to zero-shot image classification pipelines.
22212
22339
  * @property {string} [hypothesis_template="This is a photo of {}"] The sentence used in conjunction with `candidate_labels`
22213
22340
  * to attempt the image classification by replacing the placeholder with the candidate_labels.
22214
22341
  * Then likelihood is estimated by using `logits_per_image`.
22215
- *
22342
+ *
22216
22343
  * @callback ZeroShotImageClassificationPipelineCallback Assign labels to the image(s) passed as inputs.
22217
22344
  * @param {ImagePipelineInputs} images The input images.
22218
22345
  * @param {string[]} candidate_labels The candidate labels for this image.
22219
22346
  * @param {ZeroShotImageClassificationPipelineOptions} [options] The options to use for zero-shot image classification.
22220
22347
  * @returns {Promise<ZeroShotImageClassificationOutput[]|ZeroShotImageClassificationOutput[][]>} An array of objects containing the predicted labels and scores.
22221
- *
22348
+ *
22222
22349
  * @typedef {TextImagePipelineConstructorArgs & ZeroShotImageClassificationPipelineCallback & Disposable} ZeroShotImageClassificationPipelineType
22223
22350
  */
22224
22351
 
22225
22352
  /**
22226
22353
  * Zero shot image classification pipeline. This pipeline predicts the class of
22227
22354
  * an image when you provide an image and a set of `candidate_labels`.
22228
- *
22355
+ *
22229
22356
  * **Example:** Zero shot image classification w/ `Xenova/clip-vit-base-patch32`.
22230
22357
  * ```javascript
22231
22358
  * const classifier = await pipeline('zero-shot-image-classification', 'Xenova/clip-vit-base-patch32');
@@ -22255,7 +22382,7 @@ class ZeroShotImageClassificationPipeline extends (/** @type {new (options: Text
22255
22382
  const isBatched = Array.isArray(images);
22256
22383
  const preparedImages = await prepareImages(images);
22257
22384
 
22258
- // Insert label into hypothesis template
22385
+ // Insert label into hypothesis template
22259
22386
  const texts = candidate_labels.map(
22260
22387
  x => hypothesis_template.replace('{}', x)
22261
22388
  );
@@ -22302,23 +22429,23 @@ class ZeroShotImageClassificationPipeline extends (/** @type {new (options: Text
22302
22429
  * @property {number} score The score attributed by the model for that label.
22303
22430
  * @property {BoundingBox} box The bounding box of detected object in image's original size, or as a percentage if `percentage` is set to true.
22304
22431
  * @typedef {ObjectDetectionPipelineSingle[]} ObjectDetectionPipelineOutput
22305
- *
22432
+ *
22306
22433
  * @typedef {Object} ObjectDetectionPipelineOptions Parameters specific to object detection pipelines.
22307
22434
  * @property {number} [threshold=0.9] The threshold used to filter boxes by score.
22308
22435
  * @property {boolean} [percentage=false] Whether to return the boxes coordinates in percentage (true) or in pixels (false).
22309
- *
22436
+ *
22310
22437
  * @callback ObjectDetectionPipelineCallback Detect objects (bounding boxes & classes) in the image(s) passed as inputs.
22311
22438
  * @param {ImagePipelineInputs} images The input images.
22312
22439
  * @param {ObjectDetectionPipelineOptions} [options] The options to use for object detection.
22313
- * @returns {Promise<ObjectDetectionPipelineOutput|ObjectDetectionPipelineOutput[]>} A list of objects or a list of list of objects.
22314
- *
22440
+ * @returns {Promise<ObjectDetectionPipelineOutput|ObjectDetectionPipelineOutput[]>} A list of objects or a list of list of objects.
22441
+ *
22315
22442
  * @typedef {ImagePipelineConstructorArgs & ObjectDetectionPipelineCallback & Disposable} ObjectDetectionPipelineType
22316
22443
  */
22317
22444
 
22318
22445
  /**
22319
22446
  * Object detection pipeline using any `AutoModelForObjectDetection`.
22320
22447
  * This pipeline predicts bounding boxes of objects and their classes.
22321
- *
22448
+ *
22322
22449
  * **Example:** Run object-detection with `Xenova/detr-resnet-50`.
22323
22450
  * ```javascript
22324
22451
  * const detector = await pipeline('object-detection', 'Xenova/detr-resnet-50');
@@ -22392,27 +22519,27 @@ class ObjectDetectionPipeline extends (/** @type {new (options: ImagePipelineCon
22392
22519
  * @property {string} label Text query corresponding to the found object.
22393
22520
  * @property {number} score Score corresponding to the object (between 0 and 1).
22394
22521
  * @property {BoundingBox} box Bounding box of the detected object in image's original size, or as a percentage if `percentage` is set to true.
22395
- *
22522
+ *
22396
22523
  * @typedef {Object} ZeroShotObjectDetectionPipelineOptions Parameters specific to zero-shot object detection pipelines.
22397
22524
  * @property {number} [threshold=0.1] The probability necessary to make a prediction.
22398
22525
  * @property {number} [top_k=null] The number of top predictions that will be returned by the pipeline.
22399
22526
  * If the provided number is `null` or higher than the number of predictions available, it will default
22400
22527
  * to the number of predictions.
22401
22528
  * @property {boolean} [percentage=false] Whether to return the boxes coordinates in percentage (true) or in pixels (false).
22402
- *
22529
+ *
22403
22530
  * @callback ZeroShotObjectDetectionPipelineCallback Detect objects (bounding boxes & classes) in the image(s) passed as inputs.
22404
22531
  * @param {ImagePipelineInputs} images The input images.
22405
22532
  * @param {string[]} candidate_labels What the model should recognize in the image.
22406
22533
  * @param {ZeroShotObjectDetectionPipelineOptions} [options] The options to use for zero-shot object detection.
22407
22534
  * @returns {Promise<ZeroShotObjectDetectionOutput[]|ZeroShotObjectDetectionOutput[][]>} An array of objects containing the predicted labels, scores, and bounding boxes.
22408
- *
22535
+ *
22409
22536
  * @typedef {TextImagePipelineConstructorArgs & ZeroShotObjectDetectionPipelineCallback & Disposable} ZeroShotObjectDetectionPipelineType
22410
22537
  */
22411
22538
 
22412
22539
  /**
22413
22540
  * Zero-shot object detection pipeline. This pipeline predicts bounding boxes of
22414
22541
  * objects when you provide an image and a set of `candidate_labels`.
22415
- *
22542
+ *
22416
22543
  * **Example:** Zero-shot object detection w/ `Xenova/owlvit-base-patch32`.
22417
22544
  * ```javascript
22418
22545
  * const detector = await pipeline('zero-shot-object-detection', 'Xenova/owlvit-base-patch32');
@@ -22442,7 +22569,7 @@ class ObjectDetectionPipeline extends (/** @type {new (options: ImagePipelineCon
22442
22569
  * // }
22443
22570
  * // ]
22444
22571
  * ```
22445
- *
22572
+ *
22446
22573
  * **Example:** Zero-shot object detection w/ `Xenova/owlvit-base-patch32` (returning top 4 matches and setting a threshold).
22447
22574
  * ```javascript
22448
22575
  * const detector = await pipeline('zero-shot-object-detection', 'Xenova/owlvit-base-patch32');
@@ -22557,13 +22684,13 @@ class ZeroShotObjectDetectionPipeline extends (/** @type {new (options: TextImag
22557
22684
  * @typedef {Object} DocumentQuestionAnsweringSingle
22558
22685
  * @property {string} answer The generated text.
22559
22686
  * @typedef {DocumentQuestionAnsweringSingle[]} DocumentQuestionAnsweringOutput
22560
- *
22687
+ *
22561
22688
  * @callback DocumentQuestionAnsweringPipelineCallback Answer the question given as input by using the document.
22562
22689
  * @param {ImageInput} image The image of the document to use.
22563
22690
  * @param {string} question A question to ask of the document.
22564
22691
  * @param {Partial<import('./generation/configuration_utils.js').GenerationConfig>} [options] Additional keyword arguments to pass along to the generate method of the model.
22565
22692
  * @returns {Promise<DocumentQuestionAnsweringOutput|DocumentQuestionAnsweringOutput[]>} An object (or array of objects) containing the answer(s).
22566
- *
22693
+ *
22567
22694
  * @typedef {TextImagePipelineConstructorArgs & DocumentQuestionAnsweringPipelineCallback & Disposable} DocumentQuestionAnsweringPipelineType
22568
22695
  */
22569
22696
 
@@ -22571,7 +22698,7 @@ class ZeroShotObjectDetectionPipeline extends (/** @type {new (options: TextImag
22571
22698
  * Document Question Answering pipeline using any `AutoModelForDocumentQuestionAnswering`.
22572
22699
  * The inputs/outputs are similar to the (extractive) question answering pipeline; however,
22573
22700
  * the pipeline takes an image (and optional OCR'd words/boxes) as input instead of text context.
22574
- *
22701
+ *
22575
22702
  * **Example:** Answer questions about a document with `Xenova/donut-base-finetuned-docvqa`.
22576
22703
  * ```javascript
22577
22704
  * const qa_pipeline = await pipeline('document-question-answering', 'Xenova/donut-base-finetuned-docvqa');
@@ -22641,22 +22768,22 @@ class DocumentQuestionAnsweringPipeline extends (/** @type {new (options: TextIm
22641
22768
  * @typedef {Object} TextToAudioOutput
22642
22769
  * @property {Float32Array} audio The generated audio waveform.
22643
22770
  * @property {number} sampling_rate The sampling rate of the generated audio waveform.
22644
- *
22771
+ *
22645
22772
  * @typedef {Object} TextToAudioPipelineOptions Parameters specific to text-to-audio pipelines.
22646
22773
  * @property {Tensor|Float32Array|string|URL} [speaker_embeddings=null] The speaker embeddings (if the model requires it).
22647
- *
22774
+ *
22648
22775
  * @callback TextToAudioPipelineCallback Generates speech/audio from the inputs.
22649
22776
  * @param {string|string[]} texts The text(s) to generate.
22650
22777
  * @param {TextToAudioPipelineOptions} options Parameters passed to the model generation/forward method.
22651
22778
  * @returns {Promise<TextToAudioOutput>} An object containing the generated audio and sampling rate.
22652
- *
22779
+ *
22653
22780
  * @typedef {TextToAudioPipelineConstructorArgs & TextToAudioPipelineCallback & Disposable} TextToAudioPipelineType
22654
22781
  */
22655
22782
 
22656
22783
  /**
22657
22784
  * Text-to-audio generation pipeline using any `AutoModelForTextToWaveform` or `AutoModelForTextToSpectrogram`.
22658
22785
  * This pipeline generates an audio file from an input text and optional other conditional inputs.
22659
- *
22786
+ *
22660
22787
  * **Example:** Generate audio from text with `Xenova/speecht5_tts`.
22661
22788
  * ```javascript
22662
22789
  * const synthesizer = await pipeline('text-to-speech', 'Xenova/speecht5_tts', { quantized: false });
@@ -22667,17 +22794,17 @@ class DocumentQuestionAnsweringPipeline extends (/** @type {new (options: TextIm
22667
22794
  * // sampling_rate: 16000
22668
22795
  * // }
22669
22796
  * ```
22670
- *
22797
+ *
22671
22798
  * You can then save the audio to a .wav file with the `wavefile` package:
22672
22799
  * ```javascript
22673
22800
  * import wavefile from 'wavefile';
22674
22801
  * import fs from 'fs';
22675
- *
22802
+ *
22676
22803
  * const wav = new wavefile.WaveFile();
22677
22804
  * wav.fromScratch(1, out.sampling_rate, '32f', out.audio);
22678
22805
  * fs.writeFileSync('out.wav', wav.toBuffer());
22679
22806
  * ```
22680
- *
22807
+ *
22681
22808
  * **Example:** Multilingual speech generation with `Xenova/mms-tts-fra`. See [here](https://huggingface.co/models?pipeline_tag=text-to-speech&other=vits&sort=trending) for the full list of available languages (1107).
22682
22809
  * ```javascript
22683
22810
  * const synthesizer = await pipeline('text-to-speech', 'Xenova/mms-tts-fra');
@@ -22783,13 +22910,13 @@ class TextToAudioPipeline extends (/** @type {new (options: TextToAudioPipelineC
22783
22910
  * @callback ImageToImagePipelineCallback Transform the image(s) passed as inputs.
22784
22911
  * @param {ImagePipelineInputs} images The images to transform.
22785
22912
  * @returns {Promise<RawImage|RawImage[]>} The transformed image or list of images.
22786
- *
22913
+ *
22787
22914
  * @typedef {ImagePipelineConstructorArgs & ImageToImagePipelineCallback & Disposable} ImageToImagePipelineType
22788
22915
  */
22789
22916
 
22790
22917
  /**
22791
22918
  * Image to Image pipeline using any `AutoModelForImageToImage`. This pipeline generates an image based on a previous image input.
22792
- *
22919
+ *
22793
22920
  * **Example:** Super-resolution w/ `Xenova/swin2SR-classical-sr-x2-64`
22794
22921
  * ```javascript
22795
22922
  * const upscaler = await pipeline('image-to-image', 'Xenova/swin2SR-classical-sr-x2-64');
@@ -22834,17 +22961,17 @@ class ImageToImagePipeline extends (/** @type {new (options: ImagePipelineConstr
22834
22961
  * @typedef {Object} DepthEstimationPipelineOutput
22835
22962
  * @property {Tensor} predicted_depth The raw depth map predicted by the model.
22836
22963
  * @property {RawImage} depth The processed depth map as an image (with the same size as the input image).
22837
- *
22964
+ *
22838
22965
  * @callback DepthEstimationPipelineCallback Predicts the depth for the image(s) passed as inputs.
22839
22966
  * @param {ImagePipelineInputs} images The images to compute depth for.
22840
22967
  * @returns {Promise<DepthEstimationPipelineOutput|DepthEstimationPipelineOutput[]>} An image or a list of images containing result(s).
22841
- *
22968
+ *
22842
22969
  * @typedef {ImagePipelineConstructorArgs & DepthEstimationPipelineCallback & Disposable} DepthEstimationPipelineType
22843
22970
  */
22844
22971
 
22845
22972
  /**
22846
22973
  * Depth estimation pipeline using any `AutoModelForDepthEstimation`. This pipeline predicts the depth of an image.
22847
- *
22974
+ *
22848
22975
  * **Example:** Depth estimation w/ `Xenova/dpt-hybrid-midas`
22849
22976
  * ```javascript
22850
22977
  * const depth_estimator = await pipeline('depth-estimation', 'Xenova/dpt-hybrid-midas');
@@ -23229,7 +23356,7 @@ const TASK_ALIASES = Object.freeze({
23229
23356
 
23230
23357
  /**
23231
23358
  * Utility factory method to build a `Pipeline` object.
23232
- *
23359
+ *
23233
23360
  * @template {PipelineType} T The type of pipeline to return.
23234
23361
  * @param {T} task The task defining which pipeline will be returned. Currently accepted tasks are:
23235
23362
  * - `"audio-classification"`: will return a `AudioClassificationPipeline`.
@@ -30495,7 +30622,7 @@ class RawImage {
30495
30622
 
30496
30623
  /**
30497
30624
  * Helper method for reading an image from a variety of input types.
30498
- * @param {RawImage|string|URL} input
30625
+ * @param {RawImage|string|URL|Blob|HTMLCanvasElement|OffscreenCanvas} input
30499
30626
  * @returns The image object.
30500
30627
  *
30501
30628
  * **Example:** Read image from a URL.
@@ -30514,6 +30641,14 @@ class RawImage {
30514
30641
  return input;
30515
30642
  } else if (typeof input === 'string' || input instanceof URL) {
30516
30643
  return await this.fromURL(input);
30644
+ } else if (input instanceof Blob) {
30645
+ return await this.fromBlob(input);
30646
+ } else if (
30647
+ (typeof HTMLCanvasElement !== "undefined" && input instanceof HTMLCanvasElement)
30648
+ ||
30649
+ (typeof OffscreenCanvas !== "undefined" && input instanceof OffscreenCanvas)
30650
+ ) {
30651
+ return this.fromCanvas(input);
30517
30652
  } else {
30518
30653
  throw new Error(`Unsupported input type: ${typeof input}`);
30519
30654
  }
@@ -33523,8 +33658,12 @@ function calc_unsqueeze_dims(dims, dim) {
33523
33658
  * @private
33524
33659
  */
33525
33660
  function safeIndex(index, size, dimension = null, boundsCheck = true) {
33526
- if (boundsCheck && (index < -size || index >= size)) {
33527
- throw new Error(`IndexError: index ${index} is out of bounds for dimension${dimension === null ? '' : ' ' + dimension} with size ${size}`);
33661
+ if (index < -size || index >= size) {
33662
+ if (boundsCheck) {
33663
+ throw new Error(`IndexError: index ${index} is out of bounds for dimension${dimension === null ? '' : ' ' + dimension} with size ${size}`);
33664
+ } else {
33665
+ return index < -size ? 0 : size;
33666
+ }
33528
33667
  }
33529
33668
 
33530
33669
  if (index < 0) {
@@ -34379,6 +34518,9 @@ __webpack_require__.r(__webpack_exports__);
34379
34518
  /* harmony export */ Gemma2ForCausalLM: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.Gemma2ForCausalLM),
34380
34519
  /* harmony export */ Gemma2Model: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.Gemma2Model),
34381
34520
  /* harmony export */ Gemma2PreTrainedModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.Gemma2PreTrainedModel),
34521
+ /* harmony export */ Gemma3ForCausalLM: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.Gemma3ForCausalLM),
34522
+ /* harmony export */ Gemma3Model: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.Gemma3Model),
34523
+ /* harmony export */ Gemma3PreTrainedModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.Gemma3PreTrainedModel),
34382
34524
  /* harmony export */ GemmaForCausalLM: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.GemmaForCausalLM),
34383
34525
  /* harmony export */ GemmaModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.GemmaModel),
34384
34526
  /* harmony export */ GemmaPreTrainedModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.GemmaPreTrainedModel),
@@ -34480,6 +34622,10 @@ __webpack_require__.r(__webpack_exports__);
34480
34622
  /* harmony export */ MaskFormerPreTrainedModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.MaskFormerPreTrainedModel),
34481
34623
  /* harmony export */ MaskedLMOutput: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.MaskedLMOutput),
34482
34624
  /* harmony export */ MaxLengthCriteria: () => (/* reexport safe */ _generation_stopping_criteria_js__WEBPACK_IMPORTED_MODULE_20__.MaxLengthCriteria),
34625
+ /* harmony export */ Metric3DForDepthEstimation: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.Metric3DForDepthEstimation),
34626
+ /* harmony export */ Metric3DPreTrainedModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.Metric3DPreTrainedModel),
34627
+ /* harmony export */ Metric3Dv2ForDepthEstimation: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.Metric3Dv2ForDepthEstimation),
34628
+ /* harmony export */ Metric3Dv2PreTrainedModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.Metric3Dv2PreTrainedModel),
34483
34629
  /* harmony export */ MgpstrForSceneTextRecognition: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.MgpstrForSceneTextRecognition),
34484
34630
  /* harmony export */ MgpstrModelOutput: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.MgpstrModelOutput),
34485
34631
  /* harmony export */ MgpstrPreTrainedModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.MgpstrPreTrainedModel),
@@ -34686,6 +34832,11 @@ __webpack_require__.r(__webpack_exports__);
34686
34832
  /* harmony export */ SmolVLMForConditionalGeneration: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.SmolVLMForConditionalGeneration),
34687
34833
  /* harmony export */ SmolVLMImageProcessor: () => (/* reexport safe */ _models_image_processors_js__WEBPACK_IMPORTED_MODULE_14__.SmolVLMImageProcessor),
34688
34834
  /* harmony export */ SmolVLMProcessor: () => (/* reexport safe */ _models_processors_js__WEBPACK_IMPORTED_MODULE_17__.SmolVLMProcessor),
34835
+ /* harmony export */ SnacDecoderModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.SnacDecoderModel),
34836
+ /* harmony export */ SnacEncoderModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.SnacEncoderModel),
34837
+ /* harmony export */ SnacFeatureExtractor: () => (/* reexport safe */ _models_feature_extractors_js__WEBPACK_IMPORTED_MODULE_11__.SnacFeatureExtractor),
34838
+ /* harmony export */ SnacModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.SnacModel),
34839
+ /* harmony export */ SnacPreTrainedModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.SnacPreTrainedModel),
34689
34840
  /* harmony export */ SpeechT5FeatureExtractor: () => (/* reexport safe */ _models_feature_extractors_js__WEBPACK_IMPORTED_MODULE_11__.SpeechT5FeatureExtractor),
34690
34841
  /* harmony export */ SpeechT5ForSpeechToText: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.SpeechT5ForSpeechToText),
34691
34842
  /* harmony export */ SpeechT5ForTextToSpeech: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.SpeechT5ForTextToSpeech),
@@ -35208,6 +35359,9 @@ var __webpack_exports__GPTNeoXTokenizer = __webpack_exports__.GPTNeoXTokenizer;
35208
35359
  var __webpack_exports__Gemma2ForCausalLM = __webpack_exports__.Gemma2ForCausalLM;
35209
35360
  var __webpack_exports__Gemma2Model = __webpack_exports__.Gemma2Model;
35210
35361
  var __webpack_exports__Gemma2PreTrainedModel = __webpack_exports__.Gemma2PreTrainedModel;
35362
+ var __webpack_exports__Gemma3ForCausalLM = __webpack_exports__.Gemma3ForCausalLM;
35363
+ var __webpack_exports__Gemma3Model = __webpack_exports__.Gemma3Model;
35364
+ var __webpack_exports__Gemma3PreTrainedModel = __webpack_exports__.Gemma3PreTrainedModel;
35211
35365
  var __webpack_exports__GemmaForCausalLM = __webpack_exports__.GemmaForCausalLM;
35212
35366
  var __webpack_exports__GemmaModel = __webpack_exports__.GemmaModel;
35213
35367
  var __webpack_exports__GemmaPreTrainedModel = __webpack_exports__.GemmaPreTrainedModel;
@@ -35309,6 +35463,10 @@ var __webpack_exports__MaskFormerModel = __webpack_exports__.MaskFormerModel;
35309
35463
  var __webpack_exports__MaskFormerPreTrainedModel = __webpack_exports__.MaskFormerPreTrainedModel;
35310
35464
  var __webpack_exports__MaskedLMOutput = __webpack_exports__.MaskedLMOutput;
35311
35465
  var __webpack_exports__MaxLengthCriteria = __webpack_exports__.MaxLengthCriteria;
35466
+ var __webpack_exports__Metric3DForDepthEstimation = __webpack_exports__.Metric3DForDepthEstimation;
35467
+ var __webpack_exports__Metric3DPreTrainedModel = __webpack_exports__.Metric3DPreTrainedModel;
35468
+ var __webpack_exports__Metric3Dv2ForDepthEstimation = __webpack_exports__.Metric3Dv2ForDepthEstimation;
35469
+ var __webpack_exports__Metric3Dv2PreTrainedModel = __webpack_exports__.Metric3Dv2PreTrainedModel;
35312
35470
  var __webpack_exports__MgpstrForSceneTextRecognition = __webpack_exports__.MgpstrForSceneTextRecognition;
35313
35471
  var __webpack_exports__MgpstrModelOutput = __webpack_exports__.MgpstrModelOutput;
35314
35472
  var __webpack_exports__MgpstrPreTrainedModel = __webpack_exports__.MgpstrPreTrainedModel;
@@ -35515,6 +35673,11 @@ var __webpack_exports__SiglipVisionModel = __webpack_exports__.SiglipVisionModel
35515
35673
  var __webpack_exports__SmolVLMForConditionalGeneration = __webpack_exports__.SmolVLMForConditionalGeneration;
35516
35674
  var __webpack_exports__SmolVLMImageProcessor = __webpack_exports__.SmolVLMImageProcessor;
35517
35675
  var __webpack_exports__SmolVLMProcessor = __webpack_exports__.SmolVLMProcessor;
35676
+ var __webpack_exports__SnacDecoderModel = __webpack_exports__.SnacDecoderModel;
35677
+ var __webpack_exports__SnacEncoderModel = __webpack_exports__.SnacEncoderModel;
35678
+ var __webpack_exports__SnacFeatureExtractor = __webpack_exports__.SnacFeatureExtractor;
35679
+ var __webpack_exports__SnacModel = __webpack_exports__.SnacModel;
35680
+ var __webpack_exports__SnacPreTrainedModel = __webpack_exports__.SnacPreTrainedModel;
35518
35681
  var __webpack_exports__SpeechT5FeatureExtractor = __webpack_exports__.SpeechT5FeatureExtractor;
35519
35682
  var __webpack_exports__SpeechT5ForSpeechToText = __webpack_exports__.SpeechT5ForSpeechToText;
35520
35683
  var __webpack_exports__SpeechT5ForTextToSpeech = __webpack_exports__.SpeechT5ForTextToSpeech;
@@ -35708,6 +35871,6 @@ var __webpack_exports__topk = __webpack_exports__.topk;
35708
35871
  var __webpack_exports__window_function = __webpack_exports__.window_function;
35709
35872
  var __webpack_exports__zeros = __webpack_exports__.zeros;
35710
35873
  var __webpack_exports__zeros_like = __webpack_exports__.zeros_like;
35711
- export { __webpack_exports__ASTFeatureExtractor as ASTFeatureExtractor, __webpack_exports__ASTForAudioClassification as ASTForAudioClassification, __webpack_exports__ASTModel as ASTModel, __webpack_exports__ASTPreTrainedModel as ASTPreTrainedModel, __webpack_exports__AlbertForMaskedLM as AlbertForMaskedLM, __webpack_exports__AlbertForQuestionAnswering as AlbertForQuestionAnswering, __webpack_exports__AlbertForSequenceClassification as AlbertForSequenceClassification, __webpack_exports__AlbertModel as AlbertModel, __webpack_exports__AlbertPreTrainedModel as AlbertPreTrainedModel, __webpack_exports__AlbertTokenizer as AlbertTokenizer, __webpack_exports__AudioClassificationPipeline as AudioClassificationPipeline, __webpack_exports__AutoConfig as AutoConfig, __webpack_exports__AutoFeatureExtractor as AutoFeatureExtractor, __webpack_exports__AutoImageProcessor as AutoImageProcessor, __webpack_exports__AutoModel as AutoModel, __webpack_exports__AutoModelForAudioClassification as AutoModelForAudioClassification, __webpack_exports__AutoModelForAudioFrameClassification as AutoModelForAudioFrameClassification, __webpack_exports__AutoModelForAudioTextToText as AutoModelForAudioTextToText, __webpack_exports__AutoModelForCTC as AutoModelForCTC, __webpack_exports__AutoModelForCausalLM as AutoModelForCausalLM, __webpack_exports__AutoModelForDepthEstimation as AutoModelForDepthEstimation, __webpack_exports__AutoModelForDocumentQuestionAnswering as AutoModelForDocumentQuestionAnswering, __webpack_exports__AutoModelForImageClassification as AutoModelForImageClassification, __webpack_exports__AutoModelForImageFeatureExtraction as AutoModelForImageFeatureExtraction, __webpack_exports__AutoModelForImageMatting as AutoModelForImageMatting, __webpack_exports__AutoModelForImageSegmentation as AutoModelForImageSegmentation, __webpack_exports__AutoModelForImageTextToText as AutoModelForImageTextToText, __webpack_exports__AutoModelForImageToImage as AutoModelForImageToImage, 
__webpack_exports__AutoModelForMaskGeneration as AutoModelForMaskGeneration, __webpack_exports__AutoModelForMaskedLM as AutoModelForMaskedLM, __webpack_exports__AutoModelForNormalEstimation as AutoModelForNormalEstimation, __webpack_exports__AutoModelForObjectDetection as AutoModelForObjectDetection, __webpack_exports__AutoModelForPoseEstimation as AutoModelForPoseEstimation, __webpack_exports__AutoModelForQuestionAnswering as AutoModelForQuestionAnswering, __webpack_exports__AutoModelForSemanticSegmentation as AutoModelForSemanticSegmentation, __webpack_exports__AutoModelForSeq2SeqLM as AutoModelForSeq2SeqLM, __webpack_exports__AutoModelForSequenceClassification as AutoModelForSequenceClassification, __webpack_exports__AutoModelForSpeechSeq2Seq as AutoModelForSpeechSeq2Seq, __webpack_exports__AutoModelForTextToSpectrogram as AutoModelForTextToSpectrogram, __webpack_exports__AutoModelForTextToWaveform as AutoModelForTextToWaveform, __webpack_exports__AutoModelForTokenClassification as AutoModelForTokenClassification, __webpack_exports__AutoModelForUniversalSegmentation as AutoModelForUniversalSegmentation, __webpack_exports__AutoModelForVision2Seq as AutoModelForVision2Seq, __webpack_exports__AutoModelForXVector as AutoModelForXVector, __webpack_exports__AutoModelForZeroShotObjectDetection as AutoModelForZeroShotObjectDetection, __webpack_exports__AutoProcessor as AutoProcessor, __webpack_exports__AutoTokenizer as AutoTokenizer, __webpack_exports__AutomaticSpeechRecognitionPipeline as AutomaticSpeechRecognitionPipeline, __webpack_exports__BackgroundRemovalPipeline as BackgroundRemovalPipeline, __webpack_exports__BartForConditionalGeneration as BartForConditionalGeneration, __webpack_exports__BartForSequenceClassification as BartForSequenceClassification, __webpack_exports__BartModel as BartModel, __webpack_exports__BartPretrainedModel as BartPretrainedModel, __webpack_exports__BartTokenizer as BartTokenizer, __webpack_exports__BaseModelOutput as BaseModelOutput, 
__webpack_exports__BaseStreamer as BaseStreamer, __webpack_exports__BeitFeatureExtractor as BeitFeatureExtractor, __webpack_exports__BeitForImageClassification as BeitForImageClassification, __webpack_exports__BeitModel as BeitModel, __webpack_exports__BeitPreTrainedModel as BeitPreTrainedModel, __webpack_exports__BertForMaskedLM as BertForMaskedLM, __webpack_exports__BertForQuestionAnswering as BertForQuestionAnswering, __webpack_exports__BertForSequenceClassification as BertForSequenceClassification, __webpack_exports__BertForTokenClassification as BertForTokenClassification, __webpack_exports__BertModel as BertModel, __webpack_exports__BertPreTrainedModel as BertPreTrainedModel, __webpack_exports__BertTokenizer as BertTokenizer, __webpack_exports__BitImageProcessor as BitImageProcessor, __webpack_exports__BlenderbotForConditionalGeneration as BlenderbotForConditionalGeneration, __webpack_exports__BlenderbotModel as BlenderbotModel, __webpack_exports__BlenderbotPreTrainedModel as BlenderbotPreTrainedModel, __webpack_exports__BlenderbotSmallForConditionalGeneration as BlenderbotSmallForConditionalGeneration, __webpack_exports__BlenderbotSmallModel as BlenderbotSmallModel, __webpack_exports__BlenderbotSmallPreTrainedModel as BlenderbotSmallPreTrainedModel, __webpack_exports__BlenderbotSmallTokenizer as BlenderbotSmallTokenizer, __webpack_exports__BlenderbotTokenizer as BlenderbotTokenizer, __webpack_exports__BloomForCausalLM as BloomForCausalLM, __webpack_exports__BloomModel as BloomModel, __webpack_exports__BloomPreTrainedModel as BloomPreTrainedModel, __webpack_exports__BloomTokenizer as BloomTokenizer, __webpack_exports__CLIPFeatureExtractor as CLIPFeatureExtractor, __webpack_exports__CLIPImageProcessor as CLIPImageProcessor, __webpack_exports__CLIPModel as CLIPModel, __webpack_exports__CLIPPreTrainedModel as CLIPPreTrainedModel, __webpack_exports__CLIPSegForImageSegmentation as CLIPSegForImageSegmentation, __webpack_exports__CLIPSegModel as CLIPSegModel, 
__webpack_exports__CLIPSegPreTrainedModel as CLIPSegPreTrainedModel, __webpack_exports__CLIPTextModel as CLIPTextModel, __webpack_exports__CLIPTextModelWithProjection as CLIPTextModelWithProjection, __webpack_exports__CLIPTokenizer as CLIPTokenizer, __webpack_exports__CLIPVisionModel as CLIPVisionModel, __webpack_exports__CLIPVisionModelWithProjection as CLIPVisionModelWithProjection, __webpack_exports__CamembertForMaskedLM as CamembertForMaskedLM, __webpack_exports__CamembertForQuestionAnswering as CamembertForQuestionAnswering, __webpack_exports__CamembertForSequenceClassification as CamembertForSequenceClassification, __webpack_exports__CamembertForTokenClassification as CamembertForTokenClassification, __webpack_exports__CamembertModel as CamembertModel, __webpack_exports__CamembertPreTrainedModel as CamembertPreTrainedModel, __webpack_exports__CamembertTokenizer as CamembertTokenizer, __webpack_exports__CausalLMOutput as CausalLMOutput, __webpack_exports__CausalLMOutputWithPast as CausalLMOutputWithPast, __webpack_exports__ChineseCLIPFeatureExtractor as ChineseCLIPFeatureExtractor, __webpack_exports__ChineseCLIPModel as ChineseCLIPModel, __webpack_exports__ChineseCLIPPreTrainedModel as ChineseCLIPPreTrainedModel, __webpack_exports__ClapAudioModelWithProjection as ClapAudioModelWithProjection, __webpack_exports__ClapFeatureExtractor as ClapFeatureExtractor, __webpack_exports__ClapModel as ClapModel, __webpack_exports__ClapPreTrainedModel as ClapPreTrainedModel, __webpack_exports__ClapTextModelWithProjection as ClapTextModelWithProjection, __webpack_exports__ClassifierFreeGuidanceLogitsProcessor as ClassifierFreeGuidanceLogitsProcessor, __webpack_exports__CodeGenForCausalLM as CodeGenForCausalLM, __webpack_exports__CodeGenModel as CodeGenModel, __webpack_exports__CodeGenPreTrainedModel as CodeGenPreTrainedModel, __webpack_exports__CodeGenTokenizer as CodeGenTokenizer, __webpack_exports__CodeLlamaTokenizer as CodeLlamaTokenizer, 
__webpack_exports__CohereForCausalLM as CohereForCausalLM, __webpack_exports__CohereModel as CohereModel, __webpack_exports__CoherePreTrainedModel as CoherePreTrainedModel, __webpack_exports__CohereTokenizer as CohereTokenizer, __webpack_exports__ConvBertForMaskedLM as ConvBertForMaskedLM, __webpack_exports__ConvBertForQuestionAnswering as ConvBertForQuestionAnswering, __webpack_exports__ConvBertForSequenceClassification as ConvBertForSequenceClassification, __webpack_exports__ConvBertForTokenClassification as ConvBertForTokenClassification, __webpack_exports__ConvBertModel as ConvBertModel, __webpack_exports__ConvBertPreTrainedModel as ConvBertPreTrainedModel, __webpack_exports__ConvBertTokenizer as ConvBertTokenizer, __webpack_exports__ConvNextFeatureExtractor as ConvNextFeatureExtractor, __webpack_exports__ConvNextForImageClassification as ConvNextForImageClassification, __webpack_exports__ConvNextImageProcessor as ConvNextImageProcessor, __webpack_exports__ConvNextModel as ConvNextModel, __webpack_exports__ConvNextPreTrainedModel as ConvNextPreTrainedModel, __webpack_exports__ConvNextV2ForImageClassification as ConvNextV2ForImageClassification, __webpack_exports__ConvNextV2Model as ConvNextV2Model, __webpack_exports__ConvNextV2PreTrainedModel as ConvNextV2PreTrainedModel, __webpack_exports__DPTFeatureExtractor as DPTFeatureExtractor, __webpack_exports__DPTForDepthEstimation as DPTForDepthEstimation, __webpack_exports__DPTImageProcessor as DPTImageProcessor, __webpack_exports__DPTModel as DPTModel, __webpack_exports__DPTPreTrainedModel as DPTPreTrainedModel, __webpack_exports__DacDecoderModel as DacDecoderModel, __webpack_exports__DacDecoderOutput as DacDecoderOutput, __webpack_exports__DacEncoderModel as DacEncoderModel, __webpack_exports__DacEncoderOutput as DacEncoderOutput, __webpack_exports__DacFeatureExtractor as DacFeatureExtractor, __webpack_exports__DacModel as DacModel, __webpack_exports__DacPreTrainedModel as DacPreTrainedModel, 
__webpack_exports__DataTypeMap as DataTypeMap, __webpack_exports__DebertaForMaskedLM as DebertaForMaskedLM, __webpack_exports__DebertaForQuestionAnswering as DebertaForQuestionAnswering, __webpack_exports__DebertaForSequenceClassification as DebertaForSequenceClassification, __webpack_exports__DebertaForTokenClassification as DebertaForTokenClassification, __webpack_exports__DebertaModel as DebertaModel, __webpack_exports__DebertaPreTrainedModel as DebertaPreTrainedModel, __webpack_exports__DebertaTokenizer as DebertaTokenizer, __webpack_exports__DebertaV2ForMaskedLM as DebertaV2ForMaskedLM, __webpack_exports__DebertaV2ForQuestionAnswering as DebertaV2ForQuestionAnswering, __webpack_exports__DebertaV2ForSequenceClassification as DebertaV2ForSequenceClassification, __webpack_exports__DebertaV2ForTokenClassification as DebertaV2ForTokenClassification, __webpack_exports__DebertaV2Model as DebertaV2Model, __webpack_exports__DebertaV2PreTrainedModel as DebertaV2PreTrainedModel, __webpack_exports__DebertaV2Tokenizer as DebertaV2Tokenizer, __webpack_exports__DecisionTransformerModel as DecisionTransformerModel, __webpack_exports__DecisionTransformerPreTrainedModel as DecisionTransformerPreTrainedModel, __webpack_exports__DeiTFeatureExtractor as DeiTFeatureExtractor, __webpack_exports__DeiTForImageClassification as DeiTForImageClassification, __webpack_exports__DeiTImageProcessor as DeiTImageProcessor, __webpack_exports__DeiTModel as DeiTModel, __webpack_exports__DeiTPreTrainedModel as DeiTPreTrainedModel, __webpack_exports__DepthAnythingForDepthEstimation as DepthAnythingForDepthEstimation, __webpack_exports__DepthAnythingPreTrainedModel as DepthAnythingPreTrainedModel, __webpack_exports__DepthEstimationPipeline as DepthEstimationPipeline, __webpack_exports__DepthProForDepthEstimation as DepthProForDepthEstimation, __webpack_exports__DepthProPreTrainedModel as DepthProPreTrainedModel, __webpack_exports__DetrFeatureExtractor as DetrFeatureExtractor, 
__webpack_exports__DetrForObjectDetection as DetrForObjectDetection, __webpack_exports__DetrForSegmentation as DetrForSegmentation, __webpack_exports__DetrImageProcessor as DetrImageProcessor, __webpack_exports__DetrModel as DetrModel, __webpack_exports__DetrObjectDetectionOutput as DetrObjectDetectionOutput, __webpack_exports__DetrPreTrainedModel as DetrPreTrainedModel, __webpack_exports__DetrSegmentationOutput as DetrSegmentationOutput, __webpack_exports__Dinov2ForImageClassification as Dinov2ForImageClassification, __webpack_exports__Dinov2Model as Dinov2Model, __webpack_exports__Dinov2PreTrainedModel as Dinov2PreTrainedModel, __webpack_exports__Dinov2WithRegistersForImageClassification as Dinov2WithRegistersForImageClassification, __webpack_exports__Dinov2WithRegistersModel as Dinov2WithRegistersModel, __webpack_exports__Dinov2WithRegistersPreTrainedModel as Dinov2WithRegistersPreTrainedModel, __webpack_exports__DistilBertForMaskedLM as DistilBertForMaskedLM, __webpack_exports__DistilBertForQuestionAnswering as DistilBertForQuestionAnswering, __webpack_exports__DistilBertForSequenceClassification as DistilBertForSequenceClassification, __webpack_exports__DistilBertForTokenClassification as DistilBertForTokenClassification, __webpack_exports__DistilBertModel as DistilBertModel, __webpack_exports__DistilBertPreTrainedModel as DistilBertPreTrainedModel, __webpack_exports__DistilBertTokenizer as DistilBertTokenizer, __webpack_exports__DocumentQuestionAnsweringPipeline as DocumentQuestionAnsweringPipeline, __webpack_exports__DonutFeatureExtractor as DonutFeatureExtractor, __webpack_exports__DonutImageProcessor as DonutImageProcessor, __webpack_exports__DonutSwinModel as DonutSwinModel, __webpack_exports__DonutSwinPreTrainedModel as DonutSwinPreTrainedModel, __webpack_exports__EfficientNetForImageClassification as EfficientNetForImageClassification, __webpack_exports__EfficientNetImageProcessor as EfficientNetImageProcessor, __webpack_exports__EfficientNetModel as 
EfficientNetModel, __webpack_exports__EfficientNetPreTrainedModel as EfficientNetPreTrainedModel, __webpack_exports__ElectraForMaskedLM as ElectraForMaskedLM, __webpack_exports__ElectraForQuestionAnswering as ElectraForQuestionAnswering, __webpack_exports__ElectraForSequenceClassification as ElectraForSequenceClassification, __webpack_exports__ElectraForTokenClassification as ElectraForTokenClassification, __webpack_exports__ElectraModel as ElectraModel, __webpack_exports__ElectraPreTrainedModel as ElectraPreTrainedModel, __webpack_exports__ElectraTokenizer as ElectraTokenizer, __webpack_exports__EncodecFeatureExtractor as EncodecFeatureExtractor, __webpack_exports__EosTokenCriteria as EosTokenCriteria, __webpack_exports__EsmForMaskedLM as EsmForMaskedLM, __webpack_exports__EsmForSequenceClassification as EsmForSequenceClassification, __webpack_exports__EsmForTokenClassification as EsmForTokenClassification, __webpack_exports__EsmModel as EsmModel, __webpack_exports__EsmPreTrainedModel as EsmPreTrainedModel, __webpack_exports__EsmTokenizer as EsmTokenizer, __webpack_exports__ExaoneForCausalLM as ExaoneForCausalLM, __webpack_exports__ExaoneModel as ExaoneModel, __webpack_exports__ExaonePreTrainedModel as ExaonePreTrainedModel, __webpack_exports__FFT as FFT, __webpack_exports__FalconForCausalLM as FalconForCausalLM, __webpack_exports__FalconModel as FalconModel, __webpack_exports__FalconPreTrainedModel as FalconPreTrainedModel, __webpack_exports__FalconTokenizer as FalconTokenizer, __webpack_exports__FastViTForImageClassification as FastViTForImageClassification, __webpack_exports__FastViTModel as FastViTModel, __webpack_exports__FastViTPreTrainedModel as FastViTPreTrainedModel, __webpack_exports__FeatureExtractionPipeline as FeatureExtractionPipeline, __webpack_exports__FeatureExtractor as FeatureExtractor, __webpack_exports__FillMaskPipeline as FillMaskPipeline, __webpack_exports__Florence2ForConditionalGeneration as Florence2ForConditionalGeneration, 
__webpack_exports__Florence2PreTrainedModel as Florence2PreTrainedModel, __webpack_exports__Florence2Processor as Florence2Processor, __webpack_exports__ForcedBOSTokenLogitsProcessor as ForcedBOSTokenLogitsProcessor, __webpack_exports__ForcedEOSTokenLogitsProcessor as ForcedEOSTokenLogitsProcessor, __webpack_exports__GLPNFeatureExtractor as GLPNFeatureExtractor, __webpack_exports__GLPNForDepthEstimation as GLPNForDepthEstimation, __webpack_exports__GLPNModel as GLPNModel, __webpack_exports__GLPNPreTrainedModel as GLPNPreTrainedModel, __webpack_exports__GPT2LMHeadModel as GPT2LMHeadModel, __webpack_exports__GPT2Model as GPT2Model, __webpack_exports__GPT2PreTrainedModel as GPT2PreTrainedModel, __webpack_exports__GPT2Tokenizer as GPT2Tokenizer, __webpack_exports__GPTBigCodeForCausalLM as GPTBigCodeForCausalLM, __webpack_exports__GPTBigCodeModel as GPTBigCodeModel, __webpack_exports__GPTBigCodePreTrainedModel as GPTBigCodePreTrainedModel, __webpack_exports__GPTJForCausalLM as GPTJForCausalLM, __webpack_exports__GPTJModel as GPTJModel, __webpack_exports__GPTJPreTrainedModel as GPTJPreTrainedModel, __webpack_exports__GPTNeoForCausalLM as GPTNeoForCausalLM, __webpack_exports__GPTNeoModel as GPTNeoModel, __webpack_exports__GPTNeoPreTrainedModel as GPTNeoPreTrainedModel, __webpack_exports__GPTNeoXForCausalLM as GPTNeoXForCausalLM, __webpack_exports__GPTNeoXModel as GPTNeoXModel, __webpack_exports__GPTNeoXPreTrainedModel as GPTNeoXPreTrainedModel, __webpack_exports__GPTNeoXTokenizer as GPTNeoXTokenizer, __webpack_exports__Gemma2ForCausalLM as Gemma2ForCausalLM, __webpack_exports__Gemma2Model as Gemma2Model, __webpack_exports__Gemma2PreTrainedModel as Gemma2PreTrainedModel, __webpack_exports__GemmaForCausalLM as GemmaForCausalLM, __webpack_exports__GemmaModel as GemmaModel, __webpack_exports__GemmaPreTrainedModel as GemmaPreTrainedModel, __webpack_exports__GemmaTokenizer as GemmaTokenizer, __webpack_exports__GlmForCausalLM as GlmForCausalLM, __webpack_exports__GlmModel as 
GlmModel, __webpack_exports__GlmPreTrainedModel as GlmPreTrainedModel, __webpack_exports__GraniteForCausalLM as GraniteForCausalLM, __webpack_exports__GraniteModel as GraniteModel, __webpack_exports__GranitePreTrainedModel as GranitePreTrainedModel, __webpack_exports__Grok1Tokenizer as Grok1Tokenizer, __webpack_exports__GroundingDinoForObjectDetection as GroundingDinoForObjectDetection, __webpack_exports__GroundingDinoImageProcessor as GroundingDinoImageProcessor, __webpack_exports__GroundingDinoPreTrainedModel as GroundingDinoPreTrainedModel, __webpack_exports__GroundingDinoProcessor as GroundingDinoProcessor, __webpack_exports__GroupViTModel as GroupViTModel, __webpack_exports__GroupViTPreTrainedModel as GroupViTPreTrainedModel, __webpack_exports__HeliumForCausalLM as HeliumForCausalLM, __webpack_exports__HeliumModel as HeliumModel, __webpack_exports__HeliumPreTrainedModel as HeliumPreTrainedModel, __webpack_exports__HerbertTokenizer as HerbertTokenizer, __webpack_exports__HieraForImageClassification as HieraForImageClassification, __webpack_exports__HieraModel as HieraModel, __webpack_exports__HieraPreTrainedModel as HieraPreTrainedModel, __webpack_exports__HubertForCTC as HubertForCTC, __webpack_exports__HubertForSequenceClassification as HubertForSequenceClassification, __webpack_exports__HubertModel as HubertModel, __webpack_exports__HubertPreTrainedModel as HubertPreTrainedModel, __webpack_exports__IJepaForImageClassification as IJepaForImageClassification, __webpack_exports__IJepaModel as IJepaModel, __webpack_exports__IJepaPreTrainedModel as IJepaPreTrainedModel, __webpack_exports__Idefics3ForConditionalGeneration as Idefics3ForConditionalGeneration, __webpack_exports__Idefics3ImageProcessor as Idefics3ImageProcessor, __webpack_exports__Idefics3PreTrainedModel as Idefics3PreTrainedModel, __webpack_exports__Idefics3Processor as Idefics3Processor, __webpack_exports__ImageClassificationPipeline as ImageClassificationPipeline, 
__webpack_exports__ImageFeatureExtractionPipeline as ImageFeatureExtractionPipeline, __webpack_exports__ImageFeatureExtractor as ImageFeatureExtractor, __webpack_exports__ImageMattingOutput as ImageMattingOutput, __webpack_exports__ImageProcessor as ImageProcessor, __webpack_exports__ImageSegmentationPipeline as ImageSegmentationPipeline, __webpack_exports__ImageToImagePipeline as ImageToImagePipeline, __webpack_exports__ImageToTextPipeline as ImageToTextPipeline, __webpack_exports__InterruptableStoppingCriteria as InterruptableStoppingCriteria, __webpack_exports__JAISLMHeadModel as JAISLMHeadModel, __webpack_exports__JAISModel as JAISModel, __webpack_exports__JAISPreTrainedModel as JAISPreTrainedModel, __webpack_exports__JinaCLIPImageProcessor as JinaCLIPImageProcessor, __webpack_exports__JinaCLIPModel as JinaCLIPModel, __webpack_exports__JinaCLIPPreTrainedModel as JinaCLIPPreTrainedModel, __webpack_exports__JinaCLIPProcessor as JinaCLIPProcessor, __webpack_exports__JinaCLIPTextModel as JinaCLIPTextModel, __webpack_exports__JinaCLIPVisionModel as JinaCLIPVisionModel, __webpack_exports__LiteWhisperForConditionalGeneration as LiteWhisperForConditionalGeneration, __webpack_exports__LlamaForCausalLM as LlamaForCausalLM, __webpack_exports__LlamaModel as LlamaModel, __webpack_exports__LlamaPreTrainedModel as LlamaPreTrainedModel, __webpack_exports__LlamaTokenizer as LlamaTokenizer, __webpack_exports__LlavaForConditionalGeneration as LlavaForConditionalGeneration, __webpack_exports__LlavaOnevisionForConditionalGeneration as LlavaOnevisionForConditionalGeneration, __webpack_exports__LlavaOnevisionImageProcessor as LlavaOnevisionImageProcessor, __webpack_exports__LlavaPreTrainedModel as LlavaPreTrainedModel, __webpack_exports__LogitsProcessor as LogitsProcessor, __webpack_exports__LogitsProcessorList as LogitsProcessorList, __webpack_exports__LogitsWarper as LogitsWarper, __webpack_exports__LongT5ForConditionalGeneration as LongT5ForConditionalGeneration, 
__webpack_exports__LongT5Model as LongT5Model, __webpack_exports__LongT5PreTrainedModel as LongT5PreTrainedModel, __webpack_exports__M2M100ForConditionalGeneration as M2M100ForConditionalGeneration, __webpack_exports__M2M100Model as M2M100Model, __webpack_exports__M2M100PreTrainedModel as M2M100PreTrainedModel, __webpack_exports__M2M100Tokenizer as M2M100Tokenizer, __webpack_exports__MBart50Tokenizer as MBart50Tokenizer, __webpack_exports__MBartForCausalLM as MBartForCausalLM, __webpack_exports__MBartForConditionalGeneration as MBartForConditionalGeneration, __webpack_exports__MBartForSequenceClassification as MBartForSequenceClassification, __webpack_exports__MBartModel as MBartModel, __webpack_exports__MBartPreTrainedModel as MBartPreTrainedModel, __webpack_exports__MBartTokenizer as MBartTokenizer, __webpack_exports__MPNetForMaskedLM as MPNetForMaskedLM, __webpack_exports__MPNetForQuestionAnswering as MPNetForQuestionAnswering, __webpack_exports__MPNetForSequenceClassification as MPNetForSequenceClassification, __webpack_exports__MPNetForTokenClassification as MPNetForTokenClassification, __webpack_exports__MPNetModel as MPNetModel, __webpack_exports__MPNetPreTrainedModel as MPNetPreTrainedModel, __webpack_exports__MPNetTokenizer as MPNetTokenizer, __webpack_exports__MT5ForConditionalGeneration as MT5ForConditionalGeneration, __webpack_exports__MT5Model as MT5Model, __webpack_exports__MT5PreTrainedModel as MT5PreTrainedModel, __webpack_exports__MarianMTModel as MarianMTModel, __webpack_exports__MarianModel as MarianModel, __webpack_exports__MarianPreTrainedModel as MarianPreTrainedModel, __webpack_exports__MarianTokenizer as MarianTokenizer, __webpack_exports__Mask2FormerImageProcessor as Mask2FormerImageProcessor, __webpack_exports__MaskFormerFeatureExtractor as MaskFormerFeatureExtractor, __webpack_exports__MaskFormerForInstanceSegmentation as MaskFormerForInstanceSegmentation, __webpack_exports__MaskFormerImageProcessor as MaskFormerImageProcessor, 
__webpack_exports__MaskFormerModel as MaskFormerModel, __webpack_exports__MaskFormerPreTrainedModel as MaskFormerPreTrainedModel, __webpack_exports__MaskedLMOutput as MaskedLMOutput, __webpack_exports__MaxLengthCriteria as MaxLengthCriteria, __webpack_exports__MgpstrForSceneTextRecognition as MgpstrForSceneTextRecognition, __webpack_exports__MgpstrModelOutput as MgpstrModelOutput, __webpack_exports__MgpstrPreTrainedModel as MgpstrPreTrainedModel, __webpack_exports__MgpstrProcessor as MgpstrProcessor, __webpack_exports__MgpstrTokenizer as MgpstrTokenizer, __webpack_exports__MimiDecoderModel as MimiDecoderModel, __webpack_exports__MimiDecoderOutput as MimiDecoderOutput, __webpack_exports__MimiEncoderModel as MimiEncoderModel, __webpack_exports__MimiEncoderOutput as MimiEncoderOutput, __webpack_exports__MimiModel as MimiModel, __webpack_exports__MimiPreTrainedModel as MimiPreTrainedModel, __webpack_exports__MinLengthLogitsProcessor as MinLengthLogitsProcessor, __webpack_exports__MinNewTokensLengthLogitsProcessor as MinNewTokensLengthLogitsProcessor, __webpack_exports__MistralForCausalLM as MistralForCausalLM, __webpack_exports__MistralModel as MistralModel, __webpack_exports__MistralPreTrainedModel as MistralPreTrainedModel, __webpack_exports__MobileBertForMaskedLM as MobileBertForMaskedLM, __webpack_exports__MobileBertForQuestionAnswering as MobileBertForQuestionAnswering, __webpack_exports__MobileBertForSequenceClassification as MobileBertForSequenceClassification, __webpack_exports__MobileBertModel as MobileBertModel, __webpack_exports__MobileBertPreTrainedModel as MobileBertPreTrainedModel, __webpack_exports__MobileBertTokenizer as MobileBertTokenizer, __webpack_exports__MobileLLMForCausalLM as MobileLLMForCausalLM, __webpack_exports__MobileLLMModel as MobileLLMModel, __webpack_exports__MobileLLMPreTrainedModel as MobileLLMPreTrainedModel, __webpack_exports__MobileNetV1FeatureExtractor as MobileNetV1FeatureExtractor, 
__webpack_exports__MobileNetV1ForImageClassification as MobileNetV1ForImageClassification, __webpack_exports__MobileNetV1ForSemanticSegmentation as MobileNetV1ForSemanticSegmentation, __webpack_exports__MobileNetV1ImageProcessor as MobileNetV1ImageProcessor, __webpack_exports__MobileNetV1Model as MobileNetV1Model, __webpack_exports__MobileNetV1PreTrainedModel as MobileNetV1PreTrainedModel, __webpack_exports__MobileNetV2FeatureExtractor as MobileNetV2FeatureExtractor, __webpack_exports__MobileNetV2ForImageClassification as MobileNetV2ForImageClassification, __webpack_exports__MobileNetV2ForSemanticSegmentation as MobileNetV2ForSemanticSegmentation, __webpack_exports__MobileNetV2ImageProcessor as MobileNetV2ImageProcessor, __webpack_exports__MobileNetV2Model as MobileNetV2Model, __webpack_exports__MobileNetV2PreTrainedModel as MobileNetV2PreTrainedModel, __webpack_exports__MobileNetV3FeatureExtractor as MobileNetV3FeatureExtractor, __webpack_exports__MobileNetV3ForImageClassification as MobileNetV3ForImageClassification, __webpack_exports__MobileNetV3ForSemanticSegmentation as MobileNetV3ForSemanticSegmentation, __webpack_exports__MobileNetV3ImageProcessor as MobileNetV3ImageProcessor, __webpack_exports__MobileNetV3Model as MobileNetV3Model, __webpack_exports__MobileNetV3PreTrainedModel as MobileNetV3PreTrainedModel, __webpack_exports__MobileNetV4FeatureExtractor as MobileNetV4FeatureExtractor, __webpack_exports__MobileNetV4ForImageClassification as MobileNetV4ForImageClassification, __webpack_exports__MobileNetV4ForSemanticSegmentation as MobileNetV4ForSemanticSegmentation, __webpack_exports__MobileNetV4ImageProcessor as MobileNetV4ImageProcessor, __webpack_exports__MobileNetV4Model as MobileNetV4Model, __webpack_exports__MobileNetV4PreTrainedModel as MobileNetV4PreTrainedModel, __webpack_exports__MobileViTFeatureExtractor as MobileViTFeatureExtractor, __webpack_exports__MobileViTForImageClassification as MobileViTForImageClassification, 
__webpack_exports__MobileViTImageProcessor as MobileViTImageProcessor, __webpack_exports__MobileViTModel as MobileViTModel, __webpack_exports__MobileViTPreTrainedModel as MobileViTPreTrainedModel, __webpack_exports__MobileViTV2ForImageClassification as MobileViTV2ForImageClassification, __webpack_exports__MobileViTV2Model as MobileViTV2Model, __webpack_exports__MobileViTV2PreTrainedModel as MobileViTV2PreTrainedModel, __webpack_exports__ModelOutput as ModelOutput, __webpack_exports__ModernBertForMaskedLM as ModernBertForMaskedLM, __webpack_exports__ModernBertForSequenceClassification as ModernBertForSequenceClassification, __webpack_exports__ModernBertForTokenClassification as ModernBertForTokenClassification, __webpack_exports__ModernBertModel as ModernBertModel, __webpack_exports__ModernBertPreTrainedModel as ModernBertPreTrainedModel, __webpack_exports__Moondream1ForConditionalGeneration as Moondream1ForConditionalGeneration, __webpack_exports__MoonshineFeatureExtractor as MoonshineFeatureExtractor, __webpack_exports__MoonshineForConditionalGeneration as MoonshineForConditionalGeneration, __webpack_exports__MoonshineModel as MoonshineModel, __webpack_exports__MoonshinePreTrainedModel as MoonshinePreTrainedModel, __webpack_exports__MoonshineProcessor as MoonshineProcessor, __webpack_exports__MptForCausalLM as MptForCausalLM, __webpack_exports__MptModel as MptModel, __webpack_exports__MptPreTrainedModel as MptPreTrainedModel, __webpack_exports__MultiModalityCausalLM as MultiModalityCausalLM, __webpack_exports__MultiModalityPreTrainedModel as MultiModalityPreTrainedModel, __webpack_exports__MusicgenForCausalLM as MusicgenForCausalLM, __webpack_exports__MusicgenForConditionalGeneration as MusicgenForConditionalGeneration, __webpack_exports__MusicgenModel as MusicgenModel, __webpack_exports__MusicgenPreTrainedModel as MusicgenPreTrainedModel, __webpack_exports__NllbTokenizer as NllbTokenizer, __webpack_exports__NoBadWordsLogitsProcessor as NoBadWordsLogitsProcessor, 
__webpack_exports__NoRepeatNGramLogitsProcessor as NoRepeatNGramLogitsProcessor, __webpack_exports__NomicBertModel as NomicBertModel, __webpack_exports__NomicBertPreTrainedModel as NomicBertPreTrainedModel, __webpack_exports__NougatImageProcessor as NougatImageProcessor, __webpack_exports__NougatTokenizer as NougatTokenizer, __webpack_exports__OPTForCausalLM as OPTForCausalLM, __webpack_exports__OPTModel as OPTModel, __webpack_exports__OPTPreTrainedModel as OPTPreTrainedModel, __webpack_exports__ObjectDetectionPipeline as ObjectDetectionPipeline, __webpack_exports__Olmo2ForCausalLM as Olmo2ForCausalLM, __webpack_exports__Olmo2Model as Olmo2Model, __webpack_exports__Olmo2PreTrainedModel as Olmo2PreTrainedModel, __webpack_exports__OlmoForCausalLM as OlmoForCausalLM, __webpack_exports__OlmoModel as OlmoModel, __webpack_exports__OlmoPreTrainedModel as OlmoPreTrainedModel, __webpack_exports__OpenELMForCausalLM as OpenELMForCausalLM, __webpack_exports__OpenELMModel as OpenELMModel, __webpack_exports__OpenELMPreTrainedModel as OpenELMPreTrainedModel, __webpack_exports__OwlViTFeatureExtractor as OwlViTFeatureExtractor, __webpack_exports__OwlViTForObjectDetection as OwlViTForObjectDetection, __webpack_exports__OwlViTImageProcessor as OwlViTImageProcessor, __webpack_exports__OwlViTModel as OwlViTModel, __webpack_exports__OwlViTPreTrainedModel as OwlViTPreTrainedModel, __webpack_exports__OwlViTProcessor as OwlViTProcessor, __webpack_exports__Owlv2ForObjectDetection as Owlv2ForObjectDetection, __webpack_exports__Owlv2ImageProcessor as Owlv2ImageProcessor, __webpack_exports__Owlv2Model as Owlv2Model, __webpack_exports__Owlv2PreTrainedModel as Owlv2PreTrainedModel, __webpack_exports__PaliGemmaForConditionalGeneration as PaliGemmaForConditionalGeneration, __webpack_exports__PaliGemmaPreTrainedModel as PaliGemmaPreTrainedModel, __webpack_exports__PaliGemmaProcessor as PaliGemmaProcessor, __webpack_exports__PatchTSMixerForPrediction as PatchTSMixerForPrediction, 
__webpack_exports__PatchTSMixerModel as PatchTSMixerModel, __webpack_exports__PatchTSMixerPreTrainedModel as PatchTSMixerPreTrainedModel, __webpack_exports__PatchTSTForPrediction as PatchTSTForPrediction, __webpack_exports__PatchTSTModel as PatchTSTModel, __webpack_exports__PatchTSTPreTrainedModel as PatchTSTPreTrainedModel, __webpack_exports__Phi3ForCausalLM as Phi3ForCausalLM, __webpack_exports__Phi3Model as Phi3Model, __webpack_exports__Phi3PreTrainedModel as Phi3PreTrainedModel, __webpack_exports__Phi3VForCausalLM as Phi3VForCausalLM, __webpack_exports__Phi3VImageProcessor as Phi3VImageProcessor, __webpack_exports__Phi3VPreTrainedModel as Phi3VPreTrainedModel, __webpack_exports__Phi3VProcessor as Phi3VProcessor, __webpack_exports__PhiForCausalLM as PhiForCausalLM, __webpack_exports__PhiModel as PhiModel, __webpack_exports__PhiPreTrainedModel as PhiPreTrainedModel, __webpack_exports__Pipeline as Pipeline, __webpack_exports__PreTrainedModel as PreTrainedModel, __webpack_exports__PreTrainedTokenizer as PreTrainedTokenizer, __webpack_exports__PretrainedConfig as PretrainedConfig, __webpack_exports__PretrainedMixin as PretrainedMixin, __webpack_exports__Processor as Processor, __webpack_exports__PvtForImageClassification as PvtForImageClassification, __webpack_exports__PvtImageProcessor as PvtImageProcessor, __webpack_exports__PvtModel as PvtModel, __webpack_exports__PvtPreTrainedModel as PvtPreTrainedModel, __webpack_exports__PyAnnoteFeatureExtractor as PyAnnoteFeatureExtractor, __webpack_exports__PyAnnoteForAudioFrameClassification as PyAnnoteForAudioFrameClassification, __webpack_exports__PyAnnoteModel as PyAnnoteModel, __webpack_exports__PyAnnotePreTrainedModel as PyAnnotePreTrainedModel, __webpack_exports__PyAnnoteProcessor as PyAnnoteProcessor, __webpack_exports__QuestionAnsweringModelOutput as QuestionAnsweringModelOutput, __webpack_exports__QuestionAnsweringPipeline as QuestionAnsweringPipeline, __webpack_exports__Qwen2ForCausalLM as Qwen2ForCausalLM, 
__webpack_exports__Qwen2Model as Qwen2Model, __webpack_exports__Qwen2PreTrainedModel as Qwen2PreTrainedModel, __webpack_exports__Qwen2Tokenizer as Qwen2Tokenizer, __webpack_exports__Qwen2VLForConditionalGeneration as Qwen2VLForConditionalGeneration, __webpack_exports__Qwen2VLImageProcessor as Qwen2VLImageProcessor, __webpack_exports__Qwen2VLPreTrainedModel as Qwen2VLPreTrainedModel, __webpack_exports__Qwen2VLProcessor as Qwen2VLProcessor, __webpack_exports__RTDetrForObjectDetection as RTDetrForObjectDetection, __webpack_exports__RTDetrImageProcessor as RTDetrImageProcessor, __webpack_exports__RTDetrModel as RTDetrModel, __webpack_exports__RTDetrObjectDetectionOutput as RTDetrObjectDetectionOutput, __webpack_exports__RTDetrPreTrainedModel as RTDetrPreTrainedModel, __webpack_exports__RawAudio as RawAudio, __webpack_exports__RawImage as RawImage, __webpack_exports__RawVideo as RawVideo, __webpack_exports__RawVideoFrame as RawVideoFrame, __webpack_exports__RepetitionPenaltyLogitsProcessor as RepetitionPenaltyLogitsProcessor, __webpack_exports__ResNetForImageClassification as ResNetForImageClassification, __webpack_exports__ResNetModel as ResNetModel, __webpack_exports__ResNetPreTrainedModel as ResNetPreTrainedModel, __webpack_exports__RoFormerForMaskedLM as RoFormerForMaskedLM, __webpack_exports__RoFormerForQuestionAnswering as RoFormerForQuestionAnswering, __webpack_exports__RoFormerForSequenceClassification as RoFormerForSequenceClassification, __webpack_exports__RoFormerForTokenClassification as RoFormerForTokenClassification, __webpack_exports__RoFormerModel as RoFormerModel, __webpack_exports__RoFormerPreTrainedModel as RoFormerPreTrainedModel, __webpack_exports__RoFormerTokenizer as RoFormerTokenizer, __webpack_exports__RobertaForMaskedLM as RobertaForMaskedLM, __webpack_exports__RobertaForQuestionAnswering as RobertaForQuestionAnswering, __webpack_exports__RobertaForSequenceClassification as RobertaForSequenceClassification, 
__webpack_exports__RobertaForTokenClassification as RobertaForTokenClassification, __webpack_exports__RobertaModel as RobertaModel, __webpack_exports__RobertaPreTrainedModel as RobertaPreTrainedModel, __webpack_exports__RobertaTokenizer as RobertaTokenizer, __webpack_exports__SamImageProcessor as SamImageProcessor, __webpack_exports__SamImageSegmentationOutput as SamImageSegmentationOutput, __webpack_exports__SamModel as SamModel, __webpack_exports__SamPreTrainedModel as SamPreTrainedModel, __webpack_exports__SamProcessor as SamProcessor, __webpack_exports__SapiensForDepthEstimation as SapiensForDepthEstimation, __webpack_exports__SapiensForNormalEstimation as SapiensForNormalEstimation, __webpack_exports__SapiensForSemanticSegmentation as SapiensForSemanticSegmentation, __webpack_exports__SapiensPreTrainedModel as SapiensPreTrainedModel, __webpack_exports__SeamlessM4TFeatureExtractor as SeamlessM4TFeatureExtractor, __webpack_exports__SegformerFeatureExtractor as SegformerFeatureExtractor, __webpack_exports__SegformerForImageClassification as SegformerForImageClassification, __webpack_exports__SegformerForSemanticSegmentation as SegformerForSemanticSegmentation, __webpack_exports__SegformerImageProcessor as SegformerImageProcessor, __webpack_exports__SegformerModel as SegformerModel, __webpack_exports__SegformerPreTrainedModel as SegformerPreTrainedModel, __webpack_exports__Seq2SeqLMOutput as Seq2SeqLMOutput, __webpack_exports__SequenceClassifierOutput as SequenceClassifierOutput, __webpack_exports__SiglipImageProcessor as SiglipImageProcessor, __webpack_exports__SiglipModel as SiglipModel, __webpack_exports__SiglipPreTrainedModel as SiglipPreTrainedModel, __webpack_exports__SiglipTextModel as SiglipTextModel, __webpack_exports__SiglipTokenizer as SiglipTokenizer, __webpack_exports__SiglipVisionModel as SiglipVisionModel, __webpack_exports__SmolVLMForConditionalGeneration as SmolVLMForConditionalGeneration, __webpack_exports__SmolVLMImageProcessor as 
SmolVLMImageProcessor, __webpack_exports__SmolVLMProcessor as SmolVLMProcessor, __webpack_exports__SpeechT5FeatureExtractor as SpeechT5FeatureExtractor, __webpack_exports__SpeechT5ForSpeechToText as SpeechT5ForSpeechToText, __webpack_exports__SpeechT5ForTextToSpeech as SpeechT5ForTextToSpeech, __webpack_exports__SpeechT5HifiGan as SpeechT5HifiGan, __webpack_exports__SpeechT5Model as SpeechT5Model, __webpack_exports__SpeechT5PreTrainedModel as SpeechT5PreTrainedModel, __webpack_exports__SpeechT5Processor as SpeechT5Processor, __webpack_exports__SpeechT5Tokenizer as SpeechT5Tokenizer, __webpack_exports__SqueezeBertForMaskedLM as SqueezeBertForMaskedLM, __webpack_exports__SqueezeBertForQuestionAnswering as SqueezeBertForQuestionAnswering, __webpack_exports__SqueezeBertForSequenceClassification as SqueezeBertForSequenceClassification, __webpack_exports__SqueezeBertModel as SqueezeBertModel, __webpack_exports__SqueezeBertPreTrainedModel as SqueezeBertPreTrainedModel, __webpack_exports__SqueezeBertTokenizer as SqueezeBertTokenizer, __webpack_exports__StableLmForCausalLM as StableLmForCausalLM, __webpack_exports__StableLmModel as StableLmModel, __webpack_exports__StableLmPreTrainedModel as StableLmPreTrainedModel, __webpack_exports__Starcoder2ForCausalLM as Starcoder2ForCausalLM, __webpack_exports__Starcoder2Model as Starcoder2Model, __webpack_exports__Starcoder2PreTrainedModel as Starcoder2PreTrainedModel, __webpack_exports__StoppingCriteria as StoppingCriteria, __webpack_exports__StoppingCriteriaList as StoppingCriteriaList, __webpack_exports__StyleTextToSpeech2Model as StyleTextToSpeech2Model, __webpack_exports__StyleTextToSpeech2PreTrainedModel as StyleTextToSpeech2PreTrainedModel, __webpack_exports__SummarizationPipeline as SummarizationPipeline, __webpack_exports__SuppressTokensAtBeginLogitsProcessor as SuppressTokensAtBeginLogitsProcessor, __webpack_exports__Swin2SRForImageSuperResolution as Swin2SRForImageSuperResolution, __webpack_exports__Swin2SRImageProcessor 
as Swin2SRImageProcessor, __webpack_exports__Swin2SRModel as Swin2SRModel, __webpack_exports__Swin2SRPreTrainedModel as Swin2SRPreTrainedModel, __webpack_exports__SwinForImageClassification as SwinForImageClassification, __webpack_exports__SwinForSemanticSegmentation as SwinForSemanticSegmentation, __webpack_exports__SwinModel as SwinModel, __webpack_exports__SwinPreTrainedModel as SwinPreTrainedModel, __webpack_exports__T5ForConditionalGeneration as T5ForConditionalGeneration, __webpack_exports__T5Model as T5Model, __webpack_exports__T5PreTrainedModel as T5PreTrainedModel, __webpack_exports__T5Tokenizer as T5Tokenizer, __webpack_exports__TableTransformerForObjectDetection as TableTransformerForObjectDetection, __webpack_exports__TableTransformerModel as TableTransformerModel, __webpack_exports__TableTransformerObjectDetectionOutput as TableTransformerObjectDetectionOutput, __webpack_exports__TableTransformerPreTrainedModel as TableTransformerPreTrainedModel, __webpack_exports__TemperatureLogitsWarper as TemperatureLogitsWarper, __webpack_exports__Tensor as Tensor, __webpack_exports__Text2TextGenerationPipeline as Text2TextGenerationPipeline, __webpack_exports__TextClassificationPipeline as TextClassificationPipeline, __webpack_exports__TextGenerationPipeline as TextGenerationPipeline, __webpack_exports__TextStreamer as TextStreamer, __webpack_exports__TextToAudioPipeline as TextToAudioPipeline, __webpack_exports__TokenClassificationPipeline as TokenClassificationPipeline, __webpack_exports__TokenClassifierOutput as TokenClassifierOutput, __webpack_exports__TokenizerModel as TokenizerModel, __webpack_exports__TopKLogitsWarper as TopKLogitsWarper, __webpack_exports__TopPLogitsWarper as TopPLogitsWarper, __webpack_exports__TrOCRForCausalLM as TrOCRForCausalLM, __webpack_exports__TrOCRPreTrainedModel as TrOCRPreTrainedModel, __webpack_exports__TranslationPipeline as TranslationPipeline, __webpack_exports__UltravoxModel as UltravoxModel, 
__webpack_exports__UltravoxPreTrainedModel as UltravoxPreTrainedModel, __webpack_exports__UltravoxProcessor as UltravoxProcessor, __webpack_exports__UniSpeechForCTC as UniSpeechForCTC, __webpack_exports__UniSpeechForSequenceClassification as UniSpeechForSequenceClassification, __webpack_exports__UniSpeechModel as UniSpeechModel, __webpack_exports__UniSpeechPreTrainedModel as UniSpeechPreTrainedModel, __webpack_exports__UniSpeechSatForAudioFrameClassification as UniSpeechSatForAudioFrameClassification, __webpack_exports__UniSpeechSatForCTC as UniSpeechSatForCTC, __webpack_exports__UniSpeechSatForSequenceClassification as UniSpeechSatForSequenceClassification, __webpack_exports__UniSpeechSatModel as UniSpeechSatModel, __webpack_exports__UniSpeechSatPreTrainedModel as UniSpeechSatPreTrainedModel, __webpack_exports__VLChatProcessor as VLChatProcessor, __webpack_exports__VLMImageProcessor as VLMImageProcessor, __webpack_exports__ViTFeatureExtractor as ViTFeatureExtractor, __webpack_exports__ViTForImageClassification as ViTForImageClassification, __webpack_exports__ViTImageProcessor as ViTImageProcessor, __webpack_exports__ViTMAEModel as ViTMAEModel, __webpack_exports__ViTMAEPreTrainedModel as ViTMAEPreTrainedModel, __webpack_exports__ViTMSNForImageClassification as ViTMSNForImageClassification, __webpack_exports__ViTMSNModel as ViTMSNModel, __webpack_exports__ViTMSNPreTrainedModel as ViTMSNPreTrainedModel, __webpack_exports__ViTModel as ViTModel, __webpack_exports__ViTPreTrainedModel as ViTPreTrainedModel, __webpack_exports__VisionEncoderDecoderModel as VisionEncoderDecoderModel, __webpack_exports__VitMatteForImageMatting as VitMatteForImageMatting, __webpack_exports__VitMatteImageProcessor as VitMatteImageProcessor, __webpack_exports__VitMattePreTrainedModel as VitMattePreTrainedModel, __webpack_exports__VitPoseForPoseEstimation as VitPoseForPoseEstimation, __webpack_exports__VitPoseImageProcessor as VitPoseImageProcessor, __webpack_exports__VitPosePreTrainedModel as 
VitPosePreTrainedModel, __webpack_exports__VitsModel as VitsModel, __webpack_exports__VitsModelOutput as VitsModelOutput, __webpack_exports__VitsPreTrainedModel as VitsPreTrainedModel, __webpack_exports__VitsTokenizer as VitsTokenizer, __webpack_exports__Wav2Vec2BertForCTC as Wav2Vec2BertForCTC, __webpack_exports__Wav2Vec2BertForSequenceClassification as Wav2Vec2BertForSequenceClassification, __webpack_exports__Wav2Vec2BertModel as Wav2Vec2BertModel, __webpack_exports__Wav2Vec2BertPreTrainedModel as Wav2Vec2BertPreTrainedModel, __webpack_exports__Wav2Vec2CTCTokenizer as Wav2Vec2CTCTokenizer, __webpack_exports__Wav2Vec2FeatureExtractor as Wav2Vec2FeatureExtractor, __webpack_exports__Wav2Vec2ForAudioFrameClassification as Wav2Vec2ForAudioFrameClassification, __webpack_exports__Wav2Vec2ForCTC as Wav2Vec2ForCTC, __webpack_exports__Wav2Vec2ForSequenceClassification as Wav2Vec2ForSequenceClassification, __webpack_exports__Wav2Vec2Model as Wav2Vec2Model, __webpack_exports__Wav2Vec2PreTrainedModel as Wav2Vec2PreTrainedModel, __webpack_exports__Wav2Vec2Processor as Wav2Vec2Processor, __webpack_exports__Wav2Vec2ProcessorWithLM as Wav2Vec2ProcessorWithLM, __webpack_exports__WavLMForAudioFrameClassification as WavLMForAudioFrameClassification, __webpack_exports__WavLMForCTC as WavLMForCTC, __webpack_exports__WavLMForSequenceClassification as WavLMForSequenceClassification, __webpack_exports__WavLMForXVector as WavLMForXVector, __webpack_exports__WavLMModel as WavLMModel, __webpack_exports__WavLMPreTrainedModel as WavLMPreTrainedModel, __webpack_exports__WeSpeakerFeatureExtractor as WeSpeakerFeatureExtractor, __webpack_exports__WeSpeakerResNetModel as WeSpeakerResNetModel, __webpack_exports__WeSpeakerResNetPreTrainedModel as WeSpeakerResNetPreTrainedModel, __webpack_exports__WhisperFeatureExtractor as WhisperFeatureExtractor, __webpack_exports__WhisperForConditionalGeneration as WhisperForConditionalGeneration, __webpack_exports__WhisperModel as WhisperModel, 
__webpack_exports__WhisperPreTrainedModel as WhisperPreTrainedModel, __webpack_exports__WhisperProcessor as WhisperProcessor, __webpack_exports__WhisperTextStreamer as WhisperTextStreamer, __webpack_exports__WhisperTimeStampLogitsProcessor as WhisperTimeStampLogitsProcessor, __webpack_exports__WhisperTokenizer as WhisperTokenizer, __webpack_exports__XLMForQuestionAnswering as XLMForQuestionAnswering, __webpack_exports__XLMForSequenceClassification as XLMForSequenceClassification, __webpack_exports__XLMForTokenClassification as XLMForTokenClassification, __webpack_exports__XLMModel as XLMModel, __webpack_exports__XLMPreTrainedModel as XLMPreTrainedModel, __webpack_exports__XLMRobertaForMaskedLM as XLMRobertaForMaskedLM, __webpack_exports__XLMRobertaForQuestionAnswering as XLMRobertaForQuestionAnswering, __webpack_exports__XLMRobertaForSequenceClassification as XLMRobertaForSequenceClassification, __webpack_exports__XLMRobertaForTokenClassification as XLMRobertaForTokenClassification, __webpack_exports__XLMRobertaModel as XLMRobertaModel, __webpack_exports__XLMRobertaPreTrainedModel as XLMRobertaPreTrainedModel, __webpack_exports__XLMRobertaTokenizer as XLMRobertaTokenizer, __webpack_exports__XLMTokenizer as XLMTokenizer, __webpack_exports__XLMWithLMHeadModel as XLMWithLMHeadModel, __webpack_exports__XVectorOutput as XVectorOutput, __webpack_exports__YolosFeatureExtractor as YolosFeatureExtractor, __webpack_exports__YolosForObjectDetection as YolosForObjectDetection, __webpack_exports__YolosImageProcessor as YolosImageProcessor, __webpack_exports__YolosModel as YolosModel, __webpack_exports__YolosObjectDetectionOutput as YolosObjectDetectionOutput, __webpack_exports__YolosPreTrainedModel as YolosPreTrainedModel, __webpack_exports__ZeroShotAudioClassificationPipeline as ZeroShotAudioClassificationPipeline, __webpack_exports__ZeroShotClassificationPipeline as ZeroShotClassificationPipeline, __webpack_exports__ZeroShotImageClassificationPipeline as 
ZeroShotImageClassificationPipeline, __webpack_exports__ZeroShotObjectDetectionPipeline as ZeroShotObjectDetectionPipeline, __webpack_exports__bankers_round as bankers_round, __webpack_exports__cat as cat, __webpack_exports__cos_sim as cos_sim, __webpack_exports__dot as dot, __webpack_exports__dynamic_time_warping as dynamic_time_warping, __webpack_exports__env as env, __webpack_exports__full as full, __webpack_exports__full_like as full_like, __webpack_exports__getKeyValueShapes as getKeyValueShapes, __webpack_exports__hamming as hamming, __webpack_exports__hanning as hanning, __webpack_exports__interpolate as interpolate, __webpack_exports__interpolate_4d as interpolate_4d, __webpack_exports__interpolate_data as interpolate_data, __webpack_exports__is_chinese_char as is_chinese_char, __webpack_exports__layer_norm as layer_norm, __webpack_exports__load_image as load_image, __webpack_exports__load_video as load_video, __webpack_exports__log_softmax as log_softmax, __webpack_exports__magnitude as magnitude, __webpack_exports__matmul as matmul, __webpack_exports__max as max, __webpack_exports__mean as mean, __webpack_exports__mean_pooling as mean_pooling, __webpack_exports__medianFilter as medianFilter, __webpack_exports__mel_filter_bank as mel_filter_bank, __webpack_exports__min as min, __webpack_exports__ones as ones, __webpack_exports__ones_like as ones_like, __webpack_exports__permute as permute, __webpack_exports__permute_data as permute_data, __webpack_exports__pipeline as pipeline, __webpack_exports__quantize_embeddings as quantize_embeddings, __webpack_exports__rand as rand, __webpack_exports__read_audio as read_audio, __webpack_exports__rfft as rfft, __webpack_exports__round as round, __webpack_exports__slice as slice, __webpack_exports__softmax as softmax, __webpack_exports__spectrogram as spectrogram, __webpack_exports__stack as stack, __webpack_exports__std_mean as std_mean, __webpack_exports__topk as topk, __webpack_exports__window_function as 
window_function, __webpack_exports__zeros as zeros, __webpack_exports__zeros_like as zeros_like };
35874
+ export { __webpack_exports__ASTFeatureExtractor as ASTFeatureExtractor, __webpack_exports__ASTForAudioClassification as ASTForAudioClassification, __webpack_exports__ASTModel as ASTModel, __webpack_exports__ASTPreTrainedModel as ASTPreTrainedModel, __webpack_exports__AlbertForMaskedLM as AlbertForMaskedLM, __webpack_exports__AlbertForQuestionAnswering as AlbertForQuestionAnswering, __webpack_exports__AlbertForSequenceClassification as AlbertForSequenceClassification, __webpack_exports__AlbertModel as AlbertModel, __webpack_exports__AlbertPreTrainedModel as AlbertPreTrainedModel, __webpack_exports__AlbertTokenizer as AlbertTokenizer, __webpack_exports__AudioClassificationPipeline as AudioClassificationPipeline, __webpack_exports__AutoConfig as AutoConfig, __webpack_exports__AutoFeatureExtractor as AutoFeatureExtractor, __webpack_exports__AutoImageProcessor as AutoImageProcessor, __webpack_exports__AutoModel as AutoModel, __webpack_exports__AutoModelForAudioClassification as AutoModelForAudioClassification, __webpack_exports__AutoModelForAudioFrameClassification as AutoModelForAudioFrameClassification, __webpack_exports__AutoModelForAudioTextToText as AutoModelForAudioTextToText, __webpack_exports__AutoModelForCTC as AutoModelForCTC, __webpack_exports__AutoModelForCausalLM as AutoModelForCausalLM, __webpack_exports__AutoModelForDepthEstimation as AutoModelForDepthEstimation, __webpack_exports__AutoModelForDocumentQuestionAnswering as AutoModelForDocumentQuestionAnswering, __webpack_exports__AutoModelForImageClassification as AutoModelForImageClassification, __webpack_exports__AutoModelForImageFeatureExtraction as AutoModelForImageFeatureExtraction, __webpack_exports__AutoModelForImageMatting as AutoModelForImageMatting, __webpack_exports__AutoModelForImageSegmentation as AutoModelForImageSegmentation, __webpack_exports__AutoModelForImageTextToText as AutoModelForImageTextToText, __webpack_exports__AutoModelForImageToImage as AutoModelForImageToImage, 
__webpack_exports__AutoModelForMaskGeneration as AutoModelForMaskGeneration, __webpack_exports__AutoModelForMaskedLM as AutoModelForMaskedLM, __webpack_exports__AutoModelForNormalEstimation as AutoModelForNormalEstimation, __webpack_exports__AutoModelForObjectDetection as AutoModelForObjectDetection, __webpack_exports__AutoModelForPoseEstimation as AutoModelForPoseEstimation, __webpack_exports__AutoModelForQuestionAnswering as AutoModelForQuestionAnswering, __webpack_exports__AutoModelForSemanticSegmentation as AutoModelForSemanticSegmentation, __webpack_exports__AutoModelForSeq2SeqLM as AutoModelForSeq2SeqLM, __webpack_exports__AutoModelForSequenceClassification as AutoModelForSequenceClassification, __webpack_exports__AutoModelForSpeechSeq2Seq as AutoModelForSpeechSeq2Seq, __webpack_exports__AutoModelForTextToSpectrogram as AutoModelForTextToSpectrogram, __webpack_exports__AutoModelForTextToWaveform as AutoModelForTextToWaveform, __webpack_exports__AutoModelForTokenClassification as AutoModelForTokenClassification, __webpack_exports__AutoModelForUniversalSegmentation as AutoModelForUniversalSegmentation, __webpack_exports__AutoModelForVision2Seq as AutoModelForVision2Seq, __webpack_exports__AutoModelForXVector as AutoModelForXVector, __webpack_exports__AutoModelForZeroShotObjectDetection as AutoModelForZeroShotObjectDetection, __webpack_exports__AutoProcessor as AutoProcessor, __webpack_exports__AutoTokenizer as AutoTokenizer, __webpack_exports__AutomaticSpeechRecognitionPipeline as AutomaticSpeechRecognitionPipeline, __webpack_exports__BackgroundRemovalPipeline as BackgroundRemovalPipeline, __webpack_exports__BartForConditionalGeneration as BartForConditionalGeneration, __webpack_exports__BartForSequenceClassification as BartForSequenceClassification, __webpack_exports__BartModel as BartModel, __webpack_exports__BartPretrainedModel as BartPretrainedModel, __webpack_exports__BartTokenizer as BartTokenizer, __webpack_exports__BaseModelOutput as BaseModelOutput, 
__webpack_exports__BaseStreamer as BaseStreamer, __webpack_exports__BeitFeatureExtractor as BeitFeatureExtractor, __webpack_exports__BeitForImageClassification as BeitForImageClassification, __webpack_exports__BeitModel as BeitModel, __webpack_exports__BeitPreTrainedModel as BeitPreTrainedModel, __webpack_exports__BertForMaskedLM as BertForMaskedLM, __webpack_exports__BertForQuestionAnswering as BertForQuestionAnswering, __webpack_exports__BertForSequenceClassification as BertForSequenceClassification, __webpack_exports__BertForTokenClassification as BertForTokenClassification, __webpack_exports__BertModel as BertModel, __webpack_exports__BertPreTrainedModel as BertPreTrainedModel, __webpack_exports__BertTokenizer as BertTokenizer, __webpack_exports__BitImageProcessor as BitImageProcessor, __webpack_exports__BlenderbotForConditionalGeneration as BlenderbotForConditionalGeneration, __webpack_exports__BlenderbotModel as BlenderbotModel, __webpack_exports__BlenderbotPreTrainedModel as BlenderbotPreTrainedModel, __webpack_exports__BlenderbotSmallForConditionalGeneration as BlenderbotSmallForConditionalGeneration, __webpack_exports__BlenderbotSmallModel as BlenderbotSmallModel, __webpack_exports__BlenderbotSmallPreTrainedModel as BlenderbotSmallPreTrainedModel, __webpack_exports__BlenderbotSmallTokenizer as BlenderbotSmallTokenizer, __webpack_exports__BlenderbotTokenizer as BlenderbotTokenizer, __webpack_exports__BloomForCausalLM as BloomForCausalLM, __webpack_exports__BloomModel as BloomModel, __webpack_exports__BloomPreTrainedModel as BloomPreTrainedModel, __webpack_exports__BloomTokenizer as BloomTokenizer, __webpack_exports__CLIPFeatureExtractor as CLIPFeatureExtractor, __webpack_exports__CLIPImageProcessor as CLIPImageProcessor, __webpack_exports__CLIPModel as CLIPModel, __webpack_exports__CLIPPreTrainedModel as CLIPPreTrainedModel, __webpack_exports__CLIPSegForImageSegmentation as CLIPSegForImageSegmentation, __webpack_exports__CLIPSegModel as CLIPSegModel, 
__webpack_exports__CLIPSegPreTrainedModel as CLIPSegPreTrainedModel, __webpack_exports__CLIPTextModel as CLIPTextModel, __webpack_exports__CLIPTextModelWithProjection as CLIPTextModelWithProjection, __webpack_exports__CLIPTokenizer as CLIPTokenizer, __webpack_exports__CLIPVisionModel as CLIPVisionModel, __webpack_exports__CLIPVisionModelWithProjection as CLIPVisionModelWithProjection, __webpack_exports__CamembertForMaskedLM as CamembertForMaskedLM, __webpack_exports__CamembertForQuestionAnswering as CamembertForQuestionAnswering, __webpack_exports__CamembertForSequenceClassification as CamembertForSequenceClassification, __webpack_exports__CamembertForTokenClassification as CamembertForTokenClassification, __webpack_exports__CamembertModel as CamembertModel, __webpack_exports__CamembertPreTrainedModel as CamembertPreTrainedModel, __webpack_exports__CamembertTokenizer as CamembertTokenizer, __webpack_exports__CausalLMOutput as CausalLMOutput, __webpack_exports__CausalLMOutputWithPast as CausalLMOutputWithPast, __webpack_exports__ChineseCLIPFeatureExtractor as ChineseCLIPFeatureExtractor, __webpack_exports__ChineseCLIPModel as ChineseCLIPModel, __webpack_exports__ChineseCLIPPreTrainedModel as ChineseCLIPPreTrainedModel, __webpack_exports__ClapAudioModelWithProjection as ClapAudioModelWithProjection, __webpack_exports__ClapFeatureExtractor as ClapFeatureExtractor, __webpack_exports__ClapModel as ClapModel, __webpack_exports__ClapPreTrainedModel as ClapPreTrainedModel, __webpack_exports__ClapTextModelWithProjection as ClapTextModelWithProjection, __webpack_exports__ClassifierFreeGuidanceLogitsProcessor as ClassifierFreeGuidanceLogitsProcessor, __webpack_exports__CodeGenForCausalLM as CodeGenForCausalLM, __webpack_exports__CodeGenModel as CodeGenModel, __webpack_exports__CodeGenPreTrainedModel as CodeGenPreTrainedModel, __webpack_exports__CodeGenTokenizer as CodeGenTokenizer, __webpack_exports__CodeLlamaTokenizer as CodeLlamaTokenizer, 
__webpack_exports__CohereForCausalLM as CohereForCausalLM, __webpack_exports__CohereModel as CohereModel, __webpack_exports__CoherePreTrainedModel as CoherePreTrainedModel, __webpack_exports__CohereTokenizer as CohereTokenizer, __webpack_exports__ConvBertForMaskedLM as ConvBertForMaskedLM, __webpack_exports__ConvBertForQuestionAnswering as ConvBertForQuestionAnswering, __webpack_exports__ConvBertForSequenceClassification as ConvBertForSequenceClassification, __webpack_exports__ConvBertForTokenClassification as ConvBertForTokenClassification, __webpack_exports__ConvBertModel as ConvBertModel, __webpack_exports__ConvBertPreTrainedModel as ConvBertPreTrainedModel, __webpack_exports__ConvBertTokenizer as ConvBertTokenizer, __webpack_exports__ConvNextFeatureExtractor as ConvNextFeatureExtractor, __webpack_exports__ConvNextForImageClassification as ConvNextForImageClassification, __webpack_exports__ConvNextImageProcessor as ConvNextImageProcessor, __webpack_exports__ConvNextModel as ConvNextModel, __webpack_exports__ConvNextPreTrainedModel as ConvNextPreTrainedModel, __webpack_exports__ConvNextV2ForImageClassification as ConvNextV2ForImageClassification, __webpack_exports__ConvNextV2Model as ConvNextV2Model, __webpack_exports__ConvNextV2PreTrainedModel as ConvNextV2PreTrainedModel, __webpack_exports__DPTFeatureExtractor as DPTFeatureExtractor, __webpack_exports__DPTForDepthEstimation as DPTForDepthEstimation, __webpack_exports__DPTImageProcessor as DPTImageProcessor, __webpack_exports__DPTModel as DPTModel, __webpack_exports__DPTPreTrainedModel as DPTPreTrainedModel, __webpack_exports__DacDecoderModel as DacDecoderModel, __webpack_exports__DacDecoderOutput as DacDecoderOutput, __webpack_exports__DacEncoderModel as DacEncoderModel, __webpack_exports__DacEncoderOutput as DacEncoderOutput, __webpack_exports__DacFeatureExtractor as DacFeatureExtractor, __webpack_exports__DacModel as DacModel, __webpack_exports__DacPreTrainedModel as DacPreTrainedModel, 
__webpack_exports__DataTypeMap as DataTypeMap, __webpack_exports__DebertaForMaskedLM as DebertaForMaskedLM, __webpack_exports__DebertaForQuestionAnswering as DebertaForQuestionAnswering, __webpack_exports__DebertaForSequenceClassification as DebertaForSequenceClassification, __webpack_exports__DebertaForTokenClassification as DebertaForTokenClassification, __webpack_exports__DebertaModel as DebertaModel, __webpack_exports__DebertaPreTrainedModel as DebertaPreTrainedModel, __webpack_exports__DebertaTokenizer as DebertaTokenizer, __webpack_exports__DebertaV2ForMaskedLM as DebertaV2ForMaskedLM, __webpack_exports__DebertaV2ForQuestionAnswering as DebertaV2ForQuestionAnswering, __webpack_exports__DebertaV2ForSequenceClassification as DebertaV2ForSequenceClassification, __webpack_exports__DebertaV2ForTokenClassification as DebertaV2ForTokenClassification, __webpack_exports__DebertaV2Model as DebertaV2Model, __webpack_exports__DebertaV2PreTrainedModel as DebertaV2PreTrainedModel, __webpack_exports__DebertaV2Tokenizer as DebertaV2Tokenizer, __webpack_exports__DecisionTransformerModel as DecisionTransformerModel, __webpack_exports__DecisionTransformerPreTrainedModel as DecisionTransformerPreTrainedModel, __webpack_exports__DeiTFeatureExtractor as DeiTFeatureExtractor, __webpack_exports__DeiTForImageClassification as DeiTForImageClassification, __webpack_exports__DeiTImageProcessor as DeiTImageProcessor, __webpack_exports__DeiTModel as DeiTModel, __webpack_exports__DeiTPreTrainedModel as DeiTPreTrainedModel, __webpack_exports__DepthAnythingForDepthEstimation as DepthAnythingForDepthEstimation, __webpack_exports__DepthAnythingPreTrainedModel as DepthAnythingPreTrainedModel, __webpack_exports__DepthEstimationPipeline as DepthEstimationPipeline, __webpack_exports__DepthProForDepthEstimation as DepthProForDepthEstimation, __webpack_exports__DepthProPreTrainedModel as DepthProPreTrainedModel, __webpack_exports__DetrFeatureExtractor as DetrFeatureExtractor, 
__webpack_exports__DetrForObjectDetection as DetrForObjectDetection, __webpack_exports__DetrForSegmentation as DetrForSegmentation, __webpack_exports__DetrImageProcessor as DetrImageProcessor, __webpack_exports__DetrModel as DetrModel, __webpack_exports__DetrObjectDetectionOutput as DetrObjectDetectionOutput, __webpack_exports__DetrPreTrainedModel as DetrPreTrainedModel, __webpack_exports__DetrSegmentationOutput as DetrSegmentationOutput, __webpack_exports__Dinov2ForImageClassification as Dinov2ForImageClassification, __webpack_exports__Dinov2Model as Dinov2Model, __webpack_exports__Dinov2PreTrainedModel as Dinov2PreTrainedModel, __webpack_exports__Dinov2WithRegistersForImageClassification as Dinov2WithRegistersForImageClassification, __webpack_exports__Dinov2WithRegistersModel as Dinov2WithRegistersModel, __webpack_exports__Dinov2WithRegistersPreTrainedModel as Dinov2WithRegistersPreTrainedModel, __webpack_exports__DistilBertForMaskedLM as DistilBertForMaskedLM, __webpack_exports__DistilBertForQuestionAnswering as DistilBertForQuestionAnswering, __webpack_exports__DistilBertForSequenceClassification as DistilBertForSequenceClassification, __webpack_exports__DistilBertForTokenClassification as DistilBertForTokenClassification, __webpack_exports__DistilBertModel as DistilBertModel, __webpack_exports__DistilBertPreTrainedModel as DistilBertPreTrainedModel, __webpack_exports__DistilBertTokenizer as DistilBertTokenizer, __webpack_exports__DocumentQuestionAnsweringPipeline as DocumentQuestionAnsweringPipeline, __webpack_exports__DonutFeatureExtractor as DonutFeatureExtractor, __webpack_exports__DonutImageProcessor as DonutImageProcessor, __webpack_exports__DonutSwinModel as DonutSwinModel, __webpack_exports__DonutSwinPreTrainedModel as DonutSwinPreTrainedModel, __webpack_exports__EfficientNetForImageClassification as EfficientNetForImageClassification, __webpack_exports__EfficientNetImageProcessor as EfficientNetImageProcessor, __webpack_exports__EfficientNetModel as 
EfficientNetModel, __webpack_exports__EfficientNetPreTrainedModel as EfficientNetPreTrainedModel, __webpack_exports__ElectraForMaskedLM as ElectraForMaskedLM, __webpack_exports__ElectraForQuestionAnswering as ElectraForQuestionAnswering, __webpack_exports__ElectraForSequenceClassification as ElectraForSequenceClassification, __webpack_exports__ElectraForTokenClassification as ElectraForTokenClassification, __webpack_exports__ElectraModel as ElectraModel, __webpack_exports__ElectraPreTrainedModel as ElectraPreTrainedModel, __webpack_exports__ElectraTokenizer as ElectraTokenizer, __webpack_exports__EncodecFeatureExtractor as EncodecFeatureExtractor, __webpack_exports__EosTokenCriteria as EosTokenCriteria, __webpack_exports__EsmForMaskedLM as EsmForMaskedLM, __webpack_exports__EsmForSequenceClassification as EsmForSequenceClassification, __webpack_exports__EsmForTokenClassification as EsmForTokenClassification, __webpack_exports__EsmModel as EsmModel, __webpack_exports__EsmPreTrainedModel as EsmPreTrainedModel, __webpack_exports__EsmTokenizer as EsmTokenizer, __webpack_exports__ExaoneForCausalLM as ExaoneForCausalLM, __webpack_exports__ExaoneModel as ExaoneModel, __webpack_exports__ExaonePreTrainedModel as ExaonePreTrainedModel, __webpack_exports__FFT as FFT, __webpack_exports__FalconForCausalLM as FalconForCausalLM, __webpack_exports__FalconModel as FalconModel, __webpack_exports__FalconPreTrainedModel as FalconPreTrainedModel, __webpack_exports__FalconTokenizer as FalconTokenizer, __webpack_exports__FastViTForImageClassification as FastViTForImageClassification, __webpack_exports__FastViTModel as FastViTModel, __webpack_exports__FastViTPreTrainedModel as FastViTPreTrainedModel, __webpack_exports__FeatureExtractionPipeline as FeatureExtractionPipeline, __webpack_exports__FeatureExtractor as FeatureExtractor, __webpack_exports__FillMaskPipeline as FillMaskPipeline, __webpack_exports__Florence2ForConditionalGeneration as Florence2ForConditionalGeneration, 
__webpack_exports__Florence2PreTrainedModel as Florence2PreTrainedModel, __webpack_exports__Florence2Processor as Florence2Processor, __webpack_exports__ForcedBOSTokenLogitsProcessor as ForcedBOSTokenLogitsProcessor, __webpack_exports__ForcedEOSTokenLogitsProcessor as ForcedEOSTokenLogitsProcessor, __webpack_exports__GLPNFeatureExtractor as GLPNFeatureExtractor, __webpack_exports__GLPNForDepthEstimation as GLPNForDepthEstimation, __webpack_exports__GLPNModel as GLPNModel, __webpack_exports__GLPNPreTrainedModel as GLPNPreTrainedModel, __webpack_exports__GPT2LMHeadModel as GPT2LMHeadModel, __webpack_exports__GPT2Model as GPT2Model, __webpack_exports__GPT2PreTrainedModel as GPT2PreTrainedModel, __webpack_exports__GPT2Tokenizer as GPT2Tokenizer, __webpack_exports__GPTBigCodeForCausalLM as GPTBigCodeForCausalLM, __webpack_exports__GPTBigCodeModel as GPTBigCodeModel, __webpack_exports__GPTBigCodePreTrainedModel as GPTBigCodePreTrainedModel, __webpack_exports__GPTJForCausalLM as GPTJForCausalLM, __webpack_exports__GPTJModel as GPTJModel, __webpack_exports__GPTJPreTrainedModel as GPTJPreTrainedModel, __webpack_exports__GPTNeoForCausalLM as GPTNeoForCausalLM, __webpack_exports__GPTNeoModel as GPTNeoModel, __webpack_exports__GPTNeoPreTrainedModel as GPTNeoPreTrainedModel, __webpack_exports__GPTNeoXForCausalLM as GPTNeoXForCausalLM, __webpack_exports__GPTNeoXModel as GPTNeoXModel, __webpack_exports__GPTNeoXPreTrainedModel as GPTNeoXPreTrainedModel, __webpack_exports__GPTNeoXTokenizer as GPTNeoXTokenizer, __webpack_exports__Gemma2ForCausalLM as Gemma2ForCausalLM, __webpack_exports__Gemma2Model as Gemma2Model, __webpack_exports__Gemma2PreTrainedModel as Gemma2PreTrainedModel, __webpack_exports__Gemma3ForCausalLM as Gemma3ForCausalLM, __webpack_exports__Gemma3Model as Gemma3Model, __webpack_exports__Gemma3PreTrainedModel as Gemma3PreTrainedModel, __webpack_exports__GemmaForCausalLM as GemmaForCausalLM, __webpack_exports__GemmaModel as GemmaModel, 
__webpack_exports__GemmaPreTrainedModel as GemmaPreTrainedModel, __webpack_exports__GemmaTokenizer as GemmaTokenizer, __webpack_exports__GlmForCausalLM as GlmForCausalLM, __webpack_exports__GlmModel as GlmModel, __webpack_exports__GlmPreTrainedModel as GlmPreTrainedModel, __webpack_exports__GraniteForCausalLM as GraniteForCausalLM, __webpack_exports__GraniteModel as GraniteModel, __webpack_exports__GranitePreTrainedModel as GranitePreTrainedModel, __webpack_exports__Grok1Tokenizer as Grok1Tokenizer, __webpack_exports__GroundingDinoForObjectDetection as GroundingDinoForObjectDetection, __webpack_exports__GroundingDinoImageProcessor as GroundingDinoImageProcessor, __webpack_exports__GroundingDinoPreTrainedModel as GroundingDinoPreTrainedModel, __webpack_exports__GroundingDinoProcessor as GroundingDinoProcessor, __webpack_exports__GroupViTModel as GroupViTModel, __webpack_exports__GroupViTPreTrainedModel as GroupViTPreTrainedModel, __webpack_exports__HeliumForCausalLM as HeliumForCausalLM, __webpack_exports__HeliumModel as HeliumModel, __webpack_exports__HeliumPreTrainedModel as HeliumPreTrainedModel, __webpack_exports__HerbertTokenizer as HerbertTokenizer, __webpack_exports__HieraForImageClassification as HieraForImageClassification, __webpack_exports__HieraModel as HieraModel, __webpack_exports__HieraPreTrainedModel as HieraPreTrainedModel, __webpack_exports__HubertForCTC as HubertForCTC, __webpack_exports__HubertForSequenceClassification as HubertForSequenceClassification, __webpack_exports__HubertModel as HubertModel, __webpack_exports__HubertPreTrainedModel as HubertPreTrainedModel, __webpack_exports__IJepaForImageClassification as IJepaForImageClassification, __webpack_exports__IJepaModel as IJepaModel, __webpack_exports__IJepaPreTrainedModel as IJepaPreTrainedModel, __webpack_exports__Idefics3ForConditionalGeneration as Idefics3ForConditionalGeneration, __webpack_exports__Idefics3ImageProcessor as Idefics3ImageProcessor, 
__webpack_exports__Idefics3PreTrainedModel as Idefics3PreTrainedModel, __webpack_exports__Idefics3Processor as Idefics3Processor, __webpack_exports__ImageClassificationPipeline as ImageClassificationPipeline, __webpack_exports__ImageFeatureExtractionPipeline as ImageFeatureExtractionPipeline, __webpack_exports__ImageFeatureExtractor as ImageFeatureExtractor, __webpack_exports__ImageMattingOutput as ImageMattingOutput, __webpack_exports__ImageProcessor as ImageProcessor, __webpack_exports__ImageSegmentationPipeline as ImageSegmentationPipeline, __webpack_exports__ImageToImagePipeline as ImageToImagePipeline, __webpack_exports__ImageToTextPipeline as ImageToTextPipeline, __webpack_exports__InterruptableStoppingCriteria as InterruptableStoppingCriteria, __webpack_exports__JAISLMHeadModel as JAISLMHeadModel, __webpack_exports__JAISModel as JAISModel, __webpack_exports__JAISPreTrainedModel as JAISPreTrainedModel, __webpack_exports__JinaCLIPImageProcessor as JinaCLIPImageProcessor, __webpack_exports__JinaCLIPModel as JinaCLIPModel, __webpack_exports__JinaCLIPPreTrainedModel as JinaCLIPPreTrainedModel, __webpack_exports__JinaCLIPProcessor as JinaCLIPProcessor, __webpack_exports__JinaCLIPTextModel as JinaCLIPTextModel, __webpack_exports__JinaCLIPVisionModel as JinaCLIPVisionModel, __webpack_exports__LiteWhisperForConditionalGeneration as LiteWhisperForConditionalGeneration, __webpack_exports__LlamaForCausalLM as LlamaForCausalLM, __webpack_exports__LlamaModel as LlamaModel, __webpack_exports__LlamaPreTrainedModel as LlamaPreTrainedModel, __webpack_exports__LlamaTokenizer as LlamaTokenizer, __webpack_exports__LlavaForConditionalGeneration as LlavaForConditionalGeneration, __webpack_exports__LlavaOnevisionForConditionalGeneration as LlavaOnevisionForConditionalGeneration, __webpack_exports__LlavaOnevisionImageProcessor as LlavaOnevisionImageProcessor, __webpack_exports__LlavaPreTrainedModel as LlavaPreTrainedModel, __webpack_exports__LogitsProcessor as LogitsProcessor, 
__webpack_exports__LogitsProcessorList as LogitsProcessorList, __webpack_exports__LogitsWarper as LogitsWarper, __webpack_exports__LongT5ForConditionalGeneration as LongT5ForConditionalGeneration, __webpack_exports__LongT5Model as LongT5Model, __webpack_exports__LongT5PreTrainedModel as LongT5PreTrainedModel, __webpack_exports__M2M100ForConditionalGeneration as M2M100ForConditionalGeneration, __webpack_exports__M2M100Model as M2M100Model, __webpack_exports__M2M100PreTrainedModel as M2M100PreTrainedModel, __webpack_exports__M2M100Tokenizer as M2M100Tokenizer, __webpack_exports__MBart50Tokenizer as MBart50Tokenizer, __webpack_exports__MBartForCausalLM as MBartForCausalLM, __webpack_exports__MBartForConditionalGeneration as MBartForConditionalGeneration, __webpack_exports__MBartForSequenceClassification as MBartForSequenceClassification, __webpack_exports__MBartModel as MBartModel, __webpack_exports__MBartPreTrainedModel as MBartPreTrainedModel, __webpack_exports__MBartTokenizer as MBartTokenizer, __webpack_exports__MPNetForMaskedLM as MPNetForMaskedLM, __webpack_exports__MPNetForQuestionAnswering as MPNetForQuestionAnswering, __webpack_exports__MPNetForSequenceClassification as MPNetForSequenceClassification, __webpack_exports__MPNetForTokenClassification as MPNetForTokenClassification, __webpack_exports__MPNetModel as MPNetModel, __webpack_exports__MPNetPreTrainedModel as MPNetPreTrainedModel, __webpack_exports__MPNetTokenizer as MPNetTokenizer, __webpack_exports__MT5ForConditionalGeneration as MT5ForConditionalGeneration, __webpack_exports__MT5Model as MT5Model, __webpack_exports__MT5PreTrainedModel as MT5PreTrainedModel, __webpack_exports__MarianMTModel as MarianMTModel, __webpack_exports__MarianModel as MarianModel, __webpack_exports__MarianPreTrainedModel as MarianPreTrainedModel, __webpack_exports__MarianTokenizer as MarianTokenizer, __webpack_exports__Mask2FormerImageProcessor as Mask2FormerImageProcessor, __webpack_exports__MaskFormerFeatureExtractor as 
MaskFormerFeatureExtractor, __webpack_exports__MaskFormerForInstanceSegmentation as MaskFormerForInstanceSegmentation, __webpack_exports__MaskFormerImageProcessor as MaskFormerImageProcessor, __webpack_exports__MaskFormerModel as MaskFormerModel, __webpack_exports__MaskFormerPreTrainedModel as MaskFormerPreTrainedModel, __webpack_exports__MaskedLMOutput as MaskedLMOutput, __webpack_exports__MaxLengthCriteria as MaxLengthCriteria, __webpack_exports__Metric3DForDepthEstimation as Metric3DForDepthEstimation, __webpack_exports__Metric3DPreTrainedModel as Metric3DPreTrainedModel, __webpack_exports__Metric3Dv2ForDepthEstimation as Metric3Dv2ForDepthEstimation, __webpack_exports__Metric3Dv2PreTrainedModel as Metric3Dv2PreTrainedModel, __webpack_exports__MgpstrForSceneTextRecognition as MgpstrForSceneTextRecognition, __webpack_exports__MgpstrModelOutput as MgpstrModelOutput, __webpack_exports__MgpstrPreTrainedModel as MgpstrPreTrainedModel, __webpack_exports__MgpstrProcessor as MgpstrProcessor, __webpack_exports__MgpstrTokenizer as MgpstrTokenizer, __webpack_exports__MimiDecoderModel as MimiDecoderModel, __webpack_exports__MimiDecoderOutput as MimiDecoderOutput, __webpack_exports__MimiEncoderModel as MimiEncoderModel, __webpack_exports__MimiEncoderOutput as MimiEncoderOutput, __webpack_exports__MimiModel as MimiModel, __webpack_exports__MimiPreTrainedModel as MimiPreTrainedModel, __webpack_exports__MinLengthLogitsProcessor as MinLengthLogitsProcessor, __webpack_exports__MinNewTokensLengthLogitsProcessor as MinNewTokensLengthLogitsProcessor, __webpack_exports__MistralForCausalLM as MistralForCausalLM, __webpack_exports__MistralModel as MistralModel, __webpack_exports__MistralPreTrainedModel as MistralPreTrainedModel, __webpack_exports__MobileBertForMaskedLM as MobileBertForMaskedLM, __webpack_exports__MobileBertForQuestionAnswering as MobileBertForQuestionAnswering, __webpack_exports__MobileBertForSequenceClassification as MobileBertForSequenceClassification, 
__webpack_exports__MobileBertModel as MobileBertModel, __webpack_exports__MobileBertPreTrainedModel as MobileBertPreTrainedModel, __webpack_exports__MobileBertTokenizer as MobileBertTokenizer, __webpack_exports__MobileLLMForCausalLM as MobileLLMForCausalLM, __webpack_exports__MobileLLMModel as MobileLLMModel, __webpack_exports__MobileLLMPreTrainedModel as MobileLLMPreTrainedModel, __webpack_exports__MobileNetV1FeatureExtractor as MobileNetV1FeatureExtractor, __webpack_exports__MobileNetV1ForImageClassification as MobileNetV1ForImageClassification, __webpack_exports__MobileNetV1ForSemanticSegmentation as MobileNetV1ForSemanticSegmentation, __webpack_exports__MobileNetV1ImageProcessor as MobileNetV1ImageProcessor, __webpack_exports__MobileNetV1Model as MobileNetV1Model, __webpack_exports__MobileNetV1PreTrainedModel as MobileNetV1PreTrainedModel, __webpack_exports__MobileNetV2FeatureExtractor as MobileNetV2FeatureExtractor, __webpack_exports__MobileNetV2ForImageClassification as MobileNetV2ForImageClassification, __webpack_exports__MobileNetV2ForSemanticSegmentation as MobileNetV2ForSemanticSegmentation, __webpack_exports__MobileNetV2ImageProcessor as MobileNetV2ImageProcessor, __webpack_exports__MobileNetV2Model as MobileNetV2Model, __webpack_exports__MobileNetV2PreTrainedModel as MobileNetV2PreTrainedModel, __webpack_exports__MobileNetV3FeatureExtractor as MobileNetV3FeatureExtractor, __webpack_exports__MobileNetV3ForImageClassification as MobileNetV3ForImageClassification, __webpack_exports__MobileNetV3ForSemanticSegmentation as MobileNetV3ForSemanticSegmentation, __webpack_exports__MobileNetV3ImageProcessor as MobileNetV3ImageProcessor, __webpack_exports__MobileNetV3Model as MobileNetV3Model, __webpack_exports__MobileNetV3PreTrainedModel as MobileNetV3PreTrainedModel, __webpack_exports__MobileNetV4FeatureExtractor as MobileNetV4FeatureExtractor, __webpack_exports__MobileNetV4ForImageClassification as MobileNetV4ForImageClassification, 
__webpack_exports__MobileNetV4ForSemanticSegmentation as MobileNetV4ForSemanticSegmentation, __webpack_exports__MobileNetV4ImageProcessor as MobileNetV4ImageProcessor, __webpack_exports__MobileNetV4Model as MobileNetV4Model, __webpack_exports__MobileNetV4PreTrainedModel as MobileNetV4PreTrainedModel, __webpack_exports__MobileViTFeatureExtractor as MobileViTFeatureExtractor, __webpack_exports__MobileViTForImageClassification as MobileViTForImageClassification, __webpack_exports__MobileViTImageProcessor as MobileViTImageProcessor, __webpack_exports__MobileViTModel as MobileViTModel, __webpack_exports__MobileViTPreTrainedModel as MobileViTPreTrainedModel, __webpack_exports__MobileViTV2ForImageClassification as MobileViTV2ForImageClassification, __webpack_exports__MobileViTV2Model as MobileViTV2Model, __webpack_exports__MobileViTV2PreTrainedModel as MobileViTV2PreTrainedModel, __webpack_exports__ModelOutput as ModelOutput, __webpack_exports__ModernBertForMaskedLM as ModernBertForMaskedLM, __webpack_exports__ModernBertForSequenceClassification as ModernBertForSequenceClassification, __webpack_exports__ModernBertForTokenClassification as ModernBertForTokenClassification, __webpack_exports__ModernBertModel as ModernBertModel, __webpack_exports__ModernBertPreTrainedModel as ModernBertPreTrainedModel, __webpack_exports__Moondream1ForConditionalGeneration as Moondream1ForConditionalGeneration, __webpack_exports__MoonshineFeatureExtractor as MoonshineFeatureExtractor, __webpack_exports__MoonshineForConditionalGeneration as MoonshineForConditionalGeneration, __webpack_exports__MoonshineModel as MoonshineModel, __webpack_exports__MoonshinePreTrainedModel as MoonshinePreTrainedModel, __webpack_exports__MoonshineProcessor as MoonshineProcessor, __webpack_exports__MptForCausalLM as MptForCausalLM, __webpack_exports__MptModel as MptModel, __webpack_exports__MptPreTrainedModel as MptPreTrainedModel, __webpack_exports__MultiModalityCausalLM as MultiModalityCausalLM, 
__webpack_exports__MultiModalityPreTrainedModel as MultiModalityPreTrainedModel, __webpack_exports__MusicgenForCausalLM as MusicgenForCausalLM, __webpack_exports__MusicgenForConditionalGeneration as MusicgenForConditionalGeneration, __webpack_exports__MusicgenModel as MusicgenModel, __webpack_exports__MusicgenPreTrainedModel as MusicgenPreTrainedModel, __webpack_exports__NllbTokenizer as NllbTokenizer, __webpack_exports__NoBadWordsLogitsProcessor as NoBadWordsLogitsProcessor, __webpack_exports__NoRepeatNGramLogitsProcessor as NoRepeatNGramLogitsProcessor, __webpack_exports__NomicBertModel as NomicBertModel, __webpack_exports__NomicBertPreTrainedModel as NomicBertPreTrainedModel, __webpack_exports__NougatImageProcessor as NougatImageProcessor, __webpack_exports__NougatTokenizer as NougatTokenizer, __webpack_exports__OPTForCausalLM as OPTForCausalLM, __webpack_exports__OPTModel as OPTModel, __webpack_exports__OPTPreTrainedModel as OPTPreTrainedModel, __webpack_exports__ObjectDetectionPipeline as ObjectDetectionPipeline, __webpack_exports__Olmo2ForCausalLM as Olmo2ForCausalLM, __webpack_exports__Olmo2Model as Olmo2Model, __webpack_exports__Olmo2PreTrainedModel as Olmo2PreTrainedModel, __webpack_exports__OlmoForCausalLM as OlmoForCausalLM, __webpack_exports__OlmoModel as OlmoModel, __webpack_exports__OlmoPreTrainedModel as OlmoPreTrainedModel, __webpack_exports__OpenELMForCausalLM as OpenELMForCausalLM, __webpack_exports__OpenELMModel as OpenELMModel, __webpack_exports__OpenELMPreTrainedModel as OpenELMPreTrainedModel, __webpack_exports__OwlViTFeatureExtractor as OwlViTFeatureExtractor, __webpack_exports__OwlViTForObjectDetection as OwlViTForObjectDetection, __webpack_exports__OwlViTImageProcessor as OwlViTImageProcessor, __webpack_exports__OwlViTModel as OwlViTModel, __webpack_exports__OwlViTPreTrainedModel as OwlViTPreTrainedModel, __webpack_exports__OwlViTProcessor as OwlViTProcessor, __webpack_exports__Owlv2ForObjectDetection as Owlv2ForObjectDetection, 
__webpack_exports__Owlv2ImageProcessor as Owlv2ImageProcessor, __webpack_exports__Owlv2Model as Owlv2Model, __webpack_exports__Owlv2PreTrainedModel as Owlv2PreTrainedModel, __webpack_exports__PaliGemmaForConditionalGeneration as PaliGemmaForConditionalGeneration, __webpack_exports__PaliGemmaPreTrainedModel as PaliGemmaPreTrainedModel, __webpack_exports__PaliGemmaProcessor as PaliGemmaProcessor, __webpack_exports__PatchTSMixerForPrediction as PatchTSMixerForPrediction, __webpack_exports__PatchTSMixerModel as PatchTSMixerModel, __webpack_exports__PatchTSMixerPreTrainedModel as PatchTSMixerPreTrainedModel, __webpack_exports__PatchTSTForPrediction as PatchTSTForPrediction, __webpack_exports__PatchTSTModel as PatchTSTModel, __webpack_exports__PatchTSTPreTrainedModel as PatchTSTPreTrainedModel, __webpack_exports__Phi3ForCausalLM as Phi3ForCausalLM, __webpack_exports__Phi3Model as Phi3Model, __webpack_exports__Phi3PreTrainedModel as Phi3PreTrainedModel, __webpack_exports__Phi3VForCausalLM as Phi3VForCausalLM, __webpack_exports__Phi3VImageProcessor as Phi3VImageProcessor, __webpack_exports__Phi3VPreTrainedModel as Phi3VPreTrainedModel, __webpack_exports__Phi3VProcessor as Phi3VProcessor, __webpack_exports__PhiForCausalLM as PhiForCausalLM, __webpack_exports__PhiModel as PhiModel, __webpack_exports__PhiPreTrainedModel as PhiPreTrainedModel, __webpack_exports__Pipeline as Pipeline, __webpack_exports__PreTrainedModel as PreTrainedModel, __webpack_exports__PreTrainedTokenizer as PreTrainedTokenizer, __webpack_exports__PretrainedConfig as PretrainedConfig, __webpack_exports__PretrainedMixin as PretrainedMixin, __webpack_exports__Processor as Processor, __webpack_exports__PvtForImageClassification as PvtForImageClassification, __webpack_exports__PvtImageProcessor as PvtImageProcessor, __webpack_exports__PvtModel as PvtModel, __webpack_exports__PvtPreTrainedModel as PvtPreTrainedModel, __webpack_exports__PyAnnoteFeatureExtractor as PyAnnoteFeatureExtractor, 
__webpack_exports__PyAnnoteForAudioFrameClassification as PyAnnoteForAudioFrameClassification, __webpack_exports__PyAnnoteModel as PyAnnoteModel, __webpack_exports__PyAnnotePreTrainedModel as PyAnnotePreTrainedModel, __webpack_exports__PyAnnoteProcessor as PyAnnoteProcessor, __webpack_exports__QuestionAnsweringModelOutput as QuestionAnsweringModelOutput, __webpack_exports__QuestionAnsweringPipeline as QuestionAnsweringPipeline, __webpack_exports__Qwen2ForCausalLM as Qwen2ForCausalLM, __webpack_exports__Qwen2Model as Qwen2Model, __webpack_exports__Qwen2PreTrainedModel as Qwen2PreTrainedModel, __webpack_exports__Qwen2Tokenizer as Qwen2Tokenizer, __webpack_exports__Qwen2VLForConditionalGeneration as Qwen2VLForConditionalGeneration, __webpack_exports__Qwen2VLImageProcessor as Qwen2VLImageProcessor, __webpack_exports__Qwen2VLPreTrainedModel as Qwen2VLPreTrainedModel, __webpack_exports__Qwen2VLProcessor as Qwen2VLProcessor, __webpack_exports__RTDetrForObjectDetection as RTDetrForObjectDetection, __webpack_exports__RTDetrImageProcessor as RTDetrImageProcessor, __webpack_exports__RTDetrModel as RTDetrModel, __webpack_exports__RTDetrObjectDetectionOutput as RTDetrObjectDetectionOutput, __webpack_exports__RTDetrPreTrainedModel as RTDetrPreTrainedModel, __webpack_exports__RawAudio as RawAudio, __webpack_exports__RawImage as RawImage, __webpack_exports__RawVideo as RawVideo, __webpack_exports__RawVideoFrame as RawVideoFrame, __webpack_exports__RepetitionPenaltyLogitsProcessor as RepetitionPenaltyLogitsProcessor, __webpack_exports__ResNetForImageClassification as ResNetForImageClassification, __webpack_exports__ResNetModel as ResNetModel, __webpack_exports__ResNetPreTrainedModel as ResNetPreTrainedModel, __webpack_exports__RoFormerForMaskedLM as RoFormerForMaskedLM, __webpack_exports__RoFormerForQuestionAnswering as RoFormerForQuestionAnswering, __webpack_exports__RoFormerForSequenceClassification as RoFormerForSequenceClassification, 
__webpack_exports__RoFormerForTokenClassification as RoFormerForTokenClassification, __webpack_exports__RoFormerModel as RoFormerModel, __webpack_exports__RoFormerPreTrainedModel as RoFormerPreTrainedModel, __webpack_exports__RoFormerTokenizer as RoFormerTokenizer, __webpack_exports__RobertaForMaskedLM as RobertaForMaskedLM, __webpack_exports__RobertaForQuestionAnswering as RobertaForQuestionAnswering, __webpack_exports__RobertaForSequenceClassification as RobertaForSequenceClassification, __webpack_exports__RobertaForTokenClassification as RobertaForTokenClassification, __webpack_exports__RobertaModel as RobertaModel, __webpack_exports__RobertaPreTrainedModel as RobertaPreTrainedModel, __webpack_exports__RobertaTokenizer as RobertaTokenizer, __webpack_exports__SamImageProcessor as SamImageProcessor, __webpack_exports__SamImageSegmentationOutput as SamImageSegmentationOutput, __webpack_exports__SamModel as SamModel, __webpack_exports__SamPreTrainedModel as SamPreTrainedModel, __webpack_exports__SamProcessor as SamProcessor, __webpack_exports__SapiensForDepthEstimation as SapiensForDepthEstimation, __webpack_exports__SapiensForNormalEstimation as SapiensForNormalEstimation, __webpack_exports__SapiensForSemanticSegmentation as SapiensForSemanticSegmentation, __webpack_exports__SapiensPreTrainedModel as SapiensPreTrainedModel, __webpack_exports__SeamlessM4TFeatureExtractor as SeamlessM4TFeatureExtractor, __webpack_exports__SegformerFeatureExtractor as SegformerFeatureExtractor, __webpack_exports__SegformerForImageClassification as SegformerForImageClassification, __webpack_exports__SegformerForSemanticSegmentation as SegformerForSemanticSegmentation, __webpack_exports__SegformerImageProcessor as SegformerImageProcessor, __webpack_exports__SegformerModel as SegformerModel, __webpack_exports__SegformerPreTrainedModel as SegformerPreTrainedModel, __webpack_exports__Seq2SeqLMOutput as Seq2SeqLMOutput, __webpack_exports__SequenceClassifierOutput as 
SequenceClassifierOutput, __webpack_exports__SiglipImageProcessor as SiglipImageProcessor, __webpack_exports__SiglipModel as SiglipModel, __webpack_exports__SiglipPreTrainedModel as SiglipPreTrainedModel, __webpack_exports__SiglipTextModel as SiglipTextModel, __webpack_exports__SiglipTokenizer as SiglipTokenizer, __webpack_exports__SiglipVisionModel as SiglipVisionModel, __webpack_exports__SmolVLMForConditionalGeneration as SmolVLMForConditionalGeneration, __webpack_exports__SmolVLMImageProcessor as SmolVLMImageProcessor, __webpack_exports__SmolVLMProcessor as SmolVLMProcessor, __webpack_exports__SnacDecoderModel as SnacDecoderModel, __webpack_exports__SnacEncoderModel as SnacEncoderModel, __webpack_exports__SnacFeatureExtractor as SnacFeatureExtractor, __webpack_exports__SnacModel as SnacModel, __webpack_exports__SnacPreTrainedModel as SnacPreTrainedModel, __webpack_exports__SpeechT5FeatureExtractor as SpeechT5FeatureExtractor, __webpack_exports__SpeechT5ForSpeechToText as SpeechT5ForSpeechToText, __webpack_exports__SpeechT5ForTextToSpeech as SpeechT5ForTextToSpeech, __webpack_exports__SpeechT5HifiGan as SpeechT5HifiGan, __webpack_exports__SpeechT5Model as SpeechT5Model, __webpack_exports__SpeechT5PreTrainedModel as SpeechT5PreTrainedModel, __webpack_exports__SpeechT5Processor as SpeechT5Processor, __webpack_exports__SpeechT5Tokenizer as SpeechT5Tokenizer, __webpack_exports__SqueezeBertForMaskedLM as SqueezeBertForMaskedLM, __webpack_exports__SqueezeBertForQuestionAnswering as SqueezeBertForQuestionAnswering, __webpack_exports__SqueezeBertForSequenceClassification as SqueezeBertForSequenceClassification, __webpack_exports__SqueezeBertModel as SqueezeBertModel, __webpack_exports__SqueezeBertPreTrainedModel as SqueezeBertPreTrainedModel, __webpack_exports__SqueezeBertTokenizer as SqueezeBertTokenizer, __webpack_exports__StableLmForCausalLM as StableLmForCausalLM, __webpack_exports__StableLmModel as StableLmModel, __webpack_exports__StableLmPreTrainedModel as 
StableLmPreTrainedModel, __webpack_exports__Starcoder2ForCausalLM as Starcoder2ForCausalLM, __webpack_exports__Starcoder2Model as Starcoder2Model, __webpack_exports__Starcoder2PreTrainedModel as Starcoder2PreTrainedModel, __webpack_exports__StoppingCriteria as StoppingCriteria, __webpack_exports__StoppingCriteriaList as StoppingCriteriaList, __webpack_exports__StyleTextToSpeech2Model as StyleTextToSpeech2Model, __webpack_exports__StyleTextToSpeech2PreTrainedModel as StyleTextToSpeech2PreTrainedModel, __webpack_exports__SummarizationPipeline as SummarizationPipeline, __webpack_exports__SuppressTokensAtBeginLogitsProcessor as SuppressTokensAtBeginLogitsProcessor, __webpack_exports__Swin2SRForImageSuperResolution as Swin2SRForImageSuperResolution, __webpack_exports__Swin2SRImageProcessor as Swin2SRImageProcessor, __webpack_exports__Swin2SRModel as Swin2SRModel, __webpack_exports__Swin2SRPreTrainedModel as Swin2SRPreTrainedModel, __webpack_exports__SwinForImageClassification as SwinForImageClassification, __webpack_exports__SwinForSemanticSegmentation as SwinForSemanticSegmentation, __webpack_exports__SwinModel as SwinModel, __webpack_exports__SwinPreTrainedModel as SwinPreTrainedModel, __webpack_exports__T5ForConditionalGeneration as T5ForConditionalGeneration, __webpack_exports__T5Model as T5Model, __webpack_exports__T5PreTrainedModel as T5PreTrainedModel, __webpack_exports__T5Tokenizer as T5Tokenizer, __webpack_exports__TableTransformerForObjectDetection as TableTransformerForObjectDetection, __webpack_exports__TableTransformerModel as TableTransformerModel, __webpack_exports__TableTransformerObjectDetectionOutput as TableTransformerObjectDetectionOutput, __webpack_exports__TableTransformerPreTrainedModel as TableTransformerPreTrainedModel, __webpack_exports__TemperatureLogitsWarper as TemperatureLogitsWarper, __webpack_exports__Tensor as Tensor, __webpack_exports__Text2TextGenerationPipeline as Text2TextGenerationPipeline, 
__webpack_exports__TextClassificationPipeline as TextClassificationPipeline, __webpack_exports__TextGenerationPipeline as TextGenerationPipeline, __webpack_exports__TextStreamer as TextStreamer, __webpack_exports__TextToAudioPipeline as TextToAudioPipeline, __webpack_exports__TokenClassificationPipeline as TokenClassificationPipeline, __webpack_exports__TokenClassifierOutput as TokenClassifierOutput, __webpack_exports__TokenizerModel as TokenizerModel, __webpack_exports__TopKLogitsWarper as TopKLogitsWarper, __webpack_exports__TopPLogitsWarper as TopPLogitsWarper, __webpack_exports__TrOCRForCausalLM as TrOCRForCausalLM, __webpack_exports__TrOCRPreTrainedModel as TrOCRPreTrainedModel, __webpack_exports__TranslationPipeline as TranslationPipeline, __webpack_exports__UltravoxModel as UltravoxModel, __webpack_exports__UltravoxPreTrainedModel as UltravoxPreTrainedModel, __webpack_exports__UltravoxProcessor as UltravoxProcessor, __webpack_exports__UniSpeechForCTC as UniSpeechForCTC, __webpack_exports__UniSpeechForSequenceClassification as UniSpeechForSequenceClassification, __webpack_exports__UniSpeechModel as UniSpeechModel, __webpack_exports__UniSpeechPreTrainedModel as UniSpeechPreTrainedModel, __webpack_exports__UniSpeechSatForAudioFrameClassification as UniSpeechSatForAudioFrameClassification, __webpack_exports__UniSpeechSatForCTC as UniSpeechSatForCTC, __webpack_exports__UniSpeechSatForSequenceClassification as UniSpeechSatForSequenceClassification, __webpack_exports__UniSpeechSatModel as UniSpeechSatModel, __webpack_exports__UniSpeechSatPreTrainedModel as UniSpeechSatPreTrainedModel, __webpack_exports__VLChatProcessor as VLChatProcessor, __webpack_exports__VLMImageProcessor as VLMImageProcessor, __webpack_exports__ViTFeatureExtractor as ViTFeatureExtractor, __webpack_exports__ViTForImageClassification as ViTForImageClassification, __webpack_exports__ViTImageProcessor as ViTImageProcessor, __webpack_exports__ViTMAEModel as ViTMAEModel, 
__webpack_exports__ViTMAEPreTrainedModel as ViTMAEPreTrainedModel, __webpack_exports__ViTMSNForImageClassification as ViTMSNForImageClassification, __webpack_exports__ViTMSNModel as ViTMSNModel, __webpack_exports__ViTMSNPreTrainedModel as ViTMSNPreTrainedModel, __webpack_exports__ViTModel as ViTModel, __webpack_exports__ViTPreTrainedModel as ViTPreTrainedModel, __webpack_exports__VisionEncoderDecoderModel as VisionEncoderDecoderModel, __webpack_exports__VitMatteForImageMatting as VitMatteForImageMatting, __webpack_exports__VitMatteImageProcessor as VitMatteImageProcessor, __webpack_exports__VitMattePreTrainedModel as VitMattePreTrainedModel, __webpack_exports__VitPoseForPoseEstimation as VitPoseForPoseEstimation, __webpack_exports__VitPoseImageProcessor as VitPoseImageProcessor, __webpack_exports__VitPosePreTrainedModel as VitPosePreTrainedModel, __webpack_exports__VitsModel as VitsModel, __webpack_exports__VitsModelOutput as VitsModelOutput, __webpack_exports__VitsPreTrainedModel as VitsPreTrainedModel, __webpack_exports__VitsTokenizer as VitsTokenizer, __webpack_exports__Wav2Vec2BertForCTC as Wav2Vec2BertForCTC, __webpack_exports__Wav2Vec2BertForSequenceClassification as Wav2Vec2BertForSequenceClassification, __webpack_exports__Wav2Vec2BertModel as Wav2Vec2BertModel, __webpack_exports__Wav2Vec2BertPreTrainedModel as Wav2Vec2BertPreTrainedModel, __webpack_exports__Wav2Vec2CTCTokenizer as Wav2Vec2CTCTokenizer, __webpack_exports__Wav2Vec2FeatureExtractor as Wav2Vec2FeatureExtractor, __webpack_exports__Wav2Vec2ForAudioFrameClassification as Wav2Vec2ForAudioFrameClassification, __webpack_exports__Wav2Vec2ForCTC as Wav2Vec2ForCTC, __webpack_exports__Wav2Vec2ForSequenceClassification as Wav2Vec2ForSequenceClassification, __webpack_exports__Wav2Vec2Model as Wav2Vec2Model, __webpack_exports__Wav2Vec2PreTrainedModel as Wav2Vec2PreTrainedModel, __webpack_exports__Wav2Vec2Processor as Wav2Vec2Processor, __webpack_exports__Wav2Vec2ProcessorWithLM as Wav2Vec2ProcessorWithLM, 
__webpack_exports__WavLMForAudioFrameClassification as WavLMForAudioFrameClassification, __webpack_exports__WavLMForCTC as WavLMForCTC, __webpack_exports__WavLMForSequenceClassification as WavLMForSequenceClassification, __webpack_exports__WavLMForXVector as WavLMForXVector, __webpack_exports__WavLMModel as WavLMModel, __webpack_exports__WavLMPreTrainedModel as WavLMPreTrainedModel, __webpack_exports__WeSpeakerFeatureExtractor as WeSpeakerFeatureExtractor, __webpack_exports__WeSpeakerResNetModel as WeSpeakerResNetModel, __webpack_exports__WeSpeakerResNetPreTrainedModel as WeSpeakerResNetPreTrainedModel, __webpack_exports__WhisperFeatureExtractor as WhisperFeatureExtractor, __webpack_exports__WhisperForConditionalGeneration as WhisperForConditionalGeneration, __webpack_exports__WhisperModel as WhisperModel, __webpack_exports__WhisperPreTrainedModel as WhisperPreTrainedModel, __webpack_exports__WhisperProcessor as WhisperProcessor, __webpack_exports__WhisperTextStreamer as WhisperTextStreamer, __webpack_exports__WhisperTimeStampLogitsProcessor as WhisperTimeStampLogitsProcessor, __webpack_exports__WhisperTokenizer as WhisperTokenizer, __webpack_exports__XLMForQuestionAnswering as XLMForQuestionAnswering, __webpack_exports__XLMForSequenceClassification as XLMForSequenceClassification, __webpack_exports__XLMForTokenClassification as XLMForTokenClassification, __webpack_exports__XLMModel as XLMModel, __webpack_exports__XLMPreTrainedModel as XLMPreTrainedModel, __webpack_exports__XLMRobertaForMaskedLM as XLMRobertaForMaskedLM, __webpack_exports__XLMRobertaForQuestionAnswering as XLMRobertaForQuestionAnswering, __webpack_exports__XLMRobertaForSequenceClassification as XLMRobertaForSequenceClassification, __webpack_exports__XLMRobertaForTokenClassification as XLMRobertaForTokenClassification, __webpack_exports__XLMRobertaModel as XLMRobertaModel, __webpack_exports__XLMRobertaPreTrainedModel as XLMRobertaPreTrainedModel, __webpack_exports__XLMRobertaTokenizer as 
XLMRobertaTokenizer, __webpack_exports__XLMTokenizer as XLMTokenizer, __webpack_exports__XLMWithLMHeadModel as XLMWithLMHeadModel, __webpack_exports__XVectorOutput as XVectorOutput, __webpack_exports__YolosFeatureExtractor as YolosFeatureExtractor, __webpack_exports__YolosForObjectDetection as YolosForObjectDetection, __webpack_exports__YolosImageProcessor as YolosImageProcessor, __webpack_exports__YolosModel as YolosModel, __webpack_exports__YolosObjectDetectionOutput as YolosObjectDetectionOutput, __webpack_exports__YolosPreTrainedModel as YolosPreTrainedModel, __webpack_exports__ZeroShotAudioClassificationPipeline as ZeroShotAudioClassificationPipeline, __webpack_exports__ZeroShotClassificationPipeline as ZeroShotClassificationPipeline, __webpack_exports__ZeroShotImageClassificationPipeline as ZeroShotImageClassificationPipeline, __webpack_exports__ZeroShotObjectDetectionPipeline as ZeroShotObjectDetectionPipeline, __webpack_exports__bankers_round as bankers_round, __webpack_exports__cat as cat, __webpack_exports__cos_sim as cos_sim, __webpack_exports__dot as dot, __webpack_exports__dynamic_time_warping as dynamic_time_warping, __webpack_exports__env as env, __webpack_exports__full as full, __webpack_exports__full_like as full_like, __webpack_exports__getKeyValueShapes as getKeyValueShapes, __webpack_exports__hamming as hamming, __webpack_exports__hanning as hanning, __webpack_exports__interpolate as interpolate, __webpack_exports__interpolate_4d as interpolate_4d, __webpack_exports__interpolate_data as interpolate_data, __webpack_exports__is_chinese_char as is_chinese_char, __webpack_exports__layer_norm as layer_norm, __webpack_exports__load_image as load_image, __webpack_exports__load_video as load_video, __webpack_exports__log_softmax as log_softmax, __webpack_exports__magnitude as magnitude, __webpack_exports__matmul as matmul, __webpack_exports__max as max, __webpack_exports__mean as mean, __webpack_exports__mean_pooling as mean_pooling, 
__webpack_exports__medianFilter as medianFilter, __webpack_exports__mel_filter_bank as mel_filter_bank, __webpack_exports__min as min, __webpack_exports__ones as ones, __webpack_exports__ones_like as ones_like, __webpack_exports__permute as permute, __webpack_exports__permute_data as permute_data, __webpack_exports__pipeline as pipeline, __webpack_exports__quantize_embeddings as quantize_embeddings, __webpack_exports__rand as rand, __webpack_exports__read_audio as read_audio, __webpack_exports__rfft as rfft, __webpack_exports__round as round, __webpack_exports__slice as slice, __webpack_exports__softmax as softmax, __webpack_exports__spectrogram as spectrogram, __webpack_exports__stack as stack, __webpack_exports__std_mean as std_mean, __webpack_exports__topk as topk, __webpack_exports__window_function as window_function, __webpack_exports__zeros as zeros, __webpack_exports__zeros_like as zeros_like };
35712
35875
 
35713
35876
  //# sourceMappingURL=transformers.web.js.map