@huggingface/transformers 3.1.0 → 3.1.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +7 -3
- package/dist/ort-wasm-simd-threaded.jsep.wasm +0 -0
- package/dist/transformers.cjs +965 -195
- package/dist/transformers.cjs.map +1 -1
- package/dist/transformers.js +2251 -1360
- package/dist/transformers.js.map +1 -1
- package/dist/transformers.min.cjs +1 -352
- package/dist/transformers.min.cjs.map +1 -1
- package/dist/transformers.min.js +1 -415
- package/dist/transformers.min.js.map +1 -1
- package/dist/transformers.min.mjs +1 -352
- package/dist/transformers.min.mjs.map +1 -1
- package/dist/transformers.mjs +979 -194
- package/dist/transformers.mjs.map +1 -1
- package/package.json +11 -16
- package/src/backends/onnx.js +2 -7
- package/src/configs.js +3 -1
- package/src/env.js +6 -6
- package/src/generation/configuration_utils.js +7 -0
- package/src/generation/logits_process.js +22 -16
- package/src/generation/streamers.js +7 -2
- package/src/models/idefics3/image_processing_idefics3.js +219 -0
- package/src/models/idefics3/processing_idefics3.js +136 -0
- package/src/models/image_processors.js +1 -0
- package/src/models/paligemma/processing_paligemma.js +82 -0
- package/src/models/processors.js +2 -0
- package/src/models.js +169 -39
- package/src/tokenizers.js +12 -1
- package/src/utils/core.js +53 -9
- package/src/utils/dtypes.js +2 -1
- package/src/utils/hub.js +8 -12
- package/src/utils/image.js +59 -16
- package/src/utils/tensor.js +6 -1
- package/types/backends/onnx.d.ts +2 -2
- package/types/backends/onnx.d.ts.map +1 -1
- package/types/base/feature_extraction_utils.d.ts +1 -1
- package/types/base/feature_extraction_utils.d.ts.map +1 -1
- package/types/base/image_processors_utils.d.ts +2 -2
- package/types/base/image_processors_utils.d.ts.map +1 -1
- package/types/base/processing_utils.d.ts +4 -4
- package/types/base/processing_utils.d.ts.map +1 -1
- package/types/configs.d.ts +7 -7
- package/types/configs.d.ts.map +1 -1
- package/types/env.d.ts +2 -2
- package/types/env.d.ts.map +1 -1
- package/types/generation/configuration_utils.d.ts +7 -1
- package/types/generation/configuration_utils.d.ts.map +1 -1
- package/types/generation/logits_process.d.ts +32 -22
- package/types/generation/logits_process.d.ts.map +1 -1
- package/types/generation/logits_sampler.d.ts.map +1 -1
- package/types/generation/parameters.d.ts +5 -5
- package/types/generation/stopping_criteria.d.ts +1 -1
- package/types/generation/stopping_criteria.d.ts.map +1 -1
- package/types/generation/streamers.d.ts +15 -10
- package/types/generation/streamers.d.ts.map +1 -1
- package/types/models/audio_spectrogram_transformer/feature_extraction_audio_spectrogram_transformer.d.ts +1 -1
- package/types/models/audio_spectrogram_transformer/feature_extraction_audio_spectrogram_transformer.d.ts.map +1 -1
- package/types/models/auto/feature_extraction_auto.d.ts.map +1 -1
- package/types/models/auto/image_processing_auto.d.ts.map +1 -1
- package/types/models/auto/processing_auto.d.ts +1 -1
- package/types/models/auto/processing_auto.d.ts.map +1 -1
- package/types/models/clap/feature_extraction_clap.d.ts +1 -1
- package/types/models/clap/feature_extraction_clap.d.ts.map +1 -1
- package/types/models/detr/image_processing_detr.d.ts +11 -11
- package/types/models/detr/image_processing_detr.d.ts.map +1 -1
- package/types/models/donut/image_processing_donut.d.ts +1 -1
- package/types/models/donut/image_processing_donut.d.ts.map +1 -1
- package/types/models/florence2/processing_florence2.d.ts.map +1 -1
- package/types/models/idefics3/image_processing_idefics3.d.ts +40 -0
- package/types/models/idefics3/image_processing_idefics3.d.ts.map +1 -0
- package/types/models/idefics3/processing_idefics3.d.ts +19 -0
- package/types/models/idefics3/processing_idefics3.d.ts.map +1 -0
- package/types/models/image_processors.d.ts +1 -0
- package/types/models/janus/image_processing_janus.d.ts +1 -1
- package/types/models/janus/image_processing_janus.d.ts.map +1 -1
- package/types/models/janus/processing_janus.d.ts.map +1 -1
- package/types/models/maskformer/image_processing_maskformer.d.ts +8 -8
- package/types/models/maskformer/image_processing_maskformer.d.ts.map +1 -1
- package/types/models/mgp_str/processing_mgp_str.d.ts +2 -2
- package/types/models/mgp_str/processing_mgp_str.d.ts.map +1 -1
- package/types/models/owlvit/image_processing_owlvit.d.ts.map +1 -1
- package/types/models/paligemma/processing_paligemma.d.ts +12 -0
- package/types/models/paligemma/processing_paligemma.d.ts.map +1 -0
- package/types/models/processors.d.ts +2 -0
- package/types/models/pyannote/feature_extraction_pyannote.d.ts.map +1 -1
- package/types/models/pyannote/processing_pyannote.d.ts +1 -1
- package/types/models/pyannote/processing_pyannote.d.ts.map +1 -1
- package/types/models/qwen2_vl/processing_qwen2_vl.d.ts.map +1 -1
- package/types/models/rt_detr/image_processing_rt_detr.d.ts.map +1 -1
- package/types/models/sam/image_processing_sam.d.ts.map +1 -1
- package/types/models/seamless_m4t/feature_extraction_seamless_m4t.d.ts +1 -1
- package/types/models/seamless_m4t/feature_extraction_seamless_m4t.d.ts.map +1 -1
- package/types/models/segformer/image_processing_segformer.d.ts.map +1 -1
- package/types/models/speecht5/processing_speecht5.d.ts.map +1 -1
- package/types/models/swin2sr/image_processing_swin2sr.d.ts +1 -1
- package/types/models/swin2sr/image_processing_swin2sr.d.ts.map +1 -1
- package/types/models/vitmatte/image_processing_vitmatte.d.ts.map +1 -1
- package/types/models/vitpose/image_processing_vitpose.d.ts +1 -1
- package/types/models/vitpose/image_processing_vitpose.d.ts.map +1 -1
- package/types/models/wav2vec2/feature_extraction_wav2vec2.d.ts.map +1 -1
- package/types/models/wav2vec2/processing_wav2vec2.d.ts.map +1 -1
- package/types/models/wespeaker/feature_extraction_wespeaker.d.ts +1 -1
- package/types/models/wespeaker/feature_extraction_wespeaker.d.ts.map +1 -1
- package/types/models/whisper/feature_extraction_whisper.d.ts +1 -1
- package/types/models/whisper/feature_extraction_whisper.d.ts.map +1 -1
- package/types/models/whisper/generation_whisper.d.ts.map +1 -1
- package/types/models/whisper/processing_whisper.d.ts.map +1 -1
- package/types/models/yolos/image_processing_yolos.d.ts.map +1 -1
- package/types/models.d.ts +44 -10
- package/types/models.d.ts.map +1 -1
- package/types/ops/registry.d.ts.map +1 -1
- package/types/pipelines.d.ts +26 -51
- package/types/pipelines.d.ts.map +1 -1
- package/types/tokenizers.d.ts +10 -6
- package/types/tokenizers.d.ts.map +1 -1
- package/types/utils/audio.d.ts.map +1 -1
- package/types/utils/constants.d.ts.map +1 -1
- package/types/utils/core.d.ts +94 -22
- package/types/utils/core.d.ts.map +1 -1
- package/types/utils/data-structures.d.ts.map +1 -1
- package/types/utils/devices.d.ts.map +1 -1
- package/types/utils/dtypes.d.ts +3 -2
- package/types/utils/dtypes.d.ts.map +1 -1
- package/types/utils/generic.d.ts.map +1 -1
- package/types/utils/hub.d.ts +3 -3
- package/types/utils/hub.d.ts.map +1 -1
- package/types/utils/image.d.ts +14 -1
- package/types/utils/image.d.ts.map +1 -1
- package/types/utils/maths.d.ts +10 -10
- package/types/utils/maths.d.ts.map +1 -1
- package/types/utils/tensor.d.ts +10 -8
- package/types/utils/tensor.d.ts.map +1 -1
package/dist/transformers.mjs
CHANGED
|
@@ -55,10 +55,10 @@ module.exports = __WEBPACK_EXTERNAL_MODULE_url__;
|
|
|
55
55
|
|
|
56
56
|
/***/ }),
|
|
57
57
|
|
|
58
|
-
/***/ "?
|
|
59
|
-
|
|
60
|
-
!***
|
|
61
|
-
|
|
58
|
+
/***/ "?8b6b":
|
|
59
|
+
/*!*********************************!*\
|
|
60
|
+
!*** onnxruntime-web (ignored) ***!
|
|
61
|
+
\*********************************/
|
|
62
62
|
/***/ (() => {
|
|
63
63
|
|
|
64
64
|
/* (ignored) */
|
|
@@ -3871,7 +3871,7 @@ const version = '1.20.1';
|
|
|
3871
3871
|
\******************************/
|
|
3872
3872
|
/***/ ((__unused_webpack___webpack_module__, __webpack_exports__, __webpack_require__) => {
|
|
3873
3873
|
|
|
3874
|
-
var
|
|
3874
|
+
var onnxruntime_web__WEBPACK_IMPORTED_MODULE_2___namespace_cache;
|
|
3875
3875
|
__webpack_require__.r(__webpack_exports__);
|
|
3876
3876
|
/* harmony export */ __webpack_require__.d(__webpack_exports__, {
|
|
3877
3877
|
/* harmony export */ Tensor: () => (/* reexport safe */ onnxruntime_common__WEBPACK_IMPORTED_MODULE_3__.Tensor),
|
|
@@ -3882,7 +3882,7 @@ __webpack_require__.r(__webpack_exports__);
|
|
|
3882
3882
|
/* harmony export */ });
|
|
3883
3883
|
/* harmony import */ var _env_js__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! ../env.js */ "./src/env.js");
|
|
3884
3884
|
/* harmony import */ var onnxruntime_node__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(/*! onnxruntime-node */ "onnxruntime-node");
|
|
3885
|
-
/* harmony import */ var
|
|
3885
|
+
/* harmony import */ var onnxruntime_web__WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(/*! onnxruntime-web */ "?8b6b");
|
|
3886
3886
|
/* harmony import */ var onnxruntime_common__WEBPACK_IMPORTED_MODULE_3__ = __webpack_require__(/*! onnxruntime-common */ "./node_modules/onnxruntime-common/dist/esm/index.js");
|
|
3887
3887
|
/**
|
|
3888
3888
|
* @file Handler file for choosing the correct version of ONNX Runtime, based on the environment.
|
|
@@ -3908,11 +3908,6 @@ __webpack_require__.r(__webpack_exports__);
|
|
|
3908
3908
|
// In either case, we select the default export if it exists, otherwise we use the named export.
|
|
3909
3909
|
|
|
3910
3910
|
|
|
3911
|
-
// Use subpath-imports to ensure Node.js and browser interoperability.
|
|
3912
|
-
// See package.json and https://nodejs.org/api/packages.html#subpath-imports
|
|
3913
|
-
// for more information.
|
|
3914
|
-
// @ts-ignore
|
|
3915
|
-
|
|
3916
3911
|
|
|
3917
3912
|
|
|
3918
3913
|
|
|
@@ -3954,7 +3949,7 @@ if (ORT_SYMBOL in globalThis) {
|
|
|
3954
3949
|
} else if (_env_js__WEBPACK_IMPORTED_MODULE_0__.apis.IS_NODE_ENV) {
|
|
3955
3950
|
ONNX = onnxruntime_node__WEBPACK_IMPORTED_MODULE_1__["default"] ?? onnxruntime_node__WEBPACK_IMPORTED_MODULE_1__;
|
|
3956
3951
|
|
|
3957
|
-
// Updated as of ONNX Runtime 1.
|
|
3952
|
+
// Updated as of ONNX Runtime 1.20.1
|
|
3958
3953
|
// The following table lists the supported versions of ONNX Runtime Node.js binding provided with pre-built binaries.
|
|
3959
3954
|
// | EPs/Platforms | Windows x64 | Windows arm64 | Linux x64 | Linux arm64 | MacOS x64 | MacOS arm64 |
|
|
3960
3955
|
// | ------------- | ----------- | ------------- | ----------------- | ----------- | --------- | ----------- |
|
|
@@ -3977,7 +3972,7 @@ if (ORT_SYMBOL in globalThis) {
|
|
|
3977
3972
|
supportedDevices.push('cpu');
|
|
3978
3973
|
defaultDevices = ['cpu'];
|
|
3979
3974
|
} else {
|
|
3980
|
-
ONNX = /*#__PURE__*/ (
|
|
3975
|
+
ONNX = /*#__PURE__*/ (onnxruntime_web__WEBPACK_IMPORTED_MODULE_2___namespace_cache || (onnxruntime_web__WEBPACK_IMPORTED_MODULE_2___namespace_cache = __webpack_require__.t(onnxruntime_web__WEBPACK_IMPORTED_MODULE_2__, 2)));
|
|
3981
3976
|
|
|
3982
3977
|
if (_env_js__WEBPACK_IMPORTED_MODULE_0__.apis.IS_WEBNN_AVAILABLE) {
|
|
3983
3978
|
// TODO: Only push supported providers (depending on available hardware)
|
|
@@ -5533,6 +5528,7 @@ function getNormalizedConfig(config) {
|
|
|
5533
5528
|
case 'paligemma':
|
|
5534
5529
|
case 'florence2':
|
|
5535
5530
|
case 'llava_onevision':
|
|
5531
|
+
case 'idefics3':
|
|
5536
5532
|
init_normalized_config = getNormalizedConfig(config.text_config);
|
|
5537
5533
|
break;
|
|
5538
5534
|
case 'moondream1':
|
|
@@ -5567,6 +5563,7 @@ function getNormalizedConfig(config) {
|
|
|
5567
5563
|
break;
|
|
5568
5564
|
case 'llama':
|
|
5569
5565
|
case 'olmo':
|
|
5566
|
+
case 'olmo2':
|
|
5570
5567
|
case 'mobilellm':
|
|
5571
5568
|
case 'granite':
|
|
5572
5569
|
case 'cohere':
|
|
@@ -5846,7 +5843,7 @@ class AutoConfig {
|
|
|
5846
5843
|
* See https://onnxruntime.ai/docs/tutorials/web/env-flags-and-session-options.html#freedimensionoverrides
|
|
5847
5844
|
* for more information.
|
|
5848
5845
|
* @property {import('./utils/devices.js').DeviceType} [device] The default device to use for the model.
|
|
5849
|
-
* @property {import('./utils/dtypes.js').DataType} [dtype] The default data type to use for the model.
|
|
5846
|
+
* @property {import('./utils/dtypes.js').DataType|Record<string, import('./utils/dtypes.js').DataType>} [dtype] The default data type to use for the model.
|
|
5850
5847
|
* @property {boolean|Record<string, boolean>} [use_external_data_format=false] Whether to load the model using the external data format (used for models >= 2GB in size).
|
|
5851
5848
|
*/
|
|
5852
5849
|
|
|
@@ -5895,12 +5892,12 @@ __webpack_require__.r(__webpack_exports__);
|
|
|
5895
5892
|
|
|
5896
5893
|
|
|
5897
5894
|
|
|
5898
|
-
const VERSION = '3.1.
|
|
5895
|
+
const VERSION = '3.1.2';
|
|
5899
5896
|
|
|
5900
5897
|
// Check if various APIs are available (depends on environment)
|
|
5901
|
-
const IS_BROWSER_ENV = typeof
|
|
5902
|
-
const IS_WEBWORKER_ENV =
|
|
5903
|
-
const IS_WEB_CACHE_AVAILABLE =
|
|
5898
|
+
const IS_BROWSER_ENV = typeof window !== "undefined" && typeof window.document !== "undefined";
|
|
5899
|
+
const IS_WEBWORKER_ENV = typeof self !== "undefined" && self.constructor?.name === 'DedicatedWorkerGlobalScope';
|
|
5900
|
+
const IS_WEB_CACHE_AVAILABLE = typeof self !== "undefined" && 'caches' in self;
|
|
5904
5901
|
const IS_WEBGPU_AVAILABLE = typeof navigator !== 'undefined' && 'gpu' in navigator;
|
|
5905
5902
|
const IS_WEBNN_AVAILABLE = typeof navigator !== 'undefined' && 'ml' in navigator;
|
|
5906
5903
|
|
|
@@ -5913,7 +5910,7 @@ const IS_PATH_AVAILABLE = !isEmpty(path__WEBPACK_IMPORTED_MODULE_1__["default"])
|
|
|
5913
5910
|
* A read-only object containing information about the APIs available in the current environment.
|
|
5914
5911
|
*/
|
|
5915
5912
|
const apis = Object.freeze({
|
|
5916
|
-
/** Whether we are running in a browser environment */
|
|
5913
|
+
/** Whether we are running in a browser environment (and not a web worker) */
|
|
5917
5914
|
IS_BROWSER_ENV,
|
|
5918
5915
|
|
|
5919
5916
|
/** Whether we are running in a web worker environment */
|
|
@@ -6006,7 +6003,7 @@ const env = {
|
|
|
6006
6003
|
remoteHost: 'https://huggingface.co/',
|
|
6007
6004
|
remotePathTemplate: '{model}/resolve/{revision}/',
|
|
6008
6005
|
|
|
6009
|
-
allowLocalModels: !IS_BROWSER_ENV,
|
|
6006
|
+
allowLocalModels: !(IS_BROWSER_ENV || IS_WEBWORKER_ENV),
|
|
6010
6007
|
localModelPath: localModelPath,
|
|
6011
6008
|
useFS: IS_FS_AVAILABLE,
|
|
6012
6009
|
|
|
@@ -6306,6 +6303,13 @@ class GenerationConfig {
|
|
|
6306
6303
|
*/
|
|
6307
6304
|
suppress_tokens = null;
|
|
6308
6305
|
|
|
6306
|
+
/**
|
|
6307
|
+
* A streamer that will be used to stream the generation.
|
|
6308
|
+
* @type {import('./streamers.js').TextStreamer}
|
|
6309
|
+
* @default null
|
|
6310
|
+
*/
|
|
6311
|
+
streamer = null;
|
|
6312
|
+
|
|
6309
6313
|
/**
|
|
6310
6314
|
* A list of tokens that will be suppressed at the beginning of the generation.
|
|
6311
6315
|
* The `SuppressBeginTokens` logit processor will set their log probs to `-inf` so that they are not sampled.
|
|
@@ -6611,7 +6615,7 @@ class ForcedBOSTokenLogitsProcessor extends LogitsProcessor {
|
|
|
6611
6615
|
* Apply the BOS token forcing to the logits.
|
|
6612
6616
|
* @param {bigint[][]} input_ids The input IDs.
|
|
6613
6617
|
* @param {Tensor} logits The logits.
|
|
6614
|
-
* @returns {
|
|
6618
|
+
* @returns {Tensor} The logits with BOS token forcing.
|
|
6615
6619
|
*/
|
|
6616
6620
|
_call(input_ids, logits) {
|
|
6617
6621
|
for (let i = 0; i < input_ids.length; ++i) {
|
|
@@ -6681,7 +6685,7 @@ class SuppressTokensAtBeginLogitsProcessor extends LogitsProcessor {
|
|
|
6681
6685
|
* Apply the BOS token forcing to the logits.
|
|
6682
6686
|
* @param {bigint[][]} input_ids The input IDs.
|
|
6683
6687
|
* @param {Tensor} logits The logits.
|
|
6684
|
-
* @returns {
|
|
6688
|
+
* @returns {Tensor} The logits with BOS token forcing.
|
|
6685
6689
|
*/
|
|
6686
6690
|
_call(input_ids, logits) {
|
|
6687
6691
|
for (let i = 0; i < input_ids.length; ++i) {
|
|
@@ -6851,7 +6855,7 @@ class NoRepeatNGramLogitsProcessor extends LogitsProcessor {
|
|
|
6851
6855
|
* Apply the no-repeat-ngram processor to the logits.
|
|
6852
6856
|
* @param {bigint[][]} input_ids The input IDs.
|
|
6853
6857
|
* @param {Tensor} logits The logits.
|
|
6854
|
-
* @returns {
|
|
6858
|
+
* @returns {Tensor} The logits with no-repeat-ngram processing.
|
|
6855
6859
|
*/
|
|
6856
6860
|
_call(input_ids, logits) {
|
|
6857
6861
|
for (let i = 0; i < input_ids.length; ++i) {
|
|
@@ -6866,12 +6870,22 @@ class NoRepeatNGramLogitsProcessor extends LogitsProcessor {
|
|
|
6866
6870
|
}
|
|
6867
6871
|
|
|
6868
6872
|
/**
|
|
6869
|
-
* A logits processor that
|
|
6873
|
+
* A logits processor that prevents the repetition of previous tokens through a penalty.
|
|
6874
|
+
* This penalty is applied at most once per token. Note that, for decoder-only models like most LLMs,
|
|
6875
|
+
* the considered tokens include the prompt.
|
|
6876
|
+
*
|
|
6877
|
+
* In the original [paper](https://arxiv.org/pdf/1909.05858.pdf), the authors suggest the use of a
|
|
6878
|
+
* penalty of around 1.2 to achieve a good balance between truthful generation and lack of repetition.
|
|
6879
|
+
* To penalize and reduce repetition, use `penalty` values above 1.0, where a higher value penalizes
|
|
6880
|
+
* more strongly. To reward and encourage repetition, use `penalty` values between 0.0 and 1.0, where
|
|
6881
|
+
* a lower value rewards more strongly.
|
|
6870
6882
|
*/
|
|
6871
6883
|
class RepetitionPenaltyLogitsProcessor extends LogitsProcessor {
|
|
6872
6884
|
/**
|
|
6873
6885
|
* Create a RepetitionPenaltyLogitsProcessor.
|
|
6874
|
-
* @param {number} penalty The
|
|
6886
|
+
* @param {number} penalty The parameter for repetition penalty.
|
|
6887
|
+
* - 1.0 means no penalty. Above 1.0 penalizes previously generated tokens.
|
|
6888
|
+
* - Between 0.0 and 1.0 rewards previously generated tokens.
|
|
6875
6889
|
*/
|
|
6876
6890
|
constructor(penalty) {
|
|
6877
6891
|
super();
|
|
@@ -6882,16 +6896,12 @@ class RepetitionPenaltyLogitsProcessor extends LogitsProcessor {
|
|
|
6882
6896
|
* Apply the repetition penalty to the logits.
|
|
6883
6897
|
* @param {bigint[][]} input_ids The input IDs.
|
|
6884
6898
|
* @param {Tensor} logits The logits.
|
|
6885
|
-
* @returns {
|
|
6899
|
+
* @returns {Tensor} The logits with repetition penalty processing.
|
|
6886
6900
|
*/
|
|
6887
6901
|
_call(input_ids, logits) {
|
|
6888
|
-
// Modify the logits corresponding to each element in `input_ids`.
|
|
6889
|
-
// As a consequence, the logits corresponding to tokens that appear
|
|
6890
|
-
// many times in the output will be penalised more.
|
|
6891
|
-
|
|
6892
6902
|
for (let i = 0; i < input_ids.length; ++i) {
|
|
6893
6903
|
const batch_logits_data = /** @type {Float32Array} */(logits[i].data);
|
|
6894
|
-
for (const input_id of input_ids[i]) {
|
|
6904
|
+
for (const input_id of new Set(input_ids[i])) {
|
|
6895
6905
|
const token = Number(input_id);
|
|
6896
6906
|
if (batch_logits_data[token] < 0) {
|
|
6897
6907
|
batch_logits_data[token] *= this.penalty;
|
|
@@ -6924,7 +6934,7 @@ class MinLengthLogitsProcessor extends LogitsProcessor {
|
|
|
6924
6934
|
* Apply logit processor.
|
|
6925
6935
|
* @param {bigint[][]} input_ids The input IDs.
|
|
6926
6936
|
* @param {Tensor} logits The logits.
|
|
6927
|
-
* @returns {
|
|
6937
|
+
* @returns {Tensor} The processed logits.
|
|
6928
6938
|
*/
|
|
6929
6939
|
_call(input_ids, logits) {
|
|
6930
6940
|
for (let i = 0; i < input_ids.length; ++i) {
|
|
@@ -6962,7 +6972,7 @@ class MinNewTokensLengthLogitsProcessor extends LogitsProcessor {
|
|
|
6962
6972
|
* Apply logit processor.
|
|
6963
6973
|
* @param {bigint[][]} input_ids The input IDs.
|
|
6964
6974
|
* @param {Tensor} logits The logits.
|
|
6965
|
-
* @returns {
|
|
6975
|
+
* @returns {Tensor} The processed logits.
|
|
6966
6976
|
*/
|
|
6967
6977
|
_call(input_ids, logits) {
|
|
6968
6978
|
for (let i = 0; i < input_ids.length; ++i) {
|
|
@@ -6995,7 +7005,7 @@ class NoBadWordsLogitsProcessor extends LogitsProcessor {
|
|
|
6995
7005
|
* Apply logit processor.
|
|
6996
7006
|
* @param {bigint[][]} input_ids The input IDs.
|
|
6997
7007
|
* @param {Tensor} logits The logits.
|
|
6998
|
-
* @returns {
|
|
7008
|
+
* @returns {Tensor} The processed logits.
|
|
6999
7009
|
*/
|
|
7000
7010
|
_call(input_ids, logits) {
|
|
7001
7011
|
for (let i = 0; i < input_ids.length; ++i) {
|
|
@@ -7056,7 +7066,7 @@ class ClassifierFreeGuidanceLogitsProcessor extends LogitsProcessor {
|
|
|
7056
7066
|
* Apply logit processor.
|
|
7057
7067
|
* @param {bigint[][]} input_ids The input IDs.
|
|
7058
7068
|
* @param {Tensor} logits The logits.
|
|
7059
|
-
* @returns {
|
|
7069
|
+
* @returns {Tensor} The processed logits.
|
|
7060
7070
|
*/
|
|
7061
7071
|
_call(input_ids, logits) {
|
|
7062
7072
|
if (logits.dims[0] !== 2 * input_ids.length) {
|
|
@@ -7110,7 +7120,7 @@ class TemperatureLogitsWarper extends LogitsWarper {
|
|
|
7110
7120
|
* Apply logit warper.
|
|
7111
7121
|
* @param {bigint[][]} input_ids The input IDs.
|
|
7112
7122
|
* @param {Tensor} logits The logits.
|
|
7113
|
-
* @returns {
|
|
7123
|
+
* @returns {Tensor} The processed logits.
|
|
7114
7124
|
*/
|
|
7115
7125
|
_call(input_ids, logits) {
|
|
7116
7126
|
const batch_logits_data = /** @type {Float32Array} */(logits.data);
|
|
@@ -7625,7 +7635,12 @@ const stdout_write = _env_js__WEBPACK_IMPORTED_MODULE_2__.apis.IS_PROCESS_AVAILA
|
|
|
7625
7635
|
class TextStreamer extends BaseStreamer {
|
|
7626
7636
|
/**
|
|
7627
7637
|
*
|
|
7628
|
-
* @param {import('../tokenizers.js').PreTrainedTokenizer} tokenizer
|
|
7638
|
+
* @param {import('../tokenizers.js').PreTrainedTokenizer} tokenizer
|
|
7639
|
+
* @param {Object} options
|
|
7640
|
+
* @param {boolean} [options.skip_prompt=false] Whether to skip the prompt tokens
|
|
7641
|
+
* @param {function(string): void} [options.callback_function=null] Function to call when a piece of text is ready to display
|
|
7642
|
+
* @param {function(bigint[]): void} [options.token_callback_function=null] Function to call when a new token is generated
|
|
7643
|
+
* @param {Object} [options.decode_kwargs={}] Additional keyword arguments to pass to the tokenizer's decode method
|
|
7629
7644
|
*/
|
|
7630
7645
|
constructor(tokenizer, {
|
|
7631
7646
|
skip_prompt = false,
|
|
@@ -7734,7 +7749,7 @@ class WhisperTextStreamer extends TextStreamer {
|
|
|
7734
7749
|
* @param {Object} options
|
|
7735
7750
|
* @param {boolean} [options.skip_prompt=false] Whether to skip the prompt tokens
|
|
7736
7751
|
* @param {function(string): void} [options.callback_function=null] Function to call when a piece of text is ready to display
|
|
7737
|
-
* @param {function(
|
|
7752
|
+
* @param {function(bigint[]): void} [options.token_callback_function=null] Function to call when a new token is generated
|
|
7738
7753
|
* @param {function(number): void} [options.on_chunk_start=null] Function to call when a new chunk starts
|
|
7739
7754
|
* @param {function(number): void} [options.on_chunk_end=null] Function to call when a chunk ends
|
|
7740
7755
|
* @param {function(): void} [options.on_finalize=null] Function to call when the stream is finalized
|
|
@@ -8013,6 +8028,11 @@ __webpack_require__.r(__webpack_exports__);
|
|
|
8013
8028
|
/* harmony export */ HubertForSequenceClassification: () => (/* binding */ HubertForSequenceClassification),
|
|
8014
8029
|
/* harmony export */ HubertModel: () => (/* binding */ HubertModel),
|
|
8015
8030
|
/* harmony export */ HubertPreTrainedModel: () => (/* binding */ HubertPreTrainedModel),
|
|
8031
|
+
/* harmony export */ IJepaForImageClassification: () => (/* binding */ IJepaForImageClassification),
|
|
8032
|
+
/* harmony export */ IJepaModel: () => (/* binding */ IJepaModel),
|
|
8033
|
+
/* harmony export */ IJepaPreTrainedModel: () => (/* binding */ IJepaPreTrainedModel),
|
|
8034
|
+
/* harmony export */ Idefics3ForConditionalGeneration: () => (/* binding */ Idefics3ForConditionalGeneration),
|
|
8035
|
+
/* harmony export */ Idefics3PreTrainedModel: () => (/* binding */ Idefics3PreTrainedModel),
|
|
8016
8036
|
/* harmony export */ ImageMattingOutput: () => (/* binding */ ImageMattingOutput),
|
|
8017
8037
|
/* harmony export */ JAISLMHeadModel: () => (/* binding */ JAISLMHeadModel),
|
|
8018
8038
|
/* harmony export */ JAISModel: () => (/* binding */ JAISModel),
|
|
@@ -8102,6 +8122,9 @@ __webpack_require__.r(__webpack_exports__);
|
|
|
8102
8122
|
/* harmony export */ OPTForCausalLM: () => (/* binding */ OPTForCausalLM),
|
|
8103
8123
|
/* harmony export */ OPTModel: () => (/* binding */ OPTModel),
|
|
8104
8124
|
/* harmony export */ OPTPreTrainedModel: () => (/* binding */ OPTPreTrainedModel),
|
|
8125
|
+
/* harmony export */ Olmo2ForCausalLM: () => (/* binding */ Olmo2ForCausalLM),
|
|
8126
|
+
/* harmony export */ Olmo2Model: () => (/* binding */ Olmo2Model),
|
|
8127
|
+
/* harmony export */ Olmo2PreTrainedModel: () => (/* binding */ Olmo2PreTrainedModel),
|
|
8105
8128
|
/* harmony export */ OlmoForCausalLM: () => (/* binding */ OlmoForCausalLM),
|
|
8106
8129
|
/* harmony export */ OlmoModel: () => (/* binding */ OlmoModel),
|
|
8107
8130
|
/* harmony export */ OlmoPreTrainedModel: () => (/* binding */ OlmoPreTrainedModel),
|
|
@@ -8114,6 +8137,8 @@ __webpack_require__.r(__webpack_exports__);
|
|
|
8114
8137
|
/* harmony export */ Owlv2ForObjectDetection: () => (/* binding */ Owlv2ForObjectDetection),
|
|
8115
8138
|
/* harmony export */ Owlv2Model: () => (/* binding */ Owlv2Model),
|
|
8116
8139
|
/* harmony export */ Owlv2PreTrainedModel: () => (/* binding */ Owlv2PreTrainedModel),
|
|
8140
|
+
/* harmony export */ PaliGemmaForConditionalGeneration: () => (/* binding */ PaliGemmaForConditionalGeneration),
|
|
8141
|
+
/* harmony export */ PaliGemmaPreTrainedModel: () => (/* binding */ PaliGemmaPreTrainedModel),
|
|
8117
8142
|
/* harmony export */ PatchTSMixerForPrediction: () => (/* binding */ PatchTSMixerForPrediction),
|
|
8118
8143
|
/* harmony export */ PatchTSMixerModel: () => (/* binding */ PatchTSMixerModel),
|
|
8119
8144
|
/* harmony export */ PatchTSMixerPreTrainedModel: () => (/* binding */ PatchTSMixerPreTrainedModel),
|
|
@@ -8419,6 +8444,22 @@ async function getSession(pretrained_model_name_or_path, fileName, options) {
|
|
|
8419
8444
|
}
|
|
8420
8445
|
}
|
|
8421
8446
|
|
|
8447
|
+
if (dtype === _utils_dtypes_js__WEBPACK_IMPORTED_MODULE_2__.DATA_TYPES.auto) {
|
|
8448
|
+
// Try to choose the auto dtype based on the custom config
|
|
8449
|
+
let config_dtype = custom_config.dtype;
|
|
8450
|
+
if (typeof config_dtype !== 'string') {
|
|
8451
|
+
config_dtype = config_dtype[fileName];
|
|
8452
|
+
}
|
|
8453
|
+
|
|
8454
|
+
if (config_dtype && config_dtype !== _utils_dtypes_js__WEBPACK_IMPORTED_MODULE_2__.DATA_TYPES.auto && _utils_dtypes_js__WEBPACK_IMPORTED_MODULE_2__.DATA_TYPES.hasOwnProperty(config_dtype)) {
|
|
8455
|
+
// Defined by the custom config, and is not "auto"
|
|
8456
|
+
dtype = config_dtype;
|
|
8457
|
+
} else {
|
|
8458
|
+
// Choose default dtype based on device, falling back to fp32
|
|
8459
|
+
dtype = _utils_dtypes_js__WEBPACK_IMPORTED_MODULE_2__.DEFAULT_DEVICE_DTYPE_MAPPING[selectedDevice] ?? _utils_dtypes_js__WEBPACK_IMPORTED_MODULE_2__.DATA_TYPES.fp32;
|
|
8460
|
+
}
|
|
8461
|
+
}
|
|
8462
|
+
|
|
8422
8463
|
const selectedDtype = /** @type {import("./utils/dtypes.js").DataType} */(dtype);
|
|
8423
8464
|
|
|
8424
8465
|
if (!_utils_dtypes_js__WEBPACK_IMPORTED_MODULE_2__.DEFAULT_DTYPE_SUFFIX_MAPPING.hasOwnProperty(selectedDtype)) {
|
|
@@ -8624,9 +8665,17 @@ async function sessionRun(session, inputs) {
|
|
|
8624
8665
|
output = replaceTensors(output);
|
|
8625
8666
|
return output;
|
|
8626
8667
|
} catch (e) {
|
|
8668
|
+
// Error messages can be long (nested) and uninformative. For this reason,
|
|
8669
|
+
// we apply minor formatting to show the most important information
|
|
8670
|
+
const formatted = Object.fromEntries(Object.entries(checkedInputs)
|
|
8671
|
+
.map(([k, { type, dims, data }]) => [k, {
|
|
8672
|
+
// Extract these properties from the underlying ORT tensor
|
|
8673
|
+
type, dims, data,
|
|
8674
|
+
}]));
|
|
8675
|
+
|
|
8627
8676
|
// This usually occurs when the inputs are of the wrong type.
|
|
8628
8677
|
console.error(`An error occurred during model execution: "${e}".`);
|
|
8629
|
-
console.error('Inputs given to model:',
|
|
8678
|
+
console.error('Inputs given to model:', formatted);
|
|
8630
8679
|
throw e;
|
|
8631
8680
|
}
|
|
8632
8681
|
}
|
|
@@ -8771,7 +8820,9 @@ async function decoderForward(self, model_inputs, is_encoder_decoder = false) {
|
|
|
8771
8820
|
new_model_inputs.use_cache_branch = boolTensor(!!past_key_values);
|
|
8772
8821
|
}
|
|
8773
8822
|
if (session.inputNames.includes('position_ids') && new_model_inputs.attention_mask && !new_model_inputs.position_ids) {
|
|
8774
|
-
|
|
8823
|
+
// NOTE: Handle a special case for paligemma models, where positions are 1-indexed
|
|
8824
|
+
const start_index = self.config.model_type === 'paligemma' ? 1 : 0;
|
|
8825
|
+
new_model_inputs.position_ids = createPositionIds(new_model_inputs, past_key_values, start_index);
|
|
8775
8826
|
}
|
|
8776
8827
|
|
|
8777
8828
|
// Unpack the `past_key_values` object into model inputs
|
|
@@ -8783,6 +8834,39 @@ async function decoderForward(self, model_inputs, is_encoder_decoder = false) {
|
|
|
8783
8834
|
}
|
|
8784
8835
|
|
|
8785
8836
|
|
|
8837
|
+
|
|
8838
|
+
function default_merge_input_ids_with_image_features({
|
|
8839
|
+
image_token_id,
|
|
8840
|
+
inputs_embeds,
|
|
8841
|
+
image_features,
|
|
8842
|
+
input_ids,
|
|
8843
|
+
attention_mask,
|
|
8844
|
+
}) {
|
|
8845
|
+
const image_tokens = input_ids.tolist().map(ids =>
|
|
8846
|
+
ids.reduce((acc, x, idx) => {
|
|
8847
|
+
if (x == image_token_id) acc.push(idx);
|
|
8848
|
+
return acc;
|
|
8849
|
+
}, [])
|
|
8850
|
+
);
|
|
8851
|
+
const n_image_tokens = image_tokens.reduce((acc, x) => acc + x.length, 0);
|
|
8852
|
+
const n_image_features = image_features.dims[0];
|
|
8853
|
+
if (n_image_tokens !== n_image_features) {
|
|
8854
|
+
throw new Error(`Image features and image tokens do not match: tokens: ${n_image_tokens}, features ${n_image_features}`);
|
|
8855
|
+
}
|
|
8856
|
+
|
|
8857
|
+
// Equivalent to performing a masked_scatter
|
|
8858
|
+
let img = 0;
|
|
8859
|
+
for (let i = 0; i < image_tokens.length; ++i) {
|
|
8860
|
+
const tokens = image_tokens[i];
|
|
8861
|
+
const embeds = inputs_embeds[i];
|
|
8862
|
+
for (let j = 0; j < tokens.length; ++j) {
|
|
8863
|
+
embeds[tokens[j]].data.set(image_features[img++].data)
|
|
8864
|
+
}
|
|
8865
|
+
}
|
|
8866
|
+
return { inputs_embeds, attention_mask }
|
|
8867
|
+
}
|
|
8868
|
+
|
|
8869
|
+
|
|
8786
8870
|
/**
|
|
8787
8871
|
* Forward pass of an image-text-to-text model.
|
|
8788
8872
|
* @param {Object} self The image-text-to-text model model.
|
|
@@ -8874,14 +8958,14 @@ async function imageTextToTextForward(self, {
|
|
|
8874
8958
|
* @param {Tensor} attention_mask
|
|
8875
8959
|
* @returns {{data: BigInt64Array, dims: number[]}}
|
|
8876
8960
|
*/
|
|
8877
|
-
function cumsum_masked_fill(attention_mask) {
|
|
8961
|
+
function cumsum_masked_fill(attention_mask, start_index = 0) {
|
|
8878
8962
|
const [bz, seq_len] = attention_mask.dims;
|
|
8879
8963
|
const attn_mask_data = attention_mask.data;
|
|
8880
8964
|
|
|
8881
8965
|
const data = new BigInt64Array(attn_mask_data.length);
|
|
8882
8966
|
for (let i = 0; i < bz; ++i) {
|
|
8883
8967
|
const start = i * seq_len;
|
|
8884
|
-
let sum = BigInt(
|
|
8968
|
+
let sum = BigInt(start_index);
|
|
8885
8969
|
for (let j = 0; j < seq_len; ++j) {
|
|
8886
8970
|
const index = start + j;
|
|
8887
8971
|
if (attn_mask_data[index] === 0n) {
|
|
@@ -8908,10 +8992,10 @@ function cumsum_masked_fill(attention_mask) {
|
|
|
8908
8992
|
* position_ids = position_ids[:, -input_ids.shape[1] :]
|
|
8909
8993
|
* ```
|
|
8910
8994
|
*/
|
|
8911
|
-
function createPositionIds(model_inputs, past_key_values = null) {
|
|
8995
|
+
function createPositionIds(model_inputs, past_key_values = null, start_index = 0) {
|
|
8912
8996
|
const { input_ids, inputs_embeds, attention_mask } = model_inputs;
|
|
8913
8997
|
|
|
8914
|
-
const { data, dims } = cumsum_masked_fill(attention_mask);
|
|
8998
|
+
const { data, dims } = cumsum_masked_fill(attention_mask, start_index);
|
|
8915
8999
|
let position_ids = new _utils_tensor_js__WEBPACK_IMPORTED_MODULE_9__.Tensor('int64', data, dims);
|
|
8916
9000
|
if (past_key_values) {
|
|
8917
9001
|
const offset = -(input_ids ?? inputs_embeds).dims.at(1);
|
|
@@ -9250,7 +9334,10 @@ class PreTrainedModel extends _utils_generic_js__WEBPACK_IMPORTED_MODULE_3__.Cal
|
|
|
9250
9334
|
|
|
9251
9335
|
} else { // should be MODEL_TYPES.EncoderOnly
|
|
9252
9336
|
if (modelType !== MODEL_TYPES.EncoderOnly) {
|
|
9253
|
-
|
|
9337
|
+
const type = modelName ?? config?.model_type;
|
|
9338
|
+
if (type !== 'custom') {
|
|
9339
|
+
console.warn(`Model type for '${type}' not found, assuming encoder-only architecture. Please report this at ${_utils_constants_js__WEBPACK_IMPORTED_MODULE_6__.GITHUB_ISSUE_URL}.`)
|
|
9340
|
+
}
|
|
9254
9341
|
}
|
|
9255
9342
|
info = await Promise.all([
|
|
9256
9343
|
constructSessions(pretrained_model_name_or_path, {
|
|
@@ -9994,7 +10081,7 @@ class PreTrainedModel extends _utils_generic_js__WEBPACK_IMPORTED_MODULE_3__.Cal
|
|
|
9994
10081
|
const dtype = session?.config?.kv_cache_dtype ?? 'float32';
|
|
9995
10082
|
const empty = (dtype === 'float16') ? new Uint16Array() : [];
|
|
9996
10083
|
|
|
9997
|
-
const batch_size = (decoderFeeds[this.main_input_name] ?? decoderFeeds.attention_mask)
|
|
10084
|
+
const batch_size = (decoderFeeds[this.main_input_name] ?? decoderFeeds.attention_mask)?.dims?.[0] ?? 1;
|
|
9998
10085
|
const shapes = (0,_configs_js__WEBPACK_IMPORTED_MODULE_0__.getKeyValueShapes)(this.config, { batch_size });
|
|
9999
10086
|
|
|
10000
10087
|
for (const name in shapes) {
|
|
@@ -11541,8 +11628,8 @@ class VisionEncoderDecoderModel extends PreTrainedModel {
|
|
|
11541
11628
|
class LlavaPreTrainedModel extends PreTrainedModel {
|
|
11542
11629
|
forward_params = [
|
|
11543
11630
|
'input_ids',
|
|
11544
|
-
'pixel_values',
|
|
11545
11631
|
'attention_mask',
|
|
11632
|
+
'pixel_values',
|
|
11546
11633
|
'position_ids',
|
|
11547
11634
|
'past_key_values',
|
|
11548
11635
|
];
|
|
@@ -11724,6 +11811,70 @@ class Florence2ForConditionalGeneration extends Florence2PreTrainedModel {
|
|
|
11724
11811
|
return decoder_outputs;
|
|
11725
11812
|
}
|
|
11726
11813
|
}
|
|
11814
|
+
|
|
11815
|
+
/**
 * Base class for PaliGemma models, declaring the inputs forwarded to the model session.
 */
class PaliGemmaPreTrainedModel extends PreTrainedModel {
    // Names of the inputs passed through to the forward pass, in order.
    forward_params = [
        'input_ids',
        // 'inputs_embeds',
        'attention_mask',
        'pixel_values',
        'position_ids',
        'past_key_values',
    ];
}
|
|
11825
|
+
|
|
11826
|
+
/**
 * PaliGemma model for conditional (image-text-to-text) generation.
 */
class PaliGemmaForConditionalGeneration extends PaliGemmaPreTrainedModel {
    /**
     * Splice the projected image features into the token embedding sequence.
     * The image features are flattened to 2D (num_patches, hidden_size) before
     * delegating to the shared merge implementation.
     */
    _merge_input_ids_with_image_features(kwargs) {
        const { image_features } = kwargs;
        const hidden_size = image_features.dims.at(-1);
        const flattened_features = image_features.view(-1, hidden_size);

        // @ts-ignore
        const image_token_id = this.config.image_token_index;
        return default_merge_input_ids_with_image_features({
            image_token_id,
            ...kwargs,
            image_features: flattened_features,
        });
    }
}
|
|
11839
|
+
|
|
11840
|
+
//////////////////////////////////////////////////
|
|
11841
|
+
// Idefics3 Models
|
|
11842
|
+
/**
 * Base class for Idefics3 models, declaring the inputs forwarded to the model session.
 */
class Idefics3PreTrainedModel extends PreTrainedModel {
    // Names of the inputs passed through to the forward pass, in order.
    // Note: includes `pixel_attention_mask` for masking padded image patches.
    forward_params = [
        'input_ids',
        'attention_mask',
        'pixel_values',
        'pixel_attention_mask',
        'position_ids',
        'past_key_values',
    ];
}
|
|
11852
|
+
|
|
11853
|
+
/**
 * The Idefics3 model, consisting of a vision backbone and a language model.
 * (The original doc comment said "LLAVA"; this class is the Idefics3 variant.)
 */
class Idefics3ForConditionalGeneration extends Idefics3PreTrainedModel {

    /**
     * Run the `vision_encoder` session over a batch of image patches.
     * @returns the `image_features` output of the vision encoder session.
     */
    async encode_image({ pixel_values, pixel_attention_mask }) {
        const session = this.sessions['vision_encoder'];
        const outputs = await sessionRun(session, { pixel_values, pixel_attention_mask });
        return outputs.image_features;
    }

    /**
     * Splice the encoded image features into the token embedding sequence.
     * The image features are flattened to 2D (num_patches, hidden_size) before
     * delegating to the shared merge implementation.
     */
    _merge_input_ids_with_image_features(kwargs) {
        const { image_features } = kwargs;
        const hidden_size = image_features.dims.at(-1);
        const flattened_features = image_features.view(-1, hidden_size);

        // @ts-ignore
        const image_token_id = this.config.image_token_id;
        return default_merge_input_ids_with_image_features({
            image_token_id,
            ...kwargs,
            image_features: flattened_features,
        });
    }
}
|
|
11875
|
+
//////////////////////////////////////////////////
|
|
11876
|
+
|
|
11877
|
+
//////////////////////////////////////////////////
|
|
11727
11878
|
class CLIPPreTrainedModel extends PreTrainedModel { }
|
|
11728
11879
|
|
|
11729
11880
|
/**
|
|
@@ -12223,6 +12374,13 @@ class OlmoModel extends OlmoPreTrainedModel { }
|
|
|
12223
12374
|
class OlmoForCausalLM extends OlmoPreTrainedModel { }
|
|
12224
12375
|
//////////////////////////////////////////////////
|
|
12225
12376
|
|
|
12377
|
+
//////////////////////////////////////////////////
|
|
12378
|
+
// OLMo2 models
|
|
12379
|
+
// Base class for OLMo2 models (registered as decoder-only; see MODEL_MAPPING_NAMES_DECODER_ONLY).
class Olmo2PreTrainedModel extends PreTrainedModel { }
// The bare OLMo2 model, registered under 'olmo2' in the decoder-only model mapping.
class Olmo2Model extends Olmo2PreTrainedModel { }
// OLMo2 variant registered under 'olmo2' in the causal-LM mapping (used for text generation).
class Olmo2ForCausalLM extends Olmo2PreTrainedModel { }
|
|
12382
|
+
//////////////////////////////////////////////////
|
|
12383
|
+
|
|
12226
12384
|
|
|
12227
12385
|
//////////////////////////////////////////////////
|
|
12228
12386
|
// Granite models
|
|
@@ -12517,36 +12675,12 @@ class Qwen2VLForConditionalGeneration extends Qwen2VLPreTrainedModel {
|
|
|
12517
12675
|
return features;
|
|
12518
12676
|
}
|
|
12519
12677
|
|
|
12520
|
-
_merge_input_ids_with_image_features({
|
|
12521
|
-
|
|
12522
|
-
|
|
12523
|
-
|
|
12524
|
-
|
|
12525
|
-
|
|
12526
|
-
// @ts-ignore
|
|
12527
|
-
const { image_token_id } = this.config;
|
|
12528
|
-
const image_tokens = input_ids.tolist().map(ids =>
|
|
12529
|
-
ids.reduce((acc, x, idx) => {
|
|
12530
|
-
if (x == image_token_id) acc.push(idx);
|
|
12531
|
-
return acc;
|
|
12532
|
-
}, [])
|
|
12533
|
-
);
|
|
12534
|
-
const n_image_tokens = image_tokens.reduce((acc, x) => acc + x.length, 0);
|
|
12535
|
-
const n_image_features = image_features.dims[0];
|
|
12536
|
-
if (n_image_tokens !== n_image_features) {
|
|
12537
|
-
throw new Error(`Image features and image tokens do not match: tokens: ${n_image_tokens}, features ${n_image_features}`);
|
|
12538
|
-
}
|
|
12539
|
-
|
|
12540
|
-
// Equivalent to performing a masked_scatter
|
|
12541
|
-
let img = 0;
|
|
12542
|
-
for (let i = 0; i < image_tokens.length; ++i) {
|
|
12543
|
-
const tokens = image_tokens[i];
|
|
12544
|
-
const embeds = inputs_embeds[i];
|
|
12545
|
-
for (let j = 0; j < tokens.length; ++j) {
|
|
12546
|
-
embeds[tokens[j]].data.set(image_features[img++].data)
|
|
12547
|
-
}
|
|
12548
|
-
}
|
|
12549
|
-
return { inputs_embeds, attention_mask }
|
|
12678
|
+
_merge_input_ids_with_image_features(kwargs) {
|
|
12679
|
+
return default_merge_input_ids_with_image_features({
|
|
12680
|
+
// @ts-ignore
|
|
12681
|
+
image_token_id: this.config.image_token_id,
|
|
12682
|
+
...kwargs
|
|
12683
|
+
})
|
|
12550
12684
|
}
|
|
12551
12685
|
|
|
12552
12686
|
prepare_inputs_for_generation(input_ids, model_inputs, generation_config) {
|
|
@@ -12663,6 +12797,20 @@ class ViTForImageClassification extends ViTPreTrainedModel {
|
|
|
12663
12797
|
//////////////////////////////////////////////////
|
|
12664
12798
|
|
|
12665
12799
|
|
|
12800
|
+
//////////////////////////////////////////////////
|
|
12801
|
+
// Base class for I-JEPA models.
class IJepaPreTrainedModel extends PreTrainedModel { }
// The bare I-JEPA model, registered under 'ijepa' in the encoder-only model mapping.
class IJepaModel extends IJepaPreTrainedModel { }
|
|
12803
|
+
/**
 * I-JEPA model with an image-classification head
 * (registered under 'ijepa' in the image-classification mapping).
 */
class IJepaForImageClassification extends IJepaPreTrainedModel {
    /**
     * Run the model and wrap the raw result in a `SequenceClassifierOutput`.
     * @param {any} model_inputs
     */
    async _call(model_inputs) {
        const outputs = await super._call(model_inputs);
        return new SequenceClassifierOutput(outputs);
    }
}
|
|
12811
|
+
//////////////////////////////////////////////////
|
|
12812
|
+
|
|
12813
|
+
|
|
12666
12814
|
//////////////////////////////////////////////////
|
|
12667
12815
|
class VitPosePreTrainedModel extends PreTrainedModel { }
|
|
12668
12816
|
|
|
@@ -14933,6 +15081,7 @@ const MODEL_MAPPING_NAMES_ENCODER_ONLY = new Map([
|
|
|
14933
15081
|
['rt_detr', ['RTDetrModel', RTDetrModel]],
|
|
14934
15082
|
['table-transformer', ['TableTransformerModel', TableTransformerModel]],
|
|
14935
15083
|
['vit', ['ViTModel', ViTModel]],
|
|
15084
|
+
['ijepa', ['IJepaModel', IJepaModel]],
|
|
14936
15085
|
['pvt', ['PvtModel', PvtModel]],
|
|
14937
15086
|
['vit_msn', ['ViTMSNModel', ViTMSNModel]],
|
|
14938
15087
|
['vit_mae', ['ViTMAEModel', ViTMAEModel]],
|
|
@@ -14997,6 +15146,7 @@ const MODEL_MAPPING_NAMES_DECODER_ONLY = new Map([
|
|
|
14997
15146
|
['codegen', ['CodeGenModel', CodeGenModel]],
|
|
14998
15147
|
['llama', ['LlamaModel', LlamaModel]],
|
|
14999
15148
|
['olmo', ['OlmoModel', OlmoModel]],
|
|
15149
|
+
['olmo2', ['Olmo2Model', Olmo2Model]],
|
|
15000
15150
|
['mobilellm', ['MobileLLMModel', MobileLLMModel]],
|
|
15001
15151
|
['granite', ['GraniteModel', GraniteModel]],
|
|
15002
15152
|
['cohere', ['CohereModel', CohereModel]],
|
|
@@ -15088,6 +15238,7 @@ const MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = new Map([
|
|
|
15088
15238
|
['codegen', ['CodeGenForCausalLM', CodeGenForCausalLM]],
|
|
15089
15239
|
['llama', ['LlamaForCausalLM', LlamaForCausalLM]],
|
|
15090
15240
|
['olmo', ['OlmoForCausalLM', OlmoForCausalLM]],
|
|
15241
|
+
['olmo2', ['Olmo2ForCausalLM', Olmo2ForCausalLM]],
|
|
15091
15242
|
['mobilellm', ['MobileLLMForCausalLM', MobileLLMForCausalLM]],
|
|
15092
15243
|
['granite', ['GraniteForCausalLM', GraniteForCausalLM]],
|
|
15093
15244
|
['cohere', ['CohereForCausalLM', CohereForCausalLM]],
|
|
@@ -15151,6 +15302,7 @@ const MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = new Map([
|
|
|
15151
15302
|
|
|
15152
15303
|
const MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = new Map([
|
|
15153
15304
|
['vision-encoder-decoder', ['VisionEncoderDecoderModel', VisionEncoderDecoderModel]],
|
|
15305
|
+
['idefics3', ['Idefics3ForConditionalGeneration', Idefics3ForConditionalGeneration]],
|
|
15154
15306
|
]);
|
|
15155
15307
|
|
|
15156
15308
|
const MODEL_FOR_IMAGE_TEXT_TO_TEXT_MAPPING_NAMES = new Map([
|
|
@@ -15159,6 +15311,8 @@ const MODEL_FOR_IMAGE_TEXT_TO_TEXT_MAPPING_NAMES = new Map([
|
|
|
15159
15311
|
['moondream1', ['Moondream1ForConditionalGeneration', Moondream1ForConditionalGeneration]],
|
|
15160
15312
|
['florence2', ['Florence2ForConditionalGeneration', Florence2ForConditionalGeneration]],
|
|
15161
15313
|
['qwen2-vl', ['Qwen2VLForConditionalGeneration', Qwen2VLForConditionalGeneration]],
|
|
15314
|
+
['idefics3', ['Idefics3ForConditionalGeneration', Idefics3ForConditionalGeneration]],
|
|
15315
|
+
['paligemma', ['PaliGemmaForConditionalGeneration', PaliGemmaForConditionalGeneration]],
|
|
15162
15316
|
]);
|
|
15163
15317
|
|
|
15164
15318
|
const MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES = new Map([
|
|
@@ -15167,6 +15321,7 @@ const MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES = new Map([
|
|
|
15167
15321
|
|
|
15168
15322
|
const MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = new Map([
|
|
15169
15323
|
['vit', ['ViTForImageClassification', ViTForImageClassification]],
|
|
15324
|
+
['ijepa', ['IJepaForImageClassification', IJepaForImageClassification]],
|
|
15170
15325
|
['pvt', ['PvtForImageClassification', PvtForImageClassification]],
|
|
15171
15326
|
['vit_msn', ['ViTMSNForImageClassification', ViTMSNForImageClassification]],
|
|
15172
15327
|
['fastvit', ['FastViTForImageClassification', FastViTForImageClassification]],
|
|
@@ -16770,6 +16925,394 @@ __webpack_require__.r(__webpack_exports__);
|
|
|
16770
16925
|
class GLPNFeatureExtractor extends _base_image_processors_utils_js__WEBPACK_IMPORTED_MODULE_0__.ImageProcessor { }
|
|
16771
16926
|
|
|
16772
16927
|
|
|
16928
|
+
/***/ }),
|
|
16929
|
+
|
|
16930
|
+
/***/ "./src/models/idefics3/image_processing_idefics3.js":
|
|
16931
|
+
/*!**********************************************************!*\
|
|
16932
|
+
!*** ./src/models/idefics3/image_processing_idefics3.js ***!
|
|
16933
|
+
\**********************************************************/
|
|
16934
|
+
/***/ ((__unused_webpack___webpack_module__, __webpack_exports__, __webpack_require__) => {
|
|
16935
|
+
|
|
16936
|
+
__webpack_require__.r(__webpack_exports__);
|
|
16937
|
+
/* harmony export */ __webpack_require__.d(__webpack_exports__, {
|
|
16938
|
+
/* harmony export */ Idefics3ImageProcessor: () => (/* binding */ Idefics3ImageProcessor)
|
|
16939
|
+
/* harmony export */ });
|
|
16940
|
+
/* harmony import */ var _base_image_processors_utils_js__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! ../../base/image_processors_utils.js */ "./src/base/image_processors_utils.js");
|
|
16941
|
+
/* harmony import */ var _utils_tensor_js__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(/*! ../../utils/tensor.js */ "./src/utils/tensor.js");
|
|
16942
|
+
|
|
16943
|
+
|
|
16944
|
+
|
|
16945
|
+
|
|
16946
|
+
|
|
16947
|
+
/**
 * Image processor for Idefics3: optionally splits large images into a grid of
 * fixed-size patches (plus a resized "global" image), stacks them into a single
 * pixel tensor, and builds a matching pixel attention mask.
 */
class Idefics3ImageProcessor extends _base_image_processors_utils_js__WEBPACK_IMPORTED_MODULE_0__.ImageProcessor {
    constructor(config) {
        super(config);

        // Whether to split images larger than `max_image_size` into patches (default: true).
        this.do_image_splitting = config.do_image_splitting ?? true;
        // Assumed shape: { longest_edge: number } — TODO confirm against processor configs.
        this.max_image_size = config.max_image_size;
    }

    /**
     * @typedef {import('../../utils/image.js').RawImage} RawImage
     * @typedef {import('../../utils/tensor.js').Tensor} Tensor
     */

    /**
     * Calculate size to resize images to, to be multiples of `vision_encoder_max_size` while preserving the aspect ratio.
     * @param {Tensor} pixel_values Tensor of the image to resize.
     * @param {number} vision_encoder_max_size Maximum size of the output image. If the image is larger than this size,
     * it will be split into patches of this size, and the original image will be concatenated with the patches, resized to max_size.
     */
    get_resize_for_vision_encoder(pixel_values, vision_encoder_max_size) {
        let [height, width] = pixel_values.dims.slice(-2);

        const aspect_ratio = width / height;
        if (width >= height) {
            // Round the longer side up to a multiple of the patch size first,
            // then derive the other side from the aspect ratio and round it up too.
            width = Math.ceil(width / vision_encoder_max_size) * vision_encoder_max_size;
            height = Math.floor(width / aspect_ratio);
            height = Math.ceil(height / vision_encoder_max_size) * vision_encoder_max_size;
        } else {
            height = Math.ceil(height / vision_encoder_max_size) * vision_encoder_max_size;
            width = Math.floor(height * aspect_ratio);
            width = Math.ceil(width / vision_encoder_max_size) * vision_encoder_max_size;
        }
        return { height, width };
    }

    /** @param {RawImage|RawImage[]|RawImage[][]} images */
    async _call(images, {
        do_image_splitting = null,
        return_row_col_info = false,
    } = {}) {

        // Normalize the input to a 2D batch: outer = batch entries, inner = images per entry.
        /** @type {RawImage[][]} */
        let batched_2d_images;
        if (!Array.isArray(images)) {
            batched_2d_images = [[images]];
        } else {
            if (images.length === 0 || !images[0]) {
                throw new Error("No images provided.");
            }
            if (!Array.isArray(images[0])) {
                batched_2d_images = [/** @type {RawImage[]} */(images)];
            } else {
                batched_2d_images = /** @type {RawImage[][]} */(images);
            }
        }

        // List of tensors, each with shape [patches, channels, height, width]
        let all_pixel_values = [];
        // Per-batch-entry lists of how many split rows/cols each image produced
        // (0/0 means the image was not split).
        let images_list_rows = [];
        let images_list_cols = [];

        const original_sizes = [];
        const reshaped_input_sizes = [];
        for (const image_batch of batched_2d_images) {

            let images_list = await Promise.all(image_batch.map(x => this.preprocess(x)));

            // Original sizes of images
            original_sizes.push(...images_list.map(x => x.original_size));

            // Reshaped sizes of images, before padding or cropping
            reshaped_input_sizes.push(...images_list.map(x => x.reshaped_input_size));

            // Convert images to 4D tensors for easier processing
            images_list.forEach(x => x.pixel_values.unsqueeze_(0));

            const { longest_edge } = this.max_image_size;

            /** @type {Tensor[]} */
            let images_tensor;
            if (do_image_splitting ?? this.do_image_splitting) {
                let image_rows = new Array(images_list.length);
                let image_cols = new Array(images_list.length);

                // We first resize both height and width of each image to the nearest max_image_size multiple, disregarding the aspect ratio
                images_tensor = await Promise.all(images_list.map(async (x, i) => {
                    const new_size = this.get_resize_for_vision_encoder(x.pixel_values, longest_edge);

                    const resized = await (0,_utils_tensor_js__WEBPACK_IMPORTED_MODULE_1__.interpolate_4d)(x.pixel_values, {
                        size: [new_size.height, new_size.width],
                    });

                    // Split into patches; the (resized) global image is appended as the last frame.
                    const { frames, num_splits_h, num_splits_w } = await this.split_image(resized, this.max_image_size);
                    image_rows[i] = num_splits_h;
                    image_cols[i] = num_splits_w;
                    return (0,_utils_tensor_js__WEBPACK_IMPORTED_MODULE_1__.cat)(frames, 0);
                }));

                images_list_rows.push(image_rows);
                images_list_cols.push(image_cols);

            } else {
                // No splitting: resize each image to a square of the maximum edge length.
                /** @type {[number, number]} */
                const size = [longest_edge, longest_edge];
                images_tensor = await Promise.all(
                    images_list.map(x => (0,_utils_tensor_js__WEBPACK_IMPORTED_MODULE_1__.interpolate_4d)(x.pixel_values, { size }))
                );

                // rows/cols of 0 signal "unsplit" to the downstream processor.
                images_list_rows.push(new Array(images_list.length).fill(0));
                images_list_cols.push(new Array(images_list.length).fill(0));
            }

            all_pixel_values.push((0,_utils_tensor_js__WEBPACK_IMPORTED_MODULE_1__.cat)(images_tensor, 0));
        }

        const batch_size = all_pixel_values.length;
        const [n, c, h, w] = all_pixel_values[0].dims;

        // Stack pixel values
        let pixel_values;
        let pixel_attention_mask;
        if (batch_size === 1) {
            // Single batch entry: no padding needed, mask is all-true.
            pixel_values = all_pixel_values[0].unsqueeze_(0);
            pixel_attention_mask = (0,_utils_tensor_js__WEBPACK_IMPORTED_MODULE_1__.full)([batch_size, n, h, w], true);
        } else {
            // Add padding (if necessary) to images with less patches than the maximum number of patches
            const max_num_patches = Math.max(...all_pixel_values.map(x => x.dims.at(0)));

            pixel_attention_mask = (0,_utils_tensor_js__WEBPACK_IMPORTED_MODULE_1__.full)([batch_size, max_num_patches, h, w], true);
            const pixel_attention_mask_data = pixel_attention_mask.data;
            // Number of mask elements per batch entry (flat layout).
            const pixel_attention_mask_stride = max_num_patches * h * w;
            for (let i = 0; i < batch_size; ++i) {
                const num_patches = all_pixel_values[i].dims[0];
                if (num_patches < max_num_patches) {
                    // Zero-pad the patch dimension and mask out the padded region.
                    all_pixel_values[i] = (0,_utils_tensor_js__WEBPACK_IMPORTED_MODULE_1__.cat)([
                        all_pixel_values[i],
                        (0,_utils_tensor_js__WEBPACK_IMPORTED_MODULE_1__.full)([max_num_patches - num_patches, c, h, w], 0),
                    ], 0);

                    const start_offset = i * pixel_attention_mask_stride + num_patches * h * w;
                    const end_offset = (i + 1) * pixel_attention_mask_stride;
                    pixel_attention_mask_data.fill(false, start_offset, end_offset);
                }
            }
            pixel_values = (0,_utils_tensor_js__WEBPACK_IMPORTED_MODULE_1__.stack)(all_pixel_values, 0);
        }

        return {
            pixel_values,
            pixel_attention_mask,

            original_sizes,
            reshaped_input_sizes,
            // Row/col split counts are only returned when explicitly requested.
            ...(
                return_row_col_info
                    ? { rows: images_list_rows, cols: images_list_cols }
                    : {}
            ),
        }
    }

    /**
     * Split a 4D image tensor into a grid of crops no larger than `longest_edge`
     * per side, appending the (possibly resized) full image as the final frame.
     * Returns 0/0 split counts when the image fits without splitting.
     */
    async split_image(pixel_values, { longest_edge }) {
        const max_height = longest_edge;
        const max_width = longest_edge;

        const frames = [];

        const [height, width] = pixel_values.dims.slice(-2);

        let num_splits_h = 0, num_splits_w = 0;

        if (height > max_height || width > max_width) {
            // Calculate the number of splits
            num_splits_h = Math.ceil(height / max_height);
            num_splits_w = Math.ceil(width / max_width);

            // Calculate the optimal width and height for the sub-images
            const optimal_height = Math.ceil(height / num_splits_h);
            const optimal_width = Math.ceil(width / num_splits_w);

            // Iterate through each row and column
            for (let r = 0; r < num_splits_h; r++) {
                for (let c = 0; c < num_splits_w; c++) {
                    // Calculate the starting point of the crop
                    const start_x = c * optimal_width;
                    const start_y = r * optimal_height;

                    // Calculate the ending point of the crop
                    const end_x = Math.min(start_x + optimal_width, width);
                    const end_y = Math.min(start_y + optimal_height, height);

                    // Crop the image
                    frames.push(pixel_values.slice(null, null, [start_y, end_y], [start_x, end_x]));
                }
            }

            // Resize the global image to match max dimensions for memory efficiency
            const global_image_height = max_height;
            const global_image_width = max_width;

            if (height !== global_image_height || width !== global_image_width) {
                pixel_values = await (0,_utils_tensor_js__WEBPACK_IMPORTED_MODULE_1__.interpolate_4d)(pixel_values, {
                    size: [global_image_height, global_image_width],
                })
            }
        }

        // The full (global) image is always the last frame.
        frames.push(pixel_values);

        return { frames, num_splits_h, num_splits_w };
    }
}
|
|
17159
|
+
|
|
17160
|
+
|
|
17161
|
+
/***/ }),
|
|
17162
|
+
|
|
17163
|
+
/***/ "./src/models/idefics3/processing_idefics3.js":
|
|
17164
|
+
/*!****************************************************!*\
|
|
17165
|
+
!*** ./src/models/idefics3/processing_idefics3.js ***!
|
|
17166
|
+
\****************************************************/
|
|
17167
|
+
/***/ ((__unused_webpack___webpack_module__, __webpack_exports__, __webpack_require__) => {
|
|
17168
|
+
|
|
17169
|
+
__webpack_require__.r(__webpack_exports__);
|
|
17170
|
+
/* harmony export */ __webpack_require__.d(__webpack_exports__, {
|
|
17171
|
+
/* harmony export */ Idefics3Processor: () => (/* binding */ Idefics3Processor)
|
|
17172
|
+
/* harmony export */ });
|
|
17173
|
+
/* harmony import */ var _base_processing_utils_js__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! ../../base/processing_utils.js */ "./src/base/processing_utils.js");
|
|
17174
|
+
/* harmony import */ var _auto_image_processing_auto_js__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(/*! ../auto/image_processing_auto.js */ "./src/models/auto/image_processing_auto.js");
|
|
17175
|
+
/* harmony import */ var _tokenizers_js__WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(/*! ../../tokenizers.js */ "./src/tokenizers.js");
|
|
17176
|
+
/* harmony import */ var _utils_image_js__WEBPACK_IMPORTED_MODULE_3__ = __webpack_require__(/*! ../../utils/image.js */ "./src/utils/image.js");
|
|
17177
|
+
/* harmony import */ var _utils_core_js__WEBPACK_IMPORTED_MODULE_4__ = __webpack_require__(/*! ../../utils/core.js */ "./src/utils/core.js");
|
|
17178
|
+
|
|
17179
|
+
|
|
17180
|
+
|
|
17181
|
+
|
|
17182
|
+
|
|
17183
|
+
|
|
17184
|
+
|
|
17185
|
+
/**
 * Prompt with expanded image tokens for when the image is split into patches:
 * one `<row_R_col_C>`-tagged run of image tokens per patch (rows separated by
 * newlines), followed by the global-image run.
 * @private
 */
function _prompt_split_image(image_seq_len, image_rows, image_cols, fake_token_around_image, image_token, global_img_token) {
    const tile_tokens = image_token.repeat(image_seq_len);
    const parts = [];
    for (let row = 0; row < image_rows; ++row) {
        for (let col = 0; col < image_cols; ++col) {
            parts.push(`${fake_token_around_image}<row_${row + 1}_col_${col + 1}>${tile_tokens}`);
        }
        parts.push("\n");
    }
    parts.push(`\n${fake_token_around_image}${global_img_token}${tile_tokens}${fake_token_around_image}`);
    return parts.join("");
}
|
|
17210
|
+
|
|
17211
|
+
/**
 * Prompt with expanded image tokens for a single (unsplit) image: the global
 * image token followed by `image_seq_len` image tokens, wrapped in the
 * surrounding fake tokens.
 * @private
 */
function _prompt_single_image(image_seq_len, fake_token_around_image, image_token, global_img_token) {
    const tile_tokens = image_token.repeat(image_seq_len);
    return fake_token_around_image + global_img_token + tile_tokens + fake_token_around_image;
}
|
|
17223
|
+
|
|
17224
|
+
/**
 * Build the expanded prompt string for one image: the single-image form when the
 * image was not split (rows === cols === 0), otherwise the per-patch split form.
 */
function get_image_prompt_string(image_rows, image_cols, image_seq_len, fake_token_around_image, image_token, global_img_token) {
    const is_unsplit = image_rows === 0 && image_cols === 0;
    return is_unsplit
        ? _prompt_single_image(image_seq_len, fake_token_around_image, image_token, global_img_token)
        : _prompt_split_image(image_seq_len, image_rows, image_cols, fake_token_around_image, image_token, global_img_token);
}
|
|
17237
|
+
|
|
17238
|
+
|
|
17239
|
+
/**
 * Processor for Idefics3: preprocesses images and expands each `<image>`
 * placeholder in the text into the full image-token sequence (per-patch
 * rows/cols plus the global image) before tokenizing.
 */
class Idefics3Processor extends _base_processing_utils_js__WEBPACK_IMPORTED_MODULE_0__.Processor {
    static image_processor_class = _auto_image_processing_auto_js__WEBPACK_IMPORTED_MODULE_1__.AutoImageProcessor
    static tokenizer_class = _tokenizers_js__WEBPACK_IMPORTED_MODULE_2__.AutoTokenizer
    static uses_processor_config = true;

    // Special tokens used when expanding `<image>` placeholders in the prompt.
    fake_image_token = "<fake_token_around_image>";
    image_token = "<image>";
    global_img_token = "<global-img>";

    /**
     * Tokenize `text` with expanded image tokens and preprocess `images`.
     * @param {string|string[]} text
     * @param {RawImage|RawImage[]|RawImage[][]} images
     * @returns {Promise<any>}
     */
    async _call(text, images = null, options = {}) {
        // Default to requesting row/col split info from the image processor;
        // it is needed to build the expanded prompts below.
        options.return_row_col_info ??= true;

        let image_inputs;
        if (images) {
            image_inputs = await this.image_processor(images, options);
        }

        // NOTE: We assume text is present
        if (!Array.isArray(text)) {
            text = [text];
        }

        // FIX: optional chaining — when `images` is null/omitted, `image_inputs`
        // is undefined and the previous `image_inputs.rows` access threw a TypeError.
        // NOTE(review): the fallback is a single batch entry of zeros; presumably
        // callers without images pass a single text — verify for multi-text batches.
        const image_rows = image_inputs?.rows ?? [new Array(text.length).fill(0)];
        const image_cols = image_inputs?.cols ?? [new Array(text.length).fill(0)];

        const image_seq_len = this.config.image_seq_len;
        const n_images_in_text = []
        const prompt_strings = [];
        for (let i = 0; i < text.length; ++i) {
            const sample = text[i];
            const sample_rows = image_rows[i];
            const sample_cols = image_cols[i];

            n_images_in_text.push((0,_utils_core_js__WEBPACK_IMPORTED_MODULE_4__.count)(sample, this.image_token));

            // Replace the image token with fake tokens around the expanded image token sequence of length `image_seq_len`
            const image_prompt_strings = sample_rows.map(
                (n_rows, j) => get_image_prompt_string(
                    n_rows,
                    sample_cols[j],
                    image_seq_len,
                    this.fake_image_token,
                    this.image_token,
                    this.global_img_token,
                )
            );

            const split_sample = sample.split(this.image_token);
            if (split_sample.length === 0) {
                // NOTE(review): String.prototype.split never returns an empty array,
                // so this branch is unreachable as written — confirm the intended check
                // (likely: number of image tokens vs. number of images).
                throw new Error("The image token should be present in the text.");
            }

            // Place in the image prompt strings where the image tokens are
            let new_sample = split_sample[0];
            for (let j = 0; j < image_prompt_strings.length; ++j) {
                new_sample += image_prompt_strings[j] + split_sample[j + 1];
            }
            prompt_strings.push(new_sample);
        }

        const text_inputs = this.tokenizer(prompt_strings);

        // Spreading an undefined `image_inputs` is a no-op, so text-only calls work.
        return {
            ...text_inputs,
            ...image_inputs,
        }
    }
}
|
|
17314
|
+
|
|
17315
|
+
|
|
16773
17316
|
/***/ }),
|
|
16774
17317
|
|
|
16775
17318
|
/***/ "./src/models/image_processors.js":
|
|
@@ -16797,40 +17340,41 @@ __webpack_require__.r(__webpack_exports__);
|
|
|
16797
17340
|
/* harmony export */ DonutImageProcessor: () => (/* reexport safe */ _donut_image_processing_donut_js__WEBPACK_IMPORTED_MODULE_7__.DonutImageProcessor),
|
|
16798
17341
|
/* harmony export */ EfficientNetImageProcessor: () => (/* reexport safe */ _efficientnet_image_processing_efficientnet_js__WEBPACK_IMPORTED_MODULE_9__.EfficientNetImageProcessor),
|
|
16799
17342
|
/* harmony export */ GLPNFeatureExtractor: () => (/* reexport safe */ _glpn_image_processing_glpn_js__WEBPACK_IMPORTED_MODULE_10__.GLPNFeatureExtractor),
|
|
16800
|
-
/* harmony export */
|
|
16801
|
-
/* harmony export */
|
|
16802
|
-
/* harmony export */
|
|
16803
|
-
/* harmony export */
|
|
16804
|
-
/* harmony export */
|
|
16805
|
-
/* harmony export */
|
|
16806
|
-
/* harmony export */
|
|
16807
|
-
/* harmony export */
|
|
16808
|
-
/* harmony export */
|
|
16809
|
-
/* harmony export */
|
|
16810
|
-
/* harmony export */
|
|
16811
|
-
/* harmony export */
|
|
16812
|
-
/* harmony export */
|
|
16813
|
-
/* harmony export */
|
|
16814
|
-
/* harmony export */
|
|
16815
|
-
/* harmony export */
|
|
16816
|
-
/* harmony export */
|
|
16817
|
-
/* harmony export */
|
|
16818
|
-
/* harmony export */
|
|
16819
|
-
/* harmony export */
|
|
16820
|
-
/* harmony export */
|
|
16821
|
-
/* harmony export */
|
|
16822
|
-
/* harmony export */
|
|
16823
|
-
/* harmony export */
|
|
16824
|
-
/* harmony export */
|
|
16825
|
-
/* harmony export */
|
|
16826
|
-
/* harmony export */
|
|
16827
|
-
/* harmony export */
|
|
16828
|
-
/* harmony export */
|
|
16829
|
-
/* harmony export */
|
|
16830
|
-
/* harmony export */
|
|
16831
|
-
/* harmony export */
|
|
16832
|
-
/* harmony export */
|
|
16833
|
-
/* harmony export */
|
|
17343
|
+
/* harmony export */ Idefics3ImageProcessor: () => (/* reexport safe */ _idefics3_image_processing_idefics3_js__WEBPACK_IMPORTED_MODULE_11__.Idefics3ImageProcessor),
|
|
17344
|
+
/* harmony export */ JinaCLIPImageProcessor: () => (/* reexport safe */ _jina_clip_image_processing_jina_clip_js__WEBPACK_IMPORTED_MODULE_13__.JinaCLIPImageProcessor),
|
|
17345
|
+
/* harmony export */ LlavaOnevisionImageProcessor: () => (/* reexport safe */ _llava_onevision_image_processing_llava_onevision_js__WEBPACK_IMPORTED_MODULE_14__.LlavaOnevisionImageProcessor),
|
|
17346
|
+
/* harmony export */ Mask2FormerImageProcessor: () => (/* reexport safe */ _mask2former_image_processing_mask2former_js__WEBPACK_IMPORTED_MODULE_15__.Mask2FormerImageProcessor),
|
|
17347
|
+
/* harmony export */ MaskFormerFeatureExtractor: () => (/* reexport safe */ _maskformer_image_processing_maskformer_js__WEBPACK_IMPORTED_MODULE_16__.MaskFormerFeatureExtractor),
|
|
17348
|
+
/* harmony export */ MaskFormerImageProcessor: () => (/* reexport safe */ _maskformer_image_processing_maskformer_js__WEBPACK_IMPORTED_MODULE_16__.MaskFormerImageProcessor),
|
|
17349
|
+
/* harmony export */ MobileNetV1FeatureExtractor: () => (/* reexport safe */ _mobilenet_v1_image_processing_mobilenet_v1_js__WEBPACK_IMPORTED_MODULE_17__.MobileNetV1FeatureExtractor),
|
|
17350
|
+
/* harmony export */ MobileNetV1ImageProcessor: () => (/* reexport safe */ _mobilenet_v1_image_processing_mobilenet_v1_js__WEBPACK_IMPORTED_MODULE_17__.MobileNetV1ImageProcessor),
|
|
17351
|
+
/* harmony export */ MobileNetV2FeatureExtractor: () => (/* reexport safe */ _mobilenet_v2_image_processing_mobilenet_v2_js__WEBPACK_IMPORTED_MODULE_18__.MobileNetV2FeatureExtractor),
|
|
17352
|
+
/* harmony export */ MobileNetV2ImageProcessor: () => (/* reexport safe */ _mobilenet_v2_image_processing_mobilenet_v2_js__WEBPACK_IMPORTED_MODULE_18__.MobileNetV2ImageProcessor),
|
|
17353
|
+
/* harmony export */ MobileNetV3FeatureExtractor: () => (/* reexport safe */ _mobilenet_v3_image_processing_mobilenet_v3_js__WEBPACK_IMPORTED_MODULE_19__.MobileNetV3FeatureExtractor),
|
|
17354
|
+
/* harmony export */ MobileNetV3ImageProcessor: () => (/* reexport safe */ _mobilenet_v3_image_processing_mobilenet_v3_js__WEBPACK_IMPORTED_MODULE_19__.MobileNetV3ImageProcessor),
|
|
17355
|
+
/* harmony export */ MobileNetV4FeatureExtractor: () => (/* reexport safe */ _mobilenet_v4_image_processing_mobilenet_v4_js__WEBPACK_IMPORTED_MODULE_20__.MobileNetV4FeatureExtractor),
|
|
17356
|
+
/* harmony export */ MobileNetV4ImageProcessor: () => (/* reexport safe */ _mobilenet_v4_image_processing_mobilenet_v4_js__WEBPACK_IMPORTED_MODULE_20__.MobileNetV4ImageProcessor),
|
|
17357
|
+
/* harmony export */ MobileViTFeatureExtractor: () => (/* reexport safe */ _mobilevit_image_processing_mobilevit_js__WEBPACK_IMPORTED_MODULE_21__.MobileViTFeatureExtractor),
|
|
17358
|
+
/* harmony export */ MobileViTImageProcessor: () => (/* reexport safe */ _mobilevit_image_processing_mobilevit_js__WEBPACK_IMPORTED_MODULE_21__.MobileViTImageProcessor),
|
|
17359
|
+
/* harmony export */ NougatImageProcessor: () => (/* reexport safe */ _nougat_image_processing_nougat_js__WEBPACK_IMPORTED_MODULE_22__.NougatImageProcessor),
|
|
17360
|
+
/* harmony export */ OwlViTFeatureExtractor: () => (/* reexport safe */ _owlvit_image_processing_owlvit_js__WEBPACK_IMPORTED_MODULE_24__.OwlViTFeatureExtractor),
|
|
17361
|
+
/* harmony export */ OwlViTImageProcessor: () => (/* reexport safe */ _owlvit_image_processing_owlvit_js__WEBPACK_IMPORTED_MODULE_24__.OwlViTImageProcessor),
|
|
17362
|
+
/* harmony export */ Owlv2ImageProcessor: () => (/* reexport safe */ _owlv2_image_processing_owlv2_js__WEBPACK_IMPORTED_MODULE_23__.Owlv2ImageProcessor),
|
|
17363
|
+
/* harmony export */ PvtImageProcessor: () => (/* reexport safe */ _pvt_image_processing_pvt_js__WEBPACK_IMPORTED_MODULE_25__.PvtImageProcessor),
|
|
17364
|
+
/* harmony export */ Qwen2VLImageProcessor: () => (/* reexport safe */ _qwen2_vl_image_processing_qwen2_vl_js__WEBPACK_IMPORTED_MODULE_26__.Qwen2VLImageProcessor),
|
|
17365
|
+
/* harmony export */ RTDetrImageProcessor: () => (/* reexport safe */ _rt_detr_image_processing_rt_detr_js__WEBPACK_IMPORTED_MODULE_27__.RTDetrImageProcessor),
|
|
17366
|
+
/* harmony export */ SamImageProcessor: () => (/* reexport safe */ _sam_image_processing_sam_js__WEBPACK_IMPORTED_MODULE_28__.SamImageProcessor),
|
|
17367
|
+
/* harmony export */ SegformerFeatureExtractor: () => (/* reexport safe */ _segformer_image_processing_segformer_js__WEBPACK_IMPORTED_MODULE_29__.SegformerFeatureExtractor),
|
|
17368
|
+
/* harmony export */ SegformerImageProcessor: () => (/* reexport safe */ _segformer_image_processing_segformer_js__WEBPACK_IMPORTED_MODULE_29__.SegformerImageProcessor),
|
|
17369
|
+
/* harmony export */ SiglipImageProcessor: () => (/* reexport safe */ _siglip_image_processing_siglip_js__WEBPACK_IMPORTED_MODULE_30__.SiglipImageProcessor),
|
|
17370
|
+
/* harmony export */ Swin2SRImageProcessor: () => (/* reexport safe */ _swin2sr_image_processing_swin2sr_js__WEBPACK_IMPORTED_MODULE_31__.Swin2SRImageProcessor),
|
|
17371
|
+
/* harmony export */ VLMImageProcessor: () => (/* reexport safe */ _janus_image_processing_janus_js__WEBPACK_IMPORTED_MODULE_12__.VLMImageProcessor),
|
|
17372
|
+
/* harmony export */ ViTFeatureExtractor: () => (/* reexport safe */ _vit_image_processing_vit_js__WEBPACK_IMPORTED_MODULE_32__.ViTFeatureExtractor),
|
|
17373
|
+
/* harmony export */ ViTImageProcessor: () => (/* reexport safe */ _vit_image_processing_vit_js__WEBPACK_IMPORTED_MODULE_32__.ViTImageProcessor),
|
|
17374
|
+
/* harmony export */ VitMatteImageProcessor: () => (/* reexport safe */ _vitmatte_image_processing_vitmatte_js__WEBPACK_IMPORTED_MODULE_33__.VitMatteImageProcessor),
|
|
17375
|
+
/* harmony export */ VitPoseImageProcessor: () => (/* reexport safe */ _vitpose_image_processing_vitpose_js__WEBPACK_IMPORTED_MODULE_34__.VitPoseImageProcessor),
|
|
17376
|
+
/* harmony export */ YolosFeatureExtractor: () => (/* reexport safe */ _yolos_image_processing_yolos_js__WEBPACK_IMPORTED_MODULE_35__.YolosFeatureExtractor),
|
|
17377
|
+
/* harmony export */ YolosImageProcessor: () => (/* reexport safe */ _yolos_image_processing_yolos_js__WEBPACK_IMPORTED_MODULE_35__.YolosImageProcessor)
|
|
16834
17378
|
/* harmony export */ });
|
|
16835
17379
|
/* harmony import */ var _beit_image_processing_beit_js__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! ./beit/image_processing_beit.js */ "./src/models/beit/image_processing_beit.js");
|
|
16836
17380
|
/* harmony import */ var _bit_image_processing_bit_js__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(/*! ./bit/image_processing_bit.js */ "./src/models/bit/image_processing_bit.js");
|
|
@@ -16843,30 +17387,32 @@ __webpack_require__.r(__webpack_exports__);
|
|
|
16843
17387
|
/* harmony import */ var _dpt_image_processing_dpt_js__WEBPACK_IMPORTED_MODULE_8__ = __webpack_require__(/*! ./dpt/image_processing_dpt.js */ "./src/models/dpt/image_processing_dpt.js");
|
|
16844
17388
|
/* harmony import */ var _efficientnet_image_processing_efficientnet_js__WEBPACK_IMPORTED_MODULE_9__ = __webpack_require__(/*! ./efficientnet/image_processing_efficientnet.js */ "./src/models/efficientnet/image_processing_efficientnet.js");
|
|
16845
17389
|
/* harmony import */ var _glpn_image_processing_glpn_js__WEBPACK_IMPORTED_MODULE_10__ = __webpack_require__(/*! ./glpn/image_processing_glpn.js */ "./src/models/glpn/image_processing_glpn.js");
|
|
16846
|
-
/* harmony import */ var
|
|
16847
|
-
/* harmony import */ var
|
|
16848
|
-
/* harmony import */ var
|
|
16849
|
-
/* harmony import */ var
|
|
16850
|
-
/* harmony import */ var
|
|
16851
|
-
/* harmony import */ var
|
|
16852
|
-
/* harmony import */ var
|
|
16853
|
-
/* harmony import */ var
|
|
16854
|
-
/* harmony import */ var
|
|
16855
|
-
/* harmony import */ var
|
|
16856
|
-
/* harmony import */ var
|
|
16857
|
-
/* harmony import */ var
|
|
16858
|
-
/* harmony import */ var
|
|
16859
|
-
/* harmony import */ var
|
|
16860
|
-
/* harmony import */ var
|
|
16861
|
-
/* harmony import */ var
|
|
16862
|
-
/* harmony import */ var
|
|
16863
|
-
/* harmony import */ var
|
|
16864
|
-
/* harmony import */ var
|
|
16865
|
-
/* harmony import */ var
|
|
16866
|
-
/* harmony import */ var
|
|
16867
|
-
/* harmony import */ var
|
|
16868
|
-
/* harmony import */ var
|
|
16869
|
-
/* harmony import */ var
|
|
17390
|
+
/* harmony import */ var _idefics3_image_processing_idefics3_js__WEBPACK_IMPORTED_MODULE_11__ = __webpack_require__(/*! ./idefics3/image_processing_idefics3.js */ "./src/models/idefics3/image_processing_idefics3.js");
|
|
17391
|
+
/* harmony import */ var _janus_image_processing_janus_js__WEBPACK_IMPORTED_MODULE_12__ = __webpack_require__(/*! ./janus/image_processing_janus.js */ "./src/models/janus/image_processing_janus.js");
|
|
17392
|
+
/* harmony import */ var _jina_clip_image_processing_jina_clip_js__WEBPACK_IMPORTED_MODULE_13__ = __webpack_require__(/*! ./jina_clip/image_processing_jina_clip.js */ "./src/models/jina_clip/image_processing_jina_clip.js");
|
|
17393
|
+
/* harmony import */ var _llava_onevision_image_processing_llava_onevision_js__WEBPACK_IMPORTED_MODULE_14__ = __webpack_require__(/*! ./llava_onevision/image_processing_llava_onevision.js */ "./src/models/llava_onevision/image_processing_llava_onevision.js");
|
|
17394
|
+
/* harmony import */ var _mask2former_image_processing_mask2former_js__WEBPACK_IMPORTED_MODULE_15__ = __webpack_require__(/*! ./mask2former/image_processing_mask2former.js */ "./src/models/mask2former/image_processing_mask2former.js");
|
|
17395
|
+
/* harmony import */ var _maskformer_image_processing_maskformer_js__WEBPACK_IMPORTED_MODULE_16__ = __webpack_require__(/*! ./maskformer/image_processing_maskformer.js */ "./src/models/maskformer/image_processing_maskformer.js");
|
|
17396
|
+
/* harmony import */ var _mobilenet_v1_image_processing_mobilenet_v1_js__WEBPACK_IMPORTED_MODULE_17__ = __webpack_require__(/*! ./mobilenet_v1/image_processing_mobilenet_v1.js */ "./src/models/mobilenet_v1/image_processing_mobilenet_v1.js");
|
|
17397
|
+
/* harmony import */ var _mobilenet_v2_image_processing_mobilenet_v2_js__WEBPACK_IMPORTED_MODULE_18__ = __webpack_require__(/*! ./mobilenet_v2/image_processing_mobilenet_v2.js */ "./src/models/mobilenet_v2/image_processing_mobilenet_v2.js");
|
|
17398
|
+
/* harmony import */ var _mobilenet_v3_image_processing_mobilenet_v3_js__WEBPACK_IMPORTED_MODULE_19__ = __webpack_require__(/*! ./mobilenet_v3/image_processing_mobilenet_v3.js */ "./src/models/mobilenet_v3/image_processing_mobilenet_v3.js");
|
|
17399
|
+
/* harmony import */ var _mobilenet_v4_image_processing_mobilenet_v4_js__WEBPACK_IMPORTED_MODULE_20__ = __webpack_require__(/*! ./mobilenet_v4/image_processing_mobilenet_v4.js */ "./src/models/mobilenet_v4/image_processing_mobilenet_v4.js");
|
|
17400
|
+
/* harmony import */ var _mobilevit_image_processing_mobilevit_js__WEBPACK_IMPORTED_MODULE_21__ = __webpack_require__(/*! ./mobilevit/image_processing_mobilevit.js */ "./src/models/mobilevit/image_processing_mobilevit.js");
|
|
17401
|
+
/* harmony import */ var _nougat_image_processing_nougat_js__WEBPACK_IMPORTED_MODULE_22__ = __webpack_require__(/*! ./nougat/image_processing_nougat.js */ "./src/models/nougat/image_processing_nougat.js");
|
|
17402
|
+
/* harmony import */ var _owlv2_image_processing_owlv2_js__WEBPACK_IMPORTED_MODULE_23__ = __webpack_require__(/*! ./owlv2/image_processing_owlv2.js */ "./src/models/owlv2/image_processing_owlv2.js");
|
|
17403
|
+
/* harmony import */ var _owlvit_image_processing_owlvit_js__WEBPACK_IMPORTED_MODULE_24__ = __webpack_require__(/*! ./owlvit/image_processing_owlvit.js */ "./src/models/owlvit/image_processing_owlvit.js");
|
|
17404
|
+
/* harmony import */ var _pvt_image_processing_pvt_js__WEBPACK_IMPORTED_MODULE_25__ = __webpack_require__(/*! ./pvt/image_processing_pvt.js */ "./src/models/pvt/image_processing_pvt.js");
|
|
17405
|
+
/* harmony import */ var _qwen2_vl_image_processing_qwen2_vl_js__WEBPACK_IMPORTED_MODULE_26__ = __webpack_require__(/*! ./qwen2_vl/image_processing_qwen2_vl.js */ "./src/models/qwen2_vl/image_processing_qwen2_vl.js");
|
|
17406
|
+
/* harmony import */ var _rt_detr_image_processing_rt_detr_js__WEBPACK_IMPORTED_MODULE_27__ = __webpack_require__(/*! ./rt_detr/image_processing_rt_detr.js */ "./src/models/rt_detr/image_processing_rt_detr.js");
|
|
17407
|
+
/* harmony import */ var _sam_image_processing_sam_js__WEBPACK_IMPORTED_MODULE_28__ = __webpack_require__(/*! ./sam/image_processing_sam.js */ "./src/models/sam/image_processing_sam.js");
|
|
17408
|
+
/* harmony import */ var _segformer_image_processing_segformer_js__WEBPACK_IMPORTED_MODULE_29__ = __webpack_require__(/*! ./segformer/image_processing_segformer.js */ "./src/models/segformer/image_processing_segformer.js");
|
|
17409
|
+
/* harmony import */ var _siglip_image_processing_siglip_js__WEBPACK_IMPORTED_MODULE_30__ = __webpack_require__(/*! ./siglip/image_processing_siglip.js */ "./src/models/siglip/image_processing_siglip.js");
|
|
17410
|
+
/* harmony import */ var _swin2sr_image_processing_swin2sr_js__WEBPACK_IMPORTED_MODULE_31__ = __webpack_require__(/*! ./swin2sr/image_processing_swin2sr.js */ "./src/models/swin2sr/image_processing_swin2sr.js");
|
|
17411
|
+
/* harmony import */ var _vit_image_processing_vit_js__WEBPACK_IMPORTED_MODULE_32__ = __webpack_require__(/*! ./vit/image_processing_vit.js */ "./src/models/vit/image_processing_vit.js");
|
|
17412
|
+
/* harmony import */ var _vitmatte_image_processing_vitmatte_js__WEBPACK_IMPORTED_MODULE_33__ = __webpack_require__(/*! ./vitmatte/image_processing_vitmatte.js */ "./src/models/vitmatte/image_processing_vitmatte.js");
|
|
17413
|
+
/* harmony import */ var _vitpose_image_processing_vitpose_js__WEBPACK_IMPORTED_MODULE_34__ = __webpack_require__(/*! ./vitpose/image_processing_vitpose.js */ "./src/models/vitpose/image_processing_vitpose.js");
|
|
17414
|
+
/* harmony import */ var _yolos_image_processing_yolos_js__WEBPACK_IMPORTED_MODULE_35__ = __webpack_require__(/*! ./yolos/image_processing_yolos.js */ "./src/models/yolos/image_processing_yolos.js");
|
|
17415
|
+
|
|
16870
17416
|
|
|
16871
17417
|
|
|
16872
17418
|
|
|
@@ -17616,6 +18162,105 @@ class OwlViTProcessor extends _base_processing_utils_js__WEBPACK_IMPORTED_MODULE
|
|
|
17616
18162
|
}
|
|
17617
18163
|
|
|
17618
18164
|
|
|
18165
|
+
/***/ }),
|
|
18166
|
+
|
|
18167
|
+
/***/ "./src/models/paligemma/processing_paligemma.js":
|
|
18168
|
+
/*!******************************************************!*\
|
|
18169
|
+
!*** ./src/models/paligemma/processing_paligemma.js ***!
|
|
18170
|
+
\******************************************************/
|
|
18171
|
+
/***/ ((__unused_webpack___webpack_module__, __webpack_exports__, __webpack_require__) => {
|
|
18172
|
+
|
|
18173
|
+
__webpack_require__.r(__webpack_exports__);
|
|
18174
|
+
/* harmony export */ __webpack_require__.d(__webpack_exports__, {
|
|
18175
|
+
/* harmony export */ PaliGemmaProcessor: () => (/* binding */ PaliGemmaProcessor)
|
|
18176
|
+
/* harmony export */ });
|
|
18177
|
+
/* harmony import */ var _base_processing_utils_js__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! ../../base/processing_utils.js */ "./src/base/processing_utils.js");
|
|
18178
|
+
/* harmony import */ var _auto_image_processing_auto_js__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(/*! ../auto/image_processing_auto.js */ "./src/models/auto/image_processing_auto.js");
|
|
18179
|
+
/* harmony import */ var _tokenizers_js__WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(/*! ../../tokenizers.js */ "./src/tokenizers.js");
|
|
18180
|
+
|
|
18181
|
+
|
|
18182
|
+
|
|
18183
|
+
|
|
18184
|
+
const IMAGE_TOKEN = "<image>";
|
|
18185
|
+
|
|
18186
|
+
/**
 * Build the raw input string fed to the PaliGemma tokenizer: the image
 * placeholder token repeated `image_seq_len` times per image, followed by
 * the BOS token, the user prompt, and a trailing newline.
 * @param {string} prompt The text prompt.
 * @param {string} bos_token The tokenizer's beginning-of-sequence token.
 * @param {number} image_seq_len Number of image tokens emitted per image.
 * @param {string} image_token The image placeholder token.
 * @param {number} num_images How many images accompany this prompt.
 * @returns {string} The assembled prompt string.
 */
function build_string_from_input(
    prompt,
    bos_token,
    image_seq_len,
    image_token,
    num_images,
) {
    const image_prefix = image_token.repeat(image_seq_len * num_images);
    return image_prefix + bos_token + prompt + '\n';
}
|
|
18195
|
+
|
|
18196
|
+
class PaliGemmaProcessor extends _base_processing_utils_js__WEBPACK_IMPORTED_MODULE_0__.Processor {
    static tokenizer_class = _tokenizers_js__WEBPACK_IMPORTED_MODULE_2__.AutoTokenizer
    static image_processor_class = _auto_image_processing_auto_js__WEBPACK_IMPORTED_MODULE_1__.AutoImageProcessor
    static uses_processor_config = false;

    /**
     * @typedef {import('../../utils/image.js').RawImage} RawImage
     */

    /**
     * Prepare images (required) and an optional text prefix for a PaliGemma model.
     * If the text already contains `<image>` placeholders, each one is expanded to
     * the configured image sequence length and a BOS token is inserted after the
     * last one; otherwise the placeholders are inferred and prepended.
     * Returns the merged tokenizer and image-processor outputs.
     */
    async _call(/** @type {RawImage|RawImage[]} */ images, text = null, kwargs = {}) {
        // No text prefix: fall back to captioning mode with an empty prompt.
        if (!text) {
            console.warn(
                "You are using PaliGemma without a text prefix. It will perform as a picture-captioning model."
            )
            text = ""
        }

        const image_array = Array.isArray(images) ? images : [images];
        const text_array = Array.isArray(text) ? text : [text];

        const bos_token = this.tokenizer.bos_token;
        const image_seq_length = this.image_processor.config.image_seq_length;

        const contains_image_token = text_array.some((t) => t.includes(IMAGE_TOKEN));

        let input_strings;
        if (!contains_image_token) {
            console.warn(
                "You are passing both `text` and `images` to `PaliGemmaProcessor`. The processor expects special " +
                "image tokens in the text, as many tokens as there are images per each text. It is recommended to " +
                "add `<image>` tokens in the very beginning of your text. For this call, we will infer how many images " +
                "each text has and add special tokens."
            )

            input_strings = text_array.map(
                (sample) => build_string_from_input(
                    sample,
                    bos_token,
                    image_seq_length,
                    IMAGE_TOKEN,
                    image_array.length,
                )
            )
        } else {
            input_strings = text_array.map((sample) => {
                // Expand each `<image>` placeholder to `image_seq_length` copies,
                // then insert the BOS token immediately after the last image token
                // (or at the start if this particular sample has none).
                const expanded = sample.replaceAll(IMAGE_TOKEN, IMAGE_TOKEN.repeat(image_seq_length));
                const last_token_index = expanded.lastIndexOf(IMAGE_TOKEN);
                const insert_at = last_token_index === -1 ? 0 : last_token_index + IMAGE_TOKEN.length;
                return expanded.slice(0, insert_at) + bos_token + expanded.slice(insert_at) + "\n";
            })
        }

        const text_inputs = this.tokenizer(input_strings, kwargs);
        const image_inputs = await this.image_processor(image_array, kwargs);

        // Text keys win on collision, matching the original merge order.
        return {
            ...image_inputs,
            ...text_inputs,
        }
    }
}
|
|
18262
|
+
|
|
18263
|
+
|
|
17619
18264
|
/***/ }),
|
|
17620
18265
|
|
|
17621
18266
|
/***/ "./src/models/processors.js":
|
|
@@ -17627,28 +18272,34 @@ class OwlViTProcessor extends _base_processing_utils_js__WEBPACK_IMPORTED_MODULE
|
|
|
17627
18272
|
__webpack_require__.r(__webpack_exports__);
|
|
17628
18273
|
/* harmony export */ __webpack_require__.d(__webpack_exports__, {
|
|
17629
18274
|
/* harmony export */ Florence2Processor: () => (/* reexport safe */ _florence2_processing_florence2_js__WEBPACK_IMPORTED_MODULE_0__.Florence2Processor),
|
|
17630
|
-
/* harmony export */
|
|
18275
|
+
/* harmony export */ Idefics3Processor: () => (/* reexport safe */ _idefics3_processing_idefics3_js__WEBPACK_IMPORTED_MODULE_2__.Idefics3Processor),
|
|
18276
|
+
/* harmony export */ JinaCLIPProcessor: () => (/* reexport safe */ _jina_clip_processing_jina_clip_js__WEBPACK_IMPORTED_MODULE_4__.JinaCLIPProcessor),
|
|
17631
18277
|
/* harmony export */ MgpstrProcessor: () => (/* reexport safe */ _mgp_str_processing_mgp_str_js__WEBPACK_IMPORTED_MODULE_1__.MgpstrProcessor),
|
|
17632
|
-
/* harmony export */ OwlViTProcessor: () => (/* reexport safe */
|
|
17633
|
-
/* harmony export */
|
|
17634
|
-
/* harmony export */
|
|
17635
|
-
/* harmony export */
|
|
17636
|
-
/* harmony export */
|
|
17637
|
-
/* harmony export */
|
|
17638
|
-
/* harmony export */
|
|
17639
|
-
/* harmony export */
|
|
18278
|
+
/* harmony export */ OwlViTProcessor: () => (/* reexport safe */ _owlvit_processing_owlvit_js__WEBPACK_IMPORTED_MODULE_5__.OwlViTProcessor),
|
|
18279
|
+
/* harmony export */ PaliGemmaProcessor: () => (/* reexport safe */ _paligemma_processing_paligemma_js__WEBPACK_IMPORTED_MODULE_6__.PaliGemmaProcessor),
|
|
18280
|
+
/* harmony export */ PyAnnoteProcessor: () => (/* reexport safe */ _pyannote_processing_pyannote_js__WEBPACK_IMPORTED_MODULE_7__.PyAnnoteProcessor),
|
|
18281
|
+
/* harmony export */ Qwen2VLProcessor: () => (/* reexport safe */ _qwen2_vl_processing_qwen2_vl_js__WEBPACK_IMPORTED_MODULE_8__.Qwen2VLProcessor),
|
|
18282
|
+
/* harmony export */ SamProcessor: () => (/* reexport safe */ _sam_processing_sam_js__WEBPACK_IMPORTED_MODULE_9__.SamProcessor),
|
|
18283
|
+
/* harmony export */ SpeechT5Processor: () => (/* reexport safe */ _speecht5_processing_speecht5_js__WEBPACK_IMPORTED_MODULE_10__.SpeechT5Processor),
|
|
18284
|
+
/* harmony export */ VLChatProcessor: () => (/* reexport safe */ _janus_processing_janus_js__WEBPACK_IMPORTED_MODULE_3__.VLChatProcessor),
|
|
18285
|
+
/* harmony export */ Wav2Vec2ProcessorWithLM: () => (/* reexport safe */ _wav2vec2_processing_wav2vec2_js__WEBPACK_IMPORTED_MODULE_11__.Wav2Vec2ProcessorWithLM),
|
|
18286
|
+
/* harmony export */ WhisperProcessor: () => (/* reexport safe */ _whisper_processing_whisper_js__WEBPACK_IMPORTED_MODULE_12__.WhisperProcessor)
|
|
17640
18287
|
/* harmony export */ });
|
|
17641
18288
|
/* harmony import */ var _florence2_processing_florence2_js__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! ./florence2/processing_florence2.js */ "./src/models/florence2/processing_florence2.js");
|
|
17642
18289
|
/* harmony import */ var _mgp_str_processing_mgp_str_js__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(/*! ./mgp_str/processing_mgp_str.js */ "./src/models/mgp_str/processing_mgp_str.js");
|
|
17643
|
-
/* harmony import */ var
|
|
17644
|
-
/* harmony import */ var
|
|
17645
|
-
/* harmony import */ var
|
|
17646
|
-
/* harmony import */ var
|
|
17647
|
-
/* harmony import */ var
|
|
17648
|
-
/* harmony import */ var
|
|
17649
|
-
/* harmony import */ var
|
|
17650
|
-
/* harmony import */ var
|
|
17651
|
-
/* harmony import */ var
|
|
18290
|
+
/* harmony import */ var _idefics3_processing_idefics3_js__WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(/*! ./idefics3/processing_idefics3.js */ "./src/models/idefics3/processing_idefics3.js");
|
|
18291
|
+
/* harmony import */ var _janus_processing_janus_js__WEBPACK_IMPORTED_MODULE_3__ = __webpack_require__(/*! ./janus/processing_janus.js */ "./src/models/janus/processing_janus.js");
|
|
18292
|
+
/* harmony import */ var _jina_clip_processing_jina_clip_js__WEBPACK_IMPORTED_MODULE_4__ = __webpack_require__(/*! ./jina_clip/processing_jina_clip.js */ "./src/models/jina_clip/processing_jina_clip.js");
|
|
18293
|
+
/* harmony import */ var _owlvit_processing_owlvit_js__WEBPACK_IMPORTED_MODULE_5__ = __webpack_require__(/*! ./owlvit/processing_owlvit.js */ "./src/models/owlvit/processing_owlvit.js");
|
|
18294
|
+
/* harmony import */ var _paligemma_processing_paligemma_js__WEBPACK_IMPORTED_MODULE_6__ = __webpack_require__(/*! ./paligemma/processing_paligemma.js */ "./src/models/paligemma/processing_paligemma.js");
|
|
18295
|
+
/* harmony import */ var _pyannote_processing_pyannote_js__WEBPACK_IMPORTED_MODULE_7__ = __webpack_require__(/*! ./pyannote/processing_pyannote.js */ "./src/models/pyannote/processing_pyannote.js");
|
|
18296
|
+
/* harmony import */ var _qwen2_vl_processing_qwen2_vl_js__WEBPACK_IMPORTED_MODULE_8__ = __webpack_require__(/*! ./qwen2_vl/processing_qwen2_vl.js */ "./src/models/qwen2_vl/processing_qwen2_vl.js");
|
|
18297
|
+
/* harmony import */ var _sam_processing_sam_js__WEBPACK_IMPORTED_MODULE_9__ = __webpack_require__(/*! ./sam/processing_sam.js */ "./src/models/sam/processing_sam.js");
|
|
18298
|
+
/* harmony import */ var _speecht5_processing_speecht5_js__WEBPACK_IMPORTED_MODULE_10__ = __webpack_require__(/*! ./speecht5/processing_speecht5.js */ "./src/models/speecht5/processing_speecht5.js");
|
|
18299
|
+
/* harmony import */ var _wav2vec2_processing_wav2vec2_js__WEBPACK_IMPORTED_MODULE_11__ = __webpack_require__(/*! ./wav2vec2/processing_wav2vec2.js */ "./src/models/wav2vec2/processing_wav2vec2.js");
|
|
18300
|
+
/* harmony import */ var _whisper_processing_whisper_js__WEBPACK_IMPORTED_MODULE_12__ = __webpack_require__(/*! ./whisper/processing_whisper.js */ "./src/models/whisper/processing_whisper.js");
|
|
18301
|
+
|
|
18302
|
+
|
|
17652
18303
|
|
|
17653
18304
|
|
|
17654
18305
|
|
|
@@ -25537,6 +26188,12 @@ class PreTrainedTokenizer extends _utils_generic_js__WEBPACK_IMPORTED_MODULE_0__
|
|
|
25537
26188
|
this.unk_token = this.getToken('unk_token');
|
|
25538
26189
|
this.unk_token_id = this.model.tokens_to_ids.get(this.unk_token);
|
|
25539
26190
|
|
|
26191
|
+
this.bos_token = this.getToken('bos_token');
|
|
26192
|
+
this.bos_token_id = this.model.tokens_to_ids.get(this.bos_token);
|
|
26193
|
+
|
|
26194
|
+
this.eos_token = this.getToken('eos_token');
|
|
26195
|
+
this.eos_token_id = this.model.tokens_to_ids.get(this.eos_token);
|
|
26196
|
+
|
|
25540
26197
|
this.model_max_length = tokenizerConfig.model_max_length;
|
|
25541
26198
|
|
|
25542
26199
|
/** @type {boolean} Whether or not to strip the text when tokenizing (removing excess spaces before and after the string). */
|
|
@@ -26509,6 +27166,11 @@ class WhisperTokenizer extends PreTrainedTokenizer {
|
|
|
26509
27166
|
let chunk = new_chunk();
|
|
26510
27167
|
let time_offset = 0.0;
|
|
26511
27168
|
const timestamp_begin = this.timestamp_begin;
|
|
27169
|
+
// Whisper timestamp tokens start from 0.00 and go to timestamp 30.00 in 0.02 increments.
|
|
27170
|
+
// We can calculate the last time stamp token as timestamp_begin plus the number of tokens
|
|
27171
|
+
// tokens from 0.00 to 30.00 which is 1500.
|
|
27172
|
+
const total_timestamp_tokens = 1500; // (30.00 - 0.00) / 0.02
|
|
27173
|
+
const timestamp_end = timestamp_begin + total_timestamp_tokens;
|
|
26512
27174
|
|
|
26513
27175
|
let previous_tokens = [];
|
|
26514
27176
|
let previous_token_timestamps = [];
|
|
@@ -26596,7 +27258,7 @@ class WhisperTokenizer extends PreTrainedTokenizer {
|
|
|
26596
27258
|
} else {
|
|
26597
27259
|
// 2/ This is a regular special token, ignoring it
|
|
26598
27260
|
}
|
|
26599
|
-
} else if (token >= timestamp_begin) {
|
|
27261
|
+
} else if (token >= timestamp_begin && token <= timestamp_end) {
|
|
26600
27262
|
// 3/ Timestamp token
|
|
26601
27263
|
const time = (token - timestamp_begin) * time_precision + time_offset;
|
|
26602
27264
|
const rounded_time = (0,_utils_maths_js__WEBPACK_IMPORTED_MODULE_3__.round)(time, 2);
|
|
@@ -28061,6 +28723,7 @@ __webpack_require__.r(__webpack_exports__);
|
|
|
28061
28723
|
/* harmony export */ __webpack_require__.d(__webpack_exports__, {
|
|
28062
28724
|
/* harmony export */ calculateDimensions: () => (/* binding */ calculateDimensions),
|
|
28063
28725
|
/* harmony export */ calculateReflectOffset: () => (/* binding */ calculateReflectOffset),
|
|
28726
|
+
/* harmony export */ count: () => (/* binding */ count),
|
|
28064
28727
|
/* harmony export */ dispatchCallback: () => (/* binding */ dispatchCallback),
|
|
28065
28728
|
/* harmony export */ escapeRegExp: () => (/* binding */ escapeRegExp),
|
|
28066
28729
|
/* harmony export */ isIntegralNumber: () => (/* binding */ isIntegralNumber),
|
|
@@ -28084,15 +28747,45 @@ __webpack_require__.r(__webpack_exports__);
|
|
|
28084
28747
|
*/
|
|
28085
28748
|
|
|
28086
28749
|
/**
|
|
28087
|
-
* @typedef {Object}
|
|
28088
|
-
* @property {'initiate'
|
|
28089
|
-
* @property {string} name
|
|
28090
|
-
*
|
|
28091
|
-
|
|
28092
|
-
|
|
28093
|
-
|
|
28094
|
-
* @
|
|
28095
|
-
* @property {
|
|
28750
|
+
* @typedef {Object} InitiateProgressInfo
|
|
28751
|
+
* @property {'initiate'} status
|
|
28752
|
+
* @property {string} name The model id or directory path.
|
|
28753
|
+
* @property {string} file The name of the file.
|
|
28754
|
+
*/
|
|
28755
|
+
|
|
28756
|
+
/**
|
|
28757
|
+
* @typedef {Object} DownloadProgressInfo
|
|
28758
|
+
* @property {'download'} status
|
|
28759
|
+
* @property {string} name The model id or directory path.
|
|
28760
|
+
* @property {string} file The name of the file.
|
|
28761
|
+
*/
|
|
28762
|
+
|
|
28763
|
+
/**
|
|
28764
|
+
* @typedef {Object} ProgressStatusInfo
|
|
28765
|
+
* @property {'progress'} status
|
|
28766
|
+
* @property {string} name The model id or directory path.
|
|
28767
|
+
* @property {string} file The name of the file.
|
|
28768
|
+
* @property {number} progress A number between 0 and 100.
|
|
28769
|
+
* @property {number} loaded The number of bytes loaded.
|
|
28770
|
+
* @property {number} total The total number of bytes to be loaded.
|
|
28771
|
+
*/
|
|
28772
|
+
|
|
28773
|
+
/**
|
|
28774
|
+
* @typedef {Object} DoneProgressInfo
|
|
28775
|
+
* @property {'done'} status
|
|
28776
|
+
* @property {string} name The model id or directory path.
|
|
28777
|
+
* @property {string} file The name of the file.
|
|
28778
|
+
*/
|
|
28779
|
+
|
|
28780
|
+
/**
|
|
28781
|
+
* @typedef {Object} ReadyProgressInfo
|
|
28782
|
+
* @property {'ready'} status
|
|
28783
|
+
* @property {string} task The loaded task.
|
|
28784
|
+
* @property {string} model The loaded model.
|
|
28785
|
+
*/
|
|
28786
|
+
|
|
28787
|
+
/**
|
|
28788
|
+
* @typedef {InitiateProgressInfo | DownloadProgressInfo | ProgressStatusInfo | DoneProgressInfo | ReadyProgressInfo} ProgressInfo
|
|
28096
28789
|
*/
|
|
28097
28790
|
|
|
28098
28791
|
/**
|
|
@@ -28263,6 +28956,20 @@ function len(s) {
|
|
|
28263
28956
|
return length;
|
|
28264
28957
|
}
|
|
28265
28958
|
|
|
28959
|
+
/**
|
|
28960
|
+
* Count the occurrences of a value in an array or string.
|
|
28961
|
+
* This mimics the behavior of Python's `count` method.
|
|
28962
|
+
* @param {any[]|string} arr The array or string to search.
|
|
28963
|
+
* @param {any} value The value to count.
|
|
28964
|
+
*/
|
|
28965
|
+
function count(arr, value) {
|
|
28966
|
+
let count = 0;
|
|
28967
|
+
for (const v of arr) {
|
|
28968
|
+
if (v === value) ++count;
|
|
28969
|
+
}
|
|
28970
|
+
return count;
|
|
28971
|
+
}
|
|
28972
|
+
|
|
28266
28973
|
|
|
28267
28974
|
/***/ }),
|
|
28268
28975
|
|
|
@@ -28813,6 +29520,7 @@ const isWebGpuFp16Supported = (function () {
|
|
|
28813
29520
|
})();
|
|
28814
29521
|
|
|
28815
29522
|
const DATA_TYPES = Object.freeze({
|
|
29523
|
+
auto: 'auto', // Auto-detect based on environment
|
|
28816
29524
|
fp32: 'fp32',
|
|
28817
29525
|
fp16: 'fp16',
|
|
28818
29526
|
q8: 'q8',
|
|
@@ -28829,7 +29537,7 @@ const DEFAULT_DEVICE_DTYPE_MAPPING = Object.freeze({
|
|
|
28829
29537
|
[_devices_js__WEBPACK_IMPORTED_MODULE_1__.DEVICE_TYPES.wasm]: DATA_TYPES.q8,
|
|
28830
29538
|
});
|
|
28831
29539
|
|
|
28832
|
-
/** @type {Record<DataType, string>} */
|
|
29540
|
+
/** @type {Record<Exclude<DataType, "auto">, string>} */
|
|
28833
29541
|
const DEFAULT_DTYPE_SUFFIX_MAPPING = Object.freeze({
|
|
28834
29542
|
[DATA_TYPES.fp32]: '',
|
|
28835
29543
|
[DATA_TYPES.fp16]: '_fp16',
|
|
@@ -29415,13 +30123,6 @@ async function getModelFile(path_or_repo_id, filename, fatal = true, options = {
|
|
|
29415
30123
|
file: filename
|
|
29416
30124
|
})
|
|
29417
30125
|
|
|
29418
|
-
/** @type {import('./core.js').ProgressInfo} */
|
|
29419
|
-
const progressInfo = {
|
|
29420
|
-
status: 'progress',
|
|
29421
|
-
name: path_or_repo_id,
|
|
29422
|
-
file: filename
|
|
29423
|
-
}
|
|
29424
|
-
|
|
29425
30126
|
/** @type {Uint8Array} */
|
|
29426
30127
|
let buffer;
|
|
29427
30128
|
|
|
@@ -29441,7 +30142,9 @@ async function getModelFile(path_or_repo_id, filename, fatal = true, options = {
|
|
|
29441
30142
|
|
|
29442
30143
|
// For completeness, we still fire the final progress callback
|
|
29443
30144
|
(0,_core_js__WEBPACK_IMPORTED_MODULE_3__.dispatchCallback)(options.progress_callback, {
|
|
29444
|
-
|
|
30145
|
+
status: 'progress',
|
|
30146
|
+
name: path_or_repo_id,
|
|
30147
|
+
file: filename,
|
|
29445
30148
|
progress: 100,
|
|
29446
30149
|
loaded: buffer.length,
|
|
29447
30150
|
total: buffer.length,
|
|
@@ -29449,7 +30152,9 @@ async function getModelFile(path_or_repo_id, filename, fatal = true, options = {
|
|
|
29449
30152
|
} else {
|
|
29450
30153
|
buffer = await readResponse(response, data => {
|
|
29451
30154
|
(0,_core_js__WEBPACK_IMPORTED_MODULE_3__.dispatchCallback)(options.progress_callback, {
|
|
29452
|
-
|
|
30155
|
+
status: 'progress',
|
|
30156
|
+
name: path_or_repo_id,
|
|
30157
|
+
file: filename,
|
|
29453
30158
|
...data,
|
|
29454
30159
|
})
|
|
29455
30160
|
})
|
|
@@ -29506,12 +30211,11 @@ async function getModelJSON(modelPath, fileName, fatal = true, options = {}) {
|
|
|
29506
30211
|
|
|
29507
30212
|
return JSON.parse(jsonData);
|
|
29508
30213
|
}
|
|
29509
|
-
|
|
29510
30214
|
/**
|
|
29511
30215
|
* Read and track progress when reading a Response object
|
|
29512
30216
|
*
|
|
29513
|
-
* @param {
|
|
29514
|
-
* @param {
|
|
30217
|
+
* @param {Response|FileResponse} response The Response object to read
|
|
30218
|
+
* @param {(data: {progress: number, loaded: number, total: number}) => void} progress_callback The function to call with progress updates
|
|
29515
30219
|
* @returns {Promise<Uint8Array>} A Promise that resolves with the Uint8Array buffer
|
|
29516
30220
|
*/
|
|
29517
30221
|
async function readResponse(response, progress_callback) {
|
|
@@ -29594,7 +30298,8 @@ function pathJoin(...parts) {
|
|
|
29594
30298
|
|
|
29595
30299
|
__webpack_require__.r(__webpack_exports__);
|
|
29596
30300
|
/* harmony export */ __webpack_require__.d(__webpack_exports__, {
|
|
29597
|
-
/* harmony export */ RawImage: () => (/* binding */ RawImage)
|
|
30301
|
+
/* harmony export */ RawImage: () => (/* binding */ RawImage),
|
|
30302
|
+
/* harmony export */ load_image: () => (/* binding */ load_image)
|
|
29598
30303
|
/* harmony export */ });
|
|
29599
30304
|
/* harmony import */ var _core_js__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! ./core.js */ "./src/utils/core.js");
|
|
29600
30305
|
/* harmony import */ var _hub_js__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(/*! ./hub.js */ "./src/utils/hub.js");
|
|
@@ -29619,13 +30324,11 @@ __webpack_require__.r(__webpack_exports__);
|
|
|
29619
30324
|
// Will be empty (or not used) if running in browser or web-worker
|
|
29620
30325
|
|
|
29621
30326
|
|
|
29622
|
-
const BROWSER_ENV = typeof self !== 'undefined';
|
|
29623
|
-
const WEBWORKER_ENV = BROWSER_ENV && self.constructor.name === 'DedicatedWorkerGlobalScope';
|
|
29624
|
-
|
|
29625
30327
|
let createCanvasFunction;
|
|
29626
30328
|
let ImageDataClass;
|
|
29627
30329
|
let loadImageFunction;
|
|
29628
|
-
|
|
30330
|
+
const IS_BROWSER_OR_WEBWORKER = _env_js__WEBPACK_IMPORTED_MODULE_2__.apis.IS_BROWSER_ENV || _env_js__WEBPACK_IMPORTED_MODULE_2__.apis.IS_WEBWORKER_ENV;
|
|
30331
|
+
if (IS_BROWSER_OR_WEBWORKER) {
|
|
29629
30332
|
// Running in browser or web-worker
|
|
29630
30333
|
createCanvasFunction = (/** @type {number} */ width, /** @type {number} */ height) => {
|
|
29631
30334
|
if (!self.OffscreenCanvas) {
|
|
@@ -29735,7 +30438,7 @@ class RawImage {
|
|
|
29735
30438
|
* @returns {RawImage} The image object.
|
|
29736
30439
|
*/
|
|
29737
30440
|
static fromCanvas(canvas) {
|
|
29738
|
-
if (!
|
|
30441
|
+
if (!IS_BROWSER_OR_WEBWORKER) {
|
|
29739
30442
|
throw new Error('fromCanvas() is only supported in browser environments.')
|
|
29740
30443
|
}
|
|
29741
30444
|
|
|
@@ -29764,7 +30467,7 @@ class RawImage {
|
|
|
29764
30467
|
* @returns {Promise<RawImage>} The image object.
|
|
29765
30468
|
*/
|
|
29766
30469
|
static async fromBlob(blob) {
|
|
29767
|
-
if (
|
|
30470
|
+
if (IS_BROWSER_OR_WEBWORKER) {
|
|
29768
30471
|
// Running in environment with canvas
|
|
29769
30472
|
const img = await loadImageFunction(blob);
|
|
29770
30473
|
|
|
@@ -29908,6 +30611,46 @@ class RawImage {
|
|
|
29908
30611
|
return this._update(newData, this.width, this.height, 4);
|
|
29909
30612
|
}
|
|
29910
30613
|
|
|
30614
|
+
/**
|
|
30615
|
+
* Apply an alpha mask to the image. Operates in place.
|
|
30616
|
+
* @param {RawImage} mask The mask to apply. It should have a single channel.
|
|
30617
|
+
* @returns {RawImage} The masked image.
|
|
30618
|
+
* @throws {Error} If the mask is not the same size as the image.
|
|
30619
|
+
* @throws {Error} If the image does not have 4 channels.
|
|
30620
|
+
* @throws {Error} If the mask is not a single channel.
|
|
30621
|
+
*/
|
|
30622
|
+
putAlpha(mask) {
|
|
30623
|
+
if (mask.width !== this.width || mask.height !== this.height) {
|
|
30624
|
+
throw new Error(`Expected mask size to be ${this.width}x${this.height}, but got ${mask.width}x${mask.height}`);
|
|
30625
|
+
}
|
|
30626
|
+
if (mask.channels !== 1) {
|
|
30627
|
+
throw new Error(`Expected mask to have 1 channel, but got ${mask.channels}`);
|
|
30628
|
+
}
|
|
30629
|
+
|
|
30630
|
+
const this_data = this.data;
|
|
30631
|
+
const mask_data = mask.data;
|
|
30632
|
+
const num_pixels = this.width * this.height;
|
|
30633
|
+
if (this.channels === 3) {
|
|
30634
|
+
// Convert to RGBA and simultaneously apply mask to alpha channel
|
|
30635
|
+
const newData = new Uint8ClampedArray(num_pixels * 4);
|
|
30636
|
+
for (let i = 0, in_offset = 0, out_offset = 0; i < num_pixels; ++i) {
|
|
30637
|
+
newData[out_offset++] = this_data[in_offset++];
|
|
30638
|
+
newData[out_offset++] = this_data[in_offset++];
|
|
30639
|
+
newData[out_offset++] = this_data[in_offset++];
|
|
30640
|
+
newData[out_offset++] = mask_data[i];
|
|
30641
|
+
}
|
|
30642
|
+
return this._update(newData, this.width, this.height, 4);
|
|
30643
|
+
|
|
30644
|
+
} else if (this.channels === 4) {
|
|
30645
|
+
// Apply mask to alpha channel in place
|
|
30646
|
+
for (let i = 0; i < num_pixels; ++i) {
|
|
30647
|
+
this_data[4 * i + 3] = mask_data[i];
|
|
30648
|
+
}
|
|
30649
|
+
return this;
|
|
30650
|
+
}
|
|
30651
|
+
throw new Error(`Expected image to have 3 or 4 channels, but got ${this.channels}`);
|
|
30652
|
+
}
|
|
30653
|
+
|
|
29911
30654
|
/**
|
|
29912
30655
|
* Resize the image to the given dimensions. This method uses the canvas API to perform the resizing.
|
|
29913
30656
|
* @param {number} width The width of the new image. `null` or `-1` will preserve the aspect ratio.
|
|
@@ -29942,7 +30685,7 @@ class RawImage {
|
|
|
29942
30685
|
height = (width / this.width) * this.height;
|
|
29943
30686
|
}
|
|
29944
30687
|
|
|
29945
|
-
if (
|
|
30688
|
+
if (IS_BROWSER_OR_WEBWORKER) {
|
|
29946
30689
|
// TODO use `resample` in browser environment
|
|
29947
30690
|
|
|
29948
30691
|
// Store number of channels before resizing
|
|
@@ -30015,7 +30758,7 @@ class RawImage {
|
|
|
30015
30758
|
return this;
|
|
30016
30759
|
}
|
|
30017
30760
|
|
|
30018
|
-
if (
|
|
30761
|
+
if (IS_BROWSER_OR_WEBWORKER) {
|
|
30019
30762
|
// Store number of channels before padding
|
|
30020
30763
|
const numChannels = this.channels;
|
|
30021
30764
|
|
|
@@ -30064,7 +30807,7 @@ class RawImage {
|
|
|
30064
30807
|
const crop_width = x_max - x_min + 1;
|
|
30065
30808
|
const crop_height = y_max - y_min + 1;
|
|
30066
30809
|
|
|
30067
|
-
if (
|
|
30810
|
+
if (IS_BROWSER_OR_WEBWORKER) {
|
|
30068
30811
|
// Store number of channels before resizing
|
|
30069
30812
|
const numChannels = this.channels;
|
|
30070
30813
|
|
|
@@ -30112,7 +30855,7 @@ class RawImage {
|
|
|
30112
30855
|
const height_offset = (this.height - crop_height) / 2;
|
|
30113
30856
|
|
|
30114
30857
|
|
|
30115
|
-
if (
|
|
30858
|
+
if (IS_BROWSER_OR_WEBWORKER) {
|
|
30116
30859
|
// Store number of channels before resizing
|
|
30117
30860
|
const numChannels = this.channels;
|
|
30118
30861
|
|
|
@@ -30217,7 +30960,7 @@ class RawImage {
|
|
|
30217
30960
|
}
|
|
30218
30961
|
|
|
30219
30962
|
async toBlob(type = 'image/png', quality = 1) {
|
|
30220
|
-
if (!
|
|
30963
|
+
if (!IS_BROWSER_OR_WEBWORKER) {
|
|
30221
30964
|
throw new Error('toBlob() is only supported in browser environments.')
|
|
30222
30965
|
}
|
|
30223
30966
|
|
|
@@ -30243,7 +30986,7 @@ class RawImage {
|
|
|
30243
30986
|
}
|
|
30244
30987
|
|
|
30245
30988
|
toCanvas() {
|
|
30246
|
-
if (!
|
|
30989
|
+
if (!IS_BROWSER_OR_WEBWORKER) {
|
|
30247
30990
|
throw new Error('toCanvas() is only supported in browser environments.')
|
|
30248
30991
|
}
|
|
30249
30992
|
|
|
@@ -30347,8 +31090,8 @@ class RawImage {
|
|
|
30347
31090
|
*/
|
|
30348
31091
|
async save(path) {
|
|
30349
31092
|
|
|
30350
|
-
if (
|
|
30351
|
-
if (
|
|
31093
|
+
if (IS_BROWSER_OR_WEBWORKER) {
|
|
31094
|
+
if (_env_js__WEBPACK_IMPORTED_MODULE_2__.apis.IS_WEBWORKER_ENV) {
|
|
30352
31095
|
throw new Error('Unable to save an image from a Web Worker.')
|
|
30353
31096
|
}
|
|
30354
31097
|
|
|
@@ -30384,7 +31127,7 @@ class RawImage {
|
|
|
30384
31127
|
}
|
|
30385
31128
|
|
|
30386
31129
|
toSharp() {
|
|
30387
|
-
if (
|
|
31130
|
+
if (IS_BROWSER_OR_WEBWORKER) {
|
|
30388
31131
|
throw new Error('toSharp() is only supported in server-side environments.')
|
|
30389
31132
|
}
|
|
30390
31133
|
|
|
@@ -30398,6 +31141,11 @@ class RawImage {
|
|
|
30398
31141
|
}
|
|
30399
31142
|
}
|
|
30400
31143
|
|
|
31144
|
+
/**
|
|
31145
|
+
* Helper function to load an image from a URL, path, etc.
|
|
31146
|
+
*/
|
|
31147
|
+
const load_image = RawImage.read.bind(RawImage);
|
|
31148
|
+
|
|
30401
31149
|
|
|
30402
31150
|
/***/ }),
|
|
30403
31151
|
|
|
@@ -31548,6 +32296,8 @@ const DataTypeMap = Object.freeze({
|
|
|
31548
32296
|
int64: BigInt64Array,
|
|
31549
32297
|
uint64: BigUint64Array,
|
|
31550
32298
|
bool: Uint8Array,
|
|
32299
|
+
uint4: Uint8Array,
|
|
32300
|
+
int4: Int8Array,
|
|
31551
32301
|
});
|
|
31552
32302
|
|
|
31553
32303
|
/**
|
|
@@ -32869,7 +33619,7 @@ function fullHelper(size, fill_value, dtype, cls) {
|
|
|
32869
33619
|
/**
|
|
32870
33620
|
* Creates a tensor of size size filled with fill_value. The tensor's dtype is inferred from fill_value.
|
|
32871
33621
|
* @param {number[]} size A sequence of integers defining the shape of the output tensor.
|
|
32872
|
-
* @param {number|bigint} fill_value The value to fill the output tensor with.
|
|
33622
|
+
* @param {number|bigint|boolean} fill_value The value to fill the output tensor with.
|
|
32873
33623
|
* @returns {Tensor} The filled tensor.
|
|
32874
33624
|
*/
|
|
32875
33625
|
function full(size, fill_value) {
|
|
@@ -32881,6 +33631,9 @@ function full(size, fill_value) {
|
|
|
32881
33631
|
} else if (typeof fill_value === 'bigint') {
|
|
32882
33632
|
dtype = 'int64';
|
|
32883
33633
|
typedArrayCls = BigInt64Array;
|
|
33634
|
+
} else if (typeof fill_value === 'boolean') {
|
|
33635
|
+
dtype = 'bool';
|
|
33636
|
+
typedArrayCls = Uint8Array;
|
|
32884
33637
|
} else {
|
|
32885
33638
|
// TODO: support other dtypes
|
|
32886
33639
|
throw new Error(`Unsupported data type: ${typeof fill_value}`);
|
|
@@ -33062,6 +33815,8 @@ function quantize_embeddings(tensor, precision) {
|
|
|
33062
33815
|
/******/
|
|
33063
33816
|
/************************************************************************/
|
|
33064
33817
|
var __webpack_exports__ = {};
|
|
33818
|
+
// This entry needs to be wrapped in an IIFE because it needs to be isolated against other modules in the chunk.
|
|
33819
|
+
(() => {
|
|
33065
33820
|
/*!*****************************!*\
|
|
33066
33821
|
!*** ./src/transformers.js ***!
|
|
33067
33822
|
\*****************************/
|
|
@@ -33328,6 +34083,13 @@ __webpack_require__.r(__webpack_exports__);
|
|
|
33328
34083
|
/* harmony export */ HubertForSequenceClassification: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.HubertForSequenceClassification),
|
|
33329
34084
|
/* harmony export */ HubertModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.HubertModel),
|
|
33330
34085
|
/* harmony export */ HubertPreTrainedModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.HubertPreTrainedModel),
|
|
34086
|
+
/* harmony export */ IJepaForImageClassification: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.IJepaForImageClassification),
|
|
34087
|
+
/* harmony export */ IJepaModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.IJepaModel),
|
|
34088
|
+
/* harmony export */ IJepaPreTrainedModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.IJepaPreTrainedModel),
|
|
34089
|
+
/* harmony export */ Idefics3ForConditionalGeneration: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.Idefics3ForConditionalGeneration),
|
|
34090
|
+
/* harmony export */ Idefics3ImageProcessor: () => (/* reexport safe */ _models_image_processors_js__WEBPACK_IMPORTED_MODULE_13__.Idefics3ImageProcessor),
|
|
34091
|
+
/* harmony export */ Idefics3PreTrainedModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.Idefics3PreTrainedModel),
|
|
34092
|
+
/* harmony export */ Idefics3Processor: () => (/* reexport safe */ _models_processors_js__WEBPACK_IMPORTED_MODULE_16__.Idefics3Processor),
|
|
33331
34093
|
/* harmony export */ ImageClassificationPipeline: () => (/* reexport safe */ _pipelines_js__WEBPACK_IMPORTED_MODULE_1__.ImageClassificationPipeline),
|
|
33332
34094
|
/* harmony export */ ImageFeatureExtractionPipeline: () => (/* reexport safe */ _pipelines_js__WEBPACK_IMPORTED_MODULE_1__.ImageFeatureExtractionPipeline),
|
|
33333
34095
|
/* harmony export */ ImageFeatureExtractor: () => (/* reexport safe */ _models_feature_extractors_js__WEBPACK_IMPORTED_MODULE_10__.ImageFeatureExtractor),
|
|
@@ -33462,6 +34224,9 @@ __webpack_require__.r(__webpack_exports__);
|
|
|
33462
34224
|
/* harmony export */ OPTModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.OPTModel),
|
|
33463
34225
|
/* harmony export */ OPTPreTrainedModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.OPTPreTrainedModel),
|
|
33464
34226
|
/* harmony export */ ObjectDetectionPipeline: () => (/* reexport safe */ _pipelines_js__WEBPACK_IMPORTED_MODULE_1__.ObjectDetectionPipeline),
|
|
34227
|
+
/* harmony export */ Olmo2ForCausalLM: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.Olmo2ForCausalLM),
|
|
34228
|
+
/* harmony export */ Olmo2Model: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.Olmo2Model),
|
|
34229
|
+
/* harmony export */ Olmo2PreTrainedModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.Olmo2PreTrainedModel),
|
|
33465
34230
|
/* harmony export */ OlmoForCausalLM: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.OlmoForCausalLM),
|
|
33466
34231
|
/* harmony export */ OlmoModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.OlmoModel),
|
|
33467
34232
|
/* harmony export */ OlmoPreTrainedModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.OlmoPreTrainedModel),
|
|
@@ -33478,6 +34243,9 @@ __webpack_require__.r(__webpack_exports__);
|
|
|
33478
34243
|
/* harmony export */ Owlv2ImageProcessor: () => (/* reexport safe */ _models_image_processors_js__WEBPACK_IMPORTED_MODULE_13__.Owlv2ImageProcessor),
|
|
33479
34244
|
/* harmony export */ Owlv2Model: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.Owlv2Model),
|
|
33480
34245
|
/* harmony export */ Owlv2PreTrainedModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.Owlv2PreTrainedModel),
|
|
34246
|
+
/* harmony export */ PaliGemmaForConditionalGeneration: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.PaliGemmaForConditionalGeneration),
|
|
34247
|
+
/* harmony export */ PaliGemmaPreTrainedModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.PaliGemmaPreTrainedModel),
|
|
34248
|
+
/* harmony export */ PaliGemmaProcessor: () => (/* reexport safe */ _models_processors_js__WEBPACK_IMPORTED_MODULE_16__.PaliGemmaProcessor),
|
|
33481
34249
|
/* harmony export */ PatchTSMixerForPrediction: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.PatchTSMixerForPrediction),
|
|
33482
34250
|
/* harmony export */ PatchTSMixerModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.PatchTSMixerModel),
|
|
33483
34251
|
/* harmony export */ PatchTSMixerPreTrainedModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.PatchTSMixerPreTrainedModel),
|
|
@@ -33719,6 +34487,7 @@ __webpack_require__.r(__webpack_exports__);
|
|
|
33719
34487
|
/* harmony export */ interpolate_data: () => (/* reexport safe */ _utils_maths_js__WEBPACK_IMPORTED_MODULE_8__.interpolate_data),
|
|
33720
34488
|
/* harmony export */ is_chinese_char: () => (/* reexport safe */ _tokenizers_js__WEBPACK_IMPORTED_MODULE_3__.is_chinese_char),
|
|
33721
34489
|
/* harmony export */ layer_norm: () => (/* reexport safe */ _utils_tensor_js__WEBPACK_IMPORTED_MODULE_7__.layer_norm),
|
|
34490
|
+
/* harmony export */ load_image: () => (/* reexport safe */ _utils_image_js__WEBPACK_IMPORTED_MODULE_6__.load_image),
|
|
33722
34491
|
/* harmony export */ log_softmax: () => (/* reexport safe */ _utils_maths_js__WEBPACK_IMPORTED_MODULE_8__.log_softmax),
|
|
33723
34492
|
/* harmony export */ magnitude: () => (/* reexport safe */ _utils_maths_js__WEBPACK_IMPORTED_MODULE_8__.magnitude),
|
|
33724
34493
|
/* harmony export */ matmul: () => (/* reexport safe */ _utils_tensor_js__WEBPACK_IMPORTED_MODULE_7__.matmul),
|
|
@@ -33808,6 +34577,8 @@ __webpack_require__.r(__webpack_exports__);
|
|
|
33808
34577
|
|
|
33809
34578
|
|
|
33810
34579
|
|
|
34580
|
+
|
|
34581
|
+
})();
|
|
33811
34582
|
|
|
33812
34583
|
var __webpack_exports__ASTFeatureExtractor = __webpack_exports__.ASTFeatureExtractor;
|
|
33813
34584
|
var __webpack_exports__ASTForAudioClassification = __webpack_exports__.ASTForAudioClassification;
|
|
@@ -34070,6 +34841,13 @@ var __webpack_exports__HubertForCTC = __webpack_exports__.HubertForCTC;
|
|
|
34070
34841
|
var __webpack_exports__HubertForSequenceClassification = __webpack_exports__.HubertForSequenceClassification;
|
|
34071
34842
|
var __webpack_exports__HubertModel = __webpack_exports__.HubertModel;
|
|
34072
34843
|
var __webpack_exports__HubertPreTrainedModel = __webpack_exports__.HubertPreTrainedModel;
|
|
34844
|
+
var __webpack_exports__IJepaForImageClassification = __webpack_exports__.IJepaForImageClassification;
|
|
34845
|
+
var __webpack_exports__IJepaModel = __webpack_exports__.IJepaModel;
|
|
34846
|
+
var __webpack_exports__IJepaPreTrainedModel = __webpack_exports__.IJepaPreTrainedModel;
|
|
34847
|
+
var __webpack_exports__Idefics3ForConditionalGeneration = __webpack_exports__.Idefics3ForConditionalGeneration;
|
|
34848
|
+
var __webpack_exports__Idefics3ImageProcessor = __webpack_exports__.Idefics3ImageProcessor;
|
|
34849
|
+
var __webpack_exports__Idefics3PreTrainedModel = __webpack_exports__.Idefics3PreTrainedModel;
|
|
34850
|
+
var __webpack_exports__Idefics3Processor = __webpack_exports__.Idefics3Processor;
|
|
34073
34851
|
var __webpack_exports__ImageClassificationPipeline = __webpack_exports__.ImageClassificationPipeline;
|
|
34074
34852
|
var __webpack_exports__ImageFeatureExtractionPipeline = __webpack_exports__.ImageFeatureExtractionPipeline;
|
|
34075
34853
|
var __webpack_exports__ImageFeatureExtractor = __webpack_exports__.ImageFeatureExtractor;
|
|
@@ -34204,6 +34982,9 @@ var __webpack_exports__OPTForCausalLM = __webpack_exports__.OPTForCausalLM;
|
|
|
34204
34982
|
var __webpack_exports__OPTModel = __webpack_exports__.OPTModel;
|
|
34205
34983
|
var __webpack_exports__OPTPreTrainedModel = __webpack_exports__.OPTPreTrainedModel;
|
|
34206
34984
|
var __webpack_exports__ObjectDetectionPipeline = __webpack_exports__.ObjectDetectionPipeline;
|
|
34985
|
+
var __webpack_exports__Olmo2ForCausalLM = __webpack_exports__.Olmo2ForCausalLM;
|
|
34986
|
+
var __webpack_exports__Olmo2Model = __webpack_exports__.Olmo2Model;
|
|
34987
|
+
var __webpack_exports__Olmo2PreTrainedModel = __webpack_exports__.Olmo2PreTrainedModel;
|
|
34207
34988
|
var __webpack_exports__OlmoForCausalLM = __webpack_exports__.OlmoForCausalLM;
|
|
34208
34989
|
var __webpack_exports__OlmoModel = __webpack_exports__.OlmoModel;
|
|
34209
34990
|
var __webpack_exports__OlmoPreTrainedModel = __webpack_exports__.OlmoPreTrainedModel;
|
|
@@ -34220,6 +35001,9 @@ var __webpack_exports__Owlv2ForObjectDetection = __webpack_exports__.Owlv2ForObj
|
|
|
34220
35001
|
var __webpack_exports__Owlv2ImageProcessor = __webpack_exports__.Owlv2ImageProcessor;
|
|
34221
35002
|
var __webpack_exports__Owlv2Model = __webpack_exports__.Owlv2Model;
|
|
34222
35003
|
var __webpack_exports__Owlv2PreTrainedModel = __webpack_exports__.Owlv2PreTrainedModel;
|
|
35004
|
+
var __webpack_exports__PaliGemmaForConditionalGeneration = __webpack_exports__.PaliGemmaForConditionalGeneration;
|
|
35005
|
+
var __webpack_exports__PaliGemmaPreTrainedModel = __webpack_exports__.PaliGemmaPreTrainedModel;
|
|
35006
|
+
var __webpack_exports__PaliGemmaProcessor = __webpack_exports__.PaliGemmaProcessor;
|
|
34223
35007
|
var __webpack_exports__PatchTSMixerForPrediction = __webpack_exports__.PatchTSMixerForPrediction;
|
|
34224
35008
|
var __webpack_exports__PatchTSMixerModel = __webpack_exports__.PatchTSMixerModel;
|
|
34225
35009
|
var __webpack_exports__PatchTSMixerPreTrainedModel = __webpack_exports__.PatchTSMixerPreTrainedModel;
|
|
@@ -34461,6 +35245,7 @@ var __webpack_exports__interpolate_4d = __webpack_exports__.interpolate_4d;
|
|
|
34461
35245
|
var __webpack_exports__interpolate_data = __webpack_exports__.interpolate_data;
|
|
34462
35246
|
var __webpack_exports__is_chinese_char = __webpack_exports__.is_chinese_char;
|
|
34463
35247
|
var __webpack_exports__layer_norm = __webpack_exports__.layer_norm;
|
|
35248
|
+
var __webpack_exports__load_image = __webpack_exports__.load_image;
|
|
34464
35249
|
var __webpack_exports__log_softmax = __webpack_exports__.log_softmax;
|
|
34465
35250
|
var __webpack_exports__magnitude = __webpack_exports__.magnitude;
|
|
34466
35251
|
var __webpack_exports__matmul = __webpack_exports__.matmul;
|
|
@@ -34487,6 +35272,6 @@ var __webpack_exports__topk = __webpack_exports__.topk;
|
|
|
34487
35272
|
var __webpack_exports__window_function = __webpack_exports__.window_function;
|
|
34488
35273
|
var __webpack_exports__zeros = __webpack_exports__.zeros;
|
|
34489
35274
|
var __webpack_exports__zeros_like = __webpack_exports__.zeros_like;
|
|
34490
|
-
export { __webpack_exports__ASTFeatureExtractor as ASTFeatureExtractor, __webpack_exports__ASTForAudioClassification as ASTForAudioClassification, __webpack_exports__ASTModel as ASTModel, __webpack_exports__ASTPreTrainedModel as ASTPreTrainedModel, __webpack_exports__AlbertForMaskedLM as AlbertForMaskedLM, __webpack_exports__AlbertForQuestionAnswering as AlbertForQuestionAnswering, __webpack_exports__AlbertForSequenceClassification as AlbertForSequenceClassification, __webpack_exports__AlbertModel as AlbertModel, __webpack_exports__AlbertPreTrainedModel as AlbertPreTrainedModel, __webpack_exports__AlbertTokenizer as AlbertTokenizer, __webpack_exports__AudioClassificationPipeline as AudioClassificationPipeline, __webpack_exports__AutoConfig as AutoConfig, __webpack_exports__AutoFeatureExtractor as AutoFeatureExtractor, __webpack_exports__AutoImageProcessor as AutoImageProcessor, __webpack_exports__AutoModel as AutoModel, __webpack_exports__AutoModelForAudioClassification as AutoModelForAudioClassification, __webpack_exports__AutoModelForAudioFrameClassification as AutoModelForAudioFrameClassification, __webpack_exports__AutoModelForCTC as AutoModelForCTC, __webpack_exports__AutoModelForCausalLM as AutoModelForCausalLM, __webpack_exports__AutoModelForDepthEstimation as AutoModelForDepthEstimation, __webpack_exports__AutoModelForDocumentQuestionAnswering as AutoModelForDocumentQuestionAnswering, __webpack_exports__AutoModelForImageClassification as AutoModelForImageClassification, __webpack_exports__AutoModelForImageFeatureExtraction as AutoModelForImageFeatureExtraction, __webpack_exports__AutoModelForImageMatting as AutoModelForImageMatting, __webpack_exports__AutoModelForImageSegmentation as AutoModelForImageSegmentation, __webpack_exports__AutoModelForImageToImage as AutoModelForImageToImage, __webpack_exports__AutoModelForMaskGeneration as AutoModelForMaskGeneration, __webpack_exports__AutoModelForMaskedLM as AutoModelForMaskedLM, 
__webpack_exports__AutoModelForNormalEstimation as AutoModelForNormalEstimation, __webpack_exports__AutoModelForObjectDetection as AutoModelForObjectDetection, __webpack_exports__AutoModelForPoseEstimation as AutoModelForPoseEstimation, __webpack_exports__AutoModelForQuestionAnswering as AutoModelForQuestionAnswering, __webpack_exports__AutoModelForSemanticSegmentation as AutoModelForSemanticSegmentation, __webpack_exports__AutoModelForSeq2SeqLM as AutoModelForSeq2SeqLM, __webpack_exports__AutoModelForSequenceClassification as AutoModelForSequenceClassification, __webpack_exports__AutoModelForSpeechSeq2Seq as AutoModelForSpeechSeq2Seq, __webpack_exports__AutoModelForTextToSpectrogram as AutoModelForTextToSpectrogram, __webpack_exports__AutoModelForTextToWaveform as AutoModelForTextToWaveform, __webpack_exports__AutoModelForTokenClassification as AutoModelForTokenClassification, __webpack_exports__AutoModelForUniversalSegmentation as AutoModelForUniversalSegmentation, __webpack_exports__AutoModelForVision2Seq as AutoModelForVision2Seq, __webpack_exports__AutoModelForXVector as AutoModelForXVector, __webpack_exports__AutoModelForZeroShotObjectDetection as AutoModelForZeroShotObjectDetection, __webpack_exports__AutoProcessor as AutoProcessor, __webpack_exports__AutoTokenizer as AutoTokenizer, __webpack_exports__AutomaticSpeechRecognitionPipeline as AutomaticSpeechRecognitionPipeline, __webpack_exports__BartForConditionalGeneration as BartForConditionalGeneration, __webpack_exports__BartForSequenceClassification as BartForSequenceClassification, __webpack_exports__BartModel as BartModel, __webpack_exports__BartPretrainedModel as BartPretrainedModel, __webpack_exports__BartTokenizer as BartTokenizer, __webpack_exports__BaseModelOutput as BaseModelOutput, __webpack_exports__BaseStreamer as BaseStreamer, __webpack_exports__BeitFeatureExtractor as BeitFeatureExtractor, __webpack_exports__BeitForImageClassification as BeitForImageClassification, __webpack_exports__BeitModel 
as BeitModel, __webpack_exports__BeitPreTrainedModel as BeitPreTrainedModel, __webpack_exports__BertForMaskedLM as BertForMaskedLM, __webpack_exports__BertForQuestionAnswering as BertForQuestionAnswering, __webpack_exports__BertForSequenceClassification as BertForSequenceClassification, __webpack_exports__BertForTokenClassification as BertForTokenClassification, __webpack_exports__BertModel as BertModel, __webpack_exports__BertPreTrainedModel as BertPreTrainedModel, __webpack_exports__BertTokenizer as BertTokenizer, __webpack_exports__BitImageProcessor as BitImageProcessor, __webpack_exports__BlenderbotForConditionalGeneration as BlenderbotForConditionalGeneration, __webpack_exports__BlenderbotModel as BlenderbotModel, __webpack_exports__BlenderbotPreTrainedModel as BlenderbotPreTrainedModel, __webpack_exports__BlenderbotSmallForConditionalGeneration as BlenderbotSmallForConditionalGeneration, __webpack_exports__BlenderbotSmallModel as BlenderbotSmallModel, __webpack_exports__BlenderbotSmallPreTrainedModel as BlenderbotSmallPreTrainedModel, __webpack_exports__BlenderbotSmallTokenizer as BlenderbotSmallTokenizer, __webpack_exports__BlenderbotTokenizer as BlenderbotTokenizer, __webpack_exports__BloomForCausalLM as BloomForCausalLM, __webpack_exports__BloomModel as BloomModel, __webpack_exports__BloomPreTrainedModel as BloomPreTrainedModel, __webpack_exports__BloomTokenizer as BloomTokenizer, __webpack_exports__CLIPFeatureExtractor as CLIPFeatureExtractor, __webpack_exports__CLIPImageProcessor as CLIPImageProcessor, __webpack_exports__CLIPModel as CLIPModel, __webpack_exports__CLIPPreTrainedModel as CLIPPreTrainedModel, __webpack_exports__CLIPSegForImageSegmentation as CLIPSegForImageSegmentation, __webpack_exports__CLIPSegModel as CLIPSegModel, __webpack_exports__CLIPSegPreTrainedModel as CLIPSegPreTrainedModel, __webpack_exports__CLIPTextModel as CLIPTextModel, __webpack_exports__CLIPTextModelWithProjection as CLIPTextModelWithProjection, 
__webpack_exports__CLIPTokenizer as CLIPTokenizer, __webpack_exports__CLIPVisionModel as CLIPVisionModel, __webpack_exports__CLIPVisionModelWithProjection as CLIPVisionModelWithProjection, __webpack_exports__CamembertForMaskedLM as CamembertForMaskedLM, __webpack_exports__CamembertForQuestionAnswering as CamembertForQuestionAnswering, __webpack_exports__CamembertForSequenceClassification as CamembertForSequenceClassification, __webpack_exports__CamembertForTokenClassification as CamembertForTokenClassification, __webpack_exports__CamembertModel as CamembertModel, __webpack_exports__CamembertPreTrainedModel as CamembertPreTrainedModel, __webpack_exports__CamembertTokenizer as CamembertTokenizer, __webpack_exports__CausalLMOutput as CausalLMOutput, __webpack_exports__CausalLMOutputWithPast as CausalLMOutputWithPast, __webpack_exports__ChineseCLIPFeatureExtractor as ChineseCLIPFeatureExtractor, __webpack_exports__ChineseCLIPModel as ChineseCLIPModel, __webpack_exports__ChineseCLIPPreTrainedModel as ChineseCLIPPreTrainedModel, __webpack_exports__ClapAudioModelWithProjection as ClapAudioModelWithProjection, __webpack_exports__ClapFeatureExtractor as ClapFeatureExtractor, __webpack_exports__ClapModel as ClapModel, __webpack_exports__ClapPreTrainedModel as ClapPreTrainedModel, __webpack_exports__ClapTextModelWithProjection as ClapTextModelWithProjection, __webpack_exports__ClassifierFreeGuidanceLogitsProcessor as ClassifierFreeGuidanceLogitsProcessor, __webpack_exports__CodeGenForCausalLM as CodeGenForCausalLM, __webpack_exports__CodeGenModel as CodeGenModel, __webpack_exports__CodeGenPreTrainedModel as CodeGenPreTrainedModel, __webpack_exports__CodeGenTokenizer as CodeGenTokenizer, __webpack_exports__CodeLlamaTokenizer as CodeLlamaTokenizer, __webpack_exports__CohereForCausalLM as CohereForCausalLM, __webpack_exports__CohereModel as CohereModel, __webpack_exports__CoherePreTrainedModel as CoherePreTrainedModel, __webpack_exports__CohereTokenizer as CohereTokenizer, 
__webpack_exports__ConvBertForMaskedLM as ConvBertForMaskedLM, __webpack_exports__ConvBertForQuestionAnswering as ConvBertForQuestionAnswering, __webpack_exports__ConvBertForSequenceClassification as ConvBertForSequenceClassification, __webpack_exports__ConvBertForTokenClassification as ConvBertForTokenClassification, __webpack_exports__ConvBertModel as ConvBertModel, __webpack_exports__ConvBertPreTrainedModel as ConvBertPreTrainedModel, __webpack_exports__ConvBertTokenizer as ConvBertTokenizer, __webpack_exports__ConvNextFeatureExtractor as ConvNextFeatureExtractor, __webpack_exports__ConvNextForImageClassification as ConvNextForImageClassification, __webpack_exports__ConvNextImageProcessor as ConvNextImageProcessor, __webpack_exports__ConvNextModel as ConvNextModel, __webpack_exports__ConvNextPreTrainedModel as ConvNextPreTrainedModel, __webpack_exports__ConvNextV2ForImageClassification as ConvNextV2ForImageClassification, __webpack_exports__ConvNextV2Model as ConvNextV2Model, __webpack_exports__ConvNextV2PreTrainedModel as ConvNextV2PreTrainedModel, __webpack_exports__DPTFeatureExtractor as DPTFeatureExtractor, __webpack_exports__DPTForDepthEstimation as DPTForDepthEstimation, __webpack_exports__DPTImageProcessor as DPTImageProcessor, __webpack_exports__DPTModel as DPTModel, __webpack_exports__DPTPreTrainedModel as DPTPreTrainedModel, __webpack_exports__DebertaForMaskedLM as DebertaForMaskedLM, __webpack_exports__DebertaForQuestionAnswering as DebertaForQuestionAnswering, __webpack_exports__DebertaForSequenceClassification as DebertaForSequenceClassification, __webpack_exports__DebertaForTokenClassification as DebertaForTokenClassification, __webpack_exports__DebertaModel as DebertaModel, __webpack_exports__DebertaPreTrainedModel as DebertaPreTrainedModel, __webpack_exports__DebertaTokenizer as DebertaTokenizer, __webpack_exports__DebertaV2ForMaskedLM as DebertaV2ForMaskedLM, __webpack_exports__DebertaV2ForQuestionAnswering as DebertaV2ForQuestionAnswering, 
__webpack_exports__DebertaV2ForSequenceClassification as DebertaV2ForSequenceClassification, __webpack_exports__DebertaV2ForTokenClassification as DebertaV2ForTokenClassification, __webpack_exports__DebertaV2Model as DebertaV2Model, __webpack_exports__DebertaV2PreTrainedModel as DebertaV2PreTrainedModel, __webpack_exports__DebertaV2Tokenizer as DebertaV2Tokenizer, __webpack_exports__DecisionTransformerModel as DecisionTransformerModel, __webpack_exports__DecisionTransformerPreTrainedModel as DecisionTransformerPreTrainedModel, __webpack_exports__DeiTFeatureExtractor as DeiTFeatureExtractor, __webpack_exports__DeiTForImageClassification as DeiTForImageClassification, __webpack_exports__DeiTImageProcessor as DeiTImageProcessor, __webpack_exports__DeiTModel as DeiTModel, __webpack_exports__DeiTPreTrainedModel as DeiTPreTrainedModel, __webpack_exports__DepthAnythingForDepthEstimation as DepthAnythingForDepthEstimation, __webpack_exports__DepthAnythingPreTrainedModel as DepthAnythingPreTrainedModel, __webpack_exports__DepthEstimationPipeline as DepthEstimationPipeline, __webpack_exports__DepthProForDepthEstimation as DepthProForDepthEstimation, __webpack_exports__DepthProPreTrainedModel as DepthProPreTrainedModel, __webpack_exports__DetrFeatureExtractor as DetrFeatureExtractor, __webpack_exports__DetrForObjectDetection as DetrForObjectDetection, __webpack_exports__DetrForSegmentation as DetrForSegmentation, __webpack_exports__DetrImageProcessor as DetrImageProcessor, __webpack_exports__DetrModel as DetrModel, __webpack_exports__DetrObjectDetectionOutput as DetrObjectDetectionOutput, __webpack_exports__DetrPreTrainedModel as DetrPreTrainedModel, __webpack_exports__DetrSegmentationOutput as DetrSegmentationOutput, __webpack_exports__Dinov2ForImageClassification as Dinov2ForImageClassification, __webpack_exports__Dinov2Model as Dinov2Model, __webpack_exports__Dinov2PreTrainedModel as Dinov2PreTrainedModel, __webpack_exports__DistilBertForMaskedLM as DistilBertForMaskedLM, 
__webpack_exports__DistilBertForQuestionAnswering as DistilBertForQuestionAnswering, __webpack_exports__DistilBertForSequenceClassification as DistilBertForSequenceClassification, __webpack_exports__DistilBertForTokenClassification as DistilBertForTokenClassification, __webpack_exports__DistilBertModel as DistilBertModel, __webpack_exports__DistilBertPreTrainedModel as DistilBertPreTrainedModel, __webpack_exports__DistilBertTokenizer as DistilBertTokenizer, __webpack_exports__DocumentQuestionAnsweringPipeline as DocumentQuestionAnsweringPipeline, __webpack_exports__DonutFeatureExtractor as DonutFeatureExtractor, __webpack_exports__DonutImageProcessor as DonutImageProcessor, __webpack_exports__DonutSwinModel as DonutSwinModel, __webpack_exports__DonutSwinPreTrainedModel as DonutSwinPreTrainedModel, __webpack_exports__EfficientNetForImageClassification as EfficientNetForImageClassification, __webpack_exports__EfficientNetImageProcessor as EfficientNetImageProcessor, __webpack_exports__EfficientNetModel as EfficientNetModel, __webpack_exports__EfficientNetPreTrainedModel as EfficientNetPreTrainedModel, __webpack_exports__ElectraForMaskedLM as ElectraForMaskedLM, __webpack_exports__ElectraForQuestionAnswering as ElectraForQuestionAnswering, __webpack_exports__ElectraForSequenceClassification as ElectraForSequenceClassification, __webpack_exports__ElectraForTokenClassification as ElectraForTokenClassification, __webpack_exports__ElectraModel as ElectraModel, __webpack_exports__ElectraPreTrainedModel as ElectraPreTrainedModel, __webpack_exports__ElectraTokenizer as ElectraTokenizer, __webpack_exports__EosTokenCriteria as EosTokenCriteria, __webpack_exports__EsmForMaskedLM as EsmForMaskedLM, __webpack_exports__EsmForSequenceClassification as EsmForSequenceClassification, __webpack_exports__EsmForTokenClassification as EsmForTokenClassification, __webpack_exports__EsmModel as EsmModel, __webpack_exports__EsmPreTrainedModel as EsmPreTrainedModel, 
__webpack_exports__EsmTokenizer as EsmTokenizer, __webpack_exports__FFT as FFT, __webpack_exports__FalconForCausalLM as FalconForCausalLM, __webpack_exports__FalconModel as FalconModel, __webpack_exports__FalconPreTrainedModel as FalconPreTrainedModel, __webpack_exports__FalconTokenizer as FalconTokenizer, __webpack_exports__FastViTForImageClassification as FastViTForImageClassification, __webpack_exports__FastViTModel as FastViTModel, __webpack_exports__FastViTPreTrainedModel as FastViTPreTrainedModel, __webpack_exports__FeatureExtractionPipeline as FeatureExtractionPipeline, __webpack_exports__FeatureExtractor as FeatureExtractor, __webpack_exports__FillMaskPipeline as FillMaskPipeline, __webpack_exports__Florence2ForConditionalGeneration as Florence2ForConditionalGeneration, __webpack_exports__Florence2PreTrainedModel as Florence2PreTrainedModel, __webpack_exports__Florence2Processor as Florence2Processor, __webpack_exports__ForcedBOSTokenLogitsProcessor as ForcedBOSTokenLogitsProcessor, __webpack_exports__ForcedEOSTokenLogitsProcessor as ForcedEOSTokenLogitsProcessor, __webpack_exports__GLPNFeatureExtractor as GLPNFeatureExtractor, __webpack_exports__GLPNForDepthEstimation as GLPNForDepthEstimation, __webpack_exports__GLPNModel as GLPNModel, __webpack_exports__GLPNPreTrainedModel as GLPNPreTrainedModel, __webpack_exports__GPT2LMHeadModel as GPT2LMHeadModel, __webpack_exports__GPT2Model as GPT2Model, __webpack_exports__GPT2PreTrainedModel as GPT2PreTrainedModel, __webpack_exports__GPT2Tokenizer as GPT2Tokenizer, __webpack_exports__GPTBigCodeForCausalLM as GPTBigCodeForCausalLM, __webpack_exports__GPTBigCodeModel as GPTBigCodeModel, __webpack_exports__GPTBigCodePreTrainedModel as GPTBigCodePreTrainedModel, __webpack_exports__GPTJForCausalLM as GPTJForCausalLM, __webpack_exports__GPTJModel as GPTJModel, __webpack_exports__GPTJPreTrainedModel as GPTJPreTrainedModel, __webpack_exports__GPTNeoForCausalLM as GPTNeoForCausalLM, __webpack_exports__GPTNeoModel as 
GPTNeoModel, __webpack_exports__GPTNeoPreTrainedModel as GPTNeoPreTrainedModel, __webpack_exports__GPTNeoXForCausalLM as GPTNeoXForCausalLM, __webpack_exports__GPTNeoXModel as GPTNeoXModel, __webpack_exports__GPTNeoXPreTrainedModel as GPTNeoXPreTrainedModel, __webpack_exports__GPTNeoXTokenizer as GPTNeoXTokenizer, __webpack_exports__Gemma2ForCausalLM as Gemma2ForCausalLM, __webpack_exports__Gemma2Model as Gemma2Model, __webpack_exports__Gemma2PreTrainedModel as Gemma2PreTrainedModel, __webpack_exports__GemmaForCausalLM as GemmaForCausalLM, __webpack_exports__GemmaModel as GemmaModel, __webpack_exports__GemmaPreTrainedModel as GemmaPreTrainedModel, __webpack_exports__GemmaTokenizer as GemmaTokenizer, __webpack_exports__GraniteForCausalLM as GraniteForCausalLM, __webpack_exports__GraniteModel as GraniteModel, __webpack_exports__GranitePreTrainedModel as GranitePreTrainedModel, __webpack_exports__Grok1Tokenizer as Grok1Tokenizer, __webpack_exports__GroupViTModel as GroupViTModel, __webpack_exports__GroupViTPreTrainedModel as GroupViTPreTrainedModel, __webpack_exports__HerbertTokenizer as HerbertTokenizer, __webpack_exports__HieraForImageClassification as HieraForImageClassification, __webpack_exports__HieraModel as HieraModel, __webpack_exports__HieraPreTrainedModel as HieraPreTrainedModel, __webpack_exports__HubertForCTC as HubertForCTC, __webpack_exports__HubertForSequenceClassification as HubertForSequenceClassification, __webpack_exports__HubertModel as HubertModel, __webpack_exports__HubertPreTrainedModel as HubertPreTrainedModel, __webpack_exports__ImageClassificationPipeline as ImageClassificationPipeline, __webpack_exports__ImageFeatureExtractionPipeline as ImageFeatureExtractionPipeline, __webpack_exports__ImageFeatureExtractor as ImageFeatureExtractor, __webpack_exports__ImageMattingOutput as ImageMattingOutput, __webpack_exports__ImageProcessor as ImageProcessor, __webpack_exports__ImageSegmentationPipeline as ImageSegmentationPipeline, 
__webpack_exports__ImageToImagePipeline as ImageToImagePipeline, __webpack_exports__ImageToTextPipeline as ImageToTextPipeline, __webpack_exports__InterruptableStoppingCriteria as InterruptableStoppingCriteria, __webpack_exports__JAISLMHeadModel as JAISLMHeadModel, __webpack_exports__JAISModel as JAISModel, __webpack_exports__JAISPreTrainedModel as JAISPreTrainedModel, __webpack_exports__JinaCLIPImageProcessor as JinaCLIPImageProcessor, __webpack_exports__JinaCLIPModel as JinaCLIPModel, __webpack_exports__JinaCLIPPreTrainedModel as JinaCLIPPreTrainedModel, __webpack_exports__JinaCLIPProcessor as JinaCLIPProcessor, __webpack_exports__JinaCLIPTextModel as JinaCLIPTextModel, __webpack_exports__JinaCLIPVisionModel as JinaCLIPVisionModel, __webpack_exports__LlamaForCausalLM as LlamaForCausalLM, __webpack_exports__LlamaModel as LlamaModel, __webpack_exports__LlamaPreTrainedModel as LlamaPreTrainedModel, __webpack_exports__LlamaTokenizer as LlamaTokenizer, __webpack_exports__LlavaForConditionalGeneration as LlavaForConditionalGeneration, __webpack_exports__LlavaOnevisionForConditionalGeneration as LlavaOnevisionForConditionalGeneration, __webpack_exports__LlavaOnevisionImageProcessor as LlavaOnevisionImageProcessor, __webpack_exports__LlavaPreTrainedModel as LlavaPreTrainedModel, __webpack_exports__LogitsProcessor as LogitsProcessor, __webpack_exports__LogitsProcessorList as LogitsProcessorList, __webpack_exports__LogitsWarper as LogitsWarper, __webpack_exports__LongT5ForConditionalGeneration as LongT5ForConditionalGeneration, __webpack_exports__LongT5Model as LongT5Model, __webpack_exports__LongT5PreTrainedModel as LongT5PreTrainedModel, __webpack_exports__M2M100ForConditionalGeneration as M2M100ForConditionalGeneration, __webpack_exports__M2M100Model as M2M100Model, __webpack_exports__M2M100PreTrainedModel as M2M100PreTrainedModel, __webpack_exports__M2M100Tokenizer as M2M100Tokenizer, __webpack_exports__MBart50Tokenizer as MBart50Tokenizer, 
__webpack_exports__MBartForCausalLM as MBartForCausalLM, __webpack_exports__MBartForConditionalGeneration as MBartForConditionalGeneration, __webpack_exports__MBartForSequenceClassification as MBartForSequenceClassification, __webpack_exports__MBartModel as MBartModel, __webpack_exports__MBartPreTrainedModel as MBartPreTrainedModel, __webpack_exports__MBartTokenizer as MBartTokenizer, __webpack_exports__MPNetForMaskedLM as MPNetForMaskedLM, __webpack_exports__MPNetForQuestionAnswering as MPNetForQuestionAnswering, __webpack_exports__MPNetForSequenceClassification as MPNetForSequenceClassification, __webpack_exports__MPNetForTokenClassification as MPNetForTokenClassification, __webpack_exports__MPNetModel as MPNetModel, __webpack_exports__MPNetPreTrainedModel as MPNetPreTrainedModel, __webpack_exports__MPNetTokenizer as MPNetTokenizer, __webpack_exports__MT5ForConditionalGeneration as MT5ForConditionalGeneration, __webpack_exports__MT5Model as MT5Model, __webpack_exports__MT5PreTrainedModel as MT5PreTrainedModel, __webpack_exports__MarianMTModel as MarianMTModel, __webpack_exports__MarianModel as MarianModel, __webpack_exports__MarianPreTrainedModel as MarianPreTrainedModel, __webpack_exports__MarianTokenizer as MarianTokenizer, __webpack_exports__Mask2FormerImageProcessor as Mask2FormerImageProcessor, __webpack_exports__MaskFormerFeatureExtractor as MaskFormerFeatureExtractor, __webpack_exports__MaskFormerForInstanceSegmentation as MaskFormerForInstanceSegmentation, __webpack_exports__MaskFormerImageProcessor as MaskFormerImageProcessor, __webpack_exports__MaskFormerModel as MaskFormerModel, __webpack_exports__MaskFormerPreTrainedModel as MaskFormerPreTrainedModel, __webpack_exports__MaskedLMOutput as MaskedLMOutput, __webpack_exports__MaxLengthCriteria as MaxLengthCriteria, __webpack_exports__MgpstrForSceneTextRecognition as MgpstrForSceneTextRecognition, __webpack_exports__MgpstrModelOutput as MgpstrModelOutput, __webpack_exports__MgpstrPreTrainedModel as 
MgpstrPreTrainedModel, __webpack_exports__MgpstrProcessor as MgpstrProcessor, __webpack_exports__MgpstrTokenizer as MgpstrTokenizer, __webpack_exports__MinLengthLogitsProcessor as MinLengthLogitsProcessor, __webpack_exports__MinNewTokensLengthLogitsProcessor as MinNewTokensLengthLogitsProcessor, __webpack_exports__MistralForCausalLM as MistralForCausalLM, __webpack_exports__MistralModel as MistralModel, __webpack_exports__MistralPreTrainedModel as MistralPreTrainedModel, __webpack_exports__MobileBertForMaskedLM as MobileBertForMaskedLM, __webpack_exports__MobileBertForQuestionAnswering as MobileBertForQuestionAnswering, __webpack_exports__MobileBertForSequenceClassification as MobileBertForSequenceClassification, __webpack_exports__MobileBertModel as MobileBertModel, __webpack_exports__MobileBertPreTrainedModel as MobileBertPreTrainedModel, __webpack_exports__MobileBertTokenizer as MobileBertTokenizer, __webpack_exports__MobileLLMForCausalLM as MobileLLMForCausalLM, __webpack_exports__MobileLLMModel as MobileLLMModel, __webpack_exports__MobileLLMPreTrainedModel as MobileLLMPreTrainedModel, __webpack_exports__MobileNetV1FeatureExtractor as MobileNetV1FeatureExtractor, __webpack_exports__MobileNetV1ForImageClassification as MobileNetV1ForImageClassification, __webpack_exports__MobileNetV1ImageProcessor as MobileNetV1ImageProcessor, __webpack_exports__MobileNetV1Model as MobileNetV1Model, __webpack_exports__MobileNetV1PreTrainedModel as MobileNetV1PreTrainedModel, __webpack_exports__MobileNetV2FeatureExtractor as MobileNetV2FeatureExtractor, __webpack_exports__MobileNetV2ForImageClassification as MobileNetV2ForImageClassification, __webpack_exports__MobileNetV2ImageProcessor as MobileNetV2ImageProcessor, __webpack_exports__MobileNetV2Model as MobileNetV2Model, __webpack_exports__MobileNetV2PreTrainedModel as MobileNetV2PreTrainedModel, __webpack_exports__MobileNetV3FeatureExtractor as MobileNetV3FeatureExtractor, __webpack_exports__MobileNetV3ForImageClassification as 
MobileNetV3ForImageClassification, __webpack_exports__MobileNetV3ImageProcessor as MobileNetV3ImageProcessor, __webpack_exports__MobileNetV3Model as MobileNetV3Model, __webpack_exports__MobileNetV3PreTrainedModel as MobileNetV3PreTrainedModel, __webpack_exports__MobileNetV4FeatureExtractor as MobileNetV4FeatureExtractor, __webpack_exports__MobileNetV4ForImageClassification as MobileNetV4ForImageClassification, __webpack_exports__MobileNetV4ImageProcessor as MobileNetV4ImageProcessor, __webpack_exports__MobileNetV4Model as MobileNetV4Model, __webpack_exports__MobileNetV4PreTrainedModel as MobileNetV4PreTrainedModel, __webpack_exports__MobileViTFeatureExtractor as MobileViTFeatureExtractor, __webpack_exports__MobileViTForImageClassification as MobileViTForImageClassification, __webpack_exports__MobileViTImageProcessor as MobileViTImageProcessor, __webpack_exports__MobileViTModel as MobileViTModel, __webpack_exports__MobileViTPreTrainedModel as MobileViTPreTrainedModel, __webpack_exports__MobileViTV2ForImageClassification as MobileViTV2ForImageClassification, __webpack_exports__MobileViTV2Model as MobileViTV2Model, __webpack_exports__MobileViTV2PreTrainedModel as MobileViTV2PreTrainedModel, __webpack_exports__ModelOutput as ModelOutput, __webpack_exports__Moondream1ForConditionalGeneration as Moondream1ForConditionalGeneration, __webpack_exports__MptForCausalLM as MptForCausalLM, __webpack_exports__MptModel as MptModel, __webpack_exports__MptPreTrainedModel as MptPreTrainedModel, __webpack_exports__MultiModalityCausalLM as MultiModalityCausalLM, __webpack_exports__MultiModalityPreTrainedModel as MultiModalityPreTrainedModel, __webpack_exports__MusicgenForCausalLM as MusicgenForCausalLM, __webpack_exports__MusicgenForConditionalGeneration as MusicgenForConditionalGeneration, __webpack_exports__MusicgenModel as MusicgenModel, __webpack_exports__MusicgenPreTrainedModel as MusicgenPreTrainedModel, __webpack_exports__NllbTokenizer as NllbTokenizer, 
__webpack_exports__NoBadWordsLogitsProcessor as NoBadWordsLogitsProcessor, __webpack_exports__NoRepeatNGramLogitsProcessor as NoRepeatNGramLogitsProcessor, __webpack_exports__NomicBertModel as NomicBertModel, __webpack_exports__NomicBertPreTrainedModel as NomicBertPreTrainedModel, __webpack_exports__NougatImageProcessor as NougatImageProcessor, __webpack_exports__NougatTokenizer as NougatTokenizer, __webpack_exports__OPTForCausalLM as OPTForCausalLM, __webpack_exports__OPTModel as OPTModel, __webpack_exports__OPTPreTrainedModel as OPTPreTrainedModel, __webpack_exports__ObjectDetectionPipeline as ObjectDetectionPipeline, __webpack_exports__OlmoForCausalLM as OlmoForCausalLM, __webpack_exports__OlmoModel as OlmoModel, __webpack_exports__OlmoPreTrainedModel as OlmoPreTrainedModel, __webpack_exports__OpenELMForCausalLM as OpenELMForCausalLM, __webpack_exports__OpenELMModel as OpenELMModel, __webpack_exports__OpenELMPreTrainedModel as OpenELMPreTrainedModel, __webpack_exports__OwlViTFeatureExtractor as OwlViTFeatureExtractor, __webpack_exports__OwlViTForObjectDetection as OwlViTForObjectDetection, __webpack_exports__OwlViTImageProcessor as OwlViTImageProcessor, __webpack_exports__OwlViTModel as OwlViTModel, __webpack_exports__OwlViTPreTrainedModel as OwlViTPreTrainedModel, __webpack_exports__OwlViTProcessor as OwlViTProcessor, __webpack_exports__Owlv2ForObjectDetection as Owlv2ForObjectDetection, __webpack_exports__Owlv2ImageProcessor as Owlv2ImageProcessor, __webpack_exports__Owlv2Model as Owlv2Model, __webpack_exports__Owlv2PreTrainedModel as Owlv2PreTrainedModel, __webpack_exports__PatchTSMixerForPrediction as PatchTSMixerForPrediction, __webpack_exports__PatchTSMixerModel as PatchTSMixerModel, __webpack_exports__PatchTSMixerPreTrainedModel as PatchTSMixerPreTrainedModel, __webpack_exports__PatchTSTForPrediction as PatchTSTForPrediction, __webpack_exports__PatchTSTModel as PatchTSTModel, __webpack_exports__PatchTSTPreTrainedModel as PatchTSTPreTrainedModel, 
__webpack_exports__Phi3ForCausalLM as Phi3ForCausalLM, __webpack_exports__Phi3Model as Phi3Model, __webpack_exports__Phi3PreTrainedModel as Phi3PreTrainedModel, __webpack_exports__PhiForCausalLM as PhiForCausalLM, __webpack_exports__PhiModel as PhiModel, __webpack_exports__PhiPreTrainedModel as PhiPreTrainedModel, __webpack_exports__Pipeline as Pipeline, __webpack_exports__PreTrainedModel as PreTrainedModel, __webpack_exports__PreTrainedTokenizer as PreTrainedTokenizer, __webpack_exports__PretrainedConfig as PretrainedConfig, __webpack_exports__PretrainedMixin as PretrainedMixin, __webpack_exports__Processor as Processor, __webpack_exports__PvtForImageClassification as PvtForImageClassification, __webpack_exports__PvtImageProcessor as PvtImageProcessor, __webpack_exports__PvtModel as PvtModel, __webpack_exports__PvtPreTrainedModel as PvtPreTrainedModel, __webpack_exports__PyAnnoteFeatureExtractor as PyAnnoteFeatureExtractor, __webpack_exports__PyAnnoteForAudioFrameClassification as PyAnnoteForAudioFrameClassification, __webpack_exports__PyAnnoteModel as PyAnnoteModel, __webpack_exports__PyAnnotePreTrainedModel as PyAnnotePreTrainedModel, __webpack_exports__PyAnnoteProcessor as PyAnnoteProcessor, __webpack_exports__QuestionAnsweringModelOutput as QuestionAnsweringModelOutput, __webpack_exports__QuestionAnsweringPipeline as QuestionAnsweringPipeline, __webpack_exports__Qwen2ForCausalLM as Qwen2ForCausalLM, __webpack_exports__Qwen2Model as Qwen2Model, __webpack_exports__Qwen2PreTrainedModel as Qwen2PreTrainedModel, __webpack_exports__Qwen2Tokenizer as Qwen2Tokenizer, __webpack_exports__Qwen2VLForConditionalGeneration as Qwen2VLForConditionalGeneration, __webpack_exports__Qwen2VLImageProcessor as Qwen2VLImageProcessor, __webpack_exports__Qwen2VLPreTrainedModel as Qwen2VLPreTrainedModel, __webpack_exports__Qwen2VLProcessor as Qwen2VLProcessor, __webpack_exports__RTDetrForObjectDetection as RTDetrForObjectDetection, __webpack_exports__RTDetrImageProcessor as 
RTDetrImageProcessor, __webpack_exports__RTDetrModel as RTDetrModel, __webpack_exports__RTDetrObjectDetectionOutput as RTDetrObjectDetectionOutput, __webpack_exports__RTDetrPreTrainedModel as RTDetrPreTrainedModel, __webpack_exports__RawImage as RawImage, __webpack_exports__RepetitionPenaltyLogitsProcessor as RepetitionPenaltyLogitsProcessor, __webpack_exports__ResNetForImageClassification as ResNetForImageClassification, __webpack_exports__ResNetModel as ResNetModel, __webpack_exports__ResNetPreTrainedModel as ResNetPreTrainedModel, __webpack_exports__RoFormerForMaskedLM as RoFormerForMaskedLM, __webpack_exports__RoFormerForQuestionAnswering as RoFormerForQuestionAnswering, __webpack_exports__RoFormerForSequenceClassification as RoFormerForSequenceClassification, __webpack_exports__RoFormerForTokenClassification as RoFormerForTokenClassification, __webpack_exports__RoFormerModel as RoFormerModel, __webpack_exports__RoFormerPreTrainedModel as RoFormerPreTrainedModel, __webpack_exports__RoFormerTokenizer as RoFormerTokenizer, __webpack_exports__RobertaForMaskedLM as RobertaForMaskedLM, __webpack_exports__RobertaForQuestionAnswering as RobertaForQuestionAnswering, __webpack_exports__RobertaForSequenceClassification as RobertaForSequenceClassification, __webpack_exports__RobertaForTokenClassification as RobertaForTokenClassification, __webpack_exports__RobertaModel as RobertaModel, __webpack_exports__RobertaPreTrainedModel as RobertaPreTrainedModel, __webpack_exports__RobertaTokenizer as RobertaTokenizer, __webpack_exports__SamImageProcessor as SamImageProcessor, __webpack_exports__SamImageSegmentationOutput as SamImageSegmentationOutput, __webpack_exports__SamModel as SamModel, __webpack_exports__SamPreTrainedModel as SamPreTrainedModel, __webpack_exports__SamProcessor as SamProcessor, __webpack_exports__SapiensForDepthEstimation as SapiensForDepthEstimation, __webpack_exports__SapiensForNormalEstimation as SapiensForNormalEstimation, 
__webpack_exports__SapiensForSemanticSegmentation as SapiensForSemanticSegmentation, __webpack_exports__SapiensPreTrainedModel as SapiensPreTrainedModel, __webpack_exports__SeamlessM4TFeatureExtractor as SeamlessM4TFeatureExtractor, __webpack_exports__SegformerFeatureExtractor as SegformerFeatureExtractor, __webpack_exports__SegformerForImageClassification as SegformerForImageClassification, __webpack_exports__SegformerForSemanticSegmentation as SegformerForSemanticSegmentation, __webpack_exports__SegformerImageProcessor as SegformerImageProcessor, __webpack_exports__SegformerModel as SegformerModel, __webpack_exports__SegformerPreTrainedModel as SegformerPreTrainedModel, __webpack_exports__Seq2SeqLMOutput as Seq2SeqLMOutput, __webpack_exports__SequenceClassifierOutput as SequenceClassifierOutput, __webpack_exports__SiglipImageProcessor as SiglipImageProcessor, __webpack_exports__SiglipModel as SiglipModel, __webpack_exports__SiglipPreTrainedModel as SiglipPreTrainedModel, __webpack_exports__SiglipTextModel as SiglipTextModel, __webpack_exports__SiglipTokenizer as SiglipTokenizer, __webpack_exports__SiglipVisionModel as SiglipVisionModel, __webpack_exports__SpeechT5FeatureExtractor as SpeechT5FeatureExtractor, __webpack_exports__SpeechT5ForSpeechToText as SpeechT5ForSpeechToText, __webpack_exports__SpeechT5ForTextToSpeech as SpeechT5ForTextToSpeech, __webpack_exports__SpeechT5HifiGan as SpeechT5HifiGan, __webpack_exports__SpeechT5Model as SpeechT5Model, __webpack_exports__SpeechT5PreTrainedModel as SpeechT5PreTrainedModel, __webpack_exports__SpeechT5Processor as SpeechT5Processor, __webpack_exports__SpeechT5Tokenizer as SpeechT5Tokenizer, __webpack_exports__SqueezeBertForMaskedLM as SqueezeBertForMaskedLM, __webpack_exports__SqueezeBertForQuestionAnswering as SqueezeBertForQuestionAnswering, __webpack_exports__SqueezeBertForSequenceClassification as SqueezeBertForSequenceClassification, __webpack_exports__SqueezeBertModel as SqueezeBertModel, 
__webpack_exports__SqueezeBertPreTrainedModel as SqueezeBertPreTrainedModel, __webpack_exports__SqueezeBertTokenizer as SqueezeBertTokenizer, __webpack_exports__StableLmForCausalLM as StableLmForCausalLM, __webpack_exports__StableLmModel as StableLmModel, __webpack_exports__StableLmPreTrainedModel as StableLmPreTrainedModel, __webpack_exports__Starcoder2ForCausalLM as Starcoder2ForCausalLM, __webpack_exports__Starcoder2Model as Starcoder2Model, __webpack_exports__Starcoder2PreTrainedModel as Starcoder2PreTrainedModel, __webpack_exports__StoppingCriteria as StoppingCriteria, __webpack_exports__StoppingCriteriaList as StoppingCriteriaList, __webpack_exports__SummarizationPipeline as SummarizationPipeline, __webpack_exports__SuppressTokensAtBeginLogitsProcessor as SuppressTokensAtBeginLogitsProcessor, __webpack_exports__Swin2SRForImageSuperResolution as Swin2SRForImageSuperResolution, __webpack_exports__Swin2SRImageProcessor as Swin2SRImageProcessor, __webpack_exports__Swin2SRModel as Swin2SRModel, __webpack_exports__Swin2SRPreTrainedModel as Swin2SRPreTrainedModel, __webpack_exports__SwinForImageClassification as SwinForImageClassification, __webpack_exports__SwinModel as SwinModel, __webpack_exports__SwinPreTrainedModel as SwinPreTrainedModel, __webpack_exports__T5ForConditionalGeneration as T5ForConditionalGeneration, __webpack_exports__T5Model as T5Model, __webpack_exports__T5PreTrainedModel as T5PreTrainedModel, __webpack_exports__T5Tokenizer as T5Tokenizer, __webpack_exports__TableTransformerForObjectDetection as TableTransformerForObjectDetection, __webpack_exports__TableTransformerModel as TableTransformerModel, __webpack_exports__TableTransformerObjectDetectionOutput as TableTransformerObjectDetectionOutput, __webpack_exports__TableTransformerPreTrainedModel as TableTransformerPreTrainedModel, __webpack_exports__TemperatureLogitsWarper as TemperatureLogitsWarper, __webpack_exports__Tensor as Tensor, __webpack_exports__Text2TextGenerationPipeline as 
Text2TextGenerationPipeline, __webpack_exports__TextClassificationPipeline as TextClassificationPipeline, __webpack_exports__TextGenerationPipeline as TextGenerationPipeline, __webpack_exports__TextStreamer as TextStreamer, __webpack_exports__TextToAudioPipeline as TextToAudioPipeline, __webpack_exports__TokenClassificationPipeline as TokenClassificationPipeline, __webpack_exports__TokenClassifierOutput as TokenClassifierOutput, __webpack_exports__TokenizerModel as TokenizerModel, __webpack_exports__TopKLogitsWarper as TopKLogitsWarper, __webpack_exports__TopPLogitsWarper as TopPLogitsWarper, __webpack_exports__TrOCRForCausalLM as TrOCRForCausalLM, __webpack_exports__TrOCRPreTrainedModel as TrOCRPreTrainedModel, __webpack_exports__TranslationPipeline as TranslationPipeline, __webpack_exports__UniSpeechForCTC as UniSpeechForCTC, __webpack_exports__UniSpeechForSequenceClassification as UniSpeechForSequenceClassification, __webpack_exports__UniSpeechModel as UniSpeechModel, __webpack_exports__UniSpeechPreTrainedModel as UniSpeechPreTrainedModel, __webpack_exports__UniSpeechSatForAudioFrameClassification as UniSpeechSatForAudioFrameClassification, __webpack_exports__UniSpeechSatForCTC as UniSpeechSatForCTC, __webpack_exports__UniSpeechSatForSequenceClassification as UniSpeechSatForSequenceClassification, __webpack_exports__UniSpeechSatModel as UniSpeechSatModel, __webpack_exports__UniSpeechSatPreTrainedModel as UniSpeechSatPreTrainedModel, __webpack_exports__VLChatProcessor as VLChatProcessor, __webpack_exports__VLMImageProcessor as VLMImageProcessor, __webpack_exports__ViTFeatureExtractor as ViTFeatureExtractor, __webpack_exports__ViTForImageClassification as ViTForImageClassification, __webpack_exports__ViTImageProcessor as ViTImageProcessor, __webpack_exports__ViTMAEModel as ViTMAEModel, __webpack_exports__ViTMAEPreTrainedModel as ViTMAEPreTrainedModel, __webpack_exports__ViTMSNForImageClassification as ViTMSNForImageClassification, __webpack_exports__ViTMSNModel as 
ViTMSNModel, __webpack_exports__ViTMSNPreTrainedModel as ViTMSNPreTrainedModel, __webpack_exports__ViTModel as ViTModel, __webpack_exports__ViTPreTrainedModel as ViTPreTrainedModel, __webpack_exports__VisionEncoderDecoderModel as VisionEncoderDecoderModel, __webpack_exports__VitMatteForImageMatting as VitMatteForImageMatting, __webpack_exports__VitMatteImageProcessor as VitMatteImageProcessor, __webpack_exports__VitMattePreTrainedModel as VitMattePreTrainedModel, __webpack_exports__VitPoseForPoseEstimation as VitPoseForPoseEstimation, __webpack_exports__VitPoseImageProcessor as VitPoseImageProcessor, __webpack_exports__VitPosePreTrainedModel as VitPosePreTrainedModel, __webpack_exports__VitsModel as VitsModel, __webpack_exports__VitsModelOutput as VitsModelOutput, __webpack_exports__VitsPreTrainedModel as VitsPreTrainedModel, __webpack_exports__VitsTokenizer as VitsTokenizer, __webpack_exports__Wav2Vec2BertForCTC as Wav2Vec2BertForCTC, __webpack_exports__Wav2Vec2BertForSequenceClassification as Wav2Vec2BertForSequenceClassification, __webpack_exports__Wav2Vec2BertModel as Wav2Vec2BertModel, __webpack_exports__Wav2Vec2BertPreTrainedModel as Wav2Vec2BertPreTrainedModel, __webpack_exports__Wav2Vec2CTCTokenizer as Wav2Vec2CTCTokenizer, __webpack_exports__Wav2Vec2FeatureExtractor as Wav2Vec2FeatureExtractor, __webpack_exports__Wav2Vec2ForAudioFrameClassification as Wav2Vec2ForAudioFrameClassification, __webpack_exports__Wav2Vec2ForCTC as Wav2Vec2ForCTC, __webpack_exports__Wav2Vec2ForSequenceClassification as Wav2Vec2ForSequenceClassification, __webpack_exports__Wav2Vec2Model as Wav2Vec2Model, __webpack_exports__Wav2Vec2PreTrainedModel as Wav2Vec2PreTrainedModel, __webpack_exports__Wav2Vec2ProcessorWithLM as Wav2Vec2ProcessorWithLM, __webpack_exports__WavLMForAudioFrameClassification as WavLMForAudioFrameClassification, __webpack_exports__WavLMForCTC as WavLMForCTC, __webpack_exports__WavLMForSequenceClassification as WavLMForSequenceClassification, 
__webpack_exports__WavLMForXVector as WavLMForXVector, __webpack_exports__WavLMModel as WavLMModel, __webpack_exports__WavLMPreTrainedModel as WavLMPreTrainedModel, __webpack_exports__WeSpeakerFeatureExtractor as WeSpeakerFeatureExtractor, __webpack_exports__WeSpeakerResNetModel as WeSpeakerResNetModel, __webpack_exports__WeSpeakerResNetPreTrainedModel as WeSpeakerResNetPreTrainedModel, __webpack_exports__WhisperFeatureExtractor as WhisperFeatureExtractor, __webpack_exports__WhisperForConditionalGeneration as WhisperForConditionalGeneration, __webpack_exports__WhisperModel as WhisperModel, __webpack_exports__WhisperPreTrainedModel as WhisperPreTrainedModel, __webpack_exports__WhisperProcessor as WhisperProcessor, __webpack_exports__WhisperTextStreamer as WhisperTextStreamer, __webpack_exports__WhisperTimeStampLogitsProcessor as WhisperTimeStampLogitsProcessor, __webpack_exports__WhisperTokenizer as WhisperTokenizer, __webpack_exports__XLMForQuestionAnswering as XLMForQuestionAnswering, __webpack_exports__XLMForSequenceClassification as XLMForSequenceClassification, __webpack_exports__XLMForTokenClassification as XLMForTokenClassification, __webpack_exports__XLMModel as XLMModel, __webpack_exports__XLMPreTrainedModel as XLMPreTrainedModel, __webpack_exports__XLMRobertaForMaskedLM as XLMRobertaForMaskedLM, __webpack_exports__XLMRobertaForQuestionAnswering as XLMRobertaForQuestionAnswering, __webpack_exports__XLMRobertaForSequenceClassification as XLMRobertaForSequenceClassification, __webpack_exports__XLMRobertaForTokenClassification as XLMRobertaForTokenClassification, __webpack_exports__XLMRobertaModel as XLMRobertaModel, __webpack_exports__XLMRobertaPreTrainedModel as XLMRobertaPreTrainedModel, __webpack_exports__XLMRobertaTokenizer as XLMRobertaTokenizer, __webpack_exports__XLMTokenizer as XLMTokenizer, __webpack_exports__XLMWithLMHeadModel as XLMWithLMHeadModel, __webpack_exports__XVectorOutput as XVectorOutput, __webpack_exports__YolosFeatureExtractor as 
YolosFeatureExtractor, __webpack_exports__YolosForObjectDetection as YolosForObjectDetection, __webpack_exports__YolosImageProcessor as YolosImageProcessor, __webpack_exports__YolosModel as YolosModel, __webpack_exports__YolosObjectDetectionOutput as YolosObjectDetectionOutput, __webpack_exports__YolosPreTrainedModel as YolosPreTrainedModel, __webpack_exports__ZeroShotAudioClassificationPipeline as ZeroShotAudioClassificationPipeline, __webpack_exports__ZeroShotClassificationPipeline as ZeroShotClassificationPipeline, __webpack_exports__ZeroShotImageClassificationPipeline as ZeroShotImageClassificationPipeline, __webpack_exports__ZeroShotObjectDetectionPipeline as ZeroShotObjectDetectionPipeline, __webpack_exports__bankers_round as bankers_round, __webpack_exports__cat as cat, __webpack_exports__cos_sim as cos_sim, __webpack_exports__dot as dot, __webpack_exports__dynamic_time_warping as dynamic_time_warping, __webpack_exports__env as env, __webpack_exports__full as full, __webpack_exports__full_like as full_like, __webpack_exports__getKeyValueShapes as getKeyValueShapes, __webpack_exports__hamming as hamming, __webpack_exports__hanning as hanning, __webpack_exports__interpolate as interpolate, __webpack_exports__interpolate_4d as interpolate_4d, __webpack_exports__interpolate_data as interpolate_data, __webpack_exports__is_chinese_char as is_chinese_char, __webpack_exports__layer_norm as layer_norm, __webpack_exports__log_softmax as log_softmax, __webpack_exports__magnitude as magnitude, __webpack_exports__matmul as matmul, __webpack_exports__max as max, __webpack_exports__mean as mean, __webpack_exports__mean_pooling as mean_pooling, __webpack_exports__medianFilter as medianFilter, __webpack_exports__mel_filter_bank as mel_filter_bank, __webpack_exports__min as min, __webpack_exports__ones as ones, __webpack_exports__ones_like as ones_like, __webpack_exports__permute as permute, __webpack_exports__permute_data as permute_data, __webpack_exports__pipeline as 
pipeline, __webpack_exports__quantize_embeddings as quantize_embeddings, __webpack_exports__read_audio as read_audio, __webpack_exports__rfft as rfft, __webpack_exports__round as round, __webpack_exports__softmax as softmax, __webpack_exports__spectrogram as spectrogram, __webpack_exports__stack as stack, __webpack_exports__std_mean as std_mean, __webpack_exports__topk as topk, __webpack_exports__window_function as window_function, __webpack_exports__zeros as zeros, __webpack_exports__zeros_like as zeros_like };
|
|
35275
|
+
export { __webpack_exports__ASTFeatureExtractor as ASTFeatureExtractor, __webpack_exports__ASTForAudioClassification as ASTForAudioClassification, __webpack_exports__ASTModel as ASTModel, __webpack_exports__ASTPreTrainedModel as ASTPreTrainedModel, __webpack_exports__AlbertForMaskedLM as AlbertForMaskedLM, __webpack_exports__AlbertForQuestionAnswering as AlbertForQuestionAnswering, __webpack_exports__AlbertForSequenceClassification as AlbertForSequenceClassification, __webpack_exports__AlbertModel as AlbertModel, __webpack_exports__AlbertPreTrainedModel as AlbertPreTrainedModel, __webpack_exports__AlbertTokenizer as AlbertTokenizer, __webpack_exports__AudioClassificationPipeline as AudioClassificationPipeline, __webpack_exports__AutoConfig as AutoConfig, __webpack_exports__AutoFeatureExtractor as AutoFeatureExtractor, __webpack_exports__AutoImageProcessor as AutoImageProcessor, __webpack_exports__AutoModel as AutoModel, __webpack_exports__AutoModelForAudioClassification as AutoModelForAudioClassification, __webpack_exports__AutoModelForAudioFrameClassification as AutoModelForAudioFrameClassification, __webpack_exports__AutoModelForCTC as AutoModelForCTC, __webpack_exports__AutoModelForCausalLM as AutoModelForCausalLM, __webpack_exports__AutoModelForDepthEstimation as AutoModelForDepthEstimation, __webpack_exports__AutoModelForDocumentQuestionAnswering as AutoModelForDocumentQuestionAnswering, __webpack_exports__AutoModelForImageClassification as AutoModelForImageClassification, __webpack_exports__AutoModelForImageFeatureExtraction as AutoModelForImageFeatureExtraction, __webpack_exports__AutoModelForImageMatting as AutoModelForImageMatting, __webpack_exports__AutoModelForImageSegmentation as AutoModelForImageSegmentation, __webpack_exports__AutoModelForImageToImage as AutoModelForImageToImage, __webpack_exports__AutoModelForMaskGeneration as AutoModelForMaskGeneration, __webpack_exports__AutoModelForMaskedLM as AutoModelForMaskedLM, 
__webpack_exports__AutoModelForNormalEstimation as AutoModelForNormalEstimation, __webpack_exports__AutoModelForObjectDetection as AutoModelForObjectDetection, __webpack_exports__AutoModelForPoseEstimation as AutoModelForPoseEstimation, __webpack_exports__AutoModelForQuestionAnswering as AutoModelForQuestionAnswering, __webpack_exports__AutoModelForSemanticSegmentation as AutoModelForSemanticSegmentation, __webpack_exports__AutoModelForSeq2SeqLM as AutoModelForSeq2SeqLM, __webpack_exports__AutoModelForSequenceClassification as AutoModelForSequenceClassification, __webpack_exports__AutoModelForSpeechSeq2Seq as AutoModelForSpeechSeq2Seq, __webpack_exports__AutoModelForTextToSpectrogram as AutoModelForTextToSpectrogram, __webpack_exports__AutoModelForTextToWaveform as AutoModelForTextToWaveform, __webpack_exports__AutoModelForTokenClassification as AutoModelForTokenClassification, __webpack_exports__AutoModelForUniversalSegmentation as AutoModelForUniversalSegmentation, __webpack_exports__AutoModelForVision2Seq as AutoModelForVision2Seq, __webpack_exports__AutoModelForXVector as AutoModelForXVector, __webpack_exports__AutoModelForZeroShotObjectDetection as AutoModelForZeroShotObjectDetection, __webpack_exports__AutoProcessor as AutoProcessor, __webpack_exports__AutoTokenizer as AutoTokenizer, __webpack_exports__AutomaticSpeechRecognitionPipeline as AutomaticSpeechRecognitionPipeline, __webpack_exports__BartForConditionalGeneration as BartForConditionalGeneration, __webpack_exports__BartForSequenceClassification as BartForSequenceClassification, __webpack_exports__BartModel as BartModel, __webpack_exports__BartPretrainedModel as BartPretrainedModel, __webpack_exports__BartTokenizer as BartTokenizer, __webpack_exports__BaseModelOutput as BaseModelOutput, __webpack_exports__BaseStreamer as BaseStreamer, __webpack_exports__BeitFeatureExtractor as BeitFeatureExtractor, __webpack_exports__BeitForImageClassification as BeitForImageClassification, __webpack_exports__BeitModel 
as BeitModel, __webpack_exports__BeitPreTrainedModel as BeitPreTrainedModel, __webpack_exports__BertForMaskedLM as BertForMaskedLM, __webpack_exports__BertForQuestionAnswering as BertForQuestionAnswering, __webpack_exports__BertForSequenceClassification as BertForSequenceClassification, __webpack_exports__BertForTokenClassification as BertForTokenClassification, __webpack_exports__BertModel as BertModel, __webpack_exports__BertPreTrainedModel as BertPreTrainedModel, __webpack_exports__BertTokenizer as BertTokenizer, __webpack_exports__BitImageProcessor as BitImageProcessor, __webpack_exports__BlenderbotForConditionalGeneration as BlenderbotForConditionalGeneration, __webpack_exports__BlenderbotModel as BlenderbotModel, __webpack_exports__BlenderbotPreTrainedModel as BlenderbotPreTrainedModel, __webpack_exports__BlenderbotSmallForConditionalGeneration as BlenderbotSmallForConditionalGeneration, __webpack_exports__BlenderbotSmallModel as BlenderbotSmallModel, __webpack_exports__BlenderbotSmallPreTrainedModel as BlenderbotSmallPreTrainedModel, __webpack_exports__BlenderbotSmallTokenizer as BlenderbotSmallTokenizer, __webpack_exports__BlenderbotTokenizer as BlenderbotTokenizer, __webpack_exports__BloomForCausalLM as BloomForCausalLM, __webpack_exports__BloomModel as BloomModel, __webpack_exports__BloomPreTrainedModel as BloomPreTrainedModel, __webpack_exports__BloomTokenizer as BloomTokenizer, __webpack_exports__CLIPFeatureExtractor as CLIPFeatureExtractor, __webpack_exports__CLIPImageProcessor as CLIPImageProcessor, __webpack_exports__CLIPModel as CLIPModel, __webpack_exports__CLIPPreTrainedModel as CLIPPreTrainedModel, __webpack_exports__CLIPSegForImageSegmentation as CLIPSegForImageSegmentation, __webpack_exports__CLIPSegModel as CLIPSegModel, __webpack_exports__CLIPSegPreTrainedModel as CLIPSegPreTrainedModel, __webpack_exports__CLIPTextModel as CLIPTextModel, __webpack_exports__CLIPTextModelWithProjection as CLIPTextModelWithProjection, 
__webpack_exports__CLIPTokenizer as CLIPTokenizer, __webpack_exports__CLIPVisionModel as CLIPVisionModel, __webpack_exports__CLIPVisionModelWithProjection as CLIPVisionModelWithProjection, __webpack_exports__CamembertForMaskedLM as CamembertForMaskedLM, __webpack_exports__CamembertForQuestionAnswering as CamembertForQuestionAnswering, __webpack_exports__CamembertForSequenceClassification as CamembertForSequenceClassification, __webpack_exports__CamembertForTokenClassification as CamembertForTokenClassification, __webpack_exports__CamembertModel as CamembertModel, __webpack_exports__CamembertPreTrainedModel as CamembertPreTrainedModel, __webpack_exports__CamembertTokenizer as CamembertTokenizer, __webpack_exports__CausalLMOutput as CausalLMOutput, __webpack_exports__CausalLMOutputWithPast as CausalLMOutputWithPast, __webpack_exports__ChineseCLIPFeatureExtractor as ChineseCLIPFeatureExtractor, __webpack_exports__ChineseCLIPModel as ChineseCLIPModel, __webpack_exports__ChineseCLIPPreTrainedModel as ChineseCLIPPreTrainedModel, __webpack_exports__ClapAudioModelWithProjection as ClapAudioModelWithProjection, __webpack_exports__ClapFeatureExtractor as ClapFeatureExtractor, __webpack_exports__ClapModel as ClapModel, __webpack_exports__ClapPreTrainedModel as ClapPreTrainedModel, __webpack_exports__ClapTextModelWithProjection as ClapTextModelWithProjection, __webpack_exports__ClassifierFreeGuidanceLogitsProcessor as ClassifierFreeGuidanceLogitsProcessor, __webpack_exports__CodeGenForCausalLM as CodeGenForCausalLM, __webpack_exports__CodeGenModel as CodeGenModel, __webpack_exports__CodeGenPreTrainedModel as CodeGenPreTrainedModel, __webpack_exports__CodeGenTokenizer as CodeGenTokenizer, __webpack_exports__CodeLlamaTokenizer as CodeLlamaTokenizer, __webpack_exports__CohereForCausalLM as CohereForCausalLM, __webpack_exports__CohereModel as CohereModel, __webpack_exports__CoherePreTrainedModel as CoherePreTrainedModel, __webpack_exports__CohereTokenizer as CohereTokenizer, 
__webpack_exports__ConvBertForMaskedLM as ConvBertForMaskedLM, __webpack_exports__ConvBertForQuestionAnswering as ConvBertForQuestionAnswering, __webpack_exports__ConvBertForSequenceClassification as ConvBertForSequenceClassification, __webpack_exports__ConvBertForTokenClassification as ConvBertForTokenClassification, __webpack_exports__ConvBertModel as ConvBertModel, __webpack_exports__ConvBertPreTrainedModel as ConvBertPreTrainedModel, __webpack_exports__ConvBertTokenizer as ConvBertTokenizer, __webpack_exports__ConvNextFeatureExtractor as ConvNextFeatureExtractor, __webpack_exports__ConvNextForImageClassification as ConvNextForImageClassification, __webpack_exports__ConvNextImageProcessor as ConvNextImageProcessor, __webpack_exports__ConvNextModel as ConvNextModel, __webpack_exports__ConvNextPreTrainedModel as ConvNextPreTrainedModel, __webpack_exports__ConvNextV2ForImageClassification as ConvNextV2ForImageClassification, __webpack_exports__ConvNextV2Model as ConvNextV2Model, __webpack_exports__ConvNextV2PreTrainedModel as ConvNextV2PreTrainedModel, __webpack_exports__DPTFeatureExtractor as DPTFeatureExtractor, __webpack_exports__DPTForDepthEstimation as DPTForDepthEstimation, __webpack_exports__DPTImageProcessor as DPTImageProcessor, __webpack_exports__DPTModel as DPTModel, __webpack_exports__DPTPreTrainedModel as DPTPreTrainedModel, __webpack_exports__DebertaForMaskedLM as DebertaForMaskedLM, __webpack_exports__DebertaForQuestionAnswering as DebertaForQuestionAnswering, __webpack_exports__DebertaForSequenceClassification as DebertaForSequenceClassification, __webpack_exports__DebertaForTokenClassification as DebertaForTokenClassification, __webpack_exports__DebertaModel as DebertaModel, __webpack_exports__DebertaPreTrainedModel as DebertaPreTrainedModel, __webpack_exports__DebertaTokenizer as DebertaTokenizer, __webpack_exports__DebertaV2ForMaskedLM as DebertaV2ForMaskedLM, __webpack_exports__DebertaV2ForQuestionAnswering as DebertaV2ForQuestionAnswering, 
__webpack_exports__DebertaV2ForSequenceClassification as DebertaV2ForSequenceClassification, __webpack_exports__DebertaV2ForTokenClassification as DebertaV2ForTokenClassification, __webpack_exports__DebertaV2Model as DebertaV2Model, __webpack_exports__DebertaV2PreTrainedModel as DebertaV2PreTrainedModel, __webpack_exports__DebertaV2Tokenizer as DebertaV2Tokenizer, __webpack_exports__DecisionTransformerModel as DecisionTransformerModel, __webpack_exports__DecisionTransformerPreTrainedModel as DecisionTransformerPreTrainedModel, __webpack_exports__DeiTFeatureExtractor as DeiTFeatureExtractor, __webpack_exports__DeiTForImageClassification as DeiTForImageClassification, __webpack_exports__DeiTImageProcessor as DeiTImageProcessor, __webpack_exports__DeiTModel as DeiTModel, __webpack_exports__DeiTPreTrainedModel as DeiTPreTrainedModel, __webpack_exports__DepthAnythingForDepthEstimation as DepthAnythingForDepthEstimation, __webpack_exports__DepthAnythingPreTrainedModel as DepthAnythingPreTrainedModel, __webpack_exports__DepthEstimationPipeline as DepthEstimationPipeline, __webpack_exports__DepthProForDepthEstimation as DepthProForDepthEstimation, __webpack_exports__DepthProPreTrainedModel as DepthProPreTrainedModel, __webpack_exports__DetrFeatureExtractor as DetrFeatureExtractor, __webpack_exports__DetrForObjectDetection as DetrForObjectDetection, __webpack_exports__DetrForSegmentation as DetrForSegmentation, __webpack_exports__DetrImageProcessor as DetrImageProcessor, __webpack_exports__DetrModel as DetrModel, __webpack_exports__DetrObjectDetectionOutput as DetrObjectDetectionOutput, __webpack_exports__DetrPreTrainedModel as DetrPreTrainedModel, __webpack_exports__DetrSegmentationOutput as DetrSegmentationOutput, __webpack_exports__Dinov2ForImageClassification as Dinov2ForImageClassification, __webpack_exports__Dinov2Model as Dinov2Model, __webpack_exports__Dinov2PreTrainedModel as Dinov2PreTrainedModel, __webpack_exports__DistilBertForMaskedLM as DistilBertForMaskedLM, 
__webpack_exports__DistilBertForQuestionAnswering as DistilBertForQuestionAnswering, __webpack_exports__DistilBertForSequenceClassification as DistilBertForSequenceClassification, __webpack_exports__DistilBertForTokenClassification as DistilBertForTokenClassification, __webpack_exports__DistilBertModel as DistilBertModel, __webpack_exports__DistilBertPreTrainedModel as DistilBertPreTrainedModel, __webpack_exports__DistilBertTokenizer as DistilBertTokenizer, __webpack_exports__DocumentQuestionAnsweringPipeline as DocumentQuestionAnsweringPipeline, __webpack_exports__DonutFeatureExtractor as DonutFeatureExtractor, __webpack_exports__DonutImageProcessor as DonutImageProcessor, __webpack_exports__DonutSwinModel as DonutSwinModel, __webpack_exports__DonutSwinPreTrainedModel as DonutSwinPreTrainedModel, __webpack_exports__EfficientNetForImageClassification as EfficientNetForImageClassification, __webpack_exports__EfficientNetImageProcessor as EfficientNetImageProcessor, __webpack_exports__EfficientNetModel as EfficientNetModel, __webpack_exports__EfficientNetPreTrainedModel as EfficientNetPreTrainedModel, __webpack_exports__ElectraForMaskedLM as ElectraForMaskedLM, __webpack_exports__ElectraForQuestionAnswering as ElectraForQuestionAnswering, __webpack_exports__ElectraForSequenceClassification as ElectraForSequenceClassification, __webpack_exports__ElectraForTokenClassification as ElectraForTokenClassification, __webpack_exports__ElectraModel as ElectraModel, __webpack_exports__ElectraPreTrainedModel as ElectraPreTrainedModel, __webpack_exports__ElectraTokenizer as ElectraTokenizer, __webpack_exports__EosTokenCriteria as EosTokenCriteria, __webpack_exports__EsmForMaskedLM as EsmForMaskedLM, __webpack_exports__EsmForSequenceClassification as EsmForSequenceClassification, __webpack_exports__EsmForTokenClassification as EsmForTokenClassification, __webpack_exports__EsmModel as EsmModel, __webpack_exports__EsmPreTrainedModel as EsmPreTrainedModel, 
__webpack_exports__EsmTokenizer as EsmTokenizer, __webpack_exports__FFT as FFT, __webpack_exports__FalconForCausalLM as FalconForCausalLM, __webpack_exports__FalconModel as FalconModel, __webpack_exports__FalconPreTrainedModel as FalconPreTrainedModel, __webpack_exports__FalconTokenizer as FalconTokenizer, __webpack_exports__FastViTForImageClassification as FastViTForImageClassification, __webpack_exports__FastViTModel as FastViTModel, __webpack_exports__FastViTPreTrainedModel as FastViTPreTrainedModel, __webpack_exports__FeatureExtractionPipeline as FeatureExtractionPipeline, __webpack_exports__FeatureExtractor as FeatureExtractor, __webpack_exports__FillMaskPipeline as FillMaskPipeline, __webpack_exports__Florence2ForConditionalGeneration as Florence2ForConditionalGeneration, __webpack_exports__Florence2PreTrainedModel as Florence2PreTrainedModel, __webpack_exports__Florence2Processor as Florence2Processor, __webpack_exports__ForcedBOSTokenLogitsProcessor as ForcedBOSTokenLogitsProcessor, __webpack_exports__ForcedEOSTokenLogitsProcessor as ForcedEOSTokenLogitsProcessor, __webpack_exports__GLPNFeatureExtractor as GLPNFeatureExtractor, __webpack_exports__GLPNForDepthEstimation as GLPNForDepthEstimation, __webpack_exports__GLPNModel as GLPNModel, __webpack_exports__GLPNPreTrainedModel as GLPNPreTrainedModel, __webpack_exports__GPT2LMHeadModel as GPT2LMHeadModel, __webpack_exports__GPT2Model as GPT2Model, __webpack_exports__GPT2PreTrainedModel as GPT2PreTrainedModel, __webpack_exports__GPT2Tokenizer as GPT2Tokenizer, __webpack_exports__GPTBigCodeForCausalLM as GPTBigCodeForCausalLM, __webpack_exports__GPTBigCodeModel as GPTBigCodeModel, __webpack_exports__GPTBigCodePreTrainedModel as GPTBigCodePreTrainedModel, __webpack_exports__GPTJForCausalLM as GPTJForCausalLM, __webpack_exports__GPTJModel as GPTJModel, __webpack_exports__GPTJPreTrainedModel as GPTJPreTrainedModel, __webpack_exports__GPTNeoForCausalLM as GPTNeoForCausalLM, __webpack_exports__GPTNeoModel as 
GPTNeoModel, __webpack_exports__GPTNeoPreTrainedModel as GPTNeoPreTrainedModel, __webpack_exports__GPTNeoXForCausalLM as GPTNeoXForCausalLM, __webpack_exports__GPTNeoXModel as GPTNeoXModel, __webpack_exports__GPTNeoXPreTrainedModel as GPTNeoXPreTrainedModel, __webpack_exports__GPTNeoXTokenizer as GPTNeoXTokenizer, __webpack_exports__Gemma2ForCausalLM as Gemma2ForCausalLM, __webpack_exports__Gemma2Model as Gemma2Model, __webpack_exports__Gemma2PreTrainedModel as Gemma2PreTrainedModel, __webpack_exports__GemmaForCausalLM as GemmaForCausalLM, __webpack_exports__GemmaModel as GemmaModel, __webpack_exports__GemmaPreTrainedModel as GemmaPreTrainedModel, __webpack_exports__GemmaTokenizer as GemmaTokenizer, __webpack_exports__GraniteForCausalLM as GraniteForCausalLM, __webpack_exports__GraniteModel as GraniteModel, __webpack_exports__GranitePreTrainedModel as GranitePreTrainedModel, __webpack_exports__Grok1Tokenizer as Grok1Tokenizer, __webpack_exports__GroupViTModel as GroupViTModel, __webpack_exports__GroupViTPreTrainedModel as GroupViTPreTrainedModel, __webpack_exports__HerbertTokenizer as HerbertTokenizer, __webpack_exports__HieraForImageClassification as HieraForImageClassification, __webpack_exports__HieraModel as HieraModel, __webpack_exports__HieraPreTrainedModel as HieraPreTrainedModel, __webpack_exports__HubertForCTC as HubertForCTC, __webpack_exports__HubertForSequenceClassification as HubertForSequenceClassification, __webpack_exports__HubertModel as HubertModel, __webpack_exports__HubertPreTrainedModel as HubertPreTrainedModel, __webpack_exports__IJepaForImageClassification as IJepaForImageClassification, __webpack_exports__IJepaModel as IJepaModel, __webpack_exports__IJepaPreTrainedModel as IJepaPreTrainedModel, __webpack_exports__Idefics3ForConditionalGeneration as Idefics3ForConditionalGeneration, __webpack_exports__Idefics3ImageProcessor as Idefics3ImageProcessor, __webpack_exports__Idefics3PreTrainedModel as Idefics3PreTrainedModel, 
__webpack_exports__Idefics3Processor as Idefics3Processor, __webpack_exports__ImageClassificationPipeline as ImageClassificationPipeline, __webpack_exports__ImageFeatureExtractionPipeline as ImageFeatureExtractionPipeline, __webpack_exports__ImageFeatureExtractor as ImageFeatureExtractor, __webpack_exports__ImageMattingOutput as ImageMattingOutput, __webpack_exports__ImageProcessor as ImageProcessor, __webpack_exports__ImageSegmentationPipeline as ImageSegmentationPipeline, __webpack_exports__ImageToImagePipeline as ImageToImagePipeline, __webpack_exports__ImageToTextPipeline as ImageToTextPipeline, __webpack_exports__InterruptableStoppingCriteria as InterruptableStoppingCriteria, __webpack_exports__JAISLMHeadModel as JAISLMHeadModel, __webpack_exports__JAISModel as JAISModel, __webpack_exports__JAISPreTrainedModel as JAISPreTrainedModel, __webpack_exports__JinaCLIPImageProcessor as JinaCLIPImageProcessor, __webpack_exports__JinaCLIPModel as JinaCLIPModel, __webpack_exports__JinaCLIPPreTrainedModel as JinaCLIPPreTrainedModel, __webpack_exports__JinaCLIPProcessor as JinaCLIPProcessor, __webpack_exports__JinaCLIPTextModel as JinaCLIPTextModel, __webpack_exports__JinaCLIPVisionModel as JinaCLIPVisionModel, __webpack_exports__LlamaForCausalLM as LlamaForCausalLM, __webpack_exports__LlamaModel as LlamaModel, __webpack_exports__LlamaPreTrainedModel as LlamaPreTrainedModel, __webpack_exports__LlamaTokenizer as LlamaTokenizer, __webpack_exports__LlavaForConditionalGeneration as LlavaForConditionalGeneration, __webpack_exports__LlavaOnevisionForConditionalGeneration as LlavaOnevisionForConditionalGeneration, __webpack_exports__LlavaOnevisionImageProcessor as LlavaOnevisionImageProcessor, __webpack_exports__LlavaPreTrainedModel as LlavaPreTrainedModel, __webpack_exports__LogitsProcessor as LogitsProcessor, __webpack_exports__LogitsProcessorList as LogitsProcessorList, __webpack_exports__LogitsWarper as LogitsWarper, __webpack_exports__LongT5ForConditionalGeneration as 
LongT5ForConditionalGeneration, __webpack_exports__LongT5Model as LongT5Model, __webpack_exports__LongT5PreTrainedModel as LongT5PreTrainedModel, __webpack_exports__M2M100ForConditionalGeneration as M2M100ForConditionalGeneration, __webpack_exports__M2M100Model as M2M100Model, __webpack_exports__M2M100PreTrainedModel as M2M100PreTrainedModel, __webpack_exports__M2M100Tokenizer as M2M100Tokenizer, __webpack_exports__MBart50Tokenizer as MBart50Tokenizer, __webpack_exports__MBartForCausalLM as MBartForCausalLM, __webpack_exports__MBartForConditionalGeneration as MBartForConditionalGeneration, __webpack_exports__MBartForSequenceClassification as MBartForSequenceClassification, __webpack_exports__MBartModel as MBartModel, __webpack_exports__MBartPreTrainedModel as MBartPreTrainedModel, __webpack_exports__MBartTokenizer as MBartTokenizer, __webpack_exports__MPNetForMaskedLM as MPNetForMaskedLM, __webpack_exports__MPNetForQuestionAnswering as MPNetForQuestionAnswering, __webpack_exports__MPNetForSequenceClassification as MPNetForSequenceClassification, __webpack_exports__MPNetForTokenClassification as MPNetForTokenClassification, __webpack_exports__MPNetModel as MPNetModel, __webpack_exports__MPNetPreTrainedModel as MPNetPreTrainedModel, __webpack_exports__MPNetTokenizer as MPNetTokenizer, __webpack_exports__MT5ForConditionalGeneration as MT5ForConditionalGeneration, __webpack_exports__MT5Model as MT5Model, __webpack_exports__MT5PreTrainedModel as MT5PreTrainedModel, __webpack_exports__MarianMTModel as MarianMTModel, __webpack_exports__MarianModel as MarianModel, __webpack_exports__MarianPreTrainedModel as MarianPreTrainedModel, __webpack_exports__MarianTokenizer as MarianTokenizer, __webpack_exports__Mask2FormerImageProcessor as Mask2FormerImageProcessor, __webpack_exports__MaskFormerFeatureExtractor as MaskFormerFeatureExtractor, __webpack_exports__MaskFormerForInstanceSegmentation as MaskFormerForInstanceSegmentation, __webpack_exports__MaskFormerImageProcessor as 
MaskFormerImageProcessor, __webpack_exports__MaskFormerModel as MaskFormerModel, __webpack_exports__MaskFormerPreTrainedModel as MaskFormerPreTrainedModel, __webpack_exports__MaskedLMOutput as MaskedLMOutput, __webpack_exports__MaxLengthCriteria as MaxLengthCriteria, __webpack_exports__MgpstrForSceneTextRecognition as MgpstrForSceneTextRecognition, __webpack_exports__MgpstrModelOutput as MgpstrModelOutput, __webpack_exports__MgpstrPreTrainedModel as MgpstrPreTrainedModel, __webpack_exports__MgpstrProcessor as MgpstrProcessor, __webpack_exports__MgpstrTokenizer as MgpstrTokenizer, __webpack_exports__MinLengthLogitsProcessor as MinLengthLogitsProcessor, __webpack_exports__MinNewTokensLengthLogitsProcessor as MinNewTokensLengthLogitsProcessor, __webpack_exports__MistralForCausalLM as MistralForCausalLM, __webpack_exports__MistralModel as MistralModel, __webpack_exports__MistralPreTrainedModel as MistralPreTrainedModel, __webpack_exports__MobileBertForMaskedLM as MobileBertForMaskedLM, __webpack_exports__MobileBertForQuestionAnswering as MobileBertForQuestionAnswering, __webpack_exports__MobileBertForSequenceClassification as MobileBertForSequenceClassification, __webpack_exports__MobileBertModel as MobileBertModel, __webpack_exports__MobileBertPreTrainedModel as MobileBertPreTrainedModel, __webpack_exports__MobileBertTokenizer as MobileBertTokenizer, __webpack_exports__MobileLLMForCausalLM as MobileLLMForCausalLM, __webpack_exports__MobileLLMModel as MobileLLMModel, __webpack_exports__MobileLLMPreTrainedModel as MobileLLMPreTrainedModel, __webpack_exports__MobileNetV1FeatureExtractor as MobileNetV1FeatureExtractor, __webpack_exports__MobileNetV1ForImageClassification as MobileNetV1ForImageClassification, __webpack_exports__MobileNetV1ImageProcessor as MobileNetV1ImageProcessor, __webpack_exports__MobileNetV1Model as MobileNetV1Model, __webpack_exports__MobileNetV1PreTrainedModel as MobileNetV1PreTrainedModel, __webpack_exports__MobileNetV2FeatureExtractor as 
MobileNetV2FeatureExtractor, __webpack_exports__MobileNetV2ForImageClassification as MobileNetV2ForImageClassification, __webpack_exports__MobileNetV2ImageProcessor as MobileNetV2ImageProcessor, __webpack_exports__MobileNetV2Model as MobileNetV2Model, __webpack_exports__MobileNetV2PreTrainedModel as MobileNetV2PreTrainedModel, __webpack_exports__MobileNetV3FeatureExtractor as MobileNetV3FeatureExtractor, __webpack_exports__MobileNetV3ForImageClassification as MobileNetV3ForImageClassification, __webpack_exports__MobileNetV3ImageProcessor as MobileNetV3ImageProcessor, __webpack_exports__MobileNetV3Model as MobileNetV3Model, __webpack_exports__MobileNetV3PreTrainedModel as MobileNetV3PreTrainedModel, __webpack_exports__MobileNetV4FeatureExtractor as MobileNetV4FeatureExtractor, __webpack_exports__MobileNetV4ForImageClassification as MobileNetV4ForImageClassification, __webpack_exports__MobileNetV4ImageProcessor as MobileNetV4ImageProcessor, __webpack_exports__MobileNetV4Model as MobileNetV4Model, __webpack_exports__MobileNetV4PreTrainedModel as MobileNetV4PreTrainedModel, __webpack_exports__MobileViTFeatureExtractor as MobileViTFeatureExtractor, __webpack_exports__MobileViTForImageClassification as MobileViTForImageClassification, __webpack_exports__MobileViTImageProcessor as MobileViTImageProcessor, __webpack_exports__MobileViTModel as MobileViTModel, __webpack_exports__MobileViTPreTrainedModel as MobileViTPreTrainedModel, __webpack_exports__MobileViTV2ForImageClassification as MobileViTV2ForImageClassification, __webpack_exports__MobileViTV2Model as MobileViTV2Model, __webpack_exports__MobileViTV2PreTrainedModel as MobileViTV2PreTrainedModel, __webpack_exports__ModelOutput as ModelOutput, __webpack_exports__Moondream1ForConditionalGeneration as Moondream1ForConditionalGeneration, __webpack_exports__MptForCausalLM as MptForCausalLM, __webpack_exports__MptModel as MptModel, __webpack_exports__MptPreTrainedModel as MptPreTrainedModel, 
__webpack_exports__MultiModalityCausalLM as MultiModalityCausalLM, __webpack_exports__MultiModalityPreTrainedModel as MultiModalityPreTrainedModel, __webpack_exports__MusicgenForCausalLM as MusicgenForCausalLM, __webpack_exports__MusicgenForConditionalGeneration as MusicgenForConditionalGeneration, __webpack_exports__MusicgenModel as MusicgenModel, __webpack_exports__MusicgenPreTrainedModel as MusicgenPreTrainedModel, __webpack_exports__NllbTokenizer as NllbTokenizer, __webpack_exports__NoBadWordsLogitsProcessor as NoBadWordsLogitsProcessor, __webpack_exports__NoRepeatNGramLogitsProcessor as NoRepeatNGramLogitsProcessor, __webpack_exports__NomicBertModel as NomicBertModel, __webpack_exports__NomicBertPreTrainedModel as NomicBertPreTrainedModel, __webpack_exports__NougatImageProcessor as NougatImageProcessor, __webpack_exports__NougatTokenizer as NougatTokenizer, __webpack_exports__OPTForCausalLM as OPTForCausalLM, __webpack_exports__OPTModel as OPTModel, __webpack_exports__OPTPreTrainedModel as OPTPreTrainedModel, __webpack_exports__ObjectDetectionPipeline as ObjectDetectionPipeline, __webpack_exports__Olmo2ForCausalLM as Olmo2ForCausalLM, __webpack_exports__Olmo2Model as Olmo2Model, __webpack_exports__Olmo2PreTrainedModel as Olmo2PreTrainedModel, __webpack_exports__OlmoForCausalLM as OlmoForCausalLM, __webpack_exports__OlmoModel as OlmoModel, __webpack_exports__OlmoPreTrainedModel as OlmoPreTrainedModel, __webpack_exports__OpenELMForCausalLM as OpenELMForCausalLM, __webpack_exports__OpenELMModel as OpenELMModel, __webpack_exports__OpenELMPreTrainedModel as OpenELMPreTrainedModel, __webpack_exports__OwlViTFeatureExtractor as OwlViTFeatureExtractor, __webpack_exports__OwlViTForObjectDetection as OwlViTForObjectDetection, __webpack_exports__OwlViTImageProcessor as OwlViTImageProcessor, __webpack_exports__OwlViTModel as OwlViTModel, __webpack_exports__OwlViTPreTrainedModel as OwlViTPreTrainedModel, __webpack_exports__OwlViTProcessor as OwlViTProcessor, 
__webpack_exports__Owlv2ForObjectDetection as Owlv2ForObjectDetection, __webpack_exports__Owlv2ImageProcessor as Owlv2ImageProcessor, __webpack_exports__Owlv2Model as Owlv2Model, __webpack_exports__Owlv2PreTrainedModel as Owlv2PreTrainedModel, __webpack_exports__PaliGemmaForConditionalGeneration as PaliGemmaForConditionalGeneration, __webpack_exports__PaliGemmaPreTrainedModel as PaliGemmaPreTrainedModel, __webpack_exports__PaliGemmaProcessor as PaliGemmaProcessor, __webpack_exports__PatchTSMixerForPrediction as PatchTSMixerForPrediction, __webpack_exports__PatchTSMixerModel as PatchTSMixerModel, __webpack_exports__PatchTSMixerPreTrainedModel as PatchTSMixerPreTrainedModel, __webpack_exports__PatchTSTForPrediction as PatchTSTForPrediction, __webpack_exports__PatchTSTModel as PatchTSTModel, __webpack_exports__PatchTSTPreTrainedModel as PatchTSTPreTrainedModel, __webpack_exports__Phi3ForCausalLM as Phi3ForCausalLM, __webpack_exports__Phi3Model as Phi3Model, __webpack_exports__Phi3PreTrainedModel as Phi3PreTrainedModel, __webpack_exports__PhiForCausalLM as PhiForCausalLM, __webpack_exports__PhiModel as PhiModel, __webpack_exports__PhiPreTrainedModel as PhiPreTrainedModel, __webpack_exports__Pipeline as Pipeline, __webpack_exports__PreTrainedModel as PreTrainedModel, __webpack_exports__PreTrainedTokenizer as PreTrainedTokenizer, __webpack_exports__PretrainedConfig as PretrainedConfig, __webpack_exports__PretrainedMixin as PretrainedMixin, __webpack_exports__Processor as Processor, __webpack_exports__PvtForImageClassification as PvtForImageClassification, __webpack_exports__PvtImageProcessor as PvtImageProcessor, __webpack_exports__PvtModel as PvtModel, __webpack_exports__PvtPreTrainedModel as PvtPreTrainedModel, __webpack_exports__PyAnnoteFeatureExtractor as PyAnnoteFeatureExtractor, __webpack_exports__PyAnnoteForAudioFrameClassification as PyAnnoteForAudioFrameClassification, __webpack_exports__PyAnnoteModel as PyAnnoteModel, __webpack_exports__PyAnnotePreTrainedModel 
as PyAnnotePreTrainedModel, __webpack_exports__PyAnnoteProcessor as PyAnnoteProcessor, __webpack_exports__QuestionAnsweringModelOutput as QuestionAnsweringModelOutput, __webpack_exports__QuestionAnsweringPipeline as QuestionAnsweringPipeline, __webpack_exports__Qwen2ForCausalLM as Qwen2ForCausalLM, __webpack_exports__Qwen2Model as Qwen2Model, __webpack_exports__Qwen2PreTrainedModel as Qwen2PreTrainedModel, __webpack_exports__Qwen2Tokenizer as Qwen2Tokenizer, __webpack_exports__Qwen2VLForConditionalGeneration as Qwen2VLForConditionalGeneration, __webpack_exports__Qwen2VLImageProcessor as Qwen2VLImageProcessor, __webpack_exports__Qwen2VLPreTrainedModel as Qwen2VLPreTrainedModel, __webpack_exports__Qwen2VLProcessor as Qwen2VLProcessor, __webpack_exports__RTDetrForObjectDetection as RTDetrForObjectDetection, __webpack_exports__RTDetrImageProcessor as RTDetrImageProcessor, __webpack_exports__RTDetrModel as RTDetrModel, __webpack_exports__RTDetrObjectDetectionOutput as RTDetrObjectDetectionOutput, __webpack_exports__RTDetrPreTrainedModel as RTDetrPreTrainedModel, __webpack_exports__RawImage as RawImage, __webpack_exports__RepetitionPenaltyLogitsProcessor as RepetitionPenaltyLogitsProcessor, __webpack_exports__ResNetForImageClassification as ResNetForImageClassification, __webpack_exports__ResNetModel as ResNetModel, __webpack_exports__ResNetPreTrainedModel as ResNetPreTrainedModel, __webpack_exports__RoFormerForMaskedLM as RoFormerForMaskedLM, __webpack_exports__RoFormerForQuestionAnswering as RoFormerForQuestionAnswering, __webpack_exports__RoFormerForSequenceClassification as RoFormerForSequenceClassification, __webpack_exports__RoFormerForTokenClassification as RoFormerForTokenClassification, __webpack_exports__RoFormerModel as RoFormerModel, __webpack_exports__RoFormerPreTrainedModel as RoFormerPreTrainedModel, __webpack_exports__RoFormerTokenizer as RoFormerTokenizer, __webpack_exports__RobertaForMaskedLM as RobertaForMaskedLM, 
__webpack_exports__RobertaForQuestionAnswering as RobertaForQuestionAnswering, __webpack_exports__RobertaForSequenceClassification as RobertaForSequenceClassification, __webpack_exports__RobertaForTokenClassification as RobertaForTokenClassification, __webpack_exports__RobertaModel as RobertaModel, __webpack_exports__RobertaPreTrainedModel as RobertaPreTrainedModel, __webpack_exports__RobertaTokenizer as RobertaTokenizer, __webpack_exports__SamImageProcessor as SamImageProcessor, __webpack_exports__SamImageSegmentationOutput as SamImageSegmentationOutput, __webpack_exports__SamModel as SamModel, __webpack_exports__SamPreTrainedModel as SamPreTrainedModel, __webpack_exports__SamProcessor as SamProcessor, __webpack_exports__SapiensForDepthEstimation as SapiensForDepthEstimation, __webpack_exports__SapiensForNormalEstimation as SapiensForNormalEstimation, __webpack_exports__SapiensForSemanticSegmentation as SapiensForSemanticSegmentation, __webpack_exports__SapiensPreTrainedModel as SapiensPreTrainedModel, __webpack_exports__SeamlessM4TFeatureExtractor as SeamlessM4TFeatureExtractor, __webpack_exports__SegformerFeatureExtractor as SegformerFeatureExtractor, __webpack_exports__SegformerForImageClassification as SegformerForImageClassification, __webpack_exports__SegformerForSemanticSegmentation as SegformerForSemanticSegmentation, __webpack_exports__SegformerImageProcessor as SegformerImageProcessor, __webpack_exports__SegformerModel as SegformerModel, __webpack_exports__SegformerPreTrainedModel as SegformerPreTrainedModel, __webpack_exports__Seq2SeqLMOutput as Seq2SeqLMOutput, __webpack_exports__SequenceClassifierOutput as SequenceClassifierOutput, __webpack_exports__SiglipImageProcessor as SiglipImageProcessor, __webpack_exports__SiglipModel as SiglipModel, __webpack_exports__SiglipPreTrainedModel as SiglipPreTrainedModel, __webpack_exports__SiglipTextModel as SiglipTextModel, __webpack_exports__SiglipTokenizer as SiglipTokenizer, __webpack_exports__SiglipVisionModel 
as SiglipVisionModel, __webpack_exports__SpeechT5FeatureExtractor as SpeechT5FeatureExtractor, __webpack_exports__SpeechT5ForSpeechToText as SpeechT5ForSpeechToText, __webpack_exports__SpeechT5ForTextToSpeech as SpeechT5ForTextToSpeech, __webpack_exports__SpeechT5HifiGan as SpeechT5HifiGan, __webpack_exports__SpeechT5Model as SpeechT5Model, __webpack_exports__SpeechT5PreTrainedModel as SpeechT5PreTrainedModel, __webpack_exports__SpeechT5Processor as SpeechT5Processor, __webpack_exports__SpeechT5Tokenizer as SpeechT5Tokenizer, __webpack_exports__SqueezeBertForMaskedLM as SqueezeBertForMaskedLM, __webpack_exports__SqueezeBertForQuestionAnswering as SqueezeBertForQuestionAnswering, __webpack_exports__SqueezeBertForSequenceClassification as SqueezeBertForSequenceClassification, __webpack_exports__SqueezeBertModel as SqueezeBertModel, __webpack_exports__SqueezeBertPreTrainedModel as SqueezeBertPreTrainedModel, __webpack_exports__SqueezeBertTokenizer as SqueezeBertTokenizer, __webpack_exports__StableLmForCausalLM as StableLmForCausalLM, __webpack_exports__StableLmModel as StableLmModel, __webpack_exports__StableLmPreTrainedModel as StableLmPreTrainedModel, __webpack_exports__Starcoder2ForCausalLM as Starcoder2ForCausalLM, __webpack_exports__Starcoder2Model as Starcoder2Model, __webpack_exports__Starcoder2PreTrainedModel as Starcoder2PreTrainedModel, __webpack_exports__StoppingCriteria as StoppingCriteria, __webpack_exports__StoppingCriteriaList as StoppingCriteriaList, __webpack_exports__SummarizationPipeline as SummarizationPipeline, __webpack_exports__SuppressTokensAtBeginLogitsProcessor as SuppressTokensAtBeginLogitsProcessor, __webpack_exports__Swin2SRForImageSuperResolution as Swin2SRForImageSuperResolution, __webpack_exports__Swin2SRImageProcessor as Swin2SRImageProcessor, __webpack_exports__Swin2SRModel as Swin2SRModel, __webpack_exports__Swin2SRPreTrainedModel as Swin2SRPreTrainedModel, __webpack_exports__SwinForImageClassification as SwinForImageClassification, 
__webpack_exports__SwinModel as SwinModel, __webpack_exports__SwinPreTrainedModel as SwinPreTrainedModel, __webpack_exports__T5ForConditionalGeneration as T5ForConditionalGeneration, __webpack_exports__T5Model as T5Model, __webpack_exports__T5PreTrainedModel as T5PreTrainedModel, __webpack_exports__T5Tokenizer as T5Tokenizer, __webpack_exports__TableTransformerForObjectDetection as TableTransformerForObjectDetection, __webpack_exports__TableTransformerModel as TableTransformerModel, __webpack_exports__TableTransformerObjectDetectionOutput as TableTransformerObjectDetectionOutput, __webpack_exports__TableTransformerPreTrainedModel as TableTransformerPreTrainedModel, __webpack_exports__TemperatureLogitsWarper as TemperatureLogitsWarper, __webpack_exports__Tensor as Tensor, __webpack_exports__Text2TextGenerationPipeline as Text2TextGenerationPipeline, __webpack_exports__TextClassificationPipeline as TextClassificationPipeline, __webpack_exports__TextGenerationPipeline as TextGenerationPipeline, __webpack_exports__TextStreamer as TextStreamer, __webpack_exports__TextToAudioPipeline as TextToAudioPipeline, __webpack_exports__TokenClassificationPipeline as TokenClassificationPipeline, __webpack_exports__TokenClassifierOutput as TokenClassifierOutput, __webpack_exports__TokenizerModel as TokenizerModel, __webpack_exports__TopKLogitsWarper as TopKLogitsWarper, __webpack_exports__TopPLogitsWarper as TopPLogitsWarper, __webpack_exports__TrOCRForCausalLM as TrOCRForCausalLM, __webpack_exports__TrOCRPreTrainedModel as TrOCRPreTrainedModel, __webpack_exports__TranslationPipeline as TranslationPipeline, __webpack_exports__UniSpeechForCTC as UniSpeechForCTC, __webpack_exports__UniSpeechForSequenceClassification as UniSpeechForSequenceClassification, __webpack_exports__UniSpeechModel as UniSpeechModel, __webpack_exports__UniSpeechPreTrainedModel as UniSpeechPreTrainedModel, __webpack_exports__UniSpeechSatForAudioFrameClassification as UniSpeechSatForAudioFrameClassification, 
__webpack_exports__UniSpeechSatForCTC as UniSpeechSatForCTC, __webpack_exports__UniSpeechSatForSequenceClassification as UniSpeechSatForSequenceClassification, __webpack_exports__UniSpeechSatModel as UniSpeechSatModel, __webpack_exports__UniSpeechSatPreTrainedModel as UniSpeechSatPreTrainedModel, __webpack_exports__VLChatProcessor as VLChatProcessor, __webpack_exports__VLMImageProcessor as VLMImageProcessor, __webpack_exports__ViTFeatureExtractor as ViTFeatureExtractor, __webpack_exports__ViTForImageClassification as ViTForImageClassification, __webpack_exports__ViTImageProcessor as ViTImageProcessor, __webpack_exports__ViTMAEModel as ViTMAEModel, __webpack_exports__ViTMAEPreTrainedModel as ViTMAEPreTrainedModel, __webpack_exports__ViTMSNForImageClassification as ViTMSNForImageClassification, __webpack_exports__ViTMSNModel as ViTMSNModel, __webpack_exports__ViTMSNPreTrainedModel as ViTMSNPreTrainedModel, __webpack_exports__ViTModel as ViTModel, __webpack_exports__ViTPreTrainedModel as ViTPreTrainedModel, __webpack_exports__VisionEncoderDecoderModel as VisionEncoderDecoderModel, __webpack_exports__VitMatteForImageMatting as VitMatteForImageMatting, __webpack_exports__VitMatteImageProcessor as VitMatteImageProcessor, __webpack_exports__VitMattePreTrainedModel as VitMattePreTrainedModel, __webpack_exports__VitPoseForPoseEstimation as VitPoseForPoseEstimation, __webpack_exports__VitPoseImageProcessor as VitPoseImageProcessor, __webpack_exports__VitPosePreTrainedModel as VitPosePreTrainedModel, __webpack_exports__VitsModel as VitsModel, __webpack_exports__VitsModelOutput as VitsModelOutput, __webpack_exports__VitsPreTrainedModel as VitsPreTrainedModel, __webpack_exports__VitsTokenizer as VitsTokenizer, __webpack_exports__Wav2Vec2BertForCTC as Wav2Vec2BertForCTC, __webpack_exports__Wav2Vec2BertForSequenceClassification as Wav2Vec2BertForSequenceClassification, __webpack_exports__Wav2Vec2BertModel as Wav2Vec2BertModel, __webpack_exports__Wav2Vec2BertPreTrainedModel as 
Wav2Vec2BertPreTrainedModel, __webpack_exports__Wav2Vec2CTCTokenizer as Wav2Vec2CTCTokenizer, __webpack_exports__Wav2Vec2FeatureExtractor as Wav2Vec2FeatureExtractor, __webpack_exports__Wav2Vec2ForAudioFrameClassification as Wav2Vec2ForAudioFrameClassification, __webpack_exports__Wav2Vec2ForCTC as Wav2Vec2ForCTC, __webpack_exports__Wav2Vec2ForSequenceClassification as Wav2Vec2ForSequenceClassification, __webpack_exports__Wav2Vec2Model as Wav2Vec2Model, __webpack_exports__Wav2Vec2PreTrainedModel as Wav2Vec2PreTrainedModel, __webpack_exports__Wav2Vec2ProcessorWithLM as Wav2Vec2ProcessorWithLM, __webpack_exports__WavLMForAudioFrameClassification as WavLMForAudioFrameClassification, __webpack_exports__WavLMForCTC as WavLMForCTC, __webpack_exports__WavLMForSequenceClassification as WavLMForSequenceClassification, __webpack_exports__WavLMForXVector as WavLMForXVector, __webpack_exports__WavLMModel as WavLMModel, __webpack_exports__WavLMPreTrainedModel as WavLMPreTrainedModel, __webpack_exports__WeSpeakerFeatureExtractor as WeSpeakerFeatureExtractor, __webpack_exports__WeSpeakerResNetModel as WeSpeakerResNetModel, __webpack_exports__WeSpeakerResNetPreTrainedModel as WeSpeakerResNetPreTrainedModel, __webpack_exports__WhisperFeatureExtractor as WhisperFeatureExtractor, __webpack_exports__WhisperForConditionalGeneration as WhisperForConditionalGeneration, __webpack_exports__WhisperModel as WhisperModel, __webpack_exports__WhisperPreTrainedModel as WhisperPreTrainedModel, __webpack_exports__WhisperProcessor as WhisperProcessor, __webpack_exports__WhisperTextStreamer as WhisperTextStreamer, __webpack_exports__WhisperTimeStampLogitsProcessor as WhisperTimeStampLogitsProcessor, __webpack_exports__WhisperTokenizer as WhisperTokenizer, __webpack_exports__XLMForQuestionAnswering as XLMForQuestionAnswering, __webpack_exports__XLMForSequenceClassification as XLMForSequenceClassification, __webpack_exports__XLMForTokenClassification as XLMForTokenClassification, 
__webpack_exports__XLMModel as XLMModel, __webpack_exports__XLMPreTrainedModel as XLMPreTrainedModel, __webpack_exports__XLMRobertaForMaskedLM as XLMRobertaForMaskedLM, __webpack_exports__XLMRobertaForQuestionAnswering as XLMRobertaForQuestionAnswering, __webpack_exports__XLMRobertaForSequenceClassification as XLMRobertaForSequenceClassification, __webpack_exports__XLMRobertaForTokenClassification as XLMRobertaForTokenClassification, __webpack_exports__XLMRobertaModel as XLMRobertaModel, __webpack_exports__XLMRobertaPreTrainedModel as XLMRobertaPreTrainedModel, __webpack_exports__XLMRobertaTokenizer as XLMRobertaTokenizer, __webpack_exports__XLMTokenizer as XLMTokenizer, __webpack_exports__XLMWithLMHeadModel as XLMWithLMHeadModel, __webpack_exports__XVectorOutput as XVectorOutput, __webpack_exports__YolosFeatureExtractor as YolosFeatureExtractor, __webpack_exports__YolosForObjectDetection as YolosForObjectDetection, __webpack_exports__YolosImageProcessor as YolosImageProcessor, __webpack_exports__YolosModel as YolosModel, __webpack_exports__YolosObjectDetectionOutput as YolosObjectDetectionOutput, __webpack_exports__YolosPreTrainedModel as YolosPreTrainedModel, __webpack_exports__ZeroShotAudioClassificationPipeline as ZeroShotAudioClassificationPipeline, __webpack_exports__ZeroShotClassificationPipeline as ZeroShotClassificationPipeline, __webpack_exports__ZeroShotImageClassificationPipeline as ZeroShotImageClassificationPipeline, __webpack_exports__ZeroShotObjectDetectionPipeline as ZeroShotObjectDetectionPipeline, __webpack_exports__bankers_round as bankers_round, __webpack_exports__cat as cat, __webpack_exports__cos_sim as cos_sim, __webpack_exports__dot as dot, __webpack_exports__dynamic_time_warping as dynamic_time_warping, __webpack_exports__env as env, __webpack_exports__full as full, __webpack_exports__full_like as full_like, __webpack_exports__getKeyValueShapes as getKeyValueShapes, __webpack_exports__hamming as hamming, __webpack_exports__hanning as 
hanning, __webpack_exports__interpolate as interpolate, __webpack_exports__interpolate_4d as interpolate_4d, __webpack_exports__interpolate_data as interpolate_data, __webpack_exports__is_chinese_char as is_chinese_char, __webpack_exports__layer_norm as layer_norm, __webpack_exports__load_image as load_image, __webpack_exports__log_softmax as log_softmax, __webpack_exports__magnitude as magnitude, __webpack_exports__matmul as matmul, __webpack_exports__max as max, __webpack_exports__mean as mean, __webpack_exports__mean_pooling as mean_pooling, __webpack_exports__medianFilter as medianFilter, __webpack_exports__mel_filter_bank as mel_filter_bank, __webpack_exports__min as min, __webpack_exports__ones as ones, __webpack_exports__ones_like as ones_like, __webpack_exports__permute as permute, __webpack_exports__permute_data as permute_data, __webpack_exports__pipeline as pipeline, __webpack_exports__quantize_embeddings as quantize_embeddings, __webpack_exports__read_audio as read_audio, __webpack_exports__rfft as rfft, __webpack_exports__round as round, __webpack_exports__softmax as softmax, __webpack_exports__spectrogram as spectrogram, __webpack_exports__stack as stack, __webpack_exports__std_mean as std_mean, __webpack_exports__topk as topk, __webpack_exports__window_function as window_function, __webpack_exports__zeros as zeros, __webpack_exports__zeros_like as zeros_like };
|
|
34491
35276
|
|
|
34492
35277
|
//# sourceMappingURL=transformers.mjs.map
|