@huggingface/transformers 3.0.0-alpha.14 → 3.0.0-alpha.16
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +12 -6
- package/dist/ort-wasm-simd-threaded.jsep.wasm +0 -0
- package/dist/transformers.cjs +678 -443
- package/dist/transformers.cjs.map +1 -1
- package/dist/transformers.js +1107 -825
- package/dist/transformers.js.map +1 -1
- package/dist/transformers.min.cjs +14 -14
- package/dist/transformers.min.cjs.map +1 -1
- package/dist/transformers.min.js +17 -17
- package/dist/transformers.min.js.map +1 -1
- package/dist/transformers.min.mjs +52 -52
- package/dist/transformers.min.mjs.map +1 -1
- package/dist/transformers.mjs +699 -444
- package/dist/transformers.mjs.map +1 -1
- package/package.json +4 -5
- package/src/configs.js +16 -4
- package/src/env.js +4 -4
- package/src/models.js +151 -58
- package/src/pipelines.js +5 -4
- package/src/processors.js +313 -285
- package/src/tokenizers.js +111 -72
- package/src/utils/core.js +12 -0
- package/src/utils/data-structures.js +13 -11
- package/src/utils/hub.js +1 -1
- package/src/utils/maths.js +13 -4
- package/types/configs.d.ts +25 -3
- package/types/configs.d.ts.map +1 -1
- package/types/models.d.ts +63 -2
- package/types/models.d.ts.map +1 -1
- package/types/pipelines.d.ts.map +1 -1
- package/types/processors.d.ts +42 -52
- package/types/processors.d.ts.map +1 -1
- package/types/tokenizers.d.ts +23 -1
- package/types/tokenizers.d.ts.map +1 -1
- package/types/utils/core.d.ts +7 -0
- package/types/utils/core.d.ts.map +1 -1
- package/types/utils/data-structures.d.ts +6 -6
- package/types/utils/data-structures.d.ts.map +1 -1
- package/types/utils/hub.d.ts +1 -1
- package/types/utils/hub.d.ts.map +1 -1
- package/types/utils/maths.d.ts.map +1 -1
package/dist/transformers.mjs
CHANGED

@@ -3756,7 +3756,7 @@ __webpack_require__.r(__webpack_exports__);
 // Licensed under the MIT License.
 // This file is generated by /js/scripts/update-version.ts
 // Do not modify file content manually.
-const version = '1.19.
+const version = '1.19.2';
 //# sourceMappingURL=version.js.map
 
 /***/ }),
@@ -4301,16 +4301,23 @@ function getKeyValueShapes(config, {
 class PretrainedConfig {
     // NOTE: Typo in original
 
+    /** @type {string|null} */
+    model_type = null;
+
+    /** @type {boolean} */
+    is_encoder_decoder = false;
+
+    /** @type {number} */
     max_position_embeddings;
 
+    /** @type {TransformersJSConfig} */
+    'transformers.js_config';
+
     /**
      * Create a new PreTrainedTokenizer instance.
      * @param {Object} configJSON The JSON of the config.
      */
     constructor(configJSON) {
-        this.model_type = null;
-        this.is_encoder_decoder = false;
-
         Object.assign(this, configJSON);
         this.normalized_config = getNormalizedConfig(this);
     }
@@ -4362,7 +4369,12 @@ class AutoConfig {
 /**
  * Transformers.js-specific configuration, possibly present in config.json under the key `transformers.js_config`.
  * @typedef {Object} TransformersJSConfig
- * @property {import('./
+ * @property {import('./utils/tensor.js').DataType} [kv_cache_dtype] The data type of the key-value cache.
+ * @property {Record<string, number>} [free_dimension_overrides] Override the free dimensions of the model.
+ * See https://onnxruntime.ai/docs/tutorials/web/env-flags-and-session-options.html#freedimensionoverrides
+ * for more information.
+ * @property {import('./utils/devices.js').DeviceType} [device] The default device to use for the model.
+ * @property {import('./utils/dtypes.js').DataType} [dtype] The default data type to use for the model.
 */
 
 
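In practice, these fields live in a model repo's config.json and act as per-repo defaults; explicit `from_pretrained` options take precedence (see the `options.device ?? custom_config.device` resolution in `getSession` further below). A minimal sketch — the model id is a placeholder, not a real checkpoint:

```js
import { AutoModel } from '@huggingface/transformers';

// Suppose the repo's config.json ships:
//   "transformers.js_config": { "device": "webgpu", "dtype": "fp16" }
const model = await AutoModel.from_pretrained('your-org/your-model', {
    device: 'wasm', // explicit option overrides the config's "device"
    // `dtype` omitted: falls back to the config's "fp16", then the device default
});
```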
@@ -4410,7 +4422,7 @@ __webpack_require__.r(__webpack_exports__);
 
 
 
-const VERSION = '3.0.0-alpha.14';
+const VERSION = '3.0.0-alpha.16';
 
 // Check if various APIs are available (depends on environment)
 const IS_BROWSER_ENV = typeof self !== 'undefined';
@@ -4457,19 +4469,19 @@ const apis = Object.freeze({
 });
 
 const RUNNING_LOCALLY = IS_FS_AVAILABLE && IS_PATH_AVAILABLE;
-const 
+const dirname__ = RUNNING_LOCALLY
     ? path__WEBPACK_IMPORTED_MODULE_1__["default"].dirname(path__WEBPACK_IMPORTED_MODULE_1__["default"].dirname(url__WEBPACK_IMPORTED_MODULE_2__["default"].fileURLToPath(import.meta.url)))
     : './';
 
 // Only used for environments with access to file system
 const DEFAULT_CACHE_DIR = RUNNING_LOCALLY
-    ? path__WEBPACK_IMPORTED_MODULE_1__["default"].join(
+    ? path__WEBPACK_IMPORTED_MODULE_1__["default"].join(dirname__, '/.cache/')
     : null;
 
 // Set local model path, based on available APIs
 const DEFAULT_LOCAL_MODEL_PATH = '/models/';
 const localModelPath = RUNNING_LOCALLY
-    ? path__WEBPACK_IMPORTED_MODULE_1__["default"].join(
+    ? path__WEBPACK_IMPORTED_MODULE_1__["default"].join(dirname__, DEFAULT_LOCAL_MODEL_PATH)
     : DEFAULT_LOCAL_MODEL_PATH;
 
 /**
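The practical upshot: in Node, the default cache directory and local-model path resolve relative to the package's install location (the `dirname__` computed above). Both remain overridable through `env`; a short sketch with illustrative paths:

```js
import { env } from '@huggingface/transformers';

// Override the defaults derived from `dirname__`:
env.cacheDir = './.my-model-cache';   // where downloaded models are cached
env.localModelPath = './my-models/';  // where local models are looked up
```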
@@ -6349,6 +6361,7 @@ __webpack_require__.r(__webpack_exports__);
 /* harmony export */   AutoModelForTextToSpectrogram: () => (/* binding */ AutoModelForTextToSpectrogram),
 /* harmony export */   AutoModelForTextToWaveform: () => (/* binding */ AutoModelForTextToWaveform),
 /* harmony export */   AutoModelForTokenClassification: () => (/* binding */ AutoModelForTokenClassification),
+/* harmony export */   AutoModelForUniversalSegmentation: () => (/* binding */ AutoModelForUniversalSegmentation),
 /* harmony export */   AutoModelForVision2Seq: () => (/* binding */ AutoModelForVision2Seq),
 /* harmony export */   AutoModelForXVector: () => (/* binding */ AutoModelForXVector),
 /* harmony export */   AutoModelForZeroShotObjectDetection: () => (/* binding */ AutoModelForZeroShotObjectDetection),
@@ -6380,7 +6393,9 @@ __webpack_require__.r(__webpack_exports__);
 /* harmony export */   CLIPSegForImageSegmentation: () => (/* binding */ CLIPSegForImageSegmentation),
 /* harmony export */   CLIPSegModel: () => (/* binding */ CLIPSegModel),
 /* harmony export */   CLIPSegPreTrainedModel: () => (/* binding */ CLIPSegPreTrainedModel),
+/* harmony export */   CLIPTextModel: () => (/* binding */ CLIPTextModel),
 /* harmony export */   CLIPTextModelWithProjection: () => (/* binding */ CLIPTextModelWithProjection),
+/* harmony export */   CLIPVisionModel: () => (/* binding */ CLIPVisionModel),
 /* harmony export */   CLIPVisionModelWithProjection: () => (/* binding */ CLIPVisionModelWithProjection),
 /* harmony export */   CamembertForMaskedLM: () => (/* binding */ CamembertForMaskedLM),
 /* harmony export */   CamembertForQuestionAnswering: () => (/* binding */ CamembertForQuestionAnswering),
@@ -6429,6 +6444,8 @@ __webpack_require__.r(__webpack_exports__);
 /* harmony export */   DebertaV2ForTokenClassification: () => (/* binding */ DebertaV2ForTokenClassification),
 /* harmony export */   DebertaV2Model: () => (/* binding */ DebertaV2Model),
 /* harmony export */   DebertaV2PreTrainedModel: () => (/* binding */ DebertaV2PreTrainedModel),
+/* harmony export */   DecisionTransformerModel: () => (/* binding */ DecisionTransformerModel),
+/* harmony export */   DecisionTransformerPreTrainedModel: () => (/* binding */ DecisionTransformerPreTrainedModel),
 /* harmony export */   DeiTForImageClassification: () => (/* binding */ DeiTForImageClassification),
 /* harmony export */   DeiTModel: () => (/* binding */ DeiTModel),
 /* harmony export */   DeiTPreTrainedModel: () => (/* binding */ DeiTPreTrainedModel),
@@ -6497,6 +6514,8 @@ __webpack_require__.r(__webpack_exports__);
 /* harmony export */   GemmaForCausalLM: () => (/* binding */ GemmaForCausalLM),
 /* harmony export */   GemmaModel: () => (/* binding */ GemmaModel),
 /* harmony export */   GemmaPreTrainedModel: () => (/* binding */ GemmaPreTrainedModel),
+/* harmony export */   GroupViTModel: () => (/* binding */ GroupViTModel),
+/* harmony export */   GroupViTPreTrainedModel: () => (/* binding */ GroupViTPreTrainedModel),
 /* harmony export */   HieraForImageClassification: () => (/* binding */ HieraForImageClassification),
 /* harmony export */   HieraModel: () => (/* binding */ HieraModel),
 /* harmony export */   HieraPreTrainedModel: () => (/* binding */ HieraPreTrainedModel),
@@ -6536,6 +6555,9 @@ __webpack_require__.r(__webpack_exports__);
 /* harmony export */   MarianMTModel: () => (/* binding */ MarianMTModel),
 /* harmony export */   MarianModel: () => (/* binding */ MarianModel),
 /* harmony export */   MarianPreTrainedModel: () => (/* binding */ MarianPreTrainedModel),
+/* harmony export */   MaskFormerForInstanceSegmentation: () => (/* binding */ MaskFormerForInstanceSegmentation),
+/* harmony export */   MaskFormerModel: () => (/* binding */ MaskFormerModel),
+/* harmony export */   MaskFormerPreTrainedModel: () => (/* binding */ MaskFormerPreTrainedModel),
 /* harmony export */   MaskedLMOutput: () => (/* binding */ MaskedLMOutput),
 /* harmony export */   MistralForCausalLM: () => (/* binding */ MistralForCausalLM),
 /* harmony export */   MistralModel: () => (/* binding */ MistralModel),
@@ -6594,6 +6616,9 @@ __webpack_require__.r(__webpack_exports__);
 /* harmony export */   PhiPreTrainedModel: () => (/* binding */ PhiPreTrainedModel),
 /* harmony export */   PreTrainedModel: () => (/* binding */ PreTrainedModel),
 /* harmony export */   PretrainedMixin: () => (/* binding */ PretrainedMixin),
+/* harmony export */   PvtForImageClassification: () => (/* binding */ PvtForImageClassification),
+/* harmony export */   PvtModel: () => (/* binding */ PvtModel),
+/* harmony export */   PvtPreTrainedModel: () => (/* binding */ PvtPreTrainedModel),
 /* harmony export */   PyAnnoteForAudioFrameClassification: () => (/* binding */ PyAnnoteForAudioFrameClassification),
 /* harmony export */   PyAnnoteModel: () => (/* binding */ PyAnnoteModel),
 /* harmony export */   PyAnnotePreTrainedModel: () => (/* binding */ PyAnnotePreTrainedModel),
@@ -6679,6 +6704,11 @@ __webpack_require__.r(__webpack_exports__);
 /* harmony export */   UniSpeechSatModel: () => (/* binding */ UniSpeechSatModel),
 /* harmony export */   UniSpeechSatPreTrainedModel: () => (/* binding */ UniSpeechSatPreTrainedModel),
 /* harmony export */   ViTForImageClassification: () => (/* binding */ ViTForImageClassification),
+/* harmony export */   ViTMAEModel: () => (/* binding */ ViTMAEModel),
+/* harmony export */   ViTMAEPreTrainedModel: () => (/* binding */ ViTMAEPreTrainedModel),
+/* harmony export */   ViTMSNForImageClassification: () => (/* binding */ ViTMSNForImageClassification),
+/* harmony export */   ViTMSNModel: () => (/* binding */ ViTMSNModel),
+/* harmony export */   ViTMSNPreTrainedModel: () => (/* binding */ ViTMSNPreTrainedModel),
 /* harmony export */   ViTModel: () => (/* binding */ ViTModel),
 /* harmony export */   ViTPreTrainedModel: () => (/* binding */ ViTPreTrainedModel),
 /* harmony export */   VisionEncoderDecoderModel: () => (/* binding */ VisionEncoderDecoderModel),
@@ -6838,7 +6868,8 @@ const MODEL_CLASS_TO_NAME_MAPPING = new Map();
  * @private
  */
 async function getSession(pretrained_model_name_or_path, fileName, options) {
-
+    const custom_config = options.config?.['transformers.js_config'] ?? {};
+    let device = options.device ?? custom_config.device;
     if (device && typeof device !== 'string') {
         if (device.hasOwnProperty(fileName)) {
             device = device[fileName];
@@ -6856,7 +6887,7 @@ async function getSession(pretrained_model_name_or_path, fileName, options) {
 
     // If options.dtype is specified, we use it to choose the suffix for the model file.
     // Otherwise, we use the default dtype for the device.
-    let dtype = options.dtype;
+    let dtype = options.dtype ?? custom_config.dtype;
     if (typeof dtype !== 'string') {
         if (dtype && dtype.hasOwnProperty(fileName)) {
             dtype = dtype[fileName];
@@ -6883,6 +6914,16 @@ async function getSession(pretrained_model_name_or_path, fileName, options) {
     // Overwrite `executionProviders` if not specified
     session_options.executionProviders ??= executionProviders;
 
+    // Overwrite `freeDimensionOverrides` if specified in config and not set in session options
+    const free_dimension_overrides = custom_config.free_dimension_overrides;
+    if (free_dimension_overrides) {
+        session_options.freeDimensionOverrides ??= free_dimension_overrides;
+    } else if (selectedDevice.startsWith('webnn') && !session_options.freeDimensionOverrides) {
+        console.warn(
+            'WebNN does not currently support dynamic shapes and requires `free_dimension_overrides` to be set in config.json as a field within "transformers.js_config". ' +
+            'When `free_dimension_overrides` is not set, you may experience significant performance degradation.'
+        );
+    }
 
     const bufferPromise = (0,_utils_hub_js__WEBPACK_IMPORTED_MODULE_5__.getModelFile)(pretrained_model_name_or_path, modelFileName, true, options);
 
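When a checkpoint's config.json does not ship `free_dimension_overrides`, a WebNN user can still supply static shapes directly, since the code above only fills `freeDimensionOverrides` when it is unset. A hedged sketch — the model id is a placeholder and the dimension names depend entirely on how the model was exported:

```js
import { AutoModel } from '@huggingface/transformers';

const model = await AutoModel.from_pretrained('your-org/your-model', {
    device: 'webnn',
    session_options: {
        // Pin every free dimension to a concrete value to avoid the warning above.
        freeDimensionOverrides: { batch_size: 1, sequence_length: 128 },
    },
});
```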
@@ -6931,6 +6972,9 @@ async function getSession(pretrained_model_name_or_path, fileName, options) {
         /** @type {Record<string, import('onnxruntime-common').Tensor.DataLocation>} */
         const preferredOutputLocation = {};
         for (const key in shapes) {
+            // TODO: For now, we keep encoder outputs on the CPU
+            // (otherwise, this causes a memory leak or throws an error "Error: previous buffer is not registered")
+            if (key.includes('encoder')) continue;
             preferredOutputLocation[key] = 'gpu-buffer';
         }
         session_options.preferredOutputLocation = preferredOutputLocation;
@@ -7086,37 +7130,6 @@ function toI64Tensor(items) {
     }
 }
 
-/**
- * Prepares an attention mask for a sequence of tokens based on configuration options.
- * @param {Object} self The calling object instance.
- * @param {Tensor} tokens The input tokens.
- * @returns {Tensor} The attention mask tensor.
- * @private
- */
-function prepareAttentionMask(self, tokens) {
-
-    // Prepare attention mask
-    let pad_token_id = self.config.pad_token_id ?? null;
-    let eos_token_id = self.config.eos_token_id ?? null;
-    if ((0,_utils_core_js__WEBPACK_IMPORTED_MODULE_4__.isIntegralNumber)(eos_token_id)) {
-        eos_token_id = [eos_token_id];
-    }
-
-    let is_pad_token_in_inputs = tokens.indexOf(pad_token_id) !== -1;
-    let is_pad_token_not_equal_to_eos_token_id = (eos_token_id === null) || !eos_token_id.includes(pad_token_id)
-
-    if (is_pad_token_in_inputs && is_pad_token_not_equal_to_eos_token_id) {
-        let data = BigInt64Array.from(
-            // Note: != so that int matches bigint
-            // @ts-ignore
-            tokens.data.map(x => x != pad_token_id)
-        )
-        return new _utils_tensor_js__WEBPACK_IMPORTED_MODULE_8__.Tensor('int64', data, tokens.dims)
-    } else {
-        return (0,_utils_tensor_js__WEBPACK_IMPORTED_MODULE_8__.ones_like)(tokens);
-    }
-}
-
 /**
  * Creates a boolean tensor with a single value.
  * @param {boolean} value The value of the tensor.
@@ -7387,8 +7400,8 @@ function image_text_to_text_prepare_inputs_for_generation(self, ...args) {
     } else {
         return decoder_prepare_inputs_for_generation(self, ...args);
     }
-
 }
+
 //////////////////////////////////////////////////
 
 //////////////////////////////////////////////////
@@ -8151,13 +8164,12 @@ class PreTrainedModel extends _utils_generic_js__WEBPACK_IMPORTED_MODULE_3__.Cal
         // - GenerationMode.BEAM_SEARCH
         // - GenerationMode.BEAM_SAMPLE
         ////////////////////////////////////////////////////
-        let 
+        let outputs;
         let attentions = {};
         while (true) {
             // prepare model inputs
             model_inputs = this.prepare_inputs_for_generation(all_input_ids, model_inputs, generation_config);
-
-            const outputs = await this.forward(model_inputs);
+            outputs = await this.forward(model_inputs);
 
             if (generation_config.output_attentions && generation_config.return_dict_in_generate) {
                 // Get attentions if they are present
@@ -8204,10 +8216,6 @@ class PreTrainedModel extends _utils_generic_js__WEBPACK_IMPORTED_MODULE_3__.Cal
 
             const stop = prepared_stopping_criteria(all_input_ids);
             if (stop.every(x => x)) {
-                if (generation_config.return_dict_in_generate) {
-                    // Get past key values without disposing buffers
-                    past_key_values = this.getPastKeyValues(outputs, model_inputs.past_key_values, false);
-                }
                 break;
             }
 
@@ -8220,6 +8228,9 @@ class PreTrainedModel extends _utils_generic_js__WEBPACK_IMPORTED_MODULE_3__.Cal
             streamer.end();
         }
 
+        // Retrieve and dispose all final past key values (including encoder attentions)
+        const past_key_values = this.getPastKeyValues(outputs, model_inputs.past_key_values, true);
+
         // TODO: ensure all_input_ids is padded correctly...
        const sequences = new _utils_tensor_js__WEBPACK_IMPORTED_MODULE_8__.Tensor('int64', all_input_ids.flat(), [all_input_ids.length, all_input_ids[0].length]);
 
@@ -8233,6 +8244,12 @@ class PreTrainedModel extends _utils_generic_js__WEBPACK_IMPORTED_MODULE_3__.Cal
                 // logits,
             }
         } else {
+            // Dispose all remaining tensors
+            for (const tensor of Object.values(outputs)) {
+                if (tensor.location === 'gpu-buffer') {
+                    tensor.dispose();
+                }
+            }
             return sequences;
         }
     }
@@ -8242,31 +8259,32 @@ class PreTrainedModel extends _utils_generic_js__WEBPACK_IMPORTED_MODULE_3__.Cal
      *
      * @param {Object} decoderResults The decoder results object.
      * @param {Object} pastKeyValues The previous past key values.
-     * @param {boolean} [dispose=true] Whether to dispose of the old gpu buffer.
      * @returns {Object} An object containing past key values.
      */
-    getPastKeyValues(decoderResults, pastKeyValues,
+    getPastKeyValues(decoderResults, pastKeyValues, disposeEncoderPKVs = false) {
         const pkvs = Object.create(null);
 
         for (const name in decoderResults) {
             if (name.startsWith('present')) {
                 const newName = name.replace('present', 'past_key_values');
-
-                if (
-                    // Optimization introduced by optimum to reuse past key values.
-                    // outputs with the previous past key values.
+                const is_encoder_pkv = name.includes('encoder');
+                if (is_encoder_pkv && pastKeyValues) {
+                    // Optimization introduced by optimum to reuse past key values.
+                    // So, we just replace the constant outputs (`decoderResults[name]`) with the previous past key values.
                     // https://github.com/huggingface/optimum/blob/0bf2c05fb7e1182b52d21b703cfc95fd9e4ea3dc/optimum/onnxruntime/base.py#L677-L704
                     pkvs[newName] = pastKeyValues[newName];
-                } else {
-                    if (dispose && pastKeyValues) {
-                        // Free old gpu buffer
-                        const t = pastKeyValues[newName];
-                        if (t.location === 'gpu-buffer') {
-                            t.dispose();
-                        }
-                    }
+                } else { // decoder or using first encoder PKVs
                     pkvs[newName] = decoderResults[name];
                 }
+
+                if (pastKeyValues && (!is_encoder_pkv || disposeEncoderPKVs)) {
+                    // - Always dispose decoder PKVs
+                    // - Only dispose encoder past key values when requested (after generation)
+                    const t = pastKeyValues[newName];
+                    if (t.location === 'gpu-buffer') {
+                        t.dispose();
+                    }
+                }
             }
         }
         return pkvs;
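The buffer-lifetime rule encoded above, distilled: decoder key/value buffers go stale after each step and their GPU buffers are freed immediately, while encoder key/values are reused across steps and only freed once generation finishes (`disposeEncoderPKVs = true`). A minimal sketch of the same decision, deliberately separate from the library's code:

```js
// Not the library's API: a distilled predicate for when a previous
// past-key-value GPU buffer may be freed.
function shouldDisposePrevious(name, hasPrevious, disposeEncoderPKVs) {
    const isEncoder = name.includes('encoder');
    // Decoder buffers: replaced every step. Encoder buffers: reused until the end.
    return hasPrevious && (!isEncoder || disposeEncoderPKVs);
}

console.log(shouldDisposePrevious('present.0.decoder.key', true, false)); // true
console.log(shouldDisposePrevious('present.0.encoder.key', true, false)); // false (still reused)
console.log(shouldDisposePrevious('present.0.encoder.key', true, true));  // true (generation done)
```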
@@ -10194,6 +10212,18 @@ class CLIPPreTrainedModel extends PreTrainedModel { }
  */
 class CLIPModel extends CLIPPreTrainedModel { }
 
+/**
+ * The text model from CLIP without any head or projection on top.
+ */
+class CLIPTextModel extends CLIPPreTrainedModel {
+    /** @type {PreTrainedModel.from_pretrained} */
+    static async from_pretrained(pretrained_model_name_or_path, options = {}) {
+        // Update default model file name if not provided
+        options.model_file_name ??= 'text_model';
+        return super.from_pretrained(pretrained_model_name_or_path, options);
+    }
+}
+
 /**
  * CLIP Text Model with a projection layer on top (a linear layer on top of the pooled output)
  *
@@ -10221,7 +10251,6 @@ class CLIPModel extends CLIPPreTrainedModel { }
  * ```
  */
 class CLIPTextModelWithProjection extends CLIPPreTrainedModel {
-
     /** @type {PreTrainedModel.from_pretrained} */
     static async from_pretrained(pretrained_model_name_or_path, options = {}) {
         // Update default model file name if not provided
@@ -10230,6 +10259,18 @@ class CLIPTextModelWithProjection extends CLIPPreTrainedModel {
     }
 }
 
+/**
+ * The vision model from CLIP without any head or projection on top.
+ */
+class CLIPVisionModel extends CLIPPreTrainedModel {
+    /** @type {PreTrainedModel.from_pretrained} */
+    static async from_pretrained(pretrained_model_name_or_path, options = {}) {
+        // Update default model file name if not provided
+        options.model_file_name ??= 'vision_model';
+        return super.from_pretrained(pretrained_model_name_or_path, options);
+    }
+}
+
 /**
  * CLIP Vision Model with a projection layer on top (a linear layer on top of the pooled output)
  *
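These two classes mirror the existing `*WithProjection` variants but load the head-less `text_model`/`vision_model` ONNX files. A hedged usage sketch — the checkpoint id is illustrative, and the output names depend on the exported graph (typically `last_hidden_state` and `pooler_output`):

```js
import { AutoTokenizer, CLIPTextModel } from '@huggingface/transformers';

const model_id = 'your-org/clip-checkpoint'; // must ship a separate text_model ONNX file
const tokenizer = await AutoTokenizer.from_pretrained(model_id);
const text_model = await CLIPTextModel.from_pretrained(model_id);

const inputs = tokenizer(['a photo of a cat', 'a photo of a dog'], {
    padding: true,
    truncation: true,
});
const outputs = await text_model(inputs); // e.g. outputs.last_hidden_state
```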
@@ -10896,6 +10937,43 @@ class ViTForImageClassification extends ViTPreTrainedModel {
 }
 //////////////////////////////////////////////////
 
+//////////////////////////////////////////////////
+class PvtPreTrainedModel extends PreTrainedModel { }
+class PvtModel extends PvtPreTrainedModel { }
+class PvtForImageClassification extends PvtPreTrainedModel {
+    /**
+     * @param {any} model_inputs
+     */
+    async _call(model_inputs) {
+        return new SequenceClassifierOutput(await super._call(model_inputs));
+    }
+}
+//////////////////////////////////////////////////
+
+//////////////////////////////////////////////////
+class ViTMAEPreTrainedModel extends PreTrainedModel { }
+class ViTMAEModel extends ViTMAEPreTrainedModel { }
+//////////////////////////////////////////////////
+
+
+//////////////////////////////////////////////////
+class ViTMSNPreTrainedModel extends PreTrainedModel { }
+class ViTMSNModel extends ViTMSNPreTrainedModel { }
+class ViTMSNForImageClassification extends ViTMSNPreTrainedModel {
+    /**
+     * @param {any} model_inputs
+     */
+    async _call(model_inputs) {
+        return new SequenceClassifierOutput(await super._call(model_inputs));
+    }
+}
+//////////////////////////////////////////////////
+
+//////////////////////////////////////////////////
+class GroupViTPreTrainedModel extends PreTrainedModel { }
+class GroupViTModel extends GroupViTPreTrainedModel { }
+//////////////////////////////////////////////////
+
 
 //////////////////////////////////////////////////
 class FastViTPreTrainedModel extends PreTrainedModel { }
|
|
|
11308
11386
|
class SapiensForNormalEstimation extends SapiensPreTrainedModel { }
|
|
11309
11387
|
//////////////////////////////////////////////////
|
|
11310
11388
|
|
|
11389
|
+
//////////////////////////////////////////////////
|
|
11390
|
+
class MaskFormerPreTrainedModel extends PreTrainedModel { }
|
|
11391
|
+
class MaskFormerModel extends MaskFormerPreTrainedModel { }
|
|
11392
|
+
class MaskFormerForInstanceSegmentation extends MaskFormerPreTrainedModel { }
|
|
11393
|
+
//////////////////////////////////////////////////
|
|
11311
11394
|
|
|
11312
11395
|
//////////////////////////////////////////////////
|
|
11313
11396
|
class GLPNPreTrainedModel extends PreTrainedModel { }
|
|
@@ -12830,6 +12913,7 @@ class MusicgenForConditionalGeneration extends PreTrainedModel { // NOTE: not Mu
|
|
|
12830
12913
|
return audio_values;
|
|
12831
12914
|
}
|
|
12832
12915
|
}
|
|
12916
|
+
//////////////////////////////////////////////////
|
|
12833
12917
|
|
|
12834
12918
|
//////////////////////////////////////////////////
|
|
12835
12919
|
// MobileNetV1 models
|
|
@@ -12923,6 +13007,17 @@ class MobileNetV4ForImageClassification extends MobileNetV4PreTrainedModel {
|
|
|
12923
13007
|
}
|
|
12924
13008
|
//////////////////////////////////////////////////
|
|
12925
13009
|
|
|
13010
|
+
//////////////////////////////////////////////////
|
|
13011
|
+
// Decision Transformer models
|
|
13012
|
+
class DecisionTransformerPreTrainedModel extends PreTrainedModel { }
|
|
13013
|
+
|
|
13014
|
+
/**
|
|
13015
|
+
* The model builds upon the GPT2 architecture to perform autoregressive prediction of actions in an offline RL setting.
|
|
13016
|
+
* Refer to the paper for more details: https://arxiv.org/abs/2106.01345
|
|
13017
|
+
*/
|
|
13018
|
+
class DecisionTransformerModel extends DecisionTransformerPreTrainedModel { }
|
|
13019
|
+
|
|
13020
|
+
//////////////////////////////////////////////////
|
|
12926
13021
|
|
|
12927
13022
|
//////////////////////////////////////////////////
|
|
12928
13023
|
// AutoModels, used to simplify construction of PreTrainedModels
|
|
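Decision Transformer lands in the encoder-only mapping below, so it loads through `AutoModel`. A hedged sketch: the model id is a placeholder, and the input names/shapes are assumed from the Decision Transformer convention (states, actions, returns-to-go, timesteps), not confirmed by this diff:

```js
import { AutoModel, Tensor } from '@huggingface/transformers';

const model = await AutoModel.from_pretrained('your-org/decision-transformer-checkpoint');

// Assumed inputs, shaped [batch, seq, dim] per the Decision Transformer setup:
const states = new Tensor('float32', new Float32Array(1 * 1 * 17), [1, 1, 17]);
const actions = new Tensor('float32', new Float32Array(1 * 1 * 6), [1, 1, 6]);
const returns_to_go = new Tensor('float32', new Float32Array([100]), [1, 1, 1]);
const timesteps = new Tensor('int64', new BigInt64Array([0n]), [1, 1]);

const outputs = await model({ states, actions, returns_to_go, timesteps });
```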
@@ -12961,7 +13056,7 @@ class PretrainedMixin {
         session_options = {},
     } = {}) {
 
-
+        const options = {
             progress_callback,
             config,
             cache_dir,
@@ -12980,7 +13075,7 @@ class PretrainedMixin {
             throw new Error("`MODEL_CLASS_MAPPINGS` not implemented for this type of `AutoClass`: " + this.name);
         }
 
-        for (
+        for (const MODEL_CLASS_MAPPING of this.MODEL_CLASS_MAPPINGS) {
             const modelInfo = MODEL_CLASS_MAPPING.get(options.config.model_type);
             if (!modelInfo) {
                 continue; // Item not found in this mapping
@@ -13035,6 +13130,10 @@ const MODEL_MAPPING_NAMES_ENCODER_ONLY = new Map([
     ['rt_detr', ['RTDetrModel', RTDetrModel]],
     ['table-transformer', ['TableTransformerModel', TableTransformerModel]],
     ['vit', ['ViTModel', ViTModel]],
+    ['pvt', ['PvtModel', PvtModel]],
+    ['vit_msn', ['ViTMSNModel', ViTMSNModel]],
+    ['vit_mae', ['ViTMAEModel', ViTMAEModel]],
+    ['groupvit', ['GroupViTModel', GroupViTModel]],
     ['fastvit', ['FastViTModel', FastViTModel]],
     ['mobilevit', ['MobileViTModel', MobileViTModel]],
     ['mobilevitv2', ['MobileViTV2Model', MobileViTV2Model]],
@@ -13057,10 +13156,14 @@ const MODEL_MAPPING_NAMES_ENCODER_ONLY = new Map([
     ['hifigan', ['SpeechT5HifiGan', SpeechT5HifiGan]],
     ['efficientnet', ['EfficientNetModel', EfficientNetModel]],
 
+    ['decision_transformer', ['DecisionTransformerModel', DecisionTransformerModel]],
+
     ['mobilenet_v1', ['MobileNetV1Model', MobileNetV1Model]],
     ['mobilenet_v2', ['MobileNetV2Model', MobileNetV2Model]],
     ['mobilenet_v3', ['MobileNetV3Model', MobileNetV3Model]],
     ['mobilenet_v4', ['MobileNetV4Model', MobileNetV4Model]],
+
+    ['maskformer', ['MaskFormerModel', MaskFormerModel]],
 ]);
 
 const MODEL_MAPPING_NAMES_ENCODER_DECODER = new Map([
@@ -13245,6 +13348,8 @@ const MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES = new Map([
 
 const MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = new Map([
     ['vit', ['ViTForImageClassification', ViTForImageClassification]],
+    ['pvt', ['PvtForImageClassification', PvtForImageClassification]],
+    ['vit_msn', ['ViTMSNForImageClassification', ViTMSNForImageClassification]],
     ['fastvit', ['FastViTForImageClassification', FastViTForImageClassification]],
     ['mobilevit', ['MobileViTForImageClassification', MobileViTForImageClassification]],
     ['mobilevitv2', ['MobileViTV2ForImageClassification', MobileViTV2ForImageClassification]],
@@ -13277,6 +13382,7 @@ const MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES = new Map([
 ]);
 
 const MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES = new Map([
+    // TODO: Do not add new models here
     ['detr', ['DetrForSegmentation', DetrForSegmentation]],
     ['clipseg', ['CLIPSegForImageSegmentation', CLIPSegForImageSegmentation]],
 ]);
@@ -13286,6 +13392,11 @@ const MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES = new Map([
     ['sapiens', ['SapiensForSemanticSegmentation', SapiensForSemanticSegmentation]],
 ]);
 
+const MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING_NAMES = new Map([
+    ['detr', ['DetrForSegmentation', DetrForSegmentation]],
+    ['maskformer', ['MaskFormerForInstanceSegmentation', MaskFormerForInstanceSegmentation]],
+]);
+
 const MODEL_FOR_MASK_GENERATION_MAPPING_NAMES = new Map([
     ['sam', ['SamModel', SamModel]],
 ]);
@@ -13361,6 +13472,7 @@ const MODEL_CLASS_TYPE_MAPPING = [
     [MODEL_FOR_IMAGE_TEXT_TO_TEXT_MAPPING_NAMES, MODEL_TYPES.ImageTextToText],
     [MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, MODEL_TYPES.EncoderOnly],
     [MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES, MODEL_TYPES.EncoderOnly],
+    [MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING_NAMES, MODEL_TYPES.EncoderOnly],
     [MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, MODEL_TYPES.EncoderOnly],
     [MODEL_FOR_IMAGE_MATTING_MAPPING_NAMES, MODEL_TYPES.EncoderOnly],
     [MODEL_FOR_IMAGE_TO_IMAGE_MAPPING_NAMES, MODEL_TYPES.EncoderOnly],
@@ -13563,6 +13675,17 @@ class AutoModelForSemanticSegmentation extends PretrainedMixin {
     static MODEL_CLASS_MAPPINGS = [MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES];
 }
 
+/**
+ * Helper class which is used to instantiate pretrained universal image segmentation models with the `from_pretrained` function.
+ * The chosen model class is determined by the type specified in the model config.
+ *
+ * @example
+ * let model = await AutoModelForUniversalSegmentation.from_pretrained('hf-internal-testing/tiny-random-MaskFormerForInstanceSegmentation');
+ */
+class AutoModelForUniversalSegmentation extends PretrainedMixin {
+    static MODEL_CLASS_MAPPINGS = [MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING_NAMES];
+}
+
 /**
  * Helper class which is used to instantiate pretrained object detection models with the `from_pretrained` function.
  * The chosen model class is determined by the type specified in the model config.
@@ -17222,7 +17345,7 @@ const SUPPORTED_TASKS = Object.freeze({
     "image-segmentation": {
         // no tokenizer
         "pipeline": ImageSegmentationPipeline,
-        "model": [_models_js__WEBPACK_IMPORTED_MODULE_1__.AutoModelForImageSegmentation, _models_js__WEBPACK_IMPORTED_MODULE_1__.AutoModelForSemanticSegmentation],
+        "model": [_models_js__WEBPACK_IMPORTED_MODULE_1__.AutoModelForImageSegmentation, _models_js__WEBPACK_IMPORTED_MODULE_1__.AutoModelForSemanticSegmentation, _models_js__WEBPACK_IMPORTED_MODULE_1__.AutoModelForUniversalSegmentation],
         "processor": _processors_js__WEBPACK_IMPORTED_MODULE_2__.AutoProcessor,
         "default": {
             // TODO: replace with original
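Because the image-segmentation pipeline now also falls back to `AutoModelForUniversalSegmentation`, MaskFormer checkpoints can be used through the one-liner pipeline API. A hedged sketch with placeholder ids/URLs:

```js
import { pipeline } from '@huggingface/transformers';

const segmenter = await pipeline('image-segmentation', 'your-org/maskformer-checkpoint');
const output = await segmenter('https://example.com/street-scene.jpg');
// e.g. [{ label: 'car', score: 0.98, mask: RawImage { ... } }, ...]
```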
@@ -17464,7 +17587,7 @@ async function loadItems(mapping, model, pretrainedOptions) {
 
     /**@type {Promise[]} */
     const promises = [];
-    for (
+    for (const [name, cls] of mapping.entries()) {
         if (!cls) continue;
 
         /**@type {Promise} */
@@ -17472,7 +17595,7 @@ async function loadItems(mapping, model, pretrainedOptions) {
         if (Array.isArray(cls)) {
             promise = new Promise(async (resolve, reject) => {
                 let e;
-                for (
+                for (const c of cls) {
                     if (c === null) {
                         // If null, we resolve it immediately, meaning the relevant
                         // class was not found, but it is optional.
@@ -17510,7 +17633,7 @@ async function loadItems(mapping, model, pretrainedOptions) {
     await Promise.all(promises);
 
     // Then assign to result
-    for (
+    for (const [name, promise] of Object.entries(result)) {
         result[name] = await promise;
     }
 
@@ -17547,6 +17670,7 @@ __webpack_require__.r(__webpack_exports__);
 /* harmony export */   Florence2Processor: () => (/* binding */ Florence2Processor),
 /* harmony export */   GLPNFeatureExtractor: () => (/* binding */ GLPNFeatureExtractor),
 /* harmony export */   ImageFeatureExtractor: () => (/* binding */ ImageFeatureExtractor),
+/* harmony export */   MaskFormerFeatureExtractor: () => (/* binding */ MaskFormerFeatureExtractor),
 /* harmony export */   MobileNetV1FeatureExtractor: () => (/* binding */ MobileNetV1FeatureExtractor),
 /* harmony export */   MobileNetV2FeatureExtractor: () => (/* binding */ MobileNetV2FeatureExtractor),
 /* harmony export */   MobileNetV3FeatureExtractor: () => (/* binding */ MobileNetV3FeatureExtractor),
@@ -17558,6 +17682,7 @@ __webpack_require__.r(__webpack_exports__);
 /* harmony export */   OwlViTProcessor: () => (/* binding */ OwlViTProcessor),
 /* harmony export */   Owlv2ImageProcessor: () => (/* binding */ Owlv2ImageProcessor),
 /* harmony export */   Processor: () => (/* binding */ Processor),
+/* harmony export */   PvtImageProcessor: () => (/* binding */ PvtImageProcessor),
 /* harmony export */   PyAnnoteFeatureExtractor: () => (/* binding */ PyAnnoteFeatureExtractor),
 /* harmony export */   PyAnnoteProcessor: () => (/* binding */ PyAnnoteProcessor),
 /* harmony export */   RTDetrImageProcessor: () => (/* binding */ RTDetrImageProcessor),
@@ -17646,7 +17771,7 @@ function center_to_corners_format([centerX, centerY, width, height]) {
  * @param {Tensor} outputs.logits The logits
  * @param {Tensor} outputs.pred_boxes The predicted boxes.
  * @param {number} [threshold=0.5] The threshold to use for the scores.
- * @param {number
+ * @param {[number, number][]} [target_sizes=null] The sizes of the original images.
  * @param {boolean} [is_zero_shot=false] Whether zero-shot object detection was performed.
 * @return {Object[]} An array of objects containing the post-processed outputs.
 * @private
@@ -17727,7 +17852,7 @@ function post_process_object_detection(outputs, threshold = 0.5, target_sizes =
 /**
 * Post-processes the outputs of the model (for semantic segmentation).
 * @param {*} outputs Raw outputs of the model.
- * @param {number
+ * @param {[number, number][]} [target_sizes=null] List of tuples corresponding to the requested final size
 * (height, width) of each prediction. If unset, predictions will not be resized.
 * @returns {{segmentation: Tensor; labels: number[]}[]} The semantic segmentation maps.
 */
@@ -17787,6 +17912,300 @@ function post_process_semantic_segmentation(outputs, target_sizes = null) {
     return toReturn;
 }
 
+
+/**
+ * Binarize the given masks using `object_mask_threshold`, it returns the associated values of `masks`, `scores` and `labels`.
+ * @param {Tensor} class_logits The class logits.
+ * @param {Tensor} mask_logits The mask logits.
+ * @param {number} object_mask_threshold A number between 0 and 1 used to binarize the masks.
+ * @param {number} num_labels The number of labels.
+ * @returns {[Tensor[], number[], number[]]} The binarized masks, the scores, and the labels.
+ * @private
+ */
+function remove_low_and_no_objects(class_logits, mask_logits, object_mask_threshold, num_labels) {
+
+    const mask_probs_item = [];
+    const pred_scores_item = [];
+    const pred_labels_item = [];
+
+    for (let j = 0; j < class_logits.dims[0]; ++j) {
+        const cls = class_logits[j];
+        const mask = mask_logits[j];
+
+        const pred_label = (0,_utils_maths_js__WEBPACK_IMPORTED_MODULE_3__.max)(cls.data)[1];
+        if (pred_label === num_labels) {
+            // Is the background, so we ignore it
+            continue;
+        }
+
+        const scores = (0,_utils_maths_js__WEBPACK_IMPORTED_MODULE_3__.softmax)(cls.data);
+        const pred_score = scores[pred_label];
+        if (pred_score > object_mask_threshold) {
+            mask_probs_item.push(mask);
+            pred_scores_item.push(pred_score);
+            pred_labels_item.push(pred_label);
+        }
+    }
+
+    return [mask_probs_item, pred_scores_item, pred_labels_item];
+}
+
+/**
+ * Checks whether the segment is valid or not.
+ * @param {Int32Array} mask_labels Labels for each pixel in the mask.
+ * @param {Tensor[]} mask_probs Probabilities for each pixel in the masks.
+ * @param {number} k The class id of the segment.
+ * @param {number} mask_threshold The mask threshold.
+ * @param {number} overlap_mask_area_threshold The overlap mask area threshold.
+ * @returns {[boolean, number[]]} Whether the segment is valid or not, and the indices of the valid labels.
+ * @private
+ */
+function check_segment_validity(
+    mask_labels,
+    mask_probs,
+    k,
+    mask_threshold = 0.5,
+    overlap_mask_area_threshold = 0.8
+) {
+    // mask_k is a 1D array of indices, indicating where the mask is equal to k
+    const mask_k = [];
+    let mask_k_area = 0;
+    let original_area = 0;
+
+    const mask_probs_k_data = mask_probs[k].data;
+
+    // Compute the area of all the stuff in query k
+    for (let i = 0; i < mask_labels.length; ++i) {
+        if (mask_labels[i] === k) {
+            mask_k.push(i);
+            ++mask_k_area;
+        }
+
+        if (mask_probs_k_data[i] >= mask_threshold) {
+            ++original_area;
+        }
+    }
+    let mask_exists = mask_k_area > 0 && original_area > 0;
+
+    // Eliminate disconnected tiny segments
+    if (mask_exists) {
+        // Perform additional check
+        let area_ratio = mask_k_area / original_area;
+        mask_exists = area_ratio > overlap_mask_area_threshold;
+    }
+
+    return [mask_exists, mask_k]
+}
+
+/**
+ * Computes the segments.
+ * @param {Tensor[]} mask_probs The mask probabilities.
+ * @param {number[]} pred_scores The predicted scores.
+ * @param {number[]} pred_labels The predicted labels.
+ * @param {number} mask_threshold The mask threshold.
+ * @param {number} overlap_mask_area_threshold The overlap mask area threshold.
+ * @param {Set<number>} label_ids_to_fuse The label ids to fuse.
+ * @param {number[]} target_size The target size of the image.
+ * @returns {[Tensor, Array<{id: number, label_id: number, score: number}>]} The computed segments.
+ * @private
+ */
+function compute_segments(
+    mask_probs,
+    pred_scores,
+    pred_labels,
+    mask_threshold,
+    overlap_mask_area_threshold,
+    label_ids_to_fuse = null,
+    target_size = null,
+) {
+    const [height, width] = target_size ?? mask_probs[0].dims;
+
+    const segmentation = new _utils_tensor_js__WEBPACK_IMPORTED_MODULE_4__.Tensor(
+        'int32',
+        new Int32Array(height * width),
+        [height, width]
+    );
+    const segments = [];
+
+    // 1. If target_size is not null, we need to resize the masks to the target size
+    if (target_size !== null) {
+        // resize the masks to the target size
+        for (let i = 0; i < mask_probs.length; ++i) {
+            mask_probs[i] = (0,_utils_tensor_js__WEBPACK_IMPORTED_MODULE_4__.interpolate)(mask_probs[i], target_size, 'bilinear', false);
+        }
+    }
+
+    // 2. Weigh each mask by its prediction score
+    // NOTE: `mask_probs` is updated in-place
+    //
+    // Temporary storage for the best label/scores for each pixel ([height, width]):
+    const mask_labels = new Int32Array(mask_probs[0].data.length);
+    const bestScores = new Float32Array(mask_probs[0].data.length);
+
+    for (let i = 0; i < mask_probs.length; ++i) {
+        let score = pred_scores[i];
+
+        const mask_probs_i_data = mask_probs[i].data;
+
+        for (let j = 0; j < mask_probs_i_data.length; ++j) {
+            mask_probs_i_data[j] *= score
+            if (mask_probs_i_data[j] > bestScores[j]) {
+                mask_labels[j] = i;
+                bestScores[j] = mask_probs_i_data[j];
+            }
+        }
+    }
+
+    let current_segment_id = 0;
+
+    // let stuff_memory_list = {}
+    const segmentation_data = segmentation.data;
+    for (let k = 0; k < pred_labels.length; ++k) {
+        const pred_class = pred_labels[k];
+
+        // TODO add `should_fuse`
+        // let should_fuse = pred_class in label_ids_to_fuse
+
+        // Check if mask exists and large enough to be a segment
+        const [mask_exists, mask_k] = check_segment_validity(
+            mask_labels,
+            mask_probs,
+            k,
+            mask_threshold,
+            overlap_mask_area_threshold
+        )
+
+        if (!mask_exists) {
+            // Nothing to see here
+            continue;
+        }
+
+        // TODO
+        // if (pred_class in stuff_memory_list) {
+        //     current_segment_id = stuff_memory_list[pred_class]
+        // } else {
+        //     current_segment_id += 1;
+        // }
+        ++current_segment_id;
+
+
+        // Add current object segment to final segmentation map
+        for (const index of mask_k) {
+            segmentation_data[index] = current_segment_id;
+        }
+
+        segments.push({
+            id: current_segment_id,
+            label_id: pred_class,
+            // was_fused: should_fuse, TODO
+            score: pred_scores[k],
+        })
+
+        // TODO
+        // if(should_fuse){
+        //     stuff_memory_list[pred_class] = current_segment_id
+        // }
+    }
+
+    return [segmentation, segments];
+}
+
+
+/**
+ * Post-process the model output to generate the final panoptic segmentation.
+ * @param {*} outputs The model output to post process
+ * @param {number} [threshold=0.5] The probability score threshold to keep predicted instance masks.
+ * @param {number} [mask_threshold=0.5] Threshold to use when turning the predicted masks into binary values.
+ * @param {number} [overlap_mask_area_threshold=0.8] The overlap mask area threshold to merge or discard small disconnected parts within each binary instance mask.
+ * @param {Set<number>} [label_ids_to_fuse=null] The labels in this state will have all their instances be fused together.
+ * @param {[number, number][]} [target_sizes=null] The target sizes to resize the masks to.
+ * @returns {Array<{ segmentation: Tensor, segments_info: Array<{id: number, label_id: number, score: number}>}>}
+ */
+function post_process_panoptic_segmentation(
+    outputs,
+    threshold = 0.5,
+    mask_threshold = 0.5,
+    overlap_mask_area_threshold = 0.8,
+    label_ids_to_fuse = null,
+    target_sizes = null,
+) {
+    if (label_ids_to_fuse === null) {
+        console.warn("`label_ids_to_fuse` unset. No instance will be fused.")
+        label_ids_to_fuse = new Set();
+    }
+
+    const class_queries_logits = outputs.class_queries_logits ?? outputs.logits; // [batch_size, num_queries, num_classes+1]
+    const masks_queries_logits = outputs.masks_queries_logits ?? outputs.pred_masks; // [batch_size, num_queries, height, width]
+
+    const mask_probs = masks_queries_logits.sigmoid() // [batch_size, num_queries, height, width]
+
+    let [batch_size, num_queries, num_labels] = class_queries_logits.dims;
+    num_labels -= 1; // Remove last class (background)
+
+    if (target_sizes !== null && target_sizes.length !== batch_size) {
+        throw Error("Make sure that you pass in as many target sizes as the batch dimension of the logits")
+    }
+
+    let toReturn = [];
+    for (let i = 0; i < batch_size; ++i) {
+        let target_size = target_sizes !== null ? target_sizes[i] : null;
+
+        let class_logits = class_queries_logits[i];
+        let mask_logits = mask_probs[i];
+
+        let [mask_probs_item, pred_scores_item, pred_labels_item] = remove_low_and_no_objects(class_logits, mask_logits, threshold, num_labels);
+
+        if (pred_labels_item.length === 0) {
+            // No mask found
+            let [height, width] = target_size ?? mask_logits.dims.slice(-2);
+
+            let segmentation = new _utils_tensor_js__WEBPACK_IMPORTED_MODULE_4__.Tensor(
+                'int32',
+                new Int32Array(height * width).fill(-1),
+                [height, width]
+            )
+            toReturn.push({
+                segmentation: segmentation,
+                segments_info: []
+            });
+            continue;
+        }
+
+
+        // Get segmentation map and segment information of batch item
+        let [segmentation, segments] = compute_segments(
+            mask_probs_item,
+            pred_scores_item,
+            pred_labels_item,
+            mask_threshold,
+            overlap_mask_area_threshold,
+            label_ids_to_fuse,
+            target_size,
+        )
+
+        toReturn.push({
+            segmentation: segmentation,
+            segments_info: segments
+        })
+    }
+
+    return toReturn;
+}
+
+
+/**
+ * Post-processes the outputs of the model (for instance segmentation).
+ * @param {*} outputs Raw outputs of the model.
+ * @param {number} [threshold=0.5] The probability score threshold to keep predicted instance masks.
+ * @param {[number, number][]} [target_sizes=null] List of tuples corresponding to the requested final size
+ * (height, width) of each prediction. If unset, predictions will not be resized.
+ * @returns {Array<{ segmentation: Tensor, segments_info: Array<{id: number, label_id: number, score: number}>}>}
+ */
+function post_process_instance_segmentation(outputs, threshold = 0.5, target_sizes = null) {
+    throw new Error('Not implemented yet');
+    return [];
+}
+
 /**
 * Named tuple to indicate the order we are using is (height x width), even though
 * the Graphics’ industry standard is (width x height).
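End to end, the panoptic path is: run a MaskFormer (or DETR) checkpoint through `AutoModelForUniversalSegmentation`, then feed the raw outputs into the processor's `post_process_panoptic_segmentation` (now a module-level function delegated to by the feature extractors, as the next hunks show). A hedged sketch — the model id and image URL are placeholders:

```js
import { AutoProcessor, AutoModelForUniversalSegmentation, RawImage } from '@huggingface/transformers';

const model_id = 'your-org/maskformer-checkpoint';
const processor = await AutoProcessor.from_pretrained(model_id);
const model = await AutoModelForUniversalSegmentation.from_pretrained(model_id);

const image = await RawImage.read('https://example.com/street-scene.jpg');
const inputs = await processor(image);
const outputs = await model(inputs);

// Resize predicted masks back to the original (height, width); positional args
// follow the signature reconstructed above.
const [{ segmentation, segments_info }] = processor.feature_extractor
    .post_process_panoptic_segmentation(outputs, 0.5, 0.5, 0.8, null, [[image.height, image.width]]);
```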
@@ -18375,6 +18794,7 @@ class SegformerFeatureExtractor extends ImageFeatureExtractor {
         return post_process_semantic_segmentation(...args);
     }
 }
+class PvtImageProcessor extends ImageFeatureExtractor { }
 class DPTFeatureExtractor extends ImageFeatureExtractor { }
 class DPTImageProcessor extends DPTFeatureExtractor { } // NOTE: extends DPTFeatureExtractor
 class BitImageProcessor extends ImageFeatureExtractor { }
|
|
|
18514
18934
|
// TODO support different mask sizes (not just 64x64)
|
|
18515
18935
|
// Currently, just fill pixel mask with 1s
|
|
18516
18936
|
const maskSize = [result.pixel_values.dims[0], 64, 64];
|
|
18517
|
-
const pixel_mask =
|
|
18518
|
-
'int64',
|
|
18519
|
-
new BigInt64Array(maskSize.reduce((a, b) => a * b)).fill(1n),
|
|
18520
|
-
maskSize
|
|
18521
|
-
);
|
|
18937
|
+
const pixel_mask = (0,_utils_tensor_js__WEBPACK_IMPORTED_MODULE_4__.full)(maskSize, 1n);
|
|
18522
18938
|
|
|
18523
18939
|
return { ...result, pixel_mask };
|
|
18524
18940
|
}
|
|
18525
18941
|
|
|
18526
|
-
/**
|
|
18527
|
-
* Post-processes the outputs of the model (for object detection).
|
|
18528
|
-
* @param {Object} outputs The outputs of the model that must be post-processed
|
|
18529
|
-
* @param {Tensor} outputs.logits The logits
|
|
18530
|
-
* @param {Tensor} outputs.pred_boxes The predicted boxes.
|
|
18531
|
-
* @return {Object[]} An array of objects containing the post-processed outputs.
|
|
18532
|
-
*/
|
|
18533
|
-
|
|
18534
18942
|
/** @type {typeof post_process_object_detection} */
|
|
18535
18943
|
post_process_object_detection(...args) {
|
|
18536
18944
|
return post_process_object_detection(...args);
|
|
18537
18945
|
}
|
|
18538
18946
|
|
|
18539
|
-
/**
|
|
18540
|
-
|
|
18541
|
-
|
|
18542
|
-
* @param {Tensor} mask_logits The mask logits.
|
|
18543
|
-
* @param {number} object_mask_threshold A number between 0 and 1 used to binarize the masks.
|
|
18544
|
-
* @param {number} num_labels The number of labels.
|
|
18545
|
-
* @returns {[Tensor[], number[], number[]]} The binarized masks, the scores, and the labels.
|
|
18546
|
-
*/
|
|
18547
|
-
remove_low_and_no_objects(class_logits, mask_logits, object_mask_threshold, num_labels) {
|
|
18548
|
-
|
|
18549
|
-
let mask_probs_item = [];
|
|
18550
|
-
let pred_scores_item = [];
|
|
18551
|
-
let pred_labels_item = [];
|
|
18552
|
-
|
|
18553
|
-
for (let j = 0; j < class_logits.dims[0]; ++j) {
|
|
18554
|
-
let cls = class_logits[j];
|
|
18555
|
-
let mask = mask_logits[j];
|
|
18556
|
-
|
|
18557
|
-
let pred_label = (0,_utils_maths_js__WEBPACK_IMPORTED_MODULE_3__.max)(cls.data)[1];
|
|
18558
|
-
if (pred_label === num_labels) {
|
|
18559
|
-
// Is the background, so we ignore it
|
|
18560
|
-
continue;
|
|
18561
|
-
}
|
|
18562
|
-
|
|
18563
|
-
let scores = (0,_utils_maths_js__WEBPACK_IMPORTED_MODULE_3__.softmax)(cls.data);
|
|
18564
|
-
let pred_score = scores[pred_label];
|
|
18565
|
-
if (pred_score > object_mask_threshold) {
|
|
18566
|
-
mask_probs_item.push(mask);
|
|
18567
|
-
pred_scores_item.push(pred_score);
|
|
18568
|
-
pred_labels_item.push(pred_label);
|
|
18569
|
-
}
|
|
18570
|
-
}
|
|
18571
|
-
|
|
18572
|
-
return [mask_probs_item, pred_scores_item, pred_labels_item];
|
|
18573
|
-
|
|
18574
|
-
}
|
|
18575
|
-
|
|
18576
|
-
/**
|
|
18577
|
-
* Checks whether the segment is valid or not.
|
|
18578
|
-
* @param {Int32Array} mask_labels Labels for each pixel in the mask.
|
|
18579
|
-
* @param {Tensor[]} mask_probs Probabilities for each pixel in the masks.
|
|
18580
|
-
* @param {number} k The class id of the segment.
|
|
18581
|
-
* @param {number} mask_threshold The mask threshold.
|
|
18582
|
-
* @param {number} overlap_mask_area_threshold The overlap mask area threshold.
|
|
18583
|
-
* @returns {[boolean, number[]]} Whether the segment is valid or not, and the indices of the valid labels.
|
|
18584
|
-
*/
|
|
18585
|
-
check_segment_validity(
|
|
18586
|
-
mask_labels,
|
|
18587
|
-
mask_probs,
|
|
18588
|
-
k,
|
|
18589
|
-
mask_threshold = 0.5,
|
|
18590
|
-
overlap_mask_area_threshold = 0.8
|
|
18591
|
-
) {
|
|
18592
|
-
// mask_k is a 1D array of indices, indicating where the mask is equal to k
|
|
18593
|
-
let mask_k = [];
|
|
18594
|
-
let mask_k_area = 0;
|
|
18595
|
-
let original_area = 0;
|
|
18596
|
-
|
|
18597
|
-
const mask_probs_k_data = mask_probs[k].data;
|
|
18598
|
-
|
|
18599
|
-
// Compute the area of all the stuff in query k
|
|
18600
|
-
for (let i = 0; i < mask_labels.length; ++i) {
|
|
18601
|
-
if (mask_labels[i] === k) {
|
|
18602
|
-
mask_k.push(i);
|
|
18603
|
-
++mask_k_area;
|
|
18604
|
-
}
|
|
18605
|
-
|
|
18606
|
-
if (mask_probs_k_data[i] >= mask_threshold) {
|
|
18607
|
-
++original_area;
|
|
18608
|
-
}
|
|
18609
|
-
}
|
|
18610
|
-
let mask_exists = mask_k_area > 0 && original_area > 0;
|
|
18611
|
-
|
|
18612
|
-
// Eliminate disconnected tiny segments
|
|
18613
|
-
if (mask_exists) {
|
|
18614
|
-
// Perform additional check
|
|
18615
|
-
let area_ratio = mask_k_area / original_area;
|
|
18616
|
-
mask_exists = area_ratio > overlap_mask_area_threshold;
|
|
18617
|
-
}
|
|
18618
|
-
|
|
18619
|
-
return [mask_exists, mask_k]
|
|
18947
|
+
/** @type {typeof post_process_panoptic_segmentation} */
|
|
18948
|
+
post_process_panoptic_segmentation(...args) {
|
|
18949
|
+
return post_process_panoptic_segmentation(...args);
|
|
18620
18950
|
}
|
|
18621
18951
|
|
|
18622
|
-
|
|
18623
|
-
|
|
18624
|
-
|
|
18625
|
-
* @param {number[]} pred_scores The predicted scores.
|
|
18626
|
-
* @param {number[]} pred_labels The predicted labels.
|
|
18627
|
-
* @param {number} mask_threshold The mask threshold.
|
|
18628
|
-
* @param {number} overlap_mask_area_threshold The overlap mask area threshold.
|
|
18629
|
-
* @param {Set<number>} label_ids_to_fuse The label ids to fuse.
|
|
18630
|
-
* @param {number[]} target_size The target size of the image.
|
|
18631
|
-
* @returns {[Tensor, Array<{id: number, label_id: number, score: number}>]} The computed segments.
|
|
18632
|
-
*/
|
|
18633
|
-
compute_segments(
|
|
18634
|
-
mask_probs,
|
|
18635
|
-
pred_scores,
|
|
18636
|
-
pred_labels,
|
|
18637
|
-
mask_threshold,
|
|
18638
|
-
overlap_mask_area_threshold,
|
|
18639
|
-
label_ids_to_fuse = null,
|
|
18640
|
-
target_size = null,
|
|
18641
|
-
) {
|
|
18642
|
-
let [height, width] = target_size ?? mask_probs[0].dims;
|
|
18643
|
-
|
|
18644
|
-
let segmentation = new _utils_tensor_js__WEBPACK_IMPORTED_MODULE_4__.Tensor(
|
|
18645
|
-
'int32',
|
|
18646
|
-
new Int32Array(height * width),
|
|
18647
|
-
[height, width]
|
|
18648
|
-
);
|
|
18649
|
-
let segments = [];
|
|
18650
|
-
|
|
18651
|
-
// 1. If target_size is not null, we need to resize the masks to the target size
|
|
18652
|
-
if (target_size !== null) {
|
|
18653
|
-
// resize the masks to the target size
|
|
18654
|
-
for (let i = 0; i < mask_probs.length; ++i) {
|
|
18655
|
-
mask_probs[i] = (0,_utils_tensor_js__WEBPACK_IMPORTED_MODULE_4__.interpolate)(mask_probs[i], target_size, 'bilinear', false);
|
|
18656
|
-
}
|
|
18657
|
-
}
|
|
18658
|
-
|
|
18659
|
-
// 2. Weigh each mask by its prediction score
|
|
18660
|
-
// NOTE: `mask_probs` is updated in-place
|
|
18661
|
-
//
|
|
18662
|
-
// Temporary storage for the best label/scores for each pixel ([height, width]):
|
|
18663
|
-
let mask_labels = new Int32Array(mask_probs[0].data.length);
|
|
18664
|
-
let bestScores = new Float32Array(mask_probs[0].data.length);
|
|
18665
|
-
|
|
18666
|
-
for (let i = 0; i < mask_probs.length; ++i) {
|
|
18667
|
-
let score = pred_scores[i];
|
|
18668
|
-
|
|
18669
|
-
const mask_probs_i_data = mask_probs[i].data;
|
|
18670
|
-
|
|
18671
|
-
for (let j = 0; j < mask_probs_i_data.length; ++j) {
|
|
18672
|
-
mask_probs_i_data[j] *= score
|
|
18673
|
-
if (mask_probs_i_data[j] > bestScores[j]) {
|
|
18674
|
-
mask_labels[j] = i;
|
|
18675
|
-
bestScores[j] = mask_probs_i_data[j];
|
|
18676
|
-
}
|
|
18677
|
-
}
|
|
18678
|
-
}
|
|
18679
|
-
|
|
18680
|
-
let current_segment_id = 0;
|
|
18681
|
-
|
|
18682
|
-
// let stuff_memory_list = {}
|
|
18683
|
-
const segmentation_data = segmentation.data;
|
|
18684
|
-
for (let k = 0; k < pred_labels.length; ++k) {
|
|
18685
|
-
let pred_class = pred_labels[k];
|
|
18686
|
-
|
|
18687
|
-
// TODO add `should_fuse`
|
|
18688
|
-
// let should_fuse = pred_class in label_ids_to_fuse
|
|
18689
|
-
|
|
18690
|
-
// Check if mask exists and large enough to be a segment
|
|
18691
|
-
let [mask_exists, mask_k] = this.check_segment_validity(
|
|
18692
|
-
mask_labels,
|
|
18693
|
-
mask_probs,
|
|
18694
|
-
k,
|
|
18695
|
-
mask_threshold,
|
|
18696
|
-
overlap_mask_area_threshold
|
|
18697
|
-
)
|
|
18698
|
-
|
|
18699
|
-
if (!mask_exists) {
|
|
18700
|
-
// Nothing to see here
|
|
18701
|
-
continue;
|
|
18702
|
-
}
|
|
18703
|
-
|
|
18704
|
-
// TODO
|
|
18705
|
-
// if (pred_class in stuff_memory_list) {
|
|
18706
|
-
// current_segment_id = stuff_memory_list[pred_class]
|
|
18707
|
-
// } else {
|
|
18708
|
-
// current_segment_id += 1;
|
|
18709
|
-
// }
|
|
18710
|
-
++current_segment_id;
|
|
18711
|
-
|
|
18712
|
-
|
|
18713
|
-
// Add current object segment to final segmentation map
|
|
18714
|
-
for (let index of mask_k) {
|
|
18715
|
-
segmentation_data[index] = current_segment_id;
|
|
18716
|
-
}
|
|
18717
|
-
|
|
18718
|
-
segments.push({
|
|
18719
|
-
id: current_segment_id,
|
|
18720
|
-
label_id: pred_class,
|
|
18721
|
-
// was_fused: should_fuse, TODO
|
|
18722
|
-
score: pred_scores[k],
|
|
18723
|
-
})
|
|
18724
|
-
|
|
18725
|
-
// TODO
|
|
18726
|
-
// if(should_fuse){
|
|
18727
|
-
// stuff_memory_list[pred_class] = current_segment_id
|
|
18728
|
-
// }
|
|
18729
|
-
}
|
|
18730
|
-
|
|
18731
|
-
return [segmentation, segments];
|
|
18952
|
+
post_process_instance_segmentation() {
|
|
18953
|
+
// TODO
|
|
18954
|
+
throw Error("Not implemented yet");
|
|
18732
18955
|
}
|
|
18956
|
+
}
|
|
18733
18957
|
|
|
18734
|
-
|
|
18735
|
-
* Post-process the model output to generate the final panoptic segmentation.
|
|
18736
|
-
* @param {*} outputs The model output to post process
|
|
18737
|
-
* @param {number} [threshold=0.5] The probability score threshold to keep predicted instance masks.
|
|
18738
|
-
* @param {number} [mask_threshold=0.5] Threshold to use when turning the predicted masks into binary values.
|
|
18739
|
-
* @param {number} [overlap_mask_area_threshold=0.8] The overlap mask area threshold to merge or discard small disconnected parts within each binary instance mask.
|
|
18740
|
-
* @param {Set<number>} [label_ids_to_fuse=null] The labels in this state will have all their instances be fused together.
|
|
18741
|
-
* @param {number[][]} [target_sizes=null] The target sizes to resize the masks to.
|
|
18742
|
-
* @returns {Array<{ segmentation: Tensor, segments_info: Array<{id: number, label_id: number, score: number}>}>}
|
|
18743
|
-
*/
|
|
18744
|
-
post_process_panoptic_segmentation(
|
|
18745
|
-
outputs,
|
|
18746
|
-
threshold = 0.5,
|
|
18747
|
-
mask_threshold = 0.5,
|
|
18748
|
-
overlap_mask_area_threshold = 0.8,
|
|
18749
|
-
label_ids_to_fuse = null,
|
|
18750
|
-
target_sizes = null,
|
|
18751
|
-
) {
|
|
18752
|
-
if (label_ids_to_fuse === null) {
|
|
18753
|
-
console.warn("`label_ids_to_fuse` unset. No instance will be fused.")
|
|
18754
|
-
label_ids_to_fuse = new Set();
|
|
18755
|
-
}
|
|
18756
|
-
|
|
18757
|
-
const class_queries_logits = outputs.logits; // [batch_size, num_queries, num_classes+1]
|
|
18758
|
-
const masks_queries_logits = outputs.pred_masks; // [batch_size, num_queries, height, width]
|
|
18759
|
-
|
|
18760
|
-
const mask_probs = masks_queries_logits.sigmoid() // [batch_size, num_queries, height, width]
|
|
18761
|
-
|
|
18762
|
-
let [batch_size, num_queries, num_labels] = class_queries_logits.dims;
|
|
18763
|
-
num_labels -= 1; // Remove last class (background)
|
|
18764
|
-
|
|
18765
|
-
if (target_sizes !== null && target_sizes.length !== batch_size) {
|
|
18766
|
-
throw Error("Make sure that you pass in as many target sizes as the batch dimension of the logits")
|
|
18767
|
-
}
|
|
18768
|
-
|
|
18769
|
-
let toReturn = [];
|
|
18770
|
-
for (let i = 0; i < batch_size; ++i) {
|
|
18771
|
-
let target_size = target_sizes !== null ? target_sizes[i] : null;
|
|
18772
|
-
|
|
18773
|
-
let class_logits = class_queries_logits[i];
|
|
18774
|
-
let mask_logits = mask_probs[i];
|
|
18775
|
-
|
|
18776
|
-
let [mask_probs_item, pred_scores_item, pred_labels_item] = this.remove_low_and_no_objects(class_logits, mask_logits, threshold, num_labels);
|
|
18777
|
-
|
|
18778
|
-
if (pred_labels_item.length === 0) {
|
|
18779
|
-
// No mask found
|
|
18780
|
-
let [height, width] = target_size ?? mask_logits.dims.slice(-2);
|
|
18781
|
-
|
|
18782
|
-
let segmentation = new _utils_tensor_js__WEBPACK_IMPORTED_MODULE_4__.Tensor(
|
|
18783
|
-
'int32',
|
|
18784
|
-
new Int32Array(height * width).fill(-1),
|
|
18785
|
-
[height, width]
|
|
18786
|
-
)
|
|
18787
|
-
toReturn.push({
|
|
18788
|
-
segmentation: segmentation,
|
|
18789
|
-
segments_info: []
|
|
18790
|
-
});
|
|
18791
|
-
continue;
|
|
18792
|
-
}
|
|
18793
|
-
|
|
18794
|
-
|
|
18795
|
-
// Get segmentation map and segment information of batch item
|
|
18796
|
-
let [segmentation, segments] = this.compute_segments(
|
|
18797
|
-
mask_probs_item,
|
|
18798
|
-
pred_scores_item,
|
|
18799
|
-
pred_labels_item,
|
|
18800
|
-
mask_threshold,
|
|
18801
|
-
overlap_mask_area_threshold,
|
|
18802
|
-
label_ids_to_fuse,
|
|
18803
|
-
target_size,
|
|
18804
|
-
)
|
|
18805
|
-
|
|
18806
|
-
toReturn.push({
|
|
18807
|
-
segmentation: segmentation,
|
|
18808
|
-
segments_info: segments
|
|
18809
|
-
})
|
|
18810
|
-
}
|
|
18958
|
+
class MaskFormerFeatureExtractor extends ImageFeatureExtractor {
|
|
18811
18959
|
|
|
18812
|
-
|
|
18960
|
+
/** @type {typeof post_process_panoptic_segmentation} */
|
|
18961
|
+
post_process_panoptic_segmentation(...args) {
|
|
18962
|
+
return post_process_panoptic_segmentation(...args);
|
|
18813
18963
|
}
|
|
18814
18964
|
|
|
18815
18965
|
post_process_instance_segmentation() {
|
|
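Note: the panoptic post-processing that used to be private to `DetrFeatureExtractor` is now shared module-level logic, which is what lets the new `MaskFormerFeatureExtractor` delegate to it. A minimal usage sketch, assuming a DETR panoptic checkpoint; the model id, thresholds, and image URL below are illustrative assumptions, not part of this diff:

```js
import { AutoProcessor, AutoModelForImageSegmentation, RawImage } from '@huggingface/transformers';

// Assumed model id for illustration; any DETR/MaskFormer panoptic checkpoint should behave the same.
const model_id = 'Xenova/detr-resnet-50-panoptic';
const processor = await AutoProcessor.from_pretrained(model_id);
const model = await AutoModelForImageSegmentation.from_pretrained(model_id);

const image = await RawImage.read('image.png');
const inputs = await processor(image);
const outputs = await model(inputs);

// Positional arguments: threshold, mask_threshold, overlap_mask_area_threshold,
// label_ids_to_fuse, target_sizes (see the signature deleted/re-added in this hunk).
const [{ segmentation, segments_info }] = processor.feature_extractor.post_process_panoptic_segmentation(
    outputs, 0.9, 0.5, 0.8, null, [[image.height, image.width]],
);
```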
@@ -18818,6 +18968,7 @@ class DetrFeatureExtractor extends ImageFeatureExtractor {
     }
 }

+
 class YolosFeatureExtractor extends ImageFeatureExtractor {
     /** @type {typeof post_process_object_detection} */
     post_process_object_detection(...args) {
@@ -20107,11 +20258,13 @@ class AutoProcessor {
         BitImageProcessor,
         DPTImageProcessor,
         DPTFeatureExtractor,
+        PvtImageProcessor,
         GLPNFeatureExtractor,
         BeitFeatureExtractor,
         DeiTFeatureExtractor,
         DetrFeatureExtractor,
         RTDetrImageProcessor,
+        MaskFormerFeatureExtractor,
         YolosFeatureExtractor,
         DonutFeatureExtractor,
         NougatImageProcessor,
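Note: registering `PvtImageProcessor` and `MaskFormerFeatureExtractor` here is what lets `AutoProcessor.from_pretrained` resolve those class names from a checkpoint's `preprocessor_config.json`. A sketch of the name-based lookup this mapping drives; the helper and map names below are hypothetical, not the library's actual internals:

```js
// Hypothetical helper illustrating the resolution performed by AutoProcessor.
const FEATURE_EXTRACTOR_CLASSES = new Map(Object.entries({
    PvtImageProcessor,
    MaskFormerFeatureExtractor,
    // ... the rest of the registered classes above
}));

function resolveFeatureExtractor(preprocessorConfig) {
    // Checkpoints may declare either key, depending on how they were exported.
    const key = preprocessorConfig.feature_extractor_type ?? preprocessorConfig.image_processor_type;
    const cls = FEATURE_EXTRACTOR_CLASSES.get(key);
    if (!cls) throw new Error(`Unknown feature extractor type: ${key}`);
    return new cls(preprocessorConfig);
}
```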
@@ -20447,7 +20600,7 @@ function clean_up_tokenization(text) {
  * @returns {string} The text with accents removed.
  */
 function remove_accents(text) {
-    return text.replace(/
+    return text.replace(/\p{M}/gu, '');
 }

 /**
@@ -20488,23 +20641,26 @@ function is_chinese_char(cp) {
 }

 /**
- * Helper function to fuse consecutive
- * @param {string[]} arr The input
- * @param {any}
- * @param {
+ * Helper function to fuse consecutive unknown tokens.
+ * @param {string[]} arr The list of input tokens
+ * @param {Map<string, any>} tokens_to_ids The mapping from tokens to token ids.
+ * @param {number} unk_token_id The value to fuse on.
+ * @private
  */
-function
+function fuse_unk(arr, tokens_to_ids, unk_token_id) {
     const fused = [];
     let i = 0;
     while (i < arr.length) {
         fused.push(arr[i])
-        if ((
+        if ((tokens_to_ids.get(arr[i]) ?? unk_token_id) !== unk_token_id) {
             ++i;
             continue;
         }

-        while (i < arr.length && (
-
+        while (++i < arr.length && (tokens_to_ids.get(arr[i]) ?? unk_token_id) === unk_token_id) {
+            if (tokens_to_ids.get(fused.at(-1)) !== unk_token_id) {
+                fused[fused.length - 1] += arr[i];
+            }
         }
     }

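Note: a quick behavior check of the completed `fuse_unk` (the function's `return fused;` lies outside this hunk). Runs of tokens that resolve to the unknown id are collapsed into a single token by concatenation:

```js
const tokens_to_ids = new Map([['a', 0], ['b', 1], ['[UNK]', 2]]);
const unk_token_id = 2;

// 'x' and 'y' are out-of-vocabulary, so both map to the unknown id and fuse:
fuse_unk(['a', 'x', 'y', 'b'], tokens_to_ids, unk_token_id);
// -> ['a', 'xy', 'b']
```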
@@ -20621,15 +20777,15 @@ class TokenizerModel extends _utils_generic_js__WEBPACK_IMPORTED_MODULE_0__.Call
     /**
      * Internal function to call the TokenizerModel instance.
      * @param {string[]} tokens The tokens to encode.
-     * @returns {string[]} The encoded
+     * @returns {string[]} The encoded tokens.
      */
     _call(tokens) {
-
+        tokens = this.encode(tokens);
         if (this.fuse_unk) {
             // Fuse unknown tokens
-
+            tokens = fuse_unk(tokens, this.tokens_to_ids, this.unk_token_id);
         }
-        return
+        return tokens;
     }

     /**
@@ -20790,18 +20946,18 @@ class Unigram extends TokenizerModel {
         this.unk_token = this.vocab[config.unk_id];

         this.tokens_to_ids = new Map(this.vocab.map((x, i) => [x, i]));
-        this.
+        this.bos_token = ' '; // beginning of a sentence token

-        this.
-        this.
+        this.bos_token_id = this.tokens_to_ids.get(this.bos_token); // NOTE: may be undefined
+        this.eos_token = moreConfig.eos_token;

-        this.
-        this.
+        this.eos_token_id = this.tokens_to_ids.get(this.eos_token);
+        this.unk_token = this.vocab[this.unk_token_id];

         this.minScore = (0,_utils_maths_js__WEBPACK_IMPORTED_MODULE_3__.min)(this.scores)[0];

-        this.
-        this.scores[this.unk_token_id] = this.
+        this.unk_score = this.minScore - 10.0;
+        this.scores[this.unk_token_id] = this.unk_score;

         this.trie = new _utils_data_structures_js__WEBPACK_IMPORTED_MODULE_5__.CharTrie();
         this.trie.extend(this.vocab);
@@ -20816,26 +20972,27 @@ class Unigram extends TokenizerModel {
      * @param {TokenLattice} lattice The token lattice to populate with nodes.
      */
     populateNodes(lattice) {
-        const
-        const
+        const chars = lattice.chars;
+        const mblen = 1;
         let beginPos = 0;
-        while (beginPos <
-            const mblen = 1;
+        while (beginPos < chars.length) {
             let hasSingleNode = false;
-            const tokens = [];

-
+            const tokens = [];
+            const sliced = chars.slice(beginPos).join('');
+            const prefixedTokens = this.trie.commonPrefixSearch(sliced);
+            for (const token of prefixedTokens) {
                 tokens.push(token);
                 const tokenId = this.tokens_to_ids.get(token);
                 const tokenScore = this.scores[tokenId];
-                const n = token
+                const n = (0,_utils_core_js__WEBPACK_IMPORTED_MODULE_1__.len)(token);
                 lattice.insert(beginPos, n, tokenScore, tokenId);
                 if (!hasSingleNode && n === mblen) {
                     hasSingleNode = true;
                 }
             }
             if (!hasSingleNode) {
-                lattice.insert(beginPos, mblen, this.
+                lattice.insert(beginPos, mblen, this.unk_score, this.unk_token_id);
             }
             beginPos += mblen;
         }
@@ -20848,7 +21005,7 @@ class Unigram extends TokenizerModel {
      * @returns {string[]} An array of subtokens obtained by encoding the input tokens using the unigram model.
      */
     tokenize(normalized) {
-        const lattice = new _utils_data_structures_js__WEBPACK_IMPORTED_MODULE_5__.TokenLattice(normalized, this.
+        const lattice = new _utils_data_structures_js__WEBPACK_IMPORTED_MODULE_5__.TokenLattice(normalized, this.bos_token_id, this.eos_token_id);
         this.populateNodes(lattice);
         return lattice.tokens();
     }
@@ -21127,15 +21284,19 @@ class BPE extends TokenizerModel {
             for (const t of bpe_token_list) {
                 if (this.tokens_to_ids.has(t)) {
                     outputTokens.push(t);
-                } else {
-
-
-
-
-
+                } else if (this.byte_fallback) {
+                    const byteTokens = Array.from(this.text_encoder.encode(t))
+                        .map(x => `<0x${x.toString(16).toUpperCase().padStart(2, '0')}>`);
+                    if (byteTokens.every(x => this.tokens_to_ids.has(x))) {
+                        // Ensure the byte tokens are actually in the vocabulary, otherwise
+                        // we fall back to the unknown token. For more information, see
+                        // https://github.com/huggingface/transformers/issues/28096.
+                        outputTokens.push(...byteTokens);
                 } else {
                     outputTokens.push(this.unk_token);
                 }
+                } else {
+                    outputTokens.push(this.unk_token);
             }
         }
     }
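Note: the new byte-fallback branch mirrors Llama-style vocabularies, which contain one `<0xNN>` token per byte. A standalone sketch of the byte-token construction used above:

```js
const text_encoder = new TextEncoder();
const t = 'ĺ'; // assume this token is not in the vocabulary

const byteTokens = Array.from(text_encoder.encode(t))
    .map(x => `<0x${x.toString(16).toUpperCase().padStart(2, '0')}>`);
// 'ĺ' (U+013A) encodes to the UTF-8 bytes [0xC4, 0xBA], so:
// byteTokens === ['<0xC4>', '<0xBA>']
```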
@@ -21459,7 +21620,8 @@ class BertNormalizer extends Normalizer {
      * @returns {string} The text with accents removed.
      */
     stripAccents(text) {
-
+        // "Mark, Nonspacing" (Mn)
+        return text.normalize('NFD').replace(/\p{Mn}/gu, '');
     }


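Note: the completed `stripAccents` first decomposes to NFD, then drops nonspacing combining marks, e.g.:

```js
'café'.normalize('NFD').replace(/\p{Mn}/gu, ''); // -> 'cafe'
'Đạo'.normalize('NFD').replace(/\p{Mn}/gu, '');  // -> 'Đao' ('Đ' is a letter, not a combining mark)
```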
@@ -22577,7 +22739,7 @@ class Precompiled extends Normalizer {
         // TODO: detect when a different `this.charsmap` is used.

         text = text.replace(/[\u0001-\u0008\u000B\u000E-\u001F\u007F\u008F\u009F]/gm, ''); // Remove control characters
-        text = text.replace(/[\u0009\u000A\u000C\u000D\u1680\
+        text = text.replace(/[\u0009\u000A\u000C\u000D\u00A0\u1680\u2000-\u200F\u2028\u2029\u202F\u205F\u2581\u3000\uFEFF\uFFFD]/gm, '\u0020'); // Replace certain characters with a space

         if (text.includes('\uFF5E')) {
             // To match the sentencepiece implementation 100%, we must handle a very strange edge-case.
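Note: the widened character class now also folds NBSP (U+00A0) and the U+2000–U+200F range to a plain ASCII space, e.g.:

```js
'a\u00A0b\u3000c'.replace(/[\u0009\u000A\u000C\u000D\u00A0\u1680\u2000-\u200F\u2028\u2029\u202F\u205F\u2581\u3000\uFEFF\uFFFD]/gm, '\u0020');
// -> 'a b c'
```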
@@ -22807,11 +22969,10 @@ class PreTrainedTokenizer extends _utils_generic_js__WEBPACK_IMPORTED_MODULE_0__
             this.decoder.end_of_word_suffix = this.model.end_of_word_suffix;
         }

-
         this.added_tokens_regex = this.added_tokens.length > 0 ? new RegExp(
-            this.added_tokens
+            this.added_tokens.slice()
                 // Sort by length (desc) to avoid early partial matches
-                .
+                .sort((a, b) => b.content.length - a.content.length)
                 .map(x => `${x.lstrip ? '\\s*' : ''}(${(0,_utils_core_js__WEBPACK_IMPORTED_MODULE_1__.escapeRegExp)(x.content)})${x.rstrip ? '\\s*' : ''}`)
                 .join('|')
         ) : null;
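Note: the restored `.sort(...)` matters because regex alternation is first-match-wins; without it, an added token that is a prefix of another would shadow the longer one. A simplified demonstration using plain strings (the real code sorts added-token objects by `content.length`):

```js
const added = ['<a>', '<a>b']; // '<a>' is a prefix of '<a>b'

'<a>b'.match(new RegExp(added.join('|')))[0];
// -> '<a>'   (wrong: the shorter branch matches first)

'<a>b'.match(new RegExp([...added].sort((x, y) => y.length - x.length).join('|')))[0];
// -> '<a>b'  (longest token wins)
```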
@@ -23309,6 +23470,67 @@ class PreTrainedTokenizer extends _utils_generic_js__WEBPACK_IMPORTED_MODULE_0__

         return decoded;
     }
+
+    /**
+     * Retrieve the chat template string used for tokenizing chat messages. This template is used
+     * internally by the `apply_chat_template` method and can also be used externally to retrieve the model's chat
+     * template for better generation tracking.
+     *
+     * @param {Object} options An optional object containing the following properties:
+     * @param {string} [options.chat_template=null]
+     *      A Jinja template or the name of a template to use for this conversion.
+     *      It is usually not necessary to pass anything to this argument,
+     *      as the model's template will be used by default.
+     * @param {Object[]} [options.tools=null]
+     *      A list of tools (callable functions) that will be accessible to the model. If the template does not
+     *      support function calling, this argument will have no effect. Each tool should be passed as a JSON Schema,
+     *      giving the name, description and argument types for the tool. See our
+     *      [chat templating guide](https://huggingface.co/docs/transformers/main/en/chat_templating#automated-function-conversion-for-tool-use)
+     *      for more information.
+     * @returns {string} The chat template string.
+     */
+    get_chat_template({
+        chat_template = null,
+        tools = null,
+    } = {}) {
+
+        // First, handle the cases when the model has a dict of multiple templates
+        if (this.chat_template && typeof this.chat_template === 'object') {
+            const template_dict = this.chat_template;
+
+            if (chat_template !== null && Object.hasOwn(template_dict, chat_template)) {
+                // The user can pass the name of a template to the chat template argument instead of an entire template
+                chat_template = template_dict[chat_template];
+            } else if (chat_template === null) {
+                if (tools !== null && 'tool_use' in template_dict) {
+                    chat_template = template_dict['tool_use'];
+                } else if ('default' in template_dict) {
+                    chat_template = template_dict['default'];
+                } else {
+                    throw Error(
+                        `This model has multiple chat templates with no default specified! Please either pass a chat ` +
+                        `template or the name of the template you wish to use to the 'chat_template' argument. Available ` +
+                        `template names are ${Object.keys(template_dict).sort()}.`
+                    )
+                }
+            }
+        } else if (chat_template === null) {
+            // These are the cases when the model has a single template
+            // priority: `chat_template` argument > `tokenizer.chat_template`
+            if (this.chat_template) {
+                chat_template = this.chat_template;
+            } else {
+                throw Error(
+                    "Cannot use apply_chat_template() because tokenizer.chat_template is not set and no template " +
+                    "argument was passed! For information about writing templates and setting the " +
+                    "tokenizer.chat_template attribute, please see the documentation at " +
+                    "https://huggingface.co/docs/transformers/main/en/chat_templating"
+                )
+            }
+        }
+        return chat_template;
+    }
+
     /**
      * Converts a list of message objects with `"role"` and `"content"` keys to a list of token
      * ids. This method is intended for use with chat models, and will read the tokenizer's chat_template attribute to
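Note: a usage sketch of the new `get_chat_template`; the model id below is an assumption for illustration, and any tokenizer shipping a chat template should behave the same:

```js
import { AutoTokenizer } from '@huggingface/transformers';

const tokenizer = await AutoTokenizer.from_pretrained('Xenova/mistral-7b-instruct-v0.2');

// Resolution order: explicit template string/name > 'tool_use' (when tools are passed) > 'default'.
const template = tokenizer.get_chat_template(); // throws if no template is set anywhere

const text = tokenizer.apply_chat_template(
    [{ role: 'user', content: 'Hello!' }],
    { tokenize: false },
);
```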
@@ -23382,39 +23604,8 @@ class PreTrainedTokenizer extends _utils_generic_js__WEBPACK_IMPORTED_MODULE_0__
         ...kwargs
     } = {}) {

-
-        if (
-            (this.chat_template && typeof this.chat_template === 'object')
-            || this.chat_template === null
-        ) {
-            const template_dict = this.chat_template;
+        chat_template = this.get_chat_template({ chat_template, tools });

-            if (chat_template !== null && Object.hasOwn(template_dict, chat_template)) {
-                // The user can pass the name of a template to the chat template argument instead of an entire template
-                chat_template = template_dict[chat_template];
-            } else if (chat_template === null && 'default' in template_dict) {
-                chat_template = template_dict['default'];
-            } else if (chat_template === null) {
-                throw Error(
-                    `This model has multiple chat templates with no default specified! Please either pass a chat ` +
-                    `template or the name of the template you wish to use to the 'chat_template' argument. Available ` +
-                    `template names are ${Object.keys(template_dict).sort()}.`
-                )
-            }
-        } else {
-            // These are the cases when the model has a single template
-            // priority: `chat_template` argument > `tokenizer.chat_template`
-            if (this.chat_template) {
-                chat_template = this.chat_template;
-            } else {
-                throw Error(
-                    "Cannot use apply_chat_template() because tokenizer.chat_template is not set and no template " +
-                    "argument was passed! For information about writing templates and setting the " +
-                    "tokenizer.chat_template attribute, please see the documentation at " +
-                    "https://huggingface.co/docs/transformers/main/en/chat_templating"
-                )
-            }
-        }
         if (typeof chat_template !== 'string') {
             throw Error(`chat_template must be a string, but got ${typeof chat_template}`);
         }
@@ -25401,6 +25592,7 @@ __webpack_require__.r(__webpack_exports__);
 /* harmony export */   escapeRegExp: () => (/* binding */ escapeRegExp),
 /* harmony export */   isIntegralNumber: () => (/* binding */ isIntegralNumber),
 /* harmony export */   isTypedArray: () => (/* binding */ isTypedArray),
+/* harmony export */   len: () => (/* binding */ len),
 /* harmony export */   mergeArrays: () => (/* binding */ mergeArrays),
 /* harmony export */   pick: () => (/* binding */ pick),
 /* harmony export */   pop: () => (/* binding */ pop),
@@ -25557,6 +25749,18 @@ function pick(o, props) {
     );
 }

+/**
+ * Calculate the length of a string, taking multi-byte characters into account.
+ * This mimics the behavior of Python's `len` function.
+ * @param {string} s The string to calculate the length of.
+ * @returns {number} The length of the string.
+ */
+function len(s) {
+    let length = 0;
+    for (const c of s) ++length;
+    return length;
+}
+

 /***/ }),

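Note: `len` counts code points by iterating the string, whereas `String.prototype.length` counts UTF-16 code units; the Unigram lattice above relies on the former so that characters outside the BMP occupy a single position:

```js
'👍'.length; // 2 (a surrogate pair)
len('👍');   // 1
len('a👍b'); // 3
```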
@@ -25803,7 +26007,7 @@ class CharTrie {
      * @param {string[]} texts The strings to add to the trie.
      */
     extend(texts) {
-        for (
+        for (const text of texts) {
             this.push(text);
         }
     }
@@ -25814,7 +26018,7 @@ class CharTrie {
      */
     push(text) {
         let node = this.root;
-        for (
+        for (const ch of text) {
             let child = node.children.get(ch);
             if (child === undefined) {
                 child = CharTrieNode.default();
@@ -25832,12 +26036,14 @@ class CharTrie {
      */
     *commonPrefixSearch(text) {
         let node = this.root;
+        if (node === undefined) return;
+
         let prefix = "";
-        for (
-            const ch = text[i];
+        for (const ch of text) {
             prefix += ch;
             node = node.children.get(ch);
-            if (node
+            if (node === undefined) return;
+            if (node.isLeaf) {
                 yield prefix;
             }
         }
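Note: with the repaired loop and the new `undefined` guards, `commonPrefixSearch` yields every stored string that prefixes the query. A behavior sketch using the class from this file:

```js
const trie = new CharTrie();
trie.extend(['a', 'ab', 'abc', 'b']);

[...trie.commonPrefixSearch('abcd')]; // -> ['a', 'ab', 'abc']
[...trie.commonPrefixSearch('zzz')];  // -> []
```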
@@ -25879,8 +26085,8 @@ class TokenLattice {
      * @param {number} eosTokenId The end-of-sequence token ID.
      */
     constructor(sentence, bosTokenId, eosTokenId) {
-        this.
-        this.len =
+        this.chars = Array.from(sentence);
+        this.len = this.chars.length;
         this.bosTokenId = bosTokenId;
         this.eosTokenId = eosTokenId;
         this.nodes = [];
@@ -25914,7 +26120,7 @@ class TokenLattice {
     /**
      * Implements the Viterbi algorithm to compute the most likely sequence of tokens.
      *
-     * @returns {TokenLatticeNode[]} The
+     * @returns {TokenLatticeNode[]} The most likely sequence of tokens.
      */
     viterbi() {
         const len = this.len;
@@ -25968,11 +26174,11 @@ class TokenLattice {
      * @returns {string} The array of nodes representing the most likely sequence of tokens.
      */
     piece(node) {
-        return this.
+        return this.chars.slice(node.pos, node.pos + node.length).join('');
     }

     /**
-     * @returns {
+     * @returns {string[]} The most likely sequence of tokens.
      */
     tokens() {
         const nodes = this.viterbi();
@@ -25980,7 +26186,7 @@ class TokenLattice {
     }

     /**
-     * @returns {
+     * @returns {number[]} The most likely sequence of token ids.
      */
     tokenIds() {
         const nodes = this.viterbi();
@@ -26217,7 +26423,7 @@ __webpack_require__.r(__webpack_exports__);
 /**
  * @typedef {Object} PretrainedOptions Options for loading a pretrained model.
  * @property {function} [progress_callback=null] If specified, this function will be called during model construction, to provide the user with progress updates.
- * @property {
+ * @property {import('../configs.js').PretrainedConfig} [config=null] Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:
  * - The model is a model provided by the library (loaded with the *model id* string of a pretrained model).
  * - The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a configuration JSON file named *config.json* is found in the directory.
 * @property {string} [cache_dir=null] Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used.
@@ -27822,11 +28028,20 @@ function softmax(arr) {
  * @returns {T} The resulting log_softmax array.
  */
 function log_softmax(arr) {
-    // Compute the
-    const
+    // Compute the maximum value in the array
+    const maxVal = max(arr)[0];
+
+    // Compute the sum of the exponentials
+    let sumExps = 0;
+    for(let i = 0; i < arr.length; ++i) {
+        sumExps += Math.exp(arr[i] - maxVal);
+    }
+
+    // Compute the log of the sum
+    const logSum = Math.log(sumExps);

-    //
-    const logSoftmaxArr =
+    // Compute the softmax values
+    const logSoftmaxArr = arr.map(x => x - maxVal - logSum);

     return /** @type {T} */(logSoftmaxArr);
 }
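Note: the completed body is the numerically stable log-softmax, log_softmax(x)_i = x_i - max(x) - log(Σ_j exp(x_j - max(x))). A standalone re-implementation for verification (using plain Math.max in place of the library's max()):

```js
function logSoftmax(arr) {
    // Subtracting the maximum keeps every exponent <= 0, avoiding overflow.
    const maxVal = Math.max(...arr);
    const logSum = Math.log(arr.reduce((sum, x) => sum + Math.exp(x - maxVal), 0));
    return arr.map(x => x - maxVal - logSum);
}

logSoftmax([1000, 1000]); // -> [-0.6931..., -0.6931...]; the naive exp(1000) would overflow to Infinity
```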
@@ -30276,6 +30491,7 @@ __webpack_require__.r(__webpack_exports__);
 /* harmony export */   AutoModelForTextToSpectrogram: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.AutoModelForTextToSpectrogram),
 /* harmony export */   AutoModelForTextToWaveform: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.AutoModelForTextToWaveform),
 /* harmony export */   AutoModelForTokenClassification: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.AutoModelForTokenClassification),
+/* harmony export */   AutoModelForUniversalSegmentation: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.AutoModelForUniversalSegmentation),
 /* harmony export */   AutoModelForVision2Seq: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.AutoModelForVision2Seq),
 /* harmony export */   AutoModelForXVector: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.AutoModelForXVector),
 /* harmony export */   AutoModelForZeroShotObjectDetection: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.AutoModelForZeroShotObjectDetection),
@@ -30320,8 +30536,10 @@ __webpack_require__.r(__webpack_exports__);
 /* harmony export */   CLIPSegForImageSegmentation: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.CLIPSegForImageSegmentation),
 /* harmony export */   CLIPSegModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.CLIPSegModel),
 /* harmony export */   CLIPSegPreTrainedModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.CLIPSegPreTrainedModel),
+/* harmony export */   CLIPTextModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.CLIPTextModel),
 /* harmony export */   CLIPTextModelWithProjection: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.CLIPTextModelWithProjection),
 /* harmony export */   CLIPTokenizer: () => (/* reexport safe */ _tokenizers_js__WEBPACK_IMPORTED_MODULE_3__.CLIPTokenizer),
+/* harmony export */   CLIPVisionModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.CLIPVisionModel),
 /* harmony export */   CLIPVisionModelWithProjection: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.CLIPVisionModelWithProjection),
 /* harmony export */   CamembertForMaskedLM: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.CamembertForMaskedLM),
 /* harmony export */   CamembertForQuestionAnswering: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.CamembertForQuestionAnswering),
@@ -30383,6 +30601,8 @@ __webpack_require__.r(__webpack_exports__);
 /* harmony export */   DebertaV2Model: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.DebertaV2Model),
 /* harmony export */   DebertaV2PreTrainedModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.DebertaV2PreTrainedModel),
 /* harmony export */   DebertaV2Tokenizer: () => (/* reexport safe */ _tokenizers_js__WEBPACK_IMPORTED_MODULE_3__.DebertaV2Tokenizer),
+/* harmony export */   DecisionTransformerModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.DecisionTransformerModel),
+/* harmony export */   DecisionTransformerPreTrainedModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.DecisionTransformerPreTrainedModel),
 /* harmony export */   DeiTFeatureExtractor: () => (/* reexport safe */ _processors_js__WEBPACK_IMPORTED_MODULE_4__.DeiTFeatureExtractor),
 /* harmony export */   DeiTForImageClassification: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.DeiTForImageClassification),
 /* harmony export */   DeiTModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.DeiTModel),
@@ -30472,6 +30692,8 @@ __webpack_require__.r(__webpack_exports__);
 /* harmony export */   GemmaPreTrainedModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.GemmaPreTrainedModel),
 /* harmony export */   GemmaTokenizer: () => (/* reexport safe */ _tokenizers_js__WEBPACK_IMPORTED_MODULE_3__.GemmaTokenizer),
 /* harmony export */   Grok1Tokenizer: () => (/* reexport safe */ _tokenizers_js__WEBPACK_IMPORTED_MODULE_3__.Grok1Tokenizer),
+/* harmony export */   GroupViTModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.GroupViTModel),
+/* harmony export */   GroupViTPreTrainedModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.GroupViTPreTrainedModel),
 /* harmony export */   HerbertTokenizer: () => (/* reexport safe */ _tokenizers_js__WEBPACK_IMPORTED_MODULE_3__.HerbertTokenizer),
 /* harmony export */   HieraForImageClassification: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.HieraForImageClassification),
 /* harmony export */   HieraModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.HieraModel),
@@ -30525,6 +30747,10 @@ __webpack_require__.r(__webpack_exports__);
 /* harmony export */   MarianModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.MarianModel),
 /* harmony export */   MarianPreTrainedModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.MarianPreTrainedModel),
 /* harmony export */   MarianTokenizer: () => (/* reexport safe */ _tokenizers_js__WEBPACK_IMPORTED_MODULE_3__.MarianTokenizer),
+/* harmony export */   MaskFormerFeatureExtractor: () => (/* reexport safe */ _processors_js__WEBPACK_IMPORTED_MODULE_4__.MaskFormerFeatureExtractor),
+/* harmony export */   MaskFormerForInstanceSegmentation: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.MaskFormerForInstanceSegmentation),
+/* harmony export */   MaskFormerModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.MaskFormerModel),
+/* harmony export */   MaskFormerPreTrainedModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.MaskFormerPreTrainedModel),
 /* harmony export */   MaskedLMOutput: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.MaskedLMOutput),
 /* harmony export */   MaxLengthCriteria: () => (/* reexport safe */ _generation_stopping_criteria_js__WEBPACK_IMPORTED_MODULE_11__.MaxLengthCriteria),
 /* harmony export */   MistralForCausalLM: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.MistralForCausalLM),
@@ -30602,6 +30828,10 @@ __webpack_require__.r(__webpack_exports__);
 /* harmony export */   PretrainedConfig: () => (/* reexport safe */ _configs_js__WEBPACK_IMPORTED_MODULE_5__.PretrainedConfig),
 /* harmony export */   PretrainedMixin: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.PretrainedMixin),
 /* harmony export */   Processor: () => (/* reexport safe */ _processors_js__WEBPACK_IMPORTED_MODULE_4__.Processor),
+/* harmony export */   PvtForImageClassification: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.PvtForImageClassification),
+/* harmony export */   PvtImageProcessor: () => (/* reexport safe */ _processors_js__WEBPACK_IMPORTED_MODULE_4__.PvtImageProcessor),
+/* harmony export */   PvtModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.PvtModel),
+/* harmony export */   PvtPreTrainedModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.PvtPreTrainedModel),
 /* harmony export */   PyAnnoteFeatureExtractor: () => (/* reexport safe */ _processors_js__WEBPACK_IMPORTED_MODULE_4__.PyAnnoteFeatureExtractor),
 /* harmony export */   PyAnnoteForAudioFrameClassification: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.PyAnnoteForAudioFrameClassification),
 /* harmony export */   PyAnnoteModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.PyAnnoteModel),
@@ -30722,6 +30952,11 @@ __webpack_require__.r(__webpack_exports__);
 /* harmony export */   ViTFeatureExtractor: () => (/* reexport safe */ _processors_js__WEBPACK_IMPORTED_MODULE_4__.ViTFeatureExtractor),
 /* harmony export */   ViTForImageClassification: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.ViTForImageClassification),
 /* harmony export */   ViTImageProcessor: () => (/* reexport safe */ _processors_js__WEBPACK_IMPORTED_MODULE_4__.ViTImageProcessor),
+/* harmony export */   ViTMAEModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.ViTMAEModel),
+/* harmony export */   ViTMAEPreTrainedModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.ViTMAEPreTrainedModel),
+/* harmony export */   ViTMSNForImageClassification: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.ViTMSNForImageClassification),
+/* harmony export */   ViTMSNModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.ViTMSNModel),
+/* harmony export */   ViTMSNPreTrainedModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.ViTMSNPreTrainedModel),
 /* harmony export */   ViTModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.ViTModel),
 /* harmony export */   ViTPreTrainedModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.ViTPreTrainedModel),
 /* harmony export */   VisionEncoderDecoderModel: () => (/* reexport safe */ _models_js__WEBPACK_IMPORTED_MODULE_2__.VisionEncoderDecoderModel),
@@ -30904,6 +31139,7 @@ var __webpack_exports__AutoModelForSpeechSeq2Seq = __webpack_exports__.AutoModel
 var __webpack_exports__AutoModelForTextToSpectrogram = __webpack_exports__.AutoModelForTextToSpectrogram;
 var __webpack_exports__AutoModelForTextToWaveform = __webpack_exports__.AutoModelForTextToWaveform;
 var __webpack_exports__AutoModelForTokenClassification = __webpack_exports__.AutoModelForTokenClassification;
+var __webpack_exports__AutoModelForUniversalSegmentation = __webpack_exports__.AutoModelForUniversalSegmentation;
 var __webpack_exports__AutoModelForVision2Seq = __webpack_exports__.AutoModelForVision2Seq;
 var __webpack_exports__AutoModelForXVector = __webpack_exports__.AutoModelForXVector;
 var __webpack_exports__AutoModelForZeroShotObjectDetection = __webpack_exports__.AutoModelForZeroShotObjectDetection;
@@ -30948,8 +31184,10 @@ var __webpack_exports__CLIPPreTrainedModel = __webpack_exports__.CLIPPreTrainedM
 var __webpack_exports__CLIPSegForImageSegmentation = __webpack_exports__.CLIPSegForImageSegmentation;
 var __webpack_exports__CLIPSegModel = __webpack_exports__.CLIPSegModel;
 var __webpack_exports__CLIPSegPreTrainedModel = __webpack_exports__.CLIPSegPreTrainedModel;
+var __webpack_exports__CLIPTextModel = __webpack_exports__.CLIPTextModel;
 var __webpack_exports__CLIPTextModelWithProjection = __webpack_exports__.CLIPTextModelWithProjection;
 var __webpack_exports__CLIPTokenizer = __webpack_exports__.CLIPTokenizer;
+var __webpack_exports__CLIPVisionModel = __webpack_exports__.CLIPVisionModel;
 var __webpack_exports__CLIPVisionModelWithProjection = __webpack_exports__.CLIPVisionModelWithProjection;
 var __webpack_exports__CamembertForMaskedLM = __webpack_exports__.CamembertForMaskedLM;
 var __webpack_exports__CamembertForQuestionAnswering = __webpack_exports__.CamembertForQuestionAnswering;
@@ -31011,6 +31249,8 @@ var __webpack_exports__DebertaV2ForTokenClassification = __webpack_exports__.Deb
 var __webpack_exports__DebertaV2Model = __webpack_exports__.DebertaV2Model;
 var __webpack_exports__DebertaV2PreTrainedModel = __webpack_exports__.DebertaV2PreTrainedModel;
 var __webpack_exports__DebertaV2Tokenizer = __webpack_exports__.DebertaV2Tokenizer;
+var __webpack_exports__DecisionTransformerModel = __webpack_exports__.DecisionTransformerModel;
+var __webpack_exports__DecisionTransformerPreTrainedModel = __webpack_exports__.DecisionTransformerPreTrainedModel;
 var __webpack_exports__DeiTFeatureExtractor = __webpack_exports__.DeiTFeatureExtractor;
 var __webpack_exports__DeiTForImageClassification = __webpack_exports__.DeiTForImageClassification;
 var __webpack_exports__DeiTModel = __webpack_exports__.DeiTModel;
@@ -31100,6 +31340,8 @@ var __webpack_exports__GemmaModel = __webpack_exports__.GemmaModel;
 var __webpack_exports__GemmaPreTrainedModel = __webpack_exports__.GemmaPreTrainedModel;
 var __webpack_exports__GemmaTokenizer = __webpack_exports__.GemmaTokenizer;
 var __webpack_exports__Grok1Tokenizer = __webpack_exports__.Grok1Tokenizer;
+var __webpack_exports__GroupViTModel = __webpack_exports__.GroupViTModel;
+var __webpack_exports__GroupViTPreTrainedModel = __webpack_exports__.GroupViTPreTrainedModel;
 var __webpack_exports__HerbertTokenizer = __webpack_exports__.HerbertTokenizer;
 var __webpack_exports__HieraForImageClassification = __webpack_exports__.HieraForImageClassification;
 var __webpack_exports__HieraModel = __webpack_exports__.HieraModel;
@@ -31153,6 +31395,10 @@ var __webpack_exports__MarianMTModel = __webpack_exports__.MarianMTModel;
 var __webpack_exports__MarianModel = __webpack_exports__.MarianModel;
 var __webpack_exports__MarianPreTrainedModel = __webpack_exports__.MarianPreTrainedModel;
 var __webpack_exports__MarianTokenizer = __webpack_exports__.MarianTokenizer;
+var __webpack_exports__MaskFormerFeatureExtractor = __webpack_exports__.MaskFormerFeatureExtractor;
+var __webpack_exports__MaskFormerForInstanceSegmentation = __webpack_exports__.MaskFormerForInstanceSegmentation;
+var __webpack_exports__MaskFormerModel = __webpack_exports__.MaskFormerModel;
+var __webpack_exports__MaskFormerPreTrainedModel = __webpack_exports__.MaskFormerPreTrainedModel;
 var __webpack_exports__MaskedLMOutput = __webpack_exports__.MaskedLMOutput;
 var __webpack_exports__MaxLengthCriteria = __webpack_exports__.MaxLengthCriteria;
 var __webpack_exports__MistralForCausalLM = __webpack_exports__.MistralForCausalLM;
@@ -31230,6 +31476,10 @@ var __webpack_exports__PreTrainedTokenizer = __webpack_exports__.PreTrainedToken
 var __webpack_exports__PretrainedConfig = __webpack_exports__.PretrainedConfig;
 var __webpack_exports__PretrainedMixin = __webpack_exports__.PretrainedMixin;
 var __webpack_exports__Processor = __webpack_exports__.Processor;
+var __webpack_exports__PvtForImageClassification = __webpack_exports__.PvtForImageClassification;
+var __webpack_exports__PvtImageProcessor = __webpack_exports__.PvtImageProcessor;
+var __webpack_exports__PvtModel = __webpack_exports__.PvtModel;
+var __webpack_exports__PvtPreTrainedModel = __webpack_exports__.PvtPreTrainedModel;
 var __webpack_exports__PyAnnoteFeatureExtractor = __webpack_exports__.PyAnnoteFeatureExtractor;
 var __webpack_exports__PyAnnoteForAudioFrameClassification = __webpack_exports__.PyAnnoteForAudioFrameClassification;
 var __webpack_exports__PyAnnoteModel = __webpack_exports__.PyAnnoteModel;
@@ -31350,6 +31600,11 @@ var __webpack_exports__UniSpeechSatPreTrainedModel = __webpack_exports__.UniSpee
 var __webpack_exports__ViTFeatureExtractor = __webpack_exports__.ViTFeatureExtractor;
 var __webpack_exports__ViTForImageClassification = __webpack_exports__.ViTForImageClassification;
 var __webpack_exports__ViTImageProcessor = __webpack_exports__.ViTImageProcessor;
+var __webpack_exports__ViTMAEModel = __webpack_exports__.ViTMAEModel;
+var __webpack_exports__ViTMAEPreTrainedModel = __webpack_exports__.ViTMAEPreTrainedModel;
+var __webpack_exports__ViTMSNForImageClassification = __webpack_exports__.ViTMSNForImageClassification;
+var __webpack_exports__ViTMSNModel = __webpack_exports__.ViTMSNModel;
+var __webpack_exports__ViTMSNPreTrainedModel = __webpack_exports__.ViTMSNPreTrainedModel;
 var __webpack_exports__ViTModel = __webpack_exports__.ViTModel;
 var __webpack_exports__ViTPreTrainedModel = __webpack_exports__.ViTPreTrainedModel;
 var __webpack_exports__VisionEncoderDecoderModel = __webpack_exports__.VisionEncoderDecoderModel;
@@ -31454,6 +31709,6 @@ var __webpack_exports__topk = __webpack_exports__.topk;
 var __webpack_exports__window_function = __webpack_exports__.window_function;
 var __webpack_exports__zeros = __webpack_exports__.zeros;
 var __webpack_exports__zeros_like = __webpack_exports__.zeros_like;
-
export { __webpack_exports__ASTFeatureExtractor as ASTFeatureExtractor, __webpack_exports__ASTForAudioClassification as ASTForAudioClassification, __webpack_exports__ASTModel as ASTModel, __webpack_exports__ASTPreTrainedModel as ASTPreTrainedModel, __webpack_exports__AlbertForMaskedLM as AlbertForMaskedLM, __webpack_exports__AlbertForQuestionAnswering as AlbertForQuestionAnswering, __webpack_exports__AlbertForSequenceClassification as AlbertForSequenceClassification, __webpack_exports__AlbertModel as AlbertModel, __webpack_exports__AlbertPreTrainedModel as AlbertPreTrainedModel, __webpack_exports__AlbertTokenizer as AlbertTokenizer, __webpack_exports__AudioClassificationPipeline as AudioClassificationPipeline, __webpack_exports__AutoConfig as AutoConfig, __webpack_exports__AutoModel as AutoModel, __webpack_exports__AutoModelForAudioClassification as AutoModelForAudioClassification, __webpack_exports__AutoModelForAudioFrameClassification as AutoModelForAudioFrameClassification, __webpack_exports__AutoModelForCTC as AutoModelForCTC, __webpack_exports__AutoModelForCausalLM as AutoModelForCausalLM, __webpack_exports__AutoModelForDepthEstimation as AutoModelForDepthEstimation, __webpack_exports__AutoModelForDocumentQuestionAnswering as AutoModelForDocumentQuestionAnswering, __webpack_exports__AutoModelForImageClassification as AutoModelForImageClassification, __webpack_exports__AutoModelForImageFeatureExtraction as AutoModelForImageFeatureExtraction, __webpack_exports__AutoModelForImageMatting as AutoModelForImageMatting, __webpack_exports__AutoModelForImageSegmentation as AutoModelForImageSegmentation, __webpack_exports__AutoModelForImageToImage as AutoModelForImageToImage, __webpack_exports__AutoModelForMaskGeneration as AutoModelForMaskGeneration, __webpack_exports__AutoModelForMaskedLM as AutoModelForMaskedLM, __webpack_exports__AutoModelForNormalEstimation as AutoModelForNormalEstimation, __webpack_exports__AutoModelForObjectDetection as AutoModelForObjectDetection, __webpack_exports__AutoModelForQuestionAnswering as AutoModelForQuestionAnswering, __webpack_exports__AutoModelForSemanticSegmentation as AutoModelForSemanticSegmentation, __webpack_exports__AutoModelForSeq2SeqLM as AutoModelForSeq2SeqLM, __webpack_exports__AutoModelForSequenceClassification as AutoModelForSequenceClassification, __webpack_exports__AutoModelForSpeechSeq2Seq as AutoModelForSpeechSeq2Seq, __webpack_exports__AutoModelForTextToSpectrogram as AutoModelForTextToSpectrogram, __webpack_exports__AutoModelForTextToWaveform as AutoModelForTextToWaveform, __webpack_exports__AutoModelForTokenClassification as AutoModelForTokenClassification, __webpack_exports__AutoModelForVision2Seq as AutoModelForVision2Seq, __webpack_exports__AutoModelForXVector as AutoModelForXVector, __webpack_exports__AutoModelForZeroShotObjectDetection as AutoModelForZeroShotObjectDetection, __webpack_exports__AutoProcessor as AutoProcessor, __webpack_exports__AutoTokenizer as AutoTokenizer, __webpack_exports__AutomaticSpeechRecognitionPipeline as AutomaticSpeechRecognitionPipeline, __webpack_exports__BartForConditionalGeneration as BartForConditionalGeneration, __webpack_exports__BartForSequenceClassification as BartForSequenceClassification, __webpack_exports__BartModel as BartModel, __webpack_exports__BartPretrainedModel as BartPretrainedModel, __webpack_exports__BartTokenizer as BartTokenizer, __webpack_exports__BaseModelOutput as BaseModelOutput, __webpack_exports__BaseStreamer as BaseStreamer, __webpack_exports__BeitFeatureExtractor as 
BeitFeatureExtractor, __webpack_exports__BeitForImageClassification as BeitForImageClassification, __webpack_exports__BeitModel as BeitModel, __webpack_exports__BeitPreTrainedModel as BeitPreTrainedModel, __webpack_exports__BertForMaskedLM as BertForMaskedLM, __webpack_exports__BertForQuestionAnswering as BertForQuestionAnswering, __webpack_exports__BertForSequenceClassification as BertForSequenceClassification, __webpack_exports__BertForTokenClassification as BertForTokenClassification, __webpack_exports__BertModel as BertModel, __webpack_exports__BertPreTrainedModel as BertPreTrainedModel, __webpack_exports__BertTokenizer as BertTokenizer, __webpack_exports__BitImageProcessor as BitImageProcessor, __webpack_exports__BlenderbotForConditionalGeneration as BlenderbotForConditionalGeneration, __webpack_exports__BlenderbotModel as BlenderbotModel, __webpack_exports__BlenderbotPreTrainedModel as BlenderbotPreTrainedModel, __webpack_exports__BlenderbotSmallForConditionalGeneration as BlenderbotSmallForConditionalGeneration, __webpack_exports__BlenderbotSmallModel as BlenderbotSmallModel, __webpack_exports__BlenderbotSmallPreTrainedModel as BlenderbotSmallPreTrainedModel, __webpack_exports__BlenderbotSmallTokenizer as BlenderbotSmallTokenizer, __webpack_exports__BlenderbotTokenizer as BlenderbotTokenizer, __webpack_exports__BloomForCausalLM as BloomForCausalLM, __webpack_exports__BloomModel as BloomModel, __webpack_exports__BloomPreTrainedModel as BloomPreTrainedModel, __webpack_exports__BloomTokenizer as BloomTokenizer, __webpack_exports__CLIPFeatureExtractor as CLIPFeatureExtractor, __webpack_exports__CLIPImageProcessor as CLIPImageProcessor, __webpack_exports__CLIPModel as CLIPModel, __webpack_exports__CLIPPreTrainedModel as CLIPPreTrainedModel, __webpack_exports__CLIPSegForImageSegmentation as CLIPSegForImageSegmentation, __webpack_exports__CLIPSegModel as CLIPSegModel, __webpack_exports__CLIPSegPreTrainedModel as CLIPSegPreTrainedModel, __webpack_exports__CLIPTextModelWithProjection as CLIPTextModelWithProjection, __webpack_exports__CLIPTokenizer as CLIPTokenizer, __webpack_exports__CLIPVisionModelWithProjection as CLIPVisionModelWithProjection, __webpack_exports__CamembertForMaskedLM as CamembertForMaskedLM, __webpack_exports__CamembertForQuestionAnswering as CamembertForQuestionAnswering, __webpack_exports__CamembertForSequenceClassification as CamembertForSequenceClassification, __webpack_exports__CamembertForTokenClassification as CamembertForTokenClassification, __webpack_exports__CamembertModel as CamembertModel, __webpack_exports__CamembertPreTrainedModel as CamembertPreTrainedModel, __webpack_exports__CamembertTokenizer as CamembertTokenizer, __webpack_exports__CausalLMOutput as CausalLMOutput, __webpack_exports__CausalLMOutputWithPast as CausalLMOutputWithPast, __webpack_exports__ChineseCLIPFeatureExtractor as ChineseCLIPFeatureExtractor, __webpack_exports__ChineseCLIPModel as ChineseCLIPModel, __webpack_exports__ChineseCLIPPreTrainedModel as ChineseCLIPPreTrainedModel, __webpack_exports__ClapAudioModelWithProjection as ClapAudioModelWithProjection, __webpack_exports__ClapFeatureExtractor as ClapFeatureExtractor, __webpack_exports__ClapModel as ClapModel, __webpack_exports__ClapPreTrainedModel as ClapPreTrainedModel, __webpack_exports__ClapTextModelWithProjection as ClapTextModelWithProjection, __webpack_exports__CodeGenForCausalLM as CodeGenForCausalLM, __webpack_exports__CodeGenModel as CodeGenModel, __webpack_exports__CodeGenPreTrainedModel as CodeGenPreTrainedModel, 
__webpack_exports__CodeGenTokenizer as CodeGenTokenizer, __webpack_exports__CodeLlamaTokenizer as CodeLlamaTokenizer, __webpack_exports__CohereForCausalLM as CohereForCausalLM, __webpack_exports__CohereModel as CohereModel, __webpack_exports__CoherePreTrainedModel as CoherePreTrainedModel, __webpack_exports__CohereTokenizer as CohereTokenizer, __webpack_exports__ConvBertForMaskedLM as ConvBertForMaskedLM, __webpack_exports__ConvBertForQuestionAnswering as ConvBertForQuestionAnswering, __webpack_exports__ConvBertForSequenceClassification as ConvBertForSequenceClassification, __webpack_exports__ConvBertForTokenClassification as ConvBertForTokenClassification, __webpack_exports__ConvBertModel as ConvBertModel, __webpack_exports__ConvBertPreTrainedModel as ConvBertPreTrainedModel, __webpack_exports__ConvBertTokenizer as ConvBertTokenizer, __webpack_exports__ConvNextFeatureExtractor as ConvNextFeatureExtractor, __webpack_exports__ConvNextForImageClassification as ConvNextForImageClassification, __webpack_exports__ConvNextImageProcessor as ConvNextImageProcessor, __webpack_exports__ConvNextModel as ConvNextModel, __webpack_exports__ConvNextPreTrainedModel as ConvNextPreTrainedModel, __webpack_exports__ConvNextV2ForImageClassification as ConvNextV2ForImageClassification, __webpack_exports__ConvNextV2Model as ConvNextV2Model, __webpack_exports__ConvNextV2PreTrainedModel as ConvNextV2PreTrainedModel, __webpack_exports__DPTFeatureExtractor as DPTFeatureExtractor, __webpack_exports__DPTForDepthEstimation as DPTForDepthEstimation, __webpack_exports__DPTImageProcessor as DPTImageProcessor, __webpack_exports__DPTModel as DPTModel, __webpack_exports__DPTPreTrainedModel as DPTPreTrainedModel, __webpack_exports__DebertaForMaskedLM as DebertaForMaskedLM, __webpack_exports__DebertaForQuestionAnswering as DebertaForQuestionAnswering, __webpack_exports__DebertaForSequenceClassification as DebertaForSequenceClassification, __webpack_exports__DebertaForTokenClassification as DebertaForTokenClassification, __webpack_exports__DebertaModel as DebertaModel, __webpack_exports__DebertaPreTrainedModel as DebertaPreTrainedModel, __webpack_exports__DebertaTokenizer as DebertaTokenizer, __webpack_exports__DebertaV2ForMaskedLM as DebertaV2ForMaskedLM, __webpack_exports__DebertaV2ForQuestionAnswering as DebertaV2ForQuestionAnswering, __webpack_exports__DebertaV2ForSequenceClassification as DebertaV2ForSequenceClassification, __webpack_exports__DebertaV2ForTokenClassification as DebertaV2ForTokenClassification, __webpack_exports__DebertaV2Model as DebertaV2Model, __webpack_exports__DebertaV2PreTrainedModel as DebertaV2PreTrainedModel, __webpack_exports__DebertaV2Tokenizer as DebertaV2Tokenizer, __webpack_exports__DeiTFeatureExtractor as DeiTFeatureExtractor, __webpack_exports__DeiTForImageClassification as DeiTForImageClassification, __webpack_exports__DeiTModel as DeiTModel, __webpack_exports__DeiTPreTrainedModel as DeiTPreTrainedModel, __webpack_exports__DepthAnythingForDepthEstimation as DepthAnythingForDepthEstimation, __webpack_exports__DepthAnythingPreTrainedModel as DepthAnythingPreTrainedModel, __webpack_exports__DepthEstimationPipeline as DepthEstimationPipeline, __webpack_exports__DetrFeatureExtractor as DetrFeatureExtractor, __webpack_exports__DetrForObjectDetection as DetrForObjectDetection, __webpack_exports__DetrForSegmentation as DetrForSegmentation, __webpack_exports__DetrModel as DetrModel, __webpack_exports__DetrObjectDetectionOutput as DetrObjectDetectionOutput, __webpack_exports__DetrPreTrainedModel as 
DetrPreTrainedModel, __webpack_exports__DetrSegmentationOutput as DetrSegmentationOutput, __webpack_exports__Dinov2ForImageClassification as Dinov2ForImageClassification, __webpack_exports__Dinov2Model as Dinov2Model, __webpack_exports__Dinov2PreTrainedModel as Dinov2PreTrainedModel, __webpack_exports__DistilBertForMaskedLM as DistilBertForMaskedLM, __webpack_exports__DistilBertForQuestionAnswering as DistilBertForQuestionAnswering, __webpack_exports__DistilBertForSequenceClassification as DistilBertForSequenceClassification, __webpack_exports__DistilBertForTokenClassification as DistilBertForTokenClassification, __webpack_exports__DistilBertModel as DistilBertModel, __webpack_exports__DistilBertPreTrainedModel as DistilBertPreTrainedModel, __webpack_exports__DistilBertTokenizer as DistilBertTokenizer, __webpack_exports__DocumentQuestionAnsweringPipeline as DocumentQuestionAnsweringPipeline, __webpack_exports__DonutFeatureExtractor as DonutFeatureExtractor, __webpack_exports__DonutSwinModel as DonutSwinModel, __webpack_exports__DonutSwinPreTrainedModel as DonutSwinPreTrainedModel, __webpack_exports__EfficientNetForImageClassification as EfficientNetForImageClassification, __webpack_exports__EfficientNetImageProcessor as EfficientNetImageProcessor, __webpack_exports__EfficientNetModel as EfficientNetModel, __webpack_exports__EfficientNetPreTrainedModel as EfficientNetPreTrainedModel, __webpack_exports__ElectraForMaskedLM as ElectraForMaskedLM, __webpack_exports__ElectraForQuestionAnswering as ElectraForQuestionAnswering, __webpack_exports__ElectraForSequenceClassification as ElectraForSequenceClassification, __webpack_exports__ElectraForTokenClassification as ElectraForTokenClassification, __webpack_exports__ElectraModel as ElectraModel, __webpack_exports__ElectraPreTrainedModel as ElectraPreTrainedModel, __webpack_exports__ElectraTokenizer as ElectraTokenizer, __webpack_exports__EosTokenCriteria as EosTokenCriteria, __webpack_exports__EsmForMaskedLM as EsmForMaskedLM, __webpack_exports__EsmForSequenceClassification as EsmForSequenceClassification, __webpack_exports__EsmForTokenClassification as EsmForTokenClassification, __webpack_exports__EsmModel as EsmModel, __webpack_exports__EsmPreTrainedModel as EsmPreTrainedModel, __webpack_exports__EsmTokenizer as EsmTokenizer, __webpack_exports__FFT as FFT, __webpack_exports__FalconForCausalLM as FalconForCausalLM, __webpack_exports__FalconModel as FalconModel, __webpack_exports__FalconPreTrainedModel as FalconPreTrainedModel, __webpack_exports__FalconTokenizer as FalconTokenizer, __webpack_exports__FastViTForImageClassification as FastViTForImageClassification, __webpack_exports__FastViTModel as FastViTModel, __webpack_exports__FastViTPreTrainedModel as FastViTPreTrainedModel, __webpack_exports__FeatureExtractionPipeline as FeatureExtractionPipeline, __webpack_exports__FeatureExtractor as FeatureExtractor, __webpack_exports__FillMaskPipeline as FillMaskPipeline, __webpack_exports__Florence2ForConditionalGeneration as Florence2ForConditionalGeneration, __webpack_exports__Florence2PreTrainedModel as Florence2PreTrainedModel, __webpack_exports__Florence2Processor as Florence2Processor, __webpack_exports__GLPNFeatureExtractor as GLPNFeatureExtractor, __webpack_exports__GLPNForDepthEstimation as GLPNForDepthEstimation, __webpack_exports__GLPNModel as GLPNModel, __webpack_exports__GLPNPreTrainedModel as GLPNPreTrainedModel, __webpack_exports__GPT2LMHeadModel as GPT2LMHeadModel, __webpack_exports__GPT2Model as GPT2Model, 
__webpack_exports__GPT2PreTrainedModel as GPT2PreTrainedModel, __webpack_exports__GPT2Tokenizer as GPT2Tokenizer, __webpack_exports__GPTBigCodeForCausalLM as GPTBigCodeForCausalLM, __webpack_exports__GPTBigCodeModel as GPTBigCodeModel, __webpack_exports__GPTBigCodePreTrainedModel as GPTBigCodePreTrainedModel, __webpack_exports__GPTJForCausalLM as GPTJForCausalLM, __webpack_exports__GPTJModel as GPTJModel, __webpack_exports__GPTJPreTrainedModel as GPTJPreTrainedModel, __webpack_exports__GPTNeoForCausalLM as GPTNeoForCausalLM, __webpack_exports__GPTNeoModel as GPTNeoModel, __webpack_exports__GPTNeoPreTrainedModel as GPTNeoPreTrainedModel, __webpack_exports__GPTNeoXForCausalLM as GPTNeoXForCausalLM, __webpack_exports__GPTNeoXModel as GPTNeoXModel, __webpack_exports__GPTNeoXPreTrainedModel as GPTNeoXPreTrainedModel, __webpack_exports__GPTNeoXTokenizer as GPTNeoXTokenizer, __webpack_exports__Gemma2ForCausalLM as Gemma2ForCausalLM, __webpack_exports__Gemma2Model as Gemma2Model, __webpack_exports__Gemma2PreTrainedModel as Gemma2PreTrainedModel, __webpack_exports__GemmaForCausalLM as GemmaForCausalLM, __webpack_exports__GemmaModel as GemmaModel, __webpack_exports__GemmaPreTrainedModel as GemmaPreTrainedModel, __webpack_exports__GemmaTokenizer as GemmaTokenizer, __webpack_exports__Grok1Tokenizer as Grok1Tokenizer, __webpack_exports__HerbertTokenizer as HerbertTokenizer, __webpack_exports__HieraForImageClassification as HieraForImageClassification, __webpack_exports__HieraModel as HieraModel, __webpack_exports__HieraPreTrainedModel as HieraPreTrainedModel, __webpack_exports__HubertForCTC as HubertForCTC, __webpack_exports__HubertForSequenceClassification as HubertForSequenceClassification, __webpack_exports__HubertModel as HubertModel, __webpack_exports__HubertPreTrainedModel as HubertPreTrainedModel, __webpack_exports__ImageClassificationPipeline as ImageClassificationPipeline, __webpack_exports__ImageFeatureExtractionPipeline as ImageFeatureExtractionPipeline, __webpack_exports__ImageFeatureExtractor as ImageFeatureExtractor, __webpack_exports__ImageMattingOutput as ImageMattingOutput, __webpack_exports__ImageSegmentationPipeline as ImageSegmentationPipeline, __webpack_exports__ImageToImagePipeline as ImageToImagePipeline, __webpack_exports__ImageToTextPipeline as ImageToTextPipeline, __webpack_exports__InterruptableStoppingCriteria as InterruptableStoppingCriteria, __webpack_exports__JAISLMHeadModel as JAISLMHeadModel, __webpack_exports__JAISModel as JAISModel, __webpack_exports__JAISPreTrainedModel as JAISPreTrainedModel, __webpack_exports__LlamaForCausalLM as LlamaForCausalLM, __webpack_exports__LlamaModel as LlamaModel, __webpack_exports__LlamaPreTrainedModel as LlamaPreTrainedModel, __webpack_exports__LlamaTokenizer as LlamaTokenizer, __webpack_exports__LlavaForConditionalGeneration as LlavaForConditionalGeneration, __webpack_exports__LlavaPreTrainedModel as LlavaPreTrainedModel, __webpack_exports__LongT5ForConditionalGeneration as LongT5ForConditionalGeneration, __webpack_exports__LongT5Model as LongT5Model, __webpack_exports__LongT5PreTrainedModel as LongT5PreTrainedModel, __webpack_exports__M2M100ForConditionalGeneration as M2M100ForConditionalGeneration, __webpack_exports__M2M100Model as M2M100Model, __webpack_exports__M2M100PreTrainedModel as M2M100PreTrainedModel, __webpack_exports__M2M100Tokenizer as M2M100Tokenizer, __webpack_exports__MBart50Tokenizer as MBart50Tokenizer, __webpack_exports__MBartForCausalLM as MBartForCausalLM, __webpack_exports__MBartForConditionalGeneration as 
MBartForConditionalGeneration, __webpack_exports__MBartForSequenceClassification as MBartForSequenceClassification, __webpack_exports__MBartModel as MBartModel, __webpack_exports__MBartPreTrainedModel as MBartPreTrainedModel, __webpack_exports__MBartTokenizer as MBartTokenizer, __webpack_exports__MPNetForMaskedLM as MPNetForMaskedLM, __webpack_exports__MPNetForQuestionAnswering as MPNetForQuestionAnswering, __webpack_exports__MPNetForSequenceClassification as MPNetForSequenceClassification, __webpack_exports__MPNetForTokenClassification as MPNetForTokenClassification, __webpack_exports__MPNetModel as MPNetModel, __webpack_exports__MPNetPreTrainedModel as MPNetPreTrainedModel, __webpack_exports__MPNetTokenizer as MPNetTokenizer, __webpack_exports__MT5ForConditionalGeneration as MT5ForConditionalGeneration, __webpack_exports__MT5Model as MT5Model, __webpack_exports__MT5PreTrainedModel as MT5PreTrainedModel, __webpack_exports__MarianMTModel as MarianMTModel, __webpack_exports__MarianModel as MarianModel, __webpack_exports__MarianPreTrainedModel as MarianPreTrainedModel, __webpack_exports__MarianTokenizer as MarianTokenizer, __webpack_exports__MaskedLMOutput as MaskedLMOutput, __webpack_exports__MaxLengthCriteria as MaxLengthCriteria, __webpack_exports__MistralForCausalLM as MistralForCausalLM, __webpack_exports__MistralModel as MistralModel, __webpack_exports__MistralPreTrainedModel as MistralPreTrainedModel, __webpack_exports__MobileBertForMaskedLM as MobileBertForMaskedLM, __webpack_exports__MobileBertForQuestionAnswering as MobileBertForQuestionAnswering, __webpack_exports__MobileBertForSequenceClassification as MobileBertForSequenceClassification, __webpack_exports__MobileBertModel as MobileBertModel, __webpack_exports__MobileBertPreTrainedModel as MobileBertPreTrainedModel, __webpack_exports__MobileBertTokenizer as MobileBertTokenizer, __webpack_exports__MobileNetV1FeatureExtractor as MobileNetV1FeatureExtractor, __webpack_exports__MobileNetV1ForImageClassification as MobileNetV1ForImageClassification, __webpack_exports__MobileNetV1Model as MobileNetV1Model, __webpack_exports__MobileNetV1PreTrainedModel as MobileNetV1PreTrainedModel, __webpack_exports__MobileNetV2FeatureExtractor as MobileNetV2FeatureExtractor, __webpack_exports__MobileNetV2ForImageClassification as MobileNetV2ForImageClassification, __webpack_exports__MobileNetV2Model as MobileNetV2Model, __webpack_exports__MobileNetV2PreTrainedModel as MobileNetV2PreTrainedModel, __webpack_exports__MobileNetV3FeatureExtractor as MobileNetV3FeatureExtractor, __webpack_exports__MobileNetV3ForImageClassification as MobileNetV3ForImageClassification, __webpack_exports__MobileNetV3Model as MobileNetV3Model, __webpack_exports__MobileNetV3PreTrainedModel as MobileNetV3PreTrainedModel, __webpack_exports__MobileNetV4FeatureExtractor as MobileNetV4FeatureExtractor, __webpack_exports__MobileNetV4ForImageClassification as MobileNetV4ForImageClassification, __webpack_exports__MobileNetV4Model as MobileNetV4Model, __webpack_exports__MobileNetV4PreTrainedModel as MobileNetV4PreTrainedModel, __webpack_exports__MobileViTFeatureExtractor as MobileViTFeatureExtractor, __webpack_exports__MobileViTForImageClassification as MobileViTForImageClassification, __webpack_exports__MobileViTImageProcessor as MobileViTImageProcessor, __webpack_exports__MobileViTModel as MobileViTModel, __webpack_exports__MobileViTPreTrainedModel as MobileViTPreTrainedModel, __webpack_exports__MobileViTV2ForImageClassification as MobileViTV2ForImageClassification, 
__webpack_exports__MobileViTV2Model as MobileViTV2Model, __webpack_exports__MobileViTV2PreTrainedModel as MobileViTV2PreTrainedModel, __webpack_exports__ModelOutput as ModelOutput, __webpack_exports__Moondream1ForConditionalGeneration as Moondream1ForConditionalGeneration, __webpack_exports__MptForCausalLM as MptForCausalLM, __webpack_exports__MptModel as MptModel, __webpack_exports__MptPreTrainedModel as MptPreTrainedModel, __webpack_exports__MusicgenForCausalLM as MusicgenForCausalLM, __webpack_exports__MusicgenForConditionalGeneration as MusicgenForConditionalGeneration, __webpack_exports__MusicgenModel as MusicgenModel, __webpack_exports__MusicgenPreTrainedModel as MusicgenPreTrainedModel, __webpack_exports__NllbTokenizer as NllbTokenizer, __webpack_exports__NomicBertModel as NomicBertModel, __webpack_exports__NomicBertPreTrainedModel as NomicBertPreTrainedModel, __webpack_exports__NougatImageProcessor as NougatImageProcessor, __webpack_exports__NougatTokenizer as NougatTokenizer, __webpack_exports__OPTForCausalLM as OPTForCausalLM, __webpack_exports__OPTModel as OPTModel, __webpack_exports__OPTPreTrainedModel as OPTPreTrainedModel, __webpack_exports__ObjectDetectionPipeline as ObjectDetectionPipeline, __webpack_exports__OpenELMForCausalLM as OpenELMForCausalLM, __webpack_exports__OpenELMModel as OpenELMModel, __webpack_exports__OpenELMPreTrainedModel as OpenELMPreTrainedModel, __webpack_exports__OwlViTFeatureExtractor as OwlViTFeatureExtractor, __webpack_exports__OwlViTForObjectDetection as OwlViTForObjectDetection, __webpack_exports__OwlViTModel as OwlViTModel, __webpack_exports__OwlViTPreTrainedModel as OwlViTPreTrainedModel, __webpack_exports__OwlViTProcessor as OwlViTProcessor, __webpack_exports__Owlv2ForObjectDetection as Owlv2ForObjectDetection, __webpack_exports__Owlv2ImageProcessor as Owlv2ImageProcessor, __webpack_exports__Owlv2Model as Owlv2Model, __webpack_exports__Owlv2PreTrainedModel as Owlv2PreTrainedModel, __webpack_exports__Phi3ForCausalLM as Phi3ForCausalLM, __webpack_exports__Phi3Model as Phi3Model, __webpack_exports__Phi3PreTrainedModel as Phi3PreTrainedModel, __webpack_exports__PhiForCausalLM as PhiForCausalLM, __webpack_exports__PhiModel as PhiModel, __webpack_exports__PhiPreTrainedModel as PhiPreTrainedModel, __webpack_exports__Pipeline as Pipeline, __webpack_exports__PreTrainedModel as PreTrainedModel, __webpack_exports__PreTrainedTokenizer as PreTrainedTokenizer, __webpack_exports__PretrainedConfig as PretrainedConfig, __webpack_exports__PretrainedMixin as PretrainedMixin, __webpack_exports__Processor as Processor, __webpack_exports__PyAnnoteFeatureExtractor as PyAnnoteFeatureExtractor, __webpack_exports__PyAnnoteForAudioFrameClassification as PyAnnoteForAudioFrameClassification, __webpack_exports__PyAnnoteModel as PyAnnoteModel, __webpack_exports__PyAnnotePreTrainedModel as PyAnnotePreTrainedModel, __webpack_exports__PyAnnoteProcessor as PyAnnoteProcessor, __webpack_exports__QuestionAnsweringModelOutput as QuestionAnsweringModelOutput, __webpack_exports__QuestionAnsweringPipeline as QuestionAnsweringPipeline, __webpack_exports__Qwen2ForCausalLM as Qwen2ForCausalLM, __webpack_exports__Qwen2Model as Qwen2Model, __webpack_exports__Qwen2PreTrainedModel as Qwen2PreTrainedModel, __webpack_exports__Qwen2Tokenizer as Qwen2Tokenizer, __webpack_exports__RTDetrForObjectDetection as RTDetrForObjectDetection, __webpack_exports__RTDetrImageProcessor as RTDetrImageProcessor, __webpack_exports__RTDetrModel as RTDetrModel, __webpack_exports__RTDetrObjectDetectionOutput as 
RTDetrObjectDetectionOutput, __webpack_exports__RTDetrPreTrainedModel as RTDetrPreTrainedModel, __webpack_exports__RawImage as RawImage, __webpack_exports__ResNetForImageClassification as ResNetForImageClassification, __webpack_exports__ResNetModel as ResNetModel, __webpack_exports__ResNetPreTrainedModel as ResNetPreTrainedModel, __webpack_exports__RoFormerForMaskedLM as RoFormerForMaskedLM, __webpack_exports__RoFormerForQuestionAnswering as RoFormerForQuestionAnswering, __webpack_exports__RoFormerForSequenceClassification as RoFormerForSequenceClassification, __webpack_exports__RoFormerForTokenClassification as RoFormerForTokenClassification, __webpack_exports__RoFormerModel as RoFormerModel, __webpack_exports__RoFormerPreTrainedModel as RoFormerPreTrainedModel, __webpack_exports__RoFormerTokenizer as RoFormerTokenizer, __webpack_exports__RobertaForMaskedLM as RobertaForMaskedLM, __webpack_exports__RobertaForQuestionAnswering as RobertaForQuestionAnswering, __webpack_exports__RobertaForSequenceClassification as RobertaForSequenceClassification, __webpack_exports__RobertaForTokenClassification as RobertaForTokenClassification, __webpack_exports__RobertaModel as RobertaModel, __webpack_exports__RobertaPreTrainedModel as RobertaPreTrainedModel, __webpack_exports__RobertaTokenizer as RobertaTokenizer, __webpack_exports__SamImageProcessor as SamImageProcessor, __webpack_exports__SamImageSegmentationOutput as SamImageSegmentationOutput, __webpack_exports__SamModel as SamModel, __webpack_exports__SamPreTrainedModel as SamPreTrainedModel, __webpack_exports__SamProcessor as SamProcessor, __webpack_exports__SapiensFeatureExtractor as SapiensFeatureExtractor, __webpack_exports__SapiensForDepthEstimation as SapiensForDepthEstimation, __webpack_exports__SapiensForNormalEstimation as SapiensForNormalEstimation, __webpack_exports__SapiensForSemanticSegmentation as SapiensForSemanticSegmentation, __webpack_exports__SapiensPreTrainedModel as SapiensPreTrainedModel, __webpack_exports__SeamlessM4TFeatureExtractor as SeamlessM4TFeatureExtractor, __webpack_exports__SegformerFeatureExtractor as SegformerFeatureExtractor, __webpack_exports__SegformerForImageClassification as SegformerForImageClassification, __webpack_exports__SegformerForSemanticSegmentation as SegformerForSemanticSegmentation, __webpack_exports__SegformerModel as SegformerModel, __webpack_exports__SegformerPreTrainedModel as SegformerPreTrainedModel, __webpack_exports__Seq2SeqLMOutput as Seq2SeqLMOutput, __webpack_exports__SequenceClassifierOutput as SequenceClassifierOutput, __webpack_exports__SiglipImageProcessor as SiglipImageProcessor, __webpack_exports__SiglipModel as SiglipModel, __webpack_exports__SiglipPreTrainedModel as SiglipPreTrainedModel, __webpack_exports__SiglipTextModel as SiglipTextModel, __webpack_exports__SiglipTokenizer as SiglipTokenizer, __webpack_exports__SiglipVisionModel as SiglipVisionModel, __webpack_exports__SpeechT5FeatureExtractor as SpeechT5FeatureExtractor, __webpack_exports__SpeechT5ForSpeechToText as SpeechT5ForSpeechToText, __webpack_exports__SpeechT5ForTextToSpeech as SpeechT5ForTextToSpeech, __webpack_exports__SpeechT5HifiGan as SpeechT5HifiGan, __webpack_exports__SpeechT5Model as SpeechT5Model, __webpack_exports__SpeechT5PreTrainedModel as SpeechT5PreTrainedModel, __webpack_exports__SpeechT5Processor as SpeechT5Processor, __webpack_exports__SpeechT5Tokenizer as SpeechT5Tokenizer, __webpack_exports__SqueezeBertForMaskedLM as SqueezeBertForMaskedLM, __webpack_exports__SqueezeBertForQuestionAnswering as 
SqueezeBertForQuestionAnswering, __webpack_exports__SqueezeBertForSequenceClassification as SqueezeBertForSequenceClassification, __webpack_exports__SqueezeBertModel as SqueezeBertModel, __webpack_exports__SqueezeBertPreTrainedModel as SqueezeBertPreTrainedModel, __webpack_exports__SqueezeBertTokenizer as SqueezeBertTokenizer, __webpack_exports__StableLmForCausalLM as StableLmForCausalLM, __webpack_exports__StableLmModel as StableLmModel, __webpack_exports__StableLmPreTrainedModel as StableLmPreTrainedModel, __webpack_exports__Starcoder2ForCausalLM as Starcoder2ForCausalLM, __webpack_exports__Starcoder2Model as Starcoder2Model, __webpack_exports__Starcoder2PreTrainedModel as Starcoder2PreTrainedModel, __webpack_exports__StoppingCriteria as StoppingCriteria, __webpack_exports__StoppingCriteriaList as StoppingCriteriaList, __webpack_exports__SummarizationPipeline as SummarizationPipeline, __webpack_exports__Swin2SRForImageSuperResolution as Swin2SRForImageSuperResolution, __webpack_exports__Swin2SRImageProcessor as Swin2SRImageProcessor, __webpack_exports__Swin2SRModel as Swin2SRModel, __webpack_exports__Swin2SRPreTrainedModel as Swin2SRPreTrainedModel, __webpack_exports__SwinForImageClassification as SwinForImageClassification, __webpack_exports__SwinModel as SwinModel, __webpack_exports__SwinPreTrainedModel as SwinPreTrainedModel, __webpack_exports__T5ForConditionalGeneration as T5ForConditionalGeneration, __webpack_exports__T5Model as T5Model, __webpack_exports__T5PreTrainedModel as T5PreTrainedModel, __webpack_exports__T5Tokenizer as T5Tokenizer, __webpack_exports__TableTransformerForObjectDetection as TableTransformerForObjectDetection, __webpack_exports__TableTransformerModel as TableTransformerModel, __webpack_exports__TableTransformerObjectDetectionOutput as TableTransformerObjectDetectionOutput, __webpack_exports__TableTransformerPreTrainedModel as TableTransformerPreTrainedModel, __webpack_exports__Tensor as Tensor, __webpack_exports__Text2TextGenerationPipeline as Text2TextGenerationPipeline, __webpack_exports__TextClassificationPipeline as TextClassificationPipeline, __webpack_exports__TextGenerationPipeline as TextGenerationPipeline, __webpack_exports__TextStreamer as TextStreamer, __webpack_exports__TextToAudioPipeline as TextToAudioPipeline, __webpack_exports__TokenClassificationPipeline as TokenClassificationPipeline, __webpack_exports__TokenClassifierOutput as TokenClassifierOutput, __webpack_exports__TokenizerModel as TokenizerModel, __webpack_exports__TrOCRForCausalLM as TrOCRForCausalLM, __webpack_exports__TrOCRPreTrainedModel as TrOCRPreTrainedModel, __webpack_exports__TranslationPipeline as TranslationPipeline, __webpack_exports__UniSpeechForCTC as UniSpeechForCTC, __webpack_exports__UniSpeechForSequenceClassification as UniSpeechForSequenceClassification, __webpack_exports__UniSpeechModel as UniSpeechModel, __webpack_exports__UniSpeechPreTrainedModel as UniSpeechPreTrainedModel, __webpack_exports__UniSpeechSatForAudioFrameClassification as UniSpeechSatForAudioFrameClassification, __webpack_exports__UniSpeechSatForCTC as UniSpeechSatForCTC, __webpack_exports__UniSpeechSatForSequenceClassification as UniSpeechSatForSequenceClassification, __webpack_exports__UniSpeechSatModel as UniSpeechSatModel, __webpack_exports__UniSpeechSatPreTrainedModel as UniSpeechSatPreTrainedModel, __webpack_exports__ViTFeatureExtractor as ViTFeatureExtractor, __webpack_exports__ViTForImageClassification as ViTForImageClassification, __webpack_exports__ViTImageProcessor as ViTImageProcessor, 
__webpack_exports__ViTModel as ViTModel, __webpack_exports__ViTPreTrainedModel as ViTPreTrainedModel, __webpack_exports__VisionEncoderDecoderModel as VisionEncoderDecoderModel, __webpack_exports__VitMatteForImageMatting as VitMatteForImageMatting, __webpack_exports__VitMatteImageProcessor as VitMatteImageProcessor, __webpack_exports__VitMattePreTrainedModel as VitMattePreTrainedModel, __webpack_exports__VitsModel as VitsModel, __webpack_exports__VitsModelOutput as VitsModelOutput, __webpack_exports__VitsPreTrainedModel as VitsPreTrainedModel, __webpack_exports__VitsTokenizer as VitsTokenizer, __webpack_exports__Wav2Vec2BertForCTC as Wav2Vec2BertForCTC, __webpack_exports__Wav2Vec2BertForSequenceClassification as Wav2Vec2BertForSequenceClassification, __webpack_exports__Wav2Vec2BertModel as Wav2Vec2BertModel, __webpack_exports__Wav2Vec2BertPreTrainedModel as Wav2Vec2BertPreTrainedModel, __webpack_exports__Wav2Vec2CTCTokenizer as Wav2Vec2CTCTokenizer, __webpack_exports__Wav2Vec2FeatureExtractor as Wav2Vec2FeatureExtractor, __webpack_exports__Wav2Vec2ForAudioFrameClassification as Wav2Vec2ForAudioFrameClassification, __webpack_exports__Wav2Vec2ForCTC as Wav2Vec2ForCTC, __webpack_exports__Wav2Vec2ForSequenceClassification as Wav2Vec2ForSequenceClassification, __webpack_exports__Wav2Vec2Model as Wav2Vec2Model, __webpack_exports__Wav2Vec2PreTrainedModel as Wav2Vec2PreTrainedModel, __webpack_exports__Wav2Vec2ProcessorWithLM as Wav2Vec2ProcessorWithLM, __webpack_exports__WavLMForAudioFrameClassification as WavLMForAudioFrameClassification, __webpack_exports__WavLMForCTC as WavLMForCTC, __webpack_exports__WavLMForSequenceClassification as WavLMForSequenceClassification, __webpack_exports__WavLMForXVector as WavLMForXVector, __webpack_exports__WavLMModel as WavLMModel, __webpack_exports__WavLMPreTrainedModel as WavLMPreTrainedModel, __webpack_exports__WeSpeakerFeatureExtractor as WeSpeakerFeatureExtractor, __webpack_exports__WeSpeakerResNetModel as WeSpeakerResNetModel, __webpack_exports__WeSpeakerResNetPreTrainedModel as WeSpeakerResNetPreTrainedModel, __webpack_exports__WhisperFeatureExtractor as WhisperFeatureExtractor, __webpack_exports__WhisperForConditionalGeneration as WhisperForConditionalGeneration, __webpack_exports__WhisperModel as WhisperModel, __webpack_exports__WhisperPreTrainedModel as WhisperPreTrainedModel, __webpack_exports__WhisperProcessor as WhisperProcessor, __webpack_exports__WhisperTextStreamer as WhisperTextStreamer, __webpack_exports__WhisperTokenizer as WhisperTokenizer, __webpack_exports__XLMForQuestionAnswering as XLMForQuestionAnswering, __webpack_exports__XLMForSequenceClassification as XLMForSequenceClassification, __webpack_exports__XLMForTokenClassification as XLMForTokenClassification, __webpack_exports__XLMModel as XLMModel, __webpack_exports__XLMPreTrainedModel as XLMPreTrainedModel, __webpack_exports__XLMRobertaForMaskedLM as XLMRobertaForMaskedLM, __webpack_exports__XLMRobertaForQuestionAnswering as XLMRobertaForQuestionAnswering, __webpack_exports__XLMRobertaForSequenceClassification as XLMRobertaForSequenceClassification, __webpack_exports__XLMRobertaForTokenClassification as XLMRobertaForTokenClassification, __webpack_exports__XLMRobertaModel as XLMRobertaModel, __webpack_exports__XLMRobertaPreTrainedModel as XLMRobertaPreTrainedModel, __webpack_exports__XLMRobertaTokenizer as XLMRobertaTokenizer, __webpack_exports__XLMTokenizer as XLMTokenizer, __webpack_exports__XLMWithLMHeadModel as XLMWithLMHeadModel, __webpack_exports__XVectorOutput as XVectorOutput, 
__webpack_exports__YolosFeatureExtractor as YolosFeatureExtractor, __webpack_exports__YolosForObjectDetection as YolosForObjectDetection, __webpack_exports__YolosModel as YolosModel, __webpack_exports__YolosObjectDetectionOutput as YolosObjectDetectionOutput, __webpack_exports__YolosPreTrainedModel as YolosPreTrainedModel, __webpack_exports__ZeroShotAudioClassificationPipeline as ZeroShotAudioClassificationPipeline, __webpack_exports__ZeroShotClassificationPipeline as ZeroShotClassificationPipeline, __webpack_exports__ZeroShotImageClassificationPipeline as ZeroShotImageClassificationPipeline, __webpack_exports__ZeroShotObjectDetectionPipeline as ZeroShotObjectDetectionPipeline, __webpack_exports__bankers_round as bankers_round, __webpack_exports__cat as cat, __webpack_exports__cos_sim as cos_sim, __webpack_exports__dot as dot, __webpack_exports__dynamic_time_warping as dynamic_time_warping, __webpack_exports__env as env, __webpack_exports__full as full, __webpack_exports__full_like as full_like, __webpack_exports__getKeyValueShapes as getKeyValueShapes, __webpack_exports__hamming as hamming, __webpack_exports__hanning as hanning, __webpack_exports__interpolate as interpolate, __webpack_exports__interpolate_4d as interpolate_4d, __webpack_exports__interpolate_data as interpolate_data, __webpack_exports__is_chinese_char as is_chinese_char, __webpack_exports__layer_norm as layer_norm, __webpack_exports__log_softmax as log_softmax, __webpack_exports__magnitude as magnitude, __webpack_exports__matmul as matmul, __webpack_exports__max as max, __webpack_exports__mean as mean, __webpack_exports__mean_pooling as mean_pooling, __webpack_exports__medianFilter as medianFilter, __webpack_exports__mel_filter_bank as mel_filter_bank, __webpack_exports__min as min, __webpack_exports__ones as ones, __webpack_exports__ones_like as ones_like, __webpack_exports__permute as permute, __webpack_exports__permute_data as permute_data, __webpack_exports__pipeline as pipeline, __webpack_exports__quantize_embeddings as quantize_embeddings, __webpack_exports__read_audio as read_audio, __webpack_exports__rfft as rfft, __webpack_exports__round as round, __webpack_exports__softmax as softmax, __webpack_exports__spectrogram as spectrogram, __webpack_exports__stack as stack, __webpack_exports__std_mean as std_mean, __webpack_exports__topk as topk, __webpack_exports__window_function as window_function, __webpack_exports__zeros as zeros, __webpack_exports__zeros_like as zeros_like };
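The replacement export list that follows (marked `+`) is where alpha.16's new public API surfaces: it newly re-exports classes such as CLIPTextModel, CLIPVisionModel, GroupViTModel, DecisionTransformerModel, MaskFormerForInstanceSegmentation, PvtForImageClassification, ViTMAEModel, ViTMSNForImageClassification, and the AutoModelForUniversalSegmentation helper. A minimal sketch of consuming one of these new exports is below; the model id is a placeholder assumption (not taken from this diff), and the calling convention mirrors the existing CLIPTextModelWithProjection usage:

// Minimal sketch (not from this diff): loading CLIPTextModel, one of the
// classes newly exported in 3.0.0-alpha.16.
import { AutoTokenizer, CLIPTextModel } from '@huggingface/transformers';

const model_id = 'Xenova/clip-vit-base-patch16'; // assumed placeholder checkpoint
const tokenizer = await AutoTokenizer.from_pretrained(model_id);
const text_model = await CLIPTextModel.from_pretrained(model_id);

// Tokenize a batch of texts and run the text encoder.
const text_inputs = tokenizer(['a photo of a cat'], { padding: true, truncation: true });
const { last_hidden_state } = await text_model(text_inputs);
console.log(last_hidden_state.dims); // e.g. [1, sequence_length, hidden_size]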
+
export { __webpack_exports__ASTFeatureExtractor as ASTFeatureExtractor, __webpack_exports__ASTForAudioClassification as ASTForAudioClassification, __webpack_exports__ASTModel as ASTModel, __webpack_exports__ASTPreTrainedModel as ASTPreTrainedModel, __webpack_exports__AlbertForMaskedLM as AlbertForMaskedLM, __webpack_exports__AlbertForQuestionAnswering as AlbertForQuestionAnswering, __webpack_exports__AlbertForSequenceClassification as AlbertForSequenceClassification, __webpack_exports__AlbertModel as AlbertModel, __webpack_exports__AlbertPreTrainedModel as AlbertPreTrainedModel, __webpack_exports__AlbertTokenizer as AlbertTokenizer, __webpack_exports__AudioClassificationPipeline as AudioClassificationPipeline, __webpack_exports__AutoConfig as AutoConfig, __webpack_exports__AutoModel as AutoModel, __webpack_exports__AutoModelForAudioClassification as AutoModelForAudioClassification, __webpack_exports__AutoModelForAudioFrameClassification as AutoModelForAudioFrameClassification, __webpack_exports__AutoModelForCTC as AutoModelForCTC, __webpack_exports__AutoModelForCausalLM as AutoModelForCausalLM, __webpack_exports__AutoModelForDepthEstimation as AutoModelForDepthEstimation, __webpack_exports__AutoModelForDocumentQuestionAnswering as AutoModelForDocumentQuestionAnswering, __webpack_exports__AutoModelForImageClassification as AutoModelForImageClassification, __webpack_exports__AutoModelForImageFeatureExtraction as AutoModelForImageFeatureExtraction, __webpack_exports__AutoModelForImageMatting as AutoModelForImageMatting, __webpack_exports__AutoModelForImageSegmentation as AutoModelForImageSegmentation, __webpack_exports__AutoModelForImageToImage as AutoModelForImageToImage, __webpack_exports__AutoModelForMaskGeneration as AutoModelForMaskGeneration, __webpack_exports__AutoModelForMaskedLM as AutoModelForMaskedLM, __webpack_exports__AutoModelForNormalEstimation as AutoModelForNormalEstimation, __webpack_exports__AutoModelForObjectDetection as AutoModelForObjectDetection, __webpack_exports__AutoModelForQuestionAnswering as AutoModelForQuestionAnswering, __webpack_exports__AutoModelForSemanticSegmentation as AutoModelForSemanticSegmentation, __webpack_exports__AutoModelForSeq2SeqLM as AutoModelForSeq2SeqLM, __webpack_exports__AutoModelForSequenceClassification as AutoModelForSequenceClassification, __webpack_exports__AutoModelForSpeechSeq2Seq as AutoModelForSpeechSeq2Seq, __webpack_exports__AutoModelForTextToSpectrogram as AutoModelForTextToSpectrogram, __webpack_exports__AutoModelForTextToWaveform as AutoModelForTextToWaveform, __webpack_exports__AutoModelForTokenClassification as AutoModelForTokenClassification, __webpack_exports__AutoModelForUniversalSegmentation as AutoModelForUniversalSegmentation, __webpack_exports__AutoModelForVision2Seq as AutoModelForVision2Seq, __webpack_exports__AutoModelForXVector as AutoModelForXVector, __webpack_exports__AutoModelForZeroShotObjectDetection as AutoModelForZeroShotObjectDetection, __webpack_exports__AutoProcessor as AutoProcessor, __webpack_exports__AutoTokenizer as AutoTokenizer, __webpack_exports__AutomaticSpeechRecognitionPipeline as AutomaticSpeechRecognitionPipeline, __webpack_exports__BartForConditionalGeneration as BartForConditionalGeneration, __webpack_exports__BartForSequenceClassification as BartForSequenceClassification, __webpack_exports__BartModel as BartModel, __webpack_exports__BartPretrainedModel as BartPretrainedModel, __webpack_exports__BartTokenizer as BartTokenizer, __webpack_exports__BaseModelOutput as BaseModelOutput, 
__webpack_exports__BaseStreamer as BaseStreamer, __webpack_exports__BeitFeatureExtractor as BeitFeatureExtractor, __webpack_exports__BeitForImageClassification as BeitForImageClassification, __webpack_exports__BeitModel as BeitModel, __webpack_exports__BeitPreTrainedModel as BeitPreTrainedModel, __webpack_exports__BertForMaskedLM as BertForMaskedLM, __webpack_exports__BertForQuestionAnswering as BertForQuestionAnswering, __webpack_exports__BertForSequenceClassification as BertForSequenceClassification, __webpack_exports__BertForTokenClassification as BertForTokenClassification, __webpack_exports__BertModel as BertModel, __webpack_exports__BertPreTrainedModel as BertPreTrainedModel, __webpack_exports__BertTokenizer as BertTokenizer, __webpack_exports__BitImageProcessor as BitImageProcessor, __webpack_exports__BlenderbotForConditionalGeneration as BlenderbotForConditionalGeneration, __webpack_exports__BlenderbotModel as BlenderbotModel, __webpack_exports__BlenderbotPreTrainedModel as BlenderbotPreTrainedModel, __webpack_exports__BlenderbotSmallForConditionalGeneration as BlenderbotSmallForConditionalGeneration, __webpack_exports__BlenderbotSmallModel as BlenderbotSmallModel, __webpack_exports__BlenderbotSmallPreTrainedModel as BlenderbotSmallPreTrainedModel, __webpack_exports__BlenderbotSmallTokenizer as BlenderbotSmallTokenizer, __webpack_exports__BlenderbotTokenizer as BlenderbotTokenizer, __webpack_exports__BloomForCausalLM as BloomForCausalLM, __webpack_exports__BloomModel as BloomModel, __webpack_exports__BloomPreTrainedModel as BloomPreTrainedModel, __webpack_exports__BloomTokenizer as BloomTokenizer, __webpack_exports__CLIPFeatureExtractor as CLIPFeatureExtractor, __webpack_exports__CLIPImageProcessor as CLIPImageProcessor, __webpack_exports__CLIPModel as CLIPModel, __webpack_exports__CLIPPreTrainedModel as CLIPPreTrainedModel, __webpack_exports__CLIPSegForImageSegmentation as CLIPSegForImageSegmentation, __webpack_exports__CLIPSegModel as CLIPSegModel, __webpack_exports__CLIPSegPreTrainedModel as CLIPSegPreTrainedModel, __webpack_exports__CLIPTextModel as CLIPTextModel, __webpack_exports__CLIPTextModelWithProjection as CLIPTextModelWithProjection, __webpack_exports__CLIPTokenizer as CLIPTokenizer, __webpack_exports__CLIPVisionModel as CLIPVisionModel, __webpack_exports__CLIPVisionModelWithProjection as CLIPVisionModelWithProjection, __webpack_exports__CamembertForMaskedLM as CamembertForMaskedLM, __webpack_exports__CamembertForQuestionAnswering as CamembertForQuestionAnswering, __webpack_exports__CamembertForSequenceClassification as CamembertForSequenceClassification, __webpack_exports__CamembertForTokenClassification as CamembertForTokenClassification, __webpack_exports__CamembertModel as CamembertModel, __webpack_exports__CamembertPreTrainedModel as CamembertPreTrainedModel, __webpack_exports__CamembertTokenizer as CamembertTokenizer, __webpack_exports__CausalLMOutput as CausalLMOutput, __webpack_exports__CausalLMOutputWithPast as CausalLMOutputWithPast, __webpack_exports__ChineseCLIPFeatureExtractor as ChineseCLIPFeatureExtractor, __webpack_exports__ChineseCLIPModel as ChineseCLIPModel, __webpack_exports__ChineseCLIPPreTrainedModel as ChineseCLIPPreTrainedModel, __webpack_exports__ClapAudioModelWithProjection as ClapAudioModelWithProjection, __webpack_exports__ClapFeatureExtractor as ClapFeatureExtractor, __webpack_exports__ClapModel as ClapModel, __webpack_exports__ClapPreTrainedModel as ClapPreTrainedModel, __webpack_exports__ClapTextModelWithProjection as 
ClapTextModelWithProjection, __webpack_exports__CodeGenForCausalLM as CodeGenForCausalLM, __webpack_exports__CodeGenModel as CodeGenModel, __webpack_exports__CodeGenPreTrainedModel as CodeGenPreTrainedModel, __webpack_exports__CodeGenTokenizer as CodeGenTokenizer, __webpack_exports__CodeLlamaTokenizer as CodeLlamaTokenizer, __webpack_exports__CohereForCausalLM as CohereForCausalLM, __webpack_exports__CohereModel as CohereModel, __webpack_exports__CoherePreTrainedModel as CoherePreTrainedModel, __webpack_exports__CohereTokenizer as CohereTokenizer, __webpack_exports__ConvBertForMaskedLM as ConvBertForMaskedLM, __webpack_exports__ConvBertForQuestionAnswering as ConvBertForQuestionAnswering, __webpack_exports__ConvBertForSequenceClassification as ConvBertForSequenceClassification, __webpack_exports__ConvBertForTokenClassification as ConvBertForTokenClassification, __webpack_exports__ConvBertModel as ConvBertModel, __webpack_exports__ConvBertPreTrainedModel as ConvBertPreTrainedModel, __webpack_exports__ConvBertTokenizer as ConvBertTokenizer, __webpack_exports__ConvNextFeatureExtractor as ConvNextFeatureExtractor, __webpack_exports__ConvNextForImageClassification as ConvNextForImageClassification, __webpack_exports__ConvNextImageProcessor as ConvNextImageProcessor, __webpack_exports__ConvNextModel as ConvNextModel, __webpack_exports__ConvNextPreTrainedModel as ConvNextPreTrainedModel, __webpack_exports__ConvNextV2ForImageClassification as ConvNextV2ForImageClassification, __webpack_exports__ConvNextV2Model as ConvNextV2Model, __webpack_exports__ConvNextV2PreTrainedModel as ConvNextV2PreTrainedModel, __webpack_exports__DPTFeatureExtractor as DPTFeatureExtractor, __webpack_exports__DPTForDepthEstimation as DPTForDepthEstimation, __webpack_exports__DPTImageProcessor as DPTImageProcessor, __webpack_exports__DPTModel as DPTModel, __webpack_exports__DPTPreTrainedModel as DPTPreTrainedModel, __webpack_exports__DebertaForMaskedLM as DebertaForMaskedLM, __webpack_exports__DebertaForQuestionAnswering as DebertaForQuestionAnswering, __webpack_exports__DebertaForSequenceClassification as DebertaForSequenceClassification, __webpack_exports__DebertaForTokenClassification as DebertaForTokenClassification, __webpack_exports__DebertaModel as DebertaModel, __webpack_exports__DebertaPreTrainedModel as DebertaPreTrainedModel, __webpack_exports__DebertaTokenizer as DebertaTokenizer, __webpack_exports__DebertaV2ForMaskedLM as DebertaV2ForMaskedLM, __webpack_exports__DebertaV2ForQuestionAnswering as DebertaV2ForQuestionAnswering, __webpack_exports__DebertaV2ForSequenceClassification as DebertaV2ForSequenceClassification, __webpack_exports__DebertaV2ForTokenClassification as DebertaV2ForTokenClassification, __webpack_exports__DebertaV2Model as DebertaV2Model, __webpack_exports__DebertaV2PreTrainedModel as DebertaV2PreTrainedModel, __webpack_exports__DebertaV2Tokenizer as DebertaV2Tokenizer, __webpack_exports__DecisionTransformerModel as DecisionTransformerModel, __webpack_exports__DecisionTransformerPreTrainedModel as DecisionTransformerPreTrainedModel, __webpack_exports__DeiTFeatureExtractor as DeiTFeatureExtractor, __webpack_exports__DeiTForImageClassification as DeiTForImageClassification, __webpack_exports__DeiTModel as DeiTModel, __webpack_exports__DeiTPreTrainedModel as DeiTPreTrainedModel, __webpack_exports__DepthAnythingForDepthEstimation as DepthAnythingForDepthEstimation, __webpack_exports__DepthAnythingPreTrainedModel as DepthAnythingPreTrainedModel, __webpack_exports__DepthEstimationPipeline as 
DepthEstimationPipeline, __webpack_exports__DetrFeatureExtractor as DetrFeatureExtractor, __webpack_exports__DetrForObjectDetection as DetrForObjectDetection, __webpack_exports__DetrForSegmentation as DetrForSegmentation, __webpack_exports__DetrModel as DetrModel, __webpack_exports__DetrObjectDetectionOutput as DetrObjectDetectionOutput, __webpack_exports__DetrPreTrainedModel as DetrPreTrainedModel, __webpack_exports__DetrSegmentationOutput as DetrSegmentationOutput, __webpack_exports__Dinov2ForImageClassification as Dinov2ForImageClassification, __webpack_exports__Dinov2Model as Dinov2Model, __webpack_exports__Dinov2PreTrainedModel as Dinov2PreTrainedModel, __webpack_exports__DistilBertForMaskedLM as DistilBertForMaskedLM, __webpack_exports__DistilBertForQuestionAnswering as DistilBertForQuestionAnswering, __webpack_exports__DistilBertForSequenceClassification as DistilBertForSequenceClassification, __webpack_exports__DistilBertForTokenClassification as DistilBertForTokenClassification, __webpack_exports__DistilBertModel as DistilBertModel, __webpack_exports__DistilBertPreTrainedModel as DistilBertPreTrainedModel, __webpack_exports__DistilBertTokenizer as DistilBertTokenizer, __webpack_exports__DocumentQuestionAnsweringPipeline as DocumentQuestionAnsweringPipeline, __webpack_exports__DonutFeatureExtractor as DonutFeatureExtractor, __webpack_exports__DonutSwinModel as DonutSwinModel, __webpack_exports__DonutSwinPreTrainedModel as DonutSwinPreTrainedModel, __webpack_exports__EfficientNetForImageClassification as EfficientNetForImageClassification, __webpack_exports__EfficientNetImageProcessor as EfficientNetImageProcessor, __webpack_exports__EfficientNetModel as EfficientNetModel, __webpack_exports__EfficientNetPreTrainedModel as EfficientNetPreTrainedModel, __webpack_exports__ElectraForMaskedLM as ElectraForMaskedLM, __webpack_exports__ElectraForQuestionAnswering as ElectraForQuestionAnswering, __webpack_exports__ElectraForSequenceClassification as ElectraForSequenceClassification, __webpack_exports__ElectraForTokenClassification as ElectraForTokenClassification, __webpack_exports__ElectraModel as ElectraModel, __webpack_exports__ElectraPreTrainedModel as ElectraPreTrainedModel, __webpack_exports__ElectraTokenizer as ElectraTokenizer, __webpack_exports__EosTokenCriteria as EosTokenCriteria, __webpack_exports__EsmForMaskedLM as EsmForMaskedLM, __webpack_exports__EsmForSequenceClassification as EsmForSequenceClassification, __webpack_exports__EsmForTokenClassification as EsmForTokenClassification, __webpack_exports__EsmModel as EsmModel, __webpack_exports__EsmPreTrainedModel as EsmPreTrainedModel, __webpack_exports__EsmTokenizer as EsmTokenizer, __webpack_exports__FFT as FFT, __webpack_exports__FalconForCausalLM as FalconForCausalLM, __webpack_exports__FalconModel as FalconModel, __webpack_exports__FalconPreTrainedModel as FalconPreTrainedModel, __webpack_exports__FalconTokenizer as FalconTokenizer, __webpack_exports__FastViTForImageClassification as FastViTForImageClassification, __webpack_exports__FastViTModel as FastViTModel, __webpack_exports__FastViTPreTrainedModel as FastViTPreTrainedModel, __webpack_exports__FeatureExtractionPipeline as FeatureExtractionPipeline, __webpack_exports__FeatureExtractor as FeatureExtractor, __webpack_exports__FillMaskPipeline as FillMaskPipeline, __webpack_exports__Florence2ForConditionalGeneration as Florence2ForConditionalGeneration, __webpack_exports__Florence2PreTrainedModel as Florence2PreTrainedModel, __webpack_exports__Florence2Processor as 
Florence2Processor, __webpack_exports__GLPNFeatureExtractor as GLPNFeatureExtractor, __webpack_exports__GLPNForDepthEstimation as GLPNForDepthEstimation, __webpack_exports__GLPNModel as GLPNModel, __webpack_exports__GLPNPreTrainedModel as GLPNPreTrainedModel, __webpack_exports__GPT2LMHeadModel as GPT2LMHeadModel, __webpack_exports__GPT2Model as GPT2Model, __webpack_exports__GPT2PreTrainedModel as GPT2PreTrainedModel, __webpack_exports__GPT2Tokenizer as GPT2Tokenizer, __webpack_exports__GPTBigCodeForCausalLM as GPTBigCodeForCausalLM, __webpack_exports__GPTBigCodeModel as GPTBigCodeModel, __webpack_exports__GPTBigCodePreTrainedModel as GPTBigCodePreTrainedModel, __webpack_exports__GPTJForCausalLM as GPTJForCausalLM, __webpack_exports__GPTJModel as GPTJModel, __webpack_exports__GPTJPreTrainedModel as GPTJPreTrainedModel, __webpack_exports__GPTNeoForCausalLM as GPTNeoForCausalLM, __webpack_exports__GPTNeoModel as GPTNeoModel, __webpack_exports__GPTNeoPreTrainedModel as GPTNeoPreTrainedModel, __webpack_exports__GPTNeoXForCausalLM as GPTNeoXForCausalLM, __webpack_exports__GPTNeoXModel as GPTNeoXModel, __webpack_exports__GPTNeoXPreTrainedModel as GPTNeoXPreTrainedModel, __webpack_exports__GPTNeoXTokenizer as GPTNeoXTokenizer, __webpack_exports__Gemma2ForCausalLM as Gemma2ForCausalLM, __webpack_exports__Gemma2Model as Gemma2Model, __webpack_exports__Gemma2PreTrainedModel as Gemma2PreTrainedModel, __webpack_exports__GemmaForCausalLM as GemmaForCausalLM, __webpack_exports__GemmaModel as GemmaModel, __webpack_exports__GemmaPreTrainedModel as GemmaPreTrainedModel, __webpack_exports__GemmaTokenizer as GemmaTokenizer, __webpack_exports__Grok1Tokenizer as Grok1Tokenizer, __webpack_exports__GroupViTModel as GroupViTModel, __webpack_exports__GroupViTPreTrainedModel as GroupViTPreTrainedModel, __webpack_exports__HerbertTokenizer as HerbertTokenizer, __webpack_exports__HieraForImageClassification as HieraForImageClassification, __webpack_exports__HieraModel as HieraModel, __webpack_exports__HieraPreTrainedModel as HieraPreTrainedModel, __webpack_exports__HubertForCTC as HubertForCTC, __webpack_exports__HubertForSequenceClassification as HubertForSequenceClassification, __webpack_exports__HubertModel as HubertModel, __webpack_exports__HubertPreTrainedModel as HubertPreTrainedModel, __webpack_exports__ImageClassificationPipeline as ImageClassificationPipeline, __webpack_exports__ImageFeatureExtractionPipeline as ImageFeatureExtractionPipeline, __webpack_exports__ImageFeatureExtractor as ImageFeatureExtractor, __webpack_exports__ImageMattingOutput as ImageMattingOutput, __webpack_exports__ImageSegmentationPipeline as ImageSegmentationPipeline, __webpack_exports__ImageToImagePipeline as ImageToImagePipeline, __webpack_exports__ImageToTextPipeline as ImageToTextPipeline, __webpack_exports__InterruptableStoppingCriteria as InterruptableStoppingCriteria, __webpack_exports__JAISLMHeadModel as JAISLMHeadModel, __webpack_exports__JAISModel as JAISModel, __webpack_exports__JAISPreTrainedModel as JAISPreTrainedModel, __webpack_exports__LlamaForCausalLM as LlamaForCausalLM, __webpack_exports__LlamaModel as LlamaModel, __webpack_exports__LlamaPreTrainedModel as LlamaPreTrainedModel, __webpack_exports__LlamaTokenizer as LlamaTokenizer, __webpack_exports__LlavaForConditionalGeneration as LlavaForConditionalGeneration, __webpack_exports__LlavaPreTrainedModel as LlavaPreTrainedModel, __webpack_exports__LongT5ForConditionalGeneration as LongT5ForConditionalGeneration, __webpack_exports__LongT5Model as LongT5Model, 
__webpack_exports__LongT5PreTrainedModel as LongT5PreTrainedModel, __webpack_exports__M2M100ForConditionalGeneration as M2M100ForConditionalGeneration, __webpack_exports__M2M100Model as M2M100Model, __webpack_exports__M2M100PreTrainedModel as M2M100PreTrainedModel, __webpack_exports__M2M100Tokenizer as M2M100Tokenizer, __webpack_exports__MBart50Tokenizer as MBart50Tokenizer, __webpack_exports__MBartForCausalLM as MBartForCausalLM, __webpack_exports__MBartForConditionalGeneration as MBartForConditionalGeneration, __webpack_exports__MBartForSequenceClassification as MBartForSequenceClassification, __webpack_exports__MBartModel as MBartModel, __webpack_exports__MBartPreTrainedModel as MBartPreTrainedModel, __webpack_exports__MBartTokenizer as MBartTokenizer, __webpack_exports__MPNetForMaskedLM as MPNetForMaskedLM, __webpack_exports__MPNetForQuestionAnswering as MPNetForQuestionAnswering, __webpack_exports__MPNetForSequenceClassification as MPNetForSequenceClassification, __webpack_exports__MPNetForTokenClassification as MPNetForTokenClassification, __webpack_exports__MPNetModel as MPNetModel, __webpack_exports__MPNetPreTrainedModel as MPNetPreTrainedModel, __webpack_exports__MPNetTokenizer as MPNetTokenizer, __webpack_exports__MT5ForConditionalGeneration as MT5ForConditionalGeneration, __webpack_exports__MT5Model as MT5Model, __webpack_exports__MT5PreTrainedModel as MT5PreTrainedModel, __webpack_exports__MarianMTModel as MarianMTModel, __webpack_exports__MarianModel as MarianModel, __webpack_exports__MarianPreTrainedModel as MarianPreTrainedModel, __webpack_exports__MarianTokenizer as MarianTokenizer, __webpack_exports__MaskFormerFeatureExtractor as MaskFormerFeatureExtractor, __webpack_exports__MaskFormerForInstanceSegmentation as MaskFormerForInstanceSegmentation, __webpack_exports__MaskFormerModel as MaskFormerModel, __webpack_exports__MaskFormerPreTrainedModel as MaskFormerPreTrainedModel, __webpack_exports__MaskedLMOutput as MaskedLMOutput, __webpack_exports__MaxLengthCriteria as MaxLengthCriteria, __webpack_exports__MistralForCausalLM as MistralForCausalLM, __webpack_exports__MistralModel as MistralModel, __webpack_exports__MistralPreTrainedModel as MistralPreTrainedModel, __webpack_exports__MobileBertForMaskedLM as MobileBertForMaskedLM, __webpack_exports__MobileBertForQuestionAnswering as MobileBertForQuestionAnswering, __webpack_exports__MobileBertForSequenceClassification as MobileBertForSequenceClassification, __webpack_exports__MobileBertModel as MobileBertModel, __webpack_exports__MobileBertPreTrainedModel as MobileBertPreTrainedModel, __webpack_exports__MobileBertTokenizer as MobileBertTokenizer, __webpack_exports__MobileNetV1FeatureExtractor as MobileNetV1FeatureExtractor, __webpack_exports__MobileNetV1ForImageClassification as MobileNetV1ForImageClassification, __webpack_exports__MobileNetV1Model as MobileNetV1Model, __webpack_exports__MobileNetV1PreTrainedModel as MobileNetV1PreTrainedModel, __webpack_exports__MobileNetV2FeatureExtractor as MobileNetV2FeatureExtractor, __webpack_exports__MobileNetV2ForImageClassification as MobileNetV2ForImageClassification, __webpack_exports__MobileNetV2Model as MobileNetV2Model, __webpack_exports__MobileNetV2PreTrainedModel as MobileNetV2PreTrainedModel, __webpack_exports__MobileNetV3FeatureExtractor as MobileNetV3FeatureExtractor, __webpack_exports__MobileNetV3ForImageClassification as MobileNetV3ForImageClassification, __webpack_exports__MobileNetV3Model as MobileNetV3Model, __webpack_exports__MobileNetV3PreTrainedModel as 
MobileNetV3PreTrainedModel, __webpack_exports__MobileNetV4FeatureExtractor as MobileNetV4FeatureExtractor, __webpack_exports__MobileNetV4ForImageClassification as MobileNetV4ForImageClassification, __webpack_exports__MobileNetV4Model as MobileNetV4Model, __webpack_exports__MobileNetV4PreTrainedModel as MobileNetV4PreTrainedModel, __webpack_exports__MobileViTFeatureExtractor as MobileViTFeatureExtractor, __webpack_exports__MobileViTForImageClassification as MobileViTForImageClassification, __webpack_exports__MobileViTImageProcessor as MobileViTImageProcessor, __webpack_exports__MobileViTModel as MobileViTModel, __webpack_exports__MobileViTPreTrainedModel as MobileViTPreTrainedModel, __webpack_exports__MobileViTV2ForImageClassification as MobileViTV2ForImageClassification, __webpack_exports__MobileViTV2Model as MobileViTV2Model, __webpack_exports__MobileViTV2PreTrainedModel as MobileViTV2PreTrainedModel, __webpack_exports__ModelOutput as ModelOutput, __webpack_exports__Moondream1ForConditionalGeneration as Moondream1ForConditionalGeneration, __webpack_exports__MptForCausalLM as MptForCausalLM, __webpack_exports__MptModel as MptModel, __webpack_exports__MptPreTrainedModel as MptPreTrainedModel, __webpack_exports__MusicgenForCausalLM as MusicgenForCausalLM, __webpack_exports__MusicgenForConditionalGeneration as MusicgenForConditionalGeneration, __webpack_exports__MusicgenModel as MusicgenModel, __webpack_exports__MusicgenPreTrainedModel as MusicgenPreTrainedModel, __webpack_exports__NllbTokenizer as NllbTokenizer, __webpack_exports__NomicBertModel as NomicBertModel, __webpack_exports__NomicBertPreTrainedModel as NomicBertPreTrainedModel, __webpack_exports__NougatImageProcessor as NougatImageProcessor, __webpack_exports__NougatTokenizer as NougatTokenizer, __webpack_exports__OPTForCausalLM as OPTForCausalLM, __webpack_exports__OPTModel as OPTModel, __webpack_exports__OPTPreTrainedModel as OPTPreTrainedModel, __webpack_exports__ObjectDetectionPipeline as ObjectDetectionPipeline, __webpack_exports__OpenELMForCausalLM as OpenELMForCausalLM, __webpack_exports__OpenELMModel as OpenELMModel, __webpack_exports__OpenELMPreTrainedModel as OpenELMPreTrainedModel, __webpack_exports__OwlViTFeatureExtractor as OwlViTFeatureExtractor, __webpack_exports__OwlViTForObjectDetection as OwlViTForObjectDetection, __webpack_exports__OwlViTModel as OwlViTModel, __webpack_exports__OwlViTPreTrainedModel as OwlViTPreTrainedModel, __webpack_exports__OwlViTProcessor as OwlViTProcessor, __webpack_exports__Owlv2ForObjectDetection as Owlv2ForObjectDetection, __webpack_exports__Owlv2ImageProcessor as Owlv2ImageProcessor, __webpack_exports__Owlv2Model as Owlv2Model, __webpack_exports__Owlv2PreTrainedModel as Owlv2PreTrainedModel, __webpack_exports__Phi3ForCausalLM as Phi3ForCausalLM, __webpack_exports__Phi3Model as Phi3Model, __webpack_exports__Phi3PreTrainedModel as Phi3PreTrainedModel, __webpack_exports__PhiForCausalLM as PhiForCausalLM, __webpack_exports__PhiModel as PhiModel, __webpack_exports__PhiPreTrainedModel as PhiPreTrainedModel, __webpack_exports__Pipeline as Pipeline, __webpack_exports__PreTrainedModel as PreTrainedModel, __webpack_exports__PreTrainedTokenizer as PreTrainedTokenizer, __webpack_exports__PretrainedConfig as PretrainedConfig, __webpack_exports__PretrainedMixin as PretrainedMixin, __webpack_exports__Processor as Processor, __webpack_exports__PvtForImageClassification as PvtForImageClassification, __webpack_exports__PvtImageProcessor as PvtImageProcessor, __webpack_exports__PvtModel as PvtModel, 
__webpack_exports__PvtPreTrainedModel as PvtPreTrainedModel, __webpack_exports__PyAnnoteFeatureExtractor as PyAnnoteFeatureExtractor, __webpack_exports__PyAnnoteForAudioFrameClassification as PyAnnoteForAudioFrameClassification, __webpack_exports__PyAnnoteModel as PyAnnoteModel, __webpack_exports__PyAnnotePreTrainedModel as PyAnnotePreTrainedModel, __webpack_exports__PyAnnoteProcessor as PyAnnoteProcessor, __webpack_exports__QuestionAnsweringModelOutput as QuestionAnsweringModelOutput, __webpack_exports__QuestionAnsweringPipeline as QuestionAnsweringPipeline, __webpack_exports__Qwen2ForCausalLM as Qwen2ForCausalLM, __webpack_exports__Qwen2Model as Qwen2Model, __webpack_exports__Qwen2PreTrainedModel as Qwen2PreTrainedModel, __webpack_exports__Qwen2Tokenizer as Qwen2Tokenizer, __webpack_exports__RTDetrForObjectDetection as RTDetrForObjectDetection, __webpack_exports__RTDetrImageProcessor as RTDetrImageProcessor, __webpack_exports__RTDetrModel as RTDetrModel, __webpack_exports__RTDetrObjectDetectionOutput as RTDetrObjectDetectionOutput, __webpack_exports__RTDetrPreTrainedModel as RTDetrPreTrainedModel, __webpack_exports__RawImage as RawImage, __webpack_exports__ResNetForImageClassification as ResNetForImageClassification, __webpack_exports__ResNetModel as ResNetModel, __webpack_exports__ResNetPreTrainedModel as ResNetPreTrainedModel, __webpack_exports__RoFormerForMaskedLM as RoFormerForMaskedLM, __webpack_exports__RoFormerForQuestionAnswering as RoFormerForQuestionAnswering, __webpack_exports__RoFormerForSequenceClassification as RoFormerForSequenceClassification, __webpack_exports__RoFormerForTokenClassification as RoFormerForTokenClassification, __webpack_exports__RoFormerModel as RoFormerModel, __webpack_exports__RoFormerPreTrainedModel as RoFormerPreTrainedModel, __webpack_exports__RoFormerTokenizer as RoFormerTokenizer, __webpack_exports__RobertaForMaskedLM as RobertaForMaskedLM, __webpack_exports__RobertaForQuestionAnswering as RobertaForQuestionAnswering, __webpack_exports__RobertaForSequenceClassification as RobertaForSequenceClassification, __webpack_exports__RobertaForTokenClassification as RobertaForTokenClassification, __webpack_exports__RobertaModel as RobertaModel, __webpack_exports__RobertaPreTrainedModel as RobertaPreTrainedModel, __webpack_exports__RobertaTokenizer as RobertaTokenizer, __webpack_exports__SamImageProcessor as SamImageProcessor, __webpack_exports__SamImageSegmentationOutput as SamImageSegmentationOutput, __webpack_exports__SamModel as SamModel, __webpack_exports__SamPreTrainedModel as SamPreTrainedModel, __webpack_exports__SamProcessor as SamProcessor, __webpack_exports__SapiensFeatureExtractor as SapiensFeatureExtractor, __webpack_exports__SapiensForDepthEstimation as SapiensForDepthEstimation, __webpack_exports__SapiensForNormalEstimation as SapiensForNormalEstimation, __webpack_exports__SapiensForSemanticSegmentation as SapiensForSemanticSegmentation, __webpack_exports__SapiensPreTrainedModel as SapiensPreTrainedModel, __webpack_exports__SeamlessM4TFeatureExtractor as SeamlessM4TFeatureExtractor, __webpack_exports__SegformerFeatureExtractor as SegformerFeatureExtractor, __webpack_exports__SegformerForImageClassification as SegformerForImageClassification, __webpack_exports__SegformerForSemanticSegmentation as SegformerForSemanticSegmentation, __webpack_exports__SegformerModel as SegformerModel, __webpack_exports__SegformerPreTrainedModel as SegformerPreTrainedModel, __webpack_exports__Seq2SeqLMOutput as Seq2SeqLMOutput, 
__webpack_exports__SequenceClassifierOutput as SequenceClassifierOutput, __webpack_exports__SiglipImageProcessor as SiglipImageProcessor, __webpack_exports__SiglipModel as SiglipModel, __webpack_exports__SiglipPreTrainedModel as SiglipPreTrainedModel, __webpack_exports__SiglipTextModel as SiglipTextModel, __webpack_exports__SiglipTokenizer as SiglipTokenizer, __webpack_exports__SiglipVisionModel as SiglipVisionModel, __webpack_exports__SpeechT5FeatureExtractor as SpeechT5FeatureExtractor, __webpack_exports__SpeechT5ForSpeechToText as SpeechT5ForSpeechToText, __webpack_exports__SpeechT5ForTextToSpeech as SpeechT5ForTextToSpeech, __webpack_exports__SpeechT5HifiGan as SpeechT5HifiGan, __webpack_exports__SpeechT5Model as SpeechT5Model, __webpack_exports__SpeechT5PreTrainedModel as SpeechT5PreTrainedModel, __webpack_exports__SpeechT5Processor as SpeechT5Processor, __webpack_exports__SpeechT5Tokenizer as SpeechT5Tokenizer, __webpack_exports__SqueezeBertForMaskedLM as SqueezeBertForMaskedLM, __webpack_exports__SqueezeBertForQuestionAnswering as SqueezeBertForQuestionAnswering, __webpack_exports__SqueezeBertForSequenceClassification as SqueezeBertForSequenceClassification, __webpack_exports__SqueezeBertModel as SqueezeBertModel, __webpack_exports__SqueezeBertPreTrainedModel as SqueezeBertPreTrainedModel, __webpack_exports__SqueezeBertTokenizer as SqueezeBertTokenizer, __webpack_exports__StableLmForCausalLM as StableLmForCausalLM, __webpack_exports__StableLmModel as StableLmModel, __webpack_exports__StableLmPreTrainedModel as StableLmPreTrainedModel, __webpack_exports__Starcoder2ForCausalLM as Starcoder2ForCausalLM, __webpack_exports__Starcoder2Model as Starcoder2Model, __webpack_exports__Starcoder2PreTrainedModel as Starcoder2PreTrainedModel, __webpack_exports__StoppingCriteria as StoppingCriteria, __webpack_exports__StoppingCriteriaList as StoppingCriteriaList, __webpack_exports__SummarizationPipeline as SummarizationPipeline, __webpack_exports__Swin2SRForImageSuperResolution as Swin2SRForImageSuperResolution, __webpack_exports__Swin2SRImageProcessor as Swin2SRImageProcessor, __webpack_exports__Swin2SRModel as Swin2SRModel, __webpack_exports__Swin2SRPreTrainedModel as Swin2SRPreTrainedModel, __webpack_exports__SwinForImageClassification as SwinForImageClassification, __webpack_exports__SwinModel as SwinModel, __webpack_exports__SwinPreTrainedModel as SwinPreTrainedModel, __webpack_exports__T5ForConditionalGeneration as T5ForConditionalGeneration, __webpack_exports__T5Model as T5Model, __webpack_exports__T5PreTrainedModel as T5PreTrainedModel, __webpack_exports__T5Tokenizer as T5Tokenizer, __webpack_exports__TableTransformerForObjectDetection as TableTransformerForObjectDetection, __webpack_exports__TableTransformerModel as TableTransformerModel, __webpack_exports__TableTransformerObjectDetectionOutput as TableTransformerObjectDetectionOutput, __webpack_exports__TableTransformerPreTrainedModel as TableTransformerPreTrainedModel, __webpack_exports__Tensor as Tensor, __webpack_exports__Text2TextGenerationPipeline as Text2TextGenerationPipeline, __webpack_exports__TextClassificationPipeline as TextClassificationPipeline, __webpack_exports__TextGenerationPipeline as TextGenerationPipeline, __webpack_exports__TextStreamer as TextStreamer, __webpack_exports__TextToAudioPipeline as TextToAudioPipeline, __webpack_exports__TokenClassificationPipeline as TokenClassificationPipeline, __webpack_exports__TokenClassifierOutput as TokenClassifierOutput, __webpack_exports__TokenizerModel as TokenizerModel, 
__webpack_exports__TrOCRForCausalLM as TrOCRForCausalLM, __webpack_exports__TrOCRPreTrainedModel as TrOCRPreTrainedModel, __webpack_exports__TranslationPipeline as TranslationPipeline, __webpack_exports__UniSpeechForCTC as UniSpeechForCTC, __webpack_exports__UniSpeechForSequenceClassification as UniSpeechForSequenceClassification, __webpack_exports__UniSpeechModel as UniSpeechModel, __webpack_exports__UniSpeechPreTrainedModel as UniSpeechPreTrainedModel, __webpack_exports__UniSpeechSatForAudioFrameClassification as UniSpeechSatForAudioFrameClassification, __webpack_exports__UniSpeechSatForCTC as UniSpeechSatForCTC, __webpack_exports__UniSpeechSatForSequenceClassification as UniSpeechSatForSequenceClassification, __webpack_exports__UniSpeechSatModel as UniSpeechSatModel, __webpack_exports__UniSpeechSatPreTrainedModel as UniSpeechSatPreTrainedModel, __webpack_exports__ViTFeatureExtractor as ViTFeatureExtractor, __webpack_exports__ViTForImageClassification as ViTForImageClassification, __webpack_exports__ViTImageProcessor as ViTImageProcessor, __webpack_exports__ViTMAEModel as ViTMAEModel, __webpack_exports__ViTMAEPreTrainedModel as ViTMAEPreTrainedModel, __webpack_exports__ViTMSNForImageClassification as ViTMSNForImageClassification, __webpack_exports__ViTMSNModel as ViTMSNModel, __webpack_exports__ViTMSNPreTrainedModel as ViTMSNPreTrainedModel, __webpack_exports__ViTModel as ViTModel, __webpack_exports__ViTPreTrainedModel as ViTPreTrainedModel, __webpack_exports__VisionEncoderDecoderModel as VisionEncoderDecoderModel, __webpack_exports__VitMatteForImageMatting as VitMatteForImageMatting, __webpack_exports__VitMatteImageProcessor as VitMatteImageProcessor, __webpack_exports__VitMattePreTrainedModel as VitMattePreTrainedModel, __webpack_exports__VitsModel as VitsModel, __webpack_exports__VitsModelOutput as VitsModelOutput, __webpack_exports__VitsPreTrainedModel as VitsPreTrainedModel, __webpack_exports__VitsTokenizer as VitsTokenizer, __webpack_exports__Wav2Vec2BertForCTC as Wav2Vec2BertForCTC, __webpack_exports__Wav2Vec2BertForSequenceClassification as Wav2Vec2BertForSequenceClassification, __webpack_exports__Wav2Vec2BertModel as Wav2Vec2BertModel, __webpack_exports__Wav2Vec2BertPreTrainedModel as Wav2Vec2BertPreTrainedModel, __webpack_exports__Wav2Vec2CTCTokenizer as Wav2Vec2CTCTokenizer, __webpack_exports__Wav2Vec2FeatureExtractor as Wav2Vec2FeatureExtractor, __webpack_exports__Wav2Vec2ForAudioFrameClassification as Wav2Vec2ForAudioFrameClassification, __webpack_exports__Wav2Vec2ForCTC as Wav2Vec2ForCTC, __webpack_exports__Wav2Vec2ForSequenceClassification as Wav2Vec2ForSequenceClassification, __webpack_exports__Wav2Vec2Model as Wav2Vec2Model, __webpack_exports__Wav2Vec2PreTrainedModel as Wav2Vec2PreTrainedModel, __webpack_exports__Wav2Vec2ProcessorWithLM as Wav2Vec2ProcessorWithLM, __webpack_exports__WavLMForAudioFrameClassification as WavLMForAudioFrameClassification, __webpack_exports__WavLMForCTC as WavLMForCTC, __webpack_exports__WavLMForSequenceClassification as WavLMForSequenceClassification, __webpack_exports__WavLMForXVector as WavLMForXVector, __webpack_exports__WavLMModel as WavLMModel, __webpack_exports__WavLMPreTrainedModel as WavLMPreTrainedModel, __webpack_exports__WeSpeakerFeatureExtractor as WeSpeakerFeatureExtractor, __webpack_exports__WeSpeakerResNetModel as WeSpeakerResNetModel, __webpack_exports__WeSpeakerResNetPreTrainedModel as WeSpeakerResNetPreTrainedModel, __webpack_exports__WhisperFeatureExtractor as WhisperFeatureExtractor, 
__webpack_exports__WhisperForConditionalGeneration as WhisperForConditionalGeneration, __webpack_exports__WhisperModel as WhisperModel, __webpack_exports__WhisperPreTrainedModel as WhisperPreTrainedModel, __webpack_exports__WhisperProcessor as WhisperProcessor, __webpack_exports__WhisperTextStreamer as WhisperTextStreamer, __webpack_exports__WhisperTokenizer as WhisperTokenizer, __webpack_exports__XLMForQuestionAnswering as XLMForQuestionAnswering, __webpack_exports__XLMForSequenceClassification as XLMForSequenceClassification, __webpack_exports__XLMForTokenClassification as XLMForTokenClassification, __webpack_exports__XLMModel as XLMModel, __webpack_exports__XLMPreTrainedModel as XLMPreTrainedModel, __webpack_exports__XLMRobertaForMaskedLM as XLMRobertaForMaskedLM, __webpack_exports__XLMRobertaForQuestionAnswering as XLMRobertaForQuestionAnswering, __webpack_exports__XLMRobertaForSequenceClassification as XLMRobertaForSequenceClassification, __webpack_exports__XLMRobertaForTokenClassification as XLMRobertaForTokenClassification, __webpack_exports__XLMRobertaModel as XLMRobertaModel, __webpack_exports__XLMRobertaPreTrainedModel as XLMRobertaPreTrainedModel, __webpack_exports__XLMRobertaTokenizer as XLMRobertaTokenizer, __webpack_exports__XLMTokenizer as XLMTokenizer, __webpack_exports__XLMWithLMHeadModel as XLMWithLMHeadModel, __webpack_exports__XVectorOutput as XVectorOutput, __webpack_exports__YolosFeatureExtractor as YolosFeatureExtractor, __webpack_exports__YolosForObjectDetection as YolosForObjectDetection, __webpack_exports__YolosModel as YolosModel, __webpack_exports__YolosObjectDetectionOutput as YolosObjectDetectionOutput, __webpack_exports__YolosPreTrainedModel as YolosPreTrainedModel, __webpack_exports__ZeroShotAudioClassificationPipeline as ZeroShotAudioClassificationPipeline, __webpack_exports__ZeroShotClassificationPipeline as ZeroShotClassificationPipeline, __webpack_exports__ZeroShotImageClassificationPipeline as ZeroShotImageClassificationPipeline, __webpack_exports__ZeroShotObjectDetectionPipeline as ZeroShotObjectDetectionPipeline, __webpack_exports__bankers_round as bankers_round, __webpack_exports__cat as cat, __webpack_exports__cos_sim as cos_sim, __webpack_exports__dot as dot, __webpack_exports__dynamic_time_warping as dynamic_time_warping, __webpack_exports__env as env, __webpack_exports__full as full, __webpack_exports__full_like as full_like, __webpack_exports__getKeyValueShapes as getKeyValueShapes, __webpack_exports__hamming as hamming, __webpack_exports__hanning as hanning, __webpack_exports__interpolate as interpolate, __webpack_exports__interpolate_4d as interpolate_4d, __webpack_exports__interpolate_data as interpolate_data, __webpack_exports__is_chinese_char as is_chinese_char, __webpack_exports__layer_norm as layer_norm, __webpack_exports__log_softmax as log_softmax, __webpack_exports__magnitude as magnitude, __webpack_exports__matmul as matmul, __webpack_exports__max as max, __webpack_exports__mean as mean, __webpack_exports__mean_pooling as mean_pooling, __webpack_exports__medianFilter as medianFilter, __webpack_exports__mel_filter_bank as mel_filter_bank, __webpack_exports__min as min, __webpack_exports__ones as ones, __webpack_exports__ones_like as ones_like, __webpack_exports__permute as permute, __webpack_exports__permute_data as permute_data, __webpack_exports__pipeline as pipeline, __webpack_exports__quantize_embeddings as quantize_embeddings, __webpack_exports__read_audio as read_audio, __webpack_exports__rfft as rfft, __webpack_exports__round as 
round, __webpack_exports__softmax as softmax, __webpack_exports__spectrogram as spectrogram, __webpack_exports__stack as stack, __webpack_exports__std_mean as std_mean, __webpack_exports__topk as topk, __webpack_exports__window_function as window_function, __webpack_exports__zeros as zeros, __webpack_exports__zeros_like as zeros_like };
//# sourceMappingURL=transformers.mjs.map