web-llm-runner 0.1.17 → 0.1.18
This diff compares the contents of two publicly released versions of this package, as published to a supported registry. It is provided for informational purposes only.
- package/lib/index.js +41 -536
- package/lib/index.js.map +1 -1
- package/lib/onnx_engine.d.ts.map +1 -1
- package/lib/wrapper/WebLLMWrapper.d.ts.map +1 -1
- package/package.json +1 -1
package/lib/index.js
CHANGED
@@ -12814,7 +12814,7 @@ class EnvImpl {
 /**
  * Represent a set of flags as a global singleton.
  */
-const env$
+const env$2 = new EnvImpl();
 
 // Copyright (c) Microsoft Corporation. All rights reserved.
 // Licensed under the MIT License.
@@ -13513,7 +13513,7 @@ var lib = /*#__PURE__*/Object.freeze({
 __proto__: null,
 InferenceSession: InferenceSession$1,
 Tensor: Tensor$1,
-env: env$
+env: env$2,
 registerBackend: registerBackend
 });
 
@@ -13673,7 +13673,7 @@ if (onnx_env?.wasm) {
 * @property {Object} customCache The custom cache to use. Defaults to `null`. Note: this must be an object which
 * implements the `match` and `put` functions of the Web Cache API. For more information, see https://developer.mozilla.org/en-US/docs/Web/API/Cache
 */
-const env$
+const env$1 = {
 version: VERSION,
 
 remoteHost: 'https://huggingface.co/',
@@ -13862,12 +13862,12 @@ function isValidUrl(string, protocols = null, validHosts = null) {
 */
 async function getFile(urlOrPath) {
 
-if (env$
+if (env$1.useFS && !isValidUrl(urlOrPath, ['http:', 'https:', 'blob:'])) {
 return new FileResponse(urlOrPath);
 
 } else if (typeof process !== 'undefined' && process?.release?.name === 'node') {
 const IS_CI = !!process.env?.TESTING_REMOTELY;
-const version = env$
+const version = env$1.version;
 
 const headers = new Headers();
 headers.set('User-Agent', `transformers.js/${version}; is_ci/${IS_CI};`);
@@ -14014,7 +14014,7 @@ async function tryCache(cache, ...names) {
 */
 async function getModelFile(path_or_repo_id, filename, fatal = true, options = {}) {
 
-if (!env$
+if (!env$1.allowLocalModels) {
 // User has disabled local models, so we just make sure other settings are correct.
 
 if (options.local_files_only) {
@@ -14032,7 +14032,7 @@ async function getModelFile(path_or_repo_id, filename, fatal = true, options = {
 // First, check if the a caching backend is available
 // If no caching mechanism available, will download the file every time
 let cache;
-if (!cache && env$
+if (!cache && env$1.useBrowserCache) {
 if (typeof caches === 'undefined') {
 throw Error('Browser cache is not available in this environment.')
 }
@@ -14048,21 +14048,21 @@ async function getModelFile(path_or_repo_id, filename, fatal = true, options = {
 }
 }
 
-if (!cache && env$
+if (!cache && env$1.useFSCache) {
 // TODO throw error if not available
 
 // If `cache_dir` is not specified, use the default cache directory
-cache = new FileCache(options.cache_dir ?? env$
+cache = new FileCache(options.cache_dir ?? env$1.cacheDir);
 }
 
 const revision = options.revision ?? 'main';
 
 let requestURL = pathJoin(path_or_repo_id, filename);
-let localPath = pathJoin(env$
+let localPath = pathJoin(env$1.localModelPath, requestURL);
 
 let remoteURL = pathJoin(
-env$
-env$
+env$1.remoteHost,
+env$1.remotePathTemplate
 .replaceAll('{model}', path_or_repo_id)
 .replaceAll('{revision}', encodeURIComponent(revision)),
 filename
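Annotation: apart from the `env$` → `env$1` rename, the logic in these `getModelFile` hunks is a first-match cascade over cache backends: the browser Cache API when `useBrowserCache` is set, else a filesystem cache under `cacheDir`, else no cache (every call downloads). A self-contained sketch of that cascade, with illustrative names (`pickCache`, `openFileCache`) that do not appear in the diff:

```js
// First available backend wins; returning undefined means "download every time".
async function pickCache({ useBrowserCache, useFSCache, cacheDir }, openFileCache) {
  let cache;
  if (!cache && useBrowserCache) {
    if (typeof caches === 'undefined') {
      throw new Error('Browser cache is not available in this environment.');
    }
    cache = await caches.open('model-cache'); // cache name illustrative
  }
  if (!cache && useFSCache) {
    // Node.js path, mirroring new FileCache(options.cache_dir ?? env$1.cacheDir)
    cache = openFileCache(cacheDir);
  }
  return cache;
}
```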
@@ -14096,7 +14096,7 @@ async function getModelFile(path_or_repo_id, filename, fatal = true, options = {
 if (response === undefined) {
 // Caching not available, or file is not cached, so we perform the request
 
-if (env$
+if (env$1.allowLocalModels) {
 // Accessing local models is enabled, so we try to get the file locally.
 // If request is a valid HTTP URL, we skip the local file check. Otherwise, we try to get the file locally.
 const isURL = isValidUrl(requestURL, ['http:', 'https:']);
@@ -23790,7 +23790,7 @@ class BeamSearchSampler extends Sampler {
 }
 }
 
-const { InferenceSession, Tensor: ONNXTensor, env
+const { InferenceSession, Tensor: ONNXTensor, env } = ONNX;
 
 /** @typedef {import('onnxruntime-web').InferenceSession} InferenceSession */
 
@@ -23877,7 +23877,7 @@ function validateInputs(session, inputs) {
 // NOTE: When `env.wasm.proxy is true` the tensor is moved across the Worker
 // boundary, transferring ownership to the worker and invalidating the tensor.
 // So, in this case, we simply sacrifice a clone for it.
-checkedInputs[inputName] = env
+checkedInputs[inputName] = env.wasm.proxy ? tensor.clone() : tensor;
 }
 if (missingInputs.length > 0) {
 throw new Error(
@@ -30562,7 +30562,7 @@ class RawImage {
 // Clean up: remove the anchor element from the DOM
 downloadLink.remove();
 
-} else if (!env$
+} else if (!env$1.useFS) {
 throw new Error('Unable to save the image because filesystem is disabled in this environment.')
 
 } else {
@@ -36561,7 +36561,7 @@ const TASK_ALIASES = Object.freeze({
 * @returns {Promise<AllTasks[T]>} A Pipeline object for the specified task.
 * @throws {Error} If an unsupported pipeline is requested.
 */
-async function pipeline
+async function pipeline(
 task,
 model = null,
 {
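Annotation: `pipeline(task, model, options)` is the Transformers.js factory that ONNXEngine builds its generator with; a later hunk shows it choosing "text2text-generation" for T5-style repos and "text-generation" otherwise. A typical call, with the package specifier and model id illustrative:

```js
import { pipeline } from '@xenova/transformers'; // specifier assumed for this bundle's vintage

// Task string matches what the engine selects; the model id is only an example.
const generator = await pipeline('text-generation', 'Xenova/TinyLlama-1.1B-Chat-v1.0');
const [result] = await generator('Hello,', { max_new_tokens: 32 });
console.log(result.generated_text);
```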
@@ -36679,511 +36679,6 @@ async function loadItems(mapping, model, pretrainedOptions) {
 return result;
 }
 
-/**
- * @file Entry point for the Transformers.js library. Only the exports from this file
- * are available to the end user, and are grouped as follows:
- *
- * 1. [Pipelines](./pipelines)
- * 2. [Environment variables](./env)
- * 3. [Models](./models)
- * 4. [Tokenizers](./tokenizers)
- * 5. [Processors](./processors)
- *
- * @module transformers
- */
-
-var Transformers = /*#__PURE__*/Object.freeze({
-__proto__: null,
-ASTFeatureExtractor: ASTFeatureExtractor,
-ASTForAudioClassification: ASTForAudioClassification,
-ASTModel: ASTModel,
-ASTPreTrainedModel: ASTPreTrainedModel,
-AlbertForMaskedLM: AlbertForMaskedLM,
-AlbertForQuestionAnswering: AlbertForQuestionAnswering,
-AlbertForSequenceClassification: AlbertForSequenceClassification,
-AlbertModel: AlbertModel,
-AlbertPreTrainedModel: AlbertPreTrainedModel,
-AlbertTokenizer: AlbertTokenizer,
-AudioClassificationPipeline: AudioClassificationPipeline,
-AutoConfig: AutoConfig,
-AutoModel: AutoModel,
-AutoModelForAudioClassification: AutoModelForAudioClassification,
-AutoModelForCTC: AutoModelForCTC,
-AutoModelForCausalLM: AutoModelForCausalLM,
-AutoModelForDepthEstimation: AutoModelForDepthEstimation,
-AutoModelForDocumentQuestionAnswering: AutoModelForDocumentQuestionAnswering,
-AutoModelForImageClassification: AutoModelForImageClassification,
-AutoModelForImageFeatureExtraction: AutoModelForImageFeatureExtraction,
-AutoModelForImageSegmentation: AutoModelForImageSegmentation,
-AutoModelForImageToImage: AutoModelForImageToImage,
-AutoModelForMaskedLM: AutoModelForMaskedLM,
-AutoModelForObjectDetection: AutoModelForObjectDetection,
-AutoModelForQuestionAnswering: AutoModelForQuestionAnswering,
-AutoModelForSemanticSegmentation: AutoModelForSemanticSegmentation,
-AutoModelForSeq2SeqLM: AutoModelForSeq2SeqLM,
-AutoModelForSequenceClassification: AutoModelForSequenceClassification,
-AutoModelForSpeechSeq2Seq: AutoModelForSpeechSeq2Seq,
-AutoModelForTextToSpectrogram: AutoModelForTextToSpectrogram,
-AutoModelForTextToWaveform: AutoModelForTextToWaveform,
-AutoModelForTokenClassification: AutoModelForTokenClassification,
-AutoModelForVision2Seq: AutoModelForVision2Seq,
-AutoModelForZeroShotObjectDetection: AutoModelForZeroShotObjectDetection,
-AutoProcessor: AutoProcessor,
-AutoTokenizer: AutoTokenizer,
-AutomaticSpeechRecognitionPipeline: AutomaticSpeechRecognitionPipeline,
-BartForConditionalGeneration: BartForConditionalGeneration,
-BartForSequenceClassification: BartForSequenceClassification,
-BartModel: BartModel,
-BartPretrainedModel: BartPretrainedModel,
-BartTokenizer: BartTokenizer,
-BeitFeatureExtractor: BeitFeatureExtractor,
-BeitForImageClassification: BeitForImageClassification,
-BeitModel: BeitModel,
-BeitPreTrainedModel: BeitPreTrainedModel,
-BertForMaskedLM: BertForMaskedLM,
-BertForQuestionAnswering: BertForQuestionAnswering,
-BertForSequenceClassification: BertForSequenceClassification,
-BertForTokenClassification: BertForTokenClassification,
-BertModel: BertModel,
-BertPreTrainedModel: BertPreTrainedModel,
-BertTokenizer: BertTokenizer,
-BitImageProcessor: BitImageProcessor,
-BlenderbotForConditionalGeneration: BlenderbotForConditionalGeneration,
-BlenderbotModel: BlenderbotModel,
-BlenderbotPreTrainedModel: BlenderbotPreTrainedModel,
-BlenderbotSmallForConditionalGeneration: BlenderbotSmallForConditionalGeneration,
-BlenderbotSmallModel: BlenderbotSmallModel,
-BlenderbotSmallPreTrainedModel: BlenderbotSmallPreTrainedModel,
-BlenderbotSmallTokenizer: BlenderbotSmallTokenizer,
-BlenderbotTokenizer: BlenderbotTokenizer,
-BloomForCausalLM: BloomForCausalLM,
-BloomModel: BloomModel,
-BloomPreTrainedModel: BloomPreTrainedModel,
-BloomTokenizer: BloomTokenizer,
-CLIPFeatureExtractor: CLIPFeatureExtractor,
-CLIPModel: CLIPModel,
-CLIPPreTrainedModel: CLIPPreTrainedModel,
-CLIPSegForImageSegmentation: CLIPSegForImageSegmentation,
-CLIPSegModel: CLIPSegModel,
-CLIPSegPreTrainedModel: CLIPSegPreTrainedModel,
-CLIPTextModelWithProjection: CLIPTextModelWithProjection,
-CLIPTokenizer: CLIPTokenizer,
-CLIPVisionModelWithProjection: CLIPVisionModelWithProjection,
-CamembertForMaskedLM: CamembertForMaskedLM,
-CamembertForQuestionAnswering: CamembertForQuestionAnswering,
-CamembertForSequenceClassification: CamembertForSequenceClassification,
-CamembertForTokenClassification: CamembertForTokenClassification,
-CamembertModel: CamembertModel,
-CamembertPreTrainedModel: CamembertPreTrainedModel,
-CamembertTokenizer: CamembertTokenizer,
-CausalLMOutput: CausalLMOutput,
-ChineseCLIPFeatureExtractor: ChineseCLIPFeatureExtractor,
-ChineseCLIPModel: ChineseCLIPModel,
-ChineseCLIPPreTrainedModel: ChineseCLIPPreTrainedModel,
-ClapAudioModelWithProjection: ClapAudioModelWithProjection,
-ClapFeatureExtractor: ClapFeatureExtractor,
-ClapModel: ClapModel,
-ClapPreTrainedModel: ClapPreTrainedModel,
-ClapTextModelWithProjection: ClapTextModelWithProjection,
-CodeGenForCausalLM: CodeGenForCausalLM,
-CodeGenModel: CodeGenModel,
-CodeGenPreTrainedModel: CodeGenPreTrainedModel,
-CodeGenTokenizer: CodeGenTokenizer,
-CodeLlamaTokenizer: CodeLlamaTokenizer,
-CohereTokenizer: CohereTokenizer,
-ConvBertForMaskedLM: ConvBertForMaskedLM,
-ConvBertForQuestionAnswering: ConvBertForQuestionAnswering,
-ConvBertForSequenceClassification: ConvBertForSequenceClassification,
-ConvBertForTokenClassification: ConvBertForTokenClassification,
-ConvBertModel: ConvBertModel,
-ConvBertPreTrainedModel: ConvBertPreTrainedModel,
-ConvBertTokenizer: ConvBertTokenizer,
-ConvNextFeatureExtractor: ConvNextFeatureExtractor,
-ConvNextForImageClassification: ConvNextForImageClassification,
-ConvNextImageProcessor: ConvNextImageProcessor,
-ConvNextModel: ConvNextModel,
-ConvNextPreTrainedModel: ConvNextPreTrainedModel,
-ConvNextV2ForImageClassification: ConvNextV2ForImageClassification,
-ConvNextV2Model: ConvNextV2Model,
-ConvNextV2PreTrainedModel: ConvNextV2PreTrainedModel,
-DPTFeatureExtractor: DPTFeatureExtractor,
-DPTForDepthEstimation: DPTForDepthEstimation,
-DPTImageProcessor: DPTImageProcessor,
-DPTModel: DPTModel,
-DPTPreTrainedModel: DPTPreTrainedModel,
-DebertaForMaskedLM: DebertaForMaskedLM,
-DebertaForQuestionAnswering: DebertaForQuestionAnswering,
-DebertaForSequenceClassification: DebertaForSequenceClassification,
-DebertaForTokenClassification: DebertaForTokenClassification,
-DebertaModel: DebertaModel,
-DebertaPreTrainedModel: DebertaPreTrainedModel,
-DebertaTokenizer: DebertaTokenizer,
-DebertaV2ForMaskedLM: DebertaV2ForMaskedLM,
-DebertaV2ForQuestionAnswering: DebertaV2ForQuestionAnswering,
-DebertaV2ForSequenceClassification: DebertaV2ForSequenceClassification,
-DebertaV2ForTokenClassification: DebertaV2ForTokenClassification,
-DebertaV2Model: DebertaV2Model,
-DebertaV2PreTrainedModel: DebertaV2PreTrainedModel,
-DebertaV2Tokenizer: DebertaV2Tokenizer,
-DeiTFeatureExtractor: DeiTFeatureExtractor,
-DeiTForImageClassification: DeiTForImageClassification,
-DeiTModel: DeiTModel,
-DeiTPreTrainedModel: DeiTPreTrainedModel,
-DepthAnythingForDepthEstimation: DepthAnythingForDepthEstimation,
-DepthAnythingPreTrainedModel: DepthAnythingPreTrainedModel,
-DepthEstimationPipeline: DepthEstimationPipeline,
-DetrFeatureExtractor: DetrFeatureExtractor,
-DetrForObjectDetection: DetrForObjectDetection,
-DetrForSegmentation: DetrForSegmentation,
-DetrModel: DetrModel,
-DetrObjectDetectionOutput: DetrObjectDetectionOutput,
-DetrPreTrainedModel: DetrPreTrainedModel,
-DetrSegmentationOutput: DetrSegmentationOutput,
-Dinov2ForImageClassification: Dinov2ForImageClassification,
-Dinov2Model: Dinov2Model,
-Dinov2PreTrainedModel: Dinov2PreTrainedModel,
-DistilBertForMaskedLM: DistilBertForMaskedLM,
-DistilBertForQuestionAnswering: DistilBertForQuestionAnswering,
-DistilBertForSequenceClassification: DistilBertForSequenceClassification,
-DistilBertForTokenClassification: DistilBertForTokenClassification,
-DistilBertModel: DistilBertModel,
-DistilBertPreTrainedModel: DistilBertPreTrainedModel,
-DistilBertTokenizer: DistilBertTokenizer,
-DocumentQuestionAnsweringPipeline: DocumentQuestionAnsweringPipeline,
-DonutFeatureExtractor: DonutFeatureExtractor,
-DonutSwinModel: DonutSwinModel,
-DonutSwinPreTrainedModel: DonutSwinPreTrainedModel,
-EfficientNetForImageClassification: EfficientNetForImageClassification,
-EfficientNetImageProcessor: EfficientNetImageProcessor,
-EfficientNetModel: EfficientNetModel,
-EfficientNetPreTrainedModel: EfficientNetPreTrainedModel,
-ElectraForMaskedLM: ElectraForMaskedLM,
-ElectraForQuestionAnswering: ElectraForQuestionAnswering,
-ElectraForSequenceClassification: ElectraForSequenceClassification,
-ElectraForTokenClassification: ElectraForTokenClassification,
-ElectraModel: ElectraModel,
-ElectraPreTrainedModel: ElectraPreTrainedModel,
-ElectraTokenizer: ElectraTokenizer,
-EsmForMaskedLM: EsmForMaskedLM,
-EsmForSequenceClassification: EsmForSequenceClassification,
-EsmForTokenClassification: EsmForTokenClassification,
-EsmModel: EsmModel,
-EsmPreTrainedModel: EsmPreTrainedModel,
-EsmTokenizer: EsmTokenizer,
-FFT: FFT,
-FalconForCausalLM: FalconForCausalLM,
-FalconModel: FalconModel,
-FalconPreTrainedModel: FalconPreTrainedModel,
-FalconTokenizer: FalconTokenizer,
-FastViTForImageClassification: FastViTForImageClassification,
-FastViTModel: FastViTModel,
-FastViTPreTrainedModel: FastViTPreTrainedModel,
-FeatureExtractionPipeline: FeatureExtractionPipeline,
-FeatureExtractor: FeatureExtractor,
-FillMaskPipeline: FillMaskPipeline,
-GLPNFeatureExtractor: GLPNFeatureExtractor,
-GLPNForDepthEstimation: GLPNForDepthEstimation,
-GLPNModel: GLPNModel,
-GLPNPreTrainedModel: GLPNPreTrainedModel,
-GPT2LMHeadModel: GPT2LMHeadModel,
-GPT2Model: GPT2Model,
-GPT2PreTrainedModel: GPT2PreTrainedModel,
-GPT2Tokenizer: GPT2Tokenizer,
-GPTBigCodeForCausalLM: GPTBigCodeForCausalLM,
-GPTBigCodeModel: GPTBigCodeModel,
-GPTBigCodePreTrainedModel: GPTBigCodePreTrainedModel,
-GPTJForCausalLM: GPTJForCausalLM,
-GPTJModel: GPTJModel,
-GPTJPreTrainedModel: GPTJPreTrainedModel,
-GPTNeoForCausalLM: GPTNeoForCausalLM,
-GPTNeoModel: GPTNeoModel,
-GPTNeoPreTrainedModel: GPTNeoPreTrainedModel,
-GPTNeoXForCausalLM: GPTNeoXForCausalLM,
-GPTNeoXModel: GPTNeoXModel,
-GPTNeoXPreTrainedModel: GPTNeoXPreTrainedModel,
-GPTNeoXTokenizer: GPTNeoXTokenizer,
-GemmaTokenizer: GemmaTokenizer,
-Grok1Tokenizer: Grok1Tokenizer,
-HerbertTokenizer: HerbertTokenizer,
-HubertForCTC: HubertForCTC,
-HubertForSequenceClassification: HubertForSequenceClassification,
-HubertModel: HubertModel,
-ImageClassificationPipeline: ImageClassificationPipeline,
-ImageFeatureExtractionPipeline: ImageFeatureExtractionPipeline,
-ImageFeatureExtractor: ImageFeatureExtractor,
-ImageMattingOutput: ImageMattingOutput,
-ImageSegmentationPipeline: ImageSegmentationPipeline,
-ImageToImagePipeline: ImageToImagePipeline,
-ImageToTextPipeline: ImageToTextPipeline,
-LlamaForCausalLM: LlamaForCausalLM,
-LlamaModel: LlamaModel,
-LlamaPreTrainedModel: LlamaPreTrainedModel,
-LlamaTokenizer: LlamaTokenizer,
-LongT5ForConditionalGeneration: LongT5ForConditionalGeneration,
-LongT5Model: LongT5Model,
-LongT5PreTrainedModel: LongT5PreTrainedModel,
-M2M100ForConditionalGeneration: M2M100ForConditionalGeneration,
-M2M100Model: M2M100Model,
-M2M100PreTrainedModel: M2M100PreTrainedModel,
-M2M100Tokenizer: M2M100Tokenizer,
-MBart50Tokenizer: MBart50Tokenizer,
-MBartForCausalLM: MBartForCausalLM,
-MBartForConditionalGeneration: MBartForConditionalGeneration,
-MBartForSequenceClassification: MBartForSequenceClassification,
-MBartModel: MBartModel,
-MBartPreTrainedModel: MBartPreTrainedModel,
-MBartTokenizer: MBartTokenizer,
-MPNetForMaskedLM: MPNetForMaskedLM,
-MPNetForQuestionAnswering: MPNetForQuestionAnswering,
-MPNetForSequenceClassification: MPNetForSequenceClassification,
-MPNetForTokenClassification: MPNetForTokenClassification,
-MPNetModel: MPNetModel,
-MPNetPreTrainedModel: MPNetPreTrainedModel,
-MPNetTokenizer: MPNetTokenizer,
-MT5ForConditionalGeneration: MT5ForConditionalGeneration,
-MT5Model: MT5Model,
-MT5PreTrainedModel: MT5PreTrainedModel,
-MarianMTModel: MarianMTModel,
-MarianModel: MarianModel,
-MarianPreTrainedModel: MarianPreTrainedModel,
-MarianTokenizer: MarianTokenizer,
-MaskedLMOutput: MaskedLMOutput,
-MistralForCausalLM: MistralForCausalLM,
-MistralModel: MistralModel,
-MistralPreTrainedModel: MistralPreTrainedModel,
-MobileBertForMaskedLM: MobileBertForMaskedLM,
-MobileBertForQuestionAnswering: MobileBertForQuestionAnswering,
-MobileBertForSequenceClassification: MobileBertForSequenceClassification,
-MobileBertModel: MobileBertModel,
-MobileBertPreTrainedModel: MobileBertPreTrainedModel,
-MobileBertTokenizer: MobileBertTokenizer,
-MobileViTFeatureExtractor: MobileViTFeatureExtractor,
-MobileViTForImageClassification: MobileViTForImageClassification,
-MobileViTImageProcessor: MobileViTImageProcessor,
-MobileViTModel: MobileViTModel,
-MobileViTPreTrainedModel: MobileViTPreTrainedModel,
-MobileViTV2ForImageClassification: MobileViTV2ForImageClassification,
-MobileViTV2Model: MobileViTV2Model,
-MobileViTV2PreTrainedModel: MobileViTV2PreTrainedModel,
-ModelOutput: ModelOutput,
-MptForCausalLM: MptForCausalLM,
-MptModel: MptModel,
-MptPreTrainedModel: MptPreTrainedModel,
-NllbTokenizer: NllbTokenizer,
-NomicBertModel: NomicBertModel,
-NomicBertPreTrainedModel: NomicBertPreTrainedModel,
-NougatImageProcessor: NougatImageProcessor,
-NougatTokenizer: NougatTokenizer,
-OPTForCausalLM: OPTForCausalLM,
-OPTModel: OPTModel,
-OPTPreTrainedModel: OPTPreTrainedModel,
-ObjectDetectionPipeline: ObjectDetectionPipeline,
-OwlViTFeatureExtractor: OwlViTFeatureExtractor,
-OwlViTForObjectDetection: OwlViTForObjectDetection,
-OwlViTModel: OwlViTModel,
-OwlViTPreTrainedModel: OwlViTPreTrainedModel,
-OwlViTProcessor: OwlViTProcessor,
-Owlv2ForObjectDetection: Owlv2ForObjectDetection,
-Owlv2ImageProcessor: Owlv2ImageProcessor,
-Owlv2Model: Owlv2Model,
-Owlv2PreTrainedModel: Owlv2PreTrainedModel,
-PhiForCausalLM: PhiForCausalLM,
-PhiModel: PhiModel,
-PhiPreTrainedModel: PhiPreTrainedModel,
-Pipeline: Pipeline,
-PreTrainedModel: PreTrainedModel,
-PreTrainedTokenizer: PreTrainedTokenizer,
-PretrainedConfig: PretrainedConfig,
-PretrainedMixin: PretrainedMixin,
-Processor: Processor,
-QuestionAnsweringModelOutput: QuestionAnsweringModelOutput,
-QuestionAnsweringPipeline: QuestionAnsweringPipeline,
-Qwen2ForCausalLM: Qwen2ForCausalLM,
-Qwen2Model: Qwen2Model,
-Qwen2PreTrainedModel: Qwen2PreTrainedModel,
-Qwen2Tokenizer: Qwen2Tokenizer,
-RawImage: RawImage,
-ResNetForImageClassification: ResNetForImageClassification,
-ResNetModel: ResNetModel,
-ResNetPreTrainedModel: ResNetPreTrainedModel,
-RoFormerForMaskedLM: RoFormerForMaskedLM,
-RoFormerForQuestionAnswering: RoFormerForQuestionAnswering,
-RoFormerForSequenceClassification: RoFormerForSequenceClassification,
-RoFormerForTokenClassification: RoFormerForTokenClassification,
-RoFormerModel: RoFormerModel,
-RoFormerPreTrainedModel: RoFormerPreTrainedModel,
-RoFormerTokenizer: RoFormerTokenizer,
-RobertaForMaskedLM: RobertaForMaskedLM,
-RobertaForQuestionAnswering: RobertaForQuestionAnswering,
-RobertaForSequenceClassification: RobertaForSequenceClassification,
-RobertaForTokenClassification: RobertaForTokenClassification,
-RobertaModel: RobertaModel,
-RobertaPreTrainedModel: RobertaPreTrainedModel,
-RobertaTokenizer: RobertaTokenizer,
-SamImageProcessor: SamImageProcessor,
-SamImageSegmentationOutput: SamImageSegmentationOutput,
-SamModel: SamModel,
-SamPreTrainedModel: SamPreTrainedModel,
-SamProcessor: SamProcessor,
-SeamlessM4TFeatureExtractor: SeamlessM4TFeatureExtractor,
-SegformerFeatureExtractor: SegformerFeatureExtractor,
-SegformerForImageClassification: SegformerForImageClassification,
-SegformerForSemanticSegmentation: SegformerForSemanticSegmentation,
-SegformerPreTrainedModel: SegformerPreTrainedModel,
-Seq2SeqLMOutput: Seq2SeqLMOutput,
-SequenceClassifierOutput: SequenceClassifierOutput,
-SiglipImageProcessor: SiglipImageProcessor,
-SiglipModel: SiglipModel,
-SiglipPreTrainedModel: SiglipPreTrainedModel,
-SiglipTextModel: SiglipTextModel,
-SiglipTokenizer: SiglipTokenizer,
-SiglipVisionModel: SiglipVisionModel,
-SpeechT5FeatureExtractor: SpeechT5FeatureExtractor,
-SpeechT5ForSpeechToText: SpeechT5ForSpeechToText,
-SpeechT5ForTextToSpeech: SpeechT5ForTextToSpeech,
-SpeechT5HifiGan: SpeechT5HifiGan,
-SpeechT5PreTrainedModel: SpeechT5PreTrainedModel,
-SpeechT5Processor: SpeechT5Processor,
-SpeechT5Tokenizer: SpeechT5Tokenizer,
-SqueezeBertForMaskedLM: SqueezeBertForMaskedLM,
-SqueezeBertForQuestionAnswering: SqueezeBertForQuestionAnswering,
-SqueezeBertForSequenceClassification: SqueezeBertForSequenceClassification,
-SqueezeBertModel: SqueezeBertModel,
-SqueezeBertPreTrainedModel: SqueezeBertPreTrainedModel,
-SqueezeBertTokenizer: SqueezeBertTokenizer,
-StableLmForCausalLM: StableLmForCausalLM,
-StableLmPreTrainedModel: StableLmPreTrainedModel,
-Starcoder2ForCausalLM: Starcoder2ForCausalLM,
-Starcoder2Model: Starcoder2Model,
-Starcoder2PreTrainedModel: Starcoder2PreTrainedModel,
-SummarizationPipeline: SummarizationPipeline,
-Swin2SRForImageSuperResolution: Swin2SRForImageSuperResolution,
-Swin2SRImageProcessor: Swin2SRImageProcessor,
-Swin2SRModel: Swin2SRModel,
-Swin2SRPreTrainedModel: Swin2SRPreTrainedModel,
-SwinForImageClassification: SwinForImageClassification,
-SwinModel: SwinModel,
-SwinPreTrainedModel: SwinPreTrainedModel,
-T5ForConditionalGeneration: T5ForConditionalGeneration,
-T5Model: T5Model,
-T5PreTrainedModel: T5PreTrainedModel,
-T5Tokenizer: T5Tokenizer,
-TableTransformerForObjectDetection: TableTransformerForObjectDetection,
-TableTransformerModel: TableTransformerModel,
-TableTransformerObjectDetectionOutput: TableTransformerObjectDetectionOutput,
-TableTransformerPreTrainedModel: TableTransformerPreTrainedModel,
-Tensor: Tensor,
-Text2TextGenerationPipeline: Text2TextGenerationPipeline,
-TextClassificationPipeline: TextClassificationPipeline,
-TextGenerationPipeline: TextGenerationPipeline,
-TextToAudioPipeline: TextToAudioPipeline,
-TokenClassificationPipeline: TokenClassificationPipeline,
-TokenClassifierOutput: TokenClassifierOutput,
-TokenizerModel: TokenizerModel,
-TrOCRForCausalLM: TrOCRForCausalLM,
-TrOCRPreTrainedModel: TrOCRPreTrainedModel,
-TranslationPipeline: TranslationPipeline,
-UniSpeechForCTC: UniSpeechForCTC,
-UniSpeechForSequenceClassification: UniSpeechForSequenceClassification,
-UniSpeechModel: UniSpeechModel,
-UniSpeechPreTrainedModel: UniSpeechPreTrainedModel,
-UniSpeechSatForAudioFrameClassification: UniSpeechSatForAudioFrameClassification,
-UniSpeechSatForCTC: UniSpeechSatForCTC,
-UniSpeechSatForSequenceClassification: UniSpeechSatForSequenceClassification,
-UniSpeechSatModel: UniSpeechSatModel,
-UniSpeechSatPreTrainedModel: UniSpeechSatPreTrainedModel,
-ViTFeatureExtractor: ViTFeatureExtractor,
-ViTForImageClassification: ViTForImageClassification,
-ViTImageProcessor: ViTImageProcessor,
-ViTModel: ViTModel,
-ViTPreTrainedModel: ViTPreTrainedModel,
-VisionEncoderDecoderModel: VisionEncoderDecoderModel,
-VitMatteForImageMatting: VitMatteForImageMatting,
-VitMatteImageProcessor: VitMatteImageProcessor,
-VitMattePreTrainedModel: VitMattePreTrainedModel,
-VitsModel: VitsModel,
-VitsModelOutput: VitsModelOutput,
-VitsPreTrainedModel: VitsPreTrainedModel,
-VitsTokenizer: VitsTokenizer,
-Wav2Vec2BertForCTC: Wav2Vec2BertForCTC,
-Wav2Vec2BertForSequenceClassification: Wav2Vec2BertForSequenceClassification,
-Wav2Vec2BertModel: Wav2Vec2BertModel,
-Wav2Vec2BertPreTrainedModel: Wav2Vec2BertPreTrainedModel,
-Wav2Vec2CTCTokenizer: Wav2Vec2CTCTokenizer,
-Wav2Vec2FeatureExtractor: Wav2Vec2FeatureExtractor,
-Wav2Vec2ForAudioFrameClassification: Wav2Vec2ForAudioFrameClassification,
-Wav2Vec2ForCTC: Wav2Vec2ForCTC,
-Wav2Vec2ForSequenceClassification: Wav2Vec2ForSequenceClassification,
-Wav2Vec2Model: Wav2Vec2Model,
-Wav2Vec2PreTrainedModel: Wav2Vec2PreTrainedModel,
-Wav2Vec2ProcessorWithLM: Wav2Vec2ProcessorWithLM,
-WavLMForAudioFrameClassification: WavLMForAudioFrameClassification,
-WavLMForCTC: WavLMForCTC,
-WavLMForSequenceClassification: WavLMForSequenceClassification,
-WavLMForXVector: WavLMForXVector,
-WavLMModel: WavLMModel,
-WavLMPreTrainedModel: WavLMPreTrainedModel,
-WhisperFeatureExtractor: WhisperFeatureExtractor,
-WhisperForConditionalGeneration: WhisperForConditionalGeneration,
-WhisperModel: WhisperModel,
-WhisperPreTrainedModel: WhisperPreTrainedModel,
-WhisperProcessor: WhisperProcessor,
-WhisperTokenizer: WhisperTokenizer,
-XLMForQuestionAnswering: XLMForQuestionAnswering,
-XLMForSequenceClassification: XLMForSequenceClassification,
-XLMForTokenClassification: XLMForTokenClassification,
-XLMModel: XLMModel,
-XLMPreTrainedModel: XLMPreTrainedModel,
-XLMRobertaForMaskedLM: XLMRobertaForMaskedLM,
-XLMRobertaForQuestionAnswering: XLMRobertaForQuestionAnswering,
-XLMRobertaForSequenceClassification: XLMRobertaForSequenceClassification,
-XLMRobertaForTokenClassification: XLMRobertaForTokenClassification,
-XLMRobertaModel: XLMRobertaModel,
-XLMRobertaPreTrainedModel: XLMRobertaPreTrainedModel,
-XLMRobertaTokenizer: XLMRobertaTokenizer,
-XLMTokenizer: XLMTokenizer,
-XLMWithLMHeadModel: XLMWithLMHeadModel,
-XVectorOutput: XVectorOutput,
-YolosFeatureExtractor: YolosFeatureExtractor,
-YolosForObjectDetection: YolosForObjectDetection,
-YolosModel: YolosModel,
-YolosObjectDetectionOutput: YolosObjectDetectionOutput,
-YolosPreTrainedModel: YolosPreTrainedModel,
-ZeroShotAudioClassificationPipeline: ZeroShotAudioClassificationPipeline,
-ZeroShotClassificationPipeline: ZeroShotClassificationPipeline,
-ZeroShotImageClassificationPipeline: ZeroShotImageClassificationPipeline,
-ZeroShotObjectDetectionPipeline: ZeroShotObjectDetectionPipeline,
-bankers_round: bankers_round,
-cat: cat,
-dynamicTimeWarping: dynamicTimeWarping,
-env: env$2,
-getTopItems: getTopItems,
-hanning: hanning,
-interpolate: interpolate,
-interpolate_data: interpolate_data,
-log_softmax: log_softmax,
-max: max,
-mean: mean,
-mean_pooling: mean_pooling,
-medianFilter: medianFilter,
-mel_filter_bank: mel_filter_bank,
-min: min,
-ones: ones,
-ones_like: ones_like,
-permute: permute,
-permute_data: permute_data,
-pipeline: pipeline$1,
-quantize_embeddings: quantize_embeddings,
-read_audio: read_audio,
-round: round,
-softmax: softmax,
-spectrogram: spectrogram,
-stack: stack,
-std_mean: std_mean,
-window_function: window_function
-});
-
-const { pipeline, env } = Transformers;
 /**
  * ONNXEngine implements MLCEngineInterface using Transformers.js (ONNX Runtime Web).
  * This provides a CPU/WASM fallback for browsers that do not support WebGPU.
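Annotation: this single removal accounts for 505 of this file's 536 removed lines. The 0.1.17 bundle materialized the full Transformers.js export surface as a frozen namespace object only to destructure two names from it; 0.1.18 drops the object and, as the hunks above and below show, references `pipeline` and the renamed `env$1` directly. Schematically (entry list elided):

```js
// 0.1.17 (removed): build a ~490-entry namespace, then pull out two bindings
var Transformers = /*#__PURE__*/Object.freeze({ __proto__: null, /* ...entries... */ });
const { pipeline, env } = Transformers;

// 0.1.18: no intermediate namespace; call sites use pipeline / env$1 directly
```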
@@ -37204,8 +36699,8 @@ class ONNXEngine {
 this.embeddings = new Embeddings(this);
 this.appConfig = prebuiltAppConfig;
 // Default env settings for browser
-env.allowLocalModels = false;
-env.useBrowserCache = true;
+env$1.allowLocalModels = false;
+env$1.useBrowserCache = true;
 }
 setInitProgressCallback(initProgressCallback) {
 this.initProgressCallback = initProgressCallback;
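Annotation: the constructor now sets its defaults on the bundled Transformers.js environment object (`env$1`) instead of the destructured `env` that was removed above. In application code, the equivalent, public way to set these two flags looks like the sketch below; the flags are part of the Transformers.js `env` API, while the package specifier is assumed for this bundle's vintage:

```js
import { env } from '@xenova/transformers'; // specifier assumed

// Do not probe for locally served model files; always resolve against
// the remote hub (https://huggingface.co/ per this bundle).
env.allowLocalModels = false;

// Cache downloaded weights via the browser's Cache API so a reload
// does not re-download the model.
env.useBrowserCache = true;
```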
@@ -37327,12 +36822,7 @@ class ONNXEngine {
 const id = crypto.randomUUID();
 const queue = [];
 let isDone = false;
-
-skip_prompt: true,
-callback_function: (text) => {
-queue.push(text);
-},
-});
+let fullTextSoFar = "";
 // Run generation in the background
 (this.repoId || "").toLowerCase().includes("t5") ? "text2text-generation" : "text-generation";
 this.generator(prompt, {
@@ -37341,7 +36831,14 @@ class ONNXEngine {
 top_p: request.top_p || 1.0,
 do_sample: (request.temperature ?? 1.0) > 0,
 repetition_penalty: request.repetition_penalty || 1.1,
-
+callback_function: (beams) => {
+const decoded = this.generator.tokenizer.decode(beams[0].output_token_ids, { skip_special_tokens: true });
+const delta = decoded.slice(fullTextSoFar.length);
+if (delta) {
+queue.push(delta);
+fullTextSoFar = decoded;
+}
+},
 }).finally(() => {
 isDone = true;
 });
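Annotation: this hunk (and its twin in the completions path below) replaces `TextStreamer`-style options (`skip_prompt`, a `callback_function` receiving plain text) with the beam callback form, in which each step delivers the full token sequence generated so far. The engine re-decodes the whole sequence and pushes only the unseen suffix onto the queue. A self-contained sketch of that diffing pattern, where `decode` stands in for `this.generator.tokenizer.decode`:

```js
// Wrap a full-sequence callback so consumers receive only newly generated text.
function makeDeltaEmitter(decode, onDelta) {
  let fullTextSoFar = "";
  return (tokenIds) => {
    const decoded = decode(tokenIds);                  // full text so far
    const delta = decoded.slice(fullTextSoFar.length); // unseen suffix only
    if (delta) {
      onDelta(delta);
      fullTextSoFar = decoded;
    }
  };
}

// Shape used in the diff: callback_function: (beams) =>
//   emit(beams[0].output_token_ids), where decode calls
//   tokenizer.decode(ids, { skip_special_tokens: true }).
```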
@@ -37414,14 +36911,18 @@ class ONNXEngine {
 const model = this.modelId;
 const queue = [];
 let isDone = false;
-
-skip_prompt: true,
-callback_function: (text) => { queue.push(text); },
-});
+let fullTextSoFar = "";
 this.generator(prompt, {
 max_new_tokens: request.max_tokens || 256,
 temperature: request.temperature || 0.7,
-
+callback_function: (beams) => {
+const decoded = this.generator.tokenizer.decode(beams[0].output_token_ids, { skip_special_tokens: true });
+const delta = decoded.slice(fullTextSoFar.length);
+if (delta) {
+queue.push(delta);
+fullTextSoFar = decoded;
+}
+},
 }).finally(() => { isDone = true; });
 while (!isDone || queue.length > 0) {
 if (queue.length > 0) {
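Annotation: the unchanged `while (!isDone || queue.length > 0)` loop at the end of this hunk is the consuming half of the handoff: the callback above produces deltas, and the loop drains them until `.finally` flips `isDone`. A minimal sketch of that consumption as an async generator; the short sleep between polls is assumed, since the diff does not show the loop body past the `if`:

```js
// Drain a callback-fed queue as an async stream of text chunks.
async function* drain(queue, isDone) {
  while (!isDone() || queue.length > 0) {
    if (queue.length > 0) {
      yield queue.shift(); // oldest pending delta first
    } else {
      await new Promise(resolve => setTimeout(resolve, 10)); // assumed poll interval
    }
  }
}

// const queue = []; let done = false;
// generator(prompt, { /* callback pushes into queue */ }).finally(() => { done = true; });
// for await (const chunk of drain(queue, () => done)) { /* stream chunk */ }
```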
@@ -38649,6 +38150,10 @@ class WebLLM {
 ];
 list = list.filter(m => approvedIds.includes(m.model_id));
 }
+else {
+// On Desktop, filter out those that are exclusively ONNX-id based (not for WebGPU)
+list = list.filter(m => !m.onnx_id);
+}
 return list.map((m) => m.model_id);
 }
 async local_model_available(model_id) {
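Annotation: the new `else` branch complements the existing mobile allowlist. Catalog entries that exist only for the ONNX/WASM fallback carry an `onnx_id` and are now hidden on desktop, where WebGPU-capable models are listed instead. A sketch of the resulting selection; the mobile check itself sits outside the hunk and is assumed:

```js
// Assumed surrounding shape; only the two filter branches appear in the diff.
function visibleModelIds(list, { isMobile, approvedIds }) {
  if (isMobile) {
    list = list.filter(m => approvedIds.includes(m.model_id)); // explicit allowlist
  } else {
    list = list.filter(m => !m.onnx_id); // drop ONNX-only (WASM fallback) entries
  }
  return list.map(m => m.model_id);
}
```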