@huggingface/transformers 4.0.0-next.3 → 4.0.0-next.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +10 -4
- package/dist/ort-wasm-simd-threaded.jsep.mjs +28 -28
- package/dist/transformers.js +3109 -2099
- package/dist/transformers.min.js +17 -19
- package/dist/transformers.node.cjs +3100 -2060
- package/dist/transformers.node.min.cjs +19 -21
- package/dist/transformers.node.min.mjs +19 -21
- package/dist/transformers.node.mjs +3085 -2060
- package/dist/transformers.web.js +1312 -276
- package/dist/transformers.web.min.js +15 -15
- package/package.json +4 -4
- package/src/backends/onnx.js +66 -10
- package/src/backends/utils/cacheWasm.js +9 -6
- package/src/configs.js +52 -3
- package/src/env.js +66 -7
- package/src/generation/logits_sampler.js +3 -15
- package/src/image_processors_utils.js +2 -6
- package/src/models/afmoe/modeling_afmoe.js +5 -0
- package/src/models/auto/image_processing_auto.js +2 -1
- package/src/models/auto/modeling_auto.js +2 -1
- package/src/models/auto/tokenization_auto.js +2 -1
- package/src/models/clap/feature_extraction_clap.js +2 -1
- package/src/models/cohere2/modeling_cohere2.js +5 -0
- package/src/models/marian/tokenization_marian.js +3 -2
- package/src/models/modeling_utils.js +14 -4
- package/src/models/models.js +6 -0
- package/src/models/paligemma/processing_paligemma.js +3 -2
- package/src/models/processors.js +2 -0
- package/src/models/qwen2_5_vl/modeling_qwen2_5_vl.js +5 -0
- package/src/models/qwen2_5_vl/processing_qwen2_5_vl.js +3 -0
- package/src/models/qwen2_vl/image_processing_qwen2_vl.js +54 -0
- package/src/models/qwen2_vl/modeling_qwen2_vl.js +8 -2
- package/src/models/qwen3_5/modeling_qwen3_5.js +3 -0
- package/src/models/qwen3_5_moe/modeling_qwen3_5_moe.js +3 -0
- package/src/models/qwen3_vl/modeling_qwen3_vl.js +3 -0
- package/src/models/qwen3_vl/processing_qwen3_vl.js +3 -0
- package/src/models/registry.js +9 -1
- package/src/models/session.js +16 -50
- package/src/models/whisper/feature_extraction_whisper.js +2 -1
- package/src/models/whisper/modeling_whisper.js +6 -5
- package/src/models/xlm/tokenization_xlm.js +2 -1
- package/src/pipelines/automatic-speech-recognition.js +3 -2
- package/src/pipelines/index.js +395 -0
- package/src/pipelines/text-generation.js +4 -0
- package/src/pipelines/text-to-audio.js +4 -2
- package/src/pipelines/zero-shot-classification.js +3 -2
- package/src/pipelines.js +104 -356
- package/src/tokenization_utils.js +42 -21
- package/src/transformers.js +8 -1
- package/src/utils/audio.js +2 -1
- package/src/utils/cache.js +4 -1
- package/src/utils/core.js +23 -1
- package/src/utils/devices.js +22 -0
- package/src/utils/dtypes.js +55 -0
- package/src/utils/hub/files.js +17 -2
- package/src/utils/hub/utils.js +10 -4
- package/src/utils/hub.js +57 -17
- package/src/utils/image.js +2 -1
- package/src/utils/logger.js +67 -0
- package/src/utils/model-loader.js +35 -17
- package/src/utils/model_registry/ModelRegistry.js +299 -0
- package/src/utils/model_registry/clear_cache.js +128 -0
- package/src/utils/model_registry/get_file_metadata.js +149 -0
- package/src/utils/model_registry/get_files.js +42 -0
- package/src/utils/model_registry/get_model_files.js +182 -0
- package/src/utils/model_registry/get_pipeline_files.js +53 -0
- package/src/utils/model_registry/get_processor_files.js +20 -0
- package/src/utils/model_registry/get_tokenizer_files.js +21 -0
- package/src/utils/model_registry/is_cached.js +92 -0
- package/src/utils/random.js +225 -0
- package/src/utils/tensor.js +8 -21
- package/src/utils/video.js +2 -2
- package/types/backends/onnx.d.ts.map +1 -1
- package/types/backends/utils/cacheWasm.d.ts.map +1 -1
- package/types/configs.d.ts.map +1 -1
- package/types/env.d.ts +42 -24
- package/types/env.d.ts.map +1 -1
- package/types/generation/logits_sampler.d.ts +2 -2
- package/types/generation/logits_sampler.d.ts.map +1 -1
- package/types/image_processors_utils.d.ts.map +1 -1
- package/types/models/afmoe/modeling_afmoe.d.ts +8 -0
- package/types/models/afmoe/modeling_afmoe.d.ts.map +1 -0
- package/types/models/auto/image_processing_auto.d.ts.map +1 -1
- package/types/models/auto/modeling_auto.d.ts.map +1 -1
- package/types/models/auto/tokenization_auto.d.ts.map +1 -1
- package/types/models/clap/feature_extraction_clap.d.ts.map +1 -1
- package/types/models/cohere2/modeling_cohere2.d.ts +8 -0
- package/types/models/cohere2/modeling_cohere2.d.ts.map +1 -0
- package/types/models/marian/tokenization_marian.d.ts.map +1 -1
- package/types/models/modeling_utils.d.ts.map +1 -1
- package/types/models/models.d.ts +6 -0
- package/types/models/paligemma/processing_paligemma.d.ts.map +1 -1
- package/types/models/processors.d.ts +2 -0
- package/types/models/qwen2_5_vl/modeling_qwen2_5_vl.d.ts +4 -0
- package/types/models/qwen2_5_vl/modeling_qwen2_5_vl.d.ts.map +1 -0
- package/types/models/qwen2_5_vl/processing_qwen2_5_vl.d.ts +4 -0
- package/types/models/qwen2_5_vl/processing_qwen2_5_vl.d.ts.map +1 -0
- package/types/models/qwen2_vl/image_processing_qwen2_vl.d.ts +3 -0
- package/types/models/qwen2_vl/image_processing_qwen2_vl.d.ts.map +1 -1
- package/types/models/qwen2_vl/modeling_qwen2_vl.d.ts +1 -0
- package/types/models/qwen2_vl/modeling_qwen2_vl.d.ts.map +1 -1
- package/types/models/qwen3_5/modeling_qwen3_5.d.ts +4 -0
- package/types/models/qwen3_5/modeling_qwen3_5.d.ts.map +1 -0
- package/types/models/qwen3_5_moe/modeling_qwen3_5_moe.d.ts +4 -0
- package/types/models/qwen3_5_moe/modeling_qwen3_5_moe.d.ts.map +1 -0
- package/types/models/qwen3_vl/modeling_qwen3_vl.d.ts +4 -0
- package/types/models/qwen3_vl/modeling_qwen3_vl.d.ts.map +1 -0
- package/types/models/qwen3_vl/processing_qwen3_vl.d.ts +4 -0
- package/types/models/qwen3_vl/processing_qwen3_vl.d.ts.map +1 -0
- package/types/models/registry.d.ts.map +1 -1
- package/types/models/session.d.ts.map +1 -1
- package/types/models/whisper/feature_extraction_whisper.d.ts.map +1 -1
- package/types/models/whisper/modeling_whisper.d.ts.map +1 -1
- package/types/models/xlm/tokenization_xlm.d.ts.map +1 -1
- package/types/pipelines/automatic-speech-recognition.d.ts.map +1 -1
- package/types/pipelines/index.d.ts +299 -0
- package/types/pipelines/index.d.ts.map +1 -0
- package/types/pipelines/text-generation.d.ts +5 -1
- package/types/pipelines/text-generation.d.ts.map +1 -1
- package/types/pipelines/text-to-audio.d.ts.map +1 -1
- package/types/pipelines/zero-shot-classification.d.ts.map +1 -1
- package/types/pipelines.d.ts +50 -291
- package/types/pipelines.d.ts.map +1 -1
- package/types/tokenization_utils.d.ts +44 -26
- package/types/tokenization_utils.d.ts.map +1 -1
- package/types/transformers.d.ts +6 -1
- package/types/transformers.d.ts.map +1 -1
- package/types/utils/audio.d.ts.map +1 -1
- package/types/utils/cache.d.ts +6 -0
- package/types/utils/cache.d.ts.map +1 -1
- package/types/utils/core.d.ts +59 -2
- package/types/utils/core.d.ts.map +1 -1
- package/types/utils/devices.d.ts +15 -0
- package/types/utils/devices.d.ts.map +1 -1
- package/types/utils/dtypes.d.ts +16 -0
- package/types/utils/dtypes.d.ts.map +1 -1
- package/types/utils/hub/files.d.ts +6 -0
- package/types/utils/hub/files.d.ts.map +1 -1
- package/types/utils/hub/utils.d.ts +2 -1
- package/types/utils/hub/utils.d.ts.map +1 -1
- package/types/utils/hub.d.ts +29 -0
- package/types/utils/hub.d.ts.map +1 -1
- package/types/utils/image.d.ts.map +1 -1
- package/types/utils/logger.d.ts +28 -0
- package/types/utils/logger.d.ts.map +1 -0
- package/types/utils/model-loader.d.ts +15 -0
- package/types/utils/model-loader.d.ts.map +1 -1
- package/types/utils/model_registry/ModelRegistry.d.ts +211 -0
- package/types/utils/model_registry/ModelRegistry.d.ts.map +1 -0
- package/types/utils/model_registry/clear_cache.d.ts +74 -0
- package/types/utils/model_registry/clear_cache.d.ts.map +1 -0
- package/types/utils/model_registry/get_file_metadata.d.ts +20 -0
- package/types/utils/model_registry/get_file_metadata.d.ts.map +1 -0
- package/types/utils/model_registry/get_files.d.ts +23 -0
- package/types/utils/model_registry/get_files.d.ts.map +1 -0
- package/types/utils/model_registry/get_model_files.d.ts +22 -0
- package/types/utils/model_registry/get_model_files.d.ts.map +1 -0
- package/types/utils/model_registry/get_pipeline_files.d.ts +21 -0
- package/types/utils/model_registry/get_pipeline_files.d.ts.map +1 -0
- package/types/utils/model_registry/get_processor_files.d.ts +9 -0
- package/types/utils/model_registry/get_processor_files.d.ts.map +1 -0
- package/types/utils/model_registry/get_tokenizer_files.d.ts +9 -0
- package/types/utils/model_registry/get_tokenizer_files.d.ts.map +1 -0
- package/types/utils/model_registry/is_cached.d.ts +62 -0
- package/types/utils/model_registry/is_cached.d.ts.map +1 -0
- package/types/utils/random.d.ts +86 -0
- package/types/utils/random.d.ts.map +1 -0
- package/types/utils/tensor.d.ts.map +1 -1
|
@@ -0,0 +1,182 @@
|
|
|
1
|
+
import { DEFAULT_DTYPE_SUFFIX_MAPPING, selectDtype } from '../dtypes.js';
|
|
2
|
+
import { selectDevice } from '../devices.js';
|
|
3
|
+
import { resolveExternalDataFormat, getExternalDataChunkNames } from '../model-loader.js';
|
|
4
|
+
import { MODEL_TYPES, MODEL_TYPE_MAPPING } from '../../models/modeling_utils.js';
|
|
5
|
+
import { AutoConfig } from '../../configs.js';
|
|
6
|
+
import { GITHUB_ISSUE_URL } from '../constants.js';
|
|
7
|
+
import { logger } from '../logger.js';
|
|
8
|
+
|
|
9
|
+
/**
 * Returns the list of files that will be loaded for a model based on its configuration.
 *
 * This function reads configuration from the model's config.json on the hub.
 * If dtype/device are not specified in the config, you can provide them to match
 * what the pipeline will actually use.
 *
 * @param {string} modelId The model id (e.g., "onnx-community/granite-4.0-350m-ONNX-web")
 * @param {Object} [options] Optional parameters
 * @param {import('../../configs.js').PretrainedConfig} [options.config=null] Pre-loaded model config (optional, will be fetched if not provided)
 * @param {import('../dtypes.js').DataType|Record<string, import('../dtypes.js').DataType>} [options.dtype=null] Override dtype (use this if passing dtype to pipeline)
 * @param {import('../devices.js').DeviceType|Record<string, import('../devices.js').DeviceType>} [options.device=null] Override device (use this if passing device to pipeline)
 * @param {string} [options.model_file_name=null] Override the model file name (excluding .onnx suffix).
 * @returns {Promise<string[]>} Array of file paths that will be loaded
 */
export async function get_model_files(
  modelId,
  { config = null, dtype: dtypeOverride = null, device: deviceOverride = null, model_file_name = null } = {},
) {
  const resolvedConfig = await AutoConfig.from_pretrained(modelId, { config });

  // config.json is always fetched, regardless of model type.
  const files = ['config.json'];

  const customConfig = resolvedConfig['transformers.js_config'] ?? {};
  const useExternalDataFormat = customConfig.use_external_data_format;
  const subfolder = 'onnx'; // Matches the default subfolder used by from_pretrained.

  const rawDevice = deviceOverride ?? customConfig.device;
  const dtype = dtypeOverride ?? customConfig.dtype;

  // @ts-ignore - architectures is set via Object.assign in PretrainedConfig constructor
  const architectures = /** @type {string[]} */ (resolvedConfig.architectures || []);

  // Resolve the model type the same way from_pretrained() does: first by
  // architecture name, then by model_type (which also covers custom models
  // that declare no architectures).
  let modelType;
  for (const candidate of [...architectures, resolvedConfig.model_type]) {
    if (!candidate) continue;
    const mapped = MODEL_TYPE_MAPPING.get(candidate);
    if (mapped !== undefined) {
      modelType = mapped;
      break;
    }
  }

  if (modelType === undefined) {
    const archList = architectures.length > 0 ? architectures.join(', ') : '(none)';
    logger.warn(
      `[get_model_files] Architecture(s) not found in MODEL_TYPE_MAPPING: [${archList}] ` +
        `for model type '${resolvedConfig.model_type}'. Falling back to EncoderOnly (single model.onnx file). ` +
        `If you encounter issues, please report at: ${GITHUB_ISSUE_URL}`,
    );
    // Other model types (Vision2Seq, Musicgen, etc.) require specific file
    // structures and should be registered in MODEL_TYPE_MAPPING if valid,
    // so the only safe fallback here is a single model.onnx file.
    modelType = MODEL_TYPES.EncoderOnly;
  }

  /**
   * Appends the ONNX file (plus any external-data chunks) for one model component.
   * @param {string} selectorName Name used when selecting per-component dtype/device.
   * @param {string} [outputBase] Base file name on disk (defaults to selectorName).
   */
  const pushModelFiles = (selectorName, outputBase = selectorName) => {
    const selectedDevice = selectDevice(rawDevice, selectorName);
    const selectedDtype = selectDtype(dtype, selectorName, selectedDevice);
    const suffix = DEFAULT_DTYPE_SUFFIX_MAPPING[selectedDtype] ?? '';
    const onnxName = `${outputBase}${suffix}.onnx`;
    files.push(subfolder ? `${subfolder}/${onnxName}` : onnxName);

    // Weights may be stored as external-data chunks next to the graph file.
    const numChunks = resolveExternalDataFormat(useExternalDataFormat, onnxName, selectorName);
    for (const chunkName of getExternalDataChunkNames(onnxName, numChunks)) {
      files.push(subfolder ? `${subfolder}/${chunkName}` : chunkName);
    }
  };

  // model_file_name overrides the default ONNX file name for single-model
  // architectures (encoder-only, decoder-only). Multi-component models use fixed names.
  const singleModelName = model_file_name ?? 'model';

  switch (modelType) {
    case MODEL_TYPES.DecoderOnly:
      pushModelFiles('model', singleModelName);
      files.push('generation_config.json');
      break;
    case MODEL_TYPES.DecoderOnlyWithoutHead:
      // No generation head, so generation_config.json is not loaded.
      pushModelFiles('model', singleModelName);
      break;
    case MODEL_TYPES.Seq2Seq:
    case MODEL_TYPES.Vision2Seq:
      pushModelFiles('model', 'encoder_model');
      pushModelFiles('decoder_model_merged');
      // generation_config.json is only strictly needed by generation classes
      // (e.g. T5ForConditionalGeneration), not base models (e.g. T5Model);
      // since the specific class is unknown here, include it as it is loaded
      // in most use cases.
      files.push('generation_config.json');
      break;
    case MODEL_TYPES.MaskGeneration:
      pushModelFiles('model', 'vision_encoder');
      pushModelFiles('prompt_encoder_mask_decoder');
      break;
    case MODEL_TYPES.EncoderDecoder:
      pushModelFiles('model', 'encoder_model');
      pushModelFiles('decoder_model_merged');
      break;
    case MODEL_TYPES.ImageTextToText:
      pushModelFiles('embed_tokens');
      pushModelFiles('vision_encoder');
      pushModelFiles('decoder_model_merged');
      if (resolvedConfig.is_encoder_decoder) {
        pushModelFiles('model', 'encoder_model');
      }
      files.push('generation_config.json');
      break;
    case MODEL_TYPES.AudioTextToText:
      pushModelFiles('embed_tokens');
      pushModelFiles('audio_encoder');
      pushModelFiles('decoder_model_merged');
      files.push('generation_config.json');
      break;
    case MODEL_TYPES.ImageAudioTextToText:
      pushModelFiles('embed_tokens');
      pushModelFiles('audio_encoder');
      pushModelFiles('vision_encoder');
      pushModelFiles('decoder_model_merged');
      files.push('generation_config.json');
      break;
    case MODEL_TYPES.Musicgen:
      pushModelFiles('model', 'text_encoder');
      pushModelFiles('decoder_model_merged');
      pushModelFiles('encodec_decode');
      files.push('generation_config.json');
      break;
    case MODEL_TYPES.MultiModality:
      pushModelFiles('prepare_inputs_embeds');
      pushModelFiles('model', 'language_model');
      pushModelFiles('lm_head');
      pushModelFiles('gen_head');
      pushModelFiles('gen_img_embeds');
      pushModelFiles('image_decode');
      files.push('generation_config.json');
      break;
    case MODEL_TYPES.Phi3V:
      pushModelFiles('prepare_inputs_embeds');
      pushModelFiles('model');
      pushModelFiles('vision_encoder');
      files.push('generation_config.json');
      break;
    case MODEL_TYPES.Chatterbox:
      pushModelFiles('embed_tokens');
      pushModelFiles('speech_encoder');
      pushModelFiles('model', 'language_model');
      pushModelFiles('conditional_decoder');
      files.push('generation_config.json');
      break;
    case MODEL_TYPES.AutoEncoder:
      pushModelFiles('encoder_model');
      pushModelFiles('decoder_model');
      break;
    case MODEL_TYPES.Supertonic:
      pushModelFiles('text_encoder');
      pushModelFiles('latent_denoiser');
      pushModelFiles('voice_decoder');
      break;
    default:
      // MODEL_TYPES.EncoderOnly or unknown: a single model file.
      pushModelFiles('model', singleModelName);
  }

  return files;
}
|
|
@@ -0,0 +1,53 @@
|
|
|
1
|
+
import { get_files } from './get_files.js';
|
|
2
|
+
import { SUPPORTED_TASKS, TASK_ALIASES } from '../../pipelines/index.js';
|
|
3
|
+
|
|
4
|
+
/**
 * Look up which auxiliary components (tokenizer, processor) a pipeline task needs,
 * based on its entry in SUPPORTED_TASKS.
 * @private
 * @param {string} task
 * @returns {{tokenizer: boolean, processor: boolean}} Component flags, or `null` when the task is unknown.
 */
function get_task_components(task) {
  const entry = SUPPORTED_TASKS[task];
  return entry
    ? {
        tokenizer: Boolean(entry.tokenizer),
        processor: Boolean(entry.processor),
      }
    : null;
}
|
|
20
|
+
|
|
21
|
+
/**
 * Get all files needed for a specific pipeline task.
 * Which components (tokenizer, processor) are included is derived automatically
 * from the task's entry in SUPPORTED_TASKS.
 *
 * @param {string} task - The pipeline task (e.g., "text-generation", "image-classification")
 * @param {string} modelId - The model id (e.g., "Xenova/bert-base-uncased")
 * @param {Object} [options] - Optional parameters
 * @param {import('../../configs.js').PretrainedConfig} [options.config=null] - Pre-loaded config
 * @param {import('../dtypes.js').DataType|Record<string, import('../dtypes.js').DataType>} [options.dtype=null] - Override dtype
 * @param {import('../devices.js').DeviceType|Record<string, import('../devices.js').DeviceType>} [options.device=null] - Override device
 * @param {string} [options.model_file_name=null] - Override the model file name (excluding .onnx suffix)
 * @returns {Promise<string[]>} Array of file paths that will be loaded
 * @throws {Error} If the task is not supported
 */
export async function get_pipeline_files(task, modelId, options = {}) {
  // Canonicalize the task name (e.g. an alias -> its primary task id).
  const resolvedTask = TASK_ALIASES[task] ?? task;

  // Look up which components this task requires.
  const components = get_task_components(resolvedTask);
  if (components === null) {
    throw new Error(
      `Unsupported pipeline task: ${resolvedTask}. Must be one of [${Object.keys(SUPPORTED_TASKS).join(', ')}]`,
    );
  }

  // Delegate to get_files with the component flags this task needs.
  return get_files(modelId, {
    ...options,
    include_tokenizer: components.tokenizer,
    include_processor: components.processor,
  });
}
|
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
import { IMAGE_PROCESSOR_NAME } from '../constants.js';
|
|
2
|
+
import { get_file_metadata } from './get_file_metadata.js';
|
|
3
|
+
|
|
4
|
+
/**
 * Returns the list of processor files that will be loaded for a model.
 * Whether a processor exists is auto-detected by checking for
 * preprocessor_config.json on the hub.
 *
 * @param {string} modelId The model id (e.g., "Xenova/detr-resnet-50")
 * @returns {Promise<string[]>} Array of processor file names (empty if no processor)
 */
export async function get_processor_files(modelId) {
  if (!modelId) {
    throw new Error('modelId is required');
  }

  // The presence of preprocessor_config.json indicates the model ships a processor.
  const { exists } = await get_file_metadata(modelId, IMAGE_PROCESSOR_NAME, {});
  return exists ? [IMAGE_PROCESSOR_NAME] : [];
}
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
import { get_file_metadata } from './get_file_metadata.js';
|
|
2
|
+
|
|
3
|
+
/**
 * Returns the list of files that will be loaded for a tokenizer.
 * Whether the model ships tokenizer files is auto-detected by probing
 * for tokenizer_config.json on the hub.
 *
 * @param {string} modelId The model id to check for tokenizer files
 * @returns {Promise<string[]>} An array of file names that will be loaded
 */
export async function get_tokenizer_files(modelId) {
  if (!modelId) {
    throw new Error('modelId is required for get_tokenizer_files');
  }

  // tokenizer_config.json acts as the marker file for a tokenizer being present.
  const { exists } = await get_file_metadata(modelId, 'tokenizer_config.json', {});
  return exists ? ['tokenizer.json', 'tokenizer_config.json'] : [];
}
|
|
@@ -0,0 +1,92 @@
|
|
|
1
|
+
import { getCache } from '../cache.js';
|
|
2
|
+
import { buildResourcePaths, checkCachedResource } from '../hub.js';
|
|
3
|
+
import { get_files } from './get_files.js';
|
|
4
|
+
import { get_pipeline_files } from './get_pipeline_files.js';
|
|
5
|
+
|
|
6
|
+
/**
|
|
7
|
+
* @typedef {Object} FileCacheStatus
|
|
8
|
+
* @property {string} file - The file path
|
|
9
|
+
* @property {boolean} cached - Whether the file is cached
|
|
10
|
+
*/
|
|
11
|
+
|
|
12
|
+
/**
|
|
13
|
+
* @typedef {Object} CacheCheckResult
|
|
14
|
+
* @property {boolean} allCached - Whether all files are cached
|
|
15
|
+
* @property {FileCacheStatus[]} files - Array of files with their cache status
|
|
16
|
+
*/
|
|
17
|
+
|
|
18
|
+
/**
 * Internal helper that reports the cache status of each file in a list.
 * @private
 * @param {string} modelId - The model id
 * @param {string[]} files - List of file paths to check
 * @param {Object} options - Options including cache_dir
 * @returns {Promise<CacheCheckResult>}
 */
async function check_files_cache(modelId, files, options = {}) {
  const cache = await getCache(options?.cache_dir);

  // Without a cache backend, nothing can be cached.
  if (!cache) {
    return {
      allCached: false,
      files: files.map((file) => ({ file, cached: false })),
    };
  }

  // Probe all files in parallel; each probe resolves to its cache status.
  const statuses = await Promise.all(
    files.map(async (file) => {
      const { localPath, proposedCacheKey } = buildResourcePaths(modelId, file, options, cache);
      const hit = await checkCachedResource(cache, localPath, proposedCacheKey);
      return { file, cached: Boolean(hit) };
    }),
  );

  const allCached = statuses.every((status) => status.cached);
  return { allCached, files: statuses };
}
|
|
45
|
+
|
|
46
|
+
/**
 * Checks if all files for a given model are already cached.
 * The required file list is determined automatically via get_files().
 *
 * @param {string} modelId The model id (e.g., "Xenova/gpt2")
 * @param {Object} [options] Optional parameters
 * @param {string} [options.cache_dir] Custom cache directory
 * @param {string} [options.revision] Model revision (default: 'main')
 * @param {import('../../configs.js').PretrainedConfig} [options.config] Pre-loaded config
 * @param {import('../dtypes.js').DataType|Record<string, import('../dtypes.js').DataType>} [options.dtype] Override dtype
 * @param {import('../devices.js').DeviceType|Record<string, import('../devices.js').DeviceType>} [options.device] Override device
 * @returns {Promise<CacheCheckResult>} Object with allCached boolean and files array with cache status
 */
export async function is_cached(modelId, options = {}) {
  if (!modelId) {
    throw new Error('modelId is required');
  }

  const requiredFiles = await get_files(modelId, options);
  return check_files_cache(modelId, requiredFiles, options);
}
|
|
67
|
+
|
|
68
|
+
/**
 * Checks if all files for a specific pipeline task are already cached.
 * The required components are derived automatically from the task.
 *
 * @param {string} task - The pipeline task (e.g., "text-generation", "image-classification")
 * @param {string} modelId - The model id (e.g., "Xenova/gpt2")
 * @param {Object} [options] - Optional parameters
 * @param {string} [options.cache_dir] - Custom cache directory
 * @param {string} [options.revision] - Model revision (default: 'main')
 * @param {import('../../configs.js').PretrainedConfig} [options.config] - Pre-loaded config
 * @param {import('../dtypes.js').DataType|Record<string, import('../dtypes.js').DataType>} [options.dtype] - Override dtype
 * @param {import('../devices.js').DeviceType|Record<string, import('../devices.js').DeviceType>} [options.device] - Override device
 * @returns {Promise<CacheCheckResult>} Object with allCached boolean and files array with cache status
 */
export async function is_pipeline_cached(task, modelId, options = {}) {
  if (!task) {
    throw new Error('task is required');
  }
  if (!modelId) {
    throw new Error('modelId is required');
  }

  const requiredFiles = await get_pipeline_files(task, modelId, options);
  return check_files_cache(modelId, requiredFiles, options);
}
|
|
@@ -0,0 +1,225 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Let there be order amidst the chaos.
|
|
3
|
+
*
|
|
4
|
+
* This file implements Mersenne Twister 19937, matching Python's `random` module exactly for reproducibility.
|
|
5
|
+
*
|
|
6
|
+
* ```javascript
|
|
7
|
+
* import { random } from '@huggingface/transformers';
|
|
8
|
+
*
|
|
9
|
+
* random.seed(42);
|
|
10
|
+
* random.random(); // 0.6394267984578837 (matches Python)
|
|
11
|
+
* random.gauss(0, 1); // normal-distributed value
|
|
12
|
+
* random.choices(['a','b'], [3, 1]); // weighted pick
|
|
13
|
+
*
|
|
14
|
+
* const arr = [1, 2, 3, 4, 5];
|
|
15
|
+
* random.shuffle(arr); // in-place Fisher-Yates shuffle
|
|
16
|
+
*
|
|
17
|
+
* // Use a separate instance to avoid affecting the global state:
|
|
18
|
+
* const rng = new random.Random(42);
|
|
19
|
+
* rng.random(); // 0.6394267984578837 (same seed, independent state)
|
|
20
|
+
* ```
|
|
21
|
+
*
|
|
22
|
+
* **Note on Reproducibility:**
|
|
23
|
+
* Similarly to the [Python random](https://docs.python.org/3/library/random.html#notes-on-reproducibility)
|
|
24
|
+
* module, it is useful to be able to reproduce the sequences given by a pseudo-random number generator.
|
|
25
|
+
* By reusing a seed value, the same sequence should be reproducible from run to run as long as multiple
|
|
26
|
+
* threads or asynchronous operations are not running concurrently.
|
|
27
|
+
*
|
|
28
|
+
* @module utils/random
|
|
29
|
+
*/
|
|
30
|
+
|
|
31
|
+
import { apis } from '../env.js';
|
|
32
|
+
|
|
33
|
+
/**
|
|
34
|
+
* Mersenne Twister 19937 PRNG, matching Python's `random.Random` class exactly.
|
|
35
|
+
*
|
|
36
|
+
* Each instance has its own independent state, so seeding one instance does not
|
|
37
|
+
* affect any other instance or the global helper functions.
|
|
38
|
+
*
|
|
39
|
+
* @example
|
|
40
|
+
* const rng1 = new Random(42);
|
|
41
|
+
* const rng2 = new Random(42);
|
|
42
|
+
* rng1.random() === rng2.random(); // true (same seed, independent state)
|
|
43
|
+
*/
|
|
44
|
+
export class Random {
|
|
45
|
+
constructor(seed) {
|
|
46
|
+
this._mt = new Uint32Array(624);
|
|
47
|
+
this._idx = 625;
|
|
48
|
+
this._gauss_next = null;
|
|
49
|
+
this._random_fn = this.random.bind(this);
|
|
50
|
+
this.seed(seed);
|
|
51
|
+
}
|
|
52
|
+
|
|
53
|
+
/**
 * Seeds this instance's PRNG.
 *
 * When called with a number, initializes the state deterministically from that value.
 * When called with no arguments (or `undefined`/`null`), seeds from OS entropy
 * via `crypto.getRandomValues`, matching Python's `random.seed()` behaviour.
 *
 * @param {number} [n] The seed value. Omit to seed from OS entropy.
 */
seed(n) {
  if (n === undefined || n === null) {
    // No seed given: draw one 32-bit word of entropy, or fall back to the
    // clock when the Web Crypto API is unavailable in this environment.
    if (apis.IS_CRYPTO_AVAILABLE) {
      const buf = new Uint32Array(1);
      crypto.getRandomValues(buf);
      n = buf[0];
    } else {
      n = Date.now() >>> 0;
    }
  }
  const mt = this._mt;
  // u: 32-bit unsigned multiply; key: the seed split into 32-bit words.
  const u = (a, b) => Math.imul(a, b) >>> 0,
    key = [];
  // Decompose the (possibly > 32-bit) seed into little-endian 32-bit words,
  // mirroring Python's init_by_array key construction.
  for (let v = n || 0; v > 0; v = Math.floor(v / 0x100000000)) key.push(v & 0xffffffff);
  if (!key.length) key.push(0);
  // Standard MT19937 init_by_array: first fill the state from the fixed
  // constant 19650218 using the 1812433253 recurrence...
  mt[0] = 19650218;
  for (let k = 1; k < 624; ++k) mt[k] = (u(1812433253, mt[k - 1] ^ (mt[k - 1] >>> 30)) + k) >>> 0;
  let i = 1,
    j = 0;
  // ...then mix the key words into the state (multiplier 1664525)...
  for (let k = Math.max(624, key.length); k > 0; --k, ++i, ++j) {
    if (i >= 624) {
      // Wrap around, carrying the last word into slot 0.
      mt[0] = mt[623];
      i = 1;
    }
    if (j >= key.length) j = 0;
    mt[i] = ((mt[i] ^ u(mt[i - 1] ^ (mt[i - 1] >>> 30), 1664525)) + key[j] + j) >>> 0;
  }
  // ...and run a second scrambling pass (multiplier 1566083941).
  for (let k = 623; k > 0; --k, ++i) {
    if (i >= 624) {
      mt[0] = mt[623];
      i = 1;
    }
    mt[i] = ((mt[i] ^ u(mt[i - 1] ^ (mt[i - 1] >>> 30), 1566083941)) - i) >>> 0;
  }
  // Guarantee a non-zero state: force the MSB of the first word.
  mt[0] = 0x80000000;
  // Mark the state as exhausted so the next draw performs a twist.
  this._idx = 624;
  // Discard any cached gauss() value from the previous seed.
  this._gauss_next = null;
}
|
|
100
|
+
|
|
101
|
+
/**
|
|
102
|
+
* Generates a random unsigned 32-bit integer.
|
|
103
|
+
*
|
|
104
|
+
* Performs the "twist" step when the state buffer is exhausted,
|
|
105
|
+
* then applies the standard MT19937 tempering transform.
|
|
106
|
+
*
|
|
107
|
+
* @returns {number} A random integer in the range [0, 2^32 - 1].
|
|
108
|
+
*/
|
|
109
|
+
_int32() {
|
|
110
|
+
const mt = this._mt;
|
|
111
|
+
if (this._idx >= 624) {
|
|
112
|
+
for (let k = 0; k < 624; ++k) {
|
|
113
|
+
// twist
|
|
114
|
+
const y = (mt[k] & 0x80000000) | (mt[(k + 1) % 624] & 0x7fffffff);
|
|
115
|
+
mt[k] = (mt[(k + 397) % 624] ^ (y >>> 1) ^ (y & 1 ? 0x9908b0df : 0)) >>> 0;
|
|
116
|
+
}
|
|
117
|
+
this._idx = 0;
|
|
118
|
+
}
|
|
119
|
+
let y = mt[this._idx++];
|
|
120
|
+
y ^= y >>> 11;
|
|
121
|
+
y ^= (y << 7) & 0x9d2c5680;
|
|
122
|
+
y ^= (y << 15) & 0xefc60000;
|
|
123
|
+
y ^= y >>> 18;
|
|
124
|
+
return y >>> 0;
|
|
125
|
+
}
|
|
126
|
+
|
|
127
|
+
/**
|
|
128
|
+
* Generates a random floating-point number in the half-open interval [0, 1).
|
|
129
|
+
*
|
|
130
|
+
* Combines two 32-bit integers (using 53 bits of precision) to produce
|
|
131
|
+
* a uniformly distributed double, matching Python's `random.random()`.
|
|
132
|
+
*
|
|
133
|
+
* @returns {number} A random float in [0, 1).
|
|
134
|
+
*/
|
|
135
|
+
random() {
|
|
136
|
+
return ((this._int32() >>> 5) * 67108864.0 + (this._int32() >>> 6)) / 9007199254740992.0;
|
|
137
|
+
}
|
|
138
|
+
|
|
139
|
+
/**
|
|
140
|
+
* Generates a random number from a Gaussian (normal) distribution.
|
|
141
|
+
*
|
|
142
|
+
* Uses the Box-Muller transform with a cached spare value,
|
|
143
|
+
* matching Python's `random.gauss()` output for the same seed.
|
|
144
|
+
*
|
|
145
|
+
* @param {number} [mu=0] The mean of the distribution.
|
|
146
|
+
* @param {number} [sigma=1] The standard deviation of the distribution.
|
|
147
|
+
* @returns {number} A normally distributed random value.
|
|
148
|
+
*/
|
|
149
|
+
gauss(mu = 0, sigma = 1) {
|
|
150
|
+
let z = this._gauss_next;
|
|
151
|
+
this._gauss_next = null;
|
|
152
|
+
if (z === null) {
|
|
153
|
+
const x2pi = this.random() * 2 * Math.PI,
|
|
154
|
+
g2rad = Math.sqrt(-2 * Math.log(1 - this.random()));
|
|
155
|
+
z = Math.cos(x2pi) * g2rad;
|
|
156
|
+
this._gauss_next = Math.sin(x2pi) * g2rad;
|
|
157
|
+
}
|
|
158
|
+
return mu + z * sigma;
|
|
159
|
+
}
|
|
160
|
+
|
|
161
|
+
/**
|
|
162
|
+
* Shuffles an array in-place using the Fisher-Yates algorithm.
|
|
163
|
+
*
|
|
164
|
+
* Uses rejection sampling via `getrandbits`-style bit masking to ensure
|
|
165
|
+
* a uniform distribution, matching Python's `random.shuffle()`.
|
|
166
|
+
*
|
|
167
|
+
* @param {any[]} arr The array to shuffle in-place.
|
|
168
|
+
*/
|
|
169
|
+
shuffle(arr) {
|
|
170
|
+
for (let i = arr.length - 1; i > 0; --i) {
|
|
171
|
+
const k = 32 - Math.clz32(i + 1);
|
|
172
|
+
let r = this._int32() >>> (32 - k);
|
|
173
|
+
while (r > i) r = this._int32() >>> (32 - k);
|
|
174
|
+
const t = arr[i];
|
|
175
|
+
arr[i] = arr[r];
|
|
176
|
+
arr[r] = t;
|
|
177
|
+
}
|
|
178
|
+
}
|
|
179
|
+
|
|
180
|
+
/**
|
|
181
|
+
* Selects a single element from a weighted population.
|
|
182
|
+
*
|
|
183
|
+
* Matches Python's `random.choices(population, weights=weights, k=1)[0]`
|
|
184
|
+
*
|
|
185
|
+
* @param {any[]} population The array of items to choose from.
|
|
186
|
+
* @param {number[]} weights An array of non-negative weights, one per population element.
|
|
187
|
+
* @returns {*} A single randomly selected element from the population.
|
|
188
|
+
*/
|
|
189
|
+
choices(population, weights) {
|
|
190
|
+
return population[_weightedIndexWith(this._random_fn, weights)];
|
|
191
|
+
}
|
|
192
|
+
}
|
|
193
|
+
|
|
194
|
+
/**
 * Picks a random index into `weights`, each index weighted proportionally
 * to its weight. Single linear pass: O(n) time, O(1) extra memory.
 *
 * @param {() => number} randomFn A function returning a uniform random float in [0, 1).
 * @param {ArrayLike<number>} weights Non-negative weights.
 * @returns {number} A randomly selected index in `[0, weights.length)`.
 */
function _weightedIndexWith(randomFn, weights) {
    const count = weights.length;
    let total = 0;
    let idx = 0;
    while (idx < count) {
        total += weights[idx];
        ++idx;
    }
    let remaining = randomFn() * total;
    idx = 0;
    while (idx < count) {
        remaining -= weights[idx];
        if (remaining < 0) {
            return idx;
        }
        ++idx;
    }
    // Floating-point rounding can leave `remaining` at exactly 0 after the last
    // weight; fall back to the final index instead of running off the end.
    return count - 1;
}
|
|
212
|
+
|
|
213
|
+
// Shared default generator: plays the role of the module-level functions in
// Python's `random` module (seeded from OS entropy at import time).
const _default = new Random();

/**
 * Frozen namespace exposing the default instance's methods (bound, so they can
 * be passed around freely) together with the `Random` class itself.
 */
export const random = Object.freeze(
    ['seed', 'random', 'gauss', 'shuffle', 'choices'].reduce(
        (api, method) => {
            api[method] = _default[method].bind(_default);
            return api;
        },
        { Random },
    ),
);
|
|
223
|
+
|
|
224
|
+
// Internal helper consumed by LogitsSampler; exported for that caller only and
// not part of the documented public API.
export const _weightedIndex = (weights) => {
    return _weightedIndexWith(random.random, weights);
};
|
package/src/utils/tensor.js
CHANGED
|
@@ -15,6 +15,8 @@ import { TensorOpRegistry } from '../ops/registry.js';
|
|
|
15
15
|
|
|
16
16
|
import { DataTypeMap } from './dtypes.js';
|
|
17
17
|
|
|
18
|
+
import { random } from './random.js';
|
|
19
|
+
|
|
18
20
|
/**
|
|
19
21
|
* @typedef {keyof typeof DataTypeMap} DataType
|
|
20
22
|
* @typedef {import('./maths.js').AnyTypedArray | any[]} DataArray
|
|
@@ -1591,7 +1593,7 @@ export function rand(size) {
|
|
|
1591
1593
|
const length = size.reduce((a, b) => a * b, 1);
|
|
1592
1594
|
return new Tensor(
|
|
1593
1595
|
'float32',
|
|
1594
|
-
Float32Array.from({ length }, () =>
|
|
1596
|
+
Float32Array.from({ length }, () => random.random()),
|
|
1595
1597
|
size,
|
|
1596
1598
|
);
|
|
1597
1599
|
}
|
|
@@ -1603,26 +1605,11 @@ export function rand(size) {
|
|
|
1603
1605
|
*/
|
|
1604
1606
|
export function randn(size) {
|
|
1605
1607
|
const length = size.reduce((a, b) => a * b, 1);
|
|
1606
|
-
|
|
1607
|
-
|
|
1608
|
-
|
|
1609
|
-
|
|
1610
|
-
|
|
1611
|
-
const v = Math.random();
|
|
1612
|
-
|
|
1613
|
-
const mag = Math.sqrt(-2.0 * Math.log(u));
|
|
1614
|
-
const angle = 2.0 * Math.PI * v;
|
|
1615
|
-
|
|
1616
|
-
// Assign the first value
|
|
1617
|
-
data[i] = mag * Math.cos(angle);
|
|
1618
|
-
|
|
1619
|
-
// Assign the second value (if valid index)
|
|
1620
|
-
if (i + 1 < length) {
|
|
1621
|
-
data[i + 1] = mag * Math.sin(angle);
|
|
1622
|
-
}
|
|
1623
|
-
}
|
|
1624
|
-
|
|
1625
|
-
return new Tensor('float32', data, size);
|
|
1608
|
+
return new Tensor(
|
|
1609
|
+
'float32',
|
|
1610
|
+
Float32Array.from({ length }, () => random.gauss()),
|
|
1611
|
+
size,
|
|
1612
|
+
);
|
|
1626
1613
|
}
|
|
1627
1614
|
|
|
1628
1615
|
/**
|
package/src/utils/video.js
CHANGED
|
@@ -1,5 +1,5 @@
|
|
|
1
1
|
import { RawImage } from './image.js';
|
|
2
|
-
import { apis } from '../env.js';
|
|
2
|
+
import { env, apis } from '../env.js';
|
|
3
3
|
|
|
4
4
|
export class RawVideoFrame {
|
|
5
5
|
/**
|
|
@@ -79,7 +79,7 @@ export async function load_video(src, { num_frames = null, fps = null } = {}) {
|
|
|
79
79
|
|
|
80
80
|
if (video.seekable.start(0) === video.seekable.end(0)) {
|
|
81
81
|
// Fallback: Download entire video if not seekable
|
|
82
|
-
const response = await fetch(video.src);
|
|
82
|
+
const response = await env.fetch(video.src);
|
|
83
83
|
const blob = await response.blob();
|
|
84
84
|
video.src = URL.createObjectURL(blob);
|
|
85
85
|
await new Promise((resolve) => (video.onloadedmetadata = resolve));
|
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"onnx.d.ts","sourceRoot":"","sources":["../../src/backends/onnx.js"],"names":[],"mappings":"
|
|
1
|
+
{"version":3,"file":"onnx.d.ts","sourceRoot":"","sources":["../../src/backends/onnx.js"],"names":[],"mappings":"AA0JA;;;;GAIG;AACH,oDAHW,OAAO,qBAAqB,EAAE,UAAU,GAAC,MAAM,GAAC,IAAI,GAClD,sBAAsB,EAAE,CAmBpC;AAoFD;;;;;;GAMG;AACH,uDALW,UAAU,GAAC,MAAM,mBACjB,OAAO,oBAAoB,EAAE,gBAAgB,CAAC,cAAc,wBAE1D,OAAO,CAAC,OAAO,oBAAoB,EAAE,gBAAgB,GAAG;IAAE,MAAM,MAAQ;CAAE,CAAC,CAcvF;AASD;;;;;GAKG;AACH,6CAJW,OAAO,oBAAoB,EAAE,gBAAgB,WAC7C,MAAM,CAAC,MAAM,EAAE,OAAO,oBAAoB,EAAE,MAAM,CAAC,GACjD,OAAO,CAAC,MAAM,CAAC,MAAM,EAAE,OAAO,oBAAoB,EAAE,MAAM,CAAC,CAAC,CAMxE;AAED;;;;GAIG;AACH,gCAHW,GAAG,GACD,OAAO,CAInB;AAqCD;;;GAGG;AACH,+BAFa,OAAO,CAKnB;;qCAlUY,OAAO,oBAAoB,EAAE,gBAAgB,CAAC,uBAAuB"}
|
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"cacheWasm.d.ts","sourceRoot":"","sources":["../../../src/backends/utils/cacheWasm.js"],"names":[],"mappings":"
|
|
1
|
+
{"version":3,"file":"cacheWasm.d.ts","sourceRoot":"","sources":["../../../src/backends/utils/cacheWasm.js"],"names":[],"mappings":"AAgDA;;;;GAIG;AAEH,wCAJW,MAAM,GACJ,OAAO,CAAC,WAAW,GAAC,IAAI,CAAC,CAarC;AAED;;;;GAIG;AACH,wCAHW,MAAM,GACJ,OAAO,CAAC,MAAM,GAAC,IAAI,CAAC,CAkBhC;AAED;;;;;GAKG;AACH,+BAHW,MAAM,GACJ,OAAO,CAInB;AAED;;;;;;GAMG;AACH,mCAHW,MAAM,GACJ,MAAM,CAiBlB"}
|