@huggingface/tasks 0.3.3 → 0.4.0
This diff compares the contents of two publicly released versions of the package as published to a supported public registry. It is provided for informational purposes only and reflects the packages exactly as they appear in that registry.
- package/dist/index.cjs +46 -4
- package/dist/index.d.ts +2029 -12
- package/dist/index.js +46 -4
- package/package.json +1 -1
- package/src/index.ts +2 -0
- package/src/model-data.ts +32 -7
- package/src/model-libraries-snippets.ts +41 -3
- package/src/model-libraries.ts +7 -0
- package/src/pipelines.ts +1 -1
- package/src/tasks/index.ts +58 -0
- package/src/tasks/summarization/inference.ts +3 -6
- package/src/tasks/summarization/spec/output.json +9 -2
- package/src/tasks/translation/inference.ts +3 -6
- package/src/tasks/translation/spec/output.json +9 -2
- package/src/widget-example.ts +10 -0
package/dist/index.cjs
CHANGED
@@ -1295,7 +1295,7 @@ var PIPELINE_DATA = {
     color: "green"
   },
   "image-text-to-text": {
-    name: "Image
+    name: "Image-Text-to-Text",
     modality: "multimodal",
     color: "red",
     hideInDatasets: true
@@ -4222,7 +4222,7 @@ model = joblib.load(
 };
 var sklearn = (model) => {
   if (model.tags?.includes("skops")) {
-    const skopsmodelFile = model.config?.sklearn?.
+    const skopsmodelFile = model.config?.sklearn?.model?.file;
     const skopssaveFormat = model.config?.sklearn?.model_format;
     if (!skopsmodelFile) {
       return [`# \u26A0\uFE0F Model filename not specified in config.json`];
@@ -4292,7 +4292,7 @@ var speechBrainMethod = (speechbrainInterface) => {
   }
 };
 var speechbrain = (model) => {
-  const speechbrainInterface = model.config?.speechbrain?.
+  const speechbrainInterface = model.config?.speechbrain?.speechbrain_interface;
   if (speechbrainInterface === void 0) {
     return [`# interface not specified in config.json`];
   }
@@ -4370,7 +4370,7 @@ var peftTask = (peftTaskType) => {
   }
 };
 var peft = (model) => {
-  const {
+  const { base_model_name_or_path: peftBaseModel, task_type: peftTaskType } = model.config?.peft ?? {};
   const pefttask = peftTask(peftTaskType);
   if (!pefttask) {
     return [`Task type is invalid.`];
@@ -4441,6 +4441,41 @@ var pythae = (model) => [
 
 model = AutoModel.load_from_hf_hub("${model.id}")`
 ];
+var musicgen = (model) => [
+  `from audiocraft.models import MusicGen
+
+model = MusicGen.get_pretrained("${model.id}")
+
+descriptions = ['happy rock', 'energetic EDM', 'sad jazz']
+wav = model.generate(descriptions) # generates 3 samples.`
+];
+var magnet = (model) => [
+  `from audiocraft.models import MAGNeT
+
+model = MAGNeT.get_pretrained("${model.id}")
+
+descriptions = ['disco beat', 'energetic EDM', 'funky groove']
+wav = model.generate(descriptions) # generates 3 samples.`
+];
+var audiogen = (model) => [
+  `from audiocraft.models import AudioGen
+
+model = AudioGen.get_pretrained("${model.id}")
+model.set_generation_params(duration=5) # generate 5 seconds.
+descriptions = ['dog barking', 'sirene of an emergency vehicle', 'footsteps in a corridor']
+wav = model.generate(descriptions) # generates 3 samples.`
+];
+var audiocraft = (model) => {
+  if (model.tags?.includes("musicgen")) {
+    return musicgen(model);
+  } else if (model.tags?.includes("audiogen")) {
+    return audiogen(model);
+  } else if (model.tags?.includes("magnet")) {
+    return magnet(model);
+  } else {
+    return [`# Type of model unknown.`];
+  }
+};
 
 // src/model-libraries.ts
 var MODEL_LIBRARIES_UI_ELEMENTS = {
@@ -4474,6 +4509,13 @@ var MODEL_LIBRARIES_UI_ELEMENTS = {
       term: { path: "pytorch_model.bin" }
     }
   },
+  audiocraft: {
+    prettyLabel: "Audiocraft",
+    repoName: "audiocraft",
+    repoUrl: "https://github.com/facebookresearch/audiocraft",
+    snippets: audiocraft,
+    filter: false
+  },
   bertopic: {
     prettyLabel: "BERTopic",
     repoName: "BERTopic",