@huggingface/tasks 0.9.1 → 0.10.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +12 -14
- package/dist/index.cjs +91 -18
- package/dist/index.js +90 -18
- package/dist/scripts/inference-codegen.d.ts +2 -0
- package/dist/scripts/inference-codegen.d.ts.map +1 -0
- package/dist/scripts/inference-tgi-import.d.ts +2 -0
- package/dist/scripts/inference-tgi-import.d.ts.map +1 -0
- package/dist/src/default-widget-inputs.d.ts +6 -0
- package/dist/src/default-widget-inputs.d.ts.map +1 -0
- package/dist/src/index.d.ts +17 -0
- package/dist/src/index.d.ts.map +1 -0
- package/dist/src/library-to-tasks.d.ts +11 -0
- package/dist/src/library-to-tasks.d.ts.map +1 -0
- package/dist/src/local-apps.d.ts +104 -0
- package/dist/src/local-apps.d.ts.map +1 -0
- package/dist/src/model-data.d.ts +144 -0
- package/dist/src/model-data.d.ts.map +1 -0
- package/dist/src/model-libraries-downloads.d.ts +26 -0
- package/dist/src/model-libraries-downloads.d.ts.map +1 -0
- package/dist/src/model-libraries-snippets.d.ts +43 -0
- package/dist/src/model-libraries-snippets.d.ts.map +1 -0
- package/dist/src/model-libraries.d.ts +501 -0
- package/dist/src/model-libraries.d.ts.map +1 -0
- package/dist/src/pipelines.d.ts +404 -0
- package/dist/src/pipelines.d.ts.map +1 -0
- package/dist/src/snippets/curl.d.ts +9 -0
- package/dist/src/snippets/curl.d.ts.map +1 -0
- package/dist/src/snippets/index.d.ts +6 -0
- package/dist/src/snippets/index.d.ts.map +1 -0
- package/dist/src/snippets/inputs.d.ts +3 -0
- package/dist/src/snippets/inputs.d.ts.map +1 -0
- package/dist/src/snippets/js.d.ts +11 -0
- package/dist/src/snippets/js.d.ts.map +1 -0
- package/dist/src/snippets/python.d.ts +14 -0
- package/dist/src/snippets/python.d.ts.map +1 -0
- package/dist/src/snippets/types.d.ts +8 -0
- package/dist/src/snippets/types.d.ts.map +1 -0
- package/dist/src/tasks/audio-classification/data.d.ts +4 -0
- package/dist/src/tasks/audio-classification/data.d.ts.map +1 -0
- package/dist/src/tasks/audio-classification/inference.d.ts +52 -0
- package/dist/src/tasks/audio-classification/inference.d.ts.map +1 -0
- package/dist/src/tasks/audio-to-audio/data.d.ts +4 -0
- package/dist/src/tasks/audio-to-audio/data.d.ts.map +1 -0
- package/dist/src/tasks/automatic-speech-recognition/data.d.ts +4 -0
- package/dist/src/tasks/automatic-speech-recognition/data.d.ts.map +1 -0
- package/dist/src/tasks/automatic-speech-recognition/inference.d.ts +154 -0
- package/dist/src/tasks/automatic-speech-recognition/inference.d.ts.map +1 -0
- package/dist/src/tasks/chat-completion/inference.d.ts +254 -0
- package/dist/src/tasks/chat-completion/inference.d.ts.map +1 -0
- package/dist/src/tasks/depth-estimation/data.d.ts +4 -0
- package/dist/src/tasks/depth-estimation/data.d.ts.map +1 -0
- package/dist/src/tasks/depth-estimation/inference.d.ts +36 -0
- package/dist/src/tasks/depth-estimation/inference.d.ts.map +1 -0
- package/dist/src/tasks/document-question-answering/data.d.ts +4 -0
- package/dist/src/tasks/document-question-answering/data.d.ts.map +1 -0
- package/dist/src/tasks/document-question-answering/inference.d.ts +111 -0
- package/dist/src/tasks/document-question-answering/inference.d.ts.map +1 -0
- package/dist/src/tasks/feature-extraction/data.d.ts +4 -0
- package/dist/src/tasks/feature-extraction/data.d.ts.map +1 -0
- package/dist/src/tasks/feature-extraction/inference.d.ts +23 -0
- package/dist/src/tasks/feature-extraction/inference.d.ts.map +1 -0
- package/dist/src/tasks/fill-mask/data.d.ts +4 -0
- package/dist/src/tasks/fill-mask/data.d.ts.map +1 -0
- package/dist/src/tasks/fill-mask/inference.d.ts +63 -0
- package/dist/src/tasks/fill-mask/inference.d.ts.map +1 -0
- package/dist/src/tasks/image-classification/data.d.ts +4 -0
- package/dist/src/tasks/image-classification/data.d.ts.map +1 -0
- package/dist/src/tasks/image-classification/inference.d.ts +52 -0
- package/dist/src/tasks/image-classification/inference.d.ts.map +1 -0
- package/dist/src/tasks/image-feature-extraction/data.d.ts +4 -0
- package/dist/src/tasks/image-feature-extraction/data.d.ts.map +1 -0
- package/dist/src/tasks/image-segmentation/data.d.ts +4 -0
- package/dist/src/tasks/image-segmentation/data.d.ts.map +1 -0
- package/dist/src/tasks/image-segmentation/inference.d.ts +66 -0
- package/dist/src/tasks/image-segmentation/inference.d.ts.map +1 -0
- package/dist/src/tasks/image-to-image/data.d.ts +4 -0
- package/dist/src/tasks/image-to-image/data.d.ts.map +1 -0
- package/dist/src/tasks/image-to-image/inference.d.ts +64 -0
- package/dist/src/tasks/image-to-image/inference.d.ts.map +1 -0
- package/dist/src/tasks/image-to-text/data.d.ts +4 -0
- package/dist/src/tasks/image-to-text/data.d.ts.map +1 -0
- package/dist/src/tasks/image-to-text/inference.d.ts +139 -0
- package/dist/src/tasks/image-to-text/inference.d.ts.map +1 -0
- package/dist/src/tasks/index.d.ts +87 -0
- package/dist/src/tasks/index.d.ts.map +1 -0
- package/dist/src/tasks/mask-generation/data.d.ts +4 -0
- package/dist/src/tasks/mask-generation/data.d.ts.map +1 -0
- package/dist/src/tasks/object-detection/data.d.ts +4 -0
- package/dist/src/tasks/object-detection/data.d.ts.map +1 -0
- package/dist/src/tasks/object-detection/inference.d.ts +63 -0
- package/dist/src/tasks/object-detection/inference.d.ts.map +1 -0
- package/dist/src/tasks/placeholder/data.d.ts +4 -0
- package/dist/src/tasks/placeholder/data.d.ts.map +1 -0
- package/dist/src/tasks/question-answering/data.d.ts +4 -0
- package/dist/src/tasks/question-answering/data.d.ts.map +1 -0
- package/dist/src/tasks/question-answering/inference.d.ts +100 -0
- package/dist/src/tasks/question-answering/inference.d.ts.map +1 -0
- package/dist/src/tasks/reinforcement-learning/data.d.ts +4 -0
- package/dist/src/tasks/reinforcement-learning/data.d.ts.map +1 -0
- package/dist/src/tasks/sentence-similarity/data.d.ts +4 -0
- package/dist/src/tasks/sentence-similarity/data.d.ts.map +1 -0
- package/dist/src/tasks/sentence-similarity/inference.d.ts +32 -0
- package/dist/src/tasks/sentence-similarity/inference.d.ts.map +1 -0
- package/dist/src/tasks/summarization/data.d.ts +4 -0
- package/dist/src/tasks/summarization/data.d.ts.map +1 -0
- package/dist/src/tasks/summarization/inference.d.ts +55 -0
- package/dist/src/tasks/summarization/inference.d.ts.map +1 -0
- package/dist/src/tasks/table-question-answering/data.d.ts +4 -0
- package/dist/src/tasks/table-question-answering/data.d.ts.map +1 -0
- package/dist/src/tasks/table-question-answering/inference.d.ts +62 -0
- package/dist/src/tasks/table-question-answering/inference.d.ts.map +1 -0
- package/dist/src/tasks/tabular-classification/data.d.ts +4 -0
- package/dist/src/tasks/tabular-classification/data.d.ts.map +1 -0
- package/dist/src/tasks/tabular-regression/data.d.ts +4 -0
- package/dist/src/tasks/tabular-regression/data.d.ts.map +1 -0
- package/dist/src/tasks/text-classification/data.d.ts +4 -0
- package/dist/src/tasks/text-classification/data.d.ts.map +1 -0
- package/dist/src/tasks/text-classification/inference.d.ts +52 -0
- package/dist/src/tasks/text-classification/inference.d.ts.map +1 -0
- package/dist/src/tasks/text-generation/data.d.ts +4 -0
- package/dist/src/tasks/text-generation/data.d.ts.map +1 -0
- package/dist/src/tasks/text-generation/inference.d.ts +126 -0
- package/dist/src/tasks/text-generation/inference.d.ts.map +1 -0
- package/dist/src/tasks/text-to-audio/inference.d.ts +139 -0
- package/dist/src/tasks/text-to-audio/inference.d.ts.map +1 -0
- package/dist/src/tasks/text-to-image/data.d.ts +4 -0
- package/dist/src/tasks/text-to-image/data.d.ts.map +1 -0
- package/dist/src/tasks/text-to-image/inference.d.ts +68 -0
- package/dist/src/tasks/text-to-image/inference.d.ts.map +1 -0
- package/dist/src/tasks/text-to-speech/data.d.ts +4 -0
- package/dist/src/tasks/text-to-speech/data.d.ts.map +1 -0
- package/dist/src/tasks/text-to-speech/inference.d.ts +143 -0
- package/dist/src/tasks/text-to-speech/inference.d.ts.map +1 -0
- package/dist/src/tasks/text-to-video/data.d.ts +4 -0
- package/dist/src/tasks/text-to-video/data.d.ts.map +1 -0
- package/dist/src/tasks/text2text-generation/inference.d.ts +54 -0
- package/dist/src/tasks/text2text-generation/inference.d.ts.map +1 -0
- package/dist/src/tasks/token-classification/data.d.ts +4 -0
- package/dist/src/tasks/token-classification/data.d.ts.map +1 -0
- package/dist/src/tasks/token-classification/inference.d.ts +83 -0
- package/dist/src/tasks/token-classification/inference.d.ts.map +1 -0
- package/dist/src/tasks/translation/data.d.ts +4 -0
- package/dist/src/tasks/translation/data.d.ts.map +1 -0
- package/dist/src/tasks/translation/inference.d.ts +55 -0
- package/dist/src/tasks/translation/inference.d.ts.map +1 -0
- package/dist/src/tasks/unconditional-image-generation/data.d.ts +4 -0
- package/dist/src/tasks/unconditional-image-generation/data.d.ts.map +1 -0
- package/dist/src/tasks/video-classification/data.d.ts +4 -0
- package/dist/src/tasks/video-classification/data.d.ts.map +1 -0
- package/dist/src/tasks/video-classification/inference.d.ts +60 -0
- package/dist/src/tasks/video-classification/inference.d.ts.map +1 -0
- package/dist/src/tasks/visual-question-answering/data.d.ts +4 -0
- package/dist/src/tasks/visual-question-answering/data.d.ts.map +1 -0
- package/dist/src/tasks/visual-question-answering/inference.d.ts +64 -0
- package/dist/src/tasks/visual-question-answering/inference.d.ts.map +1 -0
- package/dist/src/tasks/zero-shot-classification/data.d.ts +4 -0
- package/dist/src/tasks/zero-shot-classification/data.d.ts.map +1 -0
- package/dist/src/tasks/zero-shot-classification/inference.d.ts +68 -0
- package/dist/src/tasks/zero-shot-classification/inference.d.ts.map +1 -0
- package/dist/src/tasks/zero-shot-image-classification/data.d.ts +4 -0
- package/dist/src/tasks/zero-shot-image-classification/data.d.ts.map +1 -0
- package/dist/src/tasks/zero-shot-image-classification/inference.d.ts +62 -0
- package/dist/src/tasks/zero-shot-image-classification/inference.d.ts.map +1 -0
- package/dist/src/tasks/zero-shot-object-detection/data.d.ts +4 -0
- package/dist/src/tasks/zero-shot-object-detection/data.d.ts.map +1 -0
- package/dist/src/tasks/zero-shot-object-detection/inference.d.ts +67 -0
- package/dist/src/tasks/zero-shot-object-detection/inference.d.ts.map +1 -0
- package/dist/src/tokenizer-data.d.ts +26 -0
- package/dist/src/tokenizer-data.d.ts.map +1 -0
- package/dist/src/widget-example.d.ts +86 -0
- package/dist/src/widget-example.d.ts.map +1 -0
- package/package.json +8 -6
- package/src/index.ts +3 -0
- package/src/local-apps.ts +119 -0
- package/src/model-data.ts +1 -5
- package/src/model-libraries-snippets.ts +21 -18
- package/src/model-libraries.ts +9 -0
- package/src/tasks/chat-completion/inference.ts +204 -85
- package/src/tasks/chat-completion/spec/input.json +198 -34
- package/src/tasks/chat-completion/spec/output.json +178 -40
- package/src/tasks/chat-completion/spec/stream_output.json +170 -0
- package/src/tasks/index.ts +7 -8
- package/src/tasks/text-generation/inference.ts +58 -170
- package/src/tasks/text-generation/spec/input.json +130 -29
- package/src/tasks/text-generation/spec/output.json +104 -90
- package/src/tasks/text-generation/spec/stream_output.json +97 -0
- package/tsconfig.json +3 -1
- package/dist/index.d.ts +0 -3542
- package/src/tasks/chat-completion/spec/output_stream.json +0 -48
- package/src/tasks/text-generation/spec/output_stream.json +0 -47
package/README.md
CHANGED
@@ -1,8 +1,18 @@
 # Tasks

-This package contains
+This package contains the definition files (written in Typescript) for the huggingface.co hub's:
+
+- **pipeline types** (a.k.a. **task types**) - used to determine which widget to display on the model page, and which inference API to run.
+- **default widget inputs** - when they aren't provided in the model card.
+- definitions and UI elements for **model libraries** (and soon for **dataset libraries**).
+
+Please add any missing ones to these definitions by opening a PR. Thanks 🔥

-
+⚠️ The hub's definitive doc is at https://huggingface.co/docs/hub.
+
+## Definition of Tasks
+
+This package also contains data used to define https://huggingface.co/tasks.

 The Task pages are made to lower the barrier of entry to understand a task that can be solved with machine learning and use or train a model to accomplish it. It's a collaborative documentation effort made to help out software developers, social scientists, or anyone with no background in machine learning that is interested in understanding how machine learning models can be used to solve a problem.

@@ -19,16 +29,4 @@ We have a [`dataset`](https://huggingface.co/datasets/huggingfacejs/tasks) that

 This might seem overwhelming, but you don't necessarily need to add all of these in one pull request or on your own, you can simply contribute one section. Feel free to ask for help whenever you need.

-## Other data
-
-This package contains the definition files (written in Typescript) for the huggingface.co hub's:
-
-- **pipeline types** a.k.a. **task types** (used to determine which widget to display on the model page, and which inference API to run)
-- **default widget inputs** (when they aren't provided in the model card)
-- definitions and UI elements for **model libraries** (and soon for **dataset libraries**).
-
-Please add to any of those definitions by opening a PR. Thanks 🔥
-
-⚠️ The hub's definitive doc is at https://huggingface.co/docs/hub.
-
 ## Feedback (feature requests, bugs, etc.) is super welcome 💙💚💛💜♥️🧡
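
For orientation, a minimal TypeScript sketch (not part of the diff) of how the definitions listed in the README are consumed from the package; the `"en"` key is an illustrative assumption, not guaranteed to exist:

```ts
import { PIPELINE_DATA, MAPPING_DEFAULT_WIDGET, MODEL_LIBRARIES_UI_ELEMENTS, LOCAL_APPS } from "@huggingface/tasks";

// Pipeline (task) types known to the hub:
const taskTypes = Object.keys(PIPELINE_DATA);

// Default widget inputs, keyed by language and then by widget type
// ("en" is an assumed key, shown only for illustration):
const englishDefaults = MAPPING_DEFAULT_WIDGET.get("en");

// UI definitions for model libraries, plus the local-app registry added in 0.10.x:
const libraryKeys = Object.keys(MODEL_LIBRARIES_UI_ELEMENTS);
const localAppKeys = Object.keys(LOCAL_APPS);
```
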
package/dist/index.cjs
CHANGED
@@ -24,6 +24,7 @@ __export(src_exports, {
 ALL_MODEL_LIBRARY_KEYS: () => ALL_MODEL_LIBRARY_KEYS,
 InferenceDisplayability: () => InferenceDisplayability,
 LIBRARY_TASK_MAPPING: () => LIBRARY_TASK_MAPPING,
+LOCAL_APPS: () => LOCAL_APPS,
 MAPPING_DEFAULT_WIDGET: () => MAPPING_DEFAULT_WIDGET,
 MODALITIES: () => MODALITIES,
 MODALITY_LABELS: () => MODALITY_LABELS,
@@ -4067,7 +4068,7 @@ predictor_input = {"passage": "My name is Wolfgang and I live in Berlin", "quest
 predictions = predictor.predict_json(predictor_input)`
 ];
 var allennlp = (model) => {
-if (model.tags
+if (model.tags.includes("question-answering")) {
 return allennlpQuestionAnswering(model);
 }
 return allennlpUnknown(model);
@@ -4111,11 +4112,11 @@ pipeline = DiffusionPipeline.from_pretrained("${get_base_diffusers_model(model)}
 pipeline.load_textual_inversion("${model.id}")`
 ];
 var diffusers = (model) => {
-if (model.tags
+if (model.tags.includes("controlnet")) {
 return diffusers_controlnet(model);
-} else if (model.tags
+} else if (model.tags.includes("lora")) {
 return diffusers_lora(model);
-} else if (model.tags
+} else if (model.tags.includes("textual_inversion")) {
 return diffusers_textual_inversion(model);
 } else {
 return diffusers_default(model);
@@ -4140,9 +4141,9 @@ text, *_ = model(speech)[0]`
 ];
 var espnetUnknown = () => [`unknown model type (must be text-to-speech or automatic-speech-recognition)`];
 var espnet = (model) => {
-if (model.tags
+if (model.tags.includes("text-to-speech")) {
 return espnetTTS(model);
-} else if (model.tags
+} else if (model.tags.includes("automatic-speech-recognition")) {
 return espnetASR(model);
 }
 return espnetUnknown();
@@ -4171,7 +4172,10 @@ model = from_pretrained_keras("${model.id}")
 `
 ];
 var keras_nlp = (model) => [
-
+`# Available backend options are: "jax", "tensorflow", "torch".
+os.environ["KERAS_BACKEND"] = "tensorflow"
+
+import keras_nlp

 tokenizer = keras_nlp.models.Tokenizer.from_preset("hf://${model.id}")
 backbone = keras_nlp.models.Backbone.from_preset("hf://${model.id}")
@@ -4237,7 +4241,7 @@ excerpt = Segment(start=2.0, end=5.0)
 inference.crop("file.wav", excerpt)`
 ];
 var pyannote_audio = (model) => {
-if (model.tags
+if (model.tags.includes("pyannote-audio-pipeline")) {
 return pyannote_audio_pipeline(model);
 }
 return pyannote_audio_model(model);
@@ -4263,9 +4267,9 @@ model = TFAutoModel.from_pretrained("${model.id}")
 `
 ];
 var tensorflowtts = (model) => {
-if (model.tags
+if (model.tags.includes("text-to-mel")) {
 return tensorflowttsTextToMel(model);
-} else if (model.tags
+} else if (model.tags.includes("mel-to-wav")) {
 return tensorflowttsMelToWav(model);
 }
 return tensorflowttsUnknown(model);
@@ -4309,7 +4313,7 @@ model = joblib.load(
 ];
 };
 var sklearn = (model) => {
-if (model.tags
+if (model.tags.includes("skops")) {
 const skopsmodelFile = model.config?.sklearn?.model?.file;
 const skopssaveFormat = model.config?.sklearn?.model_format;
 if (!skopsmodelFile) {
@@ -4401,7 +4405,7 @@ var transformers = (model) => {
 if (!info) {
 return [`# \u26A0\uFE0F Type of model unknown`];
 }
-const remote_code_snippet = model.tags
+const remote_code_snippet = model.tags.includes(TAG_CUSTOM_CODE) ? ", trust_remote_code=True" : "";
 let autoSnippet;
 if (info.processor) {
 const varName = info.processor === "AutoTokenizer" ? "tokenizer" : info.processor === "AutoFeatureExtractor" ? "extractor" : "processor";
@@ -4471,8 +4475,8 @@ var peft = (model) => {
 from transformers import AutoModelFor${pefttask}

 config = PeftConfig.from_pretrained("${model.id}")
-
-model = PeftModel.from_pretrained(
+base_model = AutoModelFor${pefttask}.from_pretrained("${peftBaseModel}")
+model = PeftModel.from_pretrained(base_model, "${model.id}")`
 ];
 };
 var fasttext = (model) => [
@@ -4529,7 +4533,7 @@ model = create_model(${model.id})`
 ];
 var nemo = (model) => {
 let command = void 0;
-if (model.tags
+if (model.tags.includes("automatic-speech-recognition")) {
 command = nemoDomainResolver("ASR", model);
 }
 return command ?? [`# tag did not correspond to a valid NeMo domain.`];
@@ -4564,11 +4568,11 @@ descriptions = ['dog barking', 'sirene of an emergency vehicle', 'footsteps in a
 wav = model.generate(descriptions) # generates 3 samples.`
 ];
 var audiocraft = (model) => {
-if (model.tags
+if (model.tags.includes("musicgen")) {
 return musicgen(model);
-} else if (model.tags
+} else if (model.tags.includes("audiogen")) {
 return audiogen(model);
-} else if (model.tags
+} else if (model.tags.includes("magnet")) {
 return magnet(model);
 } else {
 return [`# Type of model unknown.`];
@@ -4899,6 +4903,15 @@ var MODEL_LIBRARIES_UI_ELEMENTS = {
 repoUrl: "https://github.com/TensorSpeech/TensorFlowTTS",
 snippets: tensorflowtts
 },
+timesfm: {
+prettyLabel: "TimesFM",
+repoName: "timesfm",
+repoUrl: "https://github.com/google-research/timesfm",
+filter: false,
+countDownloads: {
+term: { path: "checkpoints/checkpoint_1100000/state/checkpoint" }
+}
+},
 timm: {
 prettyLabel: "timm",
 repoName: "pytorch-image-models",
@@ -5430,12 +5443,72 @@ function getJsInferenceSnippet(model, accessToken) {
 function hasJsInferenceSnippet(model) {
 return !!model.pipeline_tag && model.pipeline_tag in jsSnippets;
 }
+
+// src/local-apps.ts
+function isGgufModel(model) {
+return model.tags.includes("gguf");
+}
+var snippetLlamacpp = (model) => {
+return `./main --hf-repo ${model.id} -m file.gguf -p "I believe the meaning of life is " -n 128`;
+};
+var LOCAL_APPS = {
+"llama.cpp": {
+prettyLabel: "llama.cpp",
+docsUrl: "https://github.com/ggerganov/llama.cpp",
+mainTask: "text-generation",
+displayOnModelPage: isGgufModel,
+snippet: snippetLlamacpp
+},
+lmstudio: {
+prettyLabel: "LM Studio",
+docsUrl: "https://lmstudio.ai",
+mainTask: "text-generation",
+displayOnModelPage: isGgufModel,
+deeplink: (model) => new URL(`lmstudio://open_from_hf?model=${model.id}`)
+},
+jan: {
+prettyLabel: "Jan",
+docsUrl: "https://jan.ai",
+mainTask: "text-generation",
+displayOnModelPage: isGgufModel,
+deeplink: (model) => new URL(`jan://open_from_hf?model=${model.id}`)
+},
+faraday: {
+prettyLabel: "Faraday",
+docsUrl: "https://faraday.dev",
+mainTask: "text-generation",
+macOSOnly: true,
+displayOnModelPage: isGgufModel,
+deeplink: (model) => new URL(`faraday://open_from_hf?model=${model.id}`)
+},
+drawthings: {
+prettyLabel: "Draw Things",
+docsUrl: "https://drawthings.ai",
+mainTask: "text-to-image",
+macOSOnly: true,
+/**
+* random function, will need to refine the actual conditions:
+*/
+displayOnModelPage: (model) => model.tags.includes("textual_inversion"),
+deeplink: (model) => new URL(`drawthings://open_from_hf?model=${model.id}`)
+},
+diffusionbee: {
+prettyLabel: "DiffusionBee",
+docsUrl: "https://diffusionbee.com",
+mainTask: "text-to-image",
+macOSOnly: true,
+comingSoon: true,
+displayOnModelPage: (model) => model.library_name === "diffusers" && model.pipeline_tag === "text-to-image",
+deeplink: (model) => new URL(`diffusionbee://open_from_hf?model=${model.id}`)
+}
+};
 // Annotate the CommonJS export names for ESM import in node:
 0 && (module.exports = {
 ALL_DISPLAY_MODEL_LIBRARY_KEYS,
 ALL_MODEL_LIBRARY_KEYS,
 InferenceDisplayability,
 LIBRARY_TASK_MAPPING,
+LOCAL_APPS,
 MAPPING_DEFAULT_WIDGET,
 MODALITIES,
 MODALITY_LABELS,
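
A consumer-side sketch of the `LOCAL_APPS` registry introduced above (illustrative only; `localAppActions` is a hypothetical helper). Each entry exposes either a `deeplink` or a terminal `snippet`, so an `in` check narrows the union:

```ts
import { LOCAL_APPS } from "@huggingface/tasks";
import type { ModelData } from "@huggingface/tasks";

// For a given model, list the local apps that want to appear on its page,
// together with the action each one offers.
function localAppActions(model: ModelData): { label: string; action: string }[] {
  return Object.values(LOCAL_APPS)
    .filter((app) => app.displayOnModelPage(model))
    .map((app) =>
      "deeplink" in app
        ? { label: app.prettyLabel, action: app.deeplink(model).toString() }
        : { label: app.prettyLabel, action: app.snippet(model) }
    );
}
```
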
package/dist/index.js
CHANGED
@@ -4032,7 +4032,7 @@ predictor_input = {"passage": "My name is Wolfgang and I live in Berlin", "quest
 predictions = predictor.predict_json(predictor_input)`
 ];
 var allennlp = (model) => {
-if (model.tags
+if (model.tags.includes("question-answering")) {
 return allennlpQuestionAnswering(model);
 }
 return allennlpUnknown(model);
@@ -4076,11 +4076,11 @@ pipeline = DiffusionPipeline.from_pretrained("${get_base_diffusers_model(model)}
 pipeline.load_textual_inversion("${model.id}")`
 ];
 var diffusers = (model) => {
-if (model.tags
+if (model.tags.includes("controlnet")) {
 return diffusers_controlnet(model);
-} else if (model.tags
+} else if (model.tags.includes("lora")) {
 return diffusers_lora(model);
-} else if (model.tags
+} else if (model.tags.includes("textual_inversion")) {
 return diffusers_textual_inversion(model);
 } else {
 return diffusers_default(model);
@@ -4105,9 +4105,9 @@ text, *_ = model(speech)[0]`
 ];
 var espnetUnknown = () => [`unknown model type (must be text-to-speech or automatic-speech-recognition)`];
 var espnet = (model) => {
-if (model.tags
+if (model.tags.includes("text-to-speech")) {
 return espnetTTS(model);
-} else if (model.tags
+} else if (model.tags.includes("automatic-speech-recognition")) {
 return espnetASR(model);
 }
 return espnetUnknown();
@@ -4136,7 +4136,10 @@ model = from_pretrained_keras("${model.id}")
 `
 ];
 var keras_nlp = (model) => [
-
+`# Available backend options are: "jax", "tensorflow", "torch".
+os.environ["KERAS_BACKEND"] = "tensorflow"
+
+import keras_nlp

 tokenizer = keras_nlp.models.Tokenizer.from_preset("hf://${model.id}")
 backbone = keras_nlp.models.Backbone.from_preset("hf://${model.id}")
@@ -4202,7 +4205,7 @@ excerpt = Segment(start=2.0, end=5.0)
 inference.crop("file.wav", excerpt)`
 ];
 var pyannote_audio = (model) => {
-if (model.tags
+if (model.tags.includes("pyannote-audio-pipeline")) {
 return pyannote_audio_pipeline(model);
 }
 return pyannote_audio_model(model);
@@ -4228,9 +4231,9 @@ model = TFAutoModel.from_pretrained("${model.id}")
 `
 ];
 var tensorflowtts = (model) => {
-if (model.tags
+if (model.tags.includes("text-to-mel")) {
 return tensorflowttsTextToMel(model);
-} else if (model.tags
+} else if (model.tags.includes("mel-to-wav")) {
 return tensorflowttsMelToWav(model);
 }
 return tensorflowttsUnknown(model);
@@ -4274,7 +4277,7 @@ model = joblib.load(
 ];
 };
 var sklearn = (model) => {
-if (model.tags
+if (model.tags.includes("skops")) {
 const skopsmodelFile = model.config?.sklearn?.model?.file;
 const skopssaveFormat = model.config?.sklearn?.model_format;
 if (!skopsmodelFile) {
@@ -4366,7 +4369,7 @@ var transformers = (model) => {
 if (!info) {
 return [`# \u26A0\uFE0F Type of model unknown`];
 }
-const remote_code_snippet = model.tags
+const remote_code_snippet = model.tags.includes(TAG_CUSTOM_CODE) ? ", trust_remote_code=True" : "";
 let autoSnippet;
 if (info.processor) {
 const varName = info.processor === "AutoTokenizer" ? "tokenizer" : info.processor === "AutoFeatureExtractor" ? "extractor" : "processor";
@@ -4436,8 +4439,8 @@ var peft = (model) => {
 from transformers import AutoModelFor${pefttask}

 config = PeftConfig.from_pretrained("${model.id}")
-
-model = PeftModel.from_pretrained(
+base_model = AutoModelFor${pefttask}.from_pretrained("${peftBaseModel}")
+model = PeftModel.from_pretrained(base_model, "${model.id}")`
 ];
 };
 var fasttext = (model) => [
@@ -4494,7 +4497,7 @@ model = create_model(${model.id})`
 ];
 var nemo = (model) => {
 let command = void 0;
-if (model.tags
+if (model.tags.includes("automatic-speech-recognition")) {
 command = nemoDomainResolver("ASR", model);
 }
 return command ?? [`# tag did not correspond to a valid NeMo domain.`];
@@ -4529,11 +4532,11 @@ descriptions = ['dog barking', 'sirene of an emergency vehicle', 'footsteps in a
 wav = model.generate(descriptions) # generates 3 samples.`
 ];
 var audiocraft = (model) => {
-if (model.tags
+if (model.tags.includes("musicgen")) {
 return musicgen(model);
-} else if (model.tags
+} else if (model.tags.includes("audiogen")) {
 return audiogen(model);
-} else if (model.tags
+} else if (model.tags.includes("magnet")) {
 return magnet(model);
 } else {
 return [`# Type of model unknown.`];
@@ -4864,6 +4867,15 @@ var MODEL_LIBRARIES_UI_ELEMENTS = {
 repoUrl: "https://github.com/TensorSpeech/TensorFlowTTS",
 snippets: tensorflowtts
 },
+timesfm: {
+prettyLabel: "TimesFM",
+repoName: "timesfm",
+repoUrl: "https://github.com/google-research/timesfm",
+filter: false,
+countDownloads: {
+term: { path: "checkpoints/checkpoint_1100000/state/checkpoint" }
+}
+},
 timm: {
 prettyLabel: "timm",
 repoName: "pytorch-image-models",
@@ -5395,11 +5407,71 @@ function getJsInferenceSnippet(model, accessToken) {
 function hasJsInferenceSnippet(model) {
 return !!model.pipeline_tag && model.pipeline_tag in jsSnippets;
 }
+
+// src/local-apps.ts
+function isGgufModel(model) {
+return model.tags.includes("gguf");
+}
+var snippetLlamacpp = (model) => {
+return `./main --hf-repo ${model.id} -m file.gguf -p "I believe the meaning of life is " -n 128`;
+};
+var LOCAL_APPS = {
+"llama.cpp": {
+prettyLabel: "llama.cpp",
+docsUrl: "https://github.com/ggerganov/llama.cpp",
+mainTask: "text-generation",
+displayOnModelPage: isGgufModel,
+snippet: snippetLlamacpp
+},
+lmstudio: {
+prettyLabel: "LM Studio",
+docsUrl: "https://lmstudio.ai",
+mainTask: "text-generation",
+displayOnModelPage: isGgufModel,
+deeplink: (model) => new URL(`lmstudio://open_from_hf?model=${model.id}`)
+},
+jan: {
+prettyLabel: "Jan",
+docsUrl: "https://jan.ai",
+mainTask: "text-generation",
+displayOnModelPage: isGgufModel,
+deeplink: (model) => new URL(`jan://open_from_hf?model=${model.id}`)
+},
+faraday: {
+prettyLabel: "Faraday",
+docsUrl: "https://faraday.dev",
+mainTask: "text-generation",
+macOSOnly: true,
+displayOnModelPage: isGgufModel,
+deeplink: (model) => new URL(`faraday://open_from_hf?model=${model.id}`)
+},
+drawthings: {
+prettyLabel: "Draw Things",
+docsUrl: "https://drawthings.ai",
+mainTask: "text-to-image",
+macOSOnly: true,
+/**
+* random function, will need to refine the actual conditions:
+*/
+displayOnModelPage: (model) => model.tags.includes("textual_inversion"),
+deeplink: (model) => new URL(`drawthings://open_from_hf?model=${model.id}`)
+},
+diffusionbee: {
+prettyLabel: "DiffusionBee",
+docsUrl: "https://diffusionbee.com",
+mainTask: "text-to-image",
+macOSOnly: true,
+comingSoon: true,
+displayOnModelPage: (model) => model.library_name === "diffusers" && model.pipeline_tag === "text-to-image",
+deeplink: (model) => new URL(`diffusionbee://open_from_hf?model=${model.id}`)
+}
+};
 export {
 ALL_DISPLAY_MODEL_LIBRARY_KEYS,
 ALL_MODEL_LIBRARY_KEYS,
 InferenceDisplayability,
 LIBRARY_TASK_MAPPING,
+LOCAL_APPS,
 MAPPING_DEFAULT_WIDGET,
 MODALITIES,
 MODALITY_LABELS,
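
The ESM build above carries the same changes, including the new `timesfm` entry in `MODEL_LIBRARIES_UI_ELEMENTS`. A small, illustrative lookup against that registry (not code from the package):

```ts
import { MODEL_LIBRARIES_UI_ELEMENTS } from "@huggingface/tasks";

// "timesfm" is the library entry added in this release; prettyLabel and repoUrl
// are the human-readable label and repository link used in the hub UI.
const timesfm = MODEL_LIBRARIES_UI_ELEMENTS["timesfm"];
if (timesfm) {
  console.log(`${timesfm.prettyLabel} -> ${timesfm.repoUrl}`);
}
```
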
package/dist/scripts/inference-codegen.d.ts.map
ADDED
{"version":3,"file":"inference-codegen.d.ts","sourceRoot":"","sources":["../../scripts/inference-codegen.ts"],"names":[],"mappings":""}

package/dist/scripts/inference-tgi-import.d.ts.map
ADDED
{"version":3,"file":"inference-tgi-import.d.ts","sourceRoot":"","sources":["../../scripts/inference-tgi-import.ts"],"names":[],"mappings":""}

package/dist/src/default-widget-inputs.d.ts
ADDED
import type { WidgetExample } from "./widget-example";
import type { WidgetType } from "./pipelines";
type PerLanguageMapping = Map<WidgetType, string[] | WidgetExample[]>;
export declare const MAPPING_DEFAULT_WIDGET: Map<string, PerLanguageMapping>;
export {};
//# sourceMappingURL=default-widget-inputs.d.ts.map

package/dist/src/default-widget-inputs.d.ts.map
ADDED
{"version":3,"file":"default-widget-inputs.d.ts","sourceRoot":"","sources":["../../src/default-widget-inputs.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,aAAa,EAAE,MAAM,kBAAkB,CAAC;AACtD,OAAO,KAAK,EAAE,UAAU,EAAE,MAAM,aAAa,CAAC;AAI9C,KAAK,kBAAkB,GAAG,GAAG,CAAC,UAAU,EAAE,MAAM,EAAE,GAAG,aAAa,EAAE,CAAC,CAAC;AAyrBtE,eAAO,MAAM,sBAAsB,iCAejC,CAAC"}

package/dist/src/index.d.ts
ADDED
export { LIBRARY_TASK_MAPPING } from "./library-to-tasks";
export { MAPPING_DEFAULT_WIDGET } from "./default-widget-inputs";
export type { TaskData, TaskDemo, TaskDemoEntry, ExampleRepo } from "./tasks";
export * from "./tasks";
export { PIPELINE_DATA, PIPELINE_TYPES, type WidgetType, type PipelineType, type PipelineData, type Modality, MODALITIES, MODALITY_LABELS, SUBTASK_TYPES, PIPELINE_TYPES_SET, } from "./pipelines";
export { ALL_DISPLAY_MODEL_LIBRARY_KEYS, ALL_MODEL_LIBRARY_KEYS, MODEL_LIBRARIES_UI_ELEMENTS } from "./model-libraries";
export type { LibraryUiElement, ModelLibraryKey } from "./model-libraries";
export type { ModelData, TransformersInfo } from "./model-data";
export type { AddedToken, SpecialTokensMap, TokenizerConfig } from "./tokenizer-data";
export type { ChatMessage, WidgetExample, WidgetExampleAttribute, WidgetExampleAssetAndPromptInput, WidgetExampleAssetAndTextInput, WidgetExampleAssetAndZeroShotInput, WidgetExampleAssetInput, WidgetExampleChatInput, WidgetExampleSentenceSimilarityInput, WidgetExampleStructuredDataInput, WidgetExampleTableDataInput, WidgetExampleTextAndContextInput, WidgetExampleTextAndTableInput, WidgetExampleTextInput, WidgetExampleZeroShotTextInput, WidgetExampleOutput, WidgetExampleOutputUrl, WidgetExampleOutputLabels, WidgetExampleOutputAnswerScore, WidgetExampleOutputText, } from "./widget-example";
export { InferenceDisplayability } from "./model-data";
export { SPECIAL_TOKENS_ATTRIBUTES } from "./tokenizer-data";
import * as snippets from "./snippets";
export { snippets };
export { LOCAL_APPS } from "./local-apps";
export type { LocalApp, LocalAppKey } from "./local-apps";
//# sourceMappingURL=index.d.ts.map

package/dist/src/index.d.ts.map
ADDED
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../src/index.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,oBAAoB,EAAE,MAAM,oBAAoB,CAAC;AAC1D,OAAO,EAAE,sBAAsB,EAAE,MAAM,yBAAyB,CAAC;AACjE,YAAY,EAAE,QAAQ,EAAE,QAAQ,EAAE,aAAa,EAAE,WAAW,EAAE,MAAM,SAAS,CAAC;AAC9E,cAAc,SAAS,CAAC;AACxB,OAAO,EACN,aAAa,EACb,cAAc,EACd,KAAK,UAAU,EACf,KAAK,YAAY,EACjB,KAAK,YAAY,EACjB,KAAK,QAAQ,EACb,UAAU,EACV,eAAe,EACf,aAAa,EACb,kBAAkB,GAClB,MAAM,aAAa,CAAC;AACrB,OAAO,EAAE,8BAA8B,EAAE,sBAAsB,EAAE,2BAA2B,EAAE,MAAM,mBAAmB,CAAC;AACxH,YAAY,EAAE,gBAAgB,EAAE,eAAe,EAAE,MAAM,mBAAmB,CAAC;AAC3E,YAAY,EAAE,SAAS,EAAE,gBAAgB,EAAE,MAAM,cAAc,CAAC;AAChE,YAAY,EAAE,UAAU,EAAE,gBAAgB,EAAE,eAAe,EAAE,MAAM,kBAAkB,CAAC;AACtF,YAAY,EACX,WAAW,EACX,aAAa,EACb,sBAAsB,EACtB,gCAAgC,EAChC,8BAA8B,EAC9B,kCAAkC,EAClC,uBAAuB,EACvB,sBAAsB,EACtB,oCAAoC,EACpC,gCAAgC,EAChC,2BAA2B,EAC3B,gCAAgC,EAChC,8BAA8B,EAC9B,sBAAsB,EACtB,8BAA8B,EAC9B,mBAAmB,EACnB,sBAAsB,EACtB,yBAAyB,EACzB,8BAA8B,EAC9B,uBAAuB,GACvB,MAAM,kBAAkB,CAAC;AAC1B,OAAO,EAAE,uBAAuB,EAAE,MAAM,cAAc,CAAC;AACvD,OAAO,EAAE,yBAAyB,EAAE,MAAM,kBAAkB,CAAC;AAE7D,OAAO,KAAK,QAAQ,MAAM,YAAY,CAAC;AACvC,OAAO,EAAE,QAAQ,EAAE,CAAC;AAEpB,OAAO,EAAE,UAAU,EAAE,MAAM,cAAc,CAAC;AAC1C,YAAY,EAAE,QAAQ,EAAE,WAAW,EAAE,MAAM,cAAc,CAAC"}

package/dist/src/library-to-tasks.d.ts
ADDED
import type { ModelLibraryKey } from "./model-libraries";
import type { PipelineType } from "./pipelines";
/**
 * Mapping from library name to its supported tasks.
 * Inference API (serverless) should be disabled for all other (library, task) pairs beyond this mapping.
 * This mapping is partially generated automatically by "python-api-export-tasks" action in
 * huggingface/api-inference-community repo upon merge. For transformers, the mapping is manually
 * based on api-inference (hf_types.rs).
 */
export declare const LIBRARY_TASK_MAPPING: Partial<Record<ModelLibraryKey, PipelineType[]>>;
//# sourceMappingURL=library-to-tasks.d.ts.map

package/dist/src/library-to-tasks.d.ts.map
ADDED
{"version":3,"file":"library-to-tasks.d.ts","sourceRoot":"","sources":["../../src/library-to-tasks.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,eAAe,EAAE,MAAM,mBAAmB,CAAC;AACzD,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,aAAa,CAAC;AAEhD;;;;;;GAMG;AACH,eAAO,MAAM,oBAAoB,EAAE,OAAO,CAAC,MAAM,CAAC,eAAe,EAAE,YAAY,EAAE,CAAC,CAgEjF,CAAC"}

package/dist/src/local-apps.d.ts
ADDED
import type { ModelData } from "./model-data";
import type { PipelineType } from "./pipelines";
/**
 * Elements configurable by a local app.
 */
export type LocalApp = {
    /**
     * Name that appears in buttons
     */
    prettyLabel: string;
    /**
     * Link to get more info about a local app (website etc)
     */
    docsUrl: string;
    /**
     * main category of app
     */
    mainTask: PipelineType;
    /**
     * Whether to display a pill "macOS-only"
     */
    macOSOnly?: boolean;
    comingSoon?: boolean;
    /**
     * IMPORTANT: function to figure out whether to display the button on a model page's main "Use this model" dropdown.
     */
    displayOnModelPage: (model: ModelData) => boolean;
} & ({
    /**
     * If the app supports deeplink, URL to open.
     */
    deeplink: (model: ModelData) => URL;
} | {
    /**
     * And if not (mostly llama.cpp), snippet to copy/paste in your terminal
     */
    snippet: (model: ModelData) => string;
});
declare function isGgufModel(model: ModelData): boolean;
/**
 * Add your new local app here.
 *
 * This is open to new suggestions and awesome upcoming apps.
 *
 * /!\ IMPORTANT
 *
 * If possible, you need to support deeplinks and be as cross-platform as possible.
 *
 * Ping the HF team if we can help with anything!
 */
export declare const LOCAL_APPS: {
    "llama.cpp": {
        prettyLabel: string;
        docsUrl: string;
        mainTask: "text-generation";
        displayOnModelPage: typeof isGgufModel;
        snippet: (model: ModelData) => string;
    };
    lmstudio: {
        prettyLabel: string;
        docsUrl: string;
        mainTask: "text-generation";
        displayOnModelPage: typeof isGgufModel;
        deeplink: (model: ModelData) => URL;
    };
    jan: {
        prettyLabel: string;
        docsUrl: string;
        mainTask: "text-generation";
        displayOnModelPage: typeof isGgufModel;
        deeplink: (model: ModelData) => URL;
    };
    faraday: {
        prettyLabel: string;
        docsUrl: string;
        mainTask: "text-generation";
        macOSOnly: true;
        displayOnModelPage: typeof isGgufModel;
        deeplink: (model: ModelData) => URL;
    };
    drawthings: {
        prettyLabel: string;
        docsUrl: string;
        mainTask: "text-to-image";
        macOSOnly: true;
        /**
         * random function, will need to refine the actual conditions:
         */
        displayOnModelPage: (model: ModelData) => boolean;
        deeplink: (model: ModelData) => URL;
    };
    diffusionbee: {
        prettyLabel: string;
        docsUrl: string;
        mainTask: "text-to-image";
        macOSOnly: true;
        comingSoon: true;
        displayOnModelPage: (model: ModelData) => boolean;
        deeplink: (model: ModelData) => URL;
    };
};
export type LocalAppKey = keyof typeof LOCAL_APPS;
export {};
//# sourceMappingURL=local-apps.d.ts.map

package/dist/src/local-apps.d.ts.map
ADDED
{"version":3,"file":"local-apps.d.ts","sourceRoot":"","sources":["../../src/local-apps.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,SAAS,EAAE,MAAM,cAAc,CAAC;AAC9C,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,aAAa,CAAC;AAEhD;;GAEG;AACH,MAAM,MAAM,QAAQ,GAAG;IACtB;;OAEG;IACH,WAAW,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,OAAO,EAAE,MAAM,CAAC;IAChB;;OAEG;IACH,QAAQ,EAAE,YAAY,CAAC;IACvB;;OAEG;IACH,SAAS,CAAC,EAAE,OAAO,CAAC;IAEpB,UAAU,CAAC,EAAE,OAAO,CAAC;IACrB;;OAEG;IACH,kBAAkB,EAAE,CAAC,KAAK,EAAE,SAAS,KAAK,OAAO,CAAC;CAClD,GAAG,CACD;IACA;;OAEG;IACH,QAAQ,EAAE,CAAC,KAAK,EAAE,SAAS,KAAK,GAAG,CAAC;CACnC,GACD;IACA;;OAEG;IACH,OAAO,EAAE,CAAC,KAAK,EAAE,SAAS,KAAK,MAAM,CAAC;CACrC,CACH,CAAC;AAEF,iBAAS,WAAW,CAAC,KAAK,EAAE,SAAS,WAEpC;AASD;;;;;;;;;;GAUG;AACH,eAAO,MAAM,UAAU;;;;;;yBAlBS,SAAS,KAAG,MAAM;;;;;;;;;;;;;;;;;;;;;;;;;;;;;QAqDhD;;WAEG;;;;;;;;;;;;;CAa+B,CAAC;AAErC,MAAM,MAAM,WAAW,GAAG,MAAM,OAAO,UAAU,CAAC"}