@huggingface/tasks 0.2.1 → 0.2.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/{index.mjs → index.cjs} +280 -133
- package/dist/index.d.ts +4 -3
- package/dist/index.js +245 -168
- package/package.json +13 -8
- package/src/library-to-tasks.ts +1 -1
- package/src/library-ui-elements.ts +11 -11
- package/src/model-data.ts +1 -1
- package/src/model-libraries.ts +1 -1
- package/src/pipelines.ts +1 -1
- package/src/tasks/audio-classification/about.md +1 -1
- package/src/tasks/audio-classification/inference.ts +51 -0
- package/src/tasks/audio-classification/spec/input.json +34 -0
- package/src/tasks/audio-classification/spec/output.json +21 -0
- package/src/tasks/audio-to-audio/about.md +1 -1
- package/src/tasks/automatic-speech-recognition/about.md +4 -2
- package/src/tasks/automatic-speech-recognition/inference.ts +154 -0
- package/src/tasks/automatic-speech-recognition/spec/input.json +34 -0
- package/src/tasks/automatic-speech-recognition/spec/output.json +36 -0
- package/src/tasks/common-definitions.json +109 -0
- package/src/tasks/depth-estimation/data.ts +8 -4
- package/src/tasks/depth-estimation/inference.ts +35 -0
- package/src/tasks/depth-estimation/spec/input.json +30 -0
- package/src/tasks/depth-estimation/spec/output.json +10 -0
- package/src/tasks/document-question-answering/inference.ts +102 -0
- package/src/tasks/document-question-answering/spec/input.json +85 -0
- package/src/tasks/document-question-answering/spec/output.json +36 -0
- package/src/tasks/feature-extraction/inference.ts +22 -0
- package/src/tasks/feature-extraction/spec/input.json +26 -0
- package/src/tasks/feature-extraction/spec/output.json +7 -0
- package/src/tasks/fill-mask/inference.ts +61 -0
- package/src/tasks/fill-mask/spec/input.json +38 -0
- package/src/tasks/fill-mask/spec/output.json +29 -0
- package/src/tasks/image-classification/inference.ts +51 -0
- package/src/tasks/image-classification/spec/input.json +34 -0
- package/src/tasks/image-classification/spec/output.json +10 -0
- package/src/tasks/image-segmentation/inference.ts +65 -0
- package/src/tasks/image-segmentation/spec/input.json +54 -0
- package/src/tasks/image-segmentation/spec/output.json +25 -0
- package/src/tasks/image-to-image/inference.ts +67 -0
- package/src/tasks/image-to-image/spec/input.json +52 -0
- package/src/tasks/image-to-image/spec/output.json +12 -0
- package/src/tasks/image-to-text/inference.ts +138 -0
- package/src/tasks/image-to-text/spec/input.json +34 -0
- package/src/tasks/image-to-text/spec/output.json +17 -0
- package/src/tasks/index.ts +5 -2
- package/src/tasks/mask-generation/about.md +65 -0
- package/src/tasks/mask-generation/data.ts +42 -5
- package/src/tasks/object-detection/inference.ts +62 -0
- package/src/tasks/object-detection/spec/input.json +30 -0
- package/src/tasks/object-detection/spec/output.json +46 -0
- package/src/tasks/placeholder/data.ts +3 -0
- package/src/tasks/placeholder/spec/input.json +35 -0
- package/src/tasks/placeholder/spec/output.json +17 -0
- package/src/tasks/question-answering/inference.ts +99 -0
- package/src/tasks/question-answering/spec/input.json +67 -0
- package/src/tasks/question-answering/spec/output.json +29 -0
- package/src/tasks/sentence-similarity/about.md +2 -2
- package/src/tasks/sentence-similarity/inference.ts +32 -0
- package/src/tasks/sentence-similarity/spec/input.json +40 -0
- package/src/tasks/sentence-similarity/spec/output.json +12 -0
- package/src/tasks/summarization/data.ts +1 -0
- package/src/tasks/summarization/inference.ts +58 -0
- package/src/tasks/summarization/spec/input.json +7 -0
- package/src/tasks/summarization/spec/output.json +7 -0
- package/src/tasks/table-question-answering/inference.ts +61 -0
- package/src/tasks/table-question-answering/spec/input.json +39 -0
- package/src/tasks/table-question-answering/spec/output.json +40 -0
- package/src/tasks/tabular-classification/about.md +1 -1
- package/src/tasks/tabular-regression/about.md +1 -1
- package/src/tasks/text-classification/about.md +1 -0
- package/src/tasks/text-classification/inference.ts +51 -0
- package/src/tasks/text-classification/spec/input.json +35 -0
- package/src/tasks/text-classification/spec/output.json +10 -0
- package/src/tasks/text-generation/about.md +24 -13
- package/src/tasks/text-generation/data.ts +22 -38
- package/src/tasks/text-generation/inference.ts +85 -0
- package/src/tasks/text-generation/spec/input.json +74 -0
- package/src/tasks/text-generation/spec/output.json +17 -0
- package/src/tasks/text-to-audio/inference.ts +138 -0
- package/src/tasks/text-to-audio/spec/input.json +31 -0
- package/src/tasks/text-to-audio/spec/output.json +20 -0
- package/src/tasks/text-to-image/about.md +11 -2
- package/src/tasks/text-to-image/data.ts +6 -2
- package/src/tasks/text-to-image/inference.ts +73 -0
- package/src/tasks/text-to-image/spec/input.json +57 -0
- package/src/tasks/text-to-image/spec/output.json +15 -0
- package/src/tasks/text-to-speech/about.md +4 -2
- package/src/tasks/text-to-speech/data.ts +1 -0
- package/src/tasks/text-to-speech/inference.ts +146 -0
- package/src/tasks/text-to-speech/spec/input.json +7 -0
- package/src/tasks/text-to-speech/spec/output.json +7 -0
- package/src/tasks/text2text-generation/inference.ts +53 -0
- package/src/tasks/text2text-generation/spec/input.json +55 -0
- package/src/tasks/text2text-generation/spec/output.json +17 -0
- package/src/tasks/token-classification/inference.ts +82 -0
- package/src/tasks/token-classification/spec/input.json +65 -0
- package/src/tasks/token-classification/spec/output.json +33 -0
- package/src/tasks/translation/data.ts +1 -0
- package/src/tasks/translation/inference.ts +58 -0
- package/src/tasks/translation/spec/input.json +7 -0
- package/src/tasks/translation/spec/output.json +7 -0
- package/src/tasks/video-classification/inference.ts +59 -0
- package/src/tasks/video-classification/spec/input.json +42 -0
- package/src/tasks/video-classification/spec/output.json +10 -0
- package/src/tasks/visual-question-answering/inference.ts +63 -0
- package/src/tasks/visual-question-answering/spec/input.json +41 -0
- package/src/tasks/visual-question-answering/spec/output.json +21 -0
- package/src/tasks/zero-shot-classification/inference.ts +67 -0
- package/src/tasks/zero-shot-classification/spec/input.json +50 -0
- package/src/tasks/zero-shot-classification/spec/output.json +10 -0
- package/src/tasks/zero-shot-image-classification/data.ts +8 -5
- package/src/tasks/zero-shot-image-classification/inference.ts +61 -0
- package/src/tasks/zero-shot-image-classification/spec/input.json +45 -0
- package/src/tasks/zero-shot-image-classification/spec/output.json +10 -0
- package/src/tasks/zero-shot-object-detection/about.md +6 -0
- package/src/tasks/zero-shot-object-detection/data.ts +6 -1
- package/src/tasks/zero-shot-object-detection/inference.ts +66 -0
- package/src/tasks/zero-shot-object-detection/spec/input.json +40 -0
- package/src/tasks/zero-shot-object-detection/spec/output.json +47 -0
- package/tsconfig.json +3 -3

package/package.json
CHANGED
@@ -1,23 +1,24 @@
 {
   "name": "@huggingface/tasks",
   "packageManager": "pnpm@8.10.5",
-  "version": "0.2.1",
+  "version": "0.2.2",
   "description": "List of ML tasks for huggingface.co/tasks",
   "repository": "https://github.com/huggingface/huggingface.js.git",
   "publishConfig": {
     "access": "public"
   },
-  "main": "./dist/index.js",
-  "module": "./dist/index.mjs",
+  "main": "./dist/index.cjs",
+  "module": "./dist/index.js",
   "types": "./dist/index.d.ts",
   "exports": {
     ".": {
       "types": "./dist/index.d.ts",
-      "require": "./dist/index.js",
-      "import": "./dist/index.mjs"
+      "require": "./dist/index.cjs",
+      "import": "./dist/index.js"
     }
   },
   "source": "src/index.ts",
+  "type": "module",
   "files": [
     "dist",
     "src",
@@ -30,13 +31,17 @@
   ],
   "author": "Hugging Face",
   "license": "MIT",
-  "devDependencies": {},
+  "devDependencies": {
+    "@types/node": "^20.11.5",
+    "quicktype-core": "https://github.com/huggingface/quicktype/raw/pack-18.0.15/packages/quicktype-core/quicktype-core-18.0.15.tgz"
+  },
   "scripts": {
     "lint": "eslint --quiet --fix --ext .cjs,.ts .",
     "lint:check": "eslint --ext .cjs,.ts .",
     "format": "prettier --write .",
     "format:check": "prettier --check .",
-    "build": "tsup src/index.ts --format cjs,esm --clean --dts",
-    "check": "tsc"
+    "build": "tsup src/index.ts --format cjs,esm --clean --dts && pnpm run inference-codegen",
+    "check": "tsc",
+    "inference-codegen": "tsx scripts/inference-codegen.ts && prettier --write src/tasks/*/inference.ts"
   }
 }
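With `"type": "module"` set, `./dist/index.js` is now the ESM build and the CommonJS build moves to `./dist/index.cjs` (hence the `index.mjs → index.cjs` rename in the file list above). A minimal consumer sketch of how the new `exports` map resolves under Node's conditional exports; `PIPELINE_DATA` is just one illustrative export:

```ts
// ESM consumer: the "import" condition resolves to ./dist/index.js
import { PIPELINE_DATA } from "@huggingface/tasks";

// CommonJS consumer: the "require" condition resolves to ./dist/index.cjs
// const { PIPELINE_DATA } = require("@huggingface/tasks");

console.log(Object.keys(PIPELINE_DATA).length);
```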

package/src/library-to-tasks.ts
CHANGED
@@ -3,7 +3,7 @@ import type { PipelineType } from "./pipelines";
 
 /**
  * Mapping from library name (excluding Transformers) to its supported tasks.
- * Inference API should be disabled for all other (library, task) pairs beyond this mapping.
+ * Inference Endpoints (serverless) should be disabled for all other (library, task) pairs beyond this mapping.
  * As an exception, we assume Transformers supports all inference tasks.
  * This mapping is generated automatically by "python-api-export-tasks" action in huggingface/api-inference-community repo upon merge.
  * Ref: https://github.com/huggingface/api-inference-community/pull/158

package/src/library-ui-elements.ts
CHANGED
@@ -35,11 +35,11 @@ function nameWithoutNamespace(modelId: string): string {
 
 //#region snippets
 
-const adapter_transformers = (model: ModelData) => [
-  `from transformers import ${model.config?.adapter_transformers?.model_class}
+const adapters = (model: ModelData) => [
+  `from adapters import AutoAdapterModel
 
-model = ${model.config?.adapter_transformers?.model_class}.from_pretrained("${model.config?.adapter_transformers?.model_name}")
-model.load_adapter("${model.id}", source="hf")`,
+model = AutoAdapterModel.from_pretrained("${model.config?.adapter_transformers?.model_name}")
+model.load_adapter("${model.id}", set_active=True)`,
 ];
 
 const allennlpUnknown = (model: ModelData) => [
@@ -541,12 +541,12 @@ transcriptions = asr_model.transcribe(["file.wav"])`,
 
 const mlAgents = (model: ModelData) => [`mlagents-load-from-hf --repo-id="${model.id}" --local-dir="./downloads"`];
 
-const sentis = (model: ModelData) => [
+const sentis = (/* model: ModelData */) => [
   `string modelName = "[Your model name here].sentis";
 Model model = ModelLoader.Load(Application.streamingAssetsPath + "/" + modelName);
 IWorker engine = WorkerFactory.CreateWorker(BackendType.GPUCompute, model);
 // Please see provided C# file for more details
-`
+`,
 ];
 
 const mlx = (model: ModelData) => [
@@ -576,11 +576,11 @@ model = AutoModel.load_from_hf_hub("${model.id}")`,
 
 export const MODEL_LIBRARIES_UI_ELEMENTS: Partial<Record<ModelLibraryKey, LibraryUiElement>> = {
   "adapter-transformers": {
-    btnLabel: "Adapter Transformers",
-    repoName: "adapter-transformers",
-    repoUrl: "https://github.com/Adapter-Hub/adapter-transformers",
-    docsUrl: "https://huggingface.co/docs/hub/adapter-transformers",
-    snippets: adapter_transformers,
+    btnLabel: "Adapters",
+    repoName: "adapters",
+    repoUrl: "https://github.com/Adapter-Hub/adapters",
+    docsUrl: "https://huggingface.co/docs/hub/adapters",
+    snippets: adapters,
   },
   allennlp: {
     btnLabel: "AllenNLP",

package/src/model-data.ts
CHANGED
@@ -78,7 +78,7 @@ export interface ModelData {
    */
   widgetData?: WidgetExample[] | undefined;
   /**
-   * Parameters that will be used by the widget when calling Inference API
+   * Parameters that will be used by the widget when calling Inference Endpoints (serverless)
    * https://huggingface.co/docs/api-inference/detailed_parameters
    *
    * can be set in the model card metadata (under `inference/parameters`)

package/src/model-libraries.ts
CHANGED
@@ -5,7 +5,7 @@
  * File formats live in an enum inside the internal codebase.
  */
 export enum ModelLibrary {
-  "adapter-transformers" = "Adapter Transformers",
+  "adapter-transformers" = "Adapters",
   "allennlp" = "allenNLP",
   "asteroid" = "Asteroid",
   "bertopic" = "BERTopic",

package/src/pipelines.ts
CHANGED
@@ -62,7 +62,7 @@ export interface PipelineData {
 /// This type is used in multiple places in the Hugging Face
 /// ecosystem:
 /// - To determine which widget to show.
-/// - To determine which endpoint of Inference API to use.
+/// - To determine which endpoint of Inference Endpoints to use.
 /// - As filters at the left of models and datasets page.
 ///
 /// Note that this is sensitive to order.

package/src/tasks/audio-classification/about.md
CHANGED
@@ -26,7 +26,7 @@ Datasets such as VoxLingua107 allow anyone to train language identification mode
 
 ### Emotion recognition
 
-Emotion recognition is self explanatory. In addition to trying the widgets, you can use the Inference API to perform audio classification. Here is a simple example that uses a [HuBERT](https://huggingface.co/superb/hubert-large-superb-er) model fine-tuned for this task.
+Emotion recognition is self explanatory. In addition to trying the widgets, you can use Inference Endpoints to perform audio classification. Here is a simple example that uses a [HuBERT](https://huggingface.co/superb/hubert-large-superb-er) model fine-tuned for this task.
 
 ```python
 import json
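The example the hunk refers to is a Python snippet that is not part of this diff. A rough TypeScript counterpart, assuming the sibling `@huggingface/inference` client (not part of this package); the model id comes from the hunk itself, and `sample.flac` is a placeholder path:

```ts
import { readFileSync } from "node:fs";
import { HfInference } from "@huggingface/inference";

const hf = new HfInference(process.env.HF_TOKEN);

// Fine-tuned HuBERT model referenced in the about.md hunk above.
const labels = await hf.audioClassification({
  model: "superb/hubert-large-superb-er",
  data: new Blob([readFileSync("sample.flac")]),
});
// labels: Array<{ label: string; score: number }>
console.log(labels);
```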

package/src/tasks/audio-classification/inference.ts
ADDED
@@ -0,0 +1,51 @@
+/**
+ * Inference code generated from the JSON schema spec in ./spec
+ *
+ * Using src/scripts/inference-codegen
+ */
+/**
+ * Inputs for Audio Classification inference
+ */
+export interface AudioClassificationInput {
+  /**
+   * The input audio data
+   */
+  data: unknown;
+  /**
+   * Additional inference parameters
+   */
+  parameters?: AudioClassificationParameters;
+  [property: string]: unknown;
+}
+/**
+ * Additional inference parameters
+ *
+ * Additional inference parameters for Audio Classification
+ */
+export interface AudioClassificationParameters {
+  functionToApply?: ClassificationOutputTransform;
+  /**
+   * When specified, limits the output to the top K most probable classes.
+   */
+  topK?: number;
+  [property: string]: unknown;
+}
+/**
+ * The function to apply to the model outputs in order to retrieve the scores.
+ */
+export type ClassificationOutputTransform = "sigmoid" | "softmax" | "none";
+export type AudioClassificationOutput = AudioClassificationOutputElement[];
+/**
+ * Outputs for Audio Classification inference
+ */
+export interface AudioClassificationOutputElement {
+  /**
+   * The predicted class label (model specific).
+   */
+  label: string;
+  /**
+   * The corresponding probability.
+   */
+  score: number;
+  [property: string]: unknown;
+}
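The generated types are plain structural interfaces; the `[property: string]: unknown` index signatures keep them open to extra fields. A sketch of conforming values, assuming a relative import next to the generated file:

```ts
import type { AudioClassificationInput, AudioClassificationOutput } from "./inference";

// `data` is deliberately `unknown`: the spec does not constrain the audio encoding.
const input: AudioClassificationInput = {
  data: new Uint8Array([/* raw audio bytes */]),
  parameters: { functionToApply: "softmax", topK: 2 },
};

// Illustrative values only.
const output: AudioClassificationOutput = [
  { label: "hap", score: 0.73 },
  { label: "neu", score: 0.24 },
];
```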

package/src/tasks/audio-classification/spec/input.json
ADDED
@@ -0,0 +1,34 @@
+{
+  "$id": "/inference/schemas/audio-classification/input.json",
+  "$schema": "http://json-schema.org/draft-06/schema#",
+  "description": "Inputs for Audio Classification inference",
+  "title": "AudioClassificationInput",
+  "type": "object",
+  "properties": {
+    "data": {
+      "description": "The input audio data"
+    },
+    "parameters": {
+      "description": "Additional inference parameters",
+      "$ref": "#/$defs/AudioClassificationParameters"
+    }
+  },
+  "$defs": {
+    "AudioClassificationParameters": {
+      "title": "AudioClassificationParameters",
+      "description": "Additional inference parameters for Audio Classification",
+      "type": "object",
+      "properties": {
+        "functionToApply": {
+          "title": "AudioClassificationOutputTransform",
+          "$ref": "/inference/schemas/common-definitions.json#/definitions/ClassificationOutputTransform"
+        },
+        "topK": {
+          "type": "integer",
+          "description": "When specified, limits the output to the top K most probable classes."
+        }
+      }
+    }
+  },
+  "required": ["data"]
+}
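Note that `functionToApply` points at `common-definitions.json` through an absolute `$id`-based `$ref`, so a validator must have that schema registered before this one resolves. A sketch using Ajv v8 (an assumption, as are the relative import paths; per Ajv's docs, the draft-06 meta-schema has to be added explicitly):

```ts
import Ajv from "ajv";
import draft6MetaSchema from "ajv/dist/refs/json-schema-draft-06.json";
import commonDefinitions from "../../common-definitions.json";
import inputSchema from "./input.json";

const ajv = new Ajv({ strict: false });
ajv.addMetaSchema(draft6MetaSchema);
ajv.addSchema(commonDefinitions); // registered under its "$id" so the absolute $ref resolves

const payload = { data: "<audio bytes>", parameters: { functionToApply: "softmax", topK: 5 } };
console.log(ajv.validate(inputSchema, payload) || ajv.errors);
```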

package/src/tasks/audio-classification/spec/output.json
ADDED
@@ -0,0 +1,21 @@
+{
+  "$id": "/inference/schemas/audio-classification/output.json",
+  "$schema": "http://json-schema.org/draft-06/schema#",
+  "title": "AudioClassificationOutput",
+  "description": "Outputs for Audio Classification inference",
+  "type": "array",
+  "items": {
+    "type": "object",
+    "properties": {
+      "label": {
+        "type": "string",
+        "description": "The predicted class label (model specific)."
+      },
+      "score": {
+        "type": "number",
+        "description": "The corresponding probability."
+      }
+    },
+    "required": ["label", "score"]
+  }
+}

package/src/tasks/audio-to-audio/about.md
CHANGED
@@ -12,7 +12,7 @@ model = SpectralMaskEnhancement.from_hparams(
 model.enhance_file("file.wav")
 ```
 
-Alternatively, you can use the Inference API to solve this task
+Alternatively, you can use [Inference Endpoints](https://huggingface.co/inference-endpoints) to solve this task
 
 ```python
 import json

package/src/tasks/automatic-speech-recognition/about.md
CHANGED
@@ -18,7 +18,7 @@ The use of Multilingual ASR has become popular, the idea of maintaining just a s
 
 ## Inference
 
-The Hub contains over [~9,000 ASR models](https://huggingface.co/models?pipeline_tag=automatic-speech-recognition&sort=downloads) that you can use right away by trying out the widgets directly in the browser or calling the models as a service using the Inference API. Here is a simple code snippet to do exactly this:
+The Hub contains over [~9,000 ASR models](https://huggingface.co/models?pipeline_tag=automatic-speech-recognition&sort=downloads) that you can use right away by trying out the widgets directly in the browser or calling the models as a service using Inference Endpoints. Here is a simple code snippet to do exactly this:
 
 ```python
 import json
@@ -83,6 +83,8 @@ These events help democratize ASR for all languages, including low-resource lang
 - [Massively Multilingual ASR: 50 Languages, 1 Model, 1 Billion Parameters](https://arxiv.org/pdf/2007.03001.pdf)
 - An ASR toolkit made by [NVIDIA: NeMo](https://github.com/NVIDIA/NeMo) with code and pretrained models useful for new ASR models. Watch the [introductory video](https://www.youtube.com/embed/wBgpMf_KQVw) for an overview.
 - [An introduction to SpeechT5, a multi-purpose speech recognition and synthesis model](https://huggingface.co/blog/speecht5)
-- [
+- [Fine-tune Whisper For Multilingual ASR with 🤗Transformers](https://huggingface.co/blog/fine-tune-whisper)
 - [Automatic speech recognition task guide](https://huggingface.co/docs/transformers/tasks/asr)
 - [Speech Synthesis, Recognition, and More With SpeechT5](https://huggingface.co/blog/speecht5)
+- [Fine-Tune W2V2-Bert for low-resource ASR with 🤗 Transformers](https://huggingface.co/blog/fine-tune-w2v2-bert)
+- [Speculative Decoding for 2x Faster Whisper Inference](https://huggingface.co/blog/whisper-speculative-decoding)
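As with audio classification above, the "simple code snippet" the hunk mentions is Python and not shown in this diff; a rough TypeScript counterpart via the sibling `@huggingface/inference` client (an assumption, and the Whisper model id is only an illustrative pick):

```ts
import { readFileSync } from "node:fs";
import { HfInference } from "@huggingface/inference";

const hf = new HfInference(process.env.HF_TOKEN);
const { text } = await hf.automaticSpeechRecognition({
  model: "openai/whisper-large-v3",
  data: new Blob([readFileSync("sample.flac")]),
});
console.log(text);
```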

package/src/tasks/automatic-speech-recognition/inference.ts
ADDED
@@ -0,0 +1,154 @@
+/**
+ * Inference code generated from the JSON schema spec in ./spec
+ *
+ * Using src/scripts/inference-codegen
+ */
+/**
+ * Inputs for Automatic Speech Recognition inference
+ */
+export interface AutomaticSpeechRecognitionInput {
+  /**
+   * The input audio data
+   */
+  data: unknown;
+  /**
+   * Additional inference parameters
+   */
+  parameters?: AutomaticSpeechRecognitionParameters;
+  [property: string]: unknown;
+}
+/**
+ * Additional inference parameters
+ *
+ * Additional inference parameters for Automatic Speech Recognition
+ */
+export interface AutomaticSpeechRecognitionParameters {
+  /**
+   * Parametrization of the text generation process
+   */
+  generate?: GenerationParameters;
+  /**
+   * Whether to output corresponding timestamps with the generated text
+   */
+  returnTimestamps?: boolean;
+  [property: string]: unknown;
+}
+/**
+ * Parametrization of the text generation process
+ *
+ * Ad-hoc parametrization of the text generation process
+ */
+export interface GenerationParameters {
+  /**
+   * Whether to use sampling instead of greedy decoding when generating new tokens.
+   */
+  doSample?: boolean;
+  /**
+   * Controls the stopping condition for beam-based methods.
+   */
+  earlyStopping?: EarlyStoppingUnion;
+  /**
+   * If set to float strictly between 0 and 1, only tokens with a conditional probability
+   * greater than epsilon_cutoff will be sampled. In the paper, suggested values range from
+   * 3e-4 to 9e-4, depending on the size of the model. See [Truncation Sampling as Language
+   * Model Desmoothing](https://hf.co/papers/2210.15191) for more details.
+   */
+  epsilonCutoff?: number;
+  /**
+   * Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to
+   * float strictly between 0 and 1, a token is only considered if it is greater than either
+   * eta_cutoff or sqrt(eta_cutoff) * exp(-entropy(softmax(next_token_logits))). The latter
+   * term is intuitively the expected next token probability, scaled by sqrt(eta_cutoff). In
+   * the paper, suggested values range from 3e-4 to 2e-3, depending on the size of the model.
+   * See [Truncation Sampling as Language Model Desmoothing](https://hf.co/papers/2210.15191)
+   * for more details.
+   */
+  etaCutoff?: number;
+  /**
+   * The maximum length (in tokens) of the generated text, including the input.
+   */
+  maxLength?: number;
+  /**
+   * The maximum number of tokens to generate. Takes precedence over maxLength.
+   */
+  maxNewTokens?: number;
+  /**
+   * The minimum length (in tokens) of the generated text, including the input.
+   */
+  minLength?: number;
+  /**
+   * The minimum number of tokens to generate. Takes precedence over maxLength.
+   */
+  minNewTokens?: number;
+  /**
+   * Number of groups to divide num_beams into in order to ensure diversity among different
+   * groups of beams. See [this paper](https://hf.co/papers/1610.02424) for more details.
+   */
+  numBeamGroups?: number;
+  /**
+   * Number of beams to use for beam search.
+   */
+  numBeams?: number;
+  /**
+   * The value balances the model confidence and the degeneration penalty in contrastive
+   * search decoding.
+   */
+  penaltyAlpha?: number;
+  /**
+   * The value used to modulate the next token probabilities.
+   */
+  temperature?: number;
+  /**
+   * The number of highest probability vocabulary tokens to keep for top-k-filtering.
+   */
+  topK?: number;
+  /**
+   * If set to float < 1, only the smallest set of most probable tokens with probabilities
+   * that add up to top_p or higher are kept for generation.
+   */
+  topP?: number;
+  /**
+   * Local typicality measures how similar the conditional probability of predicting a target
+   * token next is to the expected conditional probability of predicting a random token next,
+   * given the partial text already generated. If set to float < 1, the smallest set of the
+   * most locally typical tokens with probabilities that add up to typical_p or higher are
+   * kept for generation. See [this paper](https://hf.co/papers/2202.00666) for more details.
+   */
+  typicalP?: number;
+  /**
+   * Whether the model should use the past last key/values attentions to speed up decoding
+   */
+  useCache?: boolean;
+  [property: string]: unknown;
+}
+/**
+ * Controls the stopping condition for beam-based methods.
+ */
+export type EarlyStoppingUnion = boolean | "never";
+export interface AutomaticSpeechRecognitionOutputChunk {
+  /**
+   * A chunk of text identified by the model
+   */
+  text: string;
+  /**
+   * The start and end timestamps corresponding with the text
+   */
+  timestamps: number[];
+  [property: string]: unknown;
+}
+export type AutomaticSpeechRecognitionOutput = AutomaticSpeechRecognitionOutputElement[];
+/**
+ * Outputs of inference for the Automatic Speech Recognition task
+ */
+export interface AutomaticSpeechRecognitionOutputElement {
+  /**
+   * When returnTimestamps is enabled, chunks contains a list of audio chunks identified by
+   * the model.
+   */
+  chunks?: AutomaticSpeechRecognitionOutputChunk[];
+  /**
+   * The recognized text.
+   */
+  text: string;
+  [property: string]: unknown;
+}
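A sketch of a result conforming to the output types above (relative import assumed); per the spec, each `timestamps` entry is a two-number `[start, end]` pair, with units left unspecified:

```ts
import type { AutomaticSpeechRecognitionOutput } from "./inference";

const result: AutomaticSpeechRecognitionOutput = [
  {
    text: "hello world",
    // Only present when returnTimestamps was requested.
    chunks: [
      { text: "hello", timestamps: [0.0, 0.42] },
      { text: "world", timestamps: [0.48, 0.9] },
    ],
  },
];
```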

package/src/tasks/automatic-speech-recognition/spec/input.json
ADDED
@@ -0,0 +1,34 @@
+{
+  "$id": "/inference/schemas/automatic-speech-recognition/input.json",
+  "$schema": "http://json-schema.org/draft-06/schema#",
+  "description": "Inputs for Automatic Speech Recognition inference",
+  "title": "AutomaticSpeechRecognitionInput",
+  "type": "object",
+  "properties": {
+    "data": {
+      "description": "The input audio data"
+    },
+    "parameters": {
+      "description": "Additional inference parameters",
+      "$ref": "#/$defs/AutomaticSpeechRecognitionParameters"
+    }
+  },
+  "$defs": {
+    "AutomaticSpeechRecognitionParameters": {
+      "title": "AutomaticSpeechRecognitionParameters",
+      "description": "Additional inference parameters for Automatic Speech Recognition",
+      "type": "object",
+      "properties": {
+        "returnTimestamps": {
+          "type": "boolean",
+          "description": "Whether to output corresponding timestamps with the generated text"
+        },
+        "generate": {
+          "description": "Parametrization of the text generation process",
+          "$ref": "/inference/schemas/common-definitions.json#/definitions/GenerationParameters"
+        }
+      }
+    }
+  },
+  "required": ["data"]
+}

package/src/tasks/automatic-speech-recognition/spec/output.json
ADDED
@@ -0,0 +1,36 @@
+{
+  "$id": "/inference/schemas/automatic-speech-recognition/output.json",
+  "$schema": "http://json-schema.org/draft-06/schema#",
+  "description": "Outputs of inference for the Automatic Speech Recognition task",
+  "title": "AutomaticSpeechRecognitionOutput",
+  "type": "array",
+  "items": {
+    "type": "object",
+    "properties": {
+      "text": {
+        "type": "string",
+        "description": "The recognized text."
+      },
+      "chunks": {
+        "type": "array",
+        "description": "When returnTimestamps is enabled, chunks contains a list of audio chunks identified by the model.",
+        "items": {
+          "type": "object",
+          "title": "AutomaticSpeechRecognitionOutputChunk",
+          "properties": {
+            "text": { "type": "string", "description": "A chunk of text identified by the model" },
+            "timestamps": {
+              "type": "array",
+              "description": "The start and end timestamps corresponding with the text",
+              "items": { "type": "number" },
+              "minLength": 2,
+              "maxLength": 2
+            }
+          },
+          "required": ["text", "timestamps"]
+        }
+      }
+    },
+    "required": ["text"]
+  }
+}

package/src/tasks/common-definitions.json
ADDED
@@ -0,0 +1,109 @@
+{
+  "$id": "/inference/schemas/common-definitions.json",
+  "$schema": "http://json-schema.org/draft-06/schema#",
+  "description": "(Incomplete!) Common type definitions shared by several tasks",
+  "definitions": {
+    "ClassificationOutputTransform": {
+      "title": "ClassificationOutputTransform",
+      "type": "string",
+      "description": "The function to apply to the model outputs in order to retrieve the scores.",
+      "oneOf": [
+        {
+          "const": "sigmoid"
+        },
+        {
+          "const": "softmax"
+        },
+        {
+          "const": "none"
+        }
+      ]
+    },
+    "ClassificationOutput": {
+      "title": "ClassificationOutput",
+      "type": "object",
+      "properties": {
+        "label": {
+          "type": "string",
+          "description": "The predicted class label."
+        },
+        "score": {
+          "type": "number",
+          "description": "The corresponding probability."
+        }
+      },
+      "required": ["label", "score"]
+    },
+    "GenerationParameters": {
+      "title": "GenerationParameters",
+      "description": "Ad-hoc parametrization of the text generation process",
+      "type": "object",
+      "properties": {
+        "temperature": {
+          "type": "number",
+          "description": "The value used to modulate the next token probabilities."
+        },
+        "topK": {
+          "type": "integer",
+          "description": "The number of highest probability vocabulary tokens to keep for top-k-filtering."
+        },
+        "topP": {
+          "type": "number",
+          "description": "If set to float < 1, only the smallest set of most probable tokens with probabilities that add up to top_p or higher are kept for generation."
+        },
+        "typicalP": {
+          "type": "number",
+          "description": " Local typicality measures how similar the conditional probability of predicting a target token next is to the expected conditional probability of predicting a random token next, given the partial text already generated. If set to float < 1, the smallest set of the most locally typical tokens with probabilities that add up to typical_p or higher are kept for generation. See [this paper](https://hf.co/papers/2202.00666) for more details."
+        },
+        "epsilonCutoff": {
+          "type": "number",
+          "description": "If set to float strictly between 0 and 1, only tokens with a conditional probability greater than epsilon_cutoff will be sampled. In the paper, suggested values range from 3e-4 to 9e-4, depending on the size of the model. See [Truncation Sampling as Language Model Desmoothing](https://hf.co/papers/2210.15191) for more details."
+        },
+        "etaCutoff": {
+          "type": "number",
+          "description": "Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to float strictly between 0 and 1, a token is only considered if it is greater than either eta_cutoff or sqrt(eta_cutoff) * exp(-entropy(softmax(next_token_logits))). The latter term is intuitively the expected next token probability, scaled by sqrt(eta_cutoff). In the paper, suggested values range from 3e-4 to 2e-3, depending on the size of the model. See [Truncation Sampling as Language Model Desmoothing](https://hf.co/papers/2210.15191) for more details."
+        },
+        "maxLength": {
+          "type": "integer",
+          "description": "The maximum length (in tokens) of the generated text, including the input."
+        },
+        "maxNewTokens": {
+          "type": "integer",
+          "description": "The maximum number of tokens to generate. Takes precedence over maxLength."
+        },
+        "minLength": {
+          "type": "integer",
+          "description": "The minimum length (in tokens) of the generated text, including the input."
+        },
+        "minNewTokens": {
+          "type": "integer",
+          "description": "The minimum number of tokens to generate. Takes precedence over maxLength."
+        },
+        "doSample": {
+          "type": "boolean",
+          "description": "Whether to use sampling instead of greedy decoding when generating new tokens."
+        },
+        "earlyStopping": {
+          "description": "Controls the stopping condition for beam-based methods.",
+          "oneOf": [{ "type": "boolean" }, { "const": "never", "type": "string" }]
+        },
+        "numBeams": {
+          "type": "integer",
+          "description": "Number of beams to use for beam search."
+        },
+        "numBeamGroups": {
+          "type": "integer",
+          "description": "Number of groups to divide num_beams into in order to ensure diversity among different groups of beams. See [this paper](https://hf.co/papers/1610.02424) for more details."
+        },
+        "penaltyAlpha": {
+          "type": "number",
+          "description": "The value balances the model confidence and the degeneration penalty in contrastive search decoding."
+        },
+        "useCache": {
+          "type": "boolean",
+          "description": "Whether the model should use the past last key/values attentions to speed up decoding"
+        }
+      }
+    }
+  }
+}
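`GenerationParameters` is the shared definition that task specs such as automatic-speech-recognition pull in through their `generate` parameter; the property names are camelCased forms of the usual `transformers` generation options. A conforming object, sketched against the generated ASR types (relative import assumed):

```ts
import type { GenerationParameters } from "./automatic-speech-recognition/inference";

const generate: GenerationParameters = {
  doSample: true,
  temperature: 0.7,
  topK: 50,
  topP: 0.95,
  maxNewTokens: 256,
  numBeams: 4,
  earlyStopping: "never", // boolean | "never", matching the oneOf above
};
```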

package/src/tasks/depth-estimation/data.ts
CHANGED
@@ -28,8 +28,8 @@ const taskData: TaskDataCustom = {
       id: "Intel/dpt-large",
     },
     {
-      description: "Strong Depth Estimation model trained on
-      id: "
+      description: "Strong Depth Estimation model trained on a big compilation of datasets.",
+      id: "LiheYoung/depth-anything-large-hf",
     },
     {
       description: "A strong monocular depth estimation model.",
@@ -42,8 +42,12 @@ const taskData: TaskDataCustom = {
       id: "radames/dpt-depth-estimation-3d-voxels",
     },
     {
-      description: "An application
-      id: "
+      description: "An application to compare the outputs of different depth estimation models.",
+      id: "LiheYoung/Depth-Anything",
+    },
+    {
+      description: "An application to try state-of-the-art depth estimation.",
+      id: "merve/compare_depth_models",
     },
   ],
   summary: "Depth estimation is the task of predicting depth of the objects present in an image.",
|