@huggingface/tasks 0.2.0 → 0.2.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/{index.mjs → index.cjs} +295 -134
- package/dist/index.d.ts +8 -6
- package/dist/index.js +260 -169
- package/package.json +13 -8
- package/src/library-to-tasks.ts +1 -1
- package/src/library-ui-elements.ts +24 -10
- package/src/model-data.ts +1 -1
- package/src/model-libraries.ts +3 -2
- package/src/pipelines.ts +1 -1
- package/src/tasks/audio-classification/about.md +1 -1
- package/src/tasks/audio-classification/inference.ts +51 -0
- package/src/tasks/audio-classification/spec/input.json +34 -0
- package/src/tasks/audio-classification/spec/output.json +21 -0
- package/src/tasks/audio-to-audio/about.md +1 -1
- package/src/tasks/automatic-speech-recognition/about.md +4 -2
- package/src/tasks/automatic-speech-recognition/inference.ts +154 -0
- package/src/tasks/automatic-speech-recognition/spec/input.json +34 -0
- package/src/tasks/automatic-speech-recognition/spec/output.json +36 -0
- package/src/tasks/common-definitions.json +109 -0
- package/src/tasks/depth-estimation/data.ts +8 -4
- package/src/tasks/depth-estimation/inference.ts +35 -0
- package/src/tasks/depth-estimation/spec/input.json +30 -0
- package/src/tasks/depth-estimation/spec/output.json +10 -0
- package/src/tasks/document-question-answering/inference.ts +102 -0
- package/src/tasks/document-question-answering/spec/input.json +85 -0
- package/src/tasks/document-question-answering/spec/output.json +36 -0
- package/src/tasks/feature-extraction/inference.ts +22 -0
- package/src/tasks/feature-extraction/spec/input.json +26 -0
- package/src/tasks/feature-extraction/spec/output.json +7 -0
- package/src/tasks/fill-mask/inference.ts +61 -0
- package/src/tasks/fill-mask/spec/input.json +38 -0
- package/src/tasks/fill-mask/spec/output.json +29 -0
- package/src/tasks/image-classification/inference.ts +51 -0
- package/src/tasks/image-classification/spec/input.json +34 -0
- package/src/tasks/image-classification/spec/output.json +10 -0
- package/src/tasks/image-segmentation/inference.ts +65 -0
- package/src/tasks/image-segmentation/spec/input.json +54 -0
- package/src/tasks/image-segmentation/spec/output.json +25 -0
- package/src/tasks/image-to-image/inference.ts +67 -0
- package/src/tasks/image-to-image/spec/input.json +52 -0
- package/src/tasks/image-to-image/spec/output.json +12 -0
- package/src/tasks/image-to-text/inference.ts +138 -0
- package/src/tasks/image-to-text/spec/input.json +34 -0
- package/src/tasks/image-to-text/spec/output.json +17 -0
- package/src/tasks/index.ts +5 -2
- package/src/tasks/mask-generation/about.md +65 -0
- package/src/tasks/mask-generation/data.ts +55 -0
- package/src/tasks/object-detection/inference.ts +62 -0
- package/src/tasks/object-detection/spec/input.json +30 -0
- package/src/tasks/object-detection/spec/output.json +46 -0
- package/src/tasks/placeholder/data.ts +3 -0
- package/src/tasks/placeholder/spec/input.json +35 -0
- package/src/tasks/placeholder/spec/output.json +17 -0
- package/src/tasks/question-answering/inference.ts +99 -0
- package/src/tasks/question-answering/spec/input.json +67 -0
- package/src/tasks/question-answering/spec/output.json +29 -0
- package/src/tasks/sentence-similarity/about.md +2 -2
- package/src/tasks/sentence-similarity/inference.ts +32 -0
- package/src/tasks/sentence-similarity/spec/input.json +40 -0
- package/src/tasks/sentence-similarity/spec/output.json +12 -0
- package/src/tasks/summarization/data.ts +1 -0
- package/src/tasks/summarization/inference.ts +58 -0
- package/src/tasks/summarization/spec/input.json +7 -0
- package/src/tasks/summarization/spec/output.json +7 -0
- package/src/tasks/table-question-answering/inference.ts +61 -0
- package/src/tasks/table-question-answering/spec/input.json +39 -0
- package/src/tasks/table-question-answering/spec/output.json +40 -0
- package/src/tasks/tabular-classification/about.md +1 -1
- package/src/tasks/tabular-regression/about.md +1 -1
- package/src/tasks/text-classification/about.md +1 -0
- package/src/tasks/text-classification/inference.ts +51 -0
- package/src/tasks/text-classification/spec/input.json +35 -0
- package/src/tasks/text-classification/spec/output.json +10 -0
- package/src/tasks/text-generation/about.md +24 -13
- package/src/tasks/text-generation/data.ts +22 -38
- package/src/tasks/text-generation/inference.ts +85 -0
- package/src/tasks/text-generation/spec/input.json +74 -0
- package/src/tasks/text-generation/spec/output.json +17 -0
- package/src/tasks/text-to-audio/inference.ts +138 -0
- package/src/tasks/text-to-audio/spec/input.json +31 -0
- package/src/tasks/text-to-audio/spec/output.json +20 -0
- package/src/tasks/text-to-image/about.md +11 -2
- package/src/tasks/text-to-image/data.ts +6 -2
- package/src/tasks/text-to-image/inference.ts +73 -0
- package/src/tasks/text-to-image/spec/input.json +57 -0
- package/src/tasks/text-to-image/spec/output.json +15 -0
- package/src/tasks/text-to-speech/about.md +4 -2
- package/src/tasks/text-to-speech/data.ts +1 -0
- package/src/tasks/text-to-speech/inference.ts +146 -0
- package/src/tasks/text-to-speech/spec/input.json +7 -0
- package/src/tasks/text-to-speech/spec/output.json +7 -0
- package/src/tasks/text2text-generation/inference.ts +53 -0
- package/src/tasks/text2text-generation/spec/input.json +55 -0
- package/src/tasks/text2text-generation/spec/output.json +17 -0
- package/src/tasks/token-classification/inference.ts +82 -0
- package/src/tasks/token-classification/spec/input.json +65 -0
- package/src/tasks/token-classification/spec/output.json +33 -0
- package/src/tasks/translation/data.ts +1 -0
- package/src/tasks/translation/inference.ts +58 -0
- package/src/tasks/translation/spec/input.json +7 -0
- package/src/tasks/translation/spec/output.json +7 -0
- package/src/tasks/video-classification/inference.ts +59 -0
- package/src/tasks/video-classification/spec/input.json +42 -0
- package/src/tasks/video-classification/spec/output.json +10 -0
- package/src/tasks/visual-question-answering/inference.ts +63 -0
- package/src/tasks/visual-question-answering/spec/input.json +41 -0
- package/src/tasks/visual-question-answering/spec/output.json +21 -0
- package/src/tasks/zero-shot-classification/inference.ts +67 -0
- package/src/tasks/zero-shot-classification/spec/input.json +50 -0
- package/src/tasks/zero-shot-classification/spec/output.json +10 -0
- package/src/tasks/zero-shot-image-classification/data.ts +8 -5
- package/src/tasks/zero-shot-image-classification/inference.ts +61 -0
- package/src/tasks/zero-shot-image-classification/spec/input.json +45 -0
- package/src/tasks/zero-shot-image-classification/spec/output.json +10 -0
- package/src/tasks/zero-shot-object-detection/about.md +45 -0
- package/src/tasks/zero-shot-object-detection/data.ts +62 -0
- package/src/tasks/zero-shot-object-detection/inference.ts +66 -0
- package/src/tasks/zero-shot-object-detection/spec/input.json +40 -0
- package/src/tasks/zero-shot-object-detection/spec/output.json +47 -0
- package/tsconfig.json +3 -3
package/src/tasks/text-to-image/data.ts

@@ -79,13 +79,17 @@ const taskData: TaskDataCustom = {
       id: "latent-consistency/lcm-lora-for-sdxl",
     },
     {
-      description: "A
-      id: "
+      description: "A gallery to explore various text-to-image models.",
+      id: "multimodalart/LoraTheExplorer",
     },
     {
       description: "An application for `text-to-image`, `image-to-image` and image inpainting.",
       id: "ArtGAN/Stable-Diffusion-ControlNet-WebUI",
     },
+    {
+      description: "An application to generate realistic images given photos of a person and a prompt.",
+      id: "InstantX/InstantID",
+    },
   ],
   summary:
     "Generates images from input text. These models can be used to generate and modify images based on text prompts.",
package/src/tasks/text-to-image/inference.ts (new file)

@@ -0,0 +1,73 @@
+/**
+ * Inference code generated from the JSON schema spec in ./spec
+ *
+ * Using src/scripts/inference-codegen
+ */
+
+/**
+ * Inputs for Text To Image inference
+ */
+export interface TextToImageInput {
+  /**
+   * The input text data (sometimes called "prompt"
+   */
+  data: string;
+  /**
+   * Additional inference parameters
+   */
+  parameters?: TextToImageParameters;
+  [property: string]: unknown;
+}
+
+/**
+ * Additional inference parameters
+ *
+ * Additional inference parameters for Text To Image
+ */
+export interface TextToImageParameters {
+  /**
+   * For diffusion models. A higher guidance scale value encourages the model to generate
+   * images closely linked to the text prompt at the expense of lower image quality.
+   */
+  guidanceScale?: number;
+  /**
+   * One or several prompt to guide what NOT to include in image generation.
+   */
+  negativePrompt?: string[];
+  /**
+   * For diffusion models. The number of denoising steps. More denoising steps usually lead to
+   * a higher quality image at the expense of slower inference.
+   */
+  numInferenceSteps?: number;
+  /**
+   * For diffusion models. Override the scheduler with a compatible one
+   */
+  scheduler?: string;
+  /**
+   * The size in pixel of the output image
+   */
+  targetSize?: TargetSize;
+  [property: string]: unknown;
+}
+
+/**
+ * The size in pixel of the output image
+ */
+export interface TargetSize {
+  height: number;
+  width: number;
+  [property: string]: unknown;
+}
+
+/**
+ * Outputs of inference for the Text To Image task
+ */
+export type TextToImageOutput = unknown[] | boolean | number | number | null | TextToImageOutputObject | string;
+
+export interface TextToImageOutputObject {
+  /**
+   * The generated image
+   */
+  image: unknown;
+  [property: string]: unknown;
+}
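For orientation, the generated types above are plain TypeScript interfaces, so a caller can use them to shape a request payload. A minimal sketch follows, assuming the types are re-exported from the package entry point and that the payload is posted to an arbitrary inference URL; both the import path and the call site are illustrative assumptions, not something this diff establishes.

```typescript
import type { TextToImageInput, TextToImageOutput } from "@huggingface/tasks"; // re-export path assumed

// Assemble a payload using the camelCase parameter names from the generated types.
const payload: TextToImageInput = {
  data: "An astronaut riding a horse on the moon",
  parameters: {
    negativePrompt: ["blurry", "low quality"],
    numInferenceSteps: 30,
    guidanceScale: 7.5,
    targetSize: { width: 1024, height: 1024 },
  },
};

// Hypothetical call site: the URL, auth scheme, and response handling are illustrative only.
async function generateImage(url: string, token: string): Promise<TextToImageOutput> {
  const res = await fetch(url, {
    method: "POST",
    headers: { Authorization: `Bearer ${token}`, "Content-Type": "application/json" },
    body: JSON.stringify(payload),
  });
  return (await res.json()) as TextToImageOutput;
}
```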
package/src/tasks/text-to-image/spec/input.json (new file)

@@ -0,0 +1,57 @@
+{
+  "$id": "/inference/schemas/text-to-image/input.json",
+  "$schema": "http://json-schema.org/draft-06/schema#",
+  "description": "Inputs for Text To Image inference",
+  "title": "TextToImageInput",
+  "type": "object",
+  "properties": {
+    "data": {
+      "description": "The input text data (sometimes called \"prompt\"",
+      "type": "string"
+    },
+    "parameters": {
+      "description": "Additional inference parameters",
+      "$ref": "#/$defs/TextToImageParameters"
+    }
+  },
+  "$defs": {
+    "TextToImageParameters": {
+      "title": "TextToImageParameters",
+      "description": "Additional inference parameters for Text To Image",
+      "type": "object",
+      "properties": {
+        "guidanceScale": {
+          "type": "number",
+          "description": "For diffusion models. A higher guidance scale value encourages the model to generate images closely linked to the text prompt at the expense of lower image quality."
+        },
+        "negativePrompt": {
+          "type": "array",
+          "items": { "type": "string" },
+          "description": "One or several prompt to guide what NOT to include in image generation."
+        },
+        "numInferenceSteps": {
+          "type": "integer",
+          "description": "For diffusion models. The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference."
+        },
+        "targetSize": {
+          "type": "object",
+          "description": "The size in pixel of the output image",
+          "properties": {
+            "width": {
+              "type": "integer"
+            },
+            "height": {
+              "type": "integer"
+            }
+          },
+          "required": ["width", "height"]
+        },
+        "scheduler": {
+          "type": "string",
+          "description": "For diffusion models. Override the scheduler with a compatible one"
+        }
+      }
+    }
+  },
+  "required": ["data"]
+}
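Since the spec is standard JSON Schema draft-06, it can be exercised directly with an off-the-shelf validator. A minimal sketch using Ajv follows; Ajv is not a dependency of this package, and the draft-06 meta-schema registration plus the relative import path are assumptions for illustration.

```typescript
import Ajv from "ajv";
// Ajv v8 ships the draft-06 meta-schema as an opt-in registration.
import draft6MetaSchema from "ajv/dist/refs/json-schema-draft-06.json";
import inputSchema from "./spec/input.json"; // path assumed; requires "resolveJsonModule" in tsconfig

const ajv = new Ajv();
ajv.addMetaSchema(draft6MetaSchema);

// Compile the task input schema and validate a candidate payload.
const validate = ajv.compile(inputSchema);
const ok = validate({ data: "a cat wearing a spacesuit", parameters: { numInferenceSteps: 25 } });
if (!ok) {
  console.error(validate.errors);
}
```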
package/src/tasks/text-to-image/spec/output.json (new file)

@@ -0,0 +1,15 @@
+{
+  "$id": "/inference/schemas/text-to-image/output.json",
+  "$schema": "http://json-schema.org/draft-06/schema#",
+  "description": "Outputs of inference for the Text To Image task",
+  "title": "TextToImageOutput",
+  "type": "array",
+  "items": {
+    "properties": {
+      "image": {
+        "description": "The generated image"
+      }
+    },
+    "required": ["image"]
+  }
+}
package/src/tasks/text-to-speech/about.md

@@ -10,9 +10,9 @@ TTS models are used to create voice assistants on smart devices. These models ar
 
 TTS models are widely used in airport and public transportation announcement systems to convert the announcement of a given text into speech.
 
-## Inference
+## Inference Endpoints
 
-The Hub contains over [1500 TTS models](https://huggingface.co/models?pipeline_tag=text-to-speech&sort=downloads) that you can use right away by trying out the widgets directly in the browser or calling the models as a service using
+The Hub contains over [1500 TTS models](https://huggingface.co/models?pipeline_tag=text-to-speech&sort=downloads) that you can use right away by trying out the widgets directly in the browser or calling the models as a service using Inference Endpoints. Here is a simple code snippet to get you started:
 
 ```python
 import json

@@ -61,3 +61,5 @@ await inference.textToSpeech({
 - [An introduction to SpeechT5, a multi-purpose speech recognition and synthesis model](https://huggingface.co/blog/speecht5).
 - [A guide on Fine-tuning Whisper For Multilingual ASR with 🤗Transformers](https://huggingface.co/blog/fine-tune-whisper)
 - [Speech Synthesis, Recognition, and More With SpeechT5](https://huggingface.co/blog/speecht5)
+- [Optimizing a Text-To-Speech model using 🤗 Transformers](https://huggingface.co/blog/optimizing-bark)
+-
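The `await inference.textToSpeech({` context in the second hunk refers to the existing `@huggingface/inference` snippet on that page. For reference, a minimal sketch of that client call; the model ID and access token shown here are illustrative placeholders, not values taken from this diff.

```typescript
import { HfInference } from "@huggingface/inference";

const inference = new HfInference("hf_***"); // access token placeholder

async function announce(): Promise<Blob> {
  // Returns the generated audio as a Blob that can be saved to disk or played back.
  return inference.textToSpeech({
    model: "espnet/kan-bayashi_ljspeech_vits",
    inputs: "Flight LH 456 is now boarding at gate B12.",
  });
}
```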
package/src/tasks/text-to-speech/inference.ts (new file)

@@ -0,0 +1,146 @@
+/**
+ * Inference code generated from the JSON schema spec in ./spec
+ *
+ * Using src/scripts/inference-codegen
+ */
+
+/**
+ * Inputs for Text to Speech inference
+ *
+ * Inputs for Text To Audio inference
+ */
+export interface TextToSpeechInput {
+  /**
+   * The input text data
+   */
+  data: string;
+  /**
+   * Additional inference parameters
+   */
+  parameters?: TextToAudioParameters;
+  [property: string]: unknown;
+}
+
+/**
+ * Additional inference parameters
+ *
+ * Additional inference parameters for Text To Audio
+ */
+export interface TextToAudioParameters {
+  /**
+   * Parametrization of the text generation process
+   */
+  generate?: GenerationParameters;
+  [property: string]: unknown;
+}
+
+/**
+ * Parametrization of the text generation process
+ *
+ * Ad-hoc parametrization of the text generation process
+ */
+export interface GenerationParameters {
+  /**
+   * Whether to use sampling instead of greedy decoding when generating new tokens.
+   */
+  doSample?: boolean;
+  /**
+   * Controls the stopping condition for beam-based methods.
+   */
+  earlyStopping?: EarlyStoppingUnion;
+  /**
+   * If set to float strictly between 0 and 1, only tokens with a conditional probability
+   * greater than epsilon_cutoff will be sampled. In the paper, suggested values range from
+   * 3e-4 to 9e-4, depending on the size of the model. See [Truncation Sampling as Language
+   * Model Desmoothing](https://hf.co/papers/2210.15191) for more details.
+   */
+  epsilonCutoff?: number;
+  /**
+   * Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to
+   * float strictly between 0 and 1, a token is only considered if it is greater than either
+   * eta_cutoff or sqrt(eta_cutoff) * exp(-entropy(softmax(next_token_logits))). The latter
+   * term is intuitively the expected next token probability, scaled by sqrt(eta_cutoff). In
+   * the paper, suggested values range from 3e-4 to 2e-3, depending on the size of the model.
+   * See [Truncation Sampling as Language Model Desmoothing](https://hf.co/papers/2210.15191)
+   * for more details.
+   */
+  etaCutoff?: number;
+  /**
+   * The maximum length (in tokens) of the generated text, including the input.
+   */
+  maxLength?: number;
+  /**
+   * The maximum number of tokens to generate. Takes precedence over maxLength.
+   */
+  maxNewTokens?: number;
+  /**
+   * The minimum length (in tokens) of the generated text, including the input.
+   */
+  minLength?: number;
+  /**
+   * The minimum number of tokens to generate. Takes precedence over maxLength.
+   */
+  minNewTokens?: number;
+  /**
+   * Number of groups to divide num_beams into in order to ensure diversity among different
+   * groups of beams. See [this paper](https://hf.co/papers/1610.02424) for more details.
+   */
+  numBeamGroups?: number;
+  /**
+   * Number of beams to use for beam search.
+   */
+  numBeams?: number;
+  /**
+   * The value balances the model confidence and the degeneration penalty in contrastive
+   * search decoding.
+   */
+  penaltyAlpha?: number;
+  /**
+   * The value used to modulate the next token probabilities.
+   */
+  temperature?: number;
+  /**
+   * The number of highest probability vocabulary tokens to keep for top-k-filtering.
+   */
+  topK?: number;
+  /**
+   * If set to float < 1, only the smallest set of most probable tokens with probabilities
+   * that add up to top_p or higher are kept for generation.
+   */
+  topP?: number;
+  /**
+   * Local typicality measures how similar the conditional probability of predicting a target
+   * token next is to the expected conditional probability of predicting a random token next,
+   * given the partial text already generated. If set to float < 1, the smallest set of the
+   * most locally typical tokens with probabilities that add up to typical_p or higher are
+   * kept for generation. See [this paper](https://hf.co/papers/2202.00666) for more details.
+   */
+  typicalP?: number;
+  /**
+   * Whether the model should use the past last key/values attentions to speed up decoding
+   */
+  useCache?: boolean;
+  [property: string]: unknown;
+}
+
+/**
+ * Controls the stopping condition for beam-based methods.
+ */
+export type EarlyStoppingUnion = boolean | "never";
+
+/**
+ * Outputs for Text to Speech inference
+ *
+ * Outputs of inference for the Text To Audio task
+ */
+export interface TextToSpeechOutput {
+  /**
+   * The generated audio waveform.
+   */
+  audio: unknown;
+  /**
+   * The sampling rate of the generated audio waveform.
+   */
+  samplingRate: number;
+  [property: string]: unknown;
+}
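As with the text-to-image types, a minimal usage sketch for the generated text-to-speech types; the import path from the package root is assumed and the values are illustrative.

```typescript
import type { TextToSpeechInput, TextToSpeechOutput } from "@huggingface/tasks"; // re-export path assumed

const request: TextToSpeechInput = {
  data: "Flight LH 456 is now boarding at gate B12.",
  parameters: {
    // Generation controls for the underlying text-generation step.
    generate: {
      doSample: true,
      temperature: 0.7,
      maxNewTokens: 256,
      earlyStopping: "never",
    },
  },
};

// A conforming response carries the waveform plus its sampling rate.
function describe(output: TextToSpeechOutput): string {
  return `audio at ${output.samplingRate} Hz`;
}
```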
package/src/tasks/text2text-generation/inference.ts (new file)

@@ -0,0 +1,53 @@
+/**
+ * Inference code generated from the JSON schema spec in ./spec
+ *
+ * Using src/scripts/inference-codegen
+ */
+/**
+ * Inputs for Text2text Generation inference
+ */
+export interface Text2TextGenerationInput {
+  /**
+   * The input text data
+   */
+  data: string;
+  /**
+   * Additional inference parameters
+   */
+  parameters?: Text2TextGenerationParameters;
+  [property: string]: unknown;
+}
+/**
+ * Additional inference parameters
+ *
+ * Additional inference parameters for Text2text Generation
+ */
+export interface Text2TextGenerationParameters {
+  /**
+   * Whether to clean up the potential extra spaces in the text output.
+   */
+  cleanUpTokenizationSpaces?: boolean;
+  /**
+   * Additional parametrization of the text generation algorithm
+   */
+  generateParameters?: {
+    [key: string]: unknown;
+  };
+  /**
+   * The truncation strategy to use
+   */
+  truncation?: Text2TextGenerationTruncationStrategy;
+  [property: string]: unknown;
+}
+export type Text2TextGenerationTruncationStrategy = "do_not_truncate" | "longest_first" | "only_first" | "only_second";
+export type Text2TextGenerationOutput = Text2TextGenerationOutputElement[];
+/**
+ * Outputs of inference for the Text2text Generation task
+ */
+export interface Text2TextGenerationOutputElement {
+  /**
+   * The generated text.
+   */
+  generatedText: string;
+  [property: string]: unknown;
+}
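A minimal usage sketch for the generated text2text-generation types; the import path and example values are assumptions for illustration.

```typescript
import type { Text2TextGenerationInput, Text2TextGenerationOutput } from "@huggingface/tasks"; // re-export path assumed

const input: Text2TextGenerationInput = {
  data: "translate English to German: The house is wonderful.",
  parameters: {
    cleanUpTokenizationSpaces: true,
    truncation: "longest_first",
    // Free-form pass-through bag for backend-specific generation options.
    generateParameters: { max_new_tokens: 40 },
  },
};

// Each output element exposes a generatedText field.
function firstText(output: Text2TextGenerationOutput): string {
  return output[0]?.generatedText ?? "";
}
```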
package/src/tasks/text2text-generation/spec/input.json (new file)

@@ -0,0 +1,55 @@
+{
+  "$id": "/inference/schemas/text2text-generation/input.json",
+  "$schema": "http://json-schema.org/draft-06/schema#",
+  "description": "Inputs for Text2text Generation inference",
+  "title": "Text2TextGenerationInput",
+  "type": "object",
+  "properties": {
+    "data": {
+      "description": "The input text data",
+      "type": "string"
+    },
+    "parameters": {
+      "description": "Additional inference parameters",
+      "$ref": "#/$defs/Text2textGenerationParameters"
+    }
+  },
+  "$defs": {
+    "Text2textGenerationParameters": {
+      "title": "Text2textGenerationParameters",
+      "description": "Additional inference parameters for Text2text Generation",
+      "type": "object",
+      "properties": {
+        "cleanUpTokenizationSpaces": {
+          "type": "boolean",
+          "description": "Whether to clean up the potential extra spaces in the text output."
+        },
+        "truncation": {
+          "title": "Text2textGenerationTruncationStrategy",
+          "type": "string",
+          "description": "The truncation strategy to use",
+          "oneOf": [
+            {
+              "const": "do_not_truncate"
+            },
+            {
+              "const": "longest_first"
+            },
+            {
+              "const": "only_first"
+            },
+            {
+              "const": "only_second"
+            }
+          ]
+        },
+        "generateParameters": {
+          "title": "generateParameters",
+          "type": "object",
+          "description": "Additional parametrization of the text generation algorithm"
+        }
+      }
+    }
+  },
+  "required": ["data"]
+}
package/src/tasks/text2text-generation/spec/output.json (new file)

@@ -0,0 +1,17 @@
+{
+  "$id": "/inference/schemas/text2text-generation/output.json",
+  "$schema": "http://json-schema.org/draft-06/schema#",
+  "description": "Outputs of inference for the Text2text Generation task",
+  "title": "Text2TextGenerationOutput",
+  "type": "array",
+  "items": {
+    "type": "object",
+    "properties": {
+      "generatedText": {
+        "type": "string",
+        "description": "The generated text."
+      }
+    },
+    "required": ["generatedText"]
+  }
+}
package/src/tasks/token-classification/inference.ts (new file)

@@ -0,0 +1,82 @@
+/**
+ * Inference code generated from the JSON schema spec in ./spec
+ *
+ * Using src/scripts/inference-codegen
+ */
+/**
+ * Inputs for Token Classification inference
+ */
+export interface TokenClassificationInput {
+  /**
+   * The input text data
+   */
+  data: string;
+  /**
+   * Additional inference parameters
+   */
+  parameters?: TokenClassificationParameters;
+  [property: string]: unknown;
+}
+/**
+ * Additional inference parameters
+ *
+ * Additional inference parameters for Token Classification
+ */
+export interface TokenClassificationParameters {
+  /**
+   * The strategy used to fuse tokens based on model predictions
+   */
+  aggregationStrategy?: TokenClassificationAggregationStrategy;
+  /**
+   * A list of labels to ignore
+   */
+  ignoreLabels?: string[];
+  /**
+   * The number of overlapping tokens between chunks when splitting the input text.
+   */
+  stride?: number;
+  [property: string]: unknown;
+}
+/**
+ * Do not aggregate tokens
+ *
+ * Group consecutive tokens with the same label in a single entity.
+ *
+ * Similar to "simple", also preserves word integrity (use the label predicted for the first
+ * token in a word).
+ *
+ * Similar to "simple", also preserves word integrity (uses the label with the highest
+ * score, averaged across the word's tokens).
+ *
+ * Similar to "simple", also preserves word integrity (uses the label with the highest score
+ * across the word's tokens).
+ */
+export type TokenClassificationAggregationStrategy = "none" | "simple" | "first" | "average" | "max";
+export type TokenClassificationOutput = TokenClassificationOutputElement[];
+/**
+ * Outputs of inference for the Token Classification task
+ */
+export interface TokenClassificationOutputElement {
+  /**
+   * The character position in the input where this group ends.
+   */
+  end?: number;
+  /**
+   * The predicted label for that group of tokens
+   */
+  entityGroup?: string;
+  label: unknown;
+  /**
+   * The associated score / probability
+   */
+  score: number;
+  /**
+   * The character position in the input where this group begins.
+   */
+  start?: number;
+  /**
+   * The corresponding text
+   */
+  word?: string;
+  [property: string]: unknown;
+}
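A minimal usage sketch for the generated token-classification types; the import path is assumed, and since most entity fields are optional in the generated output type, callers should guard before formatting.

```typescript
import type { TokenClassificationInput, TokenClassificationOutput } from "@huggingface/tasks"; // re-export path assumed

const request: TokenClassificationInput = {
  data: "My name is Clara and I live in Berkeley.",
  parameters: { aggregationStrategy: "simple", ignoreLabels: ["O"] },
};

// Format detected entities, skipping elements without span information.
function formatEntities(output: TokenClassificationOutput): string[] {
  return output
    .filter((e) => e.entityGroup !== undefined && e.start !== undefined && e.end !== undefined)
    .map((e) => `${e.entityGroup}: "${e.word ?? ""}" [${e.start}-${e.end}] (score ${e.score.toFixed(2)})`);
}
```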
package/src/tasks/token-classification/spec/input.json (new file)

@@ -0,0 +1,65 @@
+{
+  "$id": "/inference/schemas/token-classification/input.json",
+  "$schema": "http://json-schema.org/draft-06/schema#",
+  "description": "Inputs for Token Classification inference",
+  "title": "TokenClassificationInput",
+  "type": "object",
+  "properties": {
+    "data": {
+      "description": "The input text data",
+      "type": "string"
+    },
+    "parameters": {
+      "description": "Additional inference parameters",
+      "$ref": "#/$defs/TokenClassificationParameters"
+    }
+  },
+  "$defs": {
+    "TokenClassificationParameters": {
+      "title": "TokenClassificationParameters",
+      "description": "Additional inference parameters for Token Classification",
+      "type": "object",
+      "properties": {
+        "ignoreLabels": {
+          "type": "array",
+          "items": {
+            "type": "string"
+          },
+          "description": "A list of labels to ignore"
+        },
+        "stride": {
+          "type": "integer",
+          "description": "The number of overlapping tokens between chunks when splitting the input text."
+        },
+        "aggregationStrategy": {
+          "title": "TokenClassificationAggregationStrategy",
+          "type": "string",
+          "description": "The strategy used to fuse tokens based on model predictions",
+          "oneOf": [
+            {
+              "const": "none",
+              "description": "Do not aggregate tokens"
+            },
+            {
+              "const": "simple",
+              "description": "Group consecutive tokens with the same label in a single entity."
+            },
+            {
+              "const": "first",
+              "description": "Similar to \"simple\", also preserves word integrity (use the label predicted for the first token in a word)."
+            },
+            {
+              "const": "average",
+              "description": "Similar to \"simple\", also preserves word integrity (uses the label with the highest score, averaged across the word's tokens)."
+            },
+            {
+              "const": "max",
+              "description": "Similar to \"simple\", also preserves word integrity (uses the label with the highest score across the word's tokens)."
+            }
+          ]
+        }
+      }
+    }
+  },
+  "required": ["data"]
+}
package/src/tasks/token-classification/spec/output.json (new file)

@@ -0,0 +1,33 @@
+{
+  "$id": "/inference/schemas/token-classification/output.json",
+  "$schema": "http://json-schema.org/draft-06/schema#",
+  "description": "Outputs of inference for the Token Classification task",
+  "title": "TokenClassificationOutput",
+  "type": "array",
+  "items": {
+    "type": "object",
+    "properties": {
+      "entityGroup": {
+        "type": "string",
+        "description": "The predicted label for that group of tokens"
+      },
+      "score": {
+        "type": "number",
+        "description": "The associated score / probability"
+      },
+      "word": {
+        "type": "string",
+        "description": "The corresponding text"
+      },
+      "start": {
+        "type": "integer",
+        "description": "The character position in the input where this group begins."
+      },
+      "end": {
+        "type": "integer",
+        "description": "The character position in the input where this group ends."
+      }
+    },
+    "required": ["label", "score"]
+  }
+}