@huggingface/tasks 0.2.2 → 0.3.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -1
- package/dist/index.cjs +3144 -3085
- package/dist/index.d.ts +441 -74
- package/dist/index.js +3143 -3084
- package/package.json +1 -1
- package/src/index.ts +2 -5
- package/src/library-to-tasks.ts +1 -1
- package/src/model-libraries-downloads.ts +20 -0
- package/src/{library-ui-elements.ts → model-libraries-snippets.ts} +46 -292
- package/src/model-libraries.ts +375 -44
- package/src/pipelines.ts +14 -8
- package/src/tasks/audio-classification/inference.ts +4 -4
- package/src/tasks/audio-classification/spec/input.json +4 -4
- package/src/tasks/audio-classification/spec/output.json +1 -12
- package/src/tasks/automatic-speech-recognition/inference.ts +35 -30
- package/src/tasks/automatic-speech-recognition/spec/input.json +3 -3
- package/src/tasks/automatic-speech-recognition/spec/output.json +30 -28
- package/src/tasks/common-definitions.json +25 -17
- package/src/tasks/depth-estimation/inference.ts +10 -10
- package/src/tasks/depth-estimation/spec/input.json +3 -8
- package/src/tasks/depth-estimation/spec/output.json +9 -3
- package/src/tasks/document-question-answering/inference.ts +16 -8
- package/src/tasks/document-question-answering/spec/input.json +9 -9
- package/src/tasks/document-question-answering/spec/output.json +2 -2
- package/src/tasks/feature-extraction/inference.ts +1 -1
- package/src/tasks/feature-extraction/spec/input.json +2 -2
- package/src/tasks/fill-mask/inference.ts +4 -3
- package/src/tasks/fill-mask/spec/input.json +3 -3
- package/src/tasks/fill-mask/spec/output.json +1 -1
- package/src/tasks/image-classification/inference.ts +3 -3
- package/src/tasks/image-classification/spec/input.json +4 -4
- package/src/tasks/image-segmentation/inference.ts +3 -3
- package/src/tasks/image-segmentation/spec/input.json +4 -4
- package/src/tasks/image-to-image/inference.ts +5 -5
- package/src/tasks/image-to-image/spec/input.json +9 -7
- package/src/tasks/image-to-text/inference.ts +25 -20
- package/src/tasks/image-to-text/spec/input.json +3 -3
- package/src/tasks/image-to-text/spec/output.json +8 -11
- package/src/tasks/index.ts +2 -0
- package/src/tasks/object-detection/inference.ts +1 -1
- package/src/tasks/object-detection/spec/input.json +2 -2
- package/src/tasks/placeholder/spec/input.json +4 -4
- package/src/tasks/placeholder/spec/output.json +1 -1
- package/src/tasks/question-answering/inference.ts +8 -8
- package/src/tasks/question-answering/spec/input.json +9 -9
- package/src/tasks/sentence-similarity/inference.ts +1 -1
- package/src/tasks/sentence-similarity/spec/input.json +2 -2
- package/src/tasks/summarization/inference.ts +5 -4
- package/src/tasks/table-question-answering/inference.ts +1 -1
- package/src/tasks/table-question-answering/spec/input.json +8 -3
- package/src/tasks/text-classification/inference.ts +3 -3
- package/src/tasks/text-classification/spec/input.json +4 -4
- package/src/tasks/text-generation/inference.ts +123 -14
- package/src/tasks/text-generation/spec/input.json +28 -12
- package/src/tasks/text-generation/spec/output.json +112 -9
- package/src/tasks/text-to-audio/inference.ts +24 -19
- package/src/tasks/text-to-audio/spec/input.json +2 -2
- package/src/tasks/text-to-audio/spec/output.json +10 -13
- package/src/tasks/text-to-image/inference.ts +6 -8
- package/src/tasks/text-to-image/spec/input.json +9 -7
- package/src/tasks/text-to-image/spec/output.json +7 -9
- package/src/tasks/text-to-speech/inference.ts +18 -17
- package/src/tasks/text2text-generation/inference.ts +10 -8
- package/src/tasks/text2text-generation/spec/input.json +4 -4
- package/src/tasks/text2text-generation/spec/output.json +8 -11
- package/src/tasks/token-classification/inference.ts +4 -4
- package/src/tasks/token-classification/spec/input.json +4 -4
- package/src/tasks/token-classification/spec/output.json +1 -1
- package/src/tasks/translation/inference.ts +5 -4
- package/src/tasks/video-classification/inference.ts +5 -5
- package/src/tasks/video-classification/spec/input.json +6 -6
- package/src/tasks/visual-question-answering/inference.ts +2 -2
- package/src/tasks/visual-question-answering/spec/input.json +3 -3
- package/src/tasks/zero-shot-classification/inference.ts +3 -3
- package/src/tasks/zero-shot-classification/spec/input.json +4 -4
- package/src/tasks/zero-shot-image-classification/inference.ts +2 -2
- package/src/tasks/zero-shot-image-classification/spec/input.json +3 -3
- package/src/tasks/zero-shot-object-detection/inference.ts +1 -1
- package/src/tasks/zero-shot-object-detection/spec/input.json +2 -2
package/src/tasks/table-question-answering/spec/input.json
@@ -5,7 +5,7 @@
 	"title": "TableQuestionAnsweringInput",
 	"type": "object",
 	"properties": {
-		"
+		"inputs": {
 			"description": "One (table, question) pair to answer",
 			"title": "TableQuestionAnsweringInputData",
 			"type": "object",
@@ -13,7 +13,12 @@
 				"table": {
 					"description": "The table to serve as context for the questions",
 					"type": "object",
-					"additionalProperties": {
+					"additionalProperties": {
+						"type": "array",
+						"items": {
+							"type": "string"
+						}
+					}
 				},
 				"question": {
 					"description": "The question to be answered about the table",
@@ -35,5 +40,5 @@
 			"properties": {}
 		}
 	},
-	"required": ["
+	"required": ["inputs"]
 }
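The reshaped `table` property now maps each column name to an array of string cells, under the renamed top-level `inputs` key. A request body fitting the updated schema would look roughly like this (a sketch with invented values; the endpoint wiring is outside this package):

```ts
// Illustrative payload for the updated table-question-answering input schema:
// `table` maps column names to arrays of string cells.
const payload = {
	inputs: {
		table: {
			Repository: ["transformers", "datasets"],
			Stars: ["120000", "18000"],
		},
		question: "Which repository has the most stars?",
	},
};
```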
package/src/tasks/text-classification/inference.ts
@@ -10,7 +10,7 @@ export interface TextClassificationInput {
 	/**
 	 * The text to classify
 	 */
-
+	inputs: string;
 	/**
 	 * Additional inference parameters
 	 */
@@ -23,11 +23,11 @@ export interface TextClassificationInput {
  * Additional inference parameters for Text Classification
  */
 export interface TextClassificationParameters {
-
+	function_to_apply?: ClassificationOutputTransform;
 	/**
 	 * When specified, limits the output to the top K most probable classes.
 	 */
-
+	top_k?: number;
 	[property: string]: unknown;
 }
 /**
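With the input property now named `inputs` and the parameter fields typed, a request object can be checked at compile time. A minimal sketch, assuming the interface is re-exported from the package root and that `ClassificationOutputTransform` admits `"softmax"` (its members are not shown in this diff):

```ts
import type { TextClassificationInput } from "@huggingface/tasks";

const request: TextClassificationInput = {
	inputs: "I love this movie!",
	parameters: {
		function_to_apply: "softmax", // assumed member of ClassificationOutputTransform
		top_k: 2, // keep only the two most probable classes
	},
};
```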
package/src/tasks/text-classification/spec/input.json
@@ -5,7 +5,7 @@
 	"title": "TextClassificationInput",
 	"type": "object",
 	"properties": {
-		"
+		"inputs": {
 			"description": "The text to classify",
 			"type": "string"
 		},
@@ -20,16 +20,16 @@
 			"description": "Additional inference parameters for Text Classification",
 			"type": "object",
 			"properties": {
-				"
+				"function_to_apply": {
 					"title": "TextClassificationOutputTransform",
 					"$ref": "/inference/schemas/common-definitions.json#/definitions/ClassificationOutputTransform"
 				},
-				"
+				"top_k": {
 					"type": "integer",
 					"description": "When specified, limits the output to the top K most probable classes."
 				}
 			}
 		}
 	},
-	"required": ["
+	"required": ["inputs"]
 }
package/src/tasks/text-generation/inference.ts
@@ -3,6 +3,7 @@
  *
  * Using src/scripts/inference-codegen
  */
+
 /**
  * Inputs for Text Generation inference
  */
@@ -10,13 +11,14 @@ export interface TextGenerationInput {
 	/**
 	 * The text to initialize generation with
 	 */
-
+	inputs: string;
 	/**
 	 * Additional inference parameters
 	 */
 	parameters?: TextGenerationParameters;
 	[property: string]: unknown;
 }
+
 /**
  * Additional inference parameters
  *
@@ -24,26 +26,43 @@ export interface TextGenerationInput {
  */
 export interface TextGenerationParameters {
 	/**
-	 *
+	 * The number of sampling queries to run. Only the best one (in terms of total logprob) will
+	 * be returned.
 	 */
-
+	best_of?: number;
 	/**
-	 *
+	 * Whether or not to output decoder input details
 	 */
-
+	decoder_input_details?: boolean;
+	/**
+	 * Whether or not to output details
+	 */
+	details?: boolean;
+	/**
+	 * Whether to use logits sampling instead of greedy decoding when generating new tokens.
+	 */
+	do_sample?: boolean;
+	/**
+	 * The maximum number of tokens to generate.
+	 */
+	max_new_tokens?: number;
 	/**
 	 * The parameter for repetition penalty. A value of 1.0 means no penalty. See [this
 	 * paper](https://hf.co/papers/1909.05858) for more details.
 	 */
-
+	repetition_penalty?: number;
 	/**
 	 * Whether to prepend the prompt to the generated text.
 	 */
-
+	return_full_text?: boolean;
+	/**
+	 * The random sampling seed.
+	 */
+	seed?: number;
 	/**
 	 * Stop generating tokens if a member of `stop_sequences` is generated.
 	 */
-
+	stop_sequences?: string[];
 	/**
 	 * The value used to modulate the logits distribution.
 	 */
@@ -51,12 +70,12 @@ export interface TextGenerationParameters {
 	/**
 	 * The number of highest probability vocabulary tokens to keep for top-k-filtering.
 	 */
-
+	top_k?: number;
 	/**
 	 * If set to < 1, only the smallest set of most probable tokens with probabilities that add
 	 * up to `top_p` or higher are kept for generation.
 	 */
-
+	top_p?: number;
 	/**
 	 * Truncate input tokens to the given size.
 	 */
@@ -65,21 +84,111 @@ export interface TextGenerationParameters {
 	 * Typical Decoding mass. See [Typical Decoding for Natural Language
 	 * Generation](https://hf.co/papers/2202.00666) for more information
 	 */
-
+	typical_p?: number;
 	/**
 	 * Watermarking with [A Watermark for Large Language Models](https://hf.co/papers/2301.10226)
 	 */
 	watermark?: boolean;
 	[property: string]: unknown;
 }
-
+
 /**
  * Outputs for Text Generation inference
  */
-export interface
+export interface TextGenerationOutput {
+	/**
+	 * When enabled, details about the generation
+	 */
+	details?: TextGenerationOutputDetails;
 	/**
 	 * The generated text
 	 */
-
+	generated_text: string;
+	[property: string]: unknown;
+}
+
+/**
+ * When enabled, details about the generation
+ */
+export interface TextGenerationOutputDetails {
+	/**
+	 * Details about additional sequences when best_of is provided
+	 */
+	best_of_sequences?: TextGenerationSequenceDetails[];
+	/**
+	 * The reason why the generation was stopped.
+	 */
+	finish_reason: FinishReason;
+	/**
+	 * The number of generated tokens
+	 */
+	generated_tokens: number;
+	prefill: PrefillToken[];
+	/**
+	 * The random seed used for generation
+	 */
+	seed?: number;
+	/**
+	 * The generated tokens and associated details
+	 */
+	tokens: Token[];
+	[property: string]: unknown;
+}
+
+export interface TextGenerationSequenceDetails {
+	/**
+	 * The reason why the generation was stopped.
+	 */
+	finish_reason: FinishReason;
+	/**
+	 * The generated text
+	 */
+	generated_text: number;
+	/**
+	 * The number of generated tokens
+	 */
+	generated_tokens: number;
+	prefill: PrefillToken[];
+	/**
+	 * The random seed used for generation
+	 */
+	seed?: number;
+	/**
+	 * The generated tokens and associated details
+	 */
+	tokens: Token[];
+	[property: string]: unknown;
+}
+
+/**
+ * The generated sequence reached the maximum allowed length
+ *
+ * The model generated an end-of-sentence (EOS) token
+ *
+ * One of the sequence in stop_sequences was generated
+ */
+export type FinishReason = "length" | "eos_token" | "stop_sequence";
+
+export interface PrefillToken {
+	id: number;
+	logprob: number;
+	/**
+	 * The text associated with that token
+	 */
+	text: string;
+	[property: string]: unknown;
+}
+
+export interface Token {
+	id: number;
+	logprob: number;
+	/**
+	 * Whether or not that token is a special one
+	 */
+	special: boolean;
+	/**
+	 * The text associated with that token
+	 */
+	text: string;
 	[property: string]: unknown;
 }
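The fleshed-out parameter and output types make the text-generation fields usable from TypeScript. A minimal sketch, assuming these interfaces are re-exported from the package root:

```ts
import type { TextGenerationInput, TextGenerationOutput } from "@huggingface/tasks";

// Illustrative request using the newly typed parameters.
const request: TextGenerationInput = {
	inputs: "Once upon a time",
	parameters: {
		best_of: 2,
		do_sample: true,
		max_new_tokens: 64,
		seed: 42,
		stop_sequences: ["\n\n"],
		details: true, // ask for TextGenerationOutputDetails in the response
	},
};

// `details` is optional on the output; `finish_reason` narrows to
// "length" | "eos_token" | "stop_sequence".
function summarize(output: TextGenerationOutput): string {
	const reason = output.details?.finish_reason ?? "unknown";
	return `${output.generated_text} (stopped: ${reason})`;
}
```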
package/src/tasks/text-generation/spec/input.json
@@ -5,7 +5,7 @@
 	"title": "TextGenerationInput",
 	"type": "object",
 	"properties": {
-		"
+		"inputs": {
 			"description": "The text to initialize generation with",
 			"type": "string"
 		},
@@ -20,23 +20,39 @@
 			"description": "Additional inference parameters for Text Generation",
 			"type": "object",
 			"properties": {
-				"
+				"best_of": {
+					"type": "integer",
+					"description": "The number of sampling queries to run. Only the best one (in terms of total logprob) will be returned."
+				},
+				"decoder_input_details": {
+					"type": "boolean",
+					"description": "Whether or not to output decoder input details"
+				},
+				"details": {
 					"type": "boolean",
-					"description": "Whether
+					"description": "Whether or not to output details"
 				},
-				"
+				"do_sample": {
+					"type": "boolean",
+					"description": "Whether to use logits sampling instead of greedy decoding when generating new tokens."
+				},
+				"max_new_tokens": {
 					"type": "integer",
-					"description": "
+					"description": "The maximum number of tokens to generate."
 				},
-				"
+				"repetition_penalty": {
 					"type": "number",
 					"description": "The parameter for repetition penalty. A value of 1.0 means no penalty. See [this paper](https://hf.co/papers/1909.05858) for more details."
 				},
-				"
+				"return_full_text": {
 					"type": "boolean",
 					"description": "Whether to prepend the prompt to the generated text."
 				},
-				"
+				"seed": {
+					"type": "integer",
+					"description": "The random sampling seed."
+				},
+				"stop_sequences": {
 					"type": "array",
 					"items": {
 						"type": "string"
@@ -47,11 +63,11 @@
 					"type": "number",
 					"description": "The value used to modulate the logits distribution."
 				},
-				"
+				"top_k": {
 					"type": "integer",
 					"description": "The number of highest probability vocabulary tokens to keep for top-k-filtering."
 				},
-				"
+				"top_p": {
 					"type": "number",
 					"description": "If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or higher are kept for generation."
 				},
@@ -59,7 +75,7 @@
 					"type": "integer",
 					"description": "Truncate input tokens to the given size."
 				},
-				"
+				"typical_p": {
 					"type": "number",
 					"description": "Typical Decoding mass. See [Typical Decoding for Natural Language Generation](https://hf.co/papers/2202.00666) for more information"
 				},
@@ -70,5 +86,5 @@
 			}
 		}
 	},
-	"required": ["
+	"required": ["inputs"]
 }
package/src/tasks/text-generation/spec/output.json
@@ -3,15 +3,118 @@
 	"$schema": "http://json-schema.org/draft-06/schema#",
 	"description": "Outputs for Text Generation inference",
 	"title": "TextGenerationOutput",
-	"type": "
-	"
-	"
-
-	"
-	"type": "string",
-	"description": "The generated text"
-	}
+	"type": "object",
+	"properties": {
+		"generated_text": {
+			"type": "string",
+			"description": "The generated text"
 		},
-		"
+		"details": {
+			"description": "When enabled, details about the generation",
+			"title": "TextGenerationOutputDetails",
+			"allOf": [
+				{ "$ref": "#/$defs/SequenceDetails" },
+				{
+					"type": "object",
+					"properties": {
+						"best_of_sequences": {
+							"type": "array",
+							"description": "Details about additional sequences when best_of is provided",
+							"items": {
+								"allOf": [
+									{ "$ref": "#/$defs/SequenceDetails" },
+									{
+										"type": "object",
+										"properties": {
+											"generated_text": {
+												"type": "integer",
+												"description": "The generated text"
+											}
+										},
+										"required": ["generated_text"]
+									}
+								]
+							}
+						}
+					}
+				}
+			]
+		}
+	},
+	"required": ["generated_text"],
+
+	"$defs": {
+		"Token": {
+			"type": "object",
+			"title": "Token",
+			"properties": {
+				"id": {
+					"type": "integer"
+				},
+				"logprob": {
+					"type": "number"
+				},
+				"special": {
+					"type": "boolean",
+					"description": "Whether or not that token is a special one"
+				},
+				"text": {
+					"type": "string",
+					"description": "The text associated with that token"
+				}
+			},
+			"required": ["id", "logprob", "special", "text"]
+		},
+		"SequenceDetails": {
+			"type": "object",
+			"title": "TextGenerationSequenceDetails",
+			"properties": {
+				"finish_reason": {
+					"type": "string",
+					"description": "The reason why the generation was stopped.",
+					"oneOf": [
+						{ "const": "length", "description": "The generated sequence reached the maximum allowed length" },
+						{ "const": "eos_token", "description": "The model generated an end-of-sentence (EOS) token" },
+						{ "const": "stop_sequence", "description": "One of the sequence in stop_sequences was generated" }
+					]
+				},
+				"generated_tokens": {
+					"type": "integer",
+					"description": "The number of generated tokens"
+				},
+				"prefill": {
+					"type": "array",
+					"items": {
+						"title": "PrefillToken",
+						"type": "object",
+						"properties": {
+							"id": {
+								"type": "integer"
+							},
+							"logprob": {
+								"type": "number"
+							},
+							"text": {
+								"type": "string",
+								"description": "The text associated with that token"
+							}
+						},
+						"required": ["id", "logprob", "text"]
+					}
+				},
+				"seed": {
+					"type": "integer",
+					"description": "The random seed used for generation"
+				},
+				"tokens": {
+					"type": "array",
+					"description": "The generated tokens and associated details",
+					"items": {
+						"$ref": "#/$defs/Token"
+					}
+				}
+			},
+			"required": ["finish_reason", "generated_tokens", "prefill", "tokens"]
+		}
 	}
 }
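The schema composes the shared `SequenceDetails` definition twice via `allOf`: once for the top-level `details` object and once per entry of `best_of_sequences`. An output instance shaped by it would look roughly like this (invented values):

```ts
// Illustrative instance of the new TextGenerationOutput schema; `details`
// carries the SequenceDetails fields, with `seed` optional.
const output = {
	generated_text: " in a land far away.",
	details: {
		finish_reason: "eos_token",
		generated_tokens: 6,
		prefill: [{ id: 7454, logprob: -3.2, text: "Once" }],
		tokens: [{ id: 287, logprob: -0.5, special: false, text: " in" }],
		seed: 42,
	},
};
```

Note that inside `best_of_sequences` the schema types `generated_text` as `integer`, mirroring the `generated_text: number` field in the generated TypeScript above.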
package/src/tasks/text-to-audio/inference.ts
@@ -3,6 +3,7 @@
  *
  * Using src/scripts/inference-codegen
  */
+
 /**
  * Inputs for Text To Audio inference
  */
@@ -10,13 +11,14 @@ export interface TextToAudioInput {
 	/**
 	 * The input text data
 	 */
-
+	inputs: string;
 	/**
 	 * Additional inference parameters
 	 */
 	parameters?: TextToAudioParameters;
 	[property: string]: unknown;
 }
+
 /**
  * Additional inference parameters
  *
@@ -29,6 +31,7 @@ export interface TextToAudioParameters {
 	generate?: GenerationParameters;
 	[property: string]: unknown;
 }
+
 /**
  * Parametrization of the text generation process
  *
@@ -38,18 +41,18 @@ export interface GenerationParameters {
 	/**
 	 * Whether to use sampling instead of greedy decoding when generating new tokens.
 	 */
-
+	do_sample?: boolean;
 	/**
 	 * Controls the stopping condition for beam-based methods.
 	 */
-
+	early_stopping?: EarlyStoppingUnion;
 	/**
 	 * If set to float strictly between 0 and 1, only tokens with a conditional probability
 	 * greater than epsilon_cutoff will be sampled. In the paper, suggested values range from
 	 * 3e-4 to 9e-4, depending on the size of the model. See [Truncation Sampling as Language
 	 * Model Desmoothing](https://hf.co/papers/2210.15191) for more details.
 	 */
-
+	epsilon_cutoff?: number;
 	/**
 	 * Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to
 	 * float strictly between 0 and 1, a token is only considered if it is greater than either
@@ -59,37 +62,37 @@ export interface GenerationParameters {
 	 * See [Truncation Sampling as Language Model Desmoothing](https://hf.co/papers/2210.15191)
 	 * for more details.
 	 */
-
+	eta_cutoff?: number;
 	/**
 	 * The maximum length (in tokens) of the generated text, including the input.
 	 */
-
+	max_length?: number;
 	/**
 	 * The maximum number of tokens to generate. Takes precedence over maxLength.
 	 */
-
+	max_new_tokens?: number;
 	/**
 	 * The minimum length (in tokens) of the generated text, including the input.
 	 */
-
+	min_length?: number;
 	/**
 	 * The minimum number of tokens to generate. Takes precedence over maxLength.
 	 */
-
+	min_new_tokens?: number;
 	/**
 	 * Number of groups to divide num_beams into in order to ensure diversity among different
 	 * groups of beams. See [this paper](https://hf.co/papers/1610.02424) for more details.
 	 */
-
+	num_beam_groups?: number;
 	/**
 	 * Number of beams to use for beam search.
 	 */
-
+	num_beams?: number;
 	/**
 	 * The value balances the model confidence and the degeneration penalty in contrastive
 	 * search decoding.
 	 */
-
+	penalty_alpha?: number;
 	/**
 	 * The value used to modulate the next token probabilities.
 	 */
@@ -97,12 +100,12 @@ export interface GenerationParameters {
 	/**
 	 * The number of highest probability vocabulary tokens to keep for top-k-filtering.
 	 */
-
+	top_k?: number;
 	/**
 	 * If set to float < 1, only the smallest set of most probable tokens with probabilities
 	 * that add up to top_p or higher are kept for generation.
 	 */
-
+	top_p?: number;
 	/**
 	 * Local typicality measures how similar the conditional probability of predicting a target
 	 * token next is to the expected conditional probability of predicting a random token next,
@@ -110,29 +113,31 @@ export interface GenerationParameters {
 	 * most locally typical tokens with probabilities that add up to typical_p or higher are
 	 * kept for generation. See [this paper](https://hf.co/papers/2202.00666) for more details.
 	 */
-
+	typical_p?: number;
 	/**
 	 * Whether the model should use the past last key/values attentions to speed up decoding
 	 */
-
+	use_cache?: boolean;
 	[property: string]: unknown;
 }
+
 /**
  * Controls the stopping condition for beam-based methods.
  */
 export type EarlyStoppingUnion = boolean | "never";
-
+
 /**
  * Outputs of inference for the Text To Audio task
  */
-export interface
+export interface TextToAudioOutput {
 	/**
 	 * The generated audio waveform.
 	 */
 	audio: unknown;
+	samplingRate: unknown;
 	/**
 	 * The sampling rate of the generated audio waveform.
 	 */
-
+	sampling_rate?: number;
 	[property: string]: unknown;
 }
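The shared `GenerationParameters` fields are now snake_case and nest under `parameters.generate`. A minimal sketch, assuming `TextToAudioInput` is re-exported from the package root:

```ts
import type { TextToAudioInput } from "@huggingface/tasks";

const request: TextToAudioInput = {
	inputs: "Hello world",
	parameters: {
		generate: {
			do_sample: true,
			early_stopping: "never", // EarlyStoppingUnion: boolean | "never"
			max_new_tokens: 256,
			top_k: 50,
		},
	},
};
```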