@huggingface/tasks 0.2.2 → 0.3.0

This diff shows the changes between two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
Files changed (77)
  1. package/README.md +1 -1
  2. package/dist/index.cjs +3136 -3085
  3. package/dist/index.d.ts +425 -64
  4. package/dist/index.js +3135 -3084
  5. package/package.json +1 -1
  6. package/src/index.ts +2 -5
  7. package/src/library-to-tasks.ts +1 -1
  8. package/src/model-libraries-downloads.ts +20 -0
  9. package/src/{library-ui-elements.ts → model-libraries-snippets.ts} +46 -292
  10. package/src/model-libraries.ts +375 -44
  11. package/src/tasks/audio-classification/inference.ts +4 -4
  12. package/src/tasks/audio-classification/spec/input.json +4 -4
  13. package/src/tasks/audio-classification/spec/output.json +1 -12
  14. package/src/tasks/automatic-speech-recognition/inference.ts +35 -30
  15. package/src/tasks/automatic-speech-recognition/spec/input.json +3 -3
  16. package/src/tasks/automatic-speech-recognition/spec/output.json +30 -28
  17. package/src/tasks/common-definitions.json +25 -17
  18. package/src/tasks/depth-estimation/inference.ts +10 -10
  19. package/src/tasks/depth-estimation/spec/input.json +3 -8
  20. package/src/tasks/depth-estimation/spec/output.json +9 -3
  21. package/src/tasks/document-question-answering/inference.ts +16 -8
  22. package/src/tasks/document-question-answering/spec/input.json +9 -9
  23. package/src/tasks/document-question-answering/spec/output.json +2 -2
  24. package/src/tasks/feature-extraction/inference.ts +1 -1
  25. package/src/tasks/feature-extraction/spec/input.json +2 -2
  26. package/src/tasks/fill-mask/inference.ts +4 -3
  27. package/src/tasks/fill-mask/spec/input.json +3 -3
  28. package/src/tasks/fill-mask/spec/output.json +1 -1
  29. package/src/tasks/image-classification/inference.ts +3 -3
  30. package/src/tasks/image-classification/spec/input.json +4 -4
  31. package/src/tasks/image-segmentation/inference.ts +3 -3
  32. package/src/tasks/image-segmentation/spec/input.json +4 -4
  33. package/src/tasks/image-to-image/inference.ts +5 -5
  34. package/src/tasks/image-to-image/spec/input.json +9 -7
  35. package/src/tasks/image-to-text/inference.ts +25 -20
  36. package/src/tasks/image-to-text/spec/input.json +3 -3
  37. package/src/tasks/image-to-text/spec/output.json +8 -11
  38. package/src/tasks/object-detection/inference.ts +1 -1
  39. package/src/tasks/object-detection/spec/input.json +2 -2
  40. package/src/tasks/placeholder/spec/input.json +4 -4
  41. package/src/tasks/placeholder/spec/output.json +1 -1
  42. package/src/tasks/question-answering/inference.ts +8 -8
  43. package/src/tasks/question-answering/spec/input.json +9 -9
  44. package/src/tasks/sentence-similarity/inference.ts +1 -1
  45. package/src/tasks/sentence-similarity/spec/input.json +2 -2
  46. package/src/tasks/summarization/inference.ts +5 -4
  47. package/src/tasks/table-question-answering/inference.ts +1 -1
  48. package/src/tasks/table-question-answering/spec/input.json +8 -3
  49. package/src/tasks/text-classification/inference.ts +3 -3
  50. package/src/tasks/text-classification/spec/input.json +4 -4
  51. package/src/tasks/text-generation/inference.ts +123 -14
  52. package/src/tasks/text-generation/spec/input.json +28 -12
  53. package/src/tasks/text-generation/spec/output.json +112 -9
  54. package/src/tasks/text-to-audio/inference.ts +24 -19
  55. package/src/tasks/text-to-audio/spec/input.json +2 -2
  56. package/src/tasks/text-to-audio/spec/output.json +10 -13
  57. package/src/tasks/text-to-image/inference.ts +6 -8
  58. package/src/tasks/text-to-image/spec/input.json +9 -7
  59. package/src/tasks/text-to-image/spec/output.json +7 -9
  60. package/src/tasks/text-to-speech/inference.ts +18 -17
  61. package/src/tasks/text2text-generation/inference.ts +10 -8
  62. package/src/tasks/text2text-generation/spec/input.json +4 -4
  63. package/src/tasks/text2text-generation/spec/output.json +8 -11
  64. package/src/tasks/token-classification/inference.ts +4 -4
  65. package/src/tasks/token-classification/spec/input.json +4 -4
  66. package/src/tasks/token-classification/spec/output.json +1 -1
  67. package/src/tasks/translation/inference.ts +5 -4
  68. package/src/tasks/video-classification/inference.ts +5 -5
  69. package/src/tasks/video-classification/spec/input.json +6 -6
  70. package/src/tasks/visual-question-answering/inference.ts +2 -2
  71. package/src/tasks/visual-question-answering/spec/input.json +3 -3
  72. package/src/tasks/zero-shot-classification/inference.ts +3 -3
  73. package/src/tasks/zero-shot-classification/spec/input.json +4 -4
  74. package/src/tasks/zero-shot-image-classification/inference.ts +2 -2
  75. package/src/tasks/zero-shot-image-classification/spec/input.json +3 -3
  76. package/src/tasks/zero-shot-object-detection/inference.ts +1 -1
  77. package/src/tasks/zero-shot-object-detection/spec/input.json +2 -2
package/src/tasks/text-generation/inference.ts

@@ -3,6 +3,7 @@
  *
  * Using src/scripts/inference-codegen
  */
+
 /**
  * Inputs for Text Generation inference
  */
@@ -10,13 +11,14 @@ export interface TextGenerationInput {
 	/**
 	 * The text to initialize generation with
 	 */
-	data: string;
+	inputs: string;
 	/**
 	 * Additional inference parameters
 	 */
 	parameters?: TextGenerationParameters;
 	[property: string]: unknown;
 }
+
 /**
  * Additional inference parameters
  *
@@ -24,26 +26,43 @@ export interface TextGenerationInput {
  */
 export interface TextGenerationParameters {
 	/**
-	 * Whether to use logit sampling (true) or greedy search (false).
+	 * The number of sampling queries to run. Only the best one (in terms of total logprob) will
+	 * be returned.
 	 */
-	doSample?: boolean;
+	best_of?: number;
 	/**
-	 * Maximum number of generated tokens.
+	 * Whether or not to output decoder input details
 	 */
-	maxNewTokens?: number;
+	decoder_input_details?: boolean;
+	/**
+	 * Whether or not to output details
+	 */
+	details?: boolean;
+	/**
+	 * Whether to use logits sampling instead of greedy decoding when generating new tokens.
+	 */
+	do_sample?: boolean;
+	/**
+	 * The maximum number of tokens to generate.
+	 */
+	max_new_tokens?: number;
 	/**
 	 * The parameter for repetition penalty. A value of 1.0 means no penalty. See [this
 	 * paper](https://hf.co/papers/1909.05858) for more details.
 	 */
-	repetitionPenalty?: number;
+	repetition_penalty?: number;
 	/**
 	 * Whether to prepend the prompt to the generated text.
 	 */
-	returnFullText?: boolean;
+	return_full_text?: boolean;
+	/**
+	 * The random sampling seed.
+	 */
+	seed?: number;
 	/**
 	 * Stop generating tokens if a member of `stop_sequences` is generated.
 	 */
-	stopSequences?: string[];
+	stop_sequences?: string[];
 	/**
 	 * The value used to modulate the logits distribution.
 	 */
@@ -51,12 +70,12 @@ export interface TextGenerationParameters {
 	/**
 	 * The number of highest probability vocabulary tokens to keep for top-k-filtering.
 	 */
-	topK?: number;
+	top_k?: number;
 	/**
 	 * If set to < 1, only the smallest set of most probable tokens with probabilities that add
 	 * up to `top_p` or higher are kept for generation.
 	 */
-	topP?: number;
+	top_p?: number;
 	/**
 	 * Truncate input tokens to the given size.
 	 */
@@ -65,21 +84,111 @@ export interface TextGenerationParameters {
 	 * Typical Decoding mass. See [Typical Decoding for Natural Language
 	 * Generation](https://hf.co/papers/2202.00666) for more information
 	 */
-	typicalP?: number;
+	typical_p?: number;
 	/**
 	 * Watermarking with [A Watermark for Large Language Models](https://hf.co/papers/2301.10226)
 	 */
 	watermark?: boolean;
 	[property: string]: unknown;
 }
-export type TextGenerationOutput = TextGenerationOutputElement[];
+
 /**
  * Outputs for Text Generation inference
  */
-export interface TextGenerationOutputElement {
+export interface TextGenerationOutput {
+	/**
+	 * When enabled, details about the generation
+	 */
+	details?: TextGenerationOutputDetails;
 	/**
 	 * The generated text
 	 */
-	generatedText: string;
+	generated_text: string;
+	[property: string]: unknown;
+}
+
+/**
+ * When enabled, details about the generation
+ */
+export interface TextGenerationOutputDetails {
+	/**
+	 * Details about additional sequences when best_of is provided
+	 */
+	best_of_sequences?: TextGenerationSequenceDetails[];
+	/**
+	 * The reason why the generation was stopped.
+	 */
+	finish_reason: FinishReason;
+	/**
+	 * The number of generated tokens
+	 */
+	generated_tokens: number;
+	prefill: PrefillToken[];
+	/**
+	 * The random seed used for generation
+	 */
+	seed?: number;
+	/**
+	 * The generated tokens and associated details
+	 */
+	tokens: Token[];
+	[property: string]: unknown;
+}
+
+export interface TextGenerationSequenceDetails {
+	/**
+	 * The reason why the generation was stopped.
+	 */
+	finish_reason: FinishReason;
+	/**
+	 * The generated text
+	 */
+	generated_text: number;
+	/**
+	 * The number of generated tokens
+	 */
+	generated_tokens: number;
+	prefill: PrefillToken[];
+	/**
+	 * The random seed used for generation
+	 */
+	seed?: number;
+	/**
+	 * The generated tokens and associated details
+	 */
+	tokens: Token[];
+	[property: string]: unknown;
+}
+
+/**
+ * The generated sequence reached the maximum allowed length
+ *
+ * The model generated an end-of-sentence (EOS) token
+ *
+ * One of the sequence in stop_sequences was generated
+ */
+export type FinishReason = "length" | "eos_token" | "stop_sequence";
+
+export interface PrefillToken {
+	id: number;
+	logprob: number;
+	/**
+	 * The text associated with that token
+	 */
+	text: string;
+	[property: string]: unknown;
+}
+
+export interface Token {
+	id: number;
+	logprob: number;
+	/**
+	 * Whether or not that token is a special one
+	 */
+	special: boolean;
+	/**
+	 * The text associated with that token
+	 */
+	text: string;
 	[property: string]: unknown;
 }
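Taken together, these renames change the wire format for text generation: the prompt field is now `inputs` instead of `data`, every parameter is snake_case, and the output is a single object rather than an array of elements. A minimal consumer sketch against 0.3.0, assuming the task types are re-exported from the package root as the dist typings above suggest (the `readOutput` helper is illustrative, not part of the package):

    import type { TextGenerationInput, TextGenerationOutput } from "@huggingface/tasks";

    // Request payload using the renamed fields: `inputs` replaces `data`,
    // and parameters are snake_case (do_sample, max_new_tokens, ...).
    const payload: TextGenerationInput = {
        inputs: "Once upon a time",
        parameters: {
            do_sample: true,
            max_new_tokens: 50,
            top_k: 50,
            top_p: 0.95,
            stop_sequences: ["\n\n"],
        },
    };

    // The output is now one object, not an array of elements.
    function readOutput(output: TextGenerationOutput): string {
        if (output.details) {
            // `details` is only populated when the request opted into it.
            console.log(output.details.finish_reason, output.details.generated_tokens);
        }
        return output.generated_text;
    }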
package/src/tasks/text-generation/spec/input.json

@@ -5,7 +5,7 @@
 	"title": "TextGenerationInput",
 	"type": "object",
 	"properties": {
-		"data": {
+		"inputs": {
 			"description": "The text to initialize generation with",
 			"type": "string"
 		},
@@ -20,23 +20,39 @@
 		"description": "Additional inference parameters for Text Generation",
 		"type": "object",
 		"properties": {
-			"doSample": {
+			"best_of": {
+				"type": "integer",
+				"description": "The number of sampling queries to run. Only the best one (in terms of total logprob) will be returned."
+			},
+			"decoder_input_details": {
+				"type": "boolean",
+				"description": "Whether or not to output decoder input details"
+			},
+			"details": {
 				"type": "boolean",
-				"description": "Whether to use logit sampling (true) or greedy search (false)."
+				"description": "Whether or not to output details"
 			},
-			"maxNewTokens": {
+			"do_sample": {
+				"type": "boolean",
+				"description": "Whether to use logits sampling instead of greedy decoding when generating new tokens."
+			},
+			"max_new_tokens": {
 				"type": "integer",
-				"description": "Maximum number of generated tokens."
+				"description": "The maximum number of tokens to generate."
 			},
-			"repetitionPenalty": {
+			"repetition_penalty": {
 				"type": "number",
 				"description": "The parameter for repetition penalty. A value of 1.0 means no penalty. See [this paper](https://hf.co/papers/1909.05858) for more details."
 			},
-			"returnFullText": {
+			"return_full_text": {
 				"type": "boolean",
 				"description": "Whether to prepend the prompt to the generated text."
 			},
-			"stopSequences": {
+			"seed": {
+				"type": "integer",
+				"description": "The random sampling seed."
+			},
+			"stop_sequences": {
 				"type": "array",
 				"items": {
 					"type": "string"
@@ -47,11 +63,11 @@
 				"type": "number",
 				"description": "The value used to modulate the logits distribution."
 			},
-			"topK": {
+			"top_k": {
 				"type": "integer",
 				"description": "The number of highest probability vocabulary tokens to keep for top-k-filtering."
 			},
-			"topP": {
+			"top_p": {
 				"type": "number",
 				"description": "If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or higher are kept for generation."
 			},
@@ -59,7 +75,7 @@
 				"type": "integer",
 				"description": "Truncate input tokens to the given size."
 			},
-			"typicalP": {
+			"typical_p": {
 				"type": "number",
 				"description": "Typical Decoding mass. See [Typical Decoding for Natural Language Generation](https://hf.co/papers/2202.00666) for more information"
 			},
@@ -70,5 +86,5 @@
 			}
 		}
 	},
-	"required": ["data"]
+	"required": ["inputs"]
 }
package/src/tasks/text-generation/spec/output.json

@@ -3,15 +3,118 @@
 	"$schema": "http://json-schema.org/draft-06/schema#",
 	"description": "Outputs for Text Generation inference",
 	"title": "TextGenerationOutput",
-	"type": "array",
-	"items": {
-		"type": "object",
-		"properties": {
-			"generatedText": {
-				"type": "string",
-				"description": "The generated text"
-			}
+	"type": "object",
+	"properties": {
+		"generated_text": {
+			"type": "string",
+			"description": "The generated text"
 		},
-		"required": ["generatedText"]
+		"details": {
+			"description": "When enabled, details about the generation",
+			"title": "TextGenerationOutputDetails",
+			"allOf": [
+				{ "$ref": "#/$defs/SequenceDetails" },
+				{
+					"type": "object",
+					"properties": {
+						"best_of_sequences": {
+							"type": "array",
+							"description": "Details about additional sequences when best_of is provided",
+							"items": {
+								"allOf": [
+									{ "$ref": "#/$defs/SequenceDetails" },
+									{
+										"type": "object",
+										"properties": {
+											"generated_text": {
+												"type": "integer",
+												"description": "The generated text"
+											}
+										},
+										"required": ["generated_text"]
+									}
+								]
+							}
+						}
+					}
+				}
+			]
+		}
+	},
+	"required": ["generated_text"],
+
+	"$defs": {
+		"Token": {
+			"type": "object",
+			"title": "Token",
+			"properties": {
+				"id": {
+					"type": "integer"
+				},
+				"logprob": {
+					"type": "number"
+				},
+				"special": {
+					"type": "boolean",
+					"description": "Whether or not that token is a special one"
+				},
+				"text": {
+					"type": "string",
+					"description": "The text associated with that token"
+				}
+			},
+			"required": ["id", "logprob", "special", "text"]
+		},
+		"SequenceDetails": {
+			"type": "object",
+			"title": "TextGenerationSequenceDetails",
+			"properties": {
+				"finish_reason": {
+					"type": "string",
+					"description": "The reason why the generation was stopped.",
+					"oneOf": [
+						{ "const": "length", "description": "The generated sequence reached the maximum allowed length" },
+						{ "const": "eos_token", "description": "The model generated an end-of-sentence (EOS) token" },
+						{ "const": "stop_sequence", "description": "One of the sequence in stop_sequences was generated" }
+					]
+				},
+				"generated_tokens": {
+					"type": "integer",
+					"description": "The number of generated tokens"
+				},
+				"prefill": {
+					"type": "array",
+					"items": {
+						"title": "PrefillToken",
+						"type": "object",
+						"properties": {
+							"id": {
+								"type": "integer"
+							},
+							"logprob": {
+								"type": "number"
+							},
+							"text": {
+								"type": "string",
+								"description": "The text associated with that token"
+							}
+						},
+						"required": ["id", "logprob", "text"]
+					}
+				},
+				"seed": {
+					"type": "integer",
+					"description": "The random seed used for generation"
+				},
+				"tokens": {
+					"type": "array",
+					"description": "The generated tokens and associated details",
+					"items": {
+						"$ref": "#/$defs/Token"
+					}
+				}
+			},
+			"required": ["finish_reason", "generated_tokens", "prefill", "tokens"]
+		}
 	}
 }
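Since the spec files are plain JSON Schema (draft-06) documents, the new object-shaped output can be checked with any draft-06-capable validator. A sketch using Ajv, purely for illustration; Ajv and the draft-06 meta-schema import path are assumptions, not dependencies of this package:

    import Ajv from "ajv";
    import draft06 from "ajv/dist/refs/json-schema-draft-06.json";
    import outputSchema from "./tasks/text-generation/spec/output.json";

    // Ajv defaults to draft-07; register the draft-06 meta-schema so the
    // spec's `$schema` reference resolves.
    const ajv = new Ajv();
    ajv.addMetaSchema(draft06);

    const validate = ajv.compile(outputSchema);

    // `generated_text` is the only required top-level field in 0.3.0.
    console.log(validate({ generated_text: "Hello world" })); // true
    console.log(validate([{ generatedText: "Hello world" }])); // false: the old 0.2.2 array shape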
package/src/tasks/text-to-audio/inference.ts

@@ -3,6 +3,7 @@
  *
  * Using src/scripts/inference-codegen
  */
+
 /**
  * Inputs for Text To Audio inference
  */
@@ -10,13 +11,14 @@ export interface TextToAudioInput {
 	/**
 	 * The input text data
 	 */
-	data: string;
+	inputs: string;
 	/**
 	 * Additional inference parameters
 	 */
 	parameters?: TextToAudioParameters;
 	[property: string]: unknown;
 }
+
 /**
  * Additional inference parameters
  *
@@ -29,6 +31,7 @@ export interface TextToAudioParameters {
 	generate?: GenerationParameters;
 	[property: string]: unknown;
 }
+
 /**
  * Parametrization of the text generation process
  *
@@ -38,18 +41,18 @@ export interface GenerationParameters {
 	/**
 	 * Whether to use sampling instead of greedy decoding when generating new tokens.
 	 */
-	doSample?: boolean;
+	do_sample?: boolean;
 	/**
 	 * Controls the stopping condition for beam-based methods.
 	 */
-	earlyStopping?: EarlyStoppingUnion;
+	early_stopping?: EarlyStoppingUnion;
 	/**
 	 * If set to float strictly between 0 and 1, only tokens with a conditional probability
 	 * greater than epsilon_cutoff will be sampled. In the paper, suggested values range from
 	 * 3e-4 to 9e-4, depending on the size of the model. See [Truncation Sampling as Language
 	 * Model Desmoothing](https://hf.co/papers/2210.15191) for more details.
 	 */
-	epsilonCutoff?: number;
+	epsilon_cutoff?: number;
 	/**
 	 * Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to
 	 * float strictly between 0 and 1, a token is only considered if it is greater than either
@@ -59,37 +62,37 @@ export interface GenerationParameters {
 	 * See [Truncation Sampling as Language Model Desmoothing](https://hf.co/papers/2210.15191)
 	 * for more details.
 	 */
-	etaCutoff?: number;
+	eta_cutoff?: number;
 	/**
 	 * The maximum length (in tokens) of the generated text, including the input.
 	 */
-	maxLength?: number;
+	max_length?: number;
 	/**
 	 * The maximum number of tokens to generate. Takes precedence over maxLength.
 	 */
-	maxNewTokens?: number;
+	max_new_tokens?: number;
 	/**
 	 * The minimum length (in tokens) of the generated text, including the input.
 	 */
-	minLength?: number;
+	min_length?: number;
 	/**
 	 * The minimum number of tokens to generate. Takes precedence over maxLength.
 	 */
-	minNewTokens?: number;
+	min_new_tokens?: number;
 	/**
 	 * Number of groups to divide num_beams into in order to ensure diversity among different
 	 * groups of beams. See [this paper](https://hf.co/papers/1610.02424) for more details.
 	 */
-	numBeamGroups?: number;
+	num_beam_groups?: number;
 	/**
 	 * Number of beams to use for beam search.
 	 */
-	numBeams?: number;
+	num_beams?: number;
 	/**
 	 * The value balances the model confidence and the degeneration penalty in contrastive
 	 * search decoding.
 	 */
-	penaltyAlpha?: number;
+	penalty_alpha?: number;
 	/**
 	 * The value used to modulate the next token probabilities.
 	 */
@@ -97,12 +100,12 @@ export interface GenerationParameters {
 	/**
 	 * The number of highest probability vocabulary tokens to keep for top-k-filtering.
 	 */
-	topK?: number;
+	top_k?: number;
 	/**
 	 * If set to float < 1, only the smallest set of most probable tokens with probabilities
 	 * that add up to top_p or higher are kept for generation.
 	 */
-	topP?: number;
+	top_p?: number;
 	/**
 	 * Local typicality measures how similar the conditional probability of predicting a target
 	 * token next is to the expected conditional probability of predicting a random token next,
@@ -110,29 +113,31 @@ export interface GenerationParameters {
 	 * most locally typical tokens with probabilities that add up to typical_p or higher are
 	 * kept for generation. See [this paper](https://hf.co/papers/2202.00666) for more details.
 	 */
-	typicalP?: number;
+	typical_p?: number;
 	/**
 	 * Whether the model should use the past last key/values attentions to speed up decoding
 	 */
-	useCache?: boolean;
+	use_cache?: boolean;
 	[property: string]: unknown;
 }
+
 /**
  * Controls the stopping condition for beam-based methods.
  */
 export type EarlyStoppingUnion = boolean | "never";
-export type TextToAudioOutput = TextToAudioOutputElement[];
+
 /**
  * Outputs of inference for the Text To Audio task
  */
-export interface TextToAudioOutputElement {
+export interface TextToAudioOutput {
 	/**
 	 * The generated audio waveform.
 	 */
 	audio: unknown;
+	samplingRate: unknown;
 	/**
 	 * The sampling rate of the generated audio waveform.
 	 */
-	samplingRate: number;
+	sampling_rate?: number;
 	[property: string]: unknown;
 }
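The same `data` → `inputs` and camelCase → snake_case renames apply here, down to the nested `GenerationParameters`. A minimal request sketch, assuming `TextToAudioInput` is exported from the package root like the other task types:

    import type { TextToAudioInput } from "@huggingface/tasks";

    const request: TextToAudioInput = {
        inputs: "Read this sentence aloud",
        parameters: {
            // Nested generation parameters follow the same snake_case rename
            // (do_sample, early_stopping, num_beams, use_cache, ...).
            generate: {
                do_sample: true,
                max_new_tokens: 256,
                num_beams: 1,
                use_cache: true,
            },
        },
    };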
package/src/tasks/text-to-audio/spec/input.json

@@ -5,7 +5,7 @@
 	"title": "TextToAudioInput",
 	"type": "object",
 	"properties": {
-		"data": {
+		"inputs": {
 			"description": "The input text data",
 			"type": "string"
 		},
@@ -27,5 +27,5 @@
 			}
 		}
 	},
-	"required": ["data"]
+	"required": ["inputs"]
 }
package/src/tasks/text-to-audio/spec/output.json

@@ -3,18 +3,15 @@
 	"$schema": "http://json-schema.org/draft-06/schema#",
 	"description": "Outputs of inference for the Text To Audio task",
 	"title": "TextToAudioOutput",
-	"type": "array",
-	"items": {
-		"type": "object",
-		"properties": {
-			"audio": {
-				"description": "The generated audio waveform."
-			},
-			"samplingRate": {
-				"type": "number",
-				"description": "The sampling rate of the generated audio waveform."
-			}
+	"type": "object",
+	"properties": {
+		"audio": {
+			"description": "The generated audio waveform."
 		},
-		"required": ["audio", "samplingRate"]
-	}
+		"sampling_rate": {
+			"type": "number",
+			"description": "The sampling rate of the generated audio waveform."
+		}
+	},
+	"required": ["audio", "samplingRate"]
 }
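Note that the schema renames the property to `sampling_rate` while its `required` list still says `samplingRate`, and the generated TypeScript above carries both keys. A defensive consumer sketch (the helper is illustrative, not part of the package):

    import type { TextToAudioOutput } from "@huggingface/tasks";

    // Accept either spelling, since the typings expose both `sampling_rate`
    // and the older `samplingRate` key.
    function samplingRateOf(output: TextToAudioOutput): number | undefined {
        return output.sampling_rate ?? (output.samplingRate as number | undefined);
    }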
package/src/tasks/text-to-image/inference.ts

@@ -11,7 +11,7 @@ export interface TextToImageInput {
 	/**
 	 * The input text data (sometimes called "prompt"
 	 */
-	data: string;
+	inputs: string;
 	/**
 	 * Additional inference parameters
 	 */
@@ -29,16 +29,16 @@ export interface TextToImageParameters {
 	 * For diffusion models. A higher guidance scale value encourages the model to generate
 	 * images closely linked to the text prompt at the expense of lower image quality.
 	 */
-	guidanceScale?: number;
+	guidance_scale?: number;
 	/**
 	 * One or several prompt to guide what NOT to include in image generation.
 	 */
-	negativePrompt?: string[];
+	negative_prompt?: string[];
 	/**
 	 * For diffusion models. The number of denoising steps. More denoising steps usually lead to
 	 * a higher quality image at the expense of slower inference.
 	 */
-	numInferenceSteps?: number;
+	num_inference_steps?: number;
 	/**
 	 * For diffusion models. Override the scheduler with a compatible one
 	 */
@@ -46,7 +46,7 @@ export interface TextToImageParameters {
 	/**
 	 * The size in pixel of the output image
 	 */
-	targetSize?: TargetSize;
+	target_size?: TargetSize;
 	[property: string]: unknown;
 }
 
@@ -62,9 +62,7 @@ export interface TargetSize {
 	/**
 	 * Outputs of inference for the Text To Image task
 	 */
-	export type TextToImageOutput = unknown[] | boolean | number | number | null | TextToImageOutputObject | string;
-
-	export interface TextToImageOutputObject {
+	export interface TextToImageOutput {
 	/**
 	 * The generated image
 	 */
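And for text-to-image, a request sketch under the same conventions; the `width`/`height` fields of `TargetSize` are assumed for illustration, since the interface body falls outside this hunk:

    import type { TextToImageInput } from "@huggingface/tasks";

    const request: TextToImageInput = {
        inputs: "An astronaut riding a horse on the moon",
        parameters: {
            guidance_scale: 7.5,
            negative_prompt: ["blurry", "low quality"],
            num_inference_steps: 30,
            target_size: { width: 768, height: 768 }, // assumed TargetSize shape
        },
    };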