@huggingface/tasks 0.2.1 → 0.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (123)
  1. package/README.md +1 -1
  2. package/dist/{index.mjs → index.cjs} +2695 -2497
  3. package/dist/index.d.ts +427 -65
  4. package/dist/index.js +2660 -2532
  5. package/package.json +13 -8
  6. package/src/index.ts +2 -5
  7. package/src/library-to-tasks.ts +1 -1
  8. package/src/model-data.ts +1 -1
  9. package/src/model-libraries-downloads.ts +20 -0
  10. package/src/{library-ui-elements.ts → model-libraries-snippets.ts} +50 -296
  11. package/src/model-libraries.ts +375 -44
  12. package/src/pipelines.ts +1 -1
  13. package/src/tasks/audio-classification/about.md +1 -1
  14. package/src/tasks/audio-classification/inference.ts +51 -0
  15. package/src/tasks/audio-classification/spec/input.json +34 -0
  16. package/src/tasks/audio-classification/spec/output.json +10 -0
  17. package/src/tasks/audio-to-audio/about.md +1 -1
  18. package/src/tasks/automatic-speech-recognition/about.md +4 -2
  19. package/src/tasks/automatic-speech-recognition/inference.ts +159 -0
  20. package/src/tasks/automatic-speech-recognition/spec/input.json +34 -0
  21. package/src/tasks/automatic-speech-recognition/spec/output.json +38 -0
  22. package/src/tasks/common-definitions.json +117 -0
  23. package/src/tasks/depth-estimation/data.ts +8 -4
  24. package/src/tasks/depth-estimation/inference.ts +35 -0
  25. package/src/tasks/depth-estimation/spec/input.json +25 -0
  26. package/src/tasks/depth-estimation/spec/output.json +16 -0
  27. package/src/tasks/document-question-answering/inference.ts +110 -0
  28. package/src/tasks/document-question-answering/spec/input.json +85 -0
  29. package/src/tasks/document-question-answering/spec/output.json +36 -0
  30. package/src/tasks/feature-extraction/inference.ts +22 -0
  31. package/src/tasks/feature-extraction/spec/input.json +26 -0
  32. package/src/tasks/feature-extraction/spec/output.json +7 -0
  33. package/src/tasks/fill-mask/inference.ts +62 -0
  34. package/src/tasks/fill-mask/spec/input.json +38 -0
  35. package/src/tasks/fill-mask/spec/output.json +29 -0
  36. package/src/tasks/image-classification/inference.ts +51 -0
  37. package/src/tasks/image-classification/spec/input.json +34 -0
  38. package/src/tasks/image-classification/spec/output.json +10 -0
  39. package/src/tasks/image-segmentation/inference.ts +65 -0
  40. package/src/tasks/image-segmentation/spec/input.json +54 -0
  41. package/src/tasks/image-segmentation/spec/output.json +25 -0
  42. package/src/tasks/image-to-image/inference.ts +67 -0
  43. package/src/tasks/image-to-image/spec/input.json +54 -0
  44. package/src/tasks/image-to-image/spec/output.json +12 -0
  45. package/src/tasks/image-to-text/inference.ts +143 -0
  46. package/src/tasks/image-to-text/spec/input.json +34 -0
  47. package/src/tasks/image-to-text/spec/output.json +14 -0
  48. package/src/tasks/index.ts +5 -2
  49. package/src/tasks/mask-generation/about.md +65 -0
  50. package/src/tasks/mask-generation/data.ts +42 -5
  51. package/src/tasks/object-detection/inference.ts +62 -0
  52. package/src/tasks/object-detection/spec/input.json +30 -0
  53. package/src/tasks/object-detection/spec/output.json +46 -0
  54. package/src/tasks/placeholder/data.ts +3 -0
  55. package/src/tasks/placeholder/spec/input.json +35 -0
  56. package/src/tasks/placeholder/spec/output.json +17 -0
  57. package/src/tasks/question-answering/inference.ts +99 -0
  58. package/src/tasks/question-answering/spec/input.json +67 -0
  59. package/src/tasks/question-answering/spec/output.json +29 -0
  60. package/src/tasks/sentence-similarity/about.md +2 -2
  61. package/src/tasks/sentence-similarity/inference.ts +32 -0
  62. package/src/tasks/sentence-similarity/spec/input.json +40 -0
  63. package/src/tasks/sentence-similarity/spec/output.json +12 -0
  64. package/src/tasks/summarization/data.ts +1 -0
  65. package/src/tasks/summarization/inference.ts +59 -0
  66. package/src/tasks/summarization/spec/input.json +7 -0
  67. package/src/tasks/summarization/spec/output.json +7 -0
  68. package/src/tasks/table-question-answering/inference.ts +61 -0
  69. package/src/tasks/table-question-answering/spec/input.json +44 -0
  70. package/src/tasks/table-question-answering/spec/output.json +40 -0
  71. package/src/tasks/tabular-classification/about.md +1 -1
  72. package/src/tasks/tabular-regression/about.md +1 -1
  73. package/src/tasks/text-classification/about.md +1 -0
  74. package/src/tasks/text-classification/inference.ts +51 -0
  75. package/src/tasks/text-classification/spec/input.json +35 -0
  76. package/src/tasks/text-classification/spec/output.json +10 -0
  77. package/src/tasks/text-generation/about.md +24 -13
  78. package/src/tasks/text-generation/data.ts +22 -38
  79. package/src/tasks/text-generation/inference.ts +194 -0
  80. package/src/tasks/text-generation/spec/input.json +90 -0
  81. package/src/tasks/text-generation/spec/output.json +120 -0
  82. package/src/tasks/text-to-audio/inference.ts +143 -0
  83. package/src/tasks/text-to-audio/spec/input.json +31 -0
  84. package/src/tasks/text-to-audio/spec/output.json +17 -0
  85. package/src/tasks/text-to-image/about.md +11 -2
  86. package/src/tasks/text-to-image/data.ts +6 -2
  87. package/src/tasks/text-to-image/inference.ts +71 -0
  88. package/src/tasks/text-to-image/spec/input.json +59 -0
  89. package/src/tasks/text-to-image/spec/output.json +13 -0
  90. package/src/tasks/text-to-speech/about.md +4 -2
  91. package/src/tasks/text-to-speech/data.ts +1 -0
  92. package/src/tasks/text-to-speech/inference.ts +147 -0
  93. package/src/tasks/text-to-speech/spec/input.json +7 -0
  94. package/src/tasks/text-to-speech/spec/output.json +7 -0
  95. package/src/tasks/text2text-generation/inference.ts +55 -0
  96. package/src/tasks/text2text-generation/spec/input.json +55 -0
  97. package/src/tasks/text2text-generation/spec/output.json +14 -0
  98. package/src/tasks/token-classification/inference.ts +82 -0
  99. package/src/tasks/token-classification/spec/input.json +65 -0
  100. package/src/tasks/token-classification/spec/output.json +33 -0
  101. package/src/tasks/translation/data.ts +1 -0
  102. package/src/tasks/translation/inference.ts +59 -0
  103. package/src/tasks/translation/spec/input.json +7 -0
  104. package/src/tasks/translation/spec/output.json +7 -0
  105. package/src/tasks/video-classification/inference.ts +59 -0
  106. package/src/tasks/video-classification/spec/input.json +42 -0
  107. package/src/tasks/video-classification/spec/output.json +10 -0
  108. package/src/tasks/visual-question-answering/inference.ts +63 -0
  109. package/src/tasks/visual-question-answering/spec/input.json +41 -0
  110. package/src/tasks/visual-question-answering/spec/output.json +21 -0
  111. package/src/tasks/zero-shot-classification/inference.ts +67 -0
  112. package/src/tasks/zero-shot-classification/spec/input.json +50 -0
  113. package/src/tasks/zero-shot-classification/spec/output.json +10 -0
  114. package/src/tasks/zero-shot-image-classification/data.ts +8 -5
  115. package/src/tasks/zero-shot-image-classification/inference.ts +61 -0
  116. package/src/tasks/zero-shot-image-classification/spec/input.json +45 -0
  117. package/src/tasks/zero-shot-image-classification/spec/output.json +10 -0
  118. package/src/tasks/zero-shot-object-detection/about.md +6 -0
  119. package/src/tasks/zero-shot-object-detection/data.ts +6 -1
  120. package/src/tasks/zero-shot-object-detection/inference.ts +66 -0
  121. package/src/tasks/zero-shot-object-detection/spec/input.json +40 -0
  122. package/src/tasks/zero-shot-object-detection/spec/output.json +47 -0
  123. package/tsconfig.json +3 -3
--- a/package/src/tasks/text-generation/data.ts
+++ b/package/src/tasks/text-generation/data.ts
@@ -12,12 +12,12 @@ const taskData: TaskDataCustom = {
   id: "the_pile",
   },
   {
- description: "A crowd-sourced instruction dataset to develop an AI assistant.",
- id: "OpenAssistant/oasst1",
+ description: "Truly open-source, curated and cleaned dialogue dataset.",
+ id: "HuggingFaceH4/ultrachat_200k",
   },
   {
- description: "A crowd-sourced instruction dataset created by Databricks employees.",
- id: "databricks/databricks-dolly-15k",
+ description: "An instruction dataset with preference ratings on responses.",
+ id: "openbmb/UltraFeedback",
   },
   ],
   demo: {
@@ -59,66 +59,50 @@ const taskData: TaskDataCustom = {
   id: "bigcode/starcoder",
   },
   {
- description: "A model trained to follow instructions, uses Pythia-12b as base model.",
- id: "databricks/dolly-v2-12b",
+ description: "A very powerful text generation model.",
+ id: "mistralai/Mixtral-8x7B-Instruct-v0.1",
   },
   {
- description: "A model trained to follow instructions curated by community, uses Pythia-12b as base model.",
- id: "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5",
+ description: "Small yet powerful text generation model.",
+ id: "microsoft/phi-2",
   },
   {
- description: "A large language model trained to generate text in English.",
- id: "stabilityai/stablelm-tuned-alpha-7b",
+ description: "A very powerful model that can chat, do mathematical reasoning and write code.",
+ id: "openchat/openchat-3.5-0106",
   },
   {
- description: "A model trained to follow instructions, based on mosaicml/mpt-7b.",
- id: "mosaicml/mpt-7b-instruct",
+ description: "Very strong yet small assistant model.",
+ id: "HuggingFaceH4/zephyr-7b-beta",
   },
   {
- description: "A large language model trained to generate text in English.",
- id: "EleutherAI/pythia-12b",
- },
- {
- description: "A large text-to-text model trained to follow instructions.",
- id: "google/flan-ul2",
- },
- {
- description: "A large and powerful text generation model.",
- id: "tiiuae/falcon-40b",
- },
- {
- description: "State-of-the-art open-source large language model.",
+ description: "Very strong open-source large language model.",
   id: "meta-llama/Llama-2-70b-hf",
   },
   ],
   spaces: [
   {
- description: "A robust text generation model that can perform various tasks through natural language prompting.",
- id: "huggingface/bloom_demo",
+ description: "A leaderboard to compare different open-source text generation models based on various benchmarks.",
+ id: "HuggingFaceH4/open_llm_leaderboard",
   },
   {
- description: "An text generation based application that can write code for 80+ languages.",
- id: "bigcode/bigcode-playground",
+ description: "An text generation based application based on a very powerful LLaMA2 model.",
+ id: "ysharma/Explore_llamav2_with_TGI",
   },
   {
- description: "An text generation based application for conversations.",
- id: "h2oai/h2ogpt-chatbot",
+ description: "An text generation based application to converse with Zephyr model.",
+ id: "HuggingFaceH4/zephyr-chat",
   },
   {
   description: "An text generation application that combines OpenAI and Hugging Face models.",
   id: "microsoft/HuggingGPT",
   },
   {
- description: "An text generation application that uses StableLM-tuned-alpha-7b.",
- id: "stabilityai/stablelm-tuned-alpha-chat",
- },
- {
- description: "An UI that uses StableLM-tuned-alpha-7b.",
- id: "togethercomputer/OpenChatKit",
+ description: "An chatbot to converse with a very powerful text generation model.",
+ id: "mlabonne/phixtral-chat",
   },
   ],
   summary:
- "Generating text is the task of producing new text. These models can, for example, fill in incomplete text or paraphrase.",
+ "Generating text is the task of generating new text given another text. These models can, for example, fill in incomplete text or paraphrase.",
   widgetModels: ["HuggingFaceH4/zephyr-7b-beta"],
   youtubeId: "Vpjb1lu0MDk",
   };
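Taken together, the edits above only swap entries inside one `TaskDataCustom` value: parallel arrays of `{ description, id }` records for datasets, models, and spaces, plus a few scalar fields. A trimmed sketch of the post-change shape, with entries copied from the new (+) lines; the real type has further required fields (such as `demo` and `metrics`) omitted here:

```ts
// Trimmed sketch of the task-data shape edited above, not the full TaskDataCustom type.
const textGenerationTaskData = {
    datasets: [
        { description: "Truly open-source, curated and cleaned dialogue dataset.", id: "HuggingFaceH4/ultrachat_200k" },
    ],
    models: [
        { description: "Very strong yet small assistant model.", id: "HuggingFaceH4/zephyr-7b-beta" },
    ],
    spaces: [
        { description: "A leaderboard to compare different open-source text generation models based on various benchmarks.", id: "HuggingFaceH4/open_llm_leaderboard" },
    ],
    widgetModels: ["HuggingFaceH4/zephyr-7b-beta"],
    youtubeId: "Vpjb1lu0MDk",
};
```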
--- /dev/null
+++ b/package/src/tasks/text-generation/inference.ts
@@ -0,0 +1,194 @@
+/**
+ * Inference code generated from the JSON schema spec in ./spec
+ *
+ * Using src/scripts/inference-codegen
+ */
+
+/**
+ * Inputs for Text Generation inference
+ */
+export interface TextGenerationInput {
+    /**
+     * The text to initialize generation with
+     */
+    inputs: string;
+    /**
+     * Additional inference parameters
+     */
+    parameters?: TextGenerationParameters;
+    [property: string]: unknown;
+}
+
+/**
+ * Additional inference parameters
+ *
+ * Additional inference parameters for Text Generation
+ */
+export interface TextGenerationParameters {
+    /**
+     * The number of sampling queries to run. Only the best one (in terms of total logprob) will
+     * be returned.
+     */
+    best_of?: number;
+    /**
+     * Whether or not to output decoder input details
+     */
+    decoder_input_details?: boolean;
+    /**
+     * Whether or not to output details
+     */
+    details?: boolean;
+    /**
+     * Whether to use logits sampling instead of greedy decoding when generating new tokens.
+     */
+    do_sample?: boolean;
+    /**
+     * The maximum number of tokens to generate.
+     */
+    max_new_tokens?: number;
+    /**
+     * The parameter for repetition penalty. A value of 1.0 means no penalty. See [this
+     * paper](https://hf.co/papers/1909.05858) for more details.
+     */
+    repetition_penalty?: number;
+    /**
+     * Whether to prepend the prompt to the generated text.
+     */
+    return_full_text?: boolean;
+    /**
+     * The random sampling seed.
+     */
+    seed?: number;
+    /**
+     * Stop generating tokens if a member of `stop_sequences` is generated.
+     */
+    stop_sequences?: string[];
+    /**
+     * The value used to modulate the logits distribution.
+     */
+    temperature?: number;
+    /**
+     * The number of highest probability vocabulary tokens to keep for top-k-filtering.
+     */
+    top_k?: number;
+    /**
+     * If set to < 1, only the smallest set of most probable tokens with probabilities that add
+     * up to `top_p` or higher are kept for generation.
+     */
+    top_p?: number;
+    /**
+     * Truncate input tokens to the given size.
+     */
+    truncate?: number;
+    /**
+     * Typical Decoding mass. See [Typical Decoding for Natural Language
+     * Generation](https://hf.co/papers/2202.00666) for more information
+     */
+    typical_p?: number;
+    /**
+     * Watermarking with [A Watermark for Large Language Models](https://hf.co/papers/2301.10226)
+     */
+    watermark?: boolean;
+    [property: string]: unknown;
+}
+
+/**
+ * Outputs for Text Generation inference
+ */
+export interface TextGenerationOutput {
+    /**
+     * When enabled, details about the generation
+     */
+    details?: TextGenerationOutputDetails;
+    /**
+     * The generated text
+     */
+    generated_text: string;
+    [property: string]: unknown;
+}
+
+/**
+ * When enabled, details about the generation
+ */
+export interface TextGenerationOutputDetails {
+    /**
+     * Details about additional sequences when best_of is provided
+     */
+    best_of_sequences?: TextGenerationSequenceDetails[];
+    /**
+     * The reason why the generation was stopped.
+     */
+    finish_reason: FinishReason;
+    /**
+     * The number of generated tokens
+     */
+    generated_tokens: number;
+    prefill: PrefillToken[];
+    /**
+     * The random seed used for generation
+     */
+    seed?: number;
+    /**
+     * The generated tokens and associated details
+     */
+    tokens: Token[];
+    [property: string]: unknown;
+}
+
+export interface TextGenerationSequenceDetails {
+    /**
+     * The reason why the generation was stopped.
+     */
+    finish_reason: FinishReason;
+    /**
+     * The generated text
+     */
+    generated_text: number;
+    /**
+     * The number of generated tokens
+     */
+    generated_tokens: number;
+    prefill: PrefillToken[];
+    /**
+     * The random seed used for generation
+     */
+    seed?: number;
+    /**
+     * The generated tokens and associated details
+     */
+    tokens: Token[];
+    [property: string]: unknown;
+}
+
+/**
+ * The generated sequence reached the maximum allowed length
+ *
+ * The model generated an end-of-sentence (EOS) token
+ *
+ * One of the sequence in stop_sequences was generated
+ */
+export type FinishReason = "length" | "eos_token" | "stop_sequence";
+
+export interface PrefillToken {
+    id: number;
+    logprob: number;
+    /**
+     * The text associated with that token
+     */
+    text: string;
+    [property: string]: unknown;
+}
+
+export interface Token {
+    id: number;
+    logprob: number;
+    /**
+     * Whether or not that token is a special one
+     */
+    special: boolean;
+    /**
+     * The text associated with that token
+     */
+    text: string;
+    [property: string]: unknown;
+}
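For orientation, here is how these generated types might be exercised against the hosted Inference API. This is a sketch only: the endpoint URL and model id are illustrative, and it assumes the interfaces are re-exported from the package root.

```ts
import type { TextGenerationInput, TextGenerationOutput } from "@huggingface/tasks";

// Sketch: the endpoint URL and model id are assumptions, not part of this package.
async function generate(prompt: string, token: string): Promise<string> {
    const payload: TextGenerationInput = {
        inputs: prompt,
        parameters: { max_new_tokens: 64, temperature: 0.7, do_sample: true, details: true },
    };
    const res = await fetch("https://api-inference.huggingface.co/models/HuggingFaceH4/zephyr-7b-beta", {
        method: "POST",
        headers: { Authorization: `Bearer ${token}`, "Content-Type": "application/json" },
        body: JSON.stringify(payload),
    });
    if (!res.ok) throw new Error(`Inference request failed with status ${res.status}`);
    // The hosted API returns an array with one element per input.
    const [output] = (await res.json()) as TextGenerationOutput[];
    if (output.details) {
        console.log(`finish_reason=${output.details.finish_reason}; generated_tokens=${output.details.generated_tokens}`);
    }
    return output.generated_text;
}
```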
--- /dev/null
+++ b/package/src/tasks/text-generation/spec/input.json
@@ -0,0 +1,90 @@
+{
+  "$id": "/inference/schemas/text-generation/input.json",
+  "$schema": "http://json-schema.org/draft-06/schema#",
+  "description": "Inputs for Text Generation inference",
+  "title": "TextGenerationInput",
+  "type": "object",
+  "properties": {
+    "inputs": {
+      "description": "The text to initialize generation with",
+      "type": "string"
+    },
+    "parameters": {
+      "description": "Additional inference parameters",
+      "$ref": "#/$defs/TextGenerationParameters"
+    }
+  },
+  "$defs": {
+    "TextGenerationParameters": {
+      "title": "TextGenerationParameters",
+      "description": "Additional inference parameters for Text Generation",
+      "type": "object",
+      "properties": {
+        "best_of": {
+          "type": "integer",
+          "description": "The number of sampling queries to run. Only the best one (in terms of total logprob) will be returned."
+        },
+        "decoder_input_details": {
+          "type": "boolean",
+          "description": "Whether or not to output decoder input details"
+        },
+        "details": {
+          "type": "boolean",
+          "description": "Whether or not to output details"
+        },
+        "do_sample": {
+          "type": "boolean",
+          "description": "Whether to use logits sampling instead of greedy decoding when generating new tokens."
+        },
+        "max_new_tokens": {
+          "type": "integer",
+          "description": "The maximum number of tokens to generate."
+        },
+        "repetition_penalty": {
+          "type": "number",
+          "description": "The parameter for repetition penalty. A value of 1.0 means no penalty. See [this paper](https://hf.co/papers/1909.05858) for more details."
+        },
+        "return_full_text": {
+          "type": "boolean",
+          "description": "Whether to prepend the prompt to the generated text."
+        },
+        "seed": {
+          "type": "integer",
+          "description": "The random sampling seed."
+        },
+        "stop_sequences": {
+          "type": "array",
+          "items": {
+            "type": "string"
+          },
+          "description": "Stop generating tokens if a member of `stop_sequences` is generated."
+        },
+        "temperature": {
+          "type": "number",
+          "description": "The value used to modulate the logits distribution."
+        },
+        "top_k": {
+          "type": "integer",
+          "description": "The number of highest probability vocabulary tokens to keep for top-k-filtering."
+        },
+        "top_p": {
+          "type": "number",
+          "description": "If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or higher are kept for generation."
+        },
+        "truncate": {
+          "type": "integer",
+          "description": "Truncate input tokens to the given size."
+        },
+        "typical_p": {
+          "type": "number",
+          "description": "Typical Decoding mass. See [Typical Decoding for Natural Language Generation](https://hf.co/papers/2202.00666) for more information"
+        },
+        "watermark": {
+          "type": "boolean",
+          "description": "Watermarking with [A Watermark for Large Language Models](https://hf.co/papers/2301.10226)"
+        }
+      }
+    }
+  },
+  "required": ["inputs"]
+}
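Because the spec targets JSON Schema draft-06, request payloads can be checked with an off-the-shelf validator. A minimal sketch with Ajv (Ajv 8 requires the draft-06 meta-schema to be registered explicitly; the import paths here are assumptions):

```ts
import Ajv from "ajv";
import draft6MetaSchema from "ajv/dist/refs/json-schema-draft-06.json";
import inputSchema from "./spec/input.json"; // path assumed

const ajv = new Ajv({ strict: false }); // the generated schemas predate Ajv's strict mode
ajv.addMetaSchema(draft6MetaSchema);
const validate = ajv.compile(inputSchema);

// Valid: "inputs" is present and the parameters match the $defs above.
console.log(validate({ inputs: "Once upon a time,", parameters: { max_new_tokens: 32, top_p: 0.9 } })); // true
// Invalid: "inputs" is required.
console.log(validate({ parameters: { temperature: 0.5 } })); // false
```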
--- /dev/null
+++ b/package/src/tasks/text-generation/spec/output.json
@@ -0,0 +1,120 @@
+{
+  "$id": "/inference/schemas/text-generation/output.json",
+  "$schema": "http://json-schema.org/draft-06/schema#",
+  "description": "Outputs for Text Generation inference",
+  "title": "TextGenerationOutput",
+  "type": "object",
+  "properties": {
+    "generated_text": {
+      "type": "string",
+      "description": "The generated text"
+    },
+    "details": {
+      "description": "When enabled, details about the generation",
+      "title": "TextGenerationOutputDetails",
+      "allOf": [
+        { "$ref": "#/$defs/SequenceDetails" },
+        {
+          "type": "object",
+          "properties": {
+            "best_of_sequences": {
+              "type": "array",
+              "description": "Details about additional sequences when best_of is provided",
+              "items": {
+                "allOf": [
+                  { "$ref": "#/$defs/SequenceDetails" },
+                  {
+                    "type": "object",
+                    "properties": {
+                      "generated_text": {
+                        "type": "integer",
+                        "description": "The generated text"
+                      }
+                    },
+                    "required": ["generated_text"]
+                  }
+                ]
+              }
+            }
+          }
+        }
+      ]
+    }
+  },
+  "required": ["generated_text"],
+
+  "$defs": {
+    "Token": {
+      "type": "object",
+      "title": "Token",
+      "properties": {
+        "id": {
+          "type": "integer"
+        },
+        "logprob": {
+          "type": "number"
+        },
+        "special": {
+          "type": "boolean",
+          "description": "Whether or not that token is a special one"
+        },
+        "text": {
+          "type": "string",
+          "description": "The text associated with that token"
+        }
+      },
+      "required": ["id", "logprob", "special", "text"]
+    },
+    "SequenceDetails": {
+      "type": "object",
+      "title": "TextGenerationSequenceDetails",
+      "properties": {
+        "finish_reason": {
+          "type": "string",
+          "description": "The reason why the generation was stopped.",
+          "oneOf": [
+            { "const": "length", "description": "The generated sequence reached the maximum allowed length" },
+            { "const": "eos_token", "description": "The model generated an end-of-sentence (EOS) token" },
+            { "const": "stop_sequence", "description": "One of the sequence in stop_sequences was generated" }
+          ]
+        },
+        "generated_tokens": {
+          "type": "integer",
+          "description": "The number of generated tokens"
+        },
+        "prefill": {
+          "type": "array",
+          "items": {
+            "title": "PrefillToken",
+            "type": "object",
+            "properties": {
+              "id": {
+                "type": "integer"
+              },
+              "logprob": {
+                "type": "number"
+              },
+              "text": {
+                "type": "string",
+                "description": "The text associated with that token"
+              }
+            },
+            "required": ["id", "logprob", "text"]
+          }
+        },
+        "seed": {
+          "type": "integer",
+          "description": "The random seed used for generation"
+        },
+        "tokens": {
+          "type": "array",
+          "description": "The generated tokens and associated details",
+          "items": {
+            "$ref": "#/$defs/Token"
+          }
+        }
+      },
+      "required": ["finish_reason", "generated_tokens", "prefill", "tokens"]
+    }
+  }
+}
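The `allOf` composition here is what the codegen flattens into the `TextGenerationOutputDetails` interface earlier in this diff: the shared `SequenceDetails` fields plus the optional `best_of_sequences`. An illustrative instance (the values are invented, and the type import again assumes root re-exports):

```ts
import type { TextGenerationOutput } from "@huggingface/tasks";

// Illustrative only: token ids and logprobs are made up.
const example: TextGenerationOutput = {
    generated_text: " there was a little lamb.",
    details: {
        finish_reason: "eos_token", // "length" | "eos_token" | "stop_sequence"
        generated_tokens: 7,
        prefill: [{ id: 7454, logprob: -3.1, text: "Once" }],
        tokens: [{ id: 727, logprob: -0.42, special: false, text: " there" }],
        seed: 42,
    },
};
```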
--- /dev/null
+++ b/package/src/tasks/text-to-audio/inference.ts
@@ -0,0 +1,143 @@
+/**
+ * Inference code generated from the JSON schema spec in ./spec
+ *
+ * Using src/scripts/inference-codegen
+ */
+
+/**
+ * Inputs for Text To Audio inference
+ */
+export interface TextToAudioInput {
+    /**
+     * The input text data
+     */
+    inputs: string;
+    /**
+     * Additional inference parameters
+     */
+    parameters?: TextToAudioParameters;
+    [property: string]: unknown;
+}
+
+/**
+ * Additional inference parameters
+ *
+ * Additional inference parameters for Text To Audio
+ */
+export interface TextToAudioParameters {
+    /**
+     * Parametrization of the text generation process
+     */
+    generate?: GenerationParameters;
+    [property: string]: unknown;
+}
+
+/**
+ * Parametrization of the text generation process
+ *
+ * Ad-hoc parametrization of the text generation process
+ */
+export interface GenerationParameters {
+    /**
+     * Whether to use sampling instead of greedy decoding when generating new tokens.
+     */
+    do_sample?: boolean;
+    /**
+     * Controls the stopping condition for beam-based methods.
+     */
+    early_stopping?: EarlyStoppingUnion;
+    /**
+     * If set to float strictly between 0 and 1, only tokens with a conditional probability
+     * greater than epsilon_cutoff will be sampled. In the paper, suggested values range from
+     * 3e-4 to 9e-4, depending on the size of the model. See [Truncation Sampling as Language
+     * Model Desmoothing](https://hf.co/papers/2210.15191) for more details.
+     */
+    epsilon_cutoff?: number;
+    /**
+     * Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to
+     * float strictly between 0 and 1, a token is only considered if it is greater than either
+     * eta_cutoff or sqrt(eta_cutoff) * exp(-entropy(softmax(next_token_logits))). The latter
+     * term is intuitively the expected next token probability, scaled by sqrt(eta_cutoff). In
+     * the paper, suggested values range from 3e-4 to 2e-3, depending on the size of the model.
+     * See [Truncation Sampling as Language Model Desmoothing](https://hf.co/papers/2210.15191)
+     * for more details.
+     */
+    eta_cutoff?: number;
+    /**
+     * The maximum length (in tokens) of the generated text, including the input.
+     */
+    max_length?: number;
+    /**
+     * The maximum number of tokens to generate. Takes precedence over maxLength.
+     */
+    max_new_tokens?: number;
+    /**
+     * The minimum length (in tokens) of the generated text, including the input.
+     */
+    min_length?: number;
+    /**
+     * The minimum number of tokens to generate. Takes precedence over maxLength.
+     */
+    min_new_tokens?: number;
+    /**
+     * Number of groups to divide num_beams into in order to ensure diversity among different
+     * groups of beams. See [this paper](https://hf.co/papers/1610.02424) for more details.
+     */
+    num_beam_groups?: number;
+    /**
+     * Number of beams to use for beam search.
+     */
+    num_beams?: number;
+    /**
+     * The value balances the model confidence and the degeneration penalty in contrastive
+     * search decoding.
+     */
+    penalty_alpha?: number;
+    /**
+     * The value used to modulate the next token probabilities.
+     */
+    temperature?: number;
+    /**
+     * The number of highest probability vocabulary tokens to keep for top-k-filtering.
+     */
+    top_k?: number;
+    /**
+     * If set to float < 1, only the smallest set of most probable tokens with probabilities
+     * that add up to top_p or higher are kept for generation.
+     */
+    top_p?: number;
+    /**
+     * Local typicality measures how similar the conditional probability of predicting a target
+     * token next is to the expected conditional probability of predicting a random token next,
+     * given the partial text already generated. If set to float < 1, the smallest set of the
+     * most locally typical tokens with probabilities that add up to typical_p or higher are
+     * kept for generation. See [this paper](https://hf.co/papers/2202.00666) for more details.
+     */
+    typical_p?: number;
+    /**
+     * Whether the model should use the past last key/values attentions to speed up decoding
+     */
+    use_cache?: boolean;
+    [property: string]: unknown;
+}
+
+/**
+ * Controls the stopping condition for beam-based methods.
+ */
+export type EarlyStoppingUnion = boolean | "never";
+
+/**
+ * Outputs of inference for the Text To Audio task
+ */
+export interface TextToAudioOutput {
+    /**
+     * The generated audio waveform.
+     */
+    audio: unknown;
+    samplingRate: unknown;
+    /**
+     * The sampling rate of the generated audio waveform.
+     */
+    sampling_rate?: number;
+    [property: string]: unknown;
+}
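Unlike `TextGenerationParameters`, the text-to-audio parameters nest the shared `GenerationParameters` under a `generate` key. A request sketch under the same root re-export assumption:

```ts
import type { TextToAudioInput } from "@huggingface/tasks";

// Sketch: the beam-search settings are illustrative.
const request: TextToAudioInput = {
    inputs: "Hello, world!",
    parameters: {
        generate: {
            do_sample: false,
            num_beams: 4,
            early_stopping: "never", // EarlyStoppingUnion: boolean | "never"
            max_new_tokens: 256,
        },
    },
};
```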
--- /dev/null
+++ b/package/src/tasks/text-to-audio/spec/input.json
@@ -0,0 +1,31 @@
+{
+  "$id": "/inference/schemas/text-to-audio/input.json",
+  "$schema": "http://json-schema.org/draft-06/schema#",
+  "description": "Inputs for Text To Audio inference",
+  "title": "TextToAudioInput",
+  "type": "object",
+  "properties": {
+    "inputs": {
+      "description": "The input text data",
+      "type": "string"
+    },
+    "parameters": {
+      "description": "Additional inference parameters",
+      "$ref": "#/$defs/TextToAudioParameters"
+    }
+  },
+  "$defs": {
+    "TextToAudioParameters": {
+      "title": "TextToAudioParameters",
+      "description": "Additional inference parameters for Text To Audio",
+      "type": "object",
+      "properties": {
+        "generate": {
+          "description": "Parametrization of the text generation process",
+          "$ref": "/inference/schemas/common-definitions.json#/definitions/GenerationParameters"
+        }
+      }
+    }
+  },
+  "required": ["inputs"]
+}
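Unlike the text-generation spec, the `generate` property here uses a cross-file reference (`/inference/schemas/common-definitions.json#/definitions/GenerationParameters`), so a validator must be given that second schema before compiling. A sketch with Ajv, assuming common-definitions.json carries the matching `$id` and that the import paths resolve:

```ts
import Ajv from "ajv";
import draft6MetaSchema from "ajv/dist/refs/json-schema-draft-06.json";
import commonDefinitions from "../common-definitions.json"; // assumed $id: /inference/schemas/common-definitions.json
import inputSchema from "./spec/input.json"; // path assumed

const ajv = new Ajv({ strict: false });
ajv.addMetaSchema(draft6MetaSchema);
ajv.addSchema(commonDefinitions); // registered under its $id so the cross-file $ref resolves
const validate = ajv.compile(inputSchema);

console.log(validate({ inputs: "Hello", parameters: { generate: { num_beams: 2 } } })); // true
```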