@huggingface/tasks 0.13.0-test → 0.13.0-test2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.cts +4778 -0
- package/dist/index.d.ts +4778 -0
- package/package.json +2 -3
- package/dist/scripts/inference-codegen.d.ts +0 -2
- package/dist/scripts/inference-codegen.d.ts.map +0 -1
- package/dist/scripts/inference-tei-import.d.ts +0 -2
- package/dist/scripts/inference-tei-import.d.ts.map +0 -1
- package/dist/scripts/inference-tgi-import.d.ts +0 -2
- package/dist/scripts/inference-tgi-import.d.ts.map +0 -1
- package/dist/src/dataset-libraries.d.ts +0 -87
- package/dist/src/dataset-libraries.d.ts.map +0 -1
- package/dist/src/default-widget-inputs.d.ts +0 -6
- package/dist/src/default-widget-inputs.d.ts.map +0 -1
- package/dist/src/gguf.d.ts +0 -35
- package/dist/src/gguf.d.ts.map +0 -1
- package/dist/src/hardware.d.ts +0 -478
- package/dist/src/hardware.d.ts.map +0 -1
- package/dist/src/index.d.ts +0 -21
- package/dist/src/index.d.ts.map +0 -1
- package/dist/src/library-to-tasks.d.ts +0 -11
- package/dist/src/library-to-tasks.d.ts.map +0 -1
- package/dist/src/local-apps.d.ts +0 -195
- package/dist/src/local-apps.d.ts.map +0 -1
- package/dist/src/model-data.d.ts +0 -146
- package/dist/src/model-data.d.ts.map +0 -1
- package/dist/src/model-libraries-downloads.d.ts +0 -18
- package/dist/src/model-libraries-downloads.d.ts.map +0 -1
- package/dist/src/model-libraries-snippets.d.ts +0 -72
- package/dist/src/model-libraries-snippets.d.ts.map +0 -1
- package/dist/src/model-libraries.d.ts +0 -804
- package/dist/src/model-libraries.d.ts.map +0 -1
- package/dist/src/pipelines.d.ts +0 -425
- package/dist/src/pipelines.d.ts.map +0 -1
- package/dist/src/snippets/common.d.ts +0 -14
- package/dist/src/snippets/common.d.ts.map +0 -1
- package/dist/src/snippets/curl.d.ts +0 -17
- package/dist/src/snippets/curl.d.ts.map +0 -1
- package/dist/src/snippets/curl.spec.d.ts +0 -2
- package/dist/src/snippets/curl.spec.d.ts.map +0 -1
- package/dist/src/snippets/index.d.ts +0 -6
- package/dist/src/snippets/index.d.ts.map +0 -1
- package/dist/src/snippets/inputs.d.ts +0 -4
- package/dist/src/snippets/inputs.d.ts.map +0 -1
- package/dist/src/snippets/js.d.ts +0 -19
- package/dist/src/snippets/js.d.ts.map +0 -1
- package/dist/src/snippets/js.spec.d.ts +0 -2
- package/dist/src/snippets/js.spec.d.ts.map +0 -1
- package/dist/src/snippets/python.d.ts +0 -22
- package/dist/src/snippets/python.d.ts.map +0 -1
- package/dist/src/snippets/python.spec.d.ts +0 -2
- package/dist/src/snippets/python.spec.d.ts.map +0 -1
- package/dist/src/snippets/types.d.ts +0 -12
- package/dist/src/snippets/types.d.ts.map +0 -1
- package/dist/src/tasks/audio-classification/data.d.ts +0 -4
- package/dist/src/tasks/audio-classification/data.d.ts.map +0 -1
- package/dist/src/tasks/audio-classification/inference.d.ts +0 -53
- package/dist/src/tasks/audio-classification/inference.d.ts.map +0 -1
- package/dist/src/tasks/audio-to-audio/data.d.ts +0 -4
- package/dist/src/tasks/audio-to-audio/data.d.ts.map +0 -1
- package/dist/src/tasks/automatic-speech-recognition/data.d.ts +0 -4
- package/dist/src/tasks/automatic-speech-recognition/data.d.ts.map +0 -1
- package/dist/src/tasks/automatic-speech-recognition/inference.d.ts +0 -155
- package/dist/src/tasks/automatic-speech-recognition/inference.d.ts.map +0 -1
- package/dist/src/tasks/chat-completion/inference.d.ts +0 -291
- package/dist/src/tasks/chat-completion/inference.d.ts.map +0 -1
- package/dist/src/tasks/depth-estimation/data.d.ts +0 -4
- package/dist/src/tasks/depth-estimation/data.d.ts.map +0 -1
- package/dist/src/tasks/depth-estimation/inference.d.ts +0 -36
- package/dist/src/tasks/depth-estimation/inference.d.ts.map +0 -1
- package/dist/src/tasks/document-question-answering/data.d.ts +0 -4
- package/dist/src/tasks/document-question-answering/data.d.ts.map +0 -1
- package/dist/src/tasks/document-question-answering/inference.d.ts +0 -111
- package/dist/src/tasks/document-question-answering/inference.d.ts.map +0 -1
- package/dist/src/tasks/feature-extraction/data.d.ts +0 -4
- package/dist/src/tasks/feature-extraction/data.d.ts.map +0 -1
- package/dist/src/tasks/feature-extraction/inference.d.ts +0 -38
- package/dist/src/tasks/feature-extraction/inference.d.ts.map +0 -1
- package/dist/src/tasks/fill-mask/data.d.ts +0 -4
- package/dist/src/tasks/fill-mask/data.d.ts.map +0 -1
- package/dist/src/tasks/fill-mask/inference.d.ts +0 -63
- package/dist/src/tasks/fill-mask/inference.d.ts.map +0 -1
- package/dist/src/tasks/image-classification/data.d.ts +0 -4
- package/dist/src/tasks/image-classification/data.d.ts.map +0 -1
- package/dist/src/tasks/image-classification/inference.d.ts +0 -53
- package/dist/src/tasks/image-classification/inference.d.ts.map +0 -1
- package/dist/src/tasks/image-feature-extraction/data.d.ts +0 -4
- package/dist/src/tasks/image-feature-extraction/data.d.ts.map +0 -1
- package/dist/src/tasks/image-segmentation/data.d.ts +0 -4
- package/dist/src/tasks/image-segmentation/data.d.ts.map +0 -1
- package/dist/src/tasks/image-segmentation/inference.d.ts +0 -70
- package/dist/src/tasks/image-segmentation/inference.d.ts.map +0 -1
- package/dist/src/tasks/image-text-to-text/data.d.ts +0 -4
- package/dist/src/tasks/image-text-to-text/data.d.ts.map +0 -1
- package/dist/src/tasks/image-to-3d/data.d.ts +0 -4
- package/dist/src/tasks/image-to-3d/data.d.ts.map +0 -1
- package/dist/src/tasks/image-to-image/data.d.ts +0 -4
- package/dist/src/tasks/image-to-image/data.d.ts.map +0 -1
- package/dist/src/tasks/image-to-image/inference.d.ts +0 -65
- package/dist/src/tasks/image-to-image/inference.d.ts.map +0 -1
- package/dist/src/tasks/image-to-text/data.d.ts +0 -4
- package/dist/src/tasks/image-to-text/data.d.ts.map +0 -1
- package/dist/src/tasks/image-to-text/inference.d.ts +0 -139
- package/dist/src/tasks/image-to-text/inference.d.ts.map +0 -1
- package/dist/src/tasks/index.d.ts +0 -87
- package/dist/src/tasks/index.d.ts.map +0 -1
- package/dist/src/tasks/keypoint-detection/data.d.ts +0 -4
- package/dist/src/tasks/keypoint-detection/data.d.ts.map +0 -1
- package/dist/src/tasks/mask-generation/data.d.ts +0 -4
- package/dist/src/tasks/mask-generation/data.d.ts.map +0 -1
- package/dist/src/tasks/object-detection/data.d.ts +0 -4
- package/dist/src/tasks/object-detection/data.d.ts.map +0 -1
- package/dist/src/tasks/object-detection/inference.d.ts +0 -76
- package/dist/src/tasks/object-detection/inference.d.ts.map +0 -1
- package/dist/src/tasks/placeholder/data.d.ts +0 -4
- package/dist/src/tasks/placeholder/data.d.ts.map +0 -1
- package/dist/src/tasks/question-answering/data.d.ts +0 -4
- package/dist/src/tasks/question-answering/data.d.ts.map +0 -1
- package/dist/src/tasks/question-answering/inference.d.ts +0 -100
- package/dist/src/tasks/question-answering/inference.d.ts.map +0 -1
- package/dist/src/tasks/reinforcement-learning/data.d.ts +0 -4
- package/dist/src/tasks/reinforcement-learning/data.d.ts.map +0 -1
- package/dist/src/tasks/sentence-similarity/data.d.ts +0 -4
- package/dist/src/tasks/sentence-similarity/data.d.ts.map +0 -1
- package/dist/src/tasks/sentence-similarity/inference.d.ts +0 -32
- package/dist/src/tasks/sentence-similarity/inference.d.ts.map +0 -1
- package/dist/src/tasks/summarization/data.d.ts +0 -4
- package/dist/src/tasks/summarization/data.d.ts.map +0 -1
- package/dist/src/tasks/summarization/inference.d.ts +0 -56
- package/dist/src/tasks/summarization/inference.d.ts.map +0 -1
- package/dist/src/tasks/table-question-answering/data.d.ts +0 -4
- package/dist/src/tasks/table-question-answering/data.d.ts.map +0 -1
- package/dist/src/tasks/table-question-answering/inference.d.ts +0 -62
- package/dist/src/tasks/table-question-answering/inference.d.ts.map +0 -1
- package/dist/src/tasks/tabular-classification/data.d.ts +0 -4
- package/dist/src/tasks/tabular-classification/data.d.ts.map +0 -1
- package/dist/src/tasks/tabular-regression/data.d.ts +0 -4
- package/dist/src/tasks/tabular-regression/data.d.ts.map +0 -1
- package/dist/src/tasks/text-classification/data.d.ts +0 -4
- package/dist/src/tasks/text-classification/data.d.ts.map +0 -1
- package/dist/src/tasks/text-classification/inference.d.ts +0 -52
- package/dist/src/tasks/text-classification/inference.d.ts.map +0 -1
- package/dist/src/tasks/text-generation/data.d.ts +0 -4
- package/dist/src/tasks/text-generation/data.d.ts.map +0 -1
- package/dist/src/tasks/text-generation/inference.d.ts +0 -188
- package/dist/src/tasks/text-generation/inference.d.ts.map +0 -1
- package/dist/src/tasks/text-to-3d/data.d.ts +0 -4
- package/dist/src/tasks/text-to-3d/data.d.ts.map +0 -1
- package/dist/src/tasks/text-to-audio/inference.d.ts +0 -139
- package/dist/src/tasks/text-to-audio/inference.d.ts.map +0 -1
- package/dist/src/tasks/text-to-image/data.d.ts +0 -4
- package/dist/src/tasks/text-to-image/data.d.ts.map +0 -1
- package/dist/src/tasks/text-to-image/inference.d.ts +0 -72
- package/dist/src/tasks/text-to-image/inference.d.ts.map +0 -1
- package/dist/src/tasks/text-to-speech/data.d.ts +0 -4
- package/dist/src/tasks/text-to-speech/data.d.ts.map +0 -1
- package/dist/src/tasks/text-to-speech/inference.d.ts +0 -141
- package/dist/src/tasks/text-to-speech/inference.d.ts.map +0 -1
- package/dist/src/tasks/text-to-video/data.d.ts +0 -4
- package/dist/src/tasks/text-to-video/data.d.ts.map +0 -1
- package/dist/src/tasks/text2text-generation/inference.d.ts +0 -54
- package/dist/src/tasks/text2text-generation/inference.d.ts.map +0 -1
- package/dist/src/tasks/token-classification/data.d.ts +0 -4
- package/dist/src/tasks/token-classification/data.d.ts.map +0 -1
- package/dist/src/tasks/token-classification/inference.d.ts +0 -86
- package/dist/src/tasks/token-classification/inference.d.ts.map +0 -1
- package/dist/src/tasks/translation/data.d.ts +0 -4
- package/dist/src/tasks/translation/data.d.ts.map +0 -1
- package/dist/src/tasks/translation/inference.d.ts +0 -66
- package/dist/src/tasks/translation/inference.d.ts.map +0 -1
- package/dist/src/tasks/unconditional-image-generation/data.d.ts +0 -4
- package/dist/src/tasks/unconditional-image-generation/data.d.ts.map +0 -1
- package/dist/src/tasks/video-classification/data.d.ts +0 -4
- package/dist/src/tasks/video-classification/data.d.ts.map +0 -1
- package/dist/src/tasks/video-classification/inference.d.ts +0 -60
- package/dist/src/tasks/video-classification/inference.d.ts.map +0 -1
- package/dist/src/tasks/video-text-to-text/data.d.ts +0 -4
- package/dist/src/tasks/video-text-to-text/data.d.ts.map +0 -1
- package/dist/src/tasks/visual-question-answering/data.d.ts +0 -4
- package/dist/src/tasks/visual-question-answering/data.d.ts.map +0 -1
- package/dist/src/tasks/visual-question-answering/inference.d.ts +0 -63
- package/dist/src/tasks/visual-question-answering/inference.d.ts.map +0 -1
- package/dist/src/tasks/zero-shot-classification/data.d.ts +0 -4
- package/dist/src/tasks/zero-shot-classification/data.d.ts.map +0 -1
- package/dist/src/tasks/zero-shot-classification/inference.d.ts +0 -68
- package/dist/src/tasks/zero-shot-classification/inference.d.ts.map +0 -1
- package/dist/src/tasks/zero-shot-image-classification/data.d.ts +0 -4
- package/dist/src/tasks/zero-shot-image-classification/data.d.ts.map +0 -1
- package/dist/src/tasks/zero-shot-image-classification/inference.d.ts +0 -62
- package/dist/src/tasks/zero-shot-image-classification/inference.d.ts.map +0 -1
- package/dist/src/tasks/zero-shot-object-detection/data.d.ts +0 -4
- package/dist/src/tasks/zero-shot-object-detection/data.d.ts.map +0 -1
- package/dist/src/tasks/zero-shot-object-detection/inference.d.ts +0 -67
- package/dist/src/tasks/zero-shot-object-detection/inference.d.ts.map +0 -1
- package/dist/src/tokenizer-data.d.ts +0 -26
- package/dist/src/tokenizer-data.d.ts.map +0 -1
- package/dist/src/widget-example.d.ts +0 -83
- package/dist/src/widget-example.d.ts.map +0 -1
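The shape of this release: the per-file declaration files under package/dist/src/ are removed, and two rolled-up bundles (dist/index.d.ts and dist/index.d.cts, +4778 lines each) take their place, so consumers type-check against a single entry point. As a minimal consumption sketch, assuming the package root re-exports `PipelineType` and `ChatCompletionInput` as the bundled declarations suggest (the import below is illustrative, not taken from this diff):

// Sketch under stated assumptions: `PipelineType` and `ChatCompletionInput`
// are re-exported from the package root.
import type { PipelineType, ChatCompletionInput } from "@huggingface/tasks";

// `PipelineType` is `keyof typeof PIPELINE_DATA`, so task ids are checked
// against the PIPELINE_DATA keys at compile time.
const task: PipelineType = "text-classification";

// `messages` is the only required field of ChatCompletionInput; everything
// else (temperature, max_tokens, ...) is optional.
const req: ChatCompletionInput = {
    messages: [{ role: "user", content: "Hello!" }],
    temperature: 0.2,
    max_tokens: 64,
};

Because `PipelineType` is derived from `PIPELINE_DATA`, adding a task to that map automatically widens the accepted ids; no separate enum has to be kept in sync.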
package/dist/index.d.cts
ADDED
|
@@ -0,0 +1,4778 @@
|
|
|
1
|
+
declare const MODALITIES: readonly ["multimodal", "nlp", "cv", "audio", "tabular", "rl", "other"];
|
|
2
|
+
type Modality = (typeof MODALITIES)[number];
|
|
3
|
+
declare const MODALITY_LABELS: {
|
|
4
|
+
multimodal: string;
|
|
5
|
+
nlp: string;
|
|
6
|
+
audio: string;
|
|
7
|
+
cv: string;
|
|
8
|
+
rl: string;
|
|
9
|
+
tabular: string;
|
|
10
|
+
other: string;
|
|
11
|
+
};
|
|
12
|
+
/**
|
|
13
|
+
* Public interface for a sub task.
|
|
14
|
+
*
|
|
15
|
+
* This can be used in a model card's `model-index` metadata.
|
|
16
|
+
* and is more granular classification that can grow significantly
|
|
17
|
+
* over time as new tasks are added.
|
|
18
|
+
*/
|
|
19
|
+
interface SubTask {
|
|
20
|
+
/**
|
|
21
|
+
* type of the task (e.g. audio-source-separation)
|
|
22
|
+
*/
|
|
23
|
+
type: string;
|
|
24
|
+
/**
|
|
25
|
+
* displayed name of the task (e.g. Audio Source Separation)
|
|
26
|
+
*/
|
|
27
|
+
name: string;
|
|
28
|
+
}
|
|
29
|
+
/**
|
|
30
|
+
* Public interface for a PipelineData.
|
|
31
|
+
*
|
|
32
|
+
* This information corresponds to a pipeline type (aka task)
|
|
33
|
+
* in the Hub.
|
|
34
|
+
*/
|
|
35
|
+
interface PipelineData {
|
|
36
|
+
/**
|
|
37
|
+
* displayed name of the task (e.g. Text Classification)
|
|
38
|
+
*/
|
|
39
|
+
name: string;
|
|
40
|
+
subtasks?: SubTask[];
|
|
41
|
+
modality: Modality;
|
|
42
|
+
/**
|
|
43
|
+
* color for the tag icon.
|
|
44
|
+
*/
|
|
45
|
+
color: "blue" | "green" | "indigo" | "orange" | "red" | "yellow";
|
|
46
|
+
/**
|
|
47
|
+
* whether to hide in /models filters
|
|
48
|
+
*/
|
|
49
|
+
hideInModels?: boolean;
|
|
50
|
+
/**
|
|
51
|
+
* whether to hide in /datasets filters
|
|
52
|
+
*/
|
|
53
|
+
hideInDatasets?: boolean;
|
|
54
|
+
}
|
|
55
|
+
declare const PIPELINE_DATA: {
|
|
56
|
+
"text-classification": {
|
|
57
|
+
name: string;
|
|
58
|
+
subtasks: {
|
|
59
|
+
type: string;
|
|
60
|
+
name: string;
|
|
61
|
+
}[];
|
|
62
|
+
modality: "nlp";
|
|
63
|
+
color: "orange";
|
|
64
|
+
};
|
|
65
|
+
"token-classification": {
|
|
66
|
+
name: string;
|
|
67
|
+
subtasks: {
|
|
68
|
+
type: string;
|
|
69
|
+
name: string;
|
|
70
|
+
}[];
|
|
71
|
+
modality: "nlp";
|
|
72
|
+
color: "blue";
|
|
73
|
+
};
|
|
74
|
+
"table-question-answering": {
|
|
75
|
+
name: string;
|
|
76
|
+
modality: "nlp";
|
|
77
|
+
color: "green";
|
|
78
|
+
};
|
|
79
|
+
"question-answering": {
|
|
80
|
+
name: string;
|
|
81
|
+
subtasks: {
|
|
82
|
+
type: string;
|
|
83
|
+
name: string;
|
|
84
|
+
}[];
|
|
85
|
+
modality: "nlp";
|
|
86
|
+
color: "blue";
|
|
87
|
+
};
|
|
88
|
+
"zero-shot-classification": {
|
|
89
|
+
name: string;
|
|
90
|
+
modality: "nlp";
|
|
91
|
+
color: "yellow";
|
|
92
|
+
};
|
|
93
|
+
translation: {
|
|
94
|
+
name: string;
|
|
95
|
+
modality: "nlp";
|
|
96
|
+
color: "green";
|
|
97
|
+
};
|
|
98
|
+
summarization: {
|
|
99
|
+
name: string;
|
|
100
|
+
subtasks: {
|
|
101
|
+
type: string;
|
|
102
|
+
name: string;
|
|
103
|
+
}[];
|
|
104
|
+
modality: "nlp";
|
|
105
|
+
color: "indigo";
|
|
106
|
+
};
|
|
107
|
+
"feature-extraction": {
|
|
108
|
+
name: string;
|
|
109
|
+
modality: "nlp";
|
|
110
|
+
color: "red";
|
|
111
|
+
};
|
|
112
|
+
"text-generation": {
|
|
113
|
+
name: string;
|
|
114
|
+
subtasks: {
|
|
115
|
+
type: string;
|
|
116
|
+
name: string;
|
|
117
|
+
}[];
|
|
118
|
+
modality: "nlp";
|
|
119
|
+
color: "indigo";
|
|
120
|
+
};
|
|
121
|
+
"text2text-generation": {
|
|
122
|
+
name: string;
|
|
123
|
+
subtasks: {
|
|
124
|
+
type: string;
|
|
125
|
+
name: string;
|
|
126
|
+
}[];
|
|
127
|
+
modality: "nlp";
|
|
128
|
+
color: "indigo";
|
|
129
|
+
};
|
|
130
|
+
"fill-mask": {
|
|
131
|
+
name: string;
|
|
132
|
+
subtasks: {
|
|
133
|
+
type: string;
|
|
134
|
+
name: string;
|
|
135
|
+
}[];
|
|
136
|
+
modality: "nlp";
|
|
137
|
+
color: "red";
|
|
138
|
+
};
|
|
139
|
+
"sentence-similarity": {
|
|
140
|
+
name: string;
|
|
141
|
+
modality: "nlp";
|
|
142
|
+
color: "yellow";
|
|
143
|
+
};
|
|
144
|
+
"text-to-speech": {
|
|
145
|
+
name: string;
|
|
146
|
+
modality: "audio";
|
|
147
|
+
color: "yellow";
|
|
148
|
+
};
|
|
149
|
+
"text-to-audio": {
|
|
150
|
+
name: string;
|
|
151
|
+
modality: "audio";
|
|
152
|
+
color: "yellow";
|
|
153
|
+
};
|
|
154
|
+
"automatic-speech-recognition": {
|
|
155
|
+
name: string;
|
|
156
|
+
modality: "audio";
|
|
157
|
+
color: "yellow";
|
|
158
|
+
};
|
|
159
|
+
"audio-to-audio": {
|
|
160
|
+
name: string;
|
|
161
|
+
modality: "audio";
|
|
162
|
+
color: "blue";
|
|
163
|
+
};
|
|
164
|
+
"audio-classification": {
|
|
165
|
+
name: string;
|
|
166
|
+
subtasks: {
|
|
167
|
+
type: string;
|
|
168
|
+
name: string;
|
|
169
|
+
}[];
|
|
170
|
+
modality: "audio";
|
|
171
|
+
color: "green";
|
|
172
|
+
};
|
|
173
|
+
"voice-activity-detection": {
|
|
174
|
+
name: string;
|
|
175
|
+
modality: "audio";
|
|
176
|
+
color: "red";
|
|
177
|
+
};
|
|
178
|
+
"depth-estimation": {
|
|
179
|
+
name: string;
|
|
180
|
+
modality: "cv";
|
|
181
|
+
color: "yellow";
|
|
182
|
+
};
|
|
183
|
+
"image-classification": {
|
|
184
|
+
name: string;
|
|
185
|
+
subtasks: {
|
|
186
|
+
type: string;
|
|
187
|
+
name: string;
|
|
188
|
+
}[];
|
|
189
|
+
modality: "cv";
|
|
190
|
+
color: "blue";
|
|
191
|
+
};
|
|
192
|
+
"object-detection": {
|
|
193
|
+
name: string;
|
|
194
|
+
subtasks: {
|
|
195
|
+
type: string;
|
|
196
|
+
name: string;
|
|
197
|
+
}[];
|
|
198
|
+
modality: "cv";
|
|
199
|
+
color: "yellow";
|
|
200
|
+
};
|
|
201
|
+
"image-segmentation": {
|
|
202
|
+
name: string;
|
|
203
|
+
subtasks: {
|
|
204
|
+
type: string;
|
|
205
|
+
name: string;
|
|
206
|
+
}[];
|
|
207
|
+
modality: "cv";
|
|
208
|
+
color: "green";
|
|
209
|
+
};
|
|
210
|
+
"text-to-image": {
|
|
211
|
+
name: string;
|
|
212
|
+
modality: "cv";
|
|
213
|
+
color: "yellow";
|
|
214
|
+
};
|
|
215
|
+
"image-to-text": {
|
|
216
|
+
name: string;
|
|
217
|
+
subtasks: {
|
|
218
|
+
type: string;
|
|
219
|
+
name: string;
|
|
220
|
+
}[];
|
|
221
|
+
modality: "cv";
|
|
222
|
+
color: "red";
|
|
223
|
+
};
|
|
224
|
+
"image-to-image": {
|
|
225
|
+
name: string;
|
|
226
|
+
subtasks: {
|
|
227
|
+
type: string;
|
|
228
|
+
name: string;
|
|
229
|
+
}[];
|
|
230
|
+
modality: "cv";
|
|
231
|
+
color: "indigo";
|
|
232
|
+
};
|
|
233
|
+
"image-to-video": {
|
|
234
|
+
name: string;
|
|
235
|
+
modality: "cv";
|
|
236
|
+
color: "indigo";
|
|
237
|
+
};
|
|
238
|
+
"unconditional-image-generation": {
|
|
239
|
+
name: string;
|
|
240
|
+
modality: "cv";
|
|
241
|
+
color: "green";
|
|
242
|
+
};
|
|
243
|
+
"video-classification": {
|
|
244
|
+
name: string;
|
|
245
|
+
modality: "cv";
|
|
246
|
+
color: "blue";
|
|
247
|
+
};
|
|
248
|
+
"reinforcement-learning": {
|
|
249
|
+
name: string;
|
|
250
|
+
modality: "rl";
|
|
251
|
+
color: "red";
|
|
252
|
+
};
|
|
253
|
+
robotics: {
|
|
254
|
+
name: string;
|
|
255
|
+
modality: "rl";
|
|
256
|
+
subtasks: {
|
|
257
|
+
type: string;
|
|
258
|
+
name: string;
|
|
259
|
+
}[];
|
|
260
|
+
color: "blue";
|
|
261
|
+
};
|
|
262
|
+
"tabular-classification": {
|
|
263
|
+
name: string;
|
|
264
|
+
modality: "tabular";
|
|
265
|
+
subtasks: {
|
|
266
|
+
type: string;
|
|
267
|
+
name: string;
|
|
268
|
+
}[];
|
|
269
|
+
color: "blue";
|
|
270
|
+
};
|
|
271
|
+
"tabular-regression": {
|
|
272
|
+
name: string;
|
|
273
|
+
modality: "tabular";
|
|
274
|
+
subtasks: {
|
|
275
|
+
type: string;
|
|
276
|
+
name: string;
|
|
277
|
+
}[];
|
|
278
|
+
color: "blue";
|
|
279
|
+
};
|
|
280
|
+
"tabular-to-text": {
|
|
281
|
+
name: string;
|
|
282
|
+
modality: "tabular";
|
|
283
|
+
subtasks: {
|
|
284
|
+
type: string;
|
|
285
|
+
name: string;
|
|
286
|
+
}[];
|
|
287
|
+
color: "blue";
|
|
288
|
+
hideInModels: true;
|
|
289
|
+
};
|
|
290
|
+
"table-to-text": {
|
|
291
|
+
name: string;
|
|
292
|
+
modality: "nlp";
|
|
293
|
+
color: "blue";
|
|
294
|
+
hideInModels: true;
|
|
295
|
+
};
|
|
296
|
+
"multiple-choice": {
|
|
297
|
+
name: string;
|
|
298
|
+
subtasks: {
|
|
299
|
+
type: string;
|
|
300
|
+
name: string;
|
|
301
|
+
}[];
|
|
302
|
+
modality: "nlp";
|
|
303
|
+
color: "blue";
|
|
304
|
+
hideInModels: true;
|
|
305
|
+
};
|
|
306
|
+
"text-retrieval": {
|
|
307
|
+
name: string;
|
|
308
|
+
subtasks: {
|
|
309
|
+
type: string;
|
|
310
|
+
name: string;
|
|
311
|
+
}[];
|
|
312
|
+
modality: "nlp";
|
|
313
|
+
color: "indigo";
|
|
314
|
+
hideInModels: true;
|
|
315
|
+
};
|
|
316
|
+
"time-series-forecasting": {
|
|
317
|
+
name: string;
|
|
318
|
+
modality: "tabular";
|
|
319
|
+
subtasks: {
|
|
320
|
+
type: string;
|
|
321
|
+
name: string;
|
|
322
|
+
}[];
|
|
323
|
+
color: "blue";
|
|
324
|
+
};
|
|
325
|
+
"text-to-video": {
|
|
326
|
+
name: string;
|
|
327
|
+
modality: "cv";
|
|
328
|
+
color: "green";
|
|
329
|
+
};
|
|
330
|
+
"image-text-to-text": {
|
|
331
|
+
name: string;
|
|
332
|
+
modality: "multimodal";
|
|
333
|
+
color: "red";
|
|
334
|
+
hideInDatasets: true;
|
|
335
|
+
};
|
|
336
|
+
"visual-question-answering": {
|
|
337
|
+
name: string;
|
|
338
|
+
subtasks: {
|
|
339
|
+
type: string;
|
|
340
|
+
name: string;
|
|
341
|
+
}[];
|
|
342
|
+
modality: "multimodal";
|
|
343
|
+
color: "red";
|
|
344
|
+
};
|
|
345
|
+
"document-question-answering": {
|
|
346
|
+
name: string;
|
|
347
|
+
subtasks: {
|
|
348
|
+
type: string;
|
|
349
|
+
name: string;
|
|
350
|
+
}[];
|
|
351
|
+
modality: "multimodal";
|
|
352
|
+
color: "blue";
|
|
353
|
+
hideInDatasets: true;
|
|
354
|
+
};
|
|
355
|
+
"zero-shot-image-classification": {
|
|
356
|
+
name: string;
|
|
357
|
+
modality: "cv";
|
|
358
|
+
color: "yellow";
|
|
359
|
+
};
|
|
360
|
+
"graph-ml": {
|
|
361
|
+
name: string;
|
|
362
|
+
modality: "other";
|
|
363
|
+
color: "green";
|
|
364
|
+
};
|
|
365
|
+
"mask-generation": {
|
|
366
|
+
name: string;
|
|
367
|
+
modality: "cv";
|
|
368
|
+
color: "indigo";
|
|
369
|
+
};
|
|
370
|
+
"zero-shot-object-detection": {
|
|
371
|
+
name: string;
|
|
372
|
+
modality: "cv";
|
|
373
|
+
color: "yellow";
|
|
374
|
+
};
|
|
375
|
+
"text-to-3d": {
|
|
376
|
+
name: string;
|
|
377
|
+
modality: "cv";
|
|
378
|
+
color: "yellow";
|
|
379
|
+
};
|
|
380
|
+
"image-to-3d": {
|
|
381
|
+
name: string;
|
|
382
|
+
modality: "cv";
|
|
383
|
+
color: "green";
|
|
384
|
+
};
|
|
385
|
+
"image-feature-extraction": {
|
|
386
|
+
name: string;
|
|
387
|
+
modality: "cv";
|
|
388
|
+
color: "indigo";
|
|
389
|
+
};
|
|
390
|
+
"video-text-to-text": {
|
|
391
|
+
name: string;
|
|
392
|
+
modality: "multimodal";
|
|
393
|
+
color: "blue";
|
|
394
|
+
hideInDatasets: false;
|
|
395
|
+
};
|
|
396
|
+
"keypoint-detection": {
|
|
397
|
+
name: string;
|
|
398
|
+
subtasks: {
|
|
399
|
+
type: string;
|
|
400
|
+
name: string;
|
|
401
|
+
}[];
|
|
402
|
+
modality: "cv";
|
|
403
|
+
color: "red";
|
|
404
|
+
hideInDatasets: true;
|
|
405
|
+
};
|
|
406
|
+
"any-to-any": {
|
|
407
|
+
name: string;
|
|
408
|
+
modality: "multimodal";
|
|
409
|
+
color: "yellow";
|
|
410
|
+
hideInDatasets: true;
|
|
411
|
+
};
|
|
412
|
+
other: {
|
|
413
|
+
name: string;
|
|
414
|
+
modality: "other";
|
|
415
|
+
color: "blue";
|
|
416
|
+
hideInModels: true;
|
|
417
|
+
hideInDatasets: true;
|
|
418
|
+
};
|
|
419
|
+
};
|
|
420
|
+
type PipelineType = keyof typeof PIPELINE_DATA;
|
|
421
|
+
type WidgetType = PipelineType | "conversational";
|
|
422
|
+
declare const PIPELINE_TYPES: ("other" | "text-classification" | "token-classification" | "table-question-answering" | "question-answering" | "zero-shot-classification" | "translation" | "summarization" | "feature-extraction" | "text-generation" | "text2text-generation" | "fill-mask" | "sentence-similarity" | "text-to-speech" | "text-to-audio" | "automatic-speech-recognition" | "audio-to-audio" | "audio-classification" | "voice-activity-detection" | "depth-estimation" | "image-classification" | "object-detection" | "image-segmentation" | "text-to-image" | "image-to-text" | "image-to-image" | "image-to-video" | "unconditional-image-generation" | "video-classification" | "reinforcement-learning" | "robotics" | "tabular-classification" | "tabular-regression" | "tabular-to-text" | "table-to-text" | "multiple-choice" | "text-retrieval" | "time-series-forecasting" | "text-to-video" | "image-text-to-text" | "visual-question-answering" | "document-question-answering" | "zero-shot-image-classification" | "graph-ml" | "mask-generation" | "zero-shot-object-detection" | "text-to-3d" | "image-to-3d" | "image-feature-extraction" | "video-text-to-text" | "keypoint-detection" | "any-to-any")[];
|
|
423
|
+
declare const SUBTASK_TYPES: string[];
|
|
424
|
+
declare const PIPELINE_TYPES_SET: Set<"other" | "text-classification" | "token-classification" | "table-question-answering" | "question-answering" | "zero-shot-classification" | "translation" | "summarization" | "feature-extraction" | "text-generation" | "text2text-generation" | "fill-mask" | "sentence-similarity" | "text-to-speech" | "text-to-audio" | "automatic-speech-recognition" | "audio-to-audio" | "audio-classification" | "voice-activity-detection" | "depth-estimation" | "image-classification" | "object-detection" | "image-segmentation" | "text-to-image" | "image-to-text" | "image-to-image" | "image-to-video" | "unconditional-image-generation" | "video-classification" | "reinforcement-learning" | "robotics" | "tabular-classification" | "tabular-regression" | "tabular-to-text" | "table-to-text" | "multiple-choice" | "text-retrieval" | "time-series-forecasting" | "text-to-video" | "image-text-to-text" | "visual-question-answering" | "document-question-answering" | "zero-shot-image-classification" | "graph-ml" | "mask-generation" | "zero-shot-object-detection" | "text-to-3d" | "image-to-3d" | "image-feature-extraction" | "video-text-to-text" | "keypoint-detection" | "any-to-any">;
|
|
425
|
+
|
|
426
|
+
/**
|
|
427
|
+
* Inference code generated from the JSON schema spec in ./spec
|
|
428
|
+
*
|
|
429
|
+
* Using src/scripts/inference-codegen
|
|
430
|
+
*/
|
|
431
|
+
/**
|
|
432
|
+
* Inputs for Audio Classification inference
|
|
433
|
+
*/
|
|
434
|
+
interface AudioClassificationInput {
|
|
435
|
+
/**
|
|
436
|
+
* The input audio data as a base64-encoded string. If no `parameters` are provided, you can
|
|
437
|
+
* also provide the audio data as a raw bytes payload.
|
|
438
|
+
*/
|
|
439
|
+
inputs: string;
|
|
440
|
+
/**
|
|
441
|
+
* Additional inference parameters
|
|
442
|
+
*/
|
|
443
|
+
parameters?: AudioClassificationParameters;
|
|
444
|
+
[property: string]: unknown;
|
|
445
|
+
}
|
|
446
|
+
/**
|
|
447
|
+
* Additional inference parameters
|
|
448
|
+
*
|
|
449
|
+
* Additional inference parameters for Audio Classification
|
|
450
|
+
*/
|
|
451
|
+
interface AudioClassificationParameters {
|
|
452
|
+
function_to_apply?: ClassificationOutputTransform$3;
|
|
453
|
+
/**
|
|
454
|
+
* When specified, limits the output to the top K most probable classes.
|
|
455
|
+
*/
|
|
456
|
+
top_k?: number;
|
|
457
|
+
[property: string]: unknown;
|
|
458
|
+
}
|
|
459
|
+
/**
|
|
460
|
+
* The function to apply to the model outputs in order to retrieve the scores.
|
|
461
|
+
*/
|
|
462
|
+
type ClassificationOutputTransform$3 = "sigmoid" | "softmax" | "none";
|
|
463
|
+
type AudioClassificationOutput = AudioClassificationOutputElement[];
|
|
464
|
+
/**
|
|
465
|
+
* Outputs for Audio Classification inference
|
|
466
|
+
*/
|
|
467
|
+
interface AudioClassificationOutputElement {
|
|
468
|
+
/**
|
|
469
|
+
* The predicted class label.
|
|
470
|
+
*/
|
|
471
|
+
label: string;
|
|
472
|
+
/**
|
|
473
|
+
* The corresponding probability.
|
|
474
|
+
*/
|
|
475
|
+
score: number;
|
|
476
|
+
[property: string]: unknown;
|
|
477
|
+
}
|
|
478
|
+
|
|
479
|
+
/**
|
|
480
|
+
* Inference code generated from the JSON schema spec in ./spec
|
|
481
|
+
*
|
|
482
|
+
* Using src/scripts/inference-codegen
|
|
483
|
+
*/
|
|
484
|
+
/**
|
|
485
|
+
* Inputs for Automatic Speech Recognition inference
|
|
486
|
+
*/
|
|
487
|
+
interface AutomaticSpeechRecognitionInput {
|
|
488
|
+
/**
|
|
489
|
+
* The input audio data as a base64-encoded string. If no `parameters` are provided, you can
|
|
490
|
+
* also provide the audio data as a raw bytes payload.
|
|
491
|
+
*/
|
|
492
|
+
inputs: string;
|
|
493
|
+
/**
|
|
494
|
+
* Additional inference parameters
|
|
495
|
+
*/
|
|
496
|
+
parameters?: AutomaticSpeechRecognitionParameters;
|
|
497
|
+
[property: string]: unknown;
|
|
498
|
+
}
|
|
499
|
+
/**
|
|
500
|
+
* Additional inference parameters
|
|
501
|
+
*
|
|
502
|
+
* Additional inference parameters for Automatic Speech Recognition
|
|
503
|
+
*/
|
|
504
|
+
interface AutomaticSpeechRecognitionParameters {
|
|
505
|
+
/**
|
|
506
|
+
* Parametrization of the text generation process
|
|
507
|
+
*/
|
|
508
|
+
generation_parameters?: GenerationParameters$2;
|
|
509
|
+
/**
|
|
510
|
+
* Whether to output corresponding timestamps with the generated text
|
|
511
|
+
*/
|
|
512
|
+
return_timestamps?: boolean;
|
|
513
|
+
[property: string]: unknown;
|
|
514
|
+
}
|
|
515
|
+
/**
|
|
516
|
+
* Parametrization of the text generation process
|
|
517
|
+
*
|
|
518
|
+
* Ad-hoc parametrization of the text generation process
|
|
519
|
+
*/
|
|
520
|
+
interface GenerationParameters$2 {
|
|
521
|
+
/**
|
|
522
|
+
* Whether to use sampling instead of greedy decoding when generating new tokens.
|
|
523
|
+
*/
|
|
524
|
+
do_sample?: boolean;
|
|
525
|
+
/**
|
|
526
|
+
* Controls the stopping condition for beam-based methods.
|
|
527
|
+
*/
|
|
528
|
+
early_stopping?: EarlyStoppingUnion$2;
|
|
529
|
+
/**
|
|
530
|
+
* If set to float strictly between 0 and 1, only tokens with a conditional probability
|
|
531
|
+
* greater than epsilon_cutoff will be sampled. In the paper, suggested values range from
|
|
532
|
+
* 3e-4 to 9e-4, depending on the size of the model. See [Truncation Sampling as Language
|
|
533
|
+
* Model Desmoothing](https://hf.co/papers/2210.15191) for more details.
|
|
534
|
+
*/
|
|
535
|
+
epsilon_cutoff?: number;
|
|
536
|
+
/**
|
|
537
|
+
* Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to
|
|
538
|
+
* float strictly between 0 and 1, a token is only considered if it is greater than either
|
|
539
|
+
* eta_cutoff or sqrt(eta_cutoff) * exp(-entropy(softmax(next_token_logits))). The latter
|
|
540
|
+
* term is intuitively the expected next token probability, scaled by sqrt(eta_cutoff). In
|
|
541
|
+
* the paper, suggested values range from 3e-4 to 2e-3, depending on the size of the model.
|
|
542
|
+
* See [Truncation Sampling as Language Model Desmoothing](https://hf.co/papers/2210.15191)
|
|
543
|
+
* for more details.
|
|
544
|
+
*/
|
|
545
|
+
eta_cutoff?: number;
|
|
546
|
+
/**
|
|
547
|
+
* The maximum length (in tokens) of the generated text, including the input.
|
|
548
|
+
*/
|
|
549
|
+
max_length?: number;
|
|
550
|
+
/**
|
|
551
|
+
* The maximum number of tokens to generate. Takes precedence over max_length.
|
|
552
|
+
*/
|
|
553
|
+
max_new_tokens?: number;
|
|
554
|
+
/**
|
|
555
|
+
* The minimum length (in tokens) of the generated text, including the input.
|
|
556
|
+
*/
|
|
557
|
+
min_length?: number;
|
|
558
|
+
/**
|
|
559
|
+
* The minimum number of tokens to generate. Takes precedence over min_length.
|
|
560
|
+
*/
|
|
561
|
+
min_new_tokens?: number;
|
|
562
|
+
/**
|
|
563
|
+
* Number of groups to divide num_beams into in order to ensure diversity among different
|
|
564
|
+
* groups of beams. See [this paper](https://hf.co/papers/1610.02424) for more details.
|
|
565
|
+
*/
|
|
566
|
+
num_beam_groups?: number;
|
|
567
|
+
/**
|
|
568
|
+
* Number of beams to use for beam search.
|
|
569
|
+
*/
|
|
570
|
+
num_beams?: number;
|
|
571
|
+
/**
|
|
572
|
+
* The value balances the model confidence and the degeneration penalty in contrastive
|
|
573
|
+
* search decoding.
|
|
574
|
+
*/
|
|
575
|
+
penalty_alpha?: number;
|
|
576
|
+
/**
|
|
577
|
+
* The value used to modulate the next token probabilities.
|
|
578
|
+
*/
|
|
579
|
+
temperature?: number;
|
|
580
|
+
/**
|
|
581
|
+
* The number of highest probability vocabulary tokens to keep for top-k-filtering.
|
|
582
|
+
*/
|
|
583
|
+
top_k?: number;
|
|
584
|
+
/**
|
|
585
|
+
* If set to float < 1, only the smallest set of most probable tokens with probabilities
|
|
586
|
+
* that add up to top_p or higher are kept for generation.
|
|
587
|
+
*/
|
|
588
|
+
top_p?: number;
|
|
589
|
+
/**
|
|
590
|
+
* Local typicality measures how similar the conditional probability of predicting a target
|
|
591
|
+
* token next is to the expected conditional probability of predicting a random token next,
|
|
592
|
+
* given the partial text already generated. If set to float < 1, the smallest set of the
|
|
593
|
+
* most locally typical tokens with probabilities that add up to typical_p or higher are
|
|
594
|
+
* kept for generation. See [this paper](https://hf.co/papers/2202.00666) for more details.
|
|
595
|
+
*/
|
|
596
|
+
typical_p?: number;
|
|
597
|
+
/**
|
|
598
|
+
* Whether the model should use the past last key/values attentions to speed up decoding
|
|
599
|
+
*/
|
|
600
|
+
use_cache?: boolean;
|
|
601
|
+
[property: string]: unknown;
|
|
602
|
+
}
|
|
603
|
+
/**
|
|
604
|
+
* Controls the stopping condition for beam-based methods.
|
|
605
|
+
*/
|
|
606
|
+
type EarlyStoppingUnion$2 = boolean | "never";
|
|
607
|
+
/**
|
|
608
|
+
* Outputs of inference for the Automatic Speech Recognition task
|
|
609
|
+
*/
|
|
610
|
+
interface AutomaticSpeechRecognitionOutput {
|
|
611
|
+
/**
|
|
612
|
+
* When returnTimestamps is enabled, chunks contains a list of audio chunks identified by
|
|
613
|
+
* the model.
|
|
614
|
+
*/
|
|
615
|
+
chunks?: AutomaticSpeechRecognitionOutputChunk[];
|
|
616
|
+
/**
|
|
617
|
+
* The recognized text.
|
|
618
|
+
*/
|
|
619
|
+
text: string;
|
|
620
|
+
[property: string]: unknown;
|
|
621
|
+
}
|
|
622
|
+
interface AutomaticSpeechRecognitionOutputChunk {
|
|
623
|
+
/**
|
|
624
|
+
* A chunk of text identified by the model
|
|
625
|
+
*/
|
|
626
|
+
text: string;
|
|
627
|
+
/**
|
|
628
|
+
* The start and end timestamps corresponding with the text
|
|
629
|
+
*/
|
|
630
|
+
timestamps: number[];
|
|
631
|
+
[property: string]: unknown;
|
|
632
|
+
}
|
|
633
|
+
|
|
634
|
+
/**
|
|
635
|
+
* Inference code generated from the JSON schema spec in ./spec
|
|
636
|
+
*
|
|
637
|
+
* Using src/scripts/inference-codegen
|
|
638
|
+
*/
|
|
639
|
+
/**
|
|
640
|
+
* Chat Completion Input.
|
|
641
|
+
*
|
|
642
|
+
* Auto-generated from TGI specs.
|
|
643
|
+
* For more details, check out
|
|
644
|
+
* https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts.
|
|
645
|
+
*/
|
|
646
|
+
interface ChatCompletionInput {
|
|
647
|
+
/**
|
|
648
|
+
* Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing
|
|
649
|
+
* frequency in the text so far,
|
|
650
|
+
* decreasing the model's likelihood to repeat the same line verbatim.
|
|
651
|
+
*/
|
|
652
|
+
frequency_penalty?: number;
|
|
653
|
+
/**
|
|
654
|
+
* UNUSED
|
|
655
|
+
* Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON
|
|
656
|
+
* object that maps tokens
|
|
657
|
+
* (specified by their token ID in the tokenizer) to an associated bias value from -100 to
|
|
658
|
+
* 100. Mathematically,
|
|
659
|
+
* the bias is added to the logits generated by the model prior to sampling. The exact
|
|
660
|
+
* effect will vary per model,
|
|
661
|
+
* but values between -1 and 1 should decrease or increase likelihood of selection; values
|
|
662
|
+
* like -100 or 100 should
|
|
663
|
+
* result in a ban or exclusive selection of the relevant token.
|
|
664
|
+
*/
|
|
665
|
+
logit_bias?: number[];
|
|
666
|
+
/**
|
|
667
|
+
* Whether to return log probabilities of the output tokens or not. If true, returns the log
|
|
668
|
+
* probabilities of each
|
|
669
|
+
* output token returned in the content of message.
|
|
670
|
+
*/
|
|
671
|
+
logprobs?: boolean;
|
|
672
|
+
/**
|
|
673
|
+
* The maximum number of tokens that can be generated in the chat completion.
|
|
674
|
+
*/
|
|
675
|
+
max_tokens?: number;
|
|
676
|
+
/**
|
|
677
|
+
* A list of messages comprising the conversation so far.
|
|
678
|
+
*/
|
|
679
|
+
messages: ChatCompletionInputMessage[];
|
|
680
|
+
/**
|
|
681
|
+
* [UNUSED] ID of the model to use. See the model endpoint compatibility table for details
|
|
682
|
+
* on which models work with the Chat API.
|
|
683
|
+
*/
|
|
684
|
+
model?: string;
|
|
685
|
+
/**
|
|
686
|
+
* UNUSED
|
|
687
|
+
* How many chat completion choices to generate for each input message. Note that you will
|
|
688
|
+
* be charged based on the
|
|
689
|
+
* number of generated tokens across all of the choices. Keep n as 1 to minimize costs.
|
|
690
|
+
*/
|
|
691
|
+
n?: number;
|
|
692
|
+
/**
|
|
693
|
+
* Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they
|
|
694
|
+
* appear in the text so far,
|
|
695
|
+
* increasing the model's likelihood to talk about new topics
|
|
696
|
+
*/
|
|
697
|
+
presence_penalty?: number;
|
|
698
|
+
response_format?: ChatCompletionInputGrammarType;
|
|
699
|
+
seed?: number;
|
|
700
|
+
/**
|
|
701
|
+
* Up to 4 sequences where the API will stop generating further tokens.
|
|
702
|
+
*/
|
|
703
|
+
stop?: string[];
|
|
704
|
+
stream?: boolean;
|
|
705
|
+
stream_options?: ChatCompletionInputStreamOptions;
|
|
706
|
+
/**
|
|
707
|
+
* What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the
|
|
708
|
+
* output more random, while
|
|
709
|
+
* lower values like 0.2 will make it more focused and deterministic.
|
|
710
|
+
*
|
|
711
|
+
* We generally recommend altering this or `top_p` but not both.
|
|
712
|
+
*/
|
|
713
|
+
temperature?: number;
|
|
714
|
+
tool_choice?: ChatCompletionInputTool;
|
|
715
|
+
/**
|
|
716
|
+
* A prompt to be appended before the tools
|
|
717
|
+
*/
|
|
718
|
+
tool_prompt?: string;
|
|
719
|
+
/**
|
|
720
|
+
* A list of tools the model may call. Currently, only functions are supported as a tool.
|
|
721
|
+
* Use this to provide a list of
|
|
722
|
+
* functions the model may generate JSON inputs for.
|
|
723
|
+
*/
|
|
724
|
+
tools?: ToolElement[];
|
|
725
|
+
/**
|
|
726
|
+
* An integer between 0 and 5 specifying the number of most likely tokens to return at each
|
|
727
|
+
* token position, each with
|
|
728
|
+
* an associated log probability. logprobs must be set to true if this parameter is used.
|
|
729
|
+
*/
|
|
730
|
+
top_logprobs?: number;
|
|
731
|
+
/**
|
|
732
|
+
* An alternative to sampling with temperature, called nucleus sampling, where the model
|
|
733
|
+
* considers the results of the
|
|
734
|
+
* tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10%
|
|
735
|
+
* probability mass are considered.
|
|
736
|
+
*/
|
|
737
|
+
top_p?: number;
|
|
738
|
+
[property: string]: unknown;
|
|
739
|
+
}
|
|
740
|
+
interface ChatCompletionInputMessage {
|
|
741
|
+
content: ChatCompletionInputMessageContent;
|
|
742
|
+
name?: string;
|
|
743
|
+
role: string;
|
|
744
|
+
[property: string]: unknown;
|
|
745
|
+
}
|
|
746
|
+
type ChatCompletionInputMessageContent = ChatCompletionInputMessageChunk[] | string;
|
|
747
|
+
interface ChatCompletionInputMessageChunk {
|
|
748
|
+
image_url?: ChatCompletionInputURL;
|
|
749
|
+
text?: string;
|
|
750
|
+
type: ChatCompletionInputMessageChunkType;
|
|
751
|
+
[property: string]: unknown;
|
|
752
|
+
}
|
|
753
|
+
interface ChatCompletionInputURL {
|
|
754
|
+
url: string;
|
|
755
|
+
[property: string]: unknown;
|
|
756
|
+
}
|
|
757
|
+
type ChatCompletionInputMessageChunkType = "text" | "image_url";
|
|
758
|
+
interface ChatCompletionInputGrammarType {
|
|
759
|
+
type: ChatCompletionInputGrammarTypeType;
|
|
760
|
+
/**
|
|
761
|
+
* A string that represents a [JSON Schema](https://json-schema.org/).
|
|
762
|
+
*
|
|
763
|
+
* JSON Schema is a declarative language that allows to annotate JSON documents
|
|
764
|
+
* with types and descriptions.
|
|
765
|
+
*/
|
|
766
|
+
value: unknown;
|
|
767
|
+
[property: string]: unknown;
|
|
768
|
+
}
|
|
769
|
+
type ChatCompletionInputGrammarTypeType = "json" | "regex";
|
|
770
|
+
interface ChatCompletionInputStreamOptions {
|
|
771
|
+
/**
|
|
772
|
+
* If set, an additional chunk will be streamed before the data: [DONE] message. The usage
|
|
773
|
+
* field on this chunk shows the token usage statistics for the entire request, and the
|
|
774
|
+
* choices field will always be an empty array. All other chunks will also include a usage
|
|
775
|
+
* field, but with a null value.
|
|
776
|
+
*/
|
|
777
|
+
include_usage: boolean;
|
|
778
|
+
[property: string]: unknown;
|
|
779
|
+
}
|
|
780
|
+
type ChatCompletionInputTool = ChatCompletionInputToolType | string;
|
|
781
|
+
interface ChatCompletionInputToolType {
|
|
782
|
+
function?: ChatCompletionInputFunctionName;
|
|
783
|
+
[property: string]: unknown;
|
|
784
|
+
}
|
|
785
|
+
interface ChatCompletionInputFunctionName {
|
|
786
|
+
name: string;
|
|
787
|
+
[property: string]: unknown;
|
|
788
|
+
}
|
|
789
|
+
interface ToolElement {
|
|
790
|
+
function: ChatCompletionInputFunctionDefinition;
|
|
791
|
+
type: string;
|
|
792
|
+
[property: string]: unknown;
|
|
793
|
+
}
|
|
794
|
+
interface ChatCompletionInputFunctionDefinition {
|
|
795
|
+
arguments: unknown;
|
|
796
|
+
description?: string;
|
|
797
|
+
name: string;
|
|
798
|
+
[property: string]: unknown;
|
|
799
|
+
}
|
|
800
|
+
/**
|
|
801
|
+
* Chat Completion Output.
|
|
802
|
+
*
|
|
803
|
+
* Auto-generated from TGI specs.
|
|
804
|
+
* For more details, check out
|
|
805
|
+
* https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts.
|
|
806
|
+
*/
|
|
807
|
+
interface ChatCompletionOutput {
|
|
808
|
+
choices: ChatCompletionOutputComplete[];
|
|
809
|
+
created: number;
|
|
810
|
+
id: string;
|
|
811
|
+
model: string;
|
|
812
|
+
system_fingerprint: string;
|
|
813
|
+
usage: ChatCompletionOutputUsage;
|
|
814
|
+
[property: string]: unknown;
|
|
815
|
+
}
|
|
816
|
+
interface ChatCompletionOutputComplete {
|
|
817
|
+
finish_reason: string;
|
|
818
|
+
index: number;
|
|
819
|
+
logprobs?: ChatCompletionOutputLogprobs;
|
|
820
|
+
message: ChatCompletionOutputMessage;
|
|
821
|
+
[property: string]: unknown;
|
|
822
|
+
}
|
|
823
|
+
interface ChatCompletionOutputLogprobs {
|
|
824
|
+
content: ChatCompletionOutputLogprob[];
|
|
825
|
+
[property: string]: unknown;
|
|
826
|
+
}
|
|
827
|
+
interface ChatCompletionOutputLogprob {
|
|
828
|
+
logprob: number;
|
|
829
|
+
token: string;
|
|
830
|
+
top_logprobs: ChatCompletionOutputTopLogprob[];
|
|
831
|
+
[property: string]: unknown;
|
|
832
|
+
}
|
|
833
|
+
interface ChatCompletionOutputTopLogprob {
|
|
834
|
+
logprob: number;
|
|
835
|
+
token: string;
|
|
836
|
+
[property: string]: unknown;
|
|
837
|
+
}
|
|
838
|
+
interface ChatCompletionOutputMessage {
|
|
839
|
+
content?: string;
|
|
840
|
+
role: string;
|
|
841
|
+
tool_calls?: ChatCompletionOutputToolCall[];
|
|
842
|
+
[property: string]: unknown;
|
|
843
|
+
}
|
|
844
|
+
interface ChatCompletionOutputToolCall {
|
|
845
|
+
function: ChatCompletionOutputFunctionDefinition;
|
|
846
|
+
id: string;
|
|
847
|
+
type: string;
|
|
848
|
+
[property: string]: unknown;
|
|
849
|
+
}
|
|
850
|
+
interface ChatCompletionOutputFunctionDefinition {
|
|
851
|
+
arguments: unknown;
|
|
852
|
+
description?: string;
|
|
853
|
+
name: string;
|
|
854
|
+
[property: string]: unknown;
|
|
855
|
+
}
|
|
856
|
+
interface ChatCompletionOutputUsage {
|
|
857
|
+
completion_tokens: number;
|
|
858
|
+
prompt_tokens: number;
|
|
859
|
+
total_tokens: number;
|
|
860
|
+
[property: string]: unknown;
|
|
861
|
+
}
|
|
862
|
+
/**
|
|
863
|
+
* Chat Completion Stream Output.
|
|
864
|
+
*
|
|
865
|
+
* Auto-generated from TGI specs.
|
|
866
|
+
* For more details, check out
|
|
867
|
+
* https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts.
|
|
868
|
+
*/
|
|
869
|
+
interface ChatCompletionStreamOutput {
|
|
870
|
+
choices: ChatCompletionStreamOutputChoice[];
|
|
871
|
+
created: number;
|
|
872
|
+
id: string;
|
|
873
|
+
model: string;
|
|
874
|
+
system_fingerprint: string;
|
|
875
|
+
usage?: ChatCompletionStreamOutputUsage;
|
|
876
|
+
[property: string]: unknown;
|
|
877
|
+
}
|
|
878
|
+
interface ChatCompletionStreamOutputChoice {
|
|
879
|
+
delta: ChatCompletionStreamOutputDelta;
|
|
880
|
+
finish_reason?: string;
|
|
881
|
+
index: number;
|
|
882
|
+
logprobs?: ChatCompletionStreamOutputLogprobs;
|
|
883
|
+
[property: string]: unknown;
|
|
884
|
+
}
|
|
885
|
+
interface ChatCompletionStreamOutputDelta {
|
|
886
|
+
content?: string;
|
|
887
|
+
role: string;
|
|
888
|
+
tool_calls?: ChatCompletionStreamOutputDeltaToolCall;
|
|
889
|
+
[property: string]: unknown;
|
|
890
|
+
}
|
|
891
|
+
interface ChatCompletionStreamOutputDeltaToolCall {
|
|
892
|
+
function: ChatCompletionStreamOutputFunction;
|
|
893
|
+
id: string;
|
|
894
|
+
index: number;
|
|
895
|
+
type: string;
|
|
896
|
+
[property: string]: unknown;
|
|
897
|
+
}
|
|
898
|
+
interface ChatCompletionStreamOutputFunction {
|
|
899
|
+
arguments: string;
|
|
900
|
+
name?: string;
|
|
901
|
+
[property: string]: unknown;
|
|
902
|
+
}
|
|
903
|
+
interface ChatCompletionStreamOutputLogprobs {
|
|
904
|
+
content: ChatCompletionStreamOutputLogprob[];
|
|
905
|
+
[property: string]: unknown;
|
|
906
|
+
}
|
|
907
|
+
interface ChatCompletionStreamOutputLogprob {
|
|
908
|
+
logprob: number;
|
|
909
|
+
token: string;
|
|
910
|
+
top_logprobs: ChatCompletionStreamOutputTopLogprob[];
|
|
911
|
+
[property: string]: unknown;
|
|
912
|
+
}
|
|
913
|
+
interface ChatCompletionStreamOutputTopLogprob {
|
|
914
|
+
logprob: number;
|
|
915
|
+
token: string;
|
|
916
|
+
[property: string]: unknown;
|
|
917
|
+
}
|
|
918
|
+
interface ChatCompletionStreamOutputUsage {
|
|
919
|
+
completion_tokens: number;
|
|
920
|
+
prompt_tokens: number;
|
|
921
|
+
total_tokens: number;
|
|
922
|
+
[property: string]: unknown;
|
|
923
|
+
}
|
|
924
|
+
|
|
925
|
+
/**
|
|
926
|
+
* Inference code generated from the JSON schema spec in ./spec
|
|
927
|
+
*
|
|
928
|
+
* Using src/scripts/inference-codegen
|
|
929
|
+
*/
|
|
930
|
+
/**
|
|
931
|
+
* Inputs for Document Question Answering inference
|
|
932
|
+
*/
|
|
933
|
+
interface DocumentQuestionAnsweringInput {
|
|
934
|
+
/**
|
|
935
|
+
* One (document, question) pair to answer
|
|
936
|
+
*/
|
|
937
|
+
inputs: DocumentQuestionAnsweringInputData;
|
|
938
|
+
/**
|
|
939
|
+
* Additional inference parameters
|
|
940
|
+
*/
|
|
941
|
+
parameters?: DocumentQuestionAnsweringParameters;
|
|
942
|
+
[property: string]: unknown;
|
|
943
|
+
}
|
|
944
|
+
/**
|
|
945
|
+
* One (document, question) pair to answer
|
|
946
|
+
*/
|
|
947
|
+
interface DocumentQuestionAnsweringInputData {
|
|
948
|
+
/**
|
|
949
|
+
* The image on which the question is asked
|
|
950
|
+
*/
|
|
951
|
+
image: unknown;
|
|
952
|
+
/**
|
|
953
|
+
* A question to ask of the document
|
|
954
|
+
*/
|
|
955
|
+
question: string;
|
|
956
|
+
[property: string]: unknown;
|
|
957
|
+
}
|
|
958
|
+
/**
|
|
959
|
+
* Additional inference parameters
|
|
960
|
+
*
|
|
961
|
+
* Additional inference parameters for Document Question Answering
|
|
962
|
+
*/
|
|
963
|
+
interface DocumentQuestionAnsweringParameters {
|
|
964
|
+
/**
|
|
965
|
+
* If the words in the document are too long to fit with the question for the model, it will
|
|
966
|
+
* be split in several chunks with some overlap. This argument controls the size of that
|
|
967
|
+
* overlap.
|
|
968
|
+
*/
|
|
969
|
+
doc_stride?: number;
|
|
970
|
+
/**
|
|
971
|
+
* Whether to accept impossible as an answer
|
|
972
|
+
*/
|
|
973
|
+
handle_impossible_answer?: boolean;
|
|
974
|
+
/**
|
|
975
|
+
* Language to use while running OCR. Defaults to english.
|
|
976
|
+
*/
|
|
977
|
+
lang?: string;
|
|
978
|
+
/**
|
|
979
|
+
* The maximum length of predicted answers (e.g., only answers with a shorter length are
|
|
980
|
+
* considered).
|
|
981
|
+
*/
|
|
982
|
+
max_answer_len?: number;
|
|
983
|
+
/**
|
|
984
|
+
* The maximum length of the question after tokenization. It will be truncated if needed.
|
|
985
|
+
*/
|
|
986
|
+
max_question_len?: number;
|
|
987
|
+
/**
|
|
988
|
+
* The maximum length of the total sentence (context + question) in tokens of each chunk
|
|
989
|
+
* passed to the model. The context will be split in several chunks (using doc_stride as
|
|
990
|
+
* overlap) if needed.
|
|
991
|
+
*/
|
|
992
|
+
max_seq_len?: number;
|
|
993
|
+
/**
|
|
994
|
+
* The number of answers to return (will be chosen by order of likelihood). Can return less
|
|
995
|
+
* than top_k answers if there are not enough options available within the context.
|
|
996
|
+
*/
|
|
997
|
+
top_k?: number;
|
|
998
|
+
/**
|
|
999
|
+
* A list of words and bounding boxes (normalized 0->1000). If provided, the inference will
|
|
1000
|
+
* skip the OCR step and use the provided bounding boxes instead.
|
|
1001
|
+
*/
|
|
1002
|
+
word_boxes?: WordBox[];
|
|
1003
|
+
[property: string]: unknown;
|
|
1004
|
+
}
|
|
1005
|
+
type WordBox = number[] | string;
|
|
1006
|
+
type DocumentQuestionAnsweringOutput = DocumentQuestionAnsweringOutputElement[];
|
|
1007
|
+
/**
|
|
1008
|
+
* Outputs of inference for the Document Question Answering task
|
|
1009
|
+
*/
|
|
1010
|
+
interface DocumentQuestionAnsweringOutputElement {
|
|
1011
|
+
/**
|
|
1012
|
+
* The answer to the question.
|
|
1013
|
+
*/
|
|
1014
|
+
answer: string;
|
|
1015
|
+
/**
|
|
1016
|
+
* The end word index of the answer (in the OCR’d version of the input or provided word
|
|
1017
|
+
* boxes).
|
|
1018
|
+
*/
|
|
1019
|
+
end: number;
|
|
1020
|
+
/**
|
|
1021
|
+
* The probability associated to the answer.
|
|
1022
|
+
*/
|
|
1023
|
+
score: number;
|
|
1024
|
+
/**
|
|
1025
|
+
* The start word index of the answer (in the OCR’d version of the input or provided word
|
|
1026
|
+
* boxes).
|
|
1027
|
+
*/
|
|
1028
|
+
start: number;
|
|
1029
|
+
/**
|
|
1030
|
+
* The index of each word/box pair that is in the answer
|
|
1031
|
+
*/
|
|
1032
|
+
words: number[];
|
|
1033
|
+
[property: string]: unknown;
|
|
1034
|
+
}
|
|
1035
|
+
|
|
1036
|
+
/**
|
|
1037
|
+
* Inference code generated from the JSON schema spec in ./spec
|
|
1038
|
+
*
|
|
1039
|
+
* Using src/scripts/inference-codegen
|
|
1040
|
+
*/
|
|
1041
|
+
type FeatureExtractionOutput = Array<number[]>;
|
|
1042
|
+
/**
|
|
1043
|
+
* Feature Extraction Input.
|
|
1044
|
+
*
|
|
1045
|
+
* Auto-generated from TEI specs.
|
|
1046
|
+
* For more details, check out
|
|
1047
|
+
* https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tei-import.ts.
|
|
1048
|
+
*/
|
|
1049
|
+
interface FeatureExtractionInput {
|
|
1050
|
+
/**
|
|
1051
|
+
* The text to embed.
|
|
1052
|
+
*/
|
|
1053
|
+
inputs: string;
|
|
1054
|
+
normalize?: boolean;
|
|
1055
|
+
/**
|
|
1056
|
+
* The name of the prompt that should be used by for encoding. If not set, no prompt
|
|
1057
|
+
* will be applied.
|
|
1058
|
+
*
|
|
1059
|
+
* Must be a key in the `Sentence Transformers` configuration `prompts` dictionary.
|
|
1060
|
+
*
|
|
1061
|
+
* For example if ``prompt_name`` is "query" and the ``prompts`` is {"query": "query: ",
|
|
1062
|
+
* ...},
|
|
1063
|
+
* then the sentence "What is the capital of France?" will be encoded as
|
|
1064
|
+
* "query: What is the capital of France?" because the prompt text will be prepended before
|
|
1065
|
+
* any text to encode.
|
|
1066
|
+
*/
|
|
1067
|
+
prompt_name?: string;
|
|
1068
|
+
truncate?: boolean;
|
|
1069
|
+
truncation_direction?: FeatureExtractionInputTruncationDirection;
|
|
1070
|
+
[property: string]: unknown;
|
|
1071
|
+
}
|
|
1072
|
+
type FeatureExtractionInputTruncationDirection = "Left" | "Right";
|
|
1073
|
+
|
|
1074
|
+
/**
|
|
1075
|
+
* Inference code generated from the JSON schema spec in ./spec
|
|
1076
|
+
*
|
|
1077
|
+
* Using src/scripts/inference-codegen
|
|
1078
|
+
*/
|
|
1079
|
+
/**
|
|
1080
|
+
* Inputs for Fill Mask inference
|
|
1081
|
+
*/
|
|
1082
|
+
interface FillMaskInput {
|
|
1083
|
+
/**
|
|
1084
|
+
* The text with masked tokens
|
|
1085
|
+
*/
|
|
1086
|
+
inputs: string;
|
|
1087
|
+
/**
|
|
1088
|
+
* Additional inference parameters
|
|
1089
|
+
*/
|
|
1090
|
+
parameters?: FillMaskParameters;
|
|
1091
|
+
[property: string]: unknown;
|
|
1092
|
+
}
|
|
1093
|
+
/**
|
|
1094
|
+
* Additional inference parameters
|
|
1095
|
+
*
|
|
1096
|
+
* Additional inference parameters for Fill Mask
|
|
1097
|
+
*/
|
|
1098
|
+
interface FillMaskParameters {
|
|
1099
|
+
/**
|
|
1100
|
+
* When passed, the model will limit the scores to the passed targets instead of looking up
|
|
1101
|
+
* in the whole vocabulary. If the provided targets are not in the model vocab, they will be
|
|
1102
|
+
* tokenized and the first resulting token will be used (with a warning, and that might be
|
|
1103
|
+
* slower).
|
|
1104
|
+
*/
|
|
1105
|
+
targets?: string[];
|
|
1106
|
+
/**
|
|
1107
|
+
* When passed, overrides the number of predictions to return.
|
|
1108
|
+
*/
|
|
1109
|
+
top_k?: number;
|
|
1110
|
+
[property: string]: unknown;
|
|
1111
|
+
}
|
|
1112
|
+
type FillMaskOutput = FillMaskOutputElement[];
|
|
1113
|
+
/**
|
|
1114
|
+
* Outputs of inference for the Fill Mask task
|
|
1115
|
+
*/
|
|
1116
|
+
interface FillMaskOutputElement {
|
|
1117
|
+
/**
|
|
1118
|
+
* The corresponding probability
|
|
1119
|
+
*/
|
|
1120
|
+
score: number;
|
|
1121
|
+
/**
|
|
1122
|
+
* The corresponding input with the mask token prediction.
|
|
1123
|
+
*/
|
|
1124
|
+
sequence: string;
|
|
1125
|
+
/**
|
|
1126
|
+
* The predicted token id (to replace the masked one).
|
|
1127
|
+
*/
|
|
1128
|
+
token: number;
|
|
1129
|
+
tokenStr: unknown;
|
|
1130
|
+
/**
|
|
1131
|
+
* The predicted token (to replace the masked one).
|
|
1132
|
+
*/
|
|
1133
|
+
token_str?: string;
|
|
1134
|
+
[property: string]: unknown;
|
|
1135
|
+
}
|
|
1136
|
+
|
|
1137
|
+
/**
|
|
1138
|
+
* Inference code generated from the JSON schema spec in ./spec
|
|
1139
|
+
*
|
|
1140
|
+
* Using src/scripts/inference-codegen
|
|
1141
|
+
*/
|
|
1142
|
+
/**
|
|
1143
|
+
* Inputs for Image Classification inference
|
|
1144
|
+
*/
|
|
1145
|
+
interface ImageClassificationInput {
|
|
1146
|
+
/**
|
|
1147
|
+
* The input image data as a base64-encoded string. If no `parameters` are provided, you can
|
|
1148
|
+
* also provide the image data as a raw bytes payload.
|
|
1149
|
+
*/
|
|
1150
|
+
inputs: string;
|
|
1151
|
+
/**
|
|
1152
|
+
* Additional inference parameters
|
|
1153
|
+
*/
|
|
1154
|
+
parameters?: ImageClassificationParameters;
|
|
1155
|
+
[property: string]: unknown;
|
|
1156
|
+
}
|
|
1157
|
+
/**
|
|
1158
|
+
* Additional inference parameters
|
|
1159
|
+
*
|
|
1160
|
+
* Additional inference parameters for Image Classification
|
|
1161
|
+
*/
|
|
1162
|
+
interface ImageClassificationParameters {
|
|
1163
|
+
function_to_apply?: ClassificationOutputTransform$2;
|
|
1164
|
+
/**
|
|
1165
|
+
* When specified, limits the output to the top K most probable classes.
|
|
1166
|
+
*/
|
|
1167
|
+
top_k?: number;
|
|
1168
|
+
[property: string]: unknown;
|
|
1169
|
+
}
|
|
1170
|
+
/**
|
|
1171
|
+
* The function to apply to the model outputs in order to retrieve the scores.
|
|
1172
|
+
*/
|
|
1173
|
+
type ClassificationOutputTransform$2 = "sigmoid" | "softmax" | "none";
|
|
1174
|
+
type ImageClassificationOutput = ImageClassificationOutputElement[];
|
|
1175
|
+
/**
|
|
1176
|
+
* Outputs of inference for the Image Classification task
|
|
1177
|
+
*/
|
|
1178
|
+
interface ImageClassificationOutputElement {
|
|
1179
|
+
/**
|
|
1180
|
+
* The predicted class label.
|
|
1181
|
+
*/
|
|
1182
|
+
label: string;
|
|
1183
|
+
/**
|
|
1184
|
+
* The corresponding probability.
|
|
1185
|
+
*/
|
|
1186
|
+
score: number;
|
|
1187
|
+
[property: string]: unknown;
|
|
1188
|
+
}
|
|
1189
|
+
|
|
+/**
+ * Inference code generated from the JSON schema spec in ./spec
+ *
+ * Using src/scripts/inference-codegen
+ */
+/**
+ * Inputs for Image To Image inference
+ */
+interface ImageToImageInput {
+    /**
+     * The input image data as a base64-encoded string. If no `parameters` are provided, you can
+     * also provide the image data as a raw bytes payload.
+     */
+    inputs: string;
+    /**
+     * Additional inference parameters
+     */
+    parameters?: ImageToImageParameters;
+    [property: string]: unknown;
+}
+/**
+ * Additional inference parameters
+ *
+ * Additional inference parameters for Image To Image
+ */
+interface ImageToImageParameters {
+    /**
+     * For diffusion models. A higher guidance scale value encourages the model to generate
+     * images closely linked to the text prompt at the expense of lower image quality.
+     */
+    guidance_scale?: number;
+    /**
+     * One or several prompts to guide what NOT to include in image generation.
+     */
+    negative_prompt?: string[];
+    /**
+     * For diffusion models. The number of denoising steps. More denoising steps usually lead to
+     * a higher quality image at the expense of slower inference.
+     */
+    num_inference_steps?: number;
+    /**
+     * The size in pixels of the output image.
+     */
+    target_size?: TargetSize$1;
+    [property: string]: unknown;
+}
+/**
+ * The size in pixels of the output image.
+ */
+interface TargetSize$1 {
+    height: number;
+    width: number;
+    [property: string]: unknown;
+}
+/**
+ * Outputs of inference for the Image To Image task
+ */
+interface ImageToImageOutput {
+    /**
+     * The output image returned as raw bytes in the payload.
+     */
+    image?: unknown;
+    [property: string]: unknown;
+}
+
+/**
+ * Inference code generated from the JSON schema spec in ./spec
+ *
+ * Using src/scripts/inference-codegen
+ */
+/**
+ * Inputs for Image To Text inference
+ */
+interface ImageToTextInput {
+    /**
+     * The input image data
+     */
+    inputs: unknown;
+    /**
+     * Additional inference parameters
+     */
+    parameters?: ImageToTextParameters;
+    [property: string]: unknown;
+}
+/**
+ * Additional inference parameters
+ *
+ * Additional inference parameters for Image To Text
+ */
+interface ImageToTextParameters {
+    /**
+     * Parametrization of the text generation process
+     */
+    generation_parameters?: GenerationParameters$1;
+    /**
+     * The maximum number of tokens to generate.
+     */
+    max_new_tokens?: number;
+    [property: string]: unknown;
+}
+/**
+ * Parametrization of the text generation process
+ *
+ * Ad-hoc parametrization of the text generation process
+ */
+interface GenerationParameters$1 {
+    /**
+     * Whether to use sampling instead of greedy decoding when generating new tokens.
+     */
+    do_sample?: boolean;
+    /**
+     * Controls the stopping condition for beam-based methods.
+     */
+    early_stopping?: EarlyStoppingUnion$1;
+    /**
+     * If set to a float strictly between 0 and 1, only tokens with a conditional probability
+     * greater than epsilon_cutoff will be sampled. In the paper, suggested values range from
+     * 3e-4 to 9e-4, depending on the size of the model. See [Truncation Sampling as Language
+     * Model Desmoothing](https://hf.co/papers/2210.15191) for more details.
+     */
+    epsilon_cutoff?: number;
+    /**
+     * Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to a
+     * float strictly between 0 and 1, a token is only considered if it is greater than either
+     * eta_cutoff or sqrt(eta_cutoff) * exp(-entropy(softmax(next_token_logits))). The latter
+     * term is intuitively the expected next token probability, scaled by sqrt(eta_cutoff). In
+     * the paper, suggested values range from 3e-4 to 2e-3, depending on the size of the model.
+     * See [Truncation Sampling as Language Model Desmoothing](https://hf.co/papers/2210.15191)
+     * for more details.
+     */
+    eta_cutoff?: number;
+    /**
+     * The maximum length (in tokens) of the generated text, including the input.
+     */
+    max_length?: number;
+    /**
+     * The maximum number of tokens to generate. Takes precedence over max_length.
+     */
+    max_new_tokens?: number;
+    /**
+     * The minimum length (in tokens) of the generated text, including the input.
+     */
+    min_length?: number;
+    /**
+     * The minimum number of tokens to generate. Takes precedence over min_length.
+     */
+    min_new_tokens?: number;
+    /**
+     * Number of groups to divide num_beams into in order to ensure diversity among different
+     * groups of beams. See [this paper](https://hf.co/papers/1610.02424) for more details.
+     */
+    num_beam_groups?: number;
+    /**
+     * Number of beams to use for beam search.
+     */
+    num_beams?: number;
+    /**
+     * The value balances the model confidence and the degeneration penalty in contrastive
+     * search decoding.
+     */
+    penalty_alpha?: number;
+    /**
+     * The value used to modulate the next token probabilities.
+     */
+    temperature?: number;
+    /**
+     * The number of highest probability vocabulary tokens to keep for top-k-filtering.
+     */
+    top_k?: number;
+    /**
+     * If set to a float < 1, only the smallest set of most probable tokens with probabilities
+     * that add up to top_p or higher are kept for generation.
+     */
+    top_p?: number;
+    /**
+     * Local typicality measures how similar the conditional probability of predicting a target
+     * token next is to the expected conditional probability of predicting a random token next,
+     * given the partial text already generated. If set to a float < 1, the smallest set of the
+     * most locally typical tokens with probabilities that add up to typical_p or higher are
+     * kept for generation. See [this paper](https://hf.co/papers/2202.00666) for more details.
+     */
+    typical_p?: number;
+    /**
+     * Whether the model should use the past last key/values attentions to speed up decoding.
+     */
+    use_cache?: boolean;
+    [property: string]: unknown;
+}
+/**
+ * Controls the stopping condition for beam-based methods.
+ */
+type EarlyStoppingUnion$1 = boolean | "never";
+/**
+ * Outputs of inference for the Image To Text task
+ */
+interface ImageToTextOutput {
+    generatedText: unknown;
+    /**
+     * The generated text.
+     */
+    generated_text?: string;
+    [property: string]: unknown;
+}
+
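
`GenerationParameters$1` mirrors the usual text-generation knobs; below is a minimal sketch of a nucleus-sampling configuration. The values are illustrative, not recommendations from the package.

declare const imageBytes: unknown; // raw image payload; `inputs` is typed as unknown

const sampling: GenerationParameters$1 = {
    do_sample: true,
    temperature: 0.7,
    top_p: 0.9,         // keep the smallest token set whose probabilities sum to >= 0.9
    max_new_tokens: 64, // takes precedence over max_length
};

const captionRequest: ImageToTextInput = {
    inputs: imageBytes,
    parameters: { generation_parameters: sampling },
};
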
+/**
+ * Inference code generated from the JSON schema spec in ./spec
+ *
+ * Using src/scripts/inference-codegen
+ */
+/**
+ * Inputs for Image Segmentation inference
+ */
+interface ImageSegmentationInput {
+    /**
+     * The input image data as a base64-encoded string. If no `parameters` are provided, you can
+     * also provide the image data as a raw bytes payload.
+     */
+    inputs: string;
+    /**
+     * Additional inference parameters
+     */
+    parameters?: ImageSegmentationParameters;
+    [property: string]: unknown;
+}
+/**
+ * Additional inference parameters
+ *
+ * Additional inference parameters for Image Segmentation
+ */
+interface ImageSegmentationParameters {
+    /**
+     * Threshold to use when turning the predicted masks into binary values.
+     */
+    mask_threshold?: number;
+    /**
+     * Mask overlap threshold to eliminate small, disconnected segments.
+     */
+    overlap_mask_area_threshold?: number;
+    /**
+     * Segmentation task to be performed, depending on model capabilities.
+     */
+    subtask?: ImageSegmentationSubtask;
+    /**
+     * Probability threshold to filter out predicted masks.
+     */
+    threshold?: number;
+    [property: string]: unknown;
+}
+/**
+ * Segmentation task to be performed, depending on model capabilities.
+ */
+type ImageSegmentationSubtask = "instance" | "panoptic" | "semantic";
+type ImageSegmentationOutput = ImageSegmentationOutputElement[];
+/**
+ * Outputs of inference for the Image Segmentation task
+ *
+ * A predicted mask / segment
+ */
+interface ImageSegmentationOutputElement {
+    /**
+     * The label of the predicted segment.
+     */
+    label: string;
+    /**
+     * The corresponding mask as a black-and-white image (base64-encoded).
+     */
+    mask: string;
+    /**
+     * The score, or degree of confidence, the model has in this segment.
+     */
+    score?: number;
+    [property: string]: unknown;
+}
+
+/**
+ * Inference code generated from the JSON schema spec in ./spec
+ *
+ * Using src/scripts/inference-codegen
+ */
+/**
+ * Inputs for Object Detection inference
+ */
+interface ObjectDetectionInput {
+    /**
+     * The input image data as a base64-encoded string. If no `parameters` are provided, you can
+     * also provide the image data as a raw bytes payload.
+     */
+    inputs: string;
+    /**
+     * Additional inference parameters
+     */
+    parameters?: ObjectDetectionParameters;
+    [property: string]: unknown;
+}
+/**
+ * Additional inference parameters
+ *
+ * Additional inference parameters for Object Detection
+ */
+interface ObjectDetectionParameters {
+    /**
+     * The probability necessary to make a prediction.
+     */
+    threshold?: number;
+    [property: string]: unknown;
+}
+/**
+ * The predicted bounding box. Coordinates are relative to the top left corner of the input
+ * image.
+ */
+interface BoundingBox$1 {
+    /**
+     * The x-coordinate of the bottom-right corner of the bounding box.
+     */
+    xmax: number;
+    /**
+     * The x-coordinate of the top-left corner of the bounding box.
+     */
+    xmin: number;
+    /**
+     * The y-coordinate of the bottom-right corner of the bounding box.
+     */
+    ymax: number;
+    /**
+     * The y-coordinate of the top-left corner of the bounding box.
+     */
+    ymin: number;
+    [property: string]: unknown;
+}
+type ObjectDetectionOutput = ObjectDetectionOutputElement[];
+/**
+ * Outputs of inference for the Object Detection task
+ */
+interface ObjectDetectionOutputElement {
+    /**
+     * The predicted bounding box. Coordinates are relative to the top left corner of the input
+     * image.
+     */
+    box: BoundingBox$1;
+    /**
+     * The predicted label for the bounding box.
+     */
+    label: string;
+    /**
+     * The associated score / probability.
+     */
+    score: number;
+    [property: string]: unknown;
+}
+
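
Since both corners are documented relative to the image's top-left origin, standard box math applies directly; a short sketch:

// Intersection-over-union for two boxes, illustrating the documented corner convention
// (xmin/ymin = top-left, xmax/ymax = bottom-right).
function iou(a: BoundingBox$1, b: BoundingBox$1): number {
    const w = Math.max(0, Math.min(a.xmax, b.xmax) - Math.max(a.xmin, b.xmin));
    const h = Math.max(0, Math.min(a.ymax, b.ymax) - Math.max(a.ymin, b.ymin));
    const inter = w * h;
    const area = (box: BoundingBox$1) => (box.xmax - box.xmin) * (box.ymax - box.ymin);
    const union = area(a) + area(b) - inter;
    return union > 0 ? inter / union : 0;
}
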
+/**
+ * Inference code generated from the JSON schema spec in ./spec
+ *
+ * Using src/scripts/inference-codegen
+ */
+/**
+ * Inputs for Depth Estimation inference
+ */
+interface DepthEstimationInput {
+    /**
+     * The input image data
+     */
+    inputs: unknown;
+    /**
+     * Additional inference parameters
+     */
+    parameters?: {
+        [key: string]: unknown;
+    };
+    [property: string]: unknown;
+}
+/**
+ * Outputs of inference for the Depth Estimation task
+ */
+interface DepthEstimationOutput {
+    /**
+     * The predicted depth as an image
+     */
+    depth?: unknown;
+    /**
+     * The predicted depth as a tensor
+     */
+    predicted_depth?: unknown;
+    [property: string]: unknown;
+}
+
+/**
+ * Inference code generated from the JSON schema spec in ./spec
+ *
+ * Using src/scripts/inference-codegen
+ */
+/**
+ * Inputs for Question Answering inference
+ */
+interface QuestionAnsweringInput {
+    /**
+     * One (context, question) pair to answer
+     */
+    inputs: QuestionAnsweringInputData;
+    /**
+     * Additional inference parameters
+     */
+    parameters?: QuestionAnsweringParameters;
+    [property: string]: unknown;
+}
+/**
+ * One (context, question) pair to answer
+ */
+interface QuestionAnsweringInputData {
+    /**
+     * The context to be used for answering the question
+     */
+    context: string;
+    /**
+     * The question to be answered
+     */
+    question: string;
+    [property: string]: unknown;
+}
+/**
+ * Additional inference parameters
+ *
+ * Additional inference parameters for Question Answering
+ */
+interface QuestionAnsweringParameters {
+    /**
+     * Attempts to align the answer to real words. Improves quality on space-separated
+     * languages. Might hurt on non-space-separated languages (like Japanese or Chinese).
+     */
+    align_to_words?: boolean;
+    /**
+     * If the context is too long to fit with the question for the model, it will be split into
+     * several chunks with some overlap. This argument controls the size of that overlap.
+     */
+    doc_stride?: number;
+    /**
+     * Whether to accept impossible as an answer.
+     */
+    handle_impossible_answer?: boolean;
+    /**
+     * The maximum length of predicted answers (e.g., only answers with a shorter length are
+     * considered).
+     */
+    max_answer_len?: number;
+    /**
+     * The maximum length of the question after tokenization. It will be truncated if needed.
+     */
+    max_question_len?: number;
+    /**
+     * The maximum length of the total sentence (context + question) in tokens of each chunk
+     * passed to the model. The context will be split into several chunks (using doc_stride as
+     * overlap) if needed.
+     */
+    max_seq_len?: number;
+    /**
+     * The number of answers to return (chosen by order of likelihood). Note that fewer than
+     * top_k answers are returned if there are not enough options available within the context.
+     */
+    top_k?: number;
+    [property: string]: unknown;
+}
+type QuestionAnsweringOutput = QuestionAnsweringOutputElement[];
+/**
+ * Outputs of inference for the Question Answering task
+ */
+interface QuestionAnsweringOutputElement {
+    /**
+     * The answer to the question.
+     */
+    answer: string;
+    /**
+     * The character position in the input where the answer ends.
+     */
+    end: number;
+    /**
+     * The probability associated to the answer.
+     */
+    score: number;
+    /**
+     * The character position in the input where the answer begins.
+     */
+    start: number;
+    [property: string]: unknown;
+}
+
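
The `start`/`end` offsets are documented as character positions in the input, so an answer can be validated by slicing the context; a minimal sketch:

const qa: QuestionAnsweringInput = {
    inputs: {
        context: "TypeScript was first released in October 2012.",
        question: "When was TypeScript released?",
    },
    parameters: { top_k: 1 },
};

// Holds whenever the model's offsets are consistent with the submitted context.
const spanMatches = (context: string, el: QuestionAnsweringOutputElement) =>
    context.slice(el.start, el.end) === el.answer;
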
+/**
+ * Inference code generated from the JSON schema spec in ./spec
+ *
+ * Using src/scripts/inference-codegen
+ */
+type SentenceSimilarityOutput = number[];
+/**
+ * Inputs for Sentence similarity inference
+ */
+interface SentenceSimilarityInput {
+    inputs: SentenceSimilarityInputData;
+    /**
+     * Additional inference parameters
+     */
+    parameters?: {
+        [key: string]: unknown;
+    };
+    [property: string]: unknown;
+}
+interface SentenceSimilarityInputData {
+    /**
+     * A list of strings which will be compared against the source_sentence.
+     */
+    sentences: string[];
+    /**
+     * The string that you wish to compare the other strings with. This can be a phrase,
+     * sentence, or longer passage, depending on the model being used.
+     */
+    sourceSentence: string;
+    [property: string]: unknown;
+}
+
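
A sketch of a request; the output is a plain `number[]`, commonly understood as one score per entry of `sentences` by position — an assumption about the serving backend, not something these types enforce:

const simRequest: SentenceSimilarityInput = {
    inputs: {
        sourceSentence: "A cat sits on the mat.",
        sentences: ["A feline rests on a rug.", "Stock markets fell today."],
    },
};

// Pair each candidate with its score and sort best-first.
const rank = (scores: SentenceSimilarityOutput, sentences: string[]) =>
    sentences.map((s, i) => ({ sentence: s, score: scores[i] }))
        .sort((a, b) => b.score - a.score);
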
+/**
+ * Inference code generated from the JSON schema spec in ./spec
+ *
+ * Using src/scripts/inference-codegen
+ */
+/**
+ * Inputs for Summarization inference
+ */
+interface SummarizationInput {
+    /**
+     * The input text to summarize.
+     */
+    inputs: string;
+    /**
+     * Additional inference parameters.
+     */
+    parameters?: SummarizationParameters;
+    [property: string]: unknown;
+}
+/**
+ * Additional inference parameters.
+ *
+ * Additional inference parameters for summarization.
+ */
+interface SummarizationParameters {
+    /**
+     * Whether to clean up the potential extra spaces in the text output.
+     */
+    clean_up_tokenization_spaces?: boolean;
+    /**
+     * Additional parametrization of the text generation algorithm.
+     */
+    generate_parameters?: {
+        [key: string]: unknown;
+    };
+    /**
+     * The truncation strategy to use.
+     */
+    truncation?: SummarizationTruncationStrategy;
+    [property: string]: unknown;
+}
+/**
+ * The truncation strategy to use.
+ */
+type SummarizationTruncationStrategy = "do_not_truncate" | "longest_first" | "only_first" | "only_second";
+/**
+ * Outputs of inference for the Summarization task
+ */
+interface SummarizationOutput {
+    /**
+     * The summarized text.
+     */
+    summary_text: string;
+    [property: string]: unknown;
+}
+
+/**
+ * Inference code generated from the JSON schema spec in ./spec
+ *
+ * Using src/scripts/inference-codegen
+ */
+/**
+ * Inputs for Table Question Answering inference
+ */
+interface TableQuestionAnsweringInput {
+    /**
+     * One (table, question) pair to answer
+     */
+    inputs: TableQuestionAnsweringInputData;
+    /**
+     * Additional inference parameters
+     */
+    parameters?: {
+        [key: string]: unknown;
+    };
+    [property: string]: unknown;
+}
+/**
+ * One (table, question) pair to answer
+ */
+interface TableQuestionAnsweringInputData {
+    /**
+     * The question to be answered about the table
+     */
+    question: string;
+    /**
+     * The table to serve as context for the questions
+     */
+    table: {
+        [key: string]: string[];
+    };
+    [property: string]: unknown;
+}
+type TableQuestionAnsweringOutput = TableQuestionAnsweringOutputElement[];
+/**
+ * Outputs of inference for the Table Question Answering task
+ */
+interface TableQuestionAnsweringOutputElement {
+    /**
+     * If the model has an aggregator, this returns the aggregator.
+     */
+    aggregator?: string;
+    /**
+     * The answer to the question given the table. If there is an aggregator, the answer will be
+     * preceded by `AGGREGATOR >`.
+     */
+    answer: string;
+    /**
+     * List of strings made up of the answer cell values.
+     */
+    cells: string[];
+    /**
+     * Coordinates of the cells of the answers.
+     */
+    coordinates: Array<number[]>;
+    [property: string]: unknown;
+}
+
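
Note that `table` is column-oriented: each key is a column header mapped to that column's cells. A sketch; the answer shape shown in the trailing comment is illustrative:

const tqa: TableQuestionAnsweringInput = {
    inputs: {
        question: "How many goals did Ada score?",
        table: {
            Player: ["Ada", "Grace"],
            Goals: ["12", "7"],
        },
    },
};

// A possible output element, per the docs above:
// { aggregator: "SUM", answer: "SUM > 12", cells: ["12"], coordinates: [[0, 1]] }
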
+/**
+ * Inference code generated from the JSON schema spec in ./spec
+ *
+ * Using src/scripts/inference-codegen
+ */
+/**
+ * Inputs for Text To Image inference
+ */
+interface TextToImageInput {
+    /**
+     * The input text data (sometimes called "prompt")
+     */
+    inputs: string;
+    /**
+     * Additional inference parameters
+     */
+    parameters?: TextToImageParameters;
+    [property: string]: unknown;
+}
+/**
+ * Additional inference parameters
+ *
+ * Additional inference parameters for Text To Image
+ */
+interface TextToImageParameters {
+    /**
+     * A higher guidance scale value encourages the model to generate images closely linked to
+     * the text prompt, but values too high may cause saturation and other artifacts.
+     */
+    guidance_scale?: number;
+    /**
+     * One or several prompts to guide what NOT to include in image generation.
+     */
+    negative_prompt?: string[];
+    /**
+     * The number of denoising steps. More denoising steps usually lead to a higher quality
+     * image at the expense of slower inference.
+     */
+    num_inference_steps?: number;
+    /**
+     * Override the scheduler with a compatible one.
+     */
+    scheduler?: string;
+    /**
+     * Seed for the random number generator.
+     */
+    seed?: number;
+    /**
+     * The size in pixels of the output image
+     */
+    target_size?: TargetSize;
+    [property: string]: unknown;
+}
+/**
+ * The size in pixels of the output image
+ */
+interface TargetSize {
+    height: number;
+    width: number;
+    [property: string]: unknown;
+}
+/**
+ * Outputs of inference for the Text To Image task
+ */
+interface TextToImageOutput {
+    /**
+     * The generated image returned as raw bytes in the payload.
+     */
+    image: unknown;
+    [property: string]: unknown;
+}
+
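
A sketch of a reproducible request typed against `TextToImageParameters`; all values are illustrative:

const t2i: TextToImageInput = {
    inputs: "a watercolor fox in a snowy forest",
    parameters: {
        negative_prompt: ["blurry", "low quality"],
        guidance_scale: 7.5,
        num_inference_steps: 30,
        seed: 42, // fixing the seed makes generation repeatable for a given model/scheduler
        target_size: { width: 768, height: 512 },
    },
};
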
+/**
+ * Inference code generated from the JSON schema spec in ./spec
+ *
+ * Using src/scripts/inference-codegen
+ */
+/**
+ * Inputs for Text To Speech inference
+ */
+interface TextToSpeechInput {
+    /**
+     * The input text data
+     */
+    inputs: string;
+    /**
+     * Additional inference parameters
+     */
+    parameters?: TextToSpeechParameters;
+    [property: string]: unknown;
+}
+/**
+ * Additional inference parameters
+ *
+ * Additional inference parameters for Text To Speech
+ */
+interface TextToSpeechParameters {
+    /**
+     * Parametrization of the text generation process
+     */
+    generation_parameters?: GenerationParameters;
+    [property: string]: unknown;
+}
+/**
+ * Parametrization of the text generation process
+ *
+ * Ad-hoc parametrization of the text generation process
+ */
+interface GenerationParameters {
+    /**
+     * Whether to use sampling instead of greedy decoding when generating new tokens.
+     */
+    do_sample?: boolean;
+    /**
+     * Controls the stopping condition for beam-based methods.
+     */
+    early_stopping?: EarlyStoppingUnion;
+    /**
+     * If set to a float strictly between 0 and 1, only tokens with a conditional probability
+     * greater than epsilon_cutoff will be sampled. In the paper, suggested values range from
+     * 3e-4 to 9e-4, depending on the size of the model. See [Truncation Sampling as Language
+     * Model Desmoothing](https://hf.co/papers/2210.15191) for more details.
+     */
+    epsilon_cutoff?: number;
+    /**
+     * Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to a
+     * float strictly between 0 and 1, a token is only considered if it is greater than either
+     * eta_cutoff or sqrt(eta_cutoff) * exp(-entropy(softmax(next_token_logits))). The latter
+     * term is intuitively the expected next token probability, scaled by sqrt(eta_cutoff). In
+     * the paper, suggested values range from 3e-4 to 2e-3, depending on the size of the model.
+     * See [Truncation Sampling as Language Model Desmoothing](https://hf.co/papers/2210.15191)
+     * for more details.
+     */
+    eta_cutoff?: number;
+    /**
+     * The maximum length (in tokens) of the generated text, including the input.
+     */
+    max_length?: number;
+    /**
+     * The maximum number of tokens to generate. Takes precedence over max_length.
+     */
+    max_new_tokens?: number;
+    /**
+     * The minimum length (in tokens) of the generated text, including the input.
+     */
+    min_length?: number;
+    /**
+     * The minimum number of tokens to generate. Takes precedence over min_length.
+     */
+    min_new_tokens?: number;
+    /**
+     * Number of groups to divide num_beams into in order to ensure diversity among different
+     * groups of beams. See [this paper](https://hf.co/papers/1610.02424) for more details.
+     */
+    num_beam_groups?: number;
+    /**
+     * Number of beams to use for beam search.
+     */
+    num_beams?: number;
+    /**
+     * The value balances the model confidence and the degeneration penalty in contrastive
+     * search decoding.
+     */
+    penalty_alpha?: number;
+    /**
+     * The value used to modulate the next token probabilities.
+     */
+    temperature?: number;
+    /**
+     * The number of highest probability vocabulary tokens to keep for top-k-filtering.
+     */
+    top_k?: number;
+    /**
+     * If set to a float < 1, only the smallest set of most probable tokens with probabilities
+     * that add up to top_p or higher are kept for generation.
+     */
+    top_p?: number;
+    /**
+     * Local typicality measures how similar the conditional probability of predicting a target
+     * token next is to the expected conditional probability of predicting a random token next,
+     * given the partial text already generated. If set to a float < 1, the smallest set of the
+     * most locally typical tokens with probabilities that add up to typical_p or higher are
+     * kept for generation. See [this paper](https://hf.co/papers/2202.00666) for more details.
+     */
+    typical_p?: number;
+    /**
+     * Whether the model should use the past last key/values attentions to speed up decoding.
+     */
+    use_cache?: boolean;
+    [property: string]: unknown;
+}
+/**
+ * Controls the stopping condition for beam-based methods.
+ */
+type EarlyStoppingUnion = boolean | "never";
+/**
+ * Outputs for Text to Speech inference
+ *
+ * Outputs of inference for the Text To Audio task
+ */
+interface TextToSpeechOutput {
+    /**
+     * The generated audio waveform.
+     */
+    audio: unknown;
+    samplingRate: unknown;
+    /**
+     * The sampling rate of the generated audio waveform.
+     */
+    sampling_rate?: number;
+    [property: string]: unknown;
+}
+
+/**
+ * Inference code generated from the JSON schema spec in ./spec
+ *
+ * Using src/scripts/inference-codegen
+ */
+/**
+ * Inputs for Token Classification inference
+ */
+interface TokenClassificationInput {
+    /**
+     * The input text data
+     */
+    inputs: string;
+    /**
+     * Additional inference parameters
+     */
+    parameters?: TokenClassificationParameters;
+    [property: string]: unknown;
+}
+/**
+ * Additional inference parameters
+ *
+ * Additional inference parameters for Token Classification
+ */
+interface TokenClassificationParameters {
+    /**
+     * The strategy used to fuse tokens based on model predictions
+     */
+    aggregation_strategy?: TokenClassificationAggregationStrategy;
+    /**
+     * A list of labels to ignore
+     */
+    ignore_labels?: string[];
+    /**
+     * The number of overlapping tokens between chunks when splitting the input text.
+     */
+    stride?: number;
+    [property: string]: unknown;
+}
+/**
+ * Do not aggregate tokens
+ *
+ * Group consecutive tokens with the same label in a single entity.
+ *
+ * Similar to "simple", also preserves word integrity (uses the label predicted for the first
+ * token in a word).
+ *
+ * Similar to "simple", also preserves word integrity (uses the label with the highest
+ * score, averaged across the word's tokens).
+ *
+ * Similar to "simple", also preserves word integrity (uses the label with the highest score
+ * across the word's tokens).
+ */
+type TokenClassificationAggregationStrategy = "none" | "simple" | "first" | "average" | "max";
+type TokenClassificationOutput = TokenClassificationOutputElement[];
+/**
+ * Outputs of inference for the Token Classification task
+ */
+interface TokenClassificationOutputElement {
+    /**
+     * The character position in the input where this group ends.
+     */
+    end: number;
+    /**
+     * The predicted label for a single token
+     */
+    entity?: string;
+    /**
+     * The predicted label for a group of one or more tokens
+     */
+    entity_group?: string;
+    /**
+     * The associated score / probability
+     */
+    score: number;
+    /**
+     * The character position in the input where this group begins.
+     */
+    start: number;
+    /**
+     * The corresponding text
+     */
+    word: string;
+    [property: string]: unknown;
+}
+
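
Per the docs above, aggregated strategies populate `entity_group` while `"none"` yields per-token `entity` labels, and the offsets index into the submitted string; a sketch:

const ner: TokenClassificationInput = {
    inputs: "Ada Lovelace lived in London.",
    parameters: { aggregation_strategy: "simple", ignore_labels: ["O"] },
};

// Render one predicted group, e.g. `PER: "Ada Lovelace" (0.99)`.
const render = (text: string, el: TokenClassificationOutputElement) =>
    `${el.entity_group ?? el.entity}: "${text.slice(el.start, el.end)}" (${el.score.toFixed(2)})`;
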
+/**
+ * Inference code generated from the JSON schema spec in ./spec
+ *
+ * Using src/scripts/inference-codegen
+ */
+/**
+ * Inputs for Translation inference
+ */
+interface TranslationInput {
+    /**
+     * The text to translate.
+     */
+    inputs: string;
+    /**
+     * Additional inference parameters
+     */
+    parameters?: TranslationParameters;
+    [property: string]: unknown;
+}
+/**
+ * Additional inference parameters
+ *
+ * Additional inference parameters for Translation
+ */
+interface TranslationParameters {
+    /**
+     * Whether to clean up the potential extra spaces in the text output.
+     */
+    clean_up_tokenization_spaces?: boolean;
+    /**
+     * Additional parametrization of the text generation algorithm.
+     */
+    generate_parameters?: {
+        [key: string]: unknown;
+    };
+    /**
+     * The source language of the text. Required for models that can translate from multiple
+     * languages.
+     */
+    src_lang?: string;
+    /**
+     * Target language to translate to. Required for models that can translate to multiple
+     * languages.
+     */
+    tgt_lang?: string;
+    /**
+     * The truncation strategy to use.
+     */
+    truncation?: TranslationTruncationStrategy;
+    [property: string]: unknown;
+}
+/**
+ * The truncation strategy to use.
+ */
+type TranslationTruncationStrategy = "do_not_truncate" | "longest_first" | "only_first" | "only_second";
+/**
+ * Outputs of inference for the Translation task
+ */
+interface TranslationOutput {
+    /**
+     * The translated text.
+     */
+    translation_text: string;
+    [property: string]: unknown;
+}
+
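
For multilingual models, both language codes must be supplied; the exact code format (e.g. "en" vs FLORES-style "eng_Latn") depends on the model, not on these types. A sketch:

const translate: TranslationInput = {
    inputs: "Hello, world!",
    parameters: { src_lang: "eng_Latn", tgt_lang: "fra_Latn" },
};
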
+/**
+ * Inference code generated from the JSON schema spec in ./spec
+ *
+ * Using src/scripts/inference-codegen
+ */
+/**
+ * Inputs for Text Classification inference
+ */
+interface TextClassificationInput {
+    /**
+     * The text to classify
+     */
+    inputs: string;
+    /**
+     * Additional inference parameters
+     */
+    parameters?: TextClassificationParameters;
+    [property: string]: unknown;
+}
+/**
+ * Additional inference parameters
+ *
+ * Additional inference parameters for Text Classification
+ */
+interface TextClassificationParameters {
+    function_to_apply?: ClassificationOutputTransform$1;
+    /**
+     * When specified, limits the output to the top K most probable classes.
+     */
+    top_k?: number;
+    [property: string]: unknown;
+}
+/**
+ * The function to apply to the model outputs in order to retrieve the scores.
+ */
+type ClassificationOutputTransform$1 = "sigmoid" | "softmax" | "none";
+type TextClassificationOutput = TextClassificationOutputElement[];
+/**
+ * Outputs of inference for the Text Classification task
+ */
+interface TextClassificationOutputElement {
+    /**
+     * The predicted class label.
+     */
+    label: string;
+    /**
+     * The corresponding probability.
+     */
+    score: number;
+    [property: string]: unknown;
+}
+
+/**
+ * Inference code generated from the JSON schema spec in ./spec
+ *
+ * Using src/scripts/inference-codegen
+ */
+/**
+ * Text Generation Input.
+ *
+ * Auto-generated from TGI specs.
+ * For more details, check out
+ * https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts.
+ */
+interface TextGenerationInput {
+    inputs: string;
+    parameters?: TextGenerationInputGenerateParameters;
+    stream?: boolean;
+    [property: string]: unknown;
+}
+interface TextGenerationInputGenerateParameters {
+    /**
+     * Lora adapter id
+     */
+    adapter_id?: string;
+    /**
+     * Generate best_of sequences and return the one with the highest token logprobs.
+     */
+    best_of?: number;
+    /**
+     * Whether to return decoder input token logprobs and ids.
+     */
+    decoder_input_details?: boolean;
+    /**
+     * Whether to return generation details.
+     */
+    details?: boolean;
+    /**
+     * Activate logits sampling.
+     */
+    do_sample?: boolean;
+    /**
+     * The parameter for frequency penalty. 1.0 means no penalty.
+     * Penalizes new tokens based on their existing frequency in the text so far,
+     * decreasing the model's likelihood to repeat the same line verbatim.
+     */
+    frequency_penalty?: number;
+    grammar?: TextGenerationInputGrammarType;
+    /**
+     * Maximum number of tokens to generate.
+     */
+    max_new_tokens?: number;
+    /**
+     * The parameter for repetition penalty. 1.0 means no penalty.
+     * See [this paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
+     */
+    repetition_penalty?: number;
+    /**
+     * Whether to prepend the prompt to the generated text
+     */
+    return_full_text?: boolean;
+    /**
+     * Random sampling seed.
+     */
+    seed?: number;
+    /**
+     * Stop generating tokens if a member of `stop` is generated.
+     */
+    stop?: string[];
+    /**
+     * The value used to modulate the logits distribution.
+     */
+    temperature?: number;
+    /**
+     * The number of highest probability vocabulary tokens to keep for top-k-filtering.
+     */
+    top_k?: number;
+    /**
+     * The number of highest probability vocabulary tokens to keep for top-n-filtering.
+     */
+    top_n_tokens?: number;
+    /**
+     * Top-p value for nucleus sampling.
+     */
+    top_p?: number;
+    /**
+     * Truncate inputs tokens to the given size.
+     */
+    truncate?: number;
+    /**
+     * Typical Decoding mass.
+     * See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666)
+     * for more information.
+     */
+    typical_p?: number;
+    /**
+     * Watermarking with [A Watermark for Large Language
+     * Models](https://arxiv.org/abs/2301.10226).
+     */
+    watermark?: boolean;
+    [property: string]: unknown;
+}
+interface TextGenerationInputGrammarType {
+    type: Type;
+    /**
+     * A string that represents a [JSON Schema](https://json-schema.org/).
+     *
+     * JSON Schema is a declarative language that allows you to annotate JSON documents
+     * with types and descriptions.
+     */
+    value: unknown;
+    [property: string]: unknown;
+}
+type Type = "json" | "regex";
+/**
+ * Text Generation Output.
+ *
+ * Auto-generated from TGI specs.
+ * For more details, check out
+ * https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts.
+ */
+interface TextGenerationOutput {
+    details?: TextGenerationOutputDetails;
+    generated_text: string;
+    [property: string]: unknown;
+}
+interface TextGenerationOutputDetails {
+    best_of_sequences?: TextGenerationOutputBestOfSequence[];
+    finish_reason: TextGenerationOutputFinishReason;
+    generated_tokens: number;
+    prefill: TextGenerationOutputPrefillToken[];
+    seed?: number;
+    tokens: TextGenerationOutputToken[];
+    top_tokens?: Array<TextGenerationOutputToken[]>;
+    [property: string]: unknown;
+}
+interface TextGenerationOutputBestOfSequence {
+    finish_reason: TextGenerationOutputFinishReason;
+    generated_text: string;
+    generated_tokens: number;
+    prefill: TextGenerationOutputPrefillToken[];
+    seed?: number;
+    tokens: TextGenerationOutputToken[];
+    top_tokens?: Array<TextGenerationOutputToken[]>;
+    [property: string]: unknown;
+}
+type TextGenerationOutputFinishReason = "length" | "eos_token" | "stop_sequence";
+interface TextGenerationOutputPrefillToken {
+    id: number;
+    logprob: number;
+    text: string;
+    [property: string]: unknown;
+}
+interface TextGenerationOutputToken {
+    id: number;
+    logprob: number;
+    special: boolean;
+    text: string;
+    [property: string]: unknown;
+}
+/**
+ * Text Generation Stream Output.
+ *
+ * Auto-generated from TGI specs.
+ * For more details, check out
+ * https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts.
+ */
+interface TextGenerationStreamOutput {
+    details?: TextGenerationStreamOutputStreamDetails;
+    generated_text?: string;
+    index: number;
+    token: TextGenerationStreamOutputToken;
+    top_tokens?: TextGenerationStreamOutputToken[];
+    [property: string]: unknown;
+}
+interface TextGenerationStreamOutputStreamDetails {
+    finish_reason: TextGenerationOutputFinishReason;
+    generated_tokens: number;
+    input_length: number;
+    seed?: number;
+    [property: string]: unknown;
+}
+interface TextGenerationStreamOutputToken {
+    id: number;
+    logprob: number;
+    special: boolean;
+    text: string;
+    [property: string]: unknown;
+}
+
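
When `stream: true` is set on `TextGenerationInput`, responses arrive as `TextGenerationStreamOutput` chunks. The transport (SSE, WebSocket, ...) is outside these types, so `chunks` below stands in for it in this minimal sketch:

async function collect(chunks: AsyncIterable<TextGenerationStreamOutput>): Promise<string> {
    let text = "";
    for await (const chunk of chunks) {
        if (!chunk.token.special) text += chunk.token.text; // skip special tokens such as EOS
        if (chunk.details) console.log("finish reason:", chunk.details.finish_reason);
    }
    return text;
}
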
+/**
+ * Inference code generated from the JSON schema spec in ./spec
+ *
+ * Using src/scripts/inference-codegen
+ */
+/**
+ * Inputs for Video Classification inference
+ */
+interface VideoClassificationInput {
+    /**
+     * The input video data
+     */
+    inputs: unknown;
+    /**
+     * Additional inference parameters
+     */
+    parameters?: VideoClassificationParameters;
+    [property: string]: unknown;
+}
+/**
+ * Additional inference parameters
+ *
+ * Additional inference parameters for Video Classification
+ */
+interface VideoClassificationParameters {
+    /**
+     * The sampling rate used to select frames from the video.
+     */
+    frame_sampling_rate?: number;
+    function_to_apply?: ClassificationOutputTransform;
+    /**
+     * The number of sampled frames to consider for classification.
+     */
+    num_frames?: number;
+    /**
+     * When specified, limits the output to the top K most probable classes.
+     */
+    top_k?: number;
+    [property: string]: unknown;
+}
+/**
+ * The function to apply to the model outputs in order to retrieve the scores.
+ */
+type ClassificationOutputTransform = "sigmoid" | "softmax" | "none";
+type VideoClassificationOutput = VideoClassificationOutputElement[];
+/**
+ * Outputs of inference for the Video Classification task
+ */
+interface VideoClassificationOutputElement {
+    /**
+     * The predicted class label.
+     */
+    label: string;
+    /**
+     * The corresponding probability.
+     */
+    score: number;
+    [property: string]: unknown;
+}
+
+/**
+ * Inference code generated from the JSON schema spec in ./spec
+ *
+ * Using src/scripts/inference-codegen
+ */
+/**
+ * Inputs for Visual Question Answering inference
+ */
+interface VisualQuestionAnsweringInput {
+    /**
+     * One (image, question) pair to answer
+     */
+    inputs: VisualQuestionAnsweringInputData;
+    /**
+     * Additional inference parameters
+     */
+    parameters?: VisualQuestionAnsweringParameters;
+    [property: string]: unknown;
+}
+/**
+ * One (image, question) pair to answer
+ */
+interface VisualQuestionAnsweringInputData {
+    /**
+     * The image.
+     */
+    image: unknown;
+    /**
+     * The question to answer based on the image.
+     */
+    question: unknown;
+    [property: string]: unknown;
+}
+/**
+ * Additional inference parameters
+ *
+ * Additional inference parameters for Visual Question Answering
+ */
+interface VisualQuestionAnsweringParameters {
+    /**
+     * The number of answers to return (chosen by order of likelihood). Note that fewer than
+     * top_k answers are returned if there are not enough options available within the context.
+     */
+    top_k?: number;
+    [property: string]: unknown;
+}
+type VisualQuestionAnsweringOutput = VisualQuestionAnsweringOutputElement[];
+/**
+ * Outputs of inference for the Visual Question Answering task
+ */
+interface VisualQuestionAnsweringOutputElement {
+    /**
+     * The answer to the question
+     */
+    answer?: string;
+    /**
+     * The associated score / probability
+     */
+    score: number;
+    [property: string]: unknown;
+}
+
+/**
+ * Inference code generated from the JSON schema spec in ./spec
+ *
+ * Using src/scripts/inference-codegen
+ */
+/**
+ * Inputs for Zero Shot Classification inference
+ */
+interface ZeroShotClassificationInput {
+    /**
+     * The input text data, with candidate labels
+     */
+    inputs: ZeroShotClassificationInputData;
+    /**
+     * Additional inference parameters
+     */
+    parameters?: ZeroShotClassificationParameters;
+    [property: string]: unknown;
+}
+/**
+ * The input text data, with candidate labels
+ */
+interface ZeroShotClassificationInputData {
+    /**
+     * The set of possible class labels to classify the text into.
+     */
+    candidateLabels: string[];
+    /**
+     * The text to classify
+     */
+    text: string;
+    [property: string]: unknown;
+}
+/**
+ * Additional inference parameters
+ *
+ * Additional inference parameters for Zero Shot Classification
+ */
+interface ZeroShotClassificationParameters {
+    /**
+     * The sentence used in conjunction with candidateLabels to attempt the text classification
+     * by replacing the placeholder with the candidate labels.
+     */
+    hypothesis_template?: string;
+    /**
+     * Whether multiple candidate labels can be true. If false, the scores are normalized such
+     * that the sum of the label likelihoods for each sequence is 1. If true, the labels are
+     * considered independent and probabilities are normalized for each candidate.
+     */
+    multi_label?: boolean;
+    [property: string]: unknown;
+}
+type ZeroShotClassificationOutput = ZeroShotClassificationOutputElement[];
+/**
+ * Outputs of inference for the Zero Shot Classification task
+ */
+interface ZeroShotClassificationOutputElement {
+    /**
+     * The predicted class label.
+     */
+    label: string;
+    /**
+     * The corresponding probability.
+     */
+    score: number;
+    [property: string]: unknown;
+}
+
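
A sketch of a request; the `{}` placeholder convention in `hypothesis_template` follows common zero-shot pipeline usage and is an assumption here, not something these types encode:

const zsc: ZeroShotClassificationInput = {
    inputs: {
        text: "The new GPU doubles training throughput.",
        candidateLabels: ["hardware", "finance", "sports"],
    },
    parameters: {
        hypothesis_template: "This text is about {}.", // "{}" assumed placeholder
        multi_label: false, // scores across labels sum to 1, per the doc above
    },
};
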
+/**
+ * Inference code generated from the JSON schema spec in ./spec
+ *
+ * Using src/scripts/inference-codegen
+ */
+/**
+ * Inputs for Zero Shot Image Classification inference
+ */
+interface ZeroShotImageClassificationInput {
+    /**
+     * The input image data, with candidate labels
+     */
+    inputs: ZeroShotImageClassificationInputData;
+    /**
+     * Additional inference parameters
+     */
+    parameters?: ZeroShotImageClassificationParameters;
+    [property: string]: unknown;
+}
+/**
+ * The input image data, with candidate labels
+ */
+interface ZeroShotImageClassificationInputData {
+    /**
+     * The candidate labels for this image
+     */
+    candidateLabels: string[];
+    /**
+     * The image data to classify
+     */
+    image: unknown;
+    [property: string]: unknown;
+}
+/**
+ * Additional inference parameters
+ *
+ * Additional inference parameters for Zero Shot Image Classification
+ */
+interface ZeroShotImageClassificationParameters {
+    /**
+     * The sentence used in conjunction with candidateLabels to attempt the text classification
+     * by replacing the placeholder with the candidate labels.
+     */
+    hypothesis_template?: string;
+    [property: string]: unknown;
+}
+type ZeroShotImageClassificationOutput = ZeroShotImageClassificationOutputElement[];
+/**
+ * Outputs of inference for the Zero Shot Image Classification task
+ */
+interface ZeroShotImageClassificationOutputElement {
+    /**
+     * The predicted class label.
+     */
+    label: string;
+    /**
+     * The corresponding probability.
+     */
+    score: number;
+    [property: string]: unknown;
+}
+
+/**
+ * Inference code generated from the JSON schema spec in ./spec
+ *
+ * Using src/scripts/inference-codegen
+ */
+/**
+ * Inputs for Zero Shot Object Detection inference
+ */
+interface ZeroShotObjectDetectionInput {
+    /**
+     * The input image data, with candidate labels
+     */
+    inputs: ZeroShotObjectDetectionInputData;
+    /**
+     * Additional inference parameters
+     */
+    parameters?: {
+        [key: string]: unknown;
+    };
+    [property: string]: unknown;
+}
+/**
+ * The input image data, with candidate labels
+ */
+interface ZeroShotObjectDetectionInputData {
+    /**
+     * The candidate labels for this image
+     */
+    candidateLabels: string[];
+    /**
+     * The image data to generate bounding boxes from
+     */
+    image: unknown;
+    [property: string]: unknown;
+}
+/**
+ * The predicted bounding box. Coordinates are relative to the top left corner of the input
+ * image.
+ */
+interface BoundingBox {
+    xmax: number;
+    xmin: number;
+    ymax: number;
+    ymin: number;
+    [property: string]: unknown;
+}
+type ZeroShotObjectDetectionOutput = ZeroShotObjectDetectionOutputElement[];
+/**
+ * Outputs of inference for the Zero Shot Object Detection task
+ */
+interface ZeroShotObjectDetectionOutputElement {
+    /**
+     * The predicted bounding box. Coordinates are relative to the top left corner of the input
+     * image.
+     */
+    box: BoundingBox;
+    /**
+     * A candidate label
+     */
+    label: string;
+    /**
+     * The associated score / probability
+     */
+    score: number;
+    [property: string]: unknown;
+}
+
2751
|
+
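A minimal sketch of consuming the detection output types above (the values are made up, not from the package):

// Sketch only: iterate detections; box coordinates are relative to the top-left corner.
const detections: ZeroShotObjectDetectionOutput = [
    { label: "cat", score: 0.98, box: { xmin: 10, ymin: 20, xmax: 110, ymax: 220 } },
];
for (const { label, score, box } of detections) {
    console.log(`${label} (${score}): ${box.xmax - box.xmin}x${box.ymax - box.ymin}px`);
}
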
/**
 * Model libraries compatible with each ML task
 */
declare const TASKS_MODEL_LIBRARIES: Record<PipelineType, ModelLibraryKey[]>;
declare const TASKS_DATA: Record<PipelineType, TaskData | undefined>;
interface ExampleRepo {
    description: string;
    id: string;
}
type TaskDemoEntry = {
    filename: string;
    type: "audio";
} | {
    data: Array<{
        label: string;
        score: number;
    }>;
    type: "chart";
} | {
    filename: string;
    type: "img";
} | {
    table: string[][];
    type: "tabular";
} | {
    content: string;
    label: string;
    type: "text";
} | {
    text: string;
    tokens: Array<{
        end: number;
        start: number;
        type: string;
    }>;
    type: "text-with-tokens";
};
interface TaskDemo {
    inputs: TaskDemoEntry[];
    outputs: TaskDemoEntry[];
}
interface TaskData {
    datasets: ExampleRepo[];
    demo: TaskDemo;
    id: PipelineType;
    canonicalId?: PipelineType;
    isPlaceholder?: boolean;
    label: string;
    libraries: ModelLibraryKey[];
    metrics: ExampleRepo[];
    models: ExampleRepo[];
    spaces: ExampleRepo[];
    summary: string;
    widgetModels: string[];
    youtubeId?: string;
}
type TaskDataCustom = Omit<TaskData, "id" | "label" | "libraries">;

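For orientation, a sketch of looking up task metadata (assuming these declarations are in scope; the pipeline type used here is just one plausible key):

// Sketch only: TASKS_DATA is keyed by pipeline type and entries may be undefined.
const task = TASKS_DATA["zero-shot-image-classification"];
if (task) {
    console.log(task.label, "-", task.summary);
    for (const model of task.models) console.log(`example model: ${model.id}`);
}
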
/**
 * See default-widget-inputs.ts for the default widget inputs, this file only contains the types
 */

type TableData = Record<string, (string | number)[]>;
type WidgetExampleOutputLabels = Array<{
    label: string;
    score: number;
}>;
interface WidgetExampleOutputAnswerScore {
    answer: string;
    score: number;
}
interface WidgetExampleOutputText {
    text: string;
}
interface WidgetExampleOutputUrl {
    url: string;
}
type WidgetExampleOutput = WidgetExampleOutputLabels | WidgetExampleOutputAnswerScore | WidgetExampleOutputText | WidgetExampleOutputUrl;
interface WidgetExampleBase<TOutput> {
    example_title?: string;
    group?: string;
    /**
     * Potential overrides to API parameters for this specific example
     * (takes precedence over the model card metadata's inference.parameters)
     */
    parameters?: {
        aggregation_strategy?: string;
        top_k?: number;
        top_p?: number;
        temperature?: number;
        max_new_tokens?: number;
        do_sample?: boolean;
        negative_prompt?: string;
        guidance_scale?: number;
        num_inference_steps?: number;
    };
    /**
     * Optional output
     */
    output?: TOutput;
}
interface WidgetExampleChatInput<TOutput = WidgetExampleOutput> extends WidgetExampleBase<TOutput> {
    messages: ChatCompletionInputMessage[];
}
interface WidgetExampleTextInput<TOutput = WidgetExampleOutput> extends WidgetExampleBase<TOutput> {
    text: string;
}
interface WidgetExampleTextAndContextInput<TOutput = WidgetExampleOutput> extends WidgetExampleTextInput<TOutput> {
    context: string;
}
interface WidgetExampleTextAndTableInput<TOutput = WidgetExampleOutput> extends WidgetExampleTextInput<TOutput> {
    table: TableData;
}
interface WidgetExampleAssetInput<TOutput = WidgetExampleOutput> extends WidgetExampleBase<TOutput> {
    src: string;
}
interface WidgetExampleAssetAndPromptInput<TOutput = WidgetExampleOutput> extends WidgetExampleAssetInput<TOutput> {
    prompt: string;
}
type WidgetExampleAssetAndTextInput<TOutput = WidgetExampleOutput> = WidgetExampleAssetInput<TOutput> & WidgetExampleTextInput<TOutput>;
type WidgetExampleAssetAndZeroShotInput<TOutput = WidgetExampleOutput> = WidgetExampleAssetInput<TOutput> & WidgetExampleZeroShotTextInput<TOutput>;
interface WidgetExampleStructuredDataInput<TOutput = WidgetExampleOutput> extends WidgetExampleBase<TOutput> {
    structured_data: TableData;
}
interface WidgetExampleTableDataInput<TOutput = WidgetExampleOutput> extends WidgetExampleBase<TOutput> {
    table: TableData;
}
interface WidgetExampleZeroShotTextInput<TOutput = WidgetExampleOutput> extends WidgetExampleTextInput<TOutput> {
    text: string;
    candidate_labels: string;
    multi_class: boolean;
}
interface WidgetExampleSentenceSimilarityInput<TOutput = WidgetExampleOutput> extends WidgetExampleBase<TOutput> {
    source_sentence: string;
    sentences: string[];
}
type WidgetExample<TOutput = WidgetExampleOutput> = WidgetExampleChatInput<TOutput> | WidgetExampleTextInput<TOutput> | WidgetExampleTextAndContextInput<TOutput> | WidgetExampleTextAndTableInput<TOutput> | WidgetExampleAssetInput<TOutput> | WidgetExampleAssetAndPromptInput<TOutput> | WidgetExampleAssetAndTextInput<TOutput> | WidgetExampleAssetAndZeroShotInput<TOutput> | WidgetExampleStructuredDataInput<TOutput> | WidgetExampleTableDataInput<TOutput> | WidgetExampleZeroShotTextInput<TOutput> | WidgetExampleSentenceSimilarityInput<TOutput>;
type KeysOfUnion<T> = T extends unknown ? keyof T : never;
type WidgetExampleAttribute = KeysOfUnion<WidgetExample>;

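A sketch of one member of the `WidgetExample` union with an expected output attached (the titles, text, and scores are hypothetical):

// Sketch only: a text-input widget example typed with a labels-style output.
const example: WidgetExampleTextInput<WidgetExampleOutputLabels> = {
    example_title: "Sentiment",
    text: "I love this movie!",
    parameters: { top_k: 2 },
    output: [
        { label: "POSITIVE", score: 0.99 },
        { label: "NEGATIVE", score: 0.01 },
    ],
};
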
declare const SPECIAL_TOKENS_ATTRIBUTES: readonly ["bos_token", "eos_token", "unk_token", "sep_token", "pad_token", "cls_token", "mask_token"];
/**
 * Public interface for a tokenizer's special tokens mapping
 */
interface AddedToken {
    __type: "AddedToken";
    content?: string;
    lstrip?: boolean;
    normalized?: boolean;
    rstrip?: boolean;
    single_word?: boolean;
}
type SpecialTokensMap = {
    [key in (typeof SPECIAL_TOKENS_ATTRIBUTES)[number]]?: string | AddedToken | null;
};
/**
 * Public interface for tokenizer config
 */
interface TokenizerConfig extends SpecialTokensMap {
    use_default_system_prompt?: boolean;
    chat_template?: string | Array<{
        name: string;
        template: string;
    }>;
}

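A sketch of a tokenizer config value, showing a special token in both its plain-string and `AddedToken` forms (the tokens and template are hypothetical):

// Sketch only: eos_token uses the AddedToken object form, bos_token the string form.
const tokenizerConfig: TokenizerConfig = {
    bos_token: "<s>",
    eos_token: { __type: "AddedToken", content: "</s>", lstrip: false, rstrip: false },
    chat_template: "{% for message in messages %}{{ message.content }}{% endfor %}",
};
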
/**
 * Public interface for model metadata
 */
interface ModelData {
    /**
     * id of model (e.g. 'user/repo_name')
     */
    id: string;
    /**
     * Whether or not to enable inference widget for this model
     * TODO(type it)
     */
    inference: string;
    /**
     * is this model private?
     */
    private?: boolean;
    /**
     * this dictionary has useful information about the model configuration
     */
    config?: {
        architectures?: string[];
        /**
         * Dict of AutoModel or Auto… class name to local import path in the repo
         */
        auto_map?: {
            /**
             * String Property
             */
            [x: string]: string;
        };
        model_type?: string;
        quantization_config?: {
            bits?: number;
            load_in_4bit?: boolean;
            load_in_8bit?: boolean;
            /**
             * awq, gptq, aqlm, marlin, … Used by vLLM
             */
            quant_method?: string;
        };
        tokenizer_config?: TokenizerConfig;
        adapter_transformers?: {
            model_name?: string;
            model_class?: string;
        };
        diffusers?: {
            _class_name?: string;
        };
        sklearn?: {
            model?: {
                file?: string;
            };
            model_format?: string;
        };
        speechbrain?: {
            speechbrain_interface?: string;
            vocoder_interface?: string;
            vocoder_model_id?: string;
        };
        peft?: {
            base_model_name_or_path?: string;
            task_type?: string;
        };
    };
    /**
     * all the model tags
     */
    tags: string[];
    /**
     * transformers-specific info to display in the code sample.
     */
    transformersInfo?: TransformersInfo;
    /**
     * Pipeline type
     */
    pipeline_tag?: PipelineType | undefined;
    /**
     * for relevant models, get mask token
     */
    mask_token?: string | undefined;
    /**
     * Example data that will be fed into the widget.
     *
     * can be set in the model card metadata (under `widget`),
     * or by default in `DefaultWidget.ts`
     */
    widgetData?: WidgetExample[] | undefined;
    /**
     * Parameters that will be used by the widget when calling Inference API (serverless)
     * https://huggingface.co/docs/api-inference/detailed_parameters
     *
     * can be set in the model card metadata (under `inference/parameters`)
     * Example:
     * inference:
     *     parameters:
     *         key: val
     */
    cardData?: {
        inference?: boolean | {
            parameters?: Record<string, unknown>;
        };
        base_model?: string | string[];
        instance_prompt?: string | null;
    };
    /**
     * Library name
     * Example: transformers, SpeechBrain, Stanza, etc.
     */
    library_name?: string;
    safetensors?: {
        parameters: Record<string, number>;
        total: number;
        sharded: boolean;
    };
    gguf?: {
        total: number;
        architecture?: string;
        context_length?: number;
    };
}
/**
 * transformers-specific info to display in the code sample.
 */
interface TransformersInfo {
    /**
     * e.g. AutoModelForSequenceClassification
     */
    auto_model: string;
    /**
     * if set in config.json's auto_map
     */
    custom_class?: string;
    /**
     * e.g. text-classification
     */
    pipeline_tag?: PipelineType;
    /**
     * e.g. "AutoTokenizer" | "AutoFeatureExtractor" | "AutoProcessor"
     */
    processor?: string;
}

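A sketch of a `ModelData` value carrying only the required fields plus a few common optional ones (all concrete values are hypothetical):

// Sketch only: id, inference, and tags are the required fields; the rest are optional.
const model: ModelData = {
    id: "user/repo_name",
    inference: "Yes", // typed as plain string in this version (see the TODO above)
    tags: ["text-classification", "transformers"],
    pipeline_tag: "text-classification",
    library_name: "transformers",
    transformersInfo: { auto_model: "AutoModelForSequenceClassification", processor: "AutoTokenizer" },
};
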
/**
 * This file contains the (simplified) types used
 * to represent queries that are made to Elastic
 * in order to count number of model downloads
 *
 * Read this doc about download stats on the Hub:
 *
 * https://huggingface.co/docs/hub/models-download-stats
 * Available fields:
 * - path: the complete file path (relative) (e.g: "prefix/file.extension")
 * - path_prefix: the prefix of the file path (e.g: "prefix/", empty if no prefix)
 * - path_extension: the extension of the file path (e.g: "extension")
 * - path_filename: the name of the file path (e.g: "file")
 * see also:
 * https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-query-string-query.html
 */
type ElasticSearchQuery = string;

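A hypothetical query in the Elasticsearch query-string syntax referenced above, using the documented fields (the particular filter is an illustration, not one shipped by the package):

// Sketch only: count downloads by matching either file extension or filename.
const exampleCountDownloads: ElasticSearchQuery = `path_extension:"gguf" OR path_filename:"config"`;
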
/**
 * Elements configurable by a model library.
 */
interface LibraryUiElement {
    /**
     * Pretty name of the library.
     * displayed in tags, and on the main
     * call-to-action button on the model page.
     */
    prettyLabel: string;
    /**
     * Repo name of the library's (usually on GitHub) code repo
     */
    repoName: string;
    /**
     * URL to library's (usually on GitHub) code repo
     */
    repoUrl: string;
    /**
     * URL to library's docs
     */
    docsUrl?: string;
    /**
     * Code snippet(s) displayed on model page
     */
    snippets?: (model: ModelData) => string[];
    /**
     * Elastic query used to count this library's model downloads
     *
     * By default, those files are counted:
     * "config.json", "config.yaml", "hyperparams.yaml", "meta.yaml"
     */
    countDownloads?: ElasticSearchQuery;
    /**
     * should we display this library in hf.co/models filter
     * (only for popular libraries with > 100 models)
     */
    filter?: boolean;
}
/**
 * Add your new library here.
 *
 * This is for modeling (= architectures) libraries, not for file formats (like ONNX, etc).
 * (unlike libraries, file formats live in an enum inside the internal codebase.)
 *
 * Doc on how to add a library to the Hub:
 *
 * https://huggingface.co/docs/hub/models-adding-libraries
 *
 * /!\ IMPORTANT
 *
 * The key you choose is the tag your models have in their library_name on the Hub.
 */
declare const MODEL_LIBRARIES_UI_ELEMENTS: {
    "adapter-transformers": { prettyLabel: string; repoName: string; repoUrl: string; docsUrl: string; snippets: (model: ModelData) => string[]; filter: true; countDownloads: string; };
    allennlp: { prettyLabel: string; repoName: string; repoUrl: string; docsUrl: string; snippets: (model: ModelData) => string[]; filter: true; };
    asteroid: { prettyLabel: string; repoName: string; repoUrl: string; docsUrl: string; snippets: (model: ModelData) => string[]; filter: true; countDownloads: string; };
    audiocraft: { prettyLabel: string; repoName: string; repoUrl: string; snippets: (model: ModelData) => string[]; filter: false; countDownloads: string; };
    audioseal: { prettyLabel: string; repoName: string; repoUrl: string; filter: false; countDownloads: string; snippets: (model: ModelData) => string[]; };
    bertopic: { prettyLabel: string; repoName: string; repoUrl: string; snippets: (model: ModelData) => string[]; filter: true; };
    big_vision: { prettyLabel: string; repoName: string; repoUrl: string; filter: false; countDownloads: string; };
    birefnet: { prettyLabel: string; repoName: string; repoUrl: string; snippets: (model: ModelData) => string[]; filter: false; };
    bm25s: { prettyLabel: string; repoName: string; repoUrl: string; snippets: (model: ModelData) => string[]; filter: false; countDownloads: string; };
    champ: { prettyLabel: string; repoName: string; repoUrl: string; countDownloads: string; };
    chat_tts: { prettyLabel: string; repoName: string; repoUrl: string; snippets: () => string[]; filter: false; countDownloads: string; };
    colpali: { prettyLabel: string; repoName: string; repoUrl: string; filter: false; countDownloads: string; };
    deepforest: { prettyLabel: string; repoName: string; docsUrl: string; repoUrl: string; countDownloads: string; };
    "depth-anything-v2": { prettyLabel: string; repoName: string; repoUrl: string; snippets: (model: ModelData) => string[]; filter: false; countDownloads: string; };
    "depth-pro": { prettyLabel: string; repoName: string; repoUrl: string; countDownloads: string; snippets: (model: ModelData) => string[]; filter: false; };
    diffree: { prettyLabel: string; repoName: string; repoUrl: string; filter: false; countDownloads: string; };
    diffusers: { prettyLabel: string; repoName: string; repoUrl: string; docsUrl: string; snippets: (model: ModelData) => string[]; filter: true; };
    diffusionkit: { prettyLabel: string; repoName: string; repoUrl: string; snippets: (model: ModelData) => string[]; };
    doctr: { prettyLabel: string; repoName: string; repoUrl: string; };
    cartesia_pytorch: { prettyLabel: string; repoName: string; repoUrl: string; snippets: (model: ModelData) => string[]; };
    cartesia_mlx: { prettyLabel: string; repoName: string; repoUrl: string; snippets: (model: ModelData) => string[]; };
    cotracker: { prettyLabel: string; repoName: string; repoUrl: string; filter: false; countDownloads: string; };
    edsnlp: { prettyLabel: string; repoName: string; repoUrl: string; docsUrl: string; filter: false; snippets: (model: ModelData) => string[]; countDownloads: string; };
    elm: { prettyLabel: string; repoName: string; repoUrl: string; filter: false; countDownloads: string; };
    espnet: { prettyLabel: string; repoName: string; repoUrl: string; docsUrl: string; snippets: (model: ModelData) => string[]; filter: true; };
    fairseq: { prettyLabel: string; repoName: string; repoUrl: string; snippets: (model: ModelData) => string[]; filter: true; };
    fastai: { prettyLabel: string; repoName: string; repoUrl: string; docsUrl: string; snippets: (model: ModelData) => string[]; filter: true; };
    fasttext: { prettyLabel: string; repoName: string; repoUrl: string; snippets: (model: ModelData) => string[]; filter: true; countDownloads: string; };
    flair: { prettyLabel: string; repoName: string; repoUrl: string; docsUrl: string; snippets: (model: ModelData) => string[]; filter: true; countDownloads: string; };
    "gemma.cpp": { prettyLabel: string; repoName: string; repoUrl: string; filter: false; countDownloads: string; };
    gliner: { prettyLabel: string; repoName: string; repoUrl: string; snippets: (model: ModelData) => string[]; filter: false; countDownloads: string; };
    "glyph-byt5": { prettyLabel: string; repoName: string; repoUrl: string; filter: false; countDownloads: string; };
    grok: { prettyLabel: string; repoName: string; repoUrl: string; filter: false; countDownloads: string; };
    hallo: { prettyLabel: string; repoName: string; repoUrl: string; countDownloads: string; };
    hezar: { prettyLabel: string; repoName: string; repoUrl: string; docsUrl: string; countDownloads: string; };
    htrflow: { prettyLabel: string; repoName: string; repoUrl: string; docsUrl: string; snippets: (model: ModelData) => string[]; };
    "hunyuan-dit": { prettyLabel: string; repoName: string; repoUrl: string; countDownloads: string; };
    imstoucan: { prettyLabel: string; repoName: string; repoUrl: string; countDownloads: string; };
    keras: { prettyLabel: string; repoName: string; repoUrl: string; docsUrl: string; snippets: (model: ModelData) => string[]; filter: true; countDownloads: string; };
    "tf-keras": { prettyLabel: string; repoName: string; repoUrl: string; docsUrl: string; snippets: (model: ModelData) => string[]; countDownloads: string; };
    "keras-nlp": { prettyLabel: string; repoName: string; repoUrl: string; docsUrl: string; snippets: (model: ModelData) => string[]; };
    "keras-hub": { prettyLabel: string; repoName: string; repoUrl: string; docsUrl: string; snippets: (model: ModelData) => string[]; filter: true; };
    k2: { prettyLabel: string; repoName: string; repoUrl: string; };
    liveportrait: { prettyLabel: string; repoName: string; repoUrl: string; filter: false; countDownloads: string; };
    "llama-cpp-python": { prettyLabel: string; repoName: string; repoUrl: string; snippets: (model: ModelData) => string[]; };
    "mini-omni2": { prettyLabel: string; repoName: string; repoUrl: string; countDownloads: string; };
    mindspore: { prettyLabel: string; repoName: string; repoUrl: string; };
    "mamba-ssm": { prettyLabel: string; repoName: string; repoUrl: string; filter: false; snippets: (model: ModelData) => string[]; };
    "mars5-tts": { prettyLabel: string; repoName: string; repoUrl: string; filter: false; countDownloads: string; snippets: (model: ModelData) => string[]; };
    "mesh-anything": { prettyLabel: string; repoName: string; repoUrl: string; filter: false; countDownloads: string; snippets: () => string[]; };
    "ml-agents": { prettyLabel: string; repoName: string; repoUrl: string; docsUrl: string; snippets: (model: ModelData) => string[]; filter: true; countDownloads: string; };
    mlx: { prettyLabel: string; repoName: string; repoUrl: string; snippets: (model: ModelData) => string[]; filter: true; };
    "mlx-image": { prettyLabel: string; repoName: string; repoUrl: string; docsUrl: string; snippets: (model: ModelData) => string[]; filter: false; countDownloads: string; };
    "mlc-llm": { prettyLabel: string; repoName: string; repoUrl: string; docsUrl: string; filter: false; countDownloads: string; };
    model2vec: { prettyLabel: string; repoName: string; repoUrl: string; snippets: (model: ModelData) => string[]; filter: false; };
    moshi: { prettyLabel: string; repoName: string; repoUrl: string; filter: false; countDownloads: string; };
    nemo: { prettyLabel: string; repoName: string; repoUrl: string; snippets: (model: ModelData) => string[]; filter: true; countDownloads: string; };
    "open-oasis": { prettyLabel: string; repoName: string; repoUrl: string; countDownloads: string; };
    open_clip: { prettyLabel: string; repoName: string; repoUrl: string; snippets: (model: ModelData) => string[]; filter: true; countDownloads: string; };
    paddlenlp: { prettyLabel: string; repoName: string; repoUrl: string; docsUrl: string; snippets: (model: ModelData) => string[]; filter: true; countDownloads: string; };
    peft: { prettyLabel: string; repoName: string; repoUrl: string; snippets: (model: ModelData) => string[]; filter: true; countDownloads: string; };
    pxia: { prettyLabel: string; repoName: string; repoUrl: string; snippets: (model: ModelData) => string[]; filter: false; };
    "pyannote-audio": { prettyLabel: string; repoName: string; repoUrl: string; snippets: (model: ModelData) => string[]; filter: true; };
    "py-feat": { prettyLabel: string; repoName: string; repoUrl: string; docsUrl: string; filter: false; };
    pythae: { prettyLabel: string; repoName: string; repoUrl: string; snippets: (model: ModelData) => string[]; filter: false; };
    recurrentgemma: { prettyLabel: string; repoName: string; repoUrl: string; filter: false; countDownloads: string; };
    relik: { prettyLabel: string; repoName: string; repoUrl: string; snippets: (model: ModelData) => string[]; filter: false; };
    refiners: { prettyLabel: string; repoName: string; repoUrl: string; docsUrl: string; filter: false; countDownloads: string; };
    reverb: { prettyLabel: string; repoName: string; repoUrl: string; filter: false; };
    saelens: { prettyLabel: string; repoName: string; repoUrl: string; snippets: () => string[]; filter: false; };
    sam2: { prettyLabel: string; repoName: string; repoUrl: string; filter: false; snippets: (model: ModelData) => string[]; countDownloads: string; };
    "sample-factory": { prettyLabel: string; repoName: string; repoUrl: string; docsUrl: string; snippets: (model: ModelData) => string[]; filter: true; countDownloads: string; };
    sapiens: { prettyLabel: string; repoName: string; repoUrl: string; filter: false; countDownloads: string; };
    "sentence-transformers": { prettyLabel: string; repoName: string; repoUrl: string; docsUrl: string; snippets: (model: ModelData) => string[]; filter: true; };
    setfit: { prettyLabel: string; repoName: string; repoUrl: string; docsUrl: string; snippets: (model: ModelData) => string[]; filter: true; };
    sklearn: { prettyLabel: string; repoName: string; repoUrl: string; snippets: (model: ModelData) => string[]; filter: true; countDownloads: string; };
    spacy: { prettyLabel: string; repoName: string; repoUrl: string; docsUrl: string; snippets: (model: ModelData) => string[]; filter: true; countDownloads: string; };
    "span-marker": { prettyLabel: string; repoName: string; repoUrl: string; docsUrl: string; snippets: (model: ModelData) => string[]; filter: true; };
    speechbrain: { prettyLabel: string; repoName: string; repoUrl: string; docsUrl: string; snippets: (model: ModelData) => string[]; filter: true; countDownloads: string; };
    "ssr-speech": { prettyLabel: string; repoName: string; repoUrl: string; filter: false; countDownloads: string; };
    "stable-audio-tools": { prettyLabel: string; repoName: string; repoUrl: string; filter: false; countDownloads: string; snippets: (model: ModelData) => string[]; };
    "diffusion-single-file": { prettyLabel: string; repoName: string; repoUrl: string; filter: false; countDownloads: string; };
    "seed-story": { prettyLabel: string; repoName: string; repoUrl: string; filter: false; countDownloads: string; snippets: () => string[]; };
    soloaudio: { prettyLabel: string; repoName: string; repoUrl: string; filter: false; countDownloads: string; };
    "stable-baselines3": { prettyLabel: string; repoName: string; repoUrl: string; docsUrl: string; snippets: (model: ModelData) => string[]; filter: true; countDownloads: string; };
    stanza: { prettyLabel: string; repoName: string; repoUrl: string; docsUrl: string; snippets: (model: ModelData) => string[]; filter: true; countDownloads: string; };
    "f5-tts": { prettyLabel: string; repoName: string; repoUrl: string; filter: false; countDownloads: string; };
    genmo: { prettyLabel: string; repoName: string; repoUrl: string; filter: false; countDownloads: string; };
    tensorflowtts: { prettyLabel: string; repoName: string; repoUrl: string; snippets: (model: ModelData) => string[]; };
    "tic-clip": { prettyLabel: string; repoName: string; repoUrl: string; filter: false; countDownloads: string; };
    timesfm: { prettyLabel: string; repoName: string; repoUrl: string; filter: false; countDownloads: string; };
    timm: { prettyLabel: string; repoName: string; repoUrl: string; docsUrl: string; snippets: (model: ModelData) => string[]; filter: true; countDownloads: string; };
    transformers: { prettyLabel: string; repoName: string; repoUrl: string; docsUrl: string; snippets: (model: ModelData) => string[]; filter: true; };
    "transformers.js": { prettyLabel: string; repoName: string; repoUrl: string; docsUrl: string; snippets: (model: ModelData) => string[]; filter: true; };
    "unity-sentis": { prettyLabel: string; repoName: string; repoUrl: string; snippets: () => string[]; filter: true; countDownloads: string; };
    "vfi-mamba": { prettyLabel: string; repoName: string; repoUrl: string; countDownloads: string; snippets: (model: ModelData) => string[]; };
    voicecraft: { prettyLabel: string; repoName: string; repoUrl: string; docsUrl: string; snippets: (model: ModelData) => string[]; };
    yolov10: { prettyLabel: string; repoName: string; repoUrl: string; docsUrl: string; snippets: (model: ModelData) => string[]; };
    whisperkit: { prettyLabel: string; repoName: string; repoUrl: string; docsUrl: string; snippets: () => string[]; countDownloads: string; };
    "3dtopia-xl": { prettyLabel: string; repoName: string; repoUrl: string; filter: false; countDownloads: string; snippets: (model: ModelData) => string[]; };
};
type ModelLibraryKey = keyof typeof MODEL_LIBRARIES_UI_ELEMENTS;
declare const ALL_MODEL_LIBRARY_KEYS: ("adapter-transformers" | "allennlp" | "asteroid" | "audiocraft" | "audioseal" | "bertopic" | "big_vision" | "birefnet" | "bm25s" | "champ" | "chat_tts" | "colpali" | "deepforest" | "depth-anything-v2" | "depth-pro" | "diffree" | "diffusers" | "diffusionkit" | "doctr" | "cartesia_pytorch" | "cartesia_mlx" | "cotracker" | "edsnlp" | "elm" | "espnet" | "fairseq" | "fastai" | "fasttext" | "flair" | "gemma.cpp" | "gliner" | "glyph-byt5" | "grok" | "hallo" | "hezar" | "htrflow" | "hunyuan-dit" | "imstoucan" | "keras" | "tf-keras" | "keras-nlp" | "keras-hub" | "k2" | "liveportrait" | "llama-cpp-python" | "mini-omni2" | "mindspore" | "mamba-ssm" | "mars5-tts" | "mesh-anything" | "ml-agents" | "mlx" | "mlx-image" | "mlc-llm" | "model2vec" | "moshi" | "nemo" | "open-oasis" | "open_clip" | "paddlenlp" | "peft" | "pxia" | "pyannote-audio" | "py-feat" | "pythae" | "recurrentgemma" | "relik" | "refiners" | "reverb" | "saelens" | "sam2" | "sample-factory" | "sapiens" | "sentence-transformers" | "setfit" | "sklearn" | "spacy" | "span-marker" | "speechbrain" | "ssr-speech" | "stable-audio-tools" | "diffusion-single-file" | "seed-story" | "soloaudio" | "stable-baselines3" | "stanza" | "f5-tts" | "genmo" | "tensorflowtts" | "tic-clip" | "timesfm" | "timm" | "transformers" | "transformers.js" | "unity-sentis" | "vfi-mamba" | "voicecraft" | "yolov10" | "whisperkit" | "3dtopia-xl")[];
declare const ALL_DISPLAY_MODEL_LIBRARY_KEYS: ("adapter-transformers" | "allennlp" | "asteroid" | "audiocraft" | "audioseal" | "bertopic" | "big_vision" | "birefnet" | "bm25s" | "champ" | "chat_tts" | "colpali" | "deepforest" | "depth-anything-v2" | "depth-pro" | "diffree" | "diffusers" | "diffusionkit" | "doctr" | "cartesia_pytorch" | "cartesia_mlx" | "cotracker" | "edsnlp" | "elm" | "espnet" | "fairseq" | "fastai" | "fasttext" | "flair" | "gemma.cpp" | "gliner" | "glyph-byt5" | "grok" | "hallo" | "hezar" | "htrflow" | "hunyuan-dit" | "imstoucan" | "keras" | "tf-keras" | "keras-nlp" | "keras-hub" | "k2" | "liveportrait" | "llama-cpp-python" | "mini-omni2" | "mindspore" | "mamba-ssm" | "mars5-tts" | "mesh-anything" | "ml-agents" | "mlx" | "mlx-image" | "mlc-llm" | "model2vec" | "moshi" | "nemo" | "open-oasis" | "open_clip" | "paddlenlp" | "peft" | "pxia" | "pyannote-audio" | "py-feat" | "pythae" | "recurrentgemma" | "relik" | "refiners" | "reverb" | "saelens" | "sam2" | "sample-factory" | "sapiens" | "sentence-transformers" | "setfit" | "sklearn" | "spacy" | "span-marker" | "speechbrain" | "ssr-speech" | "stable-audio-tools" | "diffusion-single-file" | "seed-story" | "soloaudio" | "stable-baselines3" | "stanza" | "f5-tts" | "genmo" | "tensorflowtts" | "tic-clip" | "timesfm" | "timm" | "transformers" | "transformers.js" | "unity-sentis" | "vfi-mamba" | "voicecraft" | "yolov10" | "whisperkit" | "3dtopia-xl")[];

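A sketch of reading one registry entry at its declared literal type (the fields shown match the "sentence-transformers" entry above; the logging is illustrative):

// Sketch only: each entry satisfies LibraryUiElement, but is typed as its own literal.
const st = MODEL_LIBRARIES_UI_ELEMENTS["sentence-transformers"];
console.log(st.prettyLabel, st.repoUrl, st.docsUrl);
// `filter: true` means the library appears in the hf.co/models filter.
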
/**
 * Mapping from library name to its supported tasks.
 * Inference API (serverless) should be disabled for all other (library, task) pairs beyond this mapping.
 * This mapping is partially generated automatically by "python-api-export-tasks" action in
 * huggingface/api-inference-community repo upon merge. For transformers, the mapping is manually
 * based on api-inference (hf_types.rs).
 */
declare const LIBRARY_TASK_MAPPING: Partial<Record<ModelLibraryKey, PipelineType[]>>;

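A sketch of gating serverless inference on a (library, task) pair, following the rule stated in the comment above (the helper name is hypothetical):

// Sketch only: pairs outside the mapping are treated as unsupported.
function isServerlessSupported(library: ModelLibraryKey, task: PipelineType): boolean {
    return LIBRARY_TASK_MAPPING[library]?.includes(task) ?? false;
}
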
type PerLanguageMapping = Map<WidgetType, string[] | WidgetExample[]>;
declare const MAPPING_DEFAULT_WIDGET: Map<string, PerLanguageMapping>;

/**
 * Minimal model data required for snippets.
 *
 * Add more fields as needed.
 */
type ModelDataMinimal = Pick<ModelData, "id" | "pipeline_tag" | "mask_token" | "library_name" | "config" | "tags" | "inference">;
interface InferenceSnippet {
    content: string;
    client?: string;
}

declare function getModelInputSnippet(model: ModelDataMinimal, noWrap?: boolean, noQuotes?: boolean): string | ChatCompletionInputMessage[];

declare const inputs_getModelInputSnippet: typeof getModelInputSnippet;
declare namespace inputs {
    export { inputs_getModelInputSnippet as getModelInputSnippet };
}

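A sketch of building a `ModelDataMinimal` and fetching its default widget input (the model id is hypothetical; only `id`, `tags`, and `inference` are required by the `Pick`):

// Sketch only: chat-style models would get ChatCompletionInputMessage[] back instead of a string.
const minimal: ModelDataMinimal = { id: "user/repo_name", pipeline_tag: "text-classification", tags: [], inference: "" };
const widgetInput = inputs.getModelInputSnippet(minimal);
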
declare const snippetBasic$2: (model: ModelDataMinimal, accessToken: string) => InferenceSnippet;
declare const snippetTextGeneration$1: (model: ModelDataMinimal, accessToken: string, opts?: {
    streaming?: boolean;
    messages?: ChatCompletionInputMessage[];
    temperature?: GenerationParameters$2["temperature"];
    max_tokens?: GenerationParameters$2["max_tokens"];
    top_p?: GenerationParameters$2["top_p"];
}) => InferenceSnippet;
declare const snippetZeroShotClassification$2: (model: ModelDataMinimal, accessToken: string) => InferenceSnippet;
declare const snippetFile$2: (model: ModelDataMinimal, accessToken: string) => InferenceSnippet;
declare const curlSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal, accessToken: string, opts?: Record<string, unknown>) => InferenceSnippet>>;
declare function getCurlInferenceSnippet(model: ModelDataMinimal, accessToken: string): InferenceSnippet;
declare function hasCurlInferenceSnippet(model: Pick<ModelDataMinimal, "pipeline_tag">): boolean;

declare const curl_curlSnippets: typeof curlSnippets;
declare const curl_getCurlInferenceSnippet: typeof getCurlInferenceSnippet;
declare const curl_hasCurlInferenceSnippet: typeof hasCurlInferenceSnippet;
declare namespace curl {
    export { curl_curlSnippets as curlSnippets, curl_getCurlInferenceSnippet as getCurlInferenceSnippet, curl_hasCurlInferenceSnippet as hasCurlInferenceSnippet, snippetBasic$2 as snippetBasic, snippetFile$2 as snippetFile, snippetTextGeneration$1 as snippetTextGeneration, snippetZeroShotClassification$2 as snippetZeroShotClassification };
}

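A sketch of the guard-then-generate pattern these declarations suggest (the token is a placeholder, and `minimal` is a `ModelDataMinimal` as in the earlier sketch):

// Sketch only: only pipeline types present in curlSnippets have a cURL snippet.
const m1: ModelDataMinimal = { id: "user/repo_name", pipeline_tag: "text-classification", tags: [], inference: "" };
if (curl.hasCurlInferenceSnippet(m1)) {
    const { content } = curl.getCurlInferenceSnippet(m1, "hf_xxx");
    console.log(content);
}
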
declare const snippetConversational: (model: ModelDataMinimal, accessToken: string, opts?: {
    streaming?: boolean;
    messages?: ChatCompletionInputMessage[];
    temperature?: GenerationParameters$2["temperature"];
    max_tokens?: GenerationParameters$2["max_tokens"];
    top_p?: GenerationParameters$2["top_p"];
}) => InferenceSnippet[];
declare const snippetZeroShotClassification$1: (model: ModelDataMinimal) => InferenceSnippet;
declare const snippetZeroShotImageClassification: (model: ModelDataMinimal) => InferenceSnippet;
declare const snippetBasic$1: (model: ModelDataMinimal) => InferenceSnippet;
declare const snippetFile$1: (model: ModelDataMinimal) => InferenceSnippet;
declare const snippetTextToImage$1: (model: ModelDataMinimal) => InferenceSnippet;
declare const snippetTabular: (model: ModelDataMinimal) => InferenceSnippet;
declare const snippetTextToAudio$1: (model: ModelDataMinimal) => InferenceSnippet;
declare const snippetDocumentQuestionAnswering: (model: ModelDataMinimal) => InferenceSnippet;
declare const pythonSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal, accessToken: string, opts?: Record<string, unknown>) => InferenceSnippet | InferenceSnippet[]>>;
declare function getPythonInferenceSnippet(model: ModelDataMinimal, accessToken: string, opts?: Record<string, unknown>): InferenceSnippet | InferenceSnippet[];
declare function hasPythonInferenceSnippet(model: ModelDataMinimal): boolean;

declare const python_getPythonInferenceSnippet: typeof getPythonInferenceSnippet;
declare const python_hasPythonInferenceSnippet: typeof hasPythonInferenceSnippet;
declare const python_pythonSnippets: typeof pythonSnippets;
declare const python_snippetConversational: typeof snippetConversational;
declare const python_snippetDocumentQuestionAnswering: typeof snippetDocumentQuestionAnswering;
declare const python_snippetTabular: typeof snippetTabular;
declare const python_snippetZeroShotImageClassification: typeof snippetZeroShotImageClassification;
declare namespace python {
    export { python_getPythonInferenceSnippet as getPythonInferenceSnippet, python_hasPythonInferenceSnippet as hasPythonInferenceSnippet, python_pythonSnippets as pythonSnippets, snippetBasic$1 as snippetBasic, python_snippetConversational as snippetConversational, python_snippetDocumentQuestionAnswering as snippetDocumentQuestionAnswering, snippetFile$1 as snippetFile, python_snippetTabular as snippetTabular, snippetTextToAudio$1 as snippetTextToAudio, snippetTextToImage$1 as snippetTextToImage, snippetZeroShotClassification$1 as snippetZeroShotClassification, python_snippetZeroShotImageClassification as snippetZeroShotImageClassification };
}

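A sketch of handling the Python generator's union return type, since per the signature above it may yield one snippet or several (e.g. one per client):

// Sketch only: normalize InferenceSnippet | InferenceSnippet[] to an array.
const m2: ModelDataMinimal = { id: "user/repo_name", pipeline_tag: "text-classification", tags: [], inference: "" };
const py = python.getPythonInferenceSnippet(m2, "hf_xxx");
for (const s of Array.isArray(py) ? py : [py]) {
    console.log(s.client ?? "default", "\n" + s.content);
}
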
declare const snippetBasic: (model: ModelDataMinimal, accessToken: string) => InferenceSnippet;
declare const snippetTextGeneration: (model: ModelDataMinimal, accessToken: string, opts?: {
    streaming?: boolean;
    messages?: ChatCompletionInputMessage[];
    temperature?: GenerationParameters$2["temperature"];
    max_tokens?: GenerationParameters$2["max_tokens"];
    top_p?: GenerationParameters$2["top_p"];
}) => InferenceSnippet | InferenceSnippet[];
declare const snippetZeroShotClassification: (model: ModelDataMinimal, accessToken: string) => InferenceSnippet;
declare const snippetTextToImage: (model: ModelDataMinimal, accessToken: string) => InferenceSnippet;
declare const snippetTextToAudio: (model: ModelDataMinimal, accessToken: string) => InferenceSnippet;
declare const snippetFile: (model: ModelDataMinimal, accessToken: string) => InferenceSnippet;
declare const jsSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal, accessToken: string, opts?: Record<string, unknown>) => InferenceSnippet | InferenceSnippet[]>>;
declare function getJsInferenceSnippet(model: ModelDataMinimal, accessToken: string): InferenceSnippet | InferenceSnippet[];
declare function hasJsInferenceSnippet(model: ModelDataMinimal): boolean;

declare const js_getJsInferenceSnippet: typeof getJsInferenceSnippet;
declare const js_hasJsInferenceSnippet: typeof hasJsInferenceSnippet;
declare const js_jsSnippets: typeof jsSnippets;
declare const js_snippetBasic: typeof snippetBasic;
declare const js_snippetFile: typeof snippetFile;
declare const js_snippetTextGeneration: typeof snippetTextGeneration;
declare const js_snippetTextToAudio: typeof snippetTextToAudio;
declare const js_snippetTextToImage: typeof snippetTextToImage;
declare const js_snippetZeroShotClassification: typeof snippetZeroShotClassification;
declare namespace js {
    export { js_getJsInferenceSnippet as getJsInferenceSnippet, js_hasJsInferenceSnippet as hasJsInferenceSnippet, js_jsSnippets as jsSnippets, js_snippetBasic as snippetBasic, js_snippetFile as snippetFile, js_snippetTextGeneration as snippetTextGeneration, js_snippetTextToAudio as snippetTextToAudio, js_snippetTextToImage as snippetTextToImage, js_snippetZeroShotClassification as snippetZeroShotClassification };
}

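The same guard-then-normalize pattern applies to the JS snippets, sketched here with a hypothetical model and placeholder token:

// Sketch only: the JS generator shares the union return type with the Python one.
const m3: ModelDataMinimal = { id: "user/repo_name", pipeline_tag: "text-classification", tags: [], inference: "" };
if (js.hasJsInferenceSnippet(m3)) {
    const snippet = js.getJsInferenceSnippet(m3, "hf_xxx");
    console.log(Array.isArray(snippet) ? snippet.map((s) => s.content).join("\n\n") : snippet.content);
}
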
declare const index_curl: typeof curl;
declare const index_inputs: typeof inputs;
declare const index_js: typeof js;
declare const index_python: typeof python;
declare namespace index {
    export { index_curl as curl, index_inputs as inputs, index_js as js, index_python as python };
}

declare enum GGMLQuantizationType {
    F32 = 0,
    F16 = 1,
    Q4_0 = 2,
    Q4_1 = 3,
    Q5_0 = 6,
    Q5_1 = 7,
    Q8_0 = 8,
    Q8_1 = 9,
    Q2_K = 10,
    Q3_K = 11,
    Q4_K = 12,
    Q5_K = 13,
    Q6_K = 14,
    Q8_K = 15,
    IQ2_XXS = 16,
    IQ2_XS = 17,
    IQ3_XXS = 18,
    IQ1_S = 19,
    IQ4_NL = 20,
    IQ3_S = 21,
    IQ2_S = 22,
    IQ4_XS = 23,
    I8 = 24,
    I16 = 25,
    I32 = 26,
    I64 = 27,
    F64 = 28,
    IQ1_M = 29,
    BF16 = 30
}
declare const GGUF_QUANT_RE: RegExp;
declare const GGUF_QUANT_RE_GLOBAL: RegExp;
declare function parseGGUFQuantLabel(fname: string): string | undefined;

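A sketch of calling `parseGGUFQuantLabel`; the exact behavior is not spelled out in these typings, so both the filename and the expected result below are assumptions based on the function name and common GGUF naming:

// Sketch only: presumably extracts the quantization tag from a GGUF filename.
const quant = parseGGUFQuantLabel("llama-3-8b-instruct.Q4_K_M.gguf");
console.log(quant); // e.g. "Q4_K_M" (assumption), or undefined if no tag is found
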
interface HardwareSpec {
    /**
     * Approximate value, in FP16 whenever possible.
     * This is only approximate/theoretical and shouldn't be taken too seriously.
     * Currently the CPU values are from cpu-monkey.com
     * while the GPU values are from techpowerup.com
     *
     * Note to reviewers: I got fed up with data entry,
     * and HuggingChat running Llama3 with Web search was failing a bit,
     * so some of those values might be slightly inaccurate. Forgive me and please feel free to improve.
     */
    tflops: number;
    /**
     * If an array is specified, options of memory size (can be VRAM, unified RAM)
     * e.g. an A100 exists in 40 or 80 GB.
     */
    memory?: number[];
}
declare const DEFAULT_MEMORY_OPTIONS: number[];
declare const SKUS: {
    GPU: {
        NVIDIA: {
            H100: { tflops: number; memory: number[]; };
            L40: { tflops: number; memory: number[]; };
            "RTX 6000 Ada": { tflops: number; memory: number[]; };
            "RTX 5880 Ada": { tflops: number; memory: number[]; };
            "RTX 5000 Ada": { tflops: number; memory: number[]; };
            "RTX 4500 Ada": { tflops: number; memory: number[]; };
            "RTX 4000 Ada": { tflops: number; memory: number[]; };
            "RTX 4000 SFF Ada": { tflops: number; memory: number[]; };
            "RTX 2000 Ada": { tflops: number; memory: number[]; };
            "RTX A4000": { tflops: number; memory: number[]; };
            A100: { tflops: number; memory: number[]; };
            A40: { tflops: number; memory: number[]; };
            A10: { tflops: number; memory: number[]; };
            A2: { tflops: number; memory: number[]; };
            "RTX 4090": { tflops: number; memory: number[]; };
            "RTX 4090D": { tflops: number; memory: number[]; };
            "RTX 4080 SUPER": { tflops: number; memory: number[]; };
            "RTX 4080": { tflops: number; memory: number[]; };
            "RTX 4070": { tflops: number; memory: number[]; };
            "RTX 4070 Ti": { tflops: number; memory: number[]; };
            "RTX 4070 Super": { tflops: number; memory: number[]; };
            "RTX 4070 Ti Super": { tflops: number; memory: number[]; };
            "RTX 4060": { tflops: number; memory: number[]; };
            "RTX 4060 Ti": { tflops: number; memory: number[]; };
            "RTX 3090": { tflops: number; memory: number[]; };
            "RTX 3090 Ti": { tflops: number; memory: number[]; };
            "RTX 3080": { tflops: number; memory: number[]; };
            "RTX 3080 Ti": { tflops: number; memory: number[]; };
            "RTX 3070": { tflops: number; memory: number[]; };
            "RTX 3070 Ti": { tflops: number; memory: number[]; };
            "RTX 3070 Ti Laptop": { tflops: number; memory: number[]; };
            "RTX 3060 Ti": { tflops: number; memory: number[]; };
            "RTX 3060": { tflops: number; memory: number[]; };
            "RTX 2080 Ti": { tflops: number; memory: number[]; };
            "RTX 2080": { tflops: number; memory: number[]; };
            "RTX 2070": { tflops: number; memory: number[]; };
            "RTX 2070 SUPER Mobile": { tflops: number; memory: number[]; };
            "RTX 2070 SUPER": { tflops: number; memory: number[]; };
            "RTX 3050 Mobile": { tflops: number; memory: number[]; };
            "RTX 2060 Mobile": { tflops: number; memory: number[]; };
            "GTX 1080 Ti": { tflops: number; memory: number[]; };
            "GTX 1070 Ti": { tflops: number; memory: number[]; };
            "RTX Titan": { tflops: number; memory: number[]; };
            "GTX 1660": { tflops: number; memory: number[]; };
            "GTX 1650 Mobile": { tflops: number; memory: number[]; };
            T4: { tflops: number; memory: number[]; };
            V100: { tflops: number; memory: number[]; };
            "Quadro P6000": { tflops: number; memory: number[]; };
            P40: { tflops: number; memory: number[]; };
        };
        AMD: {
            MI300: { tflops: number; memory: number[]; };
            MI250: { tflops: number; memory: number[]; };
            MI210: { tflops: number; memory: number[]; };
            MI100: { tflops: number; memory: number[]; };
            MI60: { tflops: number; memory: number[]; };
            MI50: { tflops: number; memory: number[]; };
            "RX 7900 XTX": { tflops: number; memory: number[]; };
            "RX 7900 XT": { tflops: number; memory: number[]; };
            "RX 7900 GRE": { tflops: number; memory: number[]; };
            "RX 7800 XT": { tflops: number; memory: number[]; };
            "RX 7700 XT": { tflops: number; memory: number[]; };
            "RX 7600 XT": { tflops: number; memory: number[]; };
            "RX 6950 XT": { tflops: number; memory: number[]; };
            "RX 6800": { tflops: number; memory: number[]; };
            "RX 6700 XT": { tflops: number; memory: number[]; };
            "RX 6700": { tflops: number; memory: number[]; };
            "Radeon Pro VII": { tflops: number; memory: number[]; };
        };
        QUALCOMM: {
            "Snapdragon X Elite X1E-00-1DE": { tflops: number; };
            "Snapdragon X Elite X1E-84-100": { tflops: number; };
            "Snapdragon X Elite X1E-80-100": { tflops: number; };
            "Snapdragon X Elite X1E-78-100": { tflops: number; };
            "Snapdragon X Plus X1P-64-100": { tflops: number; };
        };
    };
    CPU: {
        Intel: {
            "Xeon 4th Generation (Sapphire Rapids)": { tflops: number; };
            "Xeon 3th Generation (Ice Lake)": { tflops: number; };
            "Xeon 2th Generation (Cascade Lake)": { tflops: number; };
            "Intel Core 13th Generation (i9)": {
|
|
4352
|
+
tflops: number;
|
|
4353
|
+
};
|
|
4354
|
+
"Intel Core 13th Generation (i7)": {
|
|
4355
|
+
tflops: number;
|
|
4356
|
+
};
|
|
4357
|
+
"Intel Core 13th Generation (i5)": {
|
|
4358
|
+
tflops: number;
|
|
4359
|
+
};
|
|
4360
|
+
"Intel Core 13th Generation (i3)": {
|
|
4361
|
+
tflops: number;
|
|
4362
|
+
};
|
|
4363
|
+
"Intel Core 12th Generation (i9)": {
|
|
4364
|
+
tflops: number;
|
|
4365
|
+
};
|
|
4366
|
+
"Intel Core 12th Generation (i7)": {
|
|
4367
|
+
tflops: number;
|
|
4368
|
+
};
|
|
4369
|
+
"Intel Core 12th Generation (i5)": {
|
|
4370
|
+
tflops: number;
|
|
4371
|
+
};
|
|
4372
|
+
"Intel Core 12th Generation (i3)": {
|
|
4373
|
+
tflops: number;
|
|
4374
|
+
};
|
|
4375
|
+
"Intel Core 11th Generation (i9)": {
|
|
4376
|
+
tflops: number;
|
|
4377
|
+
};
|
|
4378
|
+
"Intel Core 11th Generation (i7)": {
|
|
4379
|
+
tflops: number;
|
|
4380
|
+
};
|
|
4381
|
+
"Intel Core 11th Generation (i5)": {
|
|
4382
|
+
tflops: number;
|
|
4383
|
+
};
|
|
4384
|
+
"Intel Core 11th Generation (i3)": {
|
|
4385
|
+
tflops: number;
|
|
4386
|
+
};
|
|
4387
|
+
"Intel Core 10th Generation (i9)": {
|
|
4388
|
+
tflops: number;
|
|
4389
|
+
};
|
|
4390
|
+
"Intel Core 10th Generation (i7)": {
|
|
4391
|
+
tflops: number;
|
|
4392
|
+
};
|
|
4393
|
+
"Intel Core 10th Generation (i5)": {
|
|
4394
|
+
tflops: number;
|
|
4395
|
+
};
|
|
4396
|
+
"Intel Core 10th Generation (i3)": {
|
|
4397
|
+
tflops: number;
|
|
4398
|
+
};
|
|
4399
|
+
};
|
|
4400
|
+
AMD: {
|
|
4401
|
+
"EPYC 4th Generation (Genoa)": {
|
|
4402
|
+
tflops: number;
|
|
4403
|
+
};
|
|
4404
|
+
"EPYC 3th Generation (Milan)": {
|
|
4405
|
+
tflops: number;
|
|
4406
|
+
};
|
|
4407
|
+
"EPYC 2th Generation (Rome)": {
|
|
4408
|
+
tflops: number;
|
|
4409
|
+
};
|
|
4410
|
+
"EPYC 1st Generation (Naples)": {
|
|
4411
|
+
tflops: number;
|
|
4412
|
+
};
|
|
4413
|
+
"Ryzen Zen4 7000 (Ryzen 9)": {
|
|
4414
|
+
tflops: number;
|
|
4415
|
+
};
|
|
4416
|
+
"Ryzen Zen4 7000 (Ryzen 7)": {
|
|
4417
|
+
tflops: number;
|
|
4418
|
+
};
|
|
4419
|
+
"Ryzen Zen4 7000 (Ryzen 5)": {
|
|
4420
|
+
tflops: number;
|
|
4421
|
+
};
|
|
4422
|
+
"Ryzen Zen3 5000 (Ryzen 9)": {
|
|
4423
|
+
tflops: number;
|
|
4424
|
+
};
|
|
4425
|
+
"Ryzen Zen3 5000 (Ryzen 7)": {
|
|
4426
|
+
tflops: number;
|
|
4427
|
+
};
|
|
4428
|
+
"Ryzen Zen3 5000 (Ryzen 5)": {
|
|
4429
|
+
tflops: number;
|
|
4430
|
+
};
|
|
4431
|
+
"Ryzen Zen 2 3000 (Threadripper)": {
|
|
4432
|
+
tflops: number;
|
|
4433
|
+
};
|
|
4434
|
+
"Ryzen Zen 2 3000 (Ryzen 9)": {
|
|
4435
|
+
tflops: number;
|
|
4436
|
+
};
|
|
4437
|
+
"Ryzen Zen 2 3000 (Ryzen 7)": {
|
|
4438
|
+
tflops: number;
|
|
4439
|
+
};
|
|
4440
|
+
"Ryzen Zen 2 3000 (Ryzen 5)": {
|
|
4441
|
+
tflops: number;
|
|
4442
|
+
};
|
|
4443
|
+
"Ryzen Zen 2 3000 (Ryzen 3)": {
|
|
4444
|
+
tflops: number;
|
|
4445
|
+
};
|
|
4446
|
+
};
|
|
4447
|
+
};
|
|
4448
|
+
"Apple Silicon": {
|
|
4449
|
+
"-": {
|
|
4450
|
+
"Apple M1": {
|
|
4451
|
+
tflops: number;
|
|
4452
|
+
memory: number[];
|
|
4453
|
+
};
|
|
4454
|
+
"Apple M1 Pro": {
|
|
4455
|
+
tflops: number;
|
|
4456
|
+
memory: number[];
|
|
4457
|
+
};
|
|
4458
|
+
"Apple M1 Max": {
|
|
4459
|
+
tflops: number;
|
|
4460
|
+
memory: number[];
|
|
4461
|
+
};
|
|
4462
|
+
"Apple M1 Ultra": {
|
|
4463
|
+
tflops: number;
|
|
4464
|
+
memory: number[];
|
|
4465
|
+
};
|
|
4466
|
+
"Apple M2": {
|
|
4467
|
+
tflops: number;
|
|
4468
|
+
memory: number[];
|
|
4469
|
+
};
|
|
4470
|
+
"Apple M2 Pro": {
|
|
4471
|
+
tflops: number;
|
|
4472
|
+
memory: number[];
|
|
4473
|
+
};
|
|
4474
|
+
"Apple M2 Max": {
|
|
4475
|
+
tflops: number;
|
|
4476
|
+
memory: number[];
|
|
4477
|
+
};
|
|
4478
|
+
"Apple M2 Ultra": {
|
|
4479
|
+
tflops: number;
|
|
4480
|
+
memory: number[];
|
|
4481
|
+
};
|
|
4482
|
+
"Apple M3": {
|
|
4483
|
+
tflops: number;
|
|
4484
|
+
memory: number[];
|
|
4485
|
+
};
|
|
4486
|
+
"Apple M3 Pro": {
|
|
4487
|
+
tflops: number;
|
|
4488
|
+
memory: number[];
|
|
4489
|
+
};
|
|
4490
|
+
"Apple M3 Max": {
|
|
4491
|
+
tflops: number;
|
|
4492
|
+
memory: number[];
|
|
4493
|
+
};
|
|
4494
|
+
};
|
|
4495
|
+
};
|
|
4496
|
+
};
|
|
4497
|
+
type SkuType = keyof typeof SKUS;
|
|
4498
|
+
|
|
4499
|
+
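The `SKUS` declaration above is a nested, fully-typed hardware catalog: kind → vendor → model → spec. A minimal consumption sketch (the values noted in comments are illustrative, not taken from the package):

```ts
import { SKUS, type SkuType } from "@huggingface/tasks";

// Top-level kinds: "GPU" | "CPU" | "Apple Silicon"
const kinds: SkuType[] = Object.keys(SKUS) as SkuType[];

// Keys are checked by the type system, so a typo fails at compile time.
const a100 = SKUS.GPU.NVIDIA.A100;
console.log(a100.tflops); // peak TFLOPS as a number
console.log(a100.memory); // available memory configurations, e.g. [40, 80] (illustrative)
```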
+ interface LocalAppSnippet {
+     /**
+      * Title of the snippet
+      */
+     title: string;
+     /**
+      * Optional setup guide
+      */
+     setup?: string;
+     /**
+      * Content (or command) to be run
+      */
+     content: string | string[];
+ }
+ /**
+  * Elements configurable by a local app.
+  */
+ type LocalApp = {
+     /**
+      * Name that appears in buttons
+      */
+     prettyLabel: string;
+     /**
+      * Link to get more info about a local app (website etc)
+      */
+     docsUrl: string;
+     /**
+      * main category of app
+      */
+     mainTask: PipelineType;
+     /**
+      * Whether to display a pill "macOS-only"
+      */
+     macOSOnly?: boolean;
+     comingSoon?: boolean;
+     /**
+      * IMPORTANT: function to figure out whether to display the button on a model page's main "Use this model" dropdown.
+      */
+     displayOnModelPage: (model: ModelData) => boolean;
+ } & ({
+     /**
+      * If the app supports deeplink, URL to open.
+      */
+     deeplink: (model: ModelData, filepath?: string) => URL;
+ } | {
+     /**
+      * And if not (mostly llama.cpp), snippet to copy/paste in your terminal
+      * Support the placeholder {{GGUF_FILE}} that will be replaced by the gguf file path or the list of available files.
+      * Support the placeholder {{OLLAMA_TAG}} that will be replaced by the list of available quant tags or will be removed if there are no multiple quant files in a same repo.
+      */
+     snippet: (model: ModelData, filepath?: string) => string | string[] | LocalAppSnippet | LocalAppSnippet[];
+ });
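`LocalApp` is a union by structure: the shared fields are intersected with either a `deeplink` variant or a `snippet` variant. As a sketch, a hypothetical conforming entry could look like the following; every name, URL, and command is invented for illustration, and `model.tags` / `model.id` are assumed to be `ModelData` fields:

```ts
import type { LocalApp, ModelData } from "@huggingface/tasks";

// Hypothetical GGUF-only desktop app (all labels/URLs/commands are placeholders).
const myApp: LocalApp = {
    prettyLabel: "My GGUF Runner",
    docsUrl: "https://example.com/docs",
    mainTask: "text-generation",
    displayOnModelPage: (model: ModelData) => model.tags?.includes("gguf") ?? false,
    // Snippet variant: used when the app has no deeplink support.
    snippet: (model) => [
        {
            title: "Run the model",
            setup: "curl -L https://example.com/install.sh | sh",
            content: `my-runner run hf.co/${model.id}`,
        },
    ],
};
```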
+ declare function isTgiModel(model: ModelData): boolean;
+ declare function isLlamaCppGgufModel(model: ModelData): boolean;
+ /**
+  * Add your new local app here.
+  *
+  * This is open to new suggestions and awesome upcoming apps.
+  *
+  * /!\ IMPORTANT
+  *
+  * If possible, you need to support deeplinks and be as cross-platform as possible.
+  *
+  * Ping the HF team if we can help with anything!
+  */
+ declare const LOCAL_APPS: {
+     "llama.cpp": {
+         prettyLabel: string;
+         docsUrl: string;
+         mainTask: "text-generation";
+         displayOnModelPage: typeof isLlamaCppGgufModel;
+         snippet: (model: ModelData, filepath?: string) => LocalAppSnippet[];
+     };
+     "node-llama-cpp": {
+         prettyLabel: string;
+         docsUrl: string;
+         mainTask: "text-generation";
+         displayOnModelPage: typeof isLlamaCppGgufModel;
+         snippet: (model: ModelData, filepath?: string) => LocalAppSnippet[];
+     };
+     vllm: {
+         prettyLabel: string;
+         docsUrl: string;
+         mainTask: "text-generation";
+         displayOnModelPage: (model: ModelData) => boolean;
+         snippet: (model: ModelData) => LocalAppSnippet[];
+     };
+     tgi: {
+         prettyLabel: string;
+         docsUrl: string;
+         mainTask: "text-generation";
+         displayOnModelPage: typeof isTgiModel;
+         snippet: (model: ModelData) => LocalAppSnippet[];
+     };
+     lmstudio: {
+         prettyLabel: string;
+         docsUrl: string;
+         mainTask: "text-generation";
+         displayOnModelPage: (model: ModelData) => boolean;
+         deeplink: (model: ModelData, filepath: string | undefined) => URL;
+     };
+     localai: {
+         prettyLabel: string;
+         docsUrl: string;
+         mainTask: "text-generation";
+         displayOnModelPage: typeof isLlamaCppGgufModel;
+         snippet: (model: ModelData, filepath?: string) => LocalAppSnippet[];
+     };
+     jan: {
+         prettyLabel: string;
+         docsUrl: string;
+         mainTask: "text-generation";
+         displayOnModelPage: typeof isLlamaCppGgufModel;
+         deeplink: (model: ModelData) => URL;
+     };
+     backyard: {
+         prettyLabel: string;
+         docsUrl: string;
+         mainTask: "text-generation";
+         displayOnModelPage: typeof isLlamaCppGgufModel;
+         deeplink: (model: ModelData) => URL;
+     };
+     sanctum: {
+         prettyLabel: string;
+         docsUrl: string;
+         mainTask: "text-generation";
+         displayOnModelPage: typeof isLlamaCppGgufModel;
+         deeplink: (model: ModelData) => URL;
+     };
+     jellybox: {
+         prettyLabel: string;
+         docsUrl: string;
+         mainTask: "text-generation";
+         displayOnModelPage: (model: ModelData) => boolean;
+         deeplink: (model: ModelData) => URL;
+     };
+     msty: {
+         prettyLabel: string;
+         docsUrl: string;
+         mainTask: "text-generation";
+         displayOnModelPage: typeof isLlamaCppGgufModel;
+         deeplink: (model: ModelData) => URL;
+     };
+     recursechat: {
+         prettyLabel: string;
+         docsUrl: string;
+         mainTask: "text-generation";
+         macOSOnly: true;
+         displayOnModelPage: typeof isLlamaCppGgufModel;
+         deeplink: (model: ModelData) => URL;
+     };
+     drawthings: {
+         prettyLabel: string;
+         docsUrl: string;
+         mainTask: "text-to-image";
+         macOSOnly: true;
+         displayOnModelPage: (model: ModelData) => boolean;
+         deeplink: (model: ModelData) => URL;
+     };
+     diffusionbee: {
+         prettyLabel: string;
+         docsUrl: string;
+         mainTask: "text-to-image";
+         macOSOnly: true;
+         displayOnModelPage: (model: ModelData) => boolean;
+         deeplink: (model: ModelData) => URL;
+     };
+     joyfusion: {
+         prettyLabel: string;
+         docsUrl: string;
+         mainTask: "text-to-image";
+         macOSOnly: true;
+         displayOnModelPage: (model: ModelData) => boolean;
+         deeplink: (model: ModelData) => URL;
+     };
+     invoke: {
+         prettyLabel: string;
+         docsUrl: string;
+         mainTask: "text-to-image";
+         displayOnModelPage: (model: ModelData) => boolean;
+         deeplink: (model: ModelData) => URL;
+     };
+     ollama: {
+         prettyLabel: string;
+         docsUrl: string;
+         mainTask: "text-generation";
+         displayOnModelPage: typeof isLlamaCppGgufModel;
+         snippet: (model: ModelData, filepath?: string) => string;
+     };
+ };
+ type LocalAppKey = keyof typeof LOCAL_APPS;
+
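Because the `deeplink` and `snippet` variants are mutually exclusive, a consumer narrows with an `in` check before acting. A minimal sketch, not the Hub's actual rendering code:

```ts
import { LOCAL_APPS, type LocalApp, type ModelData } from "@huggingface/tasks";

// Resolve what a given local app offers for a model: a URL to open, or text to copy.
function getAction(app: LocalApp, model: ModelData): string {
    if (!app.displayOnModelPage(model)) {
        return "(not applicable to this model)";
    }
    // Narrow the union: an app exposes either a deeplink or a snippet.
    if ("deeplink" in app) {
        return app.deeplink(model).toString();
    }
    const s = app.snippet(model);
    return typeof s === "string" ? s : JSON.stringify(s);
}

// e.g. getAction(LOCAL_APPS["llama.cpp"], someModel)
```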
+ /**
+  * Elements configurable by a dataset library.
+  */
+ interface DatasetLibraryUiElement {
+     /**
+      * Pretty name of the library.
+      * displayed (in tags?, and) on the main
+      * call-to-action button on the dataset page.
+      */
+     prettyLabel: string;
+     /**
+      * Repo name of the library's (usually on GitHub) code repo
+      */
+     repoName: string;
+     /**
+      * URL to library's (usually on GitHub) code repo
+      */
+     repoUrl: string;
+     /**
+      * URL to library's docs
+      */
+     docsUrl?: string;
+ }
+ declare const DATASET_LIBRARIES_UI_ELEMENTS: {
+     mlcroissant: {
+         prettyLabel: string;
+         repoName: string;
+         repoUrl: string;
+         docsUrl: string;
+     };
+     webdataset: {
+         prettyLabel: string;
+         repoName: string;
+         repoUrl: string;
+         docsUrl: string;
+     };
+     datasets: {
+         prettyLabel: string;
+         repoName: string;
+         repoUrl: string;
+         docsUrl: string;
+     };
+     pandas: {
+         prettyLabel: string;
+         repoName: string;
+         repoUrl: string;
+         docsUrl: string;
+     };
+     dask: {
+         prettyLabel: string;
+         repoName: string;
+         repoUrl: string;
+         docsUrl: string;
+     };
+     distilabel: {
+         prettyLabel: string;
+         repoName: string;
+         repoUrl: string;
+         docsUrl: string;
+     };
+     fiftyone: {
+         prettyLabel: string;
+         repoName: string;
+         repoUrl: string;
+         docsUrl: string;
+     };
+     argilla: {
+         prettyLabel: string;
+         repoName: string;
+         repoUrl: string;
+         docsUrl: string;
+     };
+     polars: {
+         prettyLabel: string;
+         repoName: string;
+         repoUrl: string;
+         docsUrl: string;
+     };
+     duckdb: {
+         prettyLabel: string;
+         repoName: string;
+         repoUrl: string;
+         docsUrl: string;
+     };
+ };
+ type DatasetLibraryKey = keyof typeof DATASET_LIBRARIES_UI_ELEMENTS;
+
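Each dataset-library entry carries the label and links the dataset page's call-to-action needs. A small sketch that lists them generically:

```ts
import {
    DATASET_LIBRARIES_UI_ELEMENTS,
    type DatasetLibraryKey,
    type DatasetLibraryUiElement,
} from "@huggingface/tasks";

// Print every integration with its docs link, falling back to the code repo URL.
for (const key of Object.keys(DATASET_LIBRARIES_UI_ELEMENTS) as DatasetLibraryKey[]) {
    const lib: DatasetLibraryUiElement = DATASET_LIBRARIES_UI_ELEMENTS[key];
    console.log(`${lib.prettyLabel}: ${lib.docsUrl ?? lib.repoUrl}`);
}
```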
+ export { ALL_DISPLAY_MODEL_LIBRARY_KEYS, ALL_MODEL_LIBRARY_KEYS, type AddedToken, type AudioClassificationInput, type AudioClassificationOutput, type AudioClassificationOutputElement, type AudioClassificationParameters, type AutomaticSpeechRecognitionInput, type AutomaticSpeechRecognitionOutput, type AutomaticSpeechRecognitionOutputChunk, type AutomaticSpeechRecognitionParameters, type BoundingBox, type ChatCompletionInput, type ChatCompletionInputMessage, type ChatCompletionOutput, type ChatCompletionOutputComplete, type ChatCompletionOutputMessage, type ChatCompletionStreamOutput, type ChatCompletionStreamOutputChoice, type ChatCompletionStreamOutputDelta, type ClassificationOutputTransform$1 as ClassificationOutputTransform, DATASET_LIBRARIES_UI_ELEMENTS, DEFAULT_MEMORY_OPTIONS, type DatasetLibraryKey, type DatasetLibraryUiElement, type DepthEstimationInput, type DepthEstimationOutput, type DocumentQuestionAnsweringInput, type DocumentQuestionAnsweringInputData, type DocumentQuestionAnsweringOutput, type DocumentQuestionAnsweringOutputElement, type DocumentQuestionAnsweringParameters, type EarlyStoppingUnion$2 as EarlyStoppingUnion, type ExampleRepo, type FeatureExtractionInput, type FeatureExtractionInputTruncationDirection, type FeatureExtractionOutput, type FillMaskInput, type FillMaskOutput, type FillMaskOutputElement, type FillMaskParameters, GGMLQuantizationType, GGUF_QUANT_RE, GGUF_QUANT_RE_GLOBAL, type GenerationParameters$2 as GenerationParameters, type HardwareSpec, type ImageClassificationInput, type ImageClassificationOutput, type ImageClassificationOutputElement, type ImageClassificationParameters, type ImageSegmentationInput, type ImageSegmentationOutput, type ImageSegmentationOutputElement, type ImageSegmentationParameters, type ImageSegmentationSubtask, type ImageToImageInput, type ImageToImageOutput, type ImageToImageParameters, type ImageToTextInput, type ImageToTextOutput, type ImageToTextParameters, LIBRARY_TASK_MAPPING, LOCAL_APPS, type LibraryUiElement, type LocalApp, type LocalAppKey, type LocalAppSnippet, MAPPING_DEFAULT_WIDGET, MODALITIES, MODALITY_LABELS, MODEL_LIBRARIES_UI_ELEMENTS, type Modality, type ModelData, type ModelLibraryKey, type ObjectDetectionInput, type ObjectDetectionOutput, type ObjectDetectionOutputElement, type ObjectDetectionParameters, PIPELINE_DATA, PIPELINE_TYPES, PIPELINE_TYPES_SET, type PipelineData, type PipelineType, type QuestionAnsweringInput, type QuestionAnsweringInputData, type QuestionAnsweringOutput, type QuestionAnsweringOutputElement, type QuestionAnsweringParameters, SKUS, SPECIAL_TOKENS_ATTRIBUTES, SUBTASK_TYPES, type SentenceSimilarityInput, type SentenceSimilarityInputData, type SentenceSimilarityOutput, type SkuType, type SpecialTokensMap, type SummarizationInput, type SummarizationOutput, type SummarizationParameters, type SummarizationTruncationStrategy, TASKS_DATA, TASKS_MODEL_LIBRARIES, type TableQuestionAnsweringInput, type TableQuestionAnsweringInputData, type TableQuestionAnsweringOutput, type TableQuestionAnsweringOutputElement, type TargetSize$1 as TargetSize, type TaskData, type TaskDataCustom, type TaskDemo, type TaskDemoEntry, type TextClassificationInput, type TextClassificationOutput, type TextClassificationOutputElement, type TextClassificationParameters, type TextGenerationInput, type TextGenerationInputGenerateParameters, type TextGenerationOutput, type TextGenerationOutputBestOfSequence, type TextGenerationOutputDetails, type TextGenerationOutputFinishReason, type TextGenerationOutputPrefillToken, type TextGenerationOutputToken, type TextGenerationStreamOutput, type TextGenerationStreamOutputStreamDetails, type TextToImageInput, type TextToImageOutput, type TextToImageParameters, type TextToSpeechInput, type TextToSpeechOutput, type TextToSpeechParameters, type TokenClassificationAggregationStrategy, type TokenClassificationInput, type TokenClassificationOutput, type TokenClassificationOutputElement, type TokenClassificationParameters, type TokenizerConfig, type TransformersInfo, type TranslationInput, type TranslationOutput, type VideoClassificationInput, type VideoClassificationOutput, type VideoClassificationOutputElement, type VideoClassificationParameters, type VisualQuestionAnsweringInput, type VisualQuestionAnsweringInputData, type VisualQuestionAnsweringOutput, type VisualQuestionAnsweringOutputElement, type VisualQuestionAnsweringParameters, type WidgetExample, type WidgetExampleAssetAndPromptInput, type WidgetExampleAssetAndTextInput, type WidgetExampleAssetAndZeroShotInput, type WidgetExampleAssetInput, type WidgetExampleAttribute, type WidgetExampleChatInput, type WidgetExampleOutput, type WidgetExampleOutputAnswerScore, type WidgetExampleOutputLabels, type WidgetExampleOutputText, type WidgetExampleOutputUrl, type WidgetExampleSentenceSimilarityInput, type WidgetExampleStructuredDataInput, type WidgetExampleTableDataInput, type WidgetExampleTextAndContextInput, type WidgetExampleTextAndTableInput, type WidgetExampleTextInput, type WidgetExampleZeroShotTextInput, type WidgetType, type WordBox, type ZeroShotClassificationInput, type ZeroShotClassificationInputData, type ZeroShotClassificationOutput, type ZeroShotClassificationOutputElement, type ZeroShotClassificationParameters, type ZeroShotImageClassificationInput, type ZeroShotImageClassificationInputData, type ZeroShotImageClassificationOutput, type ZeroShotImageClassificationOutputElement, type ZeroShotImageClassificationParameters, type ZeroShotObjectDetectionInput, type ZeroShotObjectDetectionInputData, type ZeroShotObjectDetectionOutput, type ZeroShotObjectDetectionOutputElement, parseGGUFQuantLabel, index as snippets };
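As a closing illustration, a downstream consumer picks from this export surface as needed; for example `parseGGUFQuantLabel`, which is assumed here to return the matched quantization label from a GGUF filename, or `undefined` if none matches:

```ts
import { parseGGUFQuantLabel } from "@huggingface/tasks";

// Extract the quantization label from a GGUF filename, if present.
console.log(parseGGUFQuantLabel("llama-3-8b.Q4_K_M.gguf")); // e.g. "Q4_K_M"
```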