@huggingface/tasks 0.13.1-test → 0.13.1-test2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (198)
  1. package/package.json +4 -2
  2. package/src/dataset-libraries.ts +89 -0
  3. package/src/default-widget-inputs.ts +718 -0
  4. package/src/gguf.ts +40 -0
  5. package/src/hardware.ts +482 -0
  6. package/src/index.ts +59 -0
  7. package/src/library-to-tasks.ts +76 -0
  8. package/src/local-apps.ts +412 -0
  9. package/src/model-data.ts +149 -0
  10. package/src/model-libraries-downloads.ts +18 -0
  11. package/src/model-libraries-snippets.ts +1128 -0
  12. package/src/model-libraries.ts +820 -0
  13. package/src/pipelines.ts +698 -0
  14. package/src/snippets/common.ts +39 -0
  15. package/src/snippets/curl.spec.ts +94 -0
  16. package/src/snippets/curl.ts +120 -0
  17. package/src/snippets/index.ts +7 -0
  18. package/src/snippets/inputs.ts +167 -0
  19. package/src/snippets/js.spec.ts +148 -0
  20. package/src/snippets/js.ts +305 -0
  21. package/src/snippets/python.spec.ts +144 -0
  22. package/src/snippets/python.ts +321 -0
  23. package/src/snippets/types.ts +16 -0
  24. package/src/tasks/audio-classification/about.md +86 -0
  25. package/src/tasks/audio-classification/data.ts +81 -0
  26. package/src/tasks/audio-classification/inference.ts +52 -0
  27. package/src/tasks/audio-classification/spec/input.json +35 -0
  28. package/src/tasks/audio-classification/spec/output.json +11 -0
  29. package/src/tasks/audio-to-audio/about.md +56 -0
  30. package/src/tasks/audio-to-audio/data.ts +70 -0
  31. package/src/tasks/automatic-speech-recognition/about.md +90 -0
  32. package/src/tasks/automatic-speech-recognition/data.ts +82 -0
  33. package/src/tasks/automatic-speech-recognition/inference.ts +160 -0
  34. package/src/tasks/automatic-speech-recognition/spec/input.json +35 -0
  35. package/src/tasks/automatic-speech-recognition/spec/output.json +38 -0
  36. package/src/tasks/chat-completion/inference.ts +322 -0
  37. package/src/tasks/chat-completion/spec/input.json +350 -0
  38. package/src/tasks/chat-completion/spec/output.json +206 -0
  39. package/src/tasks/chat-completion/spec/stream_output.json +213 -0
  40. package/src/tasks/common-definitions.json +100 -0
  41. package/src/tasks/depth-estimation/about.md +45 -0
  42. package/src/tasks/depth-estimation/data.ts +70 -0
  43. package/src/tasks/depth-estimation/inference.ts +35 -0
  44. package/src/tasks/depth-estimation/spec/input.json +25 -0
  45. package/src/tasks/depth-estimation/spec/output.json +16 -0
  46. package/src/tasks/document-question-answering/about.md +53 -0
  47. package/src/tasks/document-question-answering/data.ts +85 -0
  48. package/src/tasks/document-question-answering/inference.ts +110 -0
  49. package/src/tasks/document-question-answering/spec/input.json +85 -0
  50. package/src/tasks/document-question-answering/spec/output.json +36 -0
  51. package/src/tasks/feature-extraction/about.md +72 -0
  52. package/src/tasks/feature-extraction/data.ts +57 -0
  53. package/src/tasks/feature-extraction/inference.ts +40 -0
  54. package/src/tasks/feature-extraction/spec/input.json +47 -0
  55. package/src/tasks/feature-extraction/spec/output.json +15 -0
  56. package/src/tasks/fill-mask/about.md +51 -0
  57. package/src/tasks/fill-mask/data.ts +79 -0
  58. package/src/tasks/fill-mask/inference.ts +62 -0
  59. package/src/tasks/fill-mask/spec/input.json +38 -0
  60. package/src/tasks/fill-mask/spec/output.json +29 -0
  61. package/src/tasks/image-classification/about.md +50 -0
  62. package/src/tasks/image-classification/data.ts +88 -0
  63. package/src/tasks/image-classification/inference.ts +52 -0
  64. package/src/tasks/image-classification/spec/input.json +35 -0
  65. package/src/tasks/image-classification/spec/output.json +11 -0
  66. package/src/tasks/image-feature-extraction/about.md +23 -0
  67. package/src/tasks/image-feature-extraction/data.ts +59 -0
  68. package/src/tasks/image-segmentation/about.md +63 -0
  69. package/src/tasks/image-segmentation/data.ts +99 -0
  70. package/src/tasks/image-segmentation/inference.ts +69 -0
  71. package/src/tasks/image-segmentation/spec/input.json +45 -0
  72. package/src/tasks/image-segmentation/spec/output.json +26 -0
  73. package/src/tasks/image-text-to-text/about.md +76 -0
  74. package/src/tasks/image-text-to-text/data.ts +102 -0
  75. package/src/tasks/image-to-3d/about.md +62 -0
  76. package/src/tasks/image-to-3d/data.ts +75 -0
  77. package/src/tasks/image-to-image/about.md +129 -0
  78. package/src/tasks/image-to-image/data.ts +101 -0
  79. package/src/tasks/image-to-image/inference.ts +68 -0
  80. package/src/tasks/image-to-image/spec/input.json +55 -0
  81. package/src/tasks/image-to-image/spec/output.json +12 -0
  82. package/src/tasks/image-to-text/about.md +61 -0
  83. package/src/tasks/image-to-text/data.ts +82 -0
  84. package/src/tasks/image-to-text/inference.ts +143 -0
  85. package/src/tasks/image-to-text/spec/input.json +34 -0
  86. package/src/tasks/image-to-text/spec/output.json +14 -0
  87. package/src/tasks/index.ts +312 -0
  88. package/src/tasks/keypoint-detection/about.md +57 -0
  89. package/src/tasks/keypoint-detection/data.ts +50 -0
  90. package/src/tasks/mask-generation/about.md +65 -0
  91. package/src/tasks/mask-generation/data.ts +55 -0
  92. package/src/tasks/object-detection/about.md +37 -0
  93. package/src/tasks/object-detection/data.ts +86 -0
  94. package/src/tasks/object-detection/inference.ts +75 -0
  95. package/src/tasks/object-detection/spec/input.json +31 -0
  96. package/src/tasks/object-detection/spec/output.json +50 -0
  97. package/src/tasks/placeholder/about.md +15 -0
  98. package/src/tasks/placeholder/data.ts +21 -0
  99. package/src/tasks/placeholder/spec/input.json +35 -0
  100. package/src/tasks/placeholder/spec/output.json +17 -0
  101. package/src/tasks/question-answering/about.md +56 -0
  102. package/src/tasks/question-answering/data.ts +75 -0
  103. package/src/tasks/question-answering/inference.ts +99 -0
  104. package/src/tasks/question-answering/spec/input.json +67 -0
  105. package/src/tasks/question-answering/spec/output.json +29 -0
  106. package/src/tasks/reinforcement-learning/about.md +167 -0
  107. package/src/tasks/reinforcement-learning/data.ts +75 -0
  108. package/src/tasks/sentence-similarity/about.md +97 -0
  109. package/src/tasks/sentence-similarity/data.ts +101 -0
  110. package/src/tasks/sentence-similarity/inference.ts +32 -0
  111. package/src/tasks/sentence-similarity/spec/input.json +40 -0
  112. package/src/tasks/sentence-similarity/spec/output.json +12 -0
  113. package/src/tasks/summarization/about.md +58 -0
  114. package/src/tasks/summarization/data.ts +76 -0
  115. package/src/tasks/summarization/inference.ts +57 -0
  116. package/src/tasks/summarization/spec/input.json +42 -0
  117. package/src/tasks/summarization/spec/output.json +14 -0
  118. package/src/tasks/table-question-answering/about.md +43 -0
  119. package/src/tasks/table-question-answering/data.ts +59 -0
  120. package/src/tasks/table-question-answering/inference.ts +61 -0
  121. package/src/tasks/table-question-answering/spec/input.json +44 -0
  122. package/src/tasks/table-question-answering/spec/output.json +40 -0
  123. package/src/tasks/tabular-classification/about.md +65 -0
  124. package/src/tasks/tabular-classification/data.ts +68 -0
  125. package/src/tasks/tabular-regression/about.md +87 -0
  126. package/src/tasks/tabular-regression/data.ts +57 -0
  127. package/src/tasks/text-classification/about.md +173 -0
  128. package/src/tasks/text-classification/data.ts +103 -0
  129. package/src/tasks/text-classification/inference.ts +51 -0
  130. package/src/tasks/text-classification/spec/input.json +35 -0
  131. package/src/tasks/text-classification/spec/output.json +11 -0
  132. package/src/tasks/text-generation/about.md +154 -0
  133. package/src/tasks/text-generation/data.ts +114 -0
  134. package/src/tasks/text-generation/inference.ts +200 -0
  135. package/src/tasks/text-generation/spec/input.json +219 -0
  136. package/src/tasks/text-generation/spec/output.json +179 -0
  137. package/src/tasks/text-generation/spec/stream_output.json +103 -0
  138. package/src/tasks/text-to-3d/about.md +62 -0
  139. package/src/tasks/text-to-3d/data.ts +56 -0
  140. package/src/tasks/text-to-audio/inference.ts +143 -0
  141. package/src/tasks/text-to-audio/spec/input.json +31 -0
  142. package/src/tasks/text-to-audio/spec/output.json +17 -0
  143. package/src/tasks/text-to-image/about.md +96 -0
  144. package/src/tasks/text-to-image/data.ts +100 -0
  145. package/src/tasks/text-to-image/inference.ts +75 -0
  146. package/src/tasks/text-to-image/spec/input.json +63 -0
  147. package/src/tasks/text-to-image/spec/output.json +13 -0
  148. package/src/tasks/text-to-speech/about.md +63 -0
  149. package/src/tasks/text-to-speech/data.ts +79 -0
  150. package/src/tasks/text-to-speech/inference.ts +145 -0
  151. package/src/tasks/text-to-speech/spec/input.json +31 -0
  152. package/src/tasks/text-to-speech/spec/output.json +7 -0
  153. package/src/tasks/text-to-video/about.md +41 -0
  154. package/src/tasks/text-to-video/data.ts +102 -0
  155. package/src/tasks/text2text-generation/inference.ts +55 -0
  156. package/src/tasks/text2text-generation/spec/input.json +55 -0
  157. package/src/tasks/text2text-generation/spec/output.json +14 -0
  158. package/src/tasks/token-classification/about.md +76 -0
  159. package/src/tasks/token-classification/data.ts +92 -0
  160. package/src/tasks/token-classification/inference.ts +85 -0
  161. package/src/tasks/token-classification/spec/input.json +65 -0
  162. package/src/tasks/token-classification/spec/output.json +37 -0
  163. package/src/tasks/translation/about.md +65 -0
  164. package/src/tasks/translation/data.ts +70 -0
  165. package/src/tasks/translation/inference.ts +67 -0
  166. package/src/tasks/translation/spec/input.json +50 -0
  167. package/src/tasks/translation/spec/output.json +14 -0
  168. package/src/tasks/unconditional-image-generation/about.md +50 -0
  169. package/src/tasks/unconditional-image-generation/data.ts +72 -0
  170. package/src/tasks/video-classification/about.md +37 -0
  171. package/src/tasks/video-classification/data.ts +84 -0
  172. package/src/tasks/video-classification/inference.ts +59 -0
  173. package/src/tasks/video-classification/spec/input.json +42 -0
  174. package/src/tasks/video-classification/spec/output.json +10 -0
  175. package/src/tasks/video-text-to-text/about.md +98 -0
  176. package/src/tasks/video-text-to-text/data.ts +66 -0
  177. package/src/tasks/visual-question-answering/about.md +48 -0
  178. package/src/tasks/visual-question-answering/data.ts +97 -0
  179. package/src/tasks/visual-question-answering/inference.ts +62 -0
  180. package/src/tasks/visual-question-answering/spec/input.json +41 -0
  181. package/src/tasks/visual-question-answering/spec/output.json +21 -0
  182. package/src/tasks/zero-shot-classification/about.md +40 -0
  183. package/src/tasks/zero-shot-classification/data.ts +70 -0
  184. package/src/tasks/zero-shot-classification/inference.ts +67 -0
  185. package/src/tasks/zero-shot-classification/spec/input.json +50 -0
  186. package/src/tasks/zero-shot-classification/spec/output.json +11 -0
  187. package/src/tasks/zero-shot-image-classification/about.md +75 -0
  188. package/src/tasks/zero-shot-image-classification/data.ts +84 -0
  189. package/src/tasks/zero-shot-image-classification/inference.ts +61 -0
  190. package/src/tasks/zero-shot-image-classification/spec/input.json +45 -0
  191. package/src/tasks/zero-shot-image-classification/spec/output.json +10 -0
  192. package/src/tasks/zero-shot-object-detection/about.md +45 -0
  193. package/src/tasks/zero-shot-object-detection/data.ts +67 -0
  194. package/src/tasks/zero-shot-object-detection/inference.ts +66 -0
  195. package/src/tasks/zero-shot-object-detection/spec/input.json +40 -0
  196. package/src/tasks/zero-shot-object-detection/spec/output.json +47 -0
  197. package/src/tokenizer-data.ts +32 -0
  198. package/src/widget-example.ts +125 -0
@@ -0,0 +1,412 @@
1
+ import { parseGGUFQuantLabel } from "./gguf.js";
2
+ import type { ModelData } from "./model-data.js";
3
+ import type { PipelineType } from "./pipelines.js";
4
+
5
+ export interface LocalAppSnippet {
6
+ /**
7
+ * Title of the snippet
8
+ */
9
+ title: string;
10
+ /**
11
+ * Optional setup guide
12
+ */
13
+ setup?: string;
14
+ /**
15
+ * Content (or command) to be run
16
+ */
17
+ content: string | string[];
18
+ }
19
+
20
+ /**
21
+ * Elements configurable by a local app.
22
+ */
23
+ export type LocalApp = {
24
+ /**
25
+ * Name that appears in buttons
26
+ */
27
+ prettyLabel: string;
28
+ /**
29
+ * Link to get more info about a local app (website etc)
30
+ */
31
+ docsUrl: string;
32
+ /**
33
+ * main category of app
34
+ */
35
+ mainTask: PipelineType;
36
+ /**
37
+ * Whether to display a pill "macOS-only"
38
+ */
39
+ macOSOnly?: boolean;
40
+
41
+ comingSoon?: boolean;
42
+ /**
43
+ * IMPORTANT: function to figure out whether to display the button on a model page's main "Use this model" dropdown.
44
+ */
45
+ displayOnModelPage: (model: ModelData) => boolean;
46
+ } & (
47
+ | {
48
+ /**
49
+ * If the app supports deeplink, URL to open.
50
+ */
51
+ deeplink: (model: ModelData, filepath?: string) => URL;
52
+ }
53
+ | {
54
+ /**
55
+ * And if not (mostly llama.cpp), snippet to copy/paste in your terminal
56
+ * Support the placeholder {{GGUF_FILE}} that will be replaced by the gguf file path or the list of available files.
57
+ * Support the placeholder {{OLLAMA_TAG}} that will be replaced by the list of available quant tags or will be removed if there are no multiple quant files in a same repo.
58
+ */
59
+ snippet: (model: ModelData, filepath?: string) => string | string[] | LocalAppSnippet | LocalAppSnippet[];
60
+ }
61
+ );
62
+
63
+ function isAwqModel(model: ModelData): boolean {
64
+ return model.config?.quantization_config?.quant_method === "awq";
65
+ }
66
+
67
+ function isGptqModel(model: ModelData): boolean {
68
+ return model.config?.quantization_config?.quant_method === "gptq";
69
+ }
70
+
71
+ function isAqlmModel(model: ModelData): boolean {
72
+ return model.config?.quantization_config?.quant_method === "aqlm";
73
+ }
74
+
75
+ function isMarlinModel(model: ModelData): boolean {
76
+ return model.config?.quantization_config?.quant_method === "marlin";
77
+ }
78
+
79
+ function isTransformersModel(model: ModelData): boolean {
80
+ return model.tags.includes("transformers");
81
+ }
82
+ function isTgiModel(model: ModelData): boolean {
83
+ return model.tags.includes("text-generation-inference");
84
+ }
85
+
86
+ function isLlamaCppGgufModel(model: ModelData) {
87
+ return !!model.gguf?.context_length;
88
+ }
89
+
90
+ function isMlxModel(model: ModelData) {
91
+ return model.tags.includes("mlx");
92
+ }
93
+
94
+ const snippetLlamacpp = (model: ModelData, filepath?: string): LocalAppSnippet[] => {
95
+ const command = (binary: string) =>
96
+ [
97
+ "# Load and run the model:",
98
+ `${binary} \\`,
99
+ ` --hf-repo "${model.id}" \\`,
100
+ ` --hf-file ${filepath ?? "{{GGUF_FILE}}"} \\`,
101
+ ' -p "You are a helpful assistant" \\',
102
+ " --conversation",
103
+ ].join("\n");
104
+ return [
105
+ {
106
+ title: "Install from brew",
107
+ setup: "brew install llama.cpp",
108
+ content: command("llama-cli"),
109
+ },
110
+ {
111
+ title: "Use pre-built binary",
112
+ setup: [
113
+ // prettier-ignore
114
+ "# Download pre-built binary from:",
115
+ "# https://github.com/ggerganov/llama.cpp/releases",
116
+ ].join("\n"),
117
+ content: command("./llama-cli"),
118
+ },
119
+ {
120
+ title: "Build from source code",
121
+ setup: [
122
+ "git clone https://github.com/ggerganov/llama.cpp.git",
123
+ "cd llama.cpp",
124
+ "LLAMA_CURL=1 make llama-cli",
125
+ ].join("\n"),
126
+ content: command("./llama-cli"),
127
+ },
128
+ ];
129
+ };
130
+
131
+ const snippetNodeLlamaCppCli = (model: ModelData, filepath?: string): LocalAppSnippet[] => {
132
+ return [
133
+ {
134
+ title: "Chat with the model",
135
+ content: [
136
+ `npx -y node-llama-cpp chat \\`,
137
+ ` --model "hf:${model.id}/${filepath ?? "{{GGUF_FILE}}"}" \\`,
138
+ ` --prompt 'Hi there!'`,
139
+ ].join("\n"),
140
+ },
141
+ {
142
+ title: "Estimate the model compatibility with your hardware",
143
+ content: `npx -y node-llama-cpp inspect estimate "hf:${model.id}/${filepath ?? "{{GGUF_FILE}}"}"`,
144
+ },
145
+ ];
146
+ };
147
+
148
+ const snippetOllama = (model: ModelData, filepath?: string): string => {
149
+ if (filepath) {
150
+ const quantLabel = parseGGUFQuantLabel(filepath);
151
+ const ollamatag = quantLabel ? `:${quantLabel}` : "";
152
+ return `ollama run hf.co/${model.id}${ollamatag}`;
153
+ }
154
+ return `ollama run hf.co/${model.id}{{OLLAMA_TAG}}`;
155
+ };
156
+
157
+ const snippetLocalAI = (model: ModelData, filepath?: string): LocalAppSnippet[] => {
158
+ const command = (binary: string) =>
159
+ ["# Load and run the model:", `${binary} huggingface://${model.id}/${filepath ?? "{{GGUF_FILE}}"}`].join("\n");
160
+ return [
161
+ {
162
+ title: "Install from binary",
163
+ setup: "curl https://localai.io/install.sh | sh",
164
+ content: command("local-ai run"),
165
+ },
166
+ {
167
+ title: "Use Docker images",
168
+ setup: [
169
+ // prettier-ignore
170
+ "# Pull the image:",
171
+ "docker pull localai/localai:latest-cpu",
172
+ ].join("\n"),
173
+ content: command(
174
+ "docker run -p 8080:8080 --name localai -v $PWD/models:/build/models localai/localai:latest-cpu"
175
+ ),
176
+ },
177
+ ];
178
+ };
179
+
180
+ const snippetVllm = (model: ModelData): LocalAppSnippet[] => {
181
+ const runCommand = [
182
+ "# Call the server using curl:",
183
+ `curl -X POST "http://localhost:8000/v1/chat/completions" \\`,
184
+ ` -H "Content-Type: application/json" \\`,
185
+ ` --data '{`,
186
+ ` "model": "${model.id}",`,
187
+ ` "messages": [`,
188
+ ` {"role": "user", "content": "Hello!"}`,
189
+ ` ]`,
190
+ ` }'`,
191
+ ];
192
+ return [
193
+ {
194
+ title: "Install from pip",
195
+ setup: ["# Install vLLM from pip:", "pip install vllm"].join("\n"),
196
+ content: [`# Load and run the model:\nvllm serve "${model.id}"`, runCommand.join("\n")],
197
+ },
198
+ {
199
+ title: "Use Docker images",
200
+ setup: [
201
+ "# Deploy with docker on Linux:",
202
+ `docker run --runtime nvidia --gpus all \\`,
203
+ ` --name my_vllm_container \\`,
204
+ ` -v ~/.cache/huggingface:/root/.cache/huggingface \\`,
205
+ ` --env "HUGGING_FACE_HUB_TOKEN=<secret>" \\`,
206
+ ` -p 8000:8000 \\`,
207
+ ` --ipc=host \\`,
208
+ ` vllm/vllm-openai:latest \\`,
209
+ ` --model ${model.id}`,
210
+ ].join("\n"),
211
+ content: [
212
+ `# Load and run the model:\ndocker exec -it my_vllm_container bash -c "vllm serve ${model.id}"`,
213
+ runCommand.join("\n"),
214
+ ],
215
+ },
216
+ ];
217
+ };
218
+ const snippetTgi = (model: ModelData): LocalAppSnippet[] => {
219
+ const runCommand = [
220
+ "# Call the server using curl:",
221
+ `curl -X POST "http://localhost:8000/v1/chat/completions" \\`,
222
+ ` -H "Content-Type: application/json" \\`,
223
+ ` --data '{`,
224
+ ` "model": "${model.id}",`,
225
+ ` "messages": [`,
226
+ ` {"role": "user", "content": "What is the capital of France?"}`,
227
+ ` ]`,
228
+ ` }'`,
229
+ ];
230
+ return [
231
+ {
232
+ title: "Use Docker images",
233
+ setup: [
234
+ "# Deploy with docker on Linux:",
235
+ `docker run --gpus all \\`,
236
+ ` -v ~/.cache/huggingface:/root/.cache/huggingface \\`,
237
+ ` -e HF_TOKEN="<secret>" \\`,
238
+ ` -p 8000:80 \\`,
239
+ ` ghcr.io/huggingface/text-generation-inference:latest \\`,
240
+ ` --model-id ${model.id}`,
241
+ ].join("\n"),
242
+ content: [runCommand.join("\n")],
243
+ },
244
+ ];
245
+ };
246
+
247
+ /**
248
+ * Add your new local app here.
249
+ *
250
+ * This is open to new suggestions and awesome upcoming apps.
251
+ *
252
+ * /!\ IMPORTANT
253
+ *
254
+ * If possible, you need to support deeplinks and be as cross-platform as possible.
255
+ *
256
+ * Ping the HF team if we can help with anything!
257
+ */
258
+ export const LOCAL_APPS = {
259
+ "llama.cpp": {
260
+ prettyLabel: "llama.cpp",
261
+ docsUrl: "https://github.com/ggerganov/llama.cpp",
262
+ mainTask: "text-generation",
263
+ displayOnModelPage: isLlamaCppGgufModel,
264
+ snippet: snippetLlamacpp,
265
+ },
266
+ "node-llama-cpp": {
267
+ prettyLabel: "node-llama-cpp",
268
+ docsUrl: "https://node-llama-cpp.withcat.ai",
269
+ mainTask: "text-generation",
270
+ displayOnModelPage: isLlamaCppGgufModel,
271
+ snippet: snippetNodeLlamaCppCli,
272
+ },
273
+ vllm: {
274
+ prettyLabel: "vLLM",
275
+ docsUrl: "https://docs.vllm.ai",
276
+ mainTask: "text-generation",
277
+ displayOnModelPage: (model: ModelData) =>
278
+ (isAwqModel(model) ||
279
+ isGptqModel(model) ||
280
+ isAqlmModel(model) ||
281
+ isMarlinModel(model) ||
282
+ isLlamaCppGgufModel(model) ||
283
+ isTransformersModel(model)) &&
284
+ (model.pipeline_tag === "text-generation" || model.pipeline_tag === "image-text-to-text"),
285
+ snippet: snippetVllm,
286
+ },
287
+ tgi: {
288
+ prettyLabel: "TGI",
289
+ docsUrl: "https://huggingface.co/docs/text-generation-inference/",
290
+ mainTask: "text-generation",
291
+ displayOnModelPage: isTgiModel,
292
+ snippet: snippetTgi,
293
+ },
294
+ lmstudio: {
295
+ prettyLabel: "LM Studio",
296
+ docsUrl: "https://lmstudio.ai",
297
+ mainTask: "text-generation",
298
+ displayOnModelPage: (model) => isLlamaCppGgufModel(model) || isMlxModel(model),
299
+ deeplink: (model, filepath) =>
300
+ new URL(`lmstudio://open_from_hf?model=${model.id}${filepath ? `&file=${filepath}` : ""}`),
301
+ },
302
+ localai: {
303
+ prettyLabel: "LocalAI",
304
+ docsUrl: "https://github.com/mudler/LocalAI",
305
+ mainTask: "text-generation",
306
+ displayOnModelPage: isLlamaCppGgufModel,
307
+ snippet: snippetLocalAI,
308
+ },
309
+ jan: {
310
+ prettyLabel: "Jan",
311
+ docsUrl: "https://jan.ai",
312
+ mainTask: "text-generation",
313
+ displayOnModelPage: isLlamaCppGgufModel,
314
+ deeplink: (model) => new URL(`jan://models/huggingface/${model.id}`),
315
+ },
316
+ backyard: {
317
+ prettyLabel: "Backyard AI",
318
+ docsUrl: "https://backyard.ai",
319
+ mainTask: "text-generation",
320
+ displayOnModelPage: isLlamaCppGgufModel,
321
+ deeplink: (model) => new URL(`https://backyard.ai/hf/model/${model.id}`),
322
+ },
323
+ sanctum: {
324
+ prettyLabel: "Sanctum",
325
+ docsUrl: "https://sanctum.ai",
326
+ mainTask: "text-generation",
327
+ displayOnModelPage: isLlamaCppGgufModel,
328
+ deeplink: (model) => new URL(`sanctum://open_from_hf?model=${model.id}`),
329
+ },
330
+ jellybox: {
331
+ prettyLabel: "Jellybox",
332
+ docsUrl: "https://jellybox.com",
333
+ mainTask: "text-generation",
334
+ displayOnModelPage: (model) =>
335
+ isLlamaCppGgufModel(model) ||
336
+ (model.library_name === "diffusers" &&
337
+ model.tags.includes("safetensors") &&
338
+ (model.pipeline_tag === "text-to-image" || model.tags.includes("lora"))),
339
+ deeplink: (model) => {
340
+ if (isLlamaCppGgufModel(model)) {
341
+ return new URL(`jellybox://llm/models/huggingface/LLM/${model.id}`);
342
+ } else if (model.tags.includes("lora")) {
343
+ return new URL(`jellybox://image/models/huggingface/ImageLora/${model.id}`);
344
+ } else {
345
+ return new URL(`jellybox://image/models/huggingface/Image/${model.id}`);
346
+ }
347
+ },
348
+ },
349
+ msty: {
350
+ prettyLabel: "Msty",
351
+ docsUrl: "https://msty.app",
352
+ mainTask: "text-generation",
353
+ displayOnModelPage: isLlamaCppGgufModel,
354
+ deeplink: (model) => new URL(`msty://models/search/hf/${model.id}`),
355
+ },
356
+ recursechat: {
357
+ prettyLabel: "RecurseChat",
358
+ docsUrl: "https://recurse.chat",
359
+ mainTask: "text-generation",
360
+ macOSOnly: true,
361
+ displayOnModelPage: isLlamaCppGgufModel,
362
+ deeplink: (model) => new URL(`recursechat://new-hf-gguf-model?hf-model-id=${model.id}`),
363
+ },
364
+ drawthings: {
365
+ prettyLabel: "Draw Things",
366
+ docsUrl: "https://drawthings.ai",
367
+ mainTask: "text-to-image",
368
+ macOSOnly: true,
369
+ displayOnModelPage: (model) =>
370
+ model.library_name === "diffusers" && (model.pipeline_tag === "text-to-image" || model.tags.includes("lora")),
371
+ deeplink: (model) => {
372
+ if (model.tags.includes("lora")) {
373
+ return new URL(`https://drawthings.ai/import/diffusers/pipeline.load_lora_weights?repo_id=${model.id}`);
374
+ } else {
375
+ return new URL(`https://drawthings.ai/import/diffusers/pipeline.from_pretrained?repo_id=${model.id}`);
376
+ }
377
+ },
378
+ },
379
+ diffusionbee: {
380
+ prettyLabel: "DiffusionBee",
381
+ docsUrl: "https://diffusionbee.com",
382
+ mainTask: "text-to-image",
383
+ macOSOnly: true,
384
+ displayOnModelPage: (model) => model.library_name === "diffusers" && model.pipeline_tag === "text-to-image",
385
+ deeplink: (model) => new URL(`https://diffusionbee.com/huggingface_import?model_id=${model.id}`),
386
+ },
387
+ joyfusion: {
388
+ prettyLabel: "JoyFusion",
389
+ docsUrl: "https://joyfusion.app",
390
+ mainTask: "text-to-image",
391
+ macOSOnly: true,
392
+ displayOnModelPage: (model) =>
393
+ model.tags.includes("coreml") && model.tags.includes("joyfusion") && model.pipeline_tag === "text-to-image",
394
+ deeplink: (model) => new URL(`https://joyfusion.app/import_from_hf?repo_id=${model.id}`),
395
+ },
396
+ invoke: {
397
+ prettyLabel: "Invoke",
398
+ docsUrl: "https://github.com/invoke-ai/InvokeAI",
399
+ mainTask: "text-to-image",
400
+ displayOnModelPage: (model) => model.library_name === "diffusers" && model.pipeline_tag === "text-to-image",
401
+ deeplink: (model) => new URL(`https://models.invoke.ai/huggingface/${model.id}`),
402
+ },
403
+ ollama: {
404
+ prettyLabel: "Ollama",
405
+ docsUrl: "https://ollama.com",
406
+ mainTask: "text-generation",
407
+ displayOnModelPage: isLlamaCppGgufModel,
408
+ snippet: snippetOllama,
409
+ },
410
+ } satisfies Record<string, LocalApp>;
411
+
412
+ export type LocalAppKey = keyof typeof LOCAL_APPS;
@@ -0,0 +1,149 @@
1
+ import type { PipelineType } from "./pipelines.js";
2
+ import type { WidgetExample } from "./widget-example.js";
3
+ import type { TokenizerConfig } from "./tokenizer-data.js";
4
+
5
+ /**
6
+ * Public interface for model metadata
7
+ */
8
+ export interface ModelData {
9
+ /**
10
+ * id of model (e.g. 'user/repo_name')
11
+ */
12
+ id: string;
13
+ /**
14
+ * Whether or not to enable inference widget for this model
15
+ * TODO(type it)
16
+ */
17
+ inference: string;
18
+ /**
19
+ * is this model private?
20
+ */
21
+ private?: boolean;
22
+ /**
23
+ * this dictionary has useful information about the model configuration
24
+ */
25
+ config?: {
26
+ architectures?: string[];
27
+ /**
28
+ * Dict of AutoModel or Auto… class name to local import path in the repo
29
+ */
30
+ auto_map?: {
31
+ /**
32
+ * String Property
33
+ */
34
+ [x: string]: string;
35
+ };
36
+ model_type?: string;
37
+ quantization_config?: {
38
+ bits?: number;
39
+ load_in_4bit?: boolean;
40
+ load_in_8bit?: boolean;
41
+ /**
42
+ * awq, gptq, aqlm, marlin, … Used by vLLM
43
+ */
44
+ quant_method?: string;
45
+ };
46
+ tokenizer_config?: TokenizerConfig;
47
+ adapter_transformers?: {
48
+ model_name?: string;
49
+ model_class?: string;
50
+ };
51
+ diffusers?: {
52
+ _class_name?: string;
53
+ };
54
+ sklearn?: {
55
+ model?: {
56
+ file?: string;
57
+ };
58
+ model_format?: string;
59
+ };
60
+ speechbrain?: {
61
+ speechbrain_interface?: string;
62
+ vocoder_interface?: string;
63
+ vocoder_model_id?: string;
64
+ };
65
+ peft?: {
66
+ base_model_name_or_path?: string;
67
+ task_type?: string;
68
+ };
69
+ };
70
+ /**
71
+ * all the model tags
72
+ */
73
+ tags: string[];
74
+ /**
75
+ * transformers-specific info to display in the code sample.
76
+ */
77
+ transformersInfo?: TransformersInfo;
78
+ /**
79
+ * Pipeline type
80
+ */
81
+ pipeline_tag?: PipelineType | undefined;
82
+ /**
83
+ * for relevant models, get mask token
84
+ */
85
+ mask_token?: string | undefined;
86
+ /**
87
+ * Example data that will be fed into the widget.
88
+ *
89
+ * can be set in the model card metadata (under `widget`),
90
+ * or by default in `DefaultWidget.ts`
91
+ */
92
+ widgetData?: WidgetExample[] | undefined;
93
+ /**
94
+ * Parameters that will be used by the widget when calling Inference API (serverless)
95
+ * https://huggingface.co/docs/api-inference/detailed_parameters
96
+ *
97
+ * can be set in the model card metadata (under `inference/parameters`)
98
+ * Example:
99
+ * inference:
100
+ * parameters:
101
+ * key: val
102
+ */
103
+ cardData?: {
104
+ inference?:
105
+ | boolean
106
+ | {
107
+ parameters?: Record<string, unknown>;
108
+ };
109
+ base_model?: string | string[];
110
+ instance_prompt?: string | null;
111
+ };
112
+ /**
113
+ * Library name
114
+ * Example: transformers, SpeechBrain, Stanza, etc.
115
+ */
116
+ library_name?: string;
117
+ safetensors?: {
118
+ parameters: Record<string, number>;
119
+ total: number;
120
+ sharded: boolean;
121
+ };
122
+ gguf?: {
123
+ total: number;
124
+ architecture?: string;
125
+ context_length?: number;
126
+ };
127
+ }
128
+
129
+ /**
130
+ * transformers-specific info to display in the code sample.
131
+ */
132
+ export interface TransformersInfo {
133
+ /**
134
+ * e.g. AutoModelForSequenceClassification
135
+ */
136
+ auto_model: string;
137
+ /**
138
+ * if set in config.json's auto_map
139
+ */
140
+ custom_class?: string;
141
+ /**
142
+ * e.g. text-classification
143
+ */
144
+ pipeline_tag?: PipelineType;
145
+ /**
146
+ * e.g. "AutoTokenizer" | "AutoFeatureExtractor" | "AutoProcessor"
147
+ */
148
+ processor?: string;
149
+ }
@@ -0,0 +1,18 @@
1
+ /**
2
+ * This file contains the (simplified) types used
3
+ * to represent queries that are made to Elastic
4
+ * in order to count number of model downloads
5
+ *
6
+ * Read this doc about download stats on the Hub:
7
+ *
8
+ * https://huggingface.co/docs/hub/models-download-stats
9
+ * Available fields:
10
+ * - path: the complete file path (relative) (e.g: "prefix/file.extension")
11
+ * - path_prefix: the prefix of the file path (e.g: "prefix/", empty if no prefix)
12
+ * - path_extension: the extension of the file path (e.g: "extension")
13
+ * - path_filename: the name of the file path (e.g: "file")
14
+ * see also:
15
+ * https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-query-string-query.html
16
+ */
17
+
18
+ export type ElasticSearchQuery = string;