@huggingface/tasks 0.10.22 → 0.11.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.cjs +96 -41
- package/dist/index.js +96 -40
- package/dist/scripts/inference-tei-import.d.ts +2 -0
- package/dist/scripts/inference-tei-import.d.ts.map +1 -0
- package/dist/src/index.d.ts +0 -1
- package/dist/src/index.d.ts.map +1 -1
- package/dist/src/model-data.d.ts +2 -15
- package/dist/src/model-data.d.ts.map +1 -1
- package/dist/src/model-libraries-snippets.d.ts +1 -0
- package/dist/src/model-libraries-snippets.d.ts.map +1 -1
- package/dist/src/model-libraries.d.ts +37 -2
- package/dist/src/model-libraries.d.ts.map +1 -1
- package/dist/src/tasks/depth-estimation/data.d.ts.map +1 -1
- package/dist/src/tasks/feature-extraction/data.d.ts.map +1 -1
- package/dist/src/tasks/feature-extraction/inference.d.ts +22 -7
- package/dist/src/tasks/feature-extraction/inference.d.ts.map +1 -1
- package/dist/src/tasks/object-detection/data.d.ts.map +1 -1
- package/dist/src/tasks/zero-shot-image-classification/data.d.ts.map +1 -1
- package/dist/src/tasks/zero-shot-object-detection/data.d.ts.map +1 -1
- package/package.json +3 -2
- package/src/index.ts +0 -1
- package/src/model-data.ts +2 -16
- package/src/model-libraries-snippets.ts +10 -0
- package/src/model-libraries.ts +35 -0
- package/src/tasks/depth-estimation/about.md +10 -1
- package/src/tasks/depth-estimation/data.ts +13 -9
- package/src/tasks/feature-extraction/about.md +46 -1
- package/src/tasks/feature-extraction/data.ts +9 -4
- package/src/tasks/feature-extraction/inference.ts +23 -5
- package/src/tasks/feature-extraction/spec/input.json +34 -13
- package/src/tasks/feature-extraction/spec/output.json +10 -2
- package/src/tasks/image-text-to-text/data.ts +1 -1
- package/src/tasks/object-detection/data.ts +13 -6
- package/src/tasks/text-generation/data.ts +1 -1
- package/src/tasks/text-to-image/data.ts +4 -4
- package/src/tasks/zero-shot-image-classification/about.md +2 -3
- package/src/tasks/zero-shot-image-classification/data.ts +4 -0
- package/src/tasks/zero-shot-object-detection/data.ts +8 -3

package/dist/src/model-data.d.ts.map
CHANGED
@@ -1 +1 @@
-
{"version":3,"file":"model-data.d.ts","sourceRoot":"","sources":["../../src/model-data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,aAAa,CAAC;AAChD,OAAO,KAAK,EAAE,aAAa,EAAE,MAAM,kBAAkB,CAAC;AACtD,OAAO,KAAK,EAAE,eAAe,EAAE,MAAM,kBAAkB,CAAC;AAExD
+
{"version":3,"file":"model-data.d.ts","sourceRoot":"","sources":["../../src/model-data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,aAAa,CAAC;AAChD,OAAO,KAAK,EAAE,aAAa,EAAE,MAAM,kBAAkB,CAAC;AACtD,OAAO,KAAK,EAAE,eAAe,EAAE,MAAM,kBAAkB,CAAC;AAExD;;GAEG;AACH,MAAM,WAAW,SAAS;IACzB;;OAEG;IACH,EAAE,EAAE,MAAM,CAAC;IACX;;;OAGG;IACH,SAAS,EAAE,MAAM,CAAC;IAClB;;OAEG;IACH,OAAO,CAAC,EAAE,OAAO,CAAC;IAClB;;OAEG;IACH,MAAM,CAAC,EAAE;QACR,aAAa,CAAC,EAAE,MAAM,EAAE,CAAC;QACzB;;WAEG;QACH,QAAQ,CAAC,EAAE;YACV;;eAEG;YACH,CAAC,CAAC,EAAE,MAAM,GAAG,MAAM,CAAC;SACpB,CAAC;QACF,UAAU,CAAC,EAAE,MAAM,CAAC;QACpB,mBAAmB,CAAC,EAAE;YACrB,IAAI,CAAC,EAAE,MAAM,CAAC;YACd,YAAY,CAAC,EAAE,OAAO,CAAC;YACvB,YAAY,CAAC,EAAE,OAAO,CAAC;SACvB,CAAC;QACF,gBAAgB,CAAC,EAAE,eAAe,CAAC;QACnC,oBAAoB,CAAC,EAAE;YACtB,UAAU,CAAC,EAAE,MAAM,CAAC;YACpB,WAAW,CAAC,EAAE,MAAM,CAAC;SACrB,CAAC;QACF,SAAS,CAAC,EAAE;YACX,WAAW,CAAC,EAAE,MAAM,CAAC;SACrB,CAAC;QACF,OAAO,CAAC,EAAE;YACT,KAAK,CAAC,EAAE;gBACP,IAAI,CAAC,EAAE,MAAM,CAAC;aACd,CAAC;YACF,YAAY,CAAC,EAAE,MAAM,CAAC;SACtB,CAAC;QACF,WAAW,CAAC,EAAE;YACb,qBAAqB,CAAC,EAAE,MAAM,CAAC;YAC/B,iBAAiB,CAAC,EAAE,MAAM,CAAC;YAC3B,gBAAgB,CAAC,EAAE,MAAM,CAAC;SAC1B,CAAC;QACF,IAAI,CAAC,EAAE;YACN,uBAAuB,CAAC,EAAE,MAAM,CAAC;YACjC,SAAS,CAAC,EAAE,MAAM,CAAC;SACnB,CAAC;KACF,CAAC;IACF;;OAEG;IACH,IAAI,EAAE,MAAM,EAAE,CAAC;IACf;;OAEG;IACH,gBAAgB,CAAC,EAAE,gBAAgB,CAAC;IACpC;;OAEG;IACH,YAAY,CAAC,EAAE,YAAY,GAAG,SAAS,CAAC;IACxC;;OAEG;IACH,UAAU,CAAC,EAAE,MAAM,GAAG,SAAS,CAAC;IAChC;;;;;OAKG;IACH,UAAU,CAAC,EAAE,aAAa,EAAE,GAAG,SAAS,CAAC;IACzC;;;;;;;;;OASG;IACH,QAAQ,CAAC,EAAE;QACV,SAAS,CAAC,EACP,OAAO,GACP;YACA,UAAU,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;SACpC,CAAC;QACL,UAAU,CAAC,EAAE,MAAM,GAAG,MAAM,EAAE,CAAC;KAC/B,CAAC;IACF;;;OAGG;IACH,YAAY,CAAC,EAAE,MAAM,CAAC;CACtB;AAED;;GAEG;AACH,MAAM,WAAW,gBAAgB;IAChC;;OAEG;IACH,UAAU,EAAE,MAAM,CAAC;IACnB;;OAEG;IACH,YAAY,CAAC,EAAE,MAAM,CAAC;IACtB;;OAEG;IACH,YAAY,CAAC,EAAE,YAAY,CAAC;IAC5B;;OAEG;IACH,SAAS,CAAC,EAAE,MAAM,CAAC;CACnB"}

package/dist/src/model-libraries-snippets.d.ts
CHANGED
@@ -17,6 +17,7 @@ export declare const keras: (model: ModelData) => string[];
export declare const keras_nlp: (model: ModelData) => string[];
export declare const tf_keras: (model: ModelData) => string[];
export declare const mars5_tts: (model: ModelData) => string[];
+ export declare const mesh_anything: () => string[];
export declare const open_clip: (model: ModelData) => string[];
export declare const paddlenlp: (model: ModelData) => string[];
export declare const pyannote_audio_pipeline: (model: ModelData) => string[];

package/dist/src/model-libraries-snippets.d.ts.map
CHANGED
@@ -1 +1 @@
-
{"version":3,"file":"model-libraries-snippets.d.ts","sourceRoot":"","sources":["../../src/model-libraries-snippets.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,SAAS,EAAE,MAAM,cAAc,CAAC;AAY9C,eAAO,MAAM,QAAQ,UAAW,SAAS,KAAG,MAAM,EAKjD,CAAC;AAkBF,eAAO,MAAM,QAAQ,UAAW,SAAS,KAAG,MAAM,EAKjD,CAAC;AAEF,eAAO,MAAM,QAAQ,UAAW,SAAS,KAAG,MAAM,EAIjD,CAAC;AAEF,eAAO,MAAM,SAAS,UAAW,SAAS,KAAG,MAAM,EAkBlD,CAAC;AAMF,eAAO,MAAM,QAAQ,UAAW,SAAS,KAAG,MAAM,EAIjD,CAAC;AAEF,eAAO,MAAM,KAAK,UAAW,SAAS,KAAG,MAAM,EAI9C,CAAC;AA+BF,eAAO,MAAM,SAAS,UAAW,SAAS,KAAG,MAAM,EAUlD,CAAC;AAEF,eAAO,MAAM,MAAM,UAAW,SAAS,KAAG,MAAM,EAgB/C,CAAC;AAEF,eAAO,MAAM,SAAS,UAAW,SAAS,KAAG,MAAM,EAMlD,CAAC;AAEF,eAAO,MAAM,SAAS,UAAW,SAAS,KAAG,MAAM,EASlD,CAAC;AAIF,eAAO,MAAM,MAAM,UAAW,SAAS,KAAG,MAAM,EAO/C,CAAC;AAEF,eAAO,MAAM,OAAO,UAAW,SAAS,KAAG,MAAM,EAMhD,CAAC;AAEF,eAAO,MAAM,KAAK,UAAW,SAAS,KAAG,MAAM,EAI9C,CAAC;AAEF,eAAO,MAAM,MAAM,UAAW,SAAS,KAAG,MAAM,EAI/C,CAAC;AAEF,eAAO,MAAM,KAAK,UAAW,SAAS,KAAG,MAAM,EAS9C,CAAC;AAEF,eAAO,MAAM,SAAS,UAAW,SAAS,KAAG,MAAM,EAUlD,CAAC;AAEF,eAAO,MAAM,QAAQ,UAAW,SAAS,KAAG,MAAM,EAOjD,CAAC;AAEF,eAAO,MAAM,SAAS,UAAW,SAAS,KAAG,MAAM,EAKlD,CAAC;AAEF,eAAO,MAAM,SAAS,UAAW,SAAS,KAAG,MAAM,EAKlD,CAAC;AAEF,eAAO,MAAM,SAAS,UAAW,SAAS,KAAG,MAAM,EAsBlD,CAAC;AAEF,eAAO,MAAM,uBAAuB,UAAW,SAAS,KAAG,MAAM,EAehE,CAAC;AAiBF,eAAO,MAAM,cAAc,UAAW,SAAS,KAAG,MAAM,EAKvD,CAAC;AAyBF,eAAO,MAAM,aAAa,UAAW,SAAS,KAAG,MAAM,EAOtD,CAAC;AAEF,eAAO,MAAM,IAAI,UAAW,SAAS,KAAG,MAAM,EAI7C,CAAC;AAsCF,eAAO,MAAM,OAAO,UAAW,SAAS,KAAG,MAAM,EAehD,CAAC;AAEF,eAAO,MAAM,kBAAkB,UAAW,SAAS,KAAG,MAAM,EAmC3D,CAAC;AAEF,eAAO,MAAM,MAAM,UAAW,SAAS,KAAG,MAAM,EAI/C,CAAC;AAEF,eAAO,MAAM,aAAa,UAAW,SAAS,KAAG,MAAM,EAEtD,CAAC;AAEF,eAAO,MAAM,oBAAoB,UAAW,SAAS,KAAG,MAAM,EAI7D,CAAC;AAEF,eAAO,MAAM,MAAM,UAAW,SAAS,KAAG,MAAM,EAI/C,CAAC;AAEF,eAAO,MAAM,KAAK,UAAW,SAAS,KAAG,MAAM,EAU9C,CAAC;AAEF,eAAO,MAAM,WAAW,UAAW,SAAS,KAAG,MAAM,EAIpD,CAAC;AAEF,eAAO,MAAM,MAAM,UAAW,SAAS,KAAG,MAAM,EAK/C,CAAC;AAkBF,eAAO,MAAM,WAAW,UAAW,SAAS,KAAG,MAAM,EAkBpD,CAAC;AAEF,eAAO,MAAM,YAAY,UAAW,SAAS,KAAG,MAAM,EA4CrD,CAAC;AAEF,eAAO,MAAM,cAAc,UAAW,SAAS,KAAG,MAAM,EAcvD,CAAC;AAiBF,eAAO,MAAM,IAAI,UAAW,SAAS,KAAG,MAAM,EAkB7C,CAAC;AAEF,eAAO,MAAM,QAAQ,UAAW,SAAS,KAAG,MAAM,EAKjD,CAAC;AAEF,eAAO,MAAM,gBAAgB,UAAW,SAAS,KAAG,MAAM,EAMzD,CAAC;AAgBF,eAAO,MAAM,QAAQ,UAAW,SAAS,KAAG,MAAM,EAEjD,CAAC;AAEF,eAAO,MAAM,MAAM,QAA6B,MAAM,EAMrD,CAAC;AAEF,eAAO,MAAM,UAAU,UAAW,SAAS,KAAG,MAAM,EAInD,CAAC;AAEF,eAAO,MAAM,OAAO,QAAO,MAAM,EAYhC,CAAC;AAEF,eAAO,MAAM,GAAG,UAAW,SAAS,KAAG,MAAM,EAK5C,CAAC;AAEF,eAAO,MAAM,KAAK,UAAW,SAAS,KAAG,MAAM,EAI9C,CAAC;AAEF,eAAO,MAAM,IAAI,UAAW,SAAS,KAAG,MAAM,EAQ7C,CAAC;AAEF,eAAO,MAAM,MAAM,UAAW,SAAS,KAAG,MAAM,EAI/C,CAAC;AA6BF,eAAO,MAAM,UAAU,UAAW,SAAS,KAAG,MAAM,EAUnD,CAAC;AAEF,eAAO,MAAM,UAAU,QAAO,MAAM,EAYnC,CAAC"}
+
{"version":3,"file":"model-libraries-snippets.d.ts","sourceRoot":"","sources":["../../src/model-libraries-snippets.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,SAAS,EAAE,MAAM,cAAc,CAAC;AAY9C,eAAO,MAAM,QAAQ,UAAW,SAAS,KAAG,MAAM,EAKjD,CAAC;AAkBF,eAAO,MAAM,QAAQ,UAAW,SAAS,KAAG,MAAM,EAKjD,CAAC;AAEF,eAAO,MAAM,QAAQ,UAAW,SAAS,KAAG,MAAM,EAIjD,CAAC;AAEF,eAAO,MAAM,SAAS,UAAW,SAAS,KAAG,MAAM,EAkBlD,CAAC;AAMF,eAAO,MAAM,QAAQ,UAAW,SAAS,KAAG,MAAM,EAIjD,CAAC;AAEF,eAAO,MAAM,KAAK,UAAW,SAAS,KAAG,MAAM,EAI9C,CAAC;AA+BF,eAAO,MAAM,SAAS,UAAW,SAAS,KAAG,MAAM,EAUlD,CAAC;AAEF,eAAO,MAAM,MAAM,UAAW,SAAS,KAAG,MAAM,EAgB/C,CAAC;AAEF,eAAO,MAAM,SAAS,UAAW,SAAS,KAAG,MAAM,EAMlD,CAAC;AAEF,eAAO,MAAM,SAAS,UAAW,SAAS,KAAG,MAAM,EASlD,CAAC;AAIF,eAAO,MAAM,MAAM,UAAW,SAAS,KAAG,MAAM,EAO/C,CAAC;AAEF,eAAO,MAAM,OAAO,UAAW,SAAS,KAAG,MAAM,EAMhD,CAAC;AAEF,eAAO,MAAM,KAAK,UAAW,SAAS,KAAG,MAAM,EAI9C,CAAC;AAEF,eAAO,MAAM,MAAM,UAAW,SAAS,KAAG,MAAM,EAI/C,CAAC;AAEF,eAAO,MAAM,KAAK,UAAW,SAAS,KAAG,MAAM,EAS9C,CAAC;AAEF,eAAO,MAAM,SAAS,UAAW,SAAS,KAAG,MAAM,EAUlD,CAAC;AAEF,eAAO,MAAM,QAAQ,UAAW,SAAS,KAAG,MAAM,EAOjD,CAAC;AAEF,eAAO,MAAM,SAAS,UAAW,SAAS,KAAG,MAAM,EAKlD,CAAC;AAEF,eAAO,MAAM,aAAa,QAAO,MAAM,EAQtC,CAAC;AAEF,eAAO,MAAM,SAAS,UAAW,SAAS,KAAG,MAAM,EAKlD,CAAC;AAEF,eAAO,MAAM,SAAS,UAAW,SAAS,KAAG,MAAM,EAsBlD,CAAC;AAEF,eAAO,MAAM,uBAAuB,UAAW,SAAS,KAAG,MAAM,EAehE,CAAC;AAiBF,eAAO,MAAM,cAAc,UAAW,SAAS,KAAG,MAAM,EAKvD,CAAC;AAyBF,eAAO,MAAM,aAAa,UAAW,SAAS,KAAG,MAAM,EAOtD,CAAC;AAEF,eAAO,MAAM,IAAI,UAAW,SAAS,KAAG,MAAM,EAI7C,CAAC;AAsCF,eAAO,MAAM,OAAO,UAAW,SAAS,KAAG,MAAM,EAehD,CAAC;AAEF,eAAO,MAAM,kBAAkB,UAAW,SAAS,KAAG,MAAM,EAmC3D,CAAC;AAEF,eAAO,MAAM,MAAM,UAAW,SAAS,KAAG,MAAM,EAI/C,CAAC;AAEF,eAAO,MAAM,aAAa,UAAW,SAAS,KAAG,MAAM,EAEtD,CAAC;AAEF,eAAO,MAAM,oBAAoB,UAAW,SAAS,KAAG,MAAM,EAI7D,CAAC;AAEF,eAAO,MAAM,MAAM,UAAW,SAAS,KAAG,MAAM,EAI/C,CAAC;AAEF,eAAO,MAAM,KAAK,UAAW,SAAS,KAAG,MAAM,EAU9C,CAAC;AAEF,eAAO,MAAM,WAAW,UAAW,SAAS,KAAG,MAAM,EAIpD,CAAC;AAEF,eAAO,MAAM,MAAM,UAAW,SAAS,KAAG,MAAM,EAK/C,CAAC;AAkBF,eAAO,MAAM,WAAW,UAAW,SAAS,KAAG,MAAM,EAkBpD,CAAC;AAEF,eAAO,MAAM,YAAY,UAAW,SAAS,KAAG,MAAM,EA4CrD,CAAC;AAEF,eAAO,MAAM,cAAc,UAAW,SAAS,KAAG,MAAM,EAcvD,CAAC;AAiBF,eAAO,MAAM,IAAI,UAAW,SAAS,KAAG,MAAM,EAkB7C,CAAC;AAEF,eAAO,MAAM,QAAQ,UAAW,SAAS,KAAG,MAAM,EAKjD,CAAC;AAEF,eAAO,MAAM,gBAAgB,UAAW,SAAS,KAAG,MAAM,EAMzD,CAAC;AAgBF,eAAO,MAAM,QAAQ,UAAW,SAAS,KAAG,MAAM,EAEjD,CAAC;AAEF,eAAO,MAAM,MAAM,QAA6B,MAAM,EAMrD,CAAC;AAEF,eAAO,MAAM,UAAU,UAAW,SAAS,KAAG,MAAM,EAInD,CAAC;AAEF,eAAO,MAAM,OAAO,QAAO,MAAM,EAYhC,CAAC;AAEF,eAAO,MAAM,GAAG,UAAW,SAAS,KAAG,MAAM,EAK5C,CAAC;AAEF,eAAO,MAAM,KAAK,UAAW,SAAS,KAAG,MAAM,EAI9C,CAAC;AAEF,eAAO,MAAM,IAAI,UAAW,SAAS,KAAG,MAAM,EAQ7C,CAAC;AAEF,eAAO,MAAM,MAAM,UAAW,SAAS,KAAG,MAAM,EAI/C,CAAC;AA6BF,eAAO,MAAM,UAAU,UAAW,SAAS,KAAG,MAAM,EAUnD,CAAC;AAEF,eAAO,MAAM,UAAU,QAAO,MAAM,EAYnC,CAAC"}

package/dist/src/model-libraries.d.ts
CHANGED
@@ -118,6 +118,12 @@ export declare const MODEL_LIBRARIES_UI_ELEMENTS: {
filter: false;
countDownloads: string;
};
+ champ: {
+ prettyLabel: string;
+ repoName: string;
+ repoUrl: string;
+ countDownloads: string;
+ };
chat_tts: {
prettyLabel: string;
repoName: string;
@@ -126,6 +132,13 @@ export declare const MODEL_LIBRARIES_UI_ELEMENTS: {
filter: false;
countDownloads: string;
};
+ colpali: {
+ prettyLabel: string;
+ repoName: string;
+ repoUrl: string;
+ filter: false;
+ countDownloads: string;
+ };
diffusers: {
prettyLabel: string;
repoName: string;
@@ -224,6 +237,12 @@ export declare const MODEL_LIBRARIES_UI_ELEMENTS: {
filter: false;
countDownloads: string;
};
+ hallo: {
+ prettyLabel: string;
+ repoName: string;
+ repoUrl: string;
+ countDownloads: string;
+ };
"hunyuan-dit": {
prettyLabel: string;
repoName: string;
@@ -273,6 +292,14 @@ export declare const MODEL_LIBRARIES_UI_ELEMENTS: {
countDownloads: string;
snippets: (model: ModelData) => string[];
};
+ "mesh-anything": {
+ prettyLabel: string;
+ repoName: string;
+ repoUrl: string;
+ filter: false;
+ countDownloads: string;
+ snippets: () => string[];
+ };
"ml-agents": {
prettyLabel: string;
repoName: string;
@@ -298,6 +325,14 @@ export declare const MODEL_LIBRARIES_UI_ELEMENTS: {
filter: false;
countDownloads: string;
};
+ "mlc-llm": {
+ prettyLabel: string;
+ repoName: string;
+ repoUrl: string;
+ docsUrl: string;
+ filter: false;
+ countDownloads: string;
+ };
nemo: {
prettyLabel: string;
repoName: string;
@@ -514,6 +549,6 @@ export declare const MODEL_LIBRARIES_UI_ELEMENTS: {
};
};
export type ModelLibraryKey = keyof typeof MODEL_LIBRARIES_UI_ELEMENTS;
-
export declare const ALL_MODEL_LIBRARY_KEYS: ("adapter-transformers" | "allennlp" | "asteroid" | "audiocraft" | "audioseal" | "bertopic" | "big_vision" | "bm25s" | "chat_tts" | "diffusers" | "doctr" | "edsnlp" | "elm" | "espnet" | "fairseq" | "fastai" | "fasttext" | "flair" | "gemma.cpp" | "gliner" | "glyph-byt5" | "grok" | "hunyuan-dit" | "keras" | "tf-keras" | "keras-nlp" | "k2" | "mindspore" | "mars5-tts" | "ml-agents" | "mlx" | "mlx-image" | "nemo" | "open_clip" | "paddlenlp" | "peft" | "pyannote-audio" | "pythae" | "recurrentgemma" | "sample-factory" | "sentence-transformers" | "setfit" | "sklearn" | "spacy" | "span-marker" | "speechbrain" | "stable-audio-tools" | "diffusion-single-file" | "stable-baselines3" | "stanza" | "tensorflowtts" | "tic-clip" | "timesfm" | "timm" | "transformers" | "transformers.js" | "unity-sentis" | "voicecraft" | "whisperkit")[];
-
export declare const ALL_DISPLAY_MODEL_LIBRARY_KEYS: ("adapter-transformers" | "allennlp" | "asteroid" | "audiocraft" | "audioseal" | "bertopic" | "big_vision" | "bm25s" | "chat_tts" | "diffusers" | "doctr" | "edsnlp" | "elm" | "espnet" | "fairseq" | "fastai" | "fasttext" | "flair" | "gemma.cpp" | "gliner" | "glyph-byt5" | "grok" | "hunyuan-dit" | "keras" | "tf-keras" | "keras-nlp" | "k2" | "mindspore" | "mars5-tts" | "ml-agents" | "mlx" | "mlx-image" | "nemo" | "open_clip" | "paddlenlp" | "peft" | "pyannote-audio" | "pythae" | "recurrentgemma" | "sample-factory" | "sentence-transformers" | "setfit" | "sklearn" | "spacy" | "span-marker" | "speechbrain" | "stable-audio-tools" | "diffusion-single-file" | "stable-baselines3" | "stanza" | "tensorflowtts" | "tic-clip" | "timesfm" | "timm" | "transformers" | "transformers.js" | "unity-sentis" | "voicecraft" | "whisperkit")[];
+
export declare const ALL_MODEL_LIBRARY_KEYS: ("adapter-transformers" | "allennlp" | "asteroid" | "audiocraft" | "audioseal" | "bertopic" | "big_vision" | "bm25s" | "champ" | "chat_tts" | "colpali" | "diffusers" | "doctr" | "edsnlp" | "elm" | "espnet" | "fairseq" | "fastai" | "fasttext" | "flair" | "gemma.cpp" | "gliner" | "glyph-byt5" | "grok" | "hallo" | "hunyuan-dit" | "keras" | "tf-keras" | "keras-nlp" | "k2" | "mindspore" | "mars5-tts" | "mesh-anything" | "ml-agents" | "mlx" | "mlx-image" | "mlc-llm" | "nemo" | "open_clip" | "paddlenlp" | "peft" | "pyannote-audio" | "pythae" | "recurrentgemma" | "sample-factory" | "sentence-transformers" | "setfit" | "sklearn" | "spacy" | "span-marker" | "speechbrain" | "stable-audio-tools" | "diffusion-single-file" | "stable-baselines3" | "stanza" | "tensorflowtts" | "tic-clip" | "timesfm" | "timm" | "transformers" | "transformers.js" | "unity-sentis" | "voicecraft" | "whisperkit")[];
+
export declare const ALL_DISPLAY_MODEL_LIBRARY_KEYS: ("adapter-transformers" | "allennlp" | "asteroid" | "audiocraft" | "audioseal" | "bertopic" | "big_vision" | "bm25s" | "champ" | "chat_tts" | "colpali" | "diffusers" | "doctr" | "edsnlp" | "elm" | "espnet" | "fairseq" | "fastai" | "fasttext" | "flair" | "gemma.cpp" | "gliner" | "glyph-byt5" | "grok" | "hallo" | "hunyuan-dit" | "keras" | "tf-keras" | "keras-nlp" | "k2" | "mindspore" | "mars5-tts" | "mesh-anything" | "ml-agents" | "mlx" | "mlx-image" | "mlc-llm" | "nemo" | "open_clip" | "paddlenlp" | "peft" | "pyannote-audio" | "pythae" | "recurrentgemma" | "sample-factory" | "sentence-transformers" | "setfit" | "sklearn" | "spacy" | "span-marker" | "speechbrain" | "stable-audio-tools" | "diffusion-single-file" | "stable-baselines3" | "stanza" | "tensorflowtts" | "tic-clip" | "timesfm" | "timm" | "transformers" | "transformers.js" | "unity-sentis" | "voicecraft" | "whisperkit")[];
//# sourceMappingURL=model-libraries.d.ts.map

package/dist/src/model-libraries.d.ts.map
CHANGED
@@ -1 +1 @@
-
{"version":3,"file":"model-libraries.d.ts","sourceRoot":"","sources":["../../src/model-libraries.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,SAAS,EAAE,MAAM,cAAc,CAAC;AAC9C,OAAO,KAAK,EAAE,kBAAkB,EAAE,MAAM,6BAA6B,CAAC;AAEtE;;GAEG;AACH,MAAM,WAAW,gBAAgB;IAChC;;;;OAIG;IACH,WAAW,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,QAAQ,EAAE,MAAM,CAAC;IACjB;;OAEG;IACH,OAAO,EAAE,MAAM,CAAC;IAChB;;OAEG;IACH,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB;;OAEG;IACH,QAAQ,CAAC,EAAE,CAAC,KAAK,EAAE,SAAS,KAAK,MAAM,EAAE,CAAC;IAC1C;;;;;OAKG;IACH,cAAc,CAAC,EAAE,kBAAkB,CAAC;IACpC;;;OAGG;IACH,MAAM,CAAC,EAAE,OAAO,CAAC;CACjB;AAED;;;;;;;;;;;;;GAaG;AAEH,eAAO,MAAM,2BAA2B
+
{"version":3,"file":"model-libraries.d.ts","sourceRoot":"","sources":["../../src/model-libraries.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,SAAS,EAAE,MAAM,cAAc,CAAC;AAC9C,OAAO,KAAK,EAAE,kBAAkB,EAAE,MAAM,6BAA6B,CAAC;AAEtE;;GAEG;AACH,MAAM,WAAW,gBAAgB;IAChC;;;;OAIG;IACH,WAAW,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,QAAQ,EAAE,MAAM,CAAC;IACjB;;OAEG;IACH,OAAO,EAAE,MAAM,CAAC;IAChB;;OAEG;IACH,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB;;OAEG;IACH,QAAQ,CAAC,EAAE,CAAC,KAAK,EAAE,SAAS,KAAK,MAAM,EAAE,CAAC;IAC1C;;;;;OAKG;IACH,cAAc,CAAC,EAAE,kBAAkB,CAAC;IACpC;;;OAGG;IACH,MAAM,CAAC,EAAE,OAAO,CAAC;CACjB;AAED;;;;;;;;;;;;;GAaG;AAEH,eAAO,MAAM,2BAA2B;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CAgfI,CAAC;AAE7C,MAAM,MAAM,eAAe,GAAG,MAAM,OAAO,2BAA2B,CAAC;AAEvE,eAAO,MAAM,sBAAsB,43BAAgE,CAAC;AAEpG,eAAO,MAAM,8BAA8B,43BAQ1B,CAAC"}

package/dist/src/tasks/depth-estimation/data.d.ts.map
CHANGED
@@ -1 +1 @@
-
{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/depth-estimation/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,IAAI,CAAC;AAEzC,QAAA,MAAM,QAAQ,EAAE,
+
{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/depth-estimation/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,IAAI,CAAC;AAEzC,QAAA,MAAM,QAAQ,EAAE,cAyDf,CAAC;AAEF,eAAe,QAAQ,CAAC"}

package/dist/src/tasks/feature-extraction/data.d.ts.map
CHANGED
@@ -1 +1 @@
-
{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/feature-extraction/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,IAAI,CAAC;AAEzC,QAAA,MAAM,QAAQ,EAAE,
+
{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/feature-extraction/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,IAAI,CAAC;AAEzC,QAAA,MAAM,QAAQ,EAAE,cAgDf,CAAC;AAEF,eAAe,QAAQ,CAAC"}

package/dist/src/tasks/feature-extraction/inference.d.ts
CHANGED
@@ -3,21 +3,36 @@
 *
 * Using src/scripts/inference-codegen
 */
- export type FeatureExtractionOutput =
+ export type FeatureExtractionOutput = Array<number[]>;
/**
- *
+ * Feature Extraction Input.
+ *
+ * Auto-generated from TEI specs.
+ * For more details, check out
+ * https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tei-import.ts.
 */
export interface FeatureExtractionInput {
/**
- * The text to
+ * The text to embed.
 */
inputs: string;
+ normalize?: boolean;
/**
- *
+ * The name of the prompt that should be used by for encoding. If not set, no prompt
+ * will be applied.
+ *
+ * Must be a key in the `Sentence Transformers` configuration `prompts` dictionary.
+ *
+ * For example if ``prompt_name`` is "query" and the ``prompts`` is {"query": "query: ",
+ * ...},
+ * then the sentence "What is the capital of France?" will be encoded as
+ * "query: What is the capital of France?" because the prompt text will be prepended before
+ * any text to encode.
 */
-
-
-
+ prompt_name?: string;
+ truncate?: boolean;
+ truncation_direction?: FeatureExtractionInputTruncationDirection;
[property: string]: unknown;
}
+ export type FeatureExtractionInputTruncationDirection = "Left" | "Right";
//# sourceMappingURL=inference.d.ts.map

package/dist/src/tasks/feature-extraction/inference.d.ts.map
CHANGED
@@ -1 +1 @@
-
{"version":3,"file":"inference.d.ts","sourceRoot":"","sources":["../../../../src/tasks/feature-extraction/inference.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AAEH,MAAM,MAAM,uBAAuB,GAAG,
+
{"version":3,"file":"inference.d.ts","sourceRoot":"","sources":["../../../../src/tasks/feature-extraction/inference.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AAEH,MAAM,MAAM,uBAAuB,GAAG,KAAK,CAAC,MAAM,EAAE,CAAC,CAAC;AAEtD;;;;;;GAMG;AACH,MAAM,WAAW,sBAAsB;IACtC;;OAEG;IACH,MAAM,EAAE,MAAM,CAAC;IACf,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB;;;;;;;;;;;OAWG;IACH,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB,QAAQ,CAAC,EAAE,OAAO,CAAC;IACnB,oBAAoB,CAAC,EAAE,yCAAyC,CAAC;IACjE,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AAED,MAAM,MAAM,yCAAyC,GAAG,MAAM,GAAG,OAAO,CAAC"}

package/dist/src/tasks/object-detection/data.d.ts.map
CHANGED
@@ -1 +1 @@
-
{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/object-detection/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,IAAI,CAAC;AAEzC,QAAA,MAAM,QAAQ,EAAE,
+
{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/object-detection/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,IAAI,CAAC;AAEzC,QAAA,MAAM,QAAQ,EAAE,cAiFf,CAAC;AAEF,eAAe,QAAQ,CAAC"}

package/dist/src/tasks/zero-shot-image-classification/data.d.ts.map
CHANGED
@@ -1 +1 @@
-
{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/zero-shot-image-classification/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,IAAI,CAAC;AAEzC,QAAA,MAAM,QAAQ,EAAE,
+
{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/zero-shot-image-classification/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,IAAI,CAAC;AAEzC,QAAA,MAAM,QAAQ,EAAE,cA+Ef,CAAC;AAEF,eAAe,QAAQ,CAAC"}

package/dist/src/tasks/zero-shot-object-detection/data.d.ts.map
CHANGED
@@ -1 +1 @@
-
{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/zero-shot-object-detection/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,IAAI,CAAC;AAEzC,QAAA,MAAM,QAAQ,EAAE,
+
{"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/zero-shot-object-detection/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,IAAI,CAAC;AAEzC,QAAA,MAAM,QAAQ,EAAE,cA8Df,CAAC;AAEF,eAAe,QAAQ,CAAC"}

package/package.json
CHANGED
@@ -1,7 +1,7 @@
{
"name": "@huggingface/tasks",
"packageManager": "pnpm@8.10.5",
- "version": "0.
+ "version": "0.11.1",
"description": "List of ML tasks for huggingface.co/tasks",
"repository": "https://github.com/huggingface/huggingface.js.git",
"publishConfig": {
@@ -44,6 +44,7 @@
"build": "tsup src/index.ts --format cjs,esm --clean && tsc --emitDeclarationOnly --declaration",
"check": "tsc",
"inference-codegen": "tsx scripts/inference-codegen.ts && prettier --write src/tasks/*/inference.ts",
- "inference-tgi-import": "tsx scripts/inference-tgi-import.ts && prettier --write src/tasks/text-generation/spec/*.json && prettier --write src/tasks/chat-completion/spec/*.json"
+ "inference-tgi-import": "tsx scripts/inference-tgi-import.ts && prettier --write src/tasks/text-generation/spec/*.json && prettier --write src/tasks/chat-completion/spec/*.json",
+ "inference-tei-import": "tsx scripts/inference-tei-import.ts && prettier --write src/tasks/feature-extraction/spec/*.json"
}
}

package/src/index.ts
CHANGED
@@ -39,7 +39,6 @@ export type {
WidgetExampleOutputAnswerScore,
WidgetExampleOutputText,
} from "./widget-example";
- export { InferenceDisplayability } from "./model-data";
export { SPECIAL_TOKENS_ATTRIBUTES } from "./tokenizer-data";

import * as snippets from "./snippets";

package/src/model-data.ts
CHANGED
@@ -2,21 +2,6 @@ import type { PipelineType } from "./pipelines";
import type { WidgetExample } from "./widget-example";
import type { TokenizerConfig } from "./tokenizer-data";

- export enum InferenceDisplayability {
- /**
- * Yes
- */
- Yes = "Yes",
- /**
- * And then, all the possible reasons why it's no:
- */
- ExplicitOptOut = "ExplicitOptOut",
- CustomCode = "CustomCode",
- LibraryNotDetected = "LibraryNotDetected",
- PipelineNotDetected = "PipelineNotDetected",
- PipelineLibraryPairNotSupported = "PipelineLibraryPairNotSupported",
- }
-
/**
* Public interface for model metadata
*/
@@ -27,8 +12,9 @@ export interface ModelData {
id: string;
/**
* Whether or not to enable inference widget for this model
+ * TODO(type it)
*/
- inference:
+ inference: string;
/**
* is this model private?
*/

package/src/model-libraries-snippets.ts
CHANGED
@@ -230,6 +230,16 @@ from inference import Mars5TTS
mars5 = Mars5TTS.from_pretrained("${model.id}")`,
];

+ export const mesh_anything = (): string[] => [
+ `# Install from https://github.com/buaacyw/MeshAnything.git
+
+ from MeshAnything.models.meshanything import MeshAnything
+
+ # refer to https://github.com/buaacyw/MeshAnything/blob/main/main.py#L91 on how to define args
+ # and https://github.com/buaacyw/MeshAnything/blob/main/app.py regarding usage
+ model = MeshAnything(args)`,
+ ];
+
export const open_clip = (model: ModelData): string[] => [
`import open_clip


package/src/model-libraries.ts
CHANGED
@@ -122,6 +122,12 @@ export const MODEL_LIBRARIES_UI_ELEMENTS = {
filter: false,
countDownloads: `path:"params.index.json"`,
},
+ champ: {
+ prettyLabel: "Champ",
+ repoName: "Champ",
+ repoUrl: "https://github.com/fudan-generative-vision/champ",
+ countDownloads: `path:"champ/motion_module.pth"`,
+ },
chat_tts: {
prettyLabel: "ChatTTS",
repoName: "ChatTTS",
@@ -130,6 +136,13 @@ export const MODEL_LIBRARIES_UI_ELEMENTS = {
filter: false,
countDownloads: `path:"asset/GPT.pt"`,
},
+ colpali: {
+ prettyLabel: "ColPali",
+ repoName: "ColPali",
+ repoUrl: "https://github.com/ManuelFay/colpali",
+ filter: false,
+ countDownloads: `path:"adapter_config.json"`,
+ },
diffusers: {
prettyLabel: "Diffusers",
repoName: "🤗/diffusers",
@@ -229,6 +242,12 @@ export const MODEL_LIBRARIES_UI_ELEMENTS = {
filter: false,
countDownloads: `path:"ckpt/tensor00000_000" OR path:"ckpt-0/tensor00000_000"`,
},
+ hallo: {
+ prettyLabel: "Hallo",
+ repoName: "Hallo",
+ repoUrl: "https://github.com/fudan-generative-vision/hallo",
+ countDownloads: `path:"hallo/net.pth"`,
+ },
"hunyuan-dit": {
prettyLabel: "HunyuanDiT",
repoName: "HunyuanDiT",
@@ -279,6 +298,14 @@ export const MODEL_LIBRARIES_UI_ELEMENTS = {
countDownloads: `path:"mars5_ar.safetensors"`,
snippets: snippets.mars5_tts,
},
+ "mesh-anything": {
+ prettyLabel: "MeshAnything",
+ repoName: "MeshAnything",
+ repoUrl: "https://github.com/buaacyw/MeshAnything",
+ filter: false,
+ countDownloads: `path:"MeshAnything_350m.pth"`,
+ snippets: snippets.mesh_anything,
+ },
"ml-agents": {
prettyLabel: "ml-agents",
repoName: "ml-agents",
@@ -304,6 +331,14 @@ export const MODEL_LIBRARIES_UI_ELEMENTS = {
filter: false,
countDownloads: `path:"model.safetensors"`,
},
+ "mlc-llm": {
+ prettyLabel: "MLC-LLM",
+ repoName: "MLC-LLM",
+ repoUrl: "https://github.com/mlc-ai/mlc-llm",
+ docsUrl: "https://llm.mlc.ai/docs/",
+ filter: false,
+ countDownloads: `path:"mlc-chat-config.json"`,
+ },
nemo: {
prettyLabel: "NeMo",
repoName: "NeMo",
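
The five new entries above (`champ`, `colpali`, `hallo`, `"mesh-anything"`, `"mlc-llm"`) are what widen the `ALL_MODEL_LIBRARY_KEYS` and `ALL_DISPLAY_MODEL_LIBRARY_KEYS` unions shown earlier in `model-libraries.d.ts`. As a rough, hedged sketch of that relationship (an assumption about the implementation, not code taken from this diff), the key list can be derived directly from the object's keys so that new entries propagate automatically:

```ts
// Sketch only (assumption): meant to sit in model-libraries.ts, after the
// MODEL_LIBRARIES_UI_ELEMENTS constant shown in the hunks above.
// The type alias is verbatim from the .d.ts; the runtime lists are one plausible derivation.
export type ModelLibraryKey = keyof typeof MODEL_LIBRARIES_UI_ELEMENTS;

export const ALL_MODEL_LIBRARY_KEYS = Object.keys(MODEL_LIBRARIES_UI_ELEMENTS) as ModelLibraryKey[];

// A display list could then drop entries whose `filter` flag is false (hypothetical logic).
export const ALL_DISPLAY_MODEL_LIBRARY_KEYS = ALL_MODEL_LIBRARY_KEYS.filter(
	(k) => (MODEL_LIBRARIES_UI_ELEMENTS[k] as { filter?: boolean }).filter !== false
);
```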

package/src/tasks/depth-estimation/about.md
CHANGED
@@ -1,4 +1,5 @@
- ##
+ ## Use Cases
+
Depth estimation models can be used to estimate the depth of different objects present in an image.

### Estimation of Volumetric Information
@@ -8,6 +9,14 @@ Depth estimation models are widely used to study volumetric formation of objects

Depth estimation models can also be used to develop a 3D representation from a 2D image.

+ ## Depth Estimation Subtasks
+
+ There are two depth estimation subtasks.
+
+ - **Absolute depth estimation**: Absolute (or metric) depth estimation aims to provide exact depth measurements from the camera. Absolute depth estimation models output depth maps with real-world distances in meter or feet.
+
+ - **Relative depth estimation**: Relative depth estimation aims to predict the depth order of objects or points in a scene without providing the precise measurements.
+
## Inference

With the `transformers` library, you can use the `depth-estimation` pipeline to infer with image classification models. You can initialize the pipeline with a model id from the Hub. If you do not provide a model id it will initialize with [Intel/dpt-large](https://huggingface.co/Intel/dpt-large) by default. When calling the pipeline you just need to specify a path, http link or an image loaded in PIL. Additionally, you can find a comprehensive list of various depth estimation models at [this link](https://huggingface.co/models?pipeline_tag=depth-estimation).

package/src/tasks/depth-estimation/data.ts
CHANGED
@@ -3,9 +3,13 @@ import type { TaskDataCustom } from "..";
const taskData: TaskDataCustom = {
datasets: [
{
- description: "NYU Depth V2 Dataset: Video dataset containing both RGB and depth sensor data",
+ description: "NYU Depth V2 Dataset: Video dataset containing both RGB and depth sensor data.",
id: "sayakpaul/nyu_depth_v2",
},
+ {
+ description: "Monocular depth estimation benchmark based without noise and errors.",
+ id: "depth-anything/DA-2K",
+ },
],
demo: {
inputs: [
@@ -24,17 +28,17 @@ const taskData: TaskDataCustom = {
metrics: [],
models: [
{
- description: "
- id: "
- },
- {
- description: "Strong Depth Estimation model trained on a big compilation of datasets.",
- id: "LiheYoung/depth-anything-large-hf",
+ description: "Cutting-edge depth estimation model.",
+ id: "depth-anything/Depth-Anything-V2-Large",
},
{
description: "A strong monocular depth estimation model.",
id: "Bingxin/Marigold",
},
+ {
+ description: "A metric depth estimation model trained on NYU dataset.",
+ id: "Intel/zoedepth-nyu",
+ },
],
spaces: [
{
@@ -42,8 +46,8 @@ const taskData: TaskDataCustom = {
id: "radames/dpt-depth-estimation-3d-voxels",
},
{
- description: "An application
- id: "
+ description: "An application on cutting-edge depth estimation.",
+ id: "depth-anything/Depth-Anything-V2",
},
{
description: "An application to try state-of-the-art depth estimation.",

package/src/tasks/feature-extraction/about.md
CHANGED
@@ -1,9 +1,21 @@
## Use Cases

+ ### Transfer Learning
+
Models trained on a specific dataset can learn features about the data. For instance, a model trained on an English poetry dataset learns English grammar at a very high level. This information can be transferred to a new model that is going to be trained on tweets. This process of extracting features and transferring to another model is called transfer learning. One can pass their dataset through a feature extraction pipeline and feed the result to a classifier.

+ ### Retrieval and Reranking
+
+ Retrieval is the process of obtaining relevant documents or information based on a user's search query. In the context of NLP, retrieval systems aim to find relevant text passages or documents from a large corpus of data that match the user's query. The goal is to return a set of results that are likely to be useful to the user. On the other hand, reranking is a technique used to improve the quality of retrieval results by reordering them based on their relevance to the query.
+
+ ### Retrieval Augmented Generation
+
+ Retrieval-augmented generation (RAG) is a technique in which user inputs to generative models are first queried through a knowledge base, and the most relevant information from the knowledge base is used to augment the prompt to reduce hallucinations during generation. Feature extraction models (primarily retrieval and reranking models) can be used in RAG to reduce model hallucinations and ground the model.
+
## Inference

+ You can infer feature extraction models using `pipeline` of transformers library.
+
```python
from transformers import pipeline
checkpoint = "facebook/bart-base"
@@ -22,6 +34,39 @@ feature_extractor(text,return_tensors = "pt")[0].numpy().mean(axis=0)
[ 0.2520, -0.6869, -1.0582, ..., 0.5198, -2.2106, 0.4547]]])'''
```

+ A very popular library for training similarity and search models is called `sentence-transformers`. To get started, install the library.
+
+ ```bash
+ pip install -U sentence-transformers
+ ```
+
+ You can infer with `sentence-transformers` models as follows.
+
+ ```python
+ from sentence_transformers import SentenceTransformer
+
+ model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")
+ sentences = [
+ "The weather is lovely today.",
+ "It's so sunny outside!",
+ "He drove to the stadium.",
+ ]
+
+ embeddings = model.encode(sentences)
+ similarities = model.similarity(embeddings, embeddings)
+ print(similarities)
+ # tensor([[1.0000, 0.6660, 0.1046],
+ # [0.6660, 1.0000, 0.1411],
+ # [0.1046, 0.1411, 1.0000]])
+ ```
+
+ ### Text Embedding Inference
+
+ [Text Embeddings Inference (TEI)](https://github.com/huggingface/text-embeddings-inference) is a toolkit to easily serve feature extraction models using few lines of code.
+
## Useful resources

- - [Documentation for feature
+ - [Documentation for feature extraction task in 🤗Transformers](https://huggingface.co/docs/transformers/main_classes/feature_extractor)
+ - [Introduction to MTEB Benchmark](https://huggingface.co/blog/mteb)
+ - [Cookbook: Simple RAG for GitHub issues using Hugging Face Zephyr and LangChain](https://huggingface.co/learn/cookbook/rag_zephyr_langchain)
+ - [sentence-transformers organization on Hugging Face Hub](https://huggingface.co/sentence-transformers)
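
As a hedged aside on the TEI mention added above: a minimal sketch of calling a TEI server from TypeScript could look like the following. The localhost URL and port are assumptions for illustration, not something stated in this diff.

```ts
// Sketch only: assumes a Text Embeddings Inference server is already running locally
// and that its /embed route is reachable on port 8080 (both are assumptions).
async function embed(text: string): Promise<number[][]> {
	const response = await fetch("http://localhost:8080/embed", {
		method: "POST",
		headers: { "Content-Type": "application/json" },
		body: JSON.stringify({ inputs: text, normalize: true }),
	});
	// The response shape matches FeatureExtractionOutput (Array<number[]>) defined elsewhere in this diff.
	return (await response.json()) as number[][];
}
```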

package/src/tasks/feature-extraction/data.ts
CHANGED
@@ -33,14 +33,19 @@ const taskData: TaskDataCustom = {
models: [
{
description: "A powerful feature extraction model for natural language processing tasks.",
- id: "
+ id: "thenlper/gte-large",
},
{
- description: "A strong feature extraction model for
- id: "
+ description: "A strong feature extraction model for retrieval.",
+ id: "Alibaba-NLP/gte-Qwen1.5-7B-instruct",
+ },
+ ],
+ spaces: [
+ {
+ description: "A leaderboard to rank best feature extraction models..",
+ id: "mteb/leaderboard",
},
],
- spaces: [],
summary: "Feature extraction is the task of extracting features learnt in a model.",
widgetModels: ["facebook/bart-base"],
};

package/src/tasks/feature-extraction/inference.ts
CHANGED
@@ -4,19 +4,37 @@
 * Using src/scripts/inference-codegen
 */

- export type FeatureExtractionOutput =
+ export type FeatureExtractionOutput = Array<number[]>;

/**
- *
+ * Feature Extraction Input.
+ *
+ * Auto-generated from TEI specs.
+ * For more details, check out
+ * https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tei-import.ts.
 */
export interface FeatureExtractionInput {
/**
- * The text to
+ * The text to embed.
*/
inputs: string;
+ normalize?: boolean;
/**
- *
+ * The name of the prompt that should be used by for encoding. If not set, no prompt
+ * will be applied.
+ *
+ * Must be a key in the `Sentence Transformers` configuration `prompts` dictionary.
+ *
+ * For example if ``prompt_name`` is "query" and the ``prompts`` is {"query": "query: ",
+ * ...},
+ * then the sentence "What is the capital of France?" will be encoded as
+ * "query: What is the capital of France?" because the prompt text will be prepended before
+ * any text to encode.
*/
-
+ prompt_name?: string;
+ truncate?: boolean;
+ truncation_direction?: FeatureExtractionInputTruncationDirection;
[property: string]: unknown;
}
+
+ export type FeatureExtractionInputTruncationDirection = "Left" | "Right";
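
To make the new input shape concrete, here is a hedged usage sketch; the relative import path is hypothetical (the interface itself is shown in the hunk above) and the values are made up.

```ts
// Sketch only: the import path is an assumption for illustration.
import type { FeatureExtractionInput } from "./src/tasks/feature-extraction/inference";

const payload: FeatureExtractionInput = {
	inputs: "What is the capital of France?", // required: the text to embed
	normalize: true, // optional; default "true" per the spec/input.json below
	prompt_name: "query", // optional: a key from the Sentence Transformers `prompts` config
	truncate: true, // optional; default "false" per the spec/input.json below
	truncation_direction: "Right", // optional: "Left" | "Right"
};
```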

package/src/tasks/feature-extraction/spec/input.json
CHANGED
@@ -1,26 +1,47 @@
{
"$id": "/inference/schemas/feature-extraction/input.json",
"$schema": "http://json-schema.org/draft-06/schema#",
- "description": "
+ "description": "Feature Extraction Input.\n\nAuto-generated from TEI specs.\nFor more details, check out https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tei-import.ts.",
"title": "FeatureExtractionInput",
"type": "object",
+ "required": ["inputs"],
"properties": {
"inputs": {
- "
- "
+ "type": "string",
+ "description": "The text to embed."
},
- "
- "
- "
+ "normalize": {
+ "type": "boolean",
+ "default": "true",
+ "example": "true"
+ },
+ "prompt_name": {
+ "type": "string",
+ "description": "The name of the prompt that should be used by for encoding. If not set, no prompt\nwill be applied.\n\nMust be a key in the `Sentence Transformers` configuration `prompts` dictionary.\n\nFor example if ``prompt_name`` is \"query\" and the ``prompts`` is {\"query\": \"query: \", ...},\nthen the sentence \"What is the capital of France?\" will be encoded as\n\"query: What is the capital of France?\" because the prompt text will be prepended before\nany text to encode.",
+ "default": "null",
+ "example": "null",
+ "nullable": true
+ },
+ "truncate": {
+ "type": "boolean",
+ "default": "false",
+ "example": "false",
+ "nullable": true
+ },
+ "truncation_direction": {
+ "allOf": [
+ {
+ "$ref": "#/$defs/FeatureExtractionInputTruncationDirection"
+ }
+ ],
+ "default": "right"
}
},
"$defs": {
- "
- "
- "
- "
- "properties": {}
+ "FeatureExtractionInputTruncationDirection": {
+ "type": "string",
+ "enum": ["Left", "Right"],
+ "title": "FeatureExtractionInputTruncationDirection"
}
- }
- "required": ["inputs"]
+ }
}