@huggingface/inference 3.1.0 → 3.1.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -1
- package/dist/index.cjs +33 -0
- package/dist/index.js +32 -0
- package/dist/src/tasks/index.d.ts +1 -0
- package/dist/src/tasks/index.d.ts.map +1 -1
- package/package.json +2 -2
- package/src/tasks/index.ts +1 -0
package/README.md
CHANGED
@@ -1,4 +1,4 @@
-# 🤗 Hugging Face Inference
+# 🤗 Hugging Face Inference
 
 A Typescript powered wrapper for the Hugging Face Inference API (serverless), Inference Endpoints (dedicated), and third-party Inference Providers.
 It works with [Inference API (serverless)](https://huggingface.co/docs/api-inference/index) and [Inference Endpoints (dedicated)](https://huggingface.co/docs/inference-endpoints/index), and even with supported third-party Inference Providers.
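For orientation, the wrapper the README describes is used roughly as follows. This is a minimal sketch based on the package's documented client API; the access token and model id are hypothetical placeholders:

```ts
import { HfInference } from "@huggingface/inference";

// Create a client with a Hugging Face access token (placeholder value).
const hf = new HfInference("hf_xxx");

// Task methods are exposed on the client; textToImage is one example.
const image: Blob = await hf.textToImage({
  model: "stabilityai/stable-diffusion-2", // hypothetical model id
  inputs: "An astronaut riding a horse",
});
```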
package/dist/index.cjs
CHANGED
@@ -54,6 +54,7 @@ __export(src_exports, {
   textGenerationStream: () => textGenerationStream,
   textToImage: () => textToImage,
   textToSpeech: () => textToSpeech,
+  textToVideo: () => textToVideo,
   tokenClassification: () => tokenClassification,
   translation: () => translation,
   visualQuestionAnswering: () => visualQuestionAnswering,
@@ -91,6 +92,7 @@ __export(tasks_exports, {
   textGenerationStream: () => textGenerationStream,
   textToImage: () => textToImage,
   textToSpeech: () => textToSpeech,
+  textToVideo: () => textToVideo,
   tokenClassification: () => tokenClassification,
   translation: () => translation,
   visualQuestionAnswering: () => visualQuestionAnswering,
@@ -911,6 +913,36 @@ async function zeroShotImageClassification(args, options) {
   return res;
 }
 
+// src/tasks/cv/textToVideo.ts
+var SUPPORTED_PROVIDERS = ["fal-ai", "replicate"];
+async function textToVideo(args, options) {
+  if (!args.provider || !typedInclude(SUPPORTED_PROVIDERS, args.provider)) {
+    throw new Error(
+      `textToVideo inference is only supported for the following providers: ${SUPPORTED_PROVIDERS.join(", ")}`
+    );
+  }
+  const payload = args.provider === "fal-ai" || args.provider === "replicate" ? { ...omit(args, ["inputs", "parameters"]), ...args.parameters, prompt: args.inputs } : args;
+  const res = await request(payload, {
+    ...options,
+    taskHint: "text-to-video"
+  });
+  if (args.provider === "fal-ai") {
+    const isValidOutput = typeof res === "object" && !!res && "video" in res && typeof res.video === "object" && !!res.video && "url" in res.video && typeof res.video.url === "string" && isUrl(res.video.url);
+    if (!isValidOutput) {
+      throw new InferenceOutputError("Expected { video: { url: string } }");
+    }
+    const urlResponse = await fetch(res.video.url);
+    return await urlResponse.blob();
+  } else {
+    const isValidOutput = typeof res === "object" && !!res && "output" in res && typeof res.output === "string" && isUrl(res.output);
+    if (!isValidOutput) {
+      throw new InferenceOutputError("Expected { output: string }");
+    }
+    const urlResponse = await fetch(res.output);
+    return await urlResponse.blob();
+  }
+}
+
 // src/lib/getDefaultTask.ts
 var taskCache = /* @__PURE__ */ new Map();
 var CACHE_DURATION = 10 * 60 * 1e3;
@@ -1338,6 +1370,7 @@ var INFERENCE_PROVIDERS = ["fal-ai", "replicate", "sambanova", "together", "hf-i
   textGenerationStream,
   textToImage,
   textToSpeech,
+  textToVideo,
   tokenClassification,
   translation,
   visualQuestionAnswering,
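The compiled output above is the heart of this release: a new `textToVideo` task that validates the provider, remaps `inputs` to the provider's `prompt` field, fetches the returned video URL, and resolves to a `Blob`. A minimal usage sketch, assuming the fal-ai provider; the access token, model id, and `num_frames` parameter are hypothetical placeholders, while the function name, argument names, and supported provider values come straight from the diff:

```ts
import { textToVideo } from "@huggingface/inference";

// Only "fal-ai" and "replicate" are accepted; any other provider value
// throws before a request is made.
const video: Blob = await textToVideo({
  accessToken: "hf_xxx",               // placeholder token
  provider: "fal-ai",
  model: "fal-ai/some-video-model",    // hypothetical model id
  inputs: "A red panda eating bamboo", // remapped to `prompt` in the payload
  parameters: { num_frames: 16 },      // spread into the provider payload (hypothetical parameter)
});
```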
package/dist/index.js
CHANGED
@@ -33,6 +33,7 @@ __export(tasks_exports, {
   textGenerationStream: () => textGenerationStream,
   textToImage: () => textToImage,
   textToSpeech: () => textToSpeech,
+  textToVideo: () => textToVideo,
   tokenClassification: () => tokenClassification,
   translation: () => translation,
   visualQuestionAnswering: () => visualQuestionAnswering,
@@ -853,6 +854,36 @@ async function zeroShotImageClassification(args, options) {
   return res;
 }
 
+// src/tasks/cv/textToVideo.ts
+var SUPPORTED_PROVIDERS = ["fal-ai", "replicate"];
+async function textToVideo(args, options) {
+  if (!args.provider || !typedInclude(SUPPORTED_PROVIDERS, args.provider)) {
+    throw new Error(
+      `textToVideo inference is only supported for the following providers: ${SUPPORTED_PROVIDERS.join(", ")}`
+    );
+  }
+  const payload = args.provider === "fal-ai" || args.provider === "replicate" ? { ...omit(args, ["inputs", "parameters"]), ...args.parameters, prompt: args.inputs } : args;
+  const res = await request(payload, {
+    ...options,
+    taskHint: "text-to-video"
+  });
+  if (args.provider === "fal-ai") {
+    const isValidOutput = typeof res === "object" && !!res && "video" in res && typeof res.video === "object" && !!res.video && "url" in res.video && typeof res.video.url === "string" && isUrl(res.video.url);
+    if (!isValidOutput) {
+      throw new InferenceOutputError("Expected { video: { url: string } }");
+    }
+    const urlResponse = await fetch(res.video.url);
+    return await urlResponse.blob();
+  } else {
+    const isValidOutput = typeof res === "object" && !!res && "output" in res && typeof res.output === "string" && isUrl(res.output);
+    if (!isValidOutput) {
+      throw new InferenceOutputError("Expected { output: string }");
+    }
+    const urlResponse = await fetch(res.output);
+    return await urlResponse.blob();
+  }
+}
+
 // src/lib/getDefaultTask.ts
 var taskCache = /* @__PURE__ */ new Map();
 var CACHE_DURATION = 10 * 60 * 1e3;
@@ -1279,6 +1310,7 @@ export {
   textGenerationStream,
   textToImage,
   textToSpeech,
+  textToVideo,
   tokenClassification,
   translation,
   visualQuestionAnswering,
package/dist/src/tasks/index.d.ts
CHANGED
@@ -11,6 +11,7 @@ export * from "./cv/objectDetection";
 export * from "./cv/textToImage";
 export * from "./cv/imageToImage";
 export * from "./cv/zeroShotImageClassification";
+export * from "./cv/textToVideo";
 export * from "./nlp/featureExtraction";
 export * from "./nlp/fillMask";
 export * from "./nlp/questionAnswering";
package/dist/src/tasks/index.d.ts.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../../src/tasks/index.ts"],"names":[],"mappings":"AACA,cAAc,kBAAkB,CAAC;AACjC,cAAc,2BAA2B,CAAC;AAG1C,cAAc,6BAA6B,CAAC;AAC5C,cAAc,oCAAoC,CAAC;AACnD,cAAc,sBAAsB,CAAC;AACrC,cAAc,sBAAsB,CAAC;AAGrC,cAAc,0BAA0B,CAAC;AACzC,cAAc,wBAAwB,CAAC;AACvC,cAAc,kBAAkB,CAAC;AACjC,cAAc,sBAAsB,CAAC;AACrC,cAAc,kBAAkB,CAAC;AACjC,cAAc,mBAAmB,CAAC;AAClC,cAAc,kCAAkC,CAAC;
+{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../../src/tasks/index.ts"],"names":[],"mappings":"AACA,cAAc,kBAAkB,CAAC;AACjC,cAAc,2BAA2B,CAAC;AAG1C,cAAc,6BAA6B,CAAC;AAC5C,cAAc,oCAAoC,CAAC;AACnD,cAAc,sBAAsB,CAAC;AACrC,cAAc,sBAAsB,CAAC;AAGrC,cAAc,0BAA0B,CAAC;AACzC,cAAc,wBAAwB,CAAC;AACvC,cAAc,kBAAkB,CAAC;AACjC,cAAc,sBAAsB,CAAC;AACrC,cAAc,kBAAkB,CAAC;AACjC,cAAc,mBAAmB,CAAC;AAClC,cAAc,kCAAkC,CAAC;AACjD,cAAc,kBAAkB,CAAC;AAGjC,cAAc,yBAAyB,CAAC;AACxC,cAAc,gBAAgB,CAAC;AAC/B,cAAc,yBAAyB,CAAC;AACxC,cAAc,0BAA0B,CAAC;AACzC,cAAc,qBAAqB,CAAC;AACpC,cAAc,8BAA8B,CAAC;AAC7C,cAAc,0BAA0B,CAAC;AACzC,cAAc,sBAAsB,CAAC;AACrC,cAAc,4BAA4B,CAAC;AAC3C,cAAc,2BAA2B,CAAC;AAC1C,cAAc,mBAAmB,CAAC;AAClC,cAAc,8BAA8B,CAAC;AAC7C,cAAc,sBAAsB,CAAC;AACrC,cAAc,4BAA4B,CAAC;AAG3C,cAAc,wCAAwC,CAAC;AACvD,cAAc,sCAAsC,CAAC;AAGrD,cAAc,6BAA6B,CAAC;AAC5C,cAAc,iCAAiC,CAAC"}
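The declaration file for the new task itself (`dist/src/tasks/cv/textToVideo.d.ts`) is not part of this diff; only the barrel re-export and its sourcemap are. From the compiled JS above, the declaration is plausibly shaped like the sketch below. The type names and the stand-in definitions of the package's shared `BaseArgs`/`Options` types are assumptions:

```ts
// Simplified stand-ins for the package's shared types (assumptions):
type BaseArgs = { accessToken?: string; model?: string; provider?: string };
type Options = { signal?: AbortSignal; fetch?: typeof fetch };

// Inferred shape of the new task's declaration:
export type TextToVideoArgs = BaseArgs & {
  inputs: string;                       // the text prompt, remapped to `prompt`
  parameters?: Record<string, unknown>; // provider-specific generation options
};
export type TextToVideoOutput = Blob;

export declare function textToVideo(args: TextToVideoArgs, options?: Options): Promise<TextToVideoOutput>;
```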
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@huggingface/inference",
-  "version": "3.1.0",
+  "version": "3.1.2",
   "packageManager": "pnpm@8.10.5",
   "license": "MIT",
   "author": "Tim Mikeladze <tim.mikeladze@gmail.com>",
@@ -39,7 +39,7 @@
   },
   "type": "module",
   "dependencies": {
-    "@huggingface/tasks": "^0.
+    "@huggingface/tasks": "^0.15.0"
   },
   "devDependencies": {
     "@types/node": "18.13.0"
package/src/tasks/index.ts
CHANGED
@@ -16,6 +16,7 @@ export * from "./cv/objectDetection";
 export * from "./cv/textToImage";
 export * from "./cv/imageToImage";
 export * from "./cv/zeroShotImageClassification";
+export * from "./cv/textToVideo";
 
 // Natural Language Processing tasks
 export * from "./nlp/featureExtraction";
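With this re-export in place, the task reaches the package's public surface in both forms: as a named export (used in the sketch after the index.cjs section) and, because it is registered in `tasks_exports`, as a client method. A sketch of the method form, this time through the replicate branch, whose response validation expects `{ output: string }` rather than fal-ai's `{ video: { url } }`; the token and model id are hypothetical:

```ts
import { HfInference } from "@huggingface/inference";

const hf = new HfInference("hf_xxx"); // placeholder token

// The replicate branch fetches the URL in `output` and resolves to a Blob,
// just as the fal-ai branch does with `video.url`.
const video = await hf.textToVideo({
  provider: "replicate",
  model: "some-org/some-video-model", // hypothetical model id
  inputs: "Waves crashing on a beach at sunset",
});
```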