@huggingface/inference 3.6.2 → 3.7.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +0 -25
- package/dist/index.cjs +135 -114
- package/dist/index.js +135 -114
- package/dist/src/config.d.ts +1 -0
- package/dist/src/config.d.ts.map +1 -1
- package/dist/src/lib/makeRequestOptions.d.ts.map +1 -1
- package/dist/src/tasks/audio/automaticSpeechRecognition.d.ts.map +1 -1
- package/dist/src/tasks/custom/request.d.ts +1 -0
- package/dist/src/tasks/custom/request.d.ts.map +1 -1
- package/dist/src/tasks/custom/streamingRequest.d.ts +1 -0
- package/dist/src/tasks/custom/streamingRequest.d.ts.map +1 -1
- package/dist/src/tasks/cv/imageToText.d.ts.map +1 -1
- package/dist/src/tasks/cv/objectDetection.d.ts +1 -1
- package/dist/src/tasks/cv/objectDetection.d.ts.map +1 -1
- package/dist/src/tasks/cv/textToVideo.d.ts +1 -1
- package/dist/src/tasks/cv/textToVideo.d.ts.map +1 -1
- package/dist/src/tasks/cv/zeroShotImageClassification.d.ts +1 -1
- package/dist/src/tasks/cv/zeroShotImageClassification.d.ts.map +1 -1
- package/dist/src/tasks/multimodal/documentQuestionAnswering.d.ts +1 -1
- package/dist/src/tasks/multimodal/documentQuestionAnswering.d.ts.map +1 -1
- package/dist/src/tasks/multimodal/visualQuestionAnswering.d.ts.map +1 -1
- package/dist/src/tasks/nlp/chatCompletion.d.ts +1 -1
- package/dist/src/tasks/nlp/chatCompletion.d.ts.map +1 -1
- package/dist/src/tasks/nlp/chatCompletionStream.d.ts +1 -1
- package/dist/src/tasks/nlp/chatCompletionStream.d.ts.map +1 -1
- package/dist/src/tasks/nlp/questionAnswering.d.ts.map +1 -1
- package/dist/src/tasks/nlp/sentenceSimilarity.d.ts.map +1 -1
- package/dist/src/tasks/nlp/textClassification.d.ts.map +1 -1
- package/dist/src/tasks/nlp/tokenClassification.d.ts.map +1 -1
- package/dist/src/tasks/nlp/zeroShotClassification.d.ts.map +1 -1
- package/dist/src/types.d.ts +7 -0
- package/dist/src/types.d.ts.map +1 -1
- package/dist/src/utils/request.d.ts +27 -0
- package/dist/src/utils/request.d.ts.map +1 -0
- package/package.json +2 -2
- package/src/config.ts +1 -0
- package/src/lib/makeRequestOptions.ts +5 -2
- package/src/snippets/templates.exported.ts +1 -1
- package/src/tasks/audio/audioClassification.ts +2 -2
- package/src/tasks/audio/audioToAudio.ts +2 -2
- package/src/tasks/audio/automaticSpeechRecognition.ts +3 -3
- package/src/tasks/audio/textToSpeech.ts +2 -2
- package/src/tasks/custom/request.ts +7 -32
- package/src/tasks/custom/streamingRequest.ts +5 -85
- package/src/tasks/cv/imageClassification.ts +2 -2
- package/src/tasks/cv/imageSegmentation.ts +2 -2
- package/src/tasks/cv/imageToImage.ts +2 -2
- package/src/tasks/cv/imageToText.ts +7 -9
- package/src/tasks/cv/objectDetection.ts +4 -4
- package/src/tasks/cv/textToImage.ts +3 -3
- package/src/tasks/cv/textToVideo.ts +23 -20
- package/src/tasks/cv/zeroShotImageClassification.ts +4 -5
- package/src/tasks/multimodal/documentQuestionAnswering.ts +13 -13
- package/src/tasks/multimodal/visualQuestionAnswering.ts +4 -2
- package/src/tasks/nlp/chatCompletion.ts +3 -4
- package/src/tasks/nlp/chatCompletionStream.ts +3 -3
- package/src/tasks/nlp/featureExtraction.ts +2 -2
- package/src/tasks/nlp/fillMask.ts +2 -2
- package/src/tasks/nlp/questionAnswering.ts +3 -2
- package/src/tasks/nlp/sentenceSimilarity.ts +2 -11
- package/src/tasks/nlp/summarization.ts +2 -2
- package/src/tasks/nlp/tableQuestionAnswering.ts +2 -2
- package/src/tasks/nlp/textClassification.ts +8 -9
- package/src/tasks/nlp/textGeneration.ts +16 -16
- package/src/tasks/nlp/textGenerationStream.ts +2 -2
- package/src/tasks/nlp/tokenClassification.ts +9 -10
- package/src/tasks/nlp/translation.ts +2 -2
- package/src/tasks/nlp/zeroShotClassification.ts +9 -10
- package/src/tasks/tabular/tabularClassification.ts +2 -2
- package/src/tasks/tabular/tabularRegression.ts +2 -2
- package/src/types.ts +8 -0
- package/src/utils/request.ts +161 -0
package/dist/index.js
CHANGED
@@ -44,6 +44,7 @@ __export(tasks_exports, {
 // src/config.ts
 var HF_HUB_URL = "https://huggingface.co";
 var HF_ROUTER_URL = "https://router.huggingface.co";
+var HF_HEADER_X_BILL_TO = "X-HF-Bill-To";
 
 // src/providers/black-forest-labs.ts
 var BLACK_FOREST_LABS_AI_API_BASE_URL = "https://api.us1.bfl.ai";
@@ -475,7 +476,7 @@ var OPENAI_CONFIG = {
 
 // package.json
 var name = "@huggingface/inference";
-var version = "3.6.2";
+var version = "3.7.0";
 
 // src/providers/consts.ts
 var HARDCODED_MODEL_ID_MAPPING = {
@@ -597,7 +598,7 @@ function makeRequestOptionsFromResolvedModel(resolvedModel, args, options) {
   const { accessToken, endpointUrl, provider: maybeProvider, model, ...remainingArgs } = args;
   const provider = maybeProvider ?? "hf-inference";
   const providerConfig = providerConfigs[provider];
-  const { includeCredentials, task, chatCompletion: chatCompletion2, signal } = options ?? {};
+  const { includeCredentials, task, chatCompletion: chatCompletion2, signal, billTo } = options ?? {};
   const authMethod = (() => {
     if (providerConfig.clientSideRoutingOnly) {
       if (accessToken && accessToken.startsWith("hf_")) {
@@ -625,6 +626,9 @@ function makeRequestOptionsFromResolvedModel(resolvedModel, args, options) {
     accessToken,
     authMethod
   });
+  if (billTo) {
+    headers[HF_HEADER_X_BILL_TO] = billTo;
+  }
   if (!binary) {
     headers["Content-Type"] = "application/json";
   }
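The net effect of these two hunks is a new per-call `billTo` option that is forwarded as the `X-HF-Bill-To` header. A minimal usage sketch, assuming `billTo` sits on the options object as the destructuring above implies (the token, model ID, and organization name are placeholders):

```ts
import { chatCompletion } from "@huggingface/inference";

// `billTo` travels in the per-call options and is emitted as the
// `X-HF-Bill-To` header by the hunks above.
const res = await chatCompletion(
  {
    accessToken: "hf_***", // placeholder token
    model: "meta-llama/Llama-3.1-8B-Instruct", // placeholder model
    messages: [{ role: "user", content: "Hello!" }],
  },
  { billTo: "my-org" } // placeholder organization to bill
);
console.log(res.choices[0].message);
```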
@@ -678,37 +682,6 @@ function removeProviderPrefix(model, provider) {
   return model.slice(provider.length + 1);
 }
 
-// src/tasks/custom/request.ts
-async function request(args, options) {
-  const { url, info } = await makeRequestOptions(args, options);
-  const response = await (options?.fetch ?? fetch)(url, info);
-  if (options?.retry_on_error !== false && response.status === 503) {
-    return request(args, options);
-  }
-  if (!response.ok) {
-    const contentType = response.headers.get("Content-Type");
-    if (["application/json", "application/problem+json"].some((ct) => contentType?.startsWith(ct))) {
-      const output = await response.json();
-      if ([400, 422, 404, 500].includes(response.status) && options?.chatCompletion) {
-        throw new Error(
-          `Server ${args.model} does not seem to support chat completion. Error: ${JSON.stringify(output.error)}`
-        );
-      }
-      if (output.error || output.detail) {
-        throw new Error(JSON.stringify(output.error ?? output.detail));
-      } else {
-        throw new Error(output);
-      }
-    }
-    const message = contentType?.startsWith("text/plain;") ? await response.text() : void 0;
-    throw new Error(message ?? "An error occurred while fetching the blob");
-  }
-  if (response.headers.get("Content-Type")?.startsWith("application/json")) {
-    return await response.json();
-  }
-  return await response.blob();
-}
-
 // src/vendor/fetch-event-source/parse.ts
 function getLines(onLine) {
   let buffer;
@@ -808,12 +781,44 @@ function newMessage() {
   };
 }
 
-// src/tasks/custom/streamingRequest.ts
-async function* streamingRequest(args, options) {
+// src/utils/request.ts
+async function innerRequest(args, options) {
+  const { url, info } = await makeRequestOptions(args, options);
+  const response = await (options?.fetch ?? fetch)(url, info);
+  const requestContext = { url, info };
+  if (options?.retry_on_error !== false && response.status === 503) {
+    return innerRequest(args, options);
+  }
+  if (!response.ok) {
+    const contentType = response.headers.get("Content-Type");
+    if (["application/json", "application/problem+json"].some((ct) => contentType?.startsWith(ct))) {
+      const output = await response.json();
+      if ([400, 422, 404, 500].includes(response.status) && options?.chatCompletion) {
+        throw new Error(
+          `Server ${args.model} does not seem to support chat completion. Error: ${JSON.stringify(output.error)}`
+        );
+      }
+      if (output.error || output.detail) {
+        throw new Error(JSON.stringify(output.error ?? output.detail));
+      } else {
+        throw new Error(output);
+      }
+    }
+    const message = contentType?.startsWith("text/plain;") ? await response.text() : void 0;
+    throw new Error(message ?? "An error occurred while fetching the blob");
+  }
+  if (response.headers.get("Content-Type")?.startsWith("application/json")) {
+    const data = await response.json();
+    return { data, requestContext };
+  }
+  const blob = await response.blob();
+  return { data: blob, requestContext };
+}
+async function* innerStreamingRequest(args, options) {
   const { url, info } = await makeRequestOptions({ ...args, stream: true }, options);
   const response = await (options?.fetch ?? fetch)(url, info);
   if (options?.retry_on_error !== false && response.status === 503) {
-    return yield* streamingRequest(args, options);
+    return yield* innerStreamingRequest(args, options);
   }
   if (!response.ok) {
     if (response.headers.get("Content-Type")?.startsWith("application/json")) {
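After this refactor, every task call resolves through `innerRequest`, which returns the parsed payload together with the request that produced it, instead of the bare payload. The shape implied by the added code is roughly the following (illustrative only; the published declarations live in `package/dist/src/utils/request.d.ts`, listed as new above):

```ts
// Rough sketch of the result type implied by the added code above.
interface InnerRequestResult<T> {
  /** Parsed JSON body, or a Blob when the response is not application/json. */
  data: T | Blob;
  /** The url and fetch init resolved by makeRequestOptions for this call. */
  requestContext: {
    url: string;
    info: RequestInit;
  };
}
```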
@@ -827,6 +832,9 @@ async function* streamingRequest(args, options) {
       if (output.error && "message" in output.error && typeof output.error.message === "string") {
         throw new Error(output.error.message);
       }
+      if (typeof output.message === "string") {
+        throw new Error(output.message);
+      }
     }
     throw new Error(`Server response contains error: ${response.status}`);
   }
@@ -879,6 +887,23 @@ async function* streamingRequest(args, options) {
     }
   }
 }
 
+// src/tasks/custom/request.ts
+async function request(args, options) {
+  console.warn(
+    "The request method is deprecated and will be removed in a future version of huggingface.js. Use specific task functions instead."
+  );
+  const result = await innerRequest(args, options);
+  return result.data;
+}
+
+// src/tasks/custom/streamingRequest.ts
+async function* streamingRequest(args, options) {
+  console.warn(
+    "The streamingRequest method is deprecated and will be removed in a future version of huggingface.js. Use specific task functions instead."
+  );
+  yield* innerStreamingRequest(args, options);
+}
+
 // src/utils/pick.ts
 function pick(o, props) {
   return Object.assign(
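With `request` and `streamingRequest` reduced to warning wrappers, the intended migration is to the task-specific functions, which validate and type their outputs. A hedged before/after sketch (token and model ID are placeholders):

```ts
import { request, textClassification } from "@huggingface/inference";

const args = {
  accessToken: "hf_***", // placeholder token
  model: "distilbert-base-uncased-finetuned-sst-2-english", // placeholder model
  inputs: "I like you.",
};

// Before: the generic escape hatch, which now logs the warning above.
const raw = await request<unknown>(args);

// After: the equivalent task function, with validated and typed output.
const labels = await textClassification(args);
```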
@@ -914,7 +939,7 @@ function preparePayload(args) {
 // src/tasks/audio/audioClassification.ts
 async function audioClassification(args, options) {
   const payload = preparePayload(args);
-  const res = await request(payload, {
+  const { data: res } = await innerRequest(payload, {
     ...options,
     task: "audio-classification"
   });
@@ -941,7 +966,7 @@ function base64FromBytes(arr) {
 // src/tasks/audio/automaticSpeechRecognition.ts
 async function automaticSpeechRecognition(args, options) {
   const payload = await buildPayload(args);
-  const res = await request(payload, {
+  const { data: res } = await innerRequest(payload, {
     ...options,
     task: "automatic-speech-recognition"
   });
@@ -985,7 +1010,7 @@ async function textToSpeech(args, options) {
     ...args.parameters,
     text: args.inputs
   } : args;
-  const res = await request(payload, {
+  const { data: res } = await innerRequest(payload, {
     ...options,
     task: "text-to-speech"
   });
@@ -1011,7 +1036,7 @@ async function textToSpeech(args, options) {
 // src/tasks/audio/audioToAudio.ts
 async function audioToAudio(args, options) {
   const payload = preparePayload(args);
-  const res = await request(payload, {
+  const { data: res } = await innerRequest(payload, {
     ...options,
     task: "audio-to-audio"
   });
@@ -1037,7 +1062,7 @@ function preparePayload2(args) {
 // src/tasks/cv/imageClassification.ts
 async function imageClassification(args, options) {
   const payload = preparePayload2(args);
-  const res = await request(payload, {
+  const { data: res } = await innerRequest(payload, {
     ...options,
     task: "image-classification"
   });
@@ -1051,7 +1076,7 @@ async function imageClassification(args, options) {
 // src/tasks/cv/imageSegmentation.ts
 async function imageSegmentation(args, options) {
   const payload = preparePayload2(args);
-  const res = await request(payload, {
+  const { data: res } = await innerRequest(payload, {
     ...options,
     task: "image-segmentation"
   });
@@ -1065,20 +1090,20 @@ async function imageSegmentation(args, options) {
 // src/tasks/cv/imageToText.ts
 async function imageToText(args, options) {
   const payload = preparePayload2(args);
-  const res = (await request(payload, {
+  const { data: res } = await innerRequest(payload, {
     ...options,
     task: "image-to-text"
-  }))?.[0];
-  if (typeof res?.generated_text !== "string") {
+  });
+  if (typeof res?.[0]?.generated_text !== "string") {
     throw new InferenceOutputError("Expected {generated_text: string}");
   }
-  return res;
+  return res?.[0];
 }
 
 // src/tasks/cv/objectDetection.ts
 async function objectDetection(args, options) {
   const payload = preparePayload2(args);
-  const res = await request(payload, {
+  const { data: res } = await innerRequest(payload, {
     ...options,
     task: "object-detection"
   });
@@ -1115,7 +1140,7 @@ async function textToImage(args, options) {
     ...getResponseFormatArg(args.provider),
     prompt: args.inputs
   };
-  const res = await request(payload, {
+  const { data: res } = await innerRequest(payload, {
     ...options,
     task: "text-to-image"
   });
@@ -1204,7 +1229,7 @@ async function imageToImage(args, options) {
       )
     };
   }
-  const res = await request(reqArgs, {
+  const { data: res } = await innerRequest(reqArgs, {
     ...options,
     task: "image-to-image"
   });
@@ -1239,7 +1264,7 @@ async function preparePayload3(args) {
 }
 async function zeroShotImageClassification(args, options) {
   const payload = await preparePayload3(args);
-  const res = await request(payload, {
+  const { data: res } = await innerRequest(payload, {
     ...options,
     task: "zero-shot-image-classification"
   });
@@ -1259,33 +1284,36 @@ async function textToVideo(args, options) {
     );
   }
   const payload = args.provider === "fal-ai" || args.provider === "replicate" || args.provider === "novita" ? { ...omit(args, ["inputs", "parameters"]), ...args.parameters, prompt: args.inputs } : args;
-  const res = await request(payload, {
+  const { data, requestContext } = await innerRequest(payload, {
     ...options,
     task: "text-to-video"
   });
   if (args.provider === "fal-ai") {
-    const { url, info } = await makeRequestOptions(args, { ...options, task: "text-to-video" });
-    return await pollFalResponse(res, url, info.headers);
+    return await pollFalResponse(
+      data,
+      requestContext.url,
+      requestContext.info.headers
+    );
   } else if (args.provider === "novita") {
-    const isValidOutput = typeof res === "object" && !!res && "video" in res && typeof res.video === "object" && !!res.video && "video_url" in res.video && typeof res.video.video_url === "string" && isUrl(res.video.video_url);
+    const isValidOutput = typeof data === "object" && !!data && "video" in data && typeof data.video === "object" && !!data.video && "video_url" in data.video && typeof data.video.video_url === "string" && isUrl(data.video.video_url);
     if (!isValidOutput) {
       throw new InferenceOutputError("Expected { video: { video_url: string } }");
     }
-    const urlResponse = await fetch(res.video.video_url);
+    const urlResponse = await fetch(data.video.video_url);
     return await urlResponse.blob();
   } else {
-    const isValidOutput = typeof res === "object" && !!res && "output" in res && typeof res.output === "string" && isUrl(res.output);
+    const isValidOutput = typeof data === "object" && !!data && "output" in data && typeof data.output === "string" && isUrl(data.output);
     if (!isValidOutput) {
       throw new InferenceOutputError("Expected { output: string }");
     }
-    const urlResponse = await fetch(res.output);
+    const urlResponse = await fetch(data.output);
     return await urlResponse.blob();
   }
 }
 
 // src/tasks/nlp/featureExtraction.ts
 async function featureExtraction(args, options) {
-  const res = await request(args, {
+  const { data: res } = await innerRequest(args, {
     ...options,
     task: "feature-extraction"
   });
@@ -1308,7 +1336,7 @@ async function featureExtraction(args, options) {
 
 // src/tasks/nlp/fillMask.ts
 async function fillMask(args, options) {
-  const res = await request(args, {
+  const { data: res } = await innerRequest(args, {
     ...options,
     task: "fill-mask"
   });
@@ -1325,7 +1353,7 @@ async function fillMask(args, options) {
 
 // src/tasks/nlp/questionAnswering.ts
 async function questionAnswering(args, options) {
-  const res = await request(args, {
+  const { data: res } = await innerRequest(args, {
     ...options,
     task: "question-answering"
   });
@@ -1340,7 +1368,7 @@ async function questionAnswering(args, options) {
 
 // src/tasks/nlp/sentenceSimilarity.ts
 async function sentenceSimilarity(args, options) {
-  const res = await request(prepareInput(args), {
+  const { data: res } = await innerRequest(args, {
     ...options,
     task: "sentence-similarity"
   });
@@ -1350,17 +1378,10 @@ async function sentenceSimilarity(args, options) {
   }
   return res;
 }
-function prepareInput(args) {
-  return {
-    ...omit(args, ["inputs", "parameters"]),
-    inputs: { ...omit(args.inputs, "sourceSentence") },
-    parameters: { source_sentence: args.inputs.sourceSentence, ...args.parameters }
-  };
-}
 
 // src/tasks/nlp/summarization.ts
 async function summarization(args, options) {
-  const res = await request(args, {
+  const { data: res } = await innerRequest(args, {
     ...options,
     task: "summarization"
   });
@@ -1373,7 +1394,7 @@ async function summarization(args, options) {
 
 // src/tasks/nlp/tableQuestionAnswering.ts
 async function tableQuestionAnswering(args, options) {
-  const res = await request(args, {
+  const { data: res } = await innerRequest(args, {
     ...options,
     task: "table-question-answering"
   });
@@ -1393,15 +1414,16 @@ function validate(elem) {
 
 // src/tasks/nlp/textClassification.ts
 async function textClassification(args, options) {
-  const res = (await request(args, {
+  const { data: res } = await innerRequest(args, {
     ...options,
     task: "text-classification"
-  }))?.[0];
-  const isValidOutput = Array.isArray(res) && res.every((x) => typeof x?.label === "string" && typeof x.score === "number");
+  });
+  const output = res?.[0];
+  const isValidOutput = Array.isArray(output) && output.every((x) => typeof x?.label === "string" && typeof x.score === "number");
   if (!isValidOutput) {
     throw new InferenceOutputError("Expected Array<{label: string, score: number}>");
   }
-  return res;
+  return output;
 }
 
 // src/utils/toArray.ts
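Several of the rewritten call sites below normalize the response with `toArray` before validating it, since providers may return either a single object or an array. A sketch of the behavior these call sites assume (the actual implementation lives in src/utils/toArray.ts):

```ts
// Sketch of the assumed behavior: wrap a lone object in an array,
// so validation can always iterate over an array of results.
function toArray<T>(obj: T | T[]): T[] {
  return Array.isArray(obj) ? obj : [obj];
}
```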
@@ -1416,7 +1438,7 @@ function toArray(obj) {
 async function textGeneration(args, options) {
   if (args.provider === "together") {
     args.prompt = args.inputs;
-    const raw = await request(args, {
+    const { data: raw } = await innerRequest(args, {
       ...options,
       task: "text-generation"
     });
@@ -1437,10 +1459,10 @@ async function textGeneration(args, options) {
       } : void 0,
       ...omit(args, ["inputs", "parameters"])
     };
-    const raw = await request(payload, {
+    const raw = (await innerRequest(payload, {
       ...options,
       task: "text-generation"
-    });
+    })).data;
     const isValidOutput = typeof raw === "object" && "choices" in raw && Array.isArray(raw?.choices) && typeof raw?.model === "string";
     if (!isValidOutput) {
       throw new InferenceOutputError("Expected ChatCompletionOutput");
@@ -1450,23 +1472,22 @@ async function textGeneration(args, options) {
       generated_text: completion.message.content
     };
   } else {
-    const res = toArray(
-      await request(args, {
-        ...options,
-        task: "text-generation"
-      })
-    );
-    const isValidOutput = Array.isArray(res) && res.every((x) => "generated_text" in x && typeof x?.generated_text === "string");
+    const { data: res } = await innerRequest(args, {
+      ...options,
+      task: "text-generation"
+    });
+    const output = toArray(res);
+    const isValidOutput = Array.isArray(output) && output.every((x) => "generated_text" in x && typeof x?.generated_text === "string");
     if (!isValidOutput) {
       throw new InferenceOutputError("Expected Array<{generated_text: string}>");
     }
-    return res?.[0];
+    return output?.[0];
   }
 }
 
 // src/tasks/nlp/textGenerationStream.ts
 async function* textGenerationStream(args, options) {
-  yield* streamingRequest(args, {
+  yield* innerStreamingRequest(args, {
     ...options,
     task: "text-generation"
   });
@@ -1474,13 +1495,12 @@ async function* textGenerationStream(args, options) {
 
 // src/tasks/nlp/tokenClassification.ts
 async function tokenClassification(args, options) {
-  const res = toArray(
-    await request(args, {
-      ...options,
-      task: "token-classification"
-    })
-  );
-  const isValidOutput = Array.isArray(res) && res.every(
+  const { data: res } = await innerRequest(args, {
+    ...options,
+    task: "token-classification"
+  });
+  const output = toArray(res);
+  const isValidOutput = Array.isArray(output) && output.every(
     (x) => typeof x.end === "number" && typeof x.entity_group === "string" && typeof x.score === "number" && typeof x.start === "number" && typeof x.word === "string"
   );
   if (!isValidOutput) {
@@ -1488,12 +1508,12 @@ async function tokenClassification(args, options) {
       "Expected Array<{end: number, entity_group: string, score: number, start: number, word: string}>"
     );
   }
-  return res;
+  return output;
 }
 
 // src/tasks/nlp/translation.ts
 async function translation(args, options) {
-  const res = await request(args, {
+  const { data: res } = await innerRequest(args, {
     ...options,
     task: "translation"
   });
@@ -1506,24 +1526,23 @@ async function translation(args, options) {
 
 // src/tasks/nlp/zeroShotClassification.ts
 async function zeroShotClassification(args, options) {
-  const res = toArray(
-    await request(args, {
-      ...options,
-      task: "zero-shot-classification"
-    })
-  );
-  const isValidOutput = Array.isArray(res) && res.every(
+  const { data: res } = await innerRequest(args, {
+    ...options,
+    task: "zero-shot-classification"
+  });
+  const output = toArray(res);
+  const isValidOutput = Array.isArray(output) && output.every(
    (x) => Array.isArray(x.labels) && x.labels.every((_label) => typeof _label === "string") && Array.isArray(x.scores) && x.scores.every((_score) => typeof _score === "number") && typeof x.sequence === "string"
   );
   if (!isValidOutput) {
     throw new InferenceOutputError("Expected Array<{labels: string[], scores: number[], sequence: string}>");
   }
-  return res;
+  return output;
 }
 
 // src/tasks/nlp/chatCompletion.ts
 async function chatCompletion(args, options) {
-  const res = await request(args, {
+  const { data: res } = await innerRequest(args, {
     ...options,
     task: "text-generation",
     chatCompletion: true
@@ -1538,7 +1557,7 @@ async function chatCompletion(args, options) {
 
 // src/tasks/nlp/chatCompletionStream.ts
 async function* chatCompletionStream(args, options) {
-  yield* streamingRequest(args, {
+  yield* innerStreamingRequest(args, {
     ...options,
     task: "text-generation",
     chatCompletion: true
@@ -1555,19 +1574,21 @@ async function documentQuestionAnswering(args, options) {
       image: base64FromBytes(new Uint8Array(await args.inputs.image.arrayBuffer()))
     }
   };
-  const res = toArray(
-    await request(reqArgs, {
+  const { data: res } = await innerRequest(
+    reqArgs,
+    {
       ...options,
       task: "document-question-answering"
-    })
+    }
   );
-  const isValidOutput = Array.isArray(res) && res.every(
+  const output = toArray(res);
+  const isValidOutput = Array.isArray(output) && output.every(
     (elem) => typeof elem === "object" && !!elem && typeof elem?.answer === "string" && (typeof elem.end === "number" || typeof elem.end === "undefined") && (typeof elem.score === "number" || typeof elem.score === "undefined") && (typeof elem.start === "number" || typeof elem.start === "undefined")
   );
   if (!isValidOutput) {
     throw new InferenceOutputError("Expected Array<{answer: string, end?: number, score?: number, start?: number}>");
   }
-  return res[0];
+  return output[0];
 }
 
 // src/tasks/multimodal/visualQuestionAnswering.ts
@@ -1580,7 +1601,7 @@ async function visualQuestionAnswering(args, options) {
       image: base64FromBytes(new Uint8Array(await args.inputs.image.arrayBuffer()))
     }
   };
-  const res = await request(reqArgs, {
+  const { data: res } = await innerRequest(reqArgs, {
     ...options,
     task: "visual-question-answering"
   });
@@ -1595,7 +1616,7 @@ async function visualQuestionAnswering(args, options) {
 
 // src/tasks/tabular/tabularRegression.ts
 async function tabularRegression(args, options) {
-  const res = await request(args, {
+  const { data: res } = await innerRequest(args, {
     ...options,
     task: "tabular-regression"
   });
@@ -1608,7 +1629,7 @@ async function tabularRegression(args, options) {
 
 // src/tasks/tabular/tabularClassification.ts
 async function tabularClassification(args, options) {
-  const res = await request(args, {
+  const { data: res } = await innerRequest(args, {
     ...options,
     task: "tabular-classification"
   });
@@ -1699,7 +1720,7 @@ var templates = {
     "basicAudio": 'async function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "audio/flac"\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});',
     "basicImage": 'async function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "image/jpeg"\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});',
     "textToAudio": '{% if model.library_name == "transformers" %}\nasync function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.blob();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n // Returns a byte object of the Audio wavform. Use it directly!\n});\n{% else %}\nasync function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});\n{% endif %} ',
-    "textToImage": 'async function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.blob();\n return result;\n}\n\nquery({
+    "textToImage": 'async function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.blob();\n return result;\n}\n\n\nquery({ {{ providerInputs.asTsString }} }).then((response) => {\n // Use image\n});',
     "zeroShotClassification": 'async function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({\n inputs: {{ providerInputs.asObj.inputs }},\n parameters: { candidate_labels: ["refund", "legal", "faq"] }\n}).then((response) => {\n console.log(JSON.stringify(response));\n});'
   },
   "huggingface.js": {
package/dist/src/config.d.ts
CHANGED
package/dist/src/config.d.ts.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"config.d.ts","sourceRoot":"","sources":["../../src/config.ts"],"names":[],"mappings":"AAAA,eAAO,MAAM,UAAU,2BAA2B,CAAC;AACnD,eAAO,MAAM,aAAa,kCAAkC,CAAC"}
+{"version":3,"file":"config.d.ts","sourceRoot":"","sources":["../../src/config.ts"],"names":[],"mappings":"AAAA,eAAO,MAAM,UAAU,2BAA2B,CAAC;AACnD,eAAO,MAAM,aAAa,kCAAkC,CAAC;AAC7D,eAAO,MAAM,mBAAmB,iBAAiB,CAAC"}
package/dist/src/lib/makeRequestOptions.d.ts.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"makeRequestOptions.d.ts","sourceRoot":"","sources":["../../../src/lib/makeRequestOptions.ts"],"names":[],"mappings":"AAcA,OAAO,KAAK,EAAqB,aAAa,EAAE,OAAO,EAAkB,WAAW,EAAE,MAAM,UAAU,CAAC;AAgCvG;;;GAGG;AACH,wBAAsB,kBAAkB,CACvC,IAAI,EAAE,WAAW,GAAG;IACnB,IAAI,CAAC,EAAE,IAAI,GAAG,WAAW,CAAC;IAC1B,MAAM,CAAC,EAAE,OAAO,CAAC;CACjB,EACD,OAAO,CAAC,EAAE,OAAO,GAAG;IACnB,oEAAoE;IACpE,IAAI,CAAC,EAAE,aAAa,CAAC;IACrB,cAAc,CAAC,EAAE,OAAO,CAAC;CACzB,GACC,OAAO,CAAC;IAAE,GAAG,EAAE,MAAM,CAAC;IAAC,IAAI,EAAE,WAAW,CAAA;CAAE,CAAC,CAoC7C;AAED;;;GAGG;AACH,wBAAgB,mCAAmC,CAClD,aAAa,EAAE,MAAM,EACrB,IAAI,EAAE,WAAW,GAAG;IACnB,IAAI,CAAC,EAAE,IAAI,GAAG,WAAW,CAAC;IAC1B,MAAM,CAAC,EAAE,OAAO,CAAC;CACjB,EACD,OAAO,CAAC,EAAE,OAAO,GAAG;IACnB,IAAI,CAAC,EAAE,aAAa,CAAC;IACrB,cAAc,CAAC,EAAE,OAAO,CAAC;CACzB,GACC;IAAE,GAAG,EAAE,MAAM,CAAC;IAAC,IAAI,EAAE,WAAW,CAAA;CAAE,
+{"version":3,"file":"makeRequestOptions.d.ts","sourceRoot":"","sources":["../../../src/lib/makeRequestOptions.ts"],"names":[],"mappings":"AAcA,OAAO,KAAK,EAAqB,aAAa,EAAE,OAAO,EAAkB,WAAW,EAAE,MAAM,UAAU,CAAC;AAgCvG;;;GAGG;AACH,wBAAsB,kBAAkB,CACvC,IAAI,EAAE,WAAW,GAAG;IACnB,IAAI,CAAC,EAAE,IAAI,GAAG,WAAW,CAAC;IAC1B,MAAM,CAAC,EAAE,OAAO,CAAC;CACjB,EACD,OAAO,CAAC,EAAE,OAAO,GAAG;IACnB,oEAAoE;IACpE,IAAI,CAAC,EAAE,aAAa,CAAC;IACrB,cAAc,CAAC,EAAE,OAAO,CAAC;CACzB,GACC,OAAO,CAAC;IAAE,GAAG,EAAE,MAAM,CAAC;IAAC,IAAI,EAAE,WAAW,CAAA;CAAE,CAAC,CAoC7C;AAED;;;GAGG;AACH,wBAAgB,mCAAmC,CAClD,aAAa,EAAE,MAAM,EACrB,IAAI,EAAE,WAAW,GAAG;IACnB,IAAI,CAAC,EAAE,IAAI,GAAG,WAAW,CAAC;IAC1B,MAAM,CAAC,EAAE,OAAO,CAAC;CACjB,EACD,OAAO,CAAC,EAAE,OAAO,GAAG;IACnB,IAAI,CAAC,EAAE,aAAa,CAAC;IACrB,cAAc,CAAC,EAAE,OAAO,CAAC;CACzB,GACC;IAAE,GAAG,EAAE,MAAM,CAAC;IAAC,IAAI,EAAE,WAAW,CAAA;CAAE,CAiGpC"}
package/dist/src/tasks/audio/automaticSpeechRecognition.d.ts.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"automaticSpeechRecognition.d.ts","sourceRoot":"","sources":["../../../../src/tasks/audio/automaticSpeechRecognition.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,+BAA+B,EAAE,gCAAgC,EAAE,MAAM,oBAAoB,CAAC;AAE5G,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAe,MAAM,aAAa,CAAC;
+{"version":3,"file":"automaticSpeechRecognition.d.ts","sourceRoot":"","sources":["../../../../src/tasks/audio/automaticSpeechRecognition.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,+BAA+B,EAAE,gCAAgC,EAAE,MAAM,oBAAoB,CAAC;AAE5G,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAe,MAAM,aAAa,CAAC;AAIlE,OAAO,KAAK,EAAE,gBAAgB,EAAE,MAAM,SAAS,CAAC;AAGhD,MAAM,MAAM,8BAA8B,GAAG,QAAQ,GAAG,CAAC,+BAA+B,GAAG,gBAAgB,CAAC,CAAC;AAC7G;;;GAGG;AACH,wBAAsB,0BAA0B,CAC/C,IAAI,EAAE,8BAA8B,EACpC,OAAO,CAAC,EAAE,OAAO,GACf,OAAO,CAAC,gCAAgC,CAAC,CAW3C"}
package/dist/src/tasks/custom/request.d.ts
CHANGED
@@ -1,6 +1,7 @@
 import type { InferenceTask, Options, RequestArgs } from "../../types";
 /**
  * Primitive to make custom calls to the inference provider
+ * @deprecated Use specific task functions instead. This function will be removed in a future version.
  */
 export declare function request<T>(args: RequestArgs, options?: Options & {
     /** In most cases (unless we pass a endpointUrl) we know the task */
package/dist/src/tasks/custom/request.d.ts.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"request.d.ts","sourceRoot":"","sources":["../../../../src/tasks/custom/request.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,aAAa,EAAE,OAAO,EAAE,WAAW,EAAE,MAAM,aAAa,CAAC;AAGvE
+{"version":3,"file":"request.d.ts","sourceRoot":"","sources":["../../../../src/tasks/custom/request.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,aAAa,EAAE,OAAO,EAAE,WAAW,EAAE,MAAM,aAAa,CAAC;AAGvE;;;GAGG;AACH,wBAAsB,OAAO,CAAC,CAAC,EAC9B,IAAI,EAAE,WAAW,EACjB,OAAO,CAAC,EAAE,OAAO,GAAG;IACnB,oEAAoE;IACpE,IAAI,CAAC,EAAE,aAAa,CAAC;IACrB,oCAAoC;IACpC,cAAc,CAAC,EAAE,OAAO,CAAC;CACzB,GACC,OAAO,CAAC,CAAC,CAAC,CAMZ"}
package/dist/src/tasks/custom/streamingRequest.d.ts
CHANGED
@@ -1,6 +1,7 @@
 import type { InferenceTask, Options, RequestArgs } from "../../types";
 /**
  * Primitive to make custom inference calls that expect server-sent events, and returns the response through a generator
+ * @deprecated Use specific task functions instead. This function will be removed in a future version.
  */
 export declare function streamingRequest<T>(args: RequestArgs, options?: Options & {
     /** In most cases (unless we pass a endpointUrl) we know the task */
package/dist/src/tasks/custom/streamingRequest.d.ts.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"streamingRequest.d.ts","sourceRoot":"","sources":["../../../../src/tasks/custom/streamingRequest.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,aAAa,EAAE,OAAO,EAAE,WAAW,EAAE,MAAM,aAAa,CAAC;
+{"version":3,"file":"streamingRequest.d.ts","sourceRoot":"","sources":["../../../../src/tasks/custom/streamingRequest.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,aAAa,EAAE,OAAO,EAAE,WAAW,EAAE,MAAM,aAAa,CAAC;AAEvE;;;GAGG;AACH,wBAAuB,gBAAgB,CAAC,CAAC,EACxC,IAAI,EAAE,WAAW,EACjB,OAAO,CAAC,EAAE,OAAO,GAAG;IACnB,oEAAoE;IACpE,IAAI,CAAC,EAAE,aAAa,CAAC;IACrB,oCAAoC;IACpC,cAAc,CAAC,EAAE,OAAO,CAAC;CACzB,GACC,cAAc,CAAC,CAAC,CAAC,CAKnB"}
package/dist/src/tasks/cv/imageToText.d.ts.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"imageToText.d.ts","sourceRoot":"","sources":["../../../../src/tasks/cv/imageToText.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,gBAAgB,EAAE,iBAAiB,EAAE,MAAM,oBAAoB,CAAC;AAE9E,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAErD,OAAO,KAAK,EAAE,gBAAgB,EAAE,MAAM,SAAS,CAAC;AAGhD,MAAM,MAAM,eAAe,GAAG,QAAQ,GAAG,CAAC,gBAAgB,GAAG,gBAAgB,CAAC,CAAC;AAC/E;;GAEG;AACH,wBAAsB,WAAW,CAAC,IAAI,EAAE,eAAe,EAAE,OAAO,CAAC,EAAE,OAAO,GAAG,OAAO,CAAC,iBAAiB,CAAC,
+{"version":3,"file":"imageToText.d.ts","sourceRoot":"","sources":["../../../../src/tasks/cv/imageToText.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,gBAAgB,EAAE,iBAAiB,EAAE,MAAM,oBAAoB,CAAC;AAE9E,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAErD,OAAO,KAAK,EAAE,gBAAgB,EAAE,MAAM,SAAS,CAAC;AAGhD,MAAM,MAAM,eAAe,GAAG,QAAQ,GAAG,CAAC,gBAAgB,GAAG,gBAAgB,CAAC,CAAC;AAC/E;;GAEG;AACH,wBAAsB,WAAW,CAAC,IAAI,EAAE,eAAe,EAAE,OAAO,CAAC,EAAE,OAAO,GAAG,OAAO,CAAC,iBAAiB,CAAC,CAYtG"}
package/dist/src/tasks/cv/objectDetection.d.ts
CHANGED
@@ -1,5 +1,5 @@
-import type { BaseArgs, Options } from "../../types";
 import type { ObjectDetectionInput, ObjectDetectionOutput } from "@huggingface/tasks";
+import type { BaseArgs, Options } from "../../types";
 import { type LegacyImageInput } from "./utils";
 export type ObjectDetectionArgs = BaseArgs & (ObjectDetectionInput | LegacyImageInput);
 /**
package/dist/src/tasks/cv/objectDetection.d.ts.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"objectDetection.d.ts","sourceRoot":"","sources":["../../../../src/tasks/cv/objectDetection.ts"],"names":[],"mappings":"
+{"version":3,"file":"objectDetection.d.ts","sourceRoot":"","sources":["../../../../src/tasks/cv/objectDetection.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,oBAAoB,EAAE,qBAAqB,EAAE,MAAM,oBAAoB,CAAC;AAEtF,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAErD,OAAO,EAAkB,KAAK,gBAAgB,EAAE,MAAM,SAAS,CAAC;AAEhE,MAAM,MAAM,mBAAmB,GAAG,QAAQ,GAAG,CAAC,oBAAoB,GAAG,gBAAgB,CAAC,CAAC;AAEvF;;;GAGG;AACH,wBAAsB,eAAe,CAAC,IAAI,EAAE,mBAAmB,EAAE,OAAO,CAAC,EAAE,OAAO,GAAG,OAAO,CAAC,qBAAqB,CAAC,CAuBlH"}
package/dist/src/tasks/cv/textToVideo.d.ts
CHANGED
@@ -1,5 +1,5 @@
-import type { BaseArgs, Options } from "../../types";
 import type { TextToVideoInput } from "@huggingface/tasks";
+import type { BaseArgs, Options } from "../../types";
 export type TextToVideoArgs = BaseArgs & TextToVideoInput;
 export type TextToVideoOutput = Blob;
 export declare function textToVideo(args: TextToVideoArgs, options?: Options): Promise<TextToVideoOutput>;