@huggingface/inference 3.6.2 → 3.7.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +0 -25
- package/dist/index.cjs +135 -114
- package/dist/index.js +135 -114
- package/dist/src/config.d.ts +1 -0
- package/dist/src/config.d.ts.map +1 -1
- package/dist/src/lib/makeRequestOptions.d.ts.map +1 -1
- package/dist/src/tasks/audio/automaticSpeechRecognition.d.ts.map +1 -1
- package/dist/src/tasks/custom/request.d.ts +1 -0
- package/dist/src/tasks/custom/request.d.ts.map +1 -1
- package/dist/src/tasks/custom/streamingRequest.d.ts +1 -0
- package/dist/src/tasks/custom/streamingRequest.d.ts.map +1 -1
- package/dist/src/tasks/cv/imageToText.d.ts.map +1 -1
- package/dist/src/tasks/cv/objectDetection.d.ts +1 -1
- package/dist/src/tasks/cv/objectDetection.d.ts.map +1 -1
- package/dist/src/tasks/cv/textToVideo.d.ts +1 -1
- package/dist/src/tasks/cv/textToVideo.d.ts.map +1 -1
- package/dist/src/tasks/cv/zeroShotImageClassification.d.ts +1 -1
- package/dist/src/tasks/cv/zeroShotImageClassification.d.ts.map +1 -1
- package/dist/src/tasks/multimodal/documentQuestionAnswering.d.ts +1 -1
- package/dist/src/tasks/multimodal/documentQuestionAnswering.d.ts.map +1 -1
- package/dist/src/tasks/multimodal/visualQuestionAnswering.d.ts.map +1 -1
- package/dist/src/tasks/nlp/chatCompletion.d.ts +1 -1
- package/dist/src/tasks/nlp/chatCompletion.d.ts.map +1 -1
- package/dist/src/tasks/nlp/chatCompletionStream.d.ts +1 -1
- package/dist/src/tasks/nlp/chatCompletionStream.d.ts.map +1 -1
- package/dist/src/tasks/nlp/questionAnswering.d.ts.map +1 -1
- package/dist/src/tasks/nlp/sentenceSimilarity.d.ts.map +1 -1
- package/dist/src/tasks/nlp/textClassification.d.ts.map +1 -1
- package/dist/src/tasks/nlp/tokenClassification.d.ts.map +1 -1
- package/dist/src/tasks/nlp/zeroShotClassification.d.ts.map +1 -1
- package/dist/src/types.d.ts +7 -0
- package/dist/src/types.d.ts.map +1 -1
- package/dist/src/utils/request.d.ts +27 -0
- package/dist/src/utils/request.d.ts.map +1 -0
- package/package.json +2 -2
- package/src/config.ts +1 -0
- package/src/lib/makeRequestOptions.ts +5 -2
- package/src/snippets/templates.exported.ts +1 -1
- package/src/tasks/audio/audioClassification.ts +2 -2
- package/src/tasks/audio/audioToAudio.ts +2 -2
- package/src/tasks/audio/automaticSpeechRecognition.ts +3 -3
- package/src/tasks/audio/textToSpeech.ts +2 -2
- package/src/tasks/custom/request.ts +7 -32
- package/src/tasks/custom/streamingRequest.ts +5 -85
- package/src/tasks/cv/imageClassification.ts +2 -2
- package/src/tasks/cv/imageSegmentation.ts +2 -2
- package/src/tasks/cv/imageToImage.ts +2 -2
- package/src/tasks/cv/imageToText.ts +7 -9
- package/src/tasks/cv/objectDetection.ts +4 -4
- package/src/tasks/cv/textToImage.ts +3 -3
- package/src/tasks/cv/textToVideo.ts +23 -20
- package/src/tasks/cv/zeroShotImageClassification.ts +4 -5
- package/src/tasks/multimodal/documentQuestionAnswering.ts +13 -13
- package/src/tasks/multimodal/visualQuestionAnswering.ts +4 -2
- package/src/tasks/nlp/chatCompletion.ts +3 -4
- package/src/tasks/nlp/chatCompletionStream.ts +3 -3
- package/src/tasks/nlp/featureExtraction.ts +2 -2
- package/src/tasks/nlp/fillMask.ts +2 -2
- package/src/tasks/nlp/questionAnswering.ts +3 -2
- package/src/tasks/nlp/sentenceSimilarity.ts +2 -11
- package/src/tasks/nlp/summarization.ts +2 -2
- package/src/tasks/nlp/tableQuestionAnswering.ts +2 -2
- package/src/tasks/nlp/textClassification.ts +8 -9
- package/src/tasks/nlp/textGeneration.ts +16 -16
- package/src/tasks/nlp/textGenerationStream.ts +2 -2
- package/src/tasks/nlp/tokenClassification.ts +9 -10
- package/src/tasks/nlp/translation.ts +2 -2
- package/src/tasks/nlp/zeroShotClassification.ts +9 -10
- package/src/tasks/tabular/tabularClassification.ts +2 -2
- package/src/tasks/tabular/tabularRegression.ts +2 -2
- package/src/types.ts +8 -0
- package/src/utils/request.ts +161 -0
package/README.md
CHANGED
@@ -572,31 +572,6 @@ await hf.tabularClassification({
 })
 ```
 
-## Custom Calls
-
-For models with custom parameters / outputs.
-
-```typescript
-await hf.request({
-  model: 'my-custom-model',
-  inputs: 'hello world',
-  parameters: {
-    custom_param: 'some magic',
-  }
-})
-
-// Custom streaming call, for models with custom parameters / outputs
-for await (const output of hf.streamingRequest({
-  model: 'my-custom-model',
-  inputs: 'hello world',
-  parameters: {
-    custom_param: 'some magic',
-  }
-})) {
-  ...
-}
-```
-
 You can use any Chat Completion API-compatible provider with the `chatCompletion` method.
 
 ```typescript
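The removed "Custom Calls" section documented the `request` / `streamingRequest` helpers, which this release deprecates in favor of task-specific methods (see the `dist/index.cjs` changes below). A minimal replacement sketch, assuming the model maps to a supported task — model ids and the token are placeholders:

```typescript
import { HfInference } from "@huggingface/inference";

const hf = new HfInference("hf_...");

// Instead of hf.request({ model, inputs, parameters }),
// call the task method that matches the model's pipeline:
const labels = await hf.textClassification({
  model: "distilbert-base-uncased-finetuned-sst-2-english",
  inputs: "hello world",
});

// Instead of hf.streamingRequest(...), use a task-specific stream:
for await (const chunk of hf.chatCompletionStream({
  model: "HuggingFaceH4/zephyr-7b-beta",
  messages: [{ role: "user", content: "hello world" }],
})) {
  process.stdout.write(chunk.choices[0]?.delta?.content ?? "");
}
```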
package/dist/index.cjs
CHANGED
@@ -101,6 +101,7 @@ __export(tasks_exports, {
 // src/config.ts
 var HF_HUB_URL = "https://huggingface.co";
 var HF_ROUTER_URL = "https://router.huggingface.co";
+var HF_HEADER_X_BILL_TO = "X-HF-Bill-To";
 
 // src/providers/black-forest-labs.ts
 var BLACK_FOREST_LABS_AI_API_BASE_URL = "https://api.us1.bfl.ai";
@@ -532,7 +533,7 @@ var OPENAI_CONFIG = {
 
 // package.json
 var name = "@huggingface/inference";
-var version = "3.6.2";
+var version = "3.7.0";
 
 // src/providers/consts.ts
 var HARDCODED_MODEL_ID_MAPPING = {
@@ -654,7 +655,7 @@ function makeRequestOptionsFromResolvedModel(resolvedModel, args, options) {
   const { accessToken, endpointUrl, provider: maybeProvider, model, ...remainingArgs } = args;
   const provider = maybeProvider ?? "hf-inference";
   const providerConfig = providerConfigs[provider];
-  const { includeCredentials, task, chatCompletion: chatCompletion2, signal } = options ?? {};
+  const { includeCredentials, task, chatCompletion: chatCompletion2, signal, billTo } = options ?? {};
   const authMethod = (() => {
     if (providerConfig.clientSideRoutingOnly) {
       if (accessToken && accessToken.startsWith("hf_")) {
@@ -682,6 +683,9 @@ function makeRequestOptionsFromResolvedModel(resolvedModel, args, options) {
     accessToken,
     authMethod
   });
+  if (billTo) {
+    headers[HF_HEADER_X_BILL_TO] = billTo;
+  }
   if (!binary) {
     headers["Content-Type"] = "application/json";
   }
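Together with the `HF_HEADER_X_BILL_TO` constant above, this hunk adds organization billing: a new `billTo` request option is forwarded as the `X-HF-Bill-To` header. A usage sketch, assuming `billTo` is exposed on the public `Options` type (consistent with the `dist/src/types.d.ts +7` entry in the file list); model and org name are placeholders:

```typescript
import { textToImage } from "@huggingface/inference";

// The second argument carries per-request options; billTo is sent
// along with the request as the X-HF-Bill-To header.
const image = await textToImage(
  {
    accessToken: "hf_...",
    provider: "fal-ai",
    model: "black-forest-labs/FLUX.1-dev",
    inputs: "a green bird on a branch",
  },
  { billTo: "my-org" }
);
```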
@@ -735,37 +739,6 @@ function removeProviderPrefix(model, provider) {
   return model.slice(provider.length + 1);
 }
 
-// src/tasks/custom/request.ts
-async function request(args, options) {
-  const { url, info } = await makeRequestOptions(args, options);
-  const response = await (options?.fetch ?? fetch)(url, info);
-  if (options?.retry_on_error !== false && response.status === 503) {
-    return request(args, options);
-  }
-  if (!response.ok) {
-    const contentType = response.headers.get("Content-Type");
-    if (["application/json", "application/problem+json"].some((ct) => contentType?.startsWith(ct))) {
-      const output = await response.json();
-      if ([400, 422, 404, 500].includes(response.status) && options?.chatCompletion) {
-        throw new Error(
-          `Server ${args.model} does not seem to support chat completion. Error: ${JSON.stringify(output.error)}`
-        );
-      }
-      if (output.error || output.detail) {
-        throw new Error(JSON.stringify(output.error ?? output.detail));
-      } else {
-        throw new Error(output);
-      }
-    }
-    const message = contentType?.startsWith("text/plain;") ? await response.text() : void 0;
-    throw new Error(message ?? "An error occurred while fetching the blob");
-  }
-  if (response.headers.get("Content-Type")?.startsWith("application/json")) {
-    return await response.json();
-  }
-  return await response.blob();
-}
-
 // src/vendor/fetch-event-source/parse.ts
 function getLines(onLine) {
   let buffer;
@@ -865,12 +838,44 @@ function newMessage() {
   };
 }
 
-// src/tasks/custom/streamingRequest.ts
-async function* streamingRequest(args, options) {
+// src/utils/request.ts
+async function innerRequest(args, options) {
+  const { url, info } = await makeRequestOptions(args, options);
+  const response = await (options?.fetch ?? fetch)(url, info);
+  const requestContext = { url, info };
+  if (options?.retry_on_error !== false && response.status === 503) {
+    return innerRequest(args, options);
+  }
+  if (!response.ok) {
+    const contentType = response.headers.get("Content-Type");
+    if (["application/json", "application/problem+json"].some((ct) => contentType?.startsWith(ct))) {
+      const output = await response.json();
+      if ([400, 422, 404, 500].includes(response.status) && options?.chatCompletion) {
+        throw new Error(
+          `Server ${args.model} does not seem to support chat completion. Error: ${JSON.stringify(output.error)}`
+        );
+      }
+      if (output.error || output.detail) {
+        throw new Error(JSON.stringify(output.error ?? output.detail));
+      } else {
+        throw new Error(output);
+      }
+    }
+    const message = contentType?.startsWith("text/plain;") ? await response.text() : void 0;
+    throw new Error(message ?? "An error occurred while fetching the blob");
+  }
+  if (response.headers.get("Content-Type")?.startsWith("application/json")) {
+    const data = await response.json();
+    return { data, requestContext };
+  }
+  const blob = await response.blob();
+  return { data: blob, requestContext };
+}
+async function* innerStreamingRequest(args, options) {
   const { url, info } = await makeRequestOptions({ ...args, stream: true }, options);
   const response = await (options?.fetch ?? fetch)(url, info);
   if (options?.retry_on_error !== false && response.status === 503) {
-    return yield* streamingRequest(args, options);
+    return yield* innerStreamingRequest(args, options);
   }
   if (!response.ok) {
     if (response.headers.get("Content-Type")?.startsWith("application/json")) {
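`innerRequest` differs from the old `request` in that it returns the parsed body together with the `url` and `RequestInit` it was issued with, so callers (e.g. `textToVideo` below) can reuse the exact request context instead of rebuilding it. A rough sketch of the return shape as TypeScript types — illustrative only, since the dist bundle is untyped JavaScript and these names are not published API:

```typescript
// Illustrative types for the internal helper's return value.
interface RequestContext {
  url: string;       // resolved provider endpoint
  info: RequestInit; // fetch init, including auth (and billTo) headers
}

interface InnerRequestResult<T> {
  data: T | Blob;                 // JSON body, or a Blob for binary responses
  requestContext: RequestContext; // reusable for follow-up calls such as polling
}
```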
@@ -884,6 +889,9 @@ async function* streamingRequest(args, options) {
       if (output.error && "message" in output.error && typeof output.error.message === "string") {
         throw new Error(output.error.message);
       }
+      if (typeof output.message === "string") {
+        throw new Error(output.message);
+      }
     }
     throw new Error(`Server response contains error: ${response.status}`);
   }
@@ -936,6 +944,23 @@ async function* streamingRequest(args, options) {
   }
 }
 
+// src/tasks/custom/request.ts
+async function request(args, options) {
+  console.warn(
+    "The request method is deprecated and will be removed in a future version of huggingface.js. Use specific task functions instead."
+  );
+  const result = await innerRequest(args, options);
+  return result.data;
+}
+
+// src/tasks/custom/streamingRequest.ts
+async function* streamingRequest(args, options) {
+  console.warn(
+    "The streamingRequest method is deprecated and will be removed in a future version of huggingface.js. Use specific task functions instead."
+  );
+  yield* innerStreamingRequest(args, options);
+}
+
 // src/utils/pick.ts
 function pick(o, props) {
   return Object.assign(
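The public helpers remain exported but now only warn and delegate to the internal functions, so existing code keeps working while signalling the migration path. For example (model id is a placeholder):

```typescript
import { request, textClassification } from "@huggingface/inference";

// Still works in 3.7.0, but logs the deprecation warning above:
const legacy = await request({ model: "my-custom-model", inputs: "hello world" });

// Preferred: the matching task function, with typed inputs and outputs:
const modern = await textClassification({
  model: "my-custom-model",
  inputs: "hello world",
});
```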
@@ -971,7 +996,7 @@ function preparePayload(args) {
 // src/tasks/audio/audioClassification.ts
 async function audioClassification(args, options) {
   const payload = preparePayload(args);
-  const res = await request(payload, {
+  const { data: res } = await innerRequest(payload, {
     ...options,
     task: "audio-classification"
   });
@@ -998,7 +1023,7 @@ function base64FromBytes(arr) {
 // src/tasks/audio/automaticSpeechRecognition.ts
 async function automaticSpeechRecognition(args, options) {
   const payload = await buildPayload(args);
-  const res = await request(payload, {
+  const { data: res } = await innerRequest(payload, {
     ...options,
     task: "automatic-speech-recognition"
   });
@@ -1042,7 +1067,7 @@ async function textToSpeech(args, options) {
     ...args.parameters,
     text: args.inputs
   } : args;
-  const res = await request(payload, {
+  const { data: res } = await innerRequest(payload, {
     ...options,
     task: "text-to-speech"
   });
@@ -1068,7 +1093,7 @@ async function textToSpeech(args, options) {
 // src/tasks/audio/audioToAudio.ts
 async function audioToAudio(args, options) {
   const payload = preparePayload(args);
-  const res = await request(payload, {
+  const { data: res } = await innerRequest(payload, {
     ...options,
     task: "audio-to-audio"
   });
@@ -1094,7 +1119,7 @@ function preparePayload2(args) {
 // src/tasks/cv/imageClassification.ts
 async function imageClassification(args, options) {
   const payload = preparePayload2(args);
-  const res = await request(payload, {
+  const { data: res } = await innerRequest(payload, {
     ...options,
     task: "image-classification"
   });
@@ -1108,7 +1133,7 @@ async function imageClassification(args, options) {
 // src/tasks/cv/imageSegmentation.ts
 async function imageSegmentation(args, options) {
   const payload = preparePayload2(args);
-  const res = await request(payload, {
+  const { data: res } = await innerRequest(payload, {
     ...options,
     task: "image-segmentation"
   });
@@ -1122,20 +1147,20 @@ async function imageSegmentation(args, options) {
 // src/tasks/cv/imageToText.ts
 async function imageToText(args, options) {
   const payload = preparePayload2(args);
-  const res = (await request(payload, {
+  const { data: res } = await innerRequest(payload, {
     ...options,
     task: "image-to-text"
-  }))?.[0];
-  if (typeof res?.generated_text !== "string") {
+  });
+  if (typeof res?.[0]?.generated_text !== "string") {
     throw new InferenceOutputError("Expected {generated_text: string}");
   }
-  return res;
+  return res?.[0];
 }
 
 // src/tasks/cv/objectDetection.ts
 async function objectDetection(args, options) {
   const payload = preparePayload2(args);
-  const res = await request(payload, {
+  const { data: res } = await innerRequest(payload, {
     ...options,
     task: "object-detection"
   });
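`imageToText` previously unwrapped the first array element inside the `request` call; since `innerRequest` hands back the raw array, the unwrap moves into the validation (`res?.[0]?.generated_text`) and the return value (`res?.[0]`). Callers still receive a single object. A usage sketch — model id and image URL are placeholders, and the argument shape follows the 3.x image tasks:

```typescript
import { imageToText } from "@huggingface/inference";

const image = await (await fetch("https://example.com/cat.png")).blob();
const { generated_text } = await imageToText({
  accessToken: "hf_...",
  model: "nlpconnect/vit-gpt2-image-captioning",
  data: image,
});
console.log(generated_text);
```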
@@ -1172,7 +1197,7 @@ async function textToImage(args, options) {
     ...getResponseFormatArg(args.provider),
     prompt: args.inputs
   };
-  const res = await request(payload, {
+  const { data: res } = await innerRequest(payload, {
     ...options,
     task: "text-to-image"
   });
@@ -1261,7 +1286,7 @@ async function imageToImage(args, options) {
       )
     };
   }
-  const res = await request(reqArgs, {
+  const { data: res } = await innerRequest(reqArgs, {
     ...options,
     task: "image-to-image"
   });
@@ -1296,7 +1321,7 @@ async function preparePayload3(args) {
 }
 async function zeroShotImageClassification(args, options) {
   const payload = await preparePayload3(args);
-  const res = await request(payload, {
+  const { data: res } = await innerRequest(payload, {
     ...options,
     task: "zero-shot-image-classification"
   });
@@ -1316,33 +1341,36 @@ async function textToVideo(args, options) {
     );
   }
   const payload = args.provider === "fal-ai" || args.provider === "replicate" || args.provider === "novita" ? { ...omit(args, ["inputs", "parameters"]), ...args.parameters, prompt: args.inputs } : args;
-  const res = await request(payload, {
+  const { data, requestContext } = await innerRequest(payload, {
     ...options,
     task: "text-to-video"
   });
   if (args.provider === "fal-ai") {
-    const { url, info } = await makeRequestOptions(args, { ...options, task: "text-to-video" });
-    return await pollFalResponse(res, url, info.headers);
+    return await pollFalResponse(
+      data,
+      requestContext.url,
+      requestContext.info.headers
+    );
   } else if (args.provider === "novita") {
-    const isValidOutput = typeof res === "object" && !!res && "video" in res && typeof res.video === "object" && !!res.video && "video_url" in res.video && typeof res.video.video_url === "string" && isUrl(res.video.video_url);
+    const isValidOutput = typeof data === "object" && !!data && "video" in data && typeof data.video === "object" && !!data.video && "video_url" in data.video && typeof data.video.video_url === "string" && isUrl(data.video.video_url);
     if (!isValidOutput) {
       throw new InferenceOutputError("Expected { video: { video_url: string } }");
     }
-    const urlResponse = await fetch(res.video.video_url);
+    const urlResponse = await fetch(data.video.video_url);
     return await urlResponse.blob();
   } else {
-    const isValidOutput = typeof res === "object" && !!res && "output" in res && typeof res.output === "string" && isUrl(res.output);
+    const isValidOutput = typeof data === "object" && !!data && "output" in data && typeof data.output === "string" && isUrl(data.output);
     if (!isValidOutput) {
       throw new InferenceOutputError("Expected { output: string }");
     }
-    const urlResponse = await fetch(res.output);
+    const urlResponse = await fetch(data.output);
     return await urlResponse.blob();
   }
 }
 
 // src/tasks/nlp/featureExtraction.ts
 async function featureExtraction(args, options) {
-  const res = await request(args, {
+  const { data: res } = await innerRequest(args, {
     ...options,
     task: "feature-extraction"
   });
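For fal-ai, `pollFalResponse` is now fed from the captured `requestContext` rather than a second `makeRequestOptions` call, so polling reuses exactly the URL and headers of the original submission. The public behaviour is unchanged; a call sketch (model id is a placeholder):

```typescript
import { textToVideo } from "@huggingface/inference";

// Submits the job, then polls the fal-ai queue until the video is ready.
const video: Blob = await textToVideo({
  accessToken: "hf_...",
  provider: "fal-ai",
  model: "genmo/mochi-1-preview",
  inputs: "A cat chasing a butterfly in a garden",
});
```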
@@ -1365,7 +1393,7 @@ async function featureExtraction(args, options) {
 
 // src/tasks/nlp/fillMask.ts
 async function fillMask(args, options) {
-  const res = await request(args, {
+  const { data: res } = await innerRequest(args, {
     ...options,
     task: "fill-mask"
   });
@@ -1382,7 +1410,7 @@ async function fillMask(args, options) {
 
 // src/tasks/nlp/questionAnswering.ts
 async function questionAnswering(args, options) {
-  const res = await request(args, {
+  const { data: res } = await innerRequest(args, {
     ...options,
     task: "question-answering"
   });
|
|
|
1397
1425
|
|
|
1398
1426
|
// src/tasks/nlp/sentenceSimilarity.ts
|
|
1399
1427
|
async function sentenceSimilarity(args, options) {
|
|
1400
|
-
const res = await
|
|
1428
|
+
const { data: res } = await innerRequest(args, {
|
|
1401
1429
|
...options,
|
|
1402
1430
|
task: "sentence-similarity"
|
|
1403
1431
|
});
|
|
@@ -1407,17 +1435,10 @@ async function sentenceSimilarity(args, options) {
   }
   return res;
 }
-function prepareInput(args) {
-  return {
-    ...omit(args, ["inputs", "parameters"]),
-    inputs: { ...omit(args.inputs, "sourceSentence") },
-    parameters: { source_sentence: args.inputs.sourceSentence, ...args.parameters }
-  };
-}
 
 // src/tasks/nlp/summarization.ts
 async function summarization(args, options) {
-  const res = await request(args, {
+  const { data: res } = await innerRequest(args, {
     ...options,
     task: "summarization"
   });
@@ -1430,7 +1451,7 @@ async function summarization(args, options) {
 
 // src/tasks/nlp/tableQuestionAnswering.ts
 async function tableQuestionAnswering(args, options) {
-  const res = await request(args, {
+  const { data: res } = await innerRequest(args, {
     ...options,
     task: "table-question-answering"
   });
@@ -1450,15 +1471,16 @@ function validate(elem) {
 
 // src/tasks/nlp/textClassification.ts
 async function textClassification(args, options) {
-  const res = (await request(args, {
+  const { data: res } = await innerRequest(args, {
     ...options,
     task: "text-classification"
-  }))?.[0];
-  const isValidOutput = Array.isArray(res) && res.every((x) => typeof x?.label === "string" && typeof x.score === "number");
+  });
+  const output = res?.[0];
+  const isValidOutput = Array.isArray(output) && output.every((x) => typeof x?.label === "string" && typeof x.score === "number");
   if (!isValidOutput) {
     throw new InferenceOutputError("Expected Array<{label: string, score: number}>");
   }
-  return res;
+  return output;
 }
 
 // src/utils/toArray.ts
@@ -1473,7 +1495,7 @@ function toArray(obj) {
 async function textGeneration(args, options) {
   if (args.provider === "together") {
     args.prompt = args.inputs;
-    const raw = await request(args, {
+    const { data: raw } = await innerRequest(args, {
       ...options,
       task: "text-generation"
     });
@@ -1494,10 +1516,10 @@ async function textGeneration(args, options) {
     } : void 0,
     ...omit(args, ["inputs", "parameters"])
   };
-  const raw = await request(payload, {
+  const raw = (await innerRequest(payload, {
     ...options,
     task: "text-generation"
-  });
+  })).data;
   const isValidOutput = typeof raw === "object" && "choices" in raw && Array.isArray(raw?.choices) && typeof raw?.model === "string";
   if (!isValidOutput) {
     throw new InferenceOutputError("Expected ChatCompletionOutput");
@@ -1507,23 +1529,22 @@ async function textGeneration(args, options) {
       generated_text: completion.message.content
     };
   } else {
-    const res = toArray(
-      await request(args, {
-        ...options,
-        task: "text-generation"
-      })
-    );
-    const isValidOutput = Array.isArray(res) && res.every((x) => "generated_text" in x && typeof x?.generated_text === "string");
+    const { data: res } = await innerRequest(args, {
+      ...options,
+      task: "text-generation"
+    });
+    const output = toArray(res);
+    const isValidOutput = Array.isArray(output) && output.every((x) => "generated_text" in x && typeof x?.generated_text === "string");
     if (!isValidOutput) {
       throw new InferenceOutputError("Expected Array<{generated_text: string}>");
     }
-    return res?.[0];
+    return output?.[0];
   }
 }
 
 // src/tasks/nlp/textGenerationStream.ts
 async function* textGenerationStream(args, options) {
-  yield* streamingRequest(args, {
+  yield* innerStreamingRequest(args, {
     ...options,
     task: "text-generation"
   });
@@ -1531,13 +1552,12 @@ async function* textGenerationStream(args, options) {
 
 // src/tasks/nlp/tokenClassification.ts
 async function tokenClassification(args, options) {
-  const res = toArray(
-    await request(args, {
-      ...options,
-      task: "token-classification"
-    })
-  );
-  const isValidOutput = Array.isArray(res) && res.every(
+  const { data: res } = await innerRequest(args, {
+    ...options,
+    task: "token-classification"
+  });
+  const output = toArray(res);
+  const isValidOutput = Array.isArray(output) && output.every(
     (x) => typeof x.end === "number" && typeof x.entity_group === "string" && typeof x.score === "number" && typeof x.start === "number" && typeof x.word === "string"
   );
   if (!isValidOutput) {
@@ -1545,12 +1565,12 @@ async function tokenClassification(args, options) {
       "Expected Array<{end: number, entity_group: string, score: number, start: number, word: string}>"
     );
   }
-  return res;
+  return output;
 }
 
 // src/tasks/nlp/translation.ts
 async function translation(args, options) {
-  const res = await request(args, {
+  const { data: res } = await innerRequest(args, {
     ...options,
     task: "translation"
   });
@@ -1563,24 +1583,23 @@ async function translation(args, options) {
 
 // src/tasks/nlp/zeroShotClassification.ts
 async function zeroShotClassification(args, options) {
-  const res = toArray(
-    await request(args, {
-      ...options,
-      task: "zero-shot-classification"
-    })
-  );
-  const isValidOutput = Array.isArray(res) && res.every(
+  const { data: res } = await innerRequest(args, {
+    ...options,
+    task: "zero-shot-classification"
+  });
+  const output = toArray(res);
+  const isValidOutput = Array.isArray(output) && output.every(
     (x) => Array.isArray(x.labels) && x.labels.every((_label) => typeof _label === "string") && Array.isArray(x.scores) && x.scores.every((_score) => typeof _score === "number") && typeof x.sequence === "string"
   );
   if (!isValidOutput) {
     throw new InferenceOutputError("Expected Array<{labels: string[], scores: number[], sequence: string}>");
   }
-  return res;
+  return output;
 }
 
 // src/tasks/nlp/chatCompletion.ts
 async function chatCompletion(args, options) {
-  const res = await request(args, {
+  const { data: res } = await innerRequest(args, {
     ...options,
     task: "text-generation",
     chatCompletion: true
@@ -1595,7 +1614,7 @@ async function chatCompletion(args, options) {
 
 // src/tasks/nlp/chatCompletionStream.ts
 async function* chatCompletionStream(args, options) {
-  yield* streamingRequest(args, {
+  yield* innerStreamingRequest(args, {
     ...options,
     task: "text-generation",
     chatCompletion: true
@@ -1612,19 +1631,21 @@ async function documentQuestionAnswering(args, options) {
       image: base64FromBytes(new Uint8Array(await args.inputs.image.arrayBuffer()))
     }
   };
-  const res = toArray(
-    await request(reqArgs, {
+  const { data: res } = await innerRequest(
+    reqArgs,
+    {
       ...options,
       task: "document-question-answering"
-    })
+    }
   );
-  const isValidOutput = Array.isArray(res) && res.every(
+  const output = toArray(res);
+  const isValidOutput = Array.isArray(output) && output.every(
     (elem) => typeof elem === "object" && !!elem && typeof elem?.answer === "string" && (typeof elem.end === "number" || typeof elem.end === "undefined") && (typeof elem.score === "number" || typeof elem.score === "undefined") && (typeof elem.start === "number" || typeof elem.start === "undefined")
   );
   if (!isValidOutput) {
     throw new InferenceOutputError("Expected Array<{answer: string, end?: number, score?: number, start?: number}>");
   }
-  return res[0];
+  return output[0];
 }
 
 // src/tasks/multimodal/visualQuestionAnswering.ts
@@ -1637,7 +1658,7 @@ async function visualQuestionAnswering(args, options) {
       image: base64FromBytes(new Uint8Array(await args.inputs.image.arrayBuffer()))
     }
   };
-  const res = await request(reqArgs, {
+  const { data: res } = await innerRequest(reqArgs, {
     ...options,
     task: "visual-question-answering"
   });
|
|
|
1652
1673
|
|
|
1653
1674
|
// src/tasks/tabular/tabularRegression.ts
|
|
1654
1675
|
async function tabularRegression(args, options) {
|
|
1655
|
-
const res = await
|
|
1676
|
+
const { data: res } = await innerRequest(args, {
|
|
1656
1677
|
...options,
|
|
1657
1678
|
task: "tabular-regression"
|
|
1658
1679
|
});
|
|
@@ -1665,7 +1686,7 @@ async function tabularRegression(args, options) {
 
 // src/tasks/tabular/tabularClassification.ts
 async function tabularClassification(args, options) {
-  const res = await request(args, {
+  const { data: res } = await innerRequest(args, {
     ...options,
     task: "tabular-classification"
   });
@@ -1753,7 +1774,7 @@ var templates = {
   "basicAudio": 'async function query(data) {\n	const response = await fetch(\n		"{{ fullUrl }}",\n		{\n			headers: {\n				Authorization: "{{ authorizationHeader }}",\n				"Content-Type": "audio/flac"\n			},\n			method: "POST",\n			body: JSON.stringify(data),\n		}\n	);\n	const result = await response.json();\n	return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n    console.log(JSON.stringify(response));\n});',
   "basicImage": 'async function query(data) {\n	const response = await fetch(\n		"{{ fullUrl }}",\n		{\n			headers: {\n				Authorization: "{{ authorizationHeader }}",\n				"Content-Type": "image/jpeg"\n			},\n			method: "POST",\n			body: JSON.stringify(data),\n		}\n	);\n	const result = await response.json();\n	return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n    console.log(JSON.stringify(response));\n});',
   "textToAudio": '{% if model.library_name == "transformers" %}\nasync function query(data) {\n	const response = await fetch(\n		"{{ fullUrl }}",\n		{\n			headers: {\n				Authorization: "{{ authorizationHeader }}",\n				"Content-Type": "application/json",\n			},\n			method: "POST",\n			body: JSON.stringify(data),\n		}\n	);\n	const result = await response.blob();\n	return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n	// Returns a byte object of the Audio wavform. Use it directly!\n});\n{% else %}\nasync function query(data) {\n	const response = await fetch(\n		"{{ fullUrl }}",\n		{\n			headers: {\n				Authorization: "{{ authorizationHeader }}",\n				"Content-Type": "application/json",\n			},\n			method: "POST",\n			body: JSON.stringify(data),\n		}\n	);\n	const result = await response.json();\n	return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n    console.log(JSON.stringify(response));\n});\n{% endif %} ',
-  "textToImage": 'async function query(data) {\n	const response = await fetch(\n		"{{ fullUrl }}",\n		{\n			headers: {\n				Authorization: "{{ authorizationHeader }}",\n				"Content-Type": "application/json",\n			},\n			method: "POST",\n			body: JSON.stringify(data),\n		}\n	);\n	const result = await response.blob();\n	return result;\n}\n\nquery({
+  "textToImage": 'async function query(data) {\n	const response = await fetch(\n		"{{ fullUrl }}",\n		{\n			headers: {\n				Authorization: "{{ authorizationHeader }}",\n				"Content-Type": "application/json",\n			},\n			method: "POST",\n			body: JSON.stringify(data),\n		}\n	);\n	const result = await response.blob();\n	return result;\n}\n\n\nquery({ {{ providerInputs.asTsString }} }).then((response) => {\n	// Use image\n});',
   "zeroShotClassification": 'async function query(data) {\n	const response = await fetch(\n		"{{ fullUrl }}",\n		{\n			headers: {\n				Authorization: "{{ authorizationHeader }}",\n				"Content-Type": "application/json",\n			},\n			method: "POST",\n			body: JSON.stringify(data),\n		}\n	);\n	const result = await response.json();\n	return result;\n}\n\nquery({\n    inputs: {{ providerInputs.asObj.inputs }},\n    parameters: { candidate_labels: ["refund", "legal", "faq"] }\n}).then((response) => {\n    console.log(JSON.stringify(response));\n});'
 },
 "huggingface.js": {