@huggingface/inference 2.6.3 → 2.6.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +247 -89
- package/dist/{index.mjs → index.cjs} +58 -19
- package/dist/index.d.ts +47 -139
- package/dist/index.js +6 -72
- package/package.json +13 -15
- package/src/lib/getDefaultTask.ts +1 -1
- package/src/lib/makeRequestOptions.ts +1 -0
- package/src/tasks/custom/request.ts +1 -1
- package/src/tasks/custom/streamingRequest.ts +1 -1
- package/src/tasks/index.ts +0 -1
- package/src/tasks/nlp/textGeneration.ts +5 -57
- package/src/tasks/nlp/textGenerationStream.ts +4 -3
- package/src/tasks/nlp/translation.ts +6 -4
- package/src/types.ts +9 -35
- package/src/tasks/nlp/conversational.ts +0 -81
package/dist/index.d.ts
CHANGED
@@ -1,10 +1,11 @@
+
 export interface Options {
 /**
 * (Default: true) Boolean. If a request 503s and wait_for_model is set to false, the request will be retried with the same parameters but with wait_for_model set to true.
 */
 retry_on_error?: boolean;
 /**
-* (Default: true). Boolean. There is a cache layer on
+* (Default: true). Boolean. There is a cache layer on Inference API (serverless) to speedup requests we have already seen. Most models can use those results as is as models are deterministic (meaning the results will be the same anyway). However if you use a non deterministic model, you can set this parameter to prevent the caching mechanism from being used resulting in a real new query.
 */
 use_cache?: boolean;
 /**
@@ -24,6 +25,10 @@ export interface Options {
 * Custom fetch function to use instead of the default one, for example to use a proxy or edit headers.
 */
 fetch?: typeof fetch;
+/**
+* Abort Controller signal to use for request interruption.
+*/
+signal?: AbortSignal;
 
 /**
 * (Default: "same-origin"). String | Boolean. Credentials to use for the request. If this is a string, it will be passed straight on. If it's a boolean, true will be "include" and false will not send credentials at all.
@@ -35,35 +40,51 @@ export type InferenceTask =
 | "audio-classification"
 | "audio-to-audio"
 | "automatic-speech-recognition"
-| "conversational"
 | "depth-estimation"
 | "document-question-answering"
 | "feature-extraction"
 | "fill-mask"
+| "graph-ml"
 | "image-classification"
+| "image-feature-extraction"
 | "image-segmentation"
+| "image-text-to-text"
+| "image-to-3d"
 | "image-to-image"
 | "image-to-text"
+| "image-to-video"
+| "mask-generation"
+| "multiple-choice"
 | "object-detection"
-| "video-classification"
 | "question-answering"
 | "reinforcement-learning"
+| "robotics"
 | "sentence-similarity"
 | "summarization"
 | "table-question-answering"
+| "table-to-text"
 | "tabular-classification"
 | "tabular-regression"
+| "tabular-to-text"
 | "text-classification"
 | "text-generation"
+| "text-retrieval"
+| "text-to-3d"
+| "text-to-audio"
 | "text-to-image"
 | "text-to-speech"
 | "text-to-video"
+| "text2text-generation"
+| "time-series-forecasting"
 | "token-classification"
 | "translation"
 | "unconditional-image-generation"
+| "video-classification"
 | "visual-question-answering"
+| "voice-activity-detection"
 | "zero-shot-classification"
-| "zero-shot-image-classification"
+| "zero-shot-image-classification"
+| "zero-shot-object-detection";
 
 export interface BaseArgs {
 /**
@@ -73,7 +94,7 @@ export interface BaseArgs {
 */
 accessToken?: string;
 /**
-* The model to use. Can be a full URL for
+* The model to use. Can be a full URL for a dedicated inference endpoint.
 *
 * If not specified, will call huggingface.co/api/tasks to get the default model for the task.
 */
@@ -174,7 +195,7 @@ export type TextToSpeechOutput = Blob;
 */
 export function textToSpeech(args: TextToSpeechArgs, options?: Options): Promise<TextToSpeechOutput>;
 /**
-* Primitive to make custom calls to
+* Primitive to make custom calls to Inference Endpoints
 */
 export function request<T>(
 args: RequestArgs,
@@ -479,65 +500,6 @@ export function visualQuestionAnswering(
 args: VisualQuestionAnsweringArgs,
 options?: Options
 ): Promise<VisualQuestionAnsweringOutput>;
-export type ConversationalArgs = BaseArgs & {
-inputs: {
-/**
-* A list of strings corresponding to the earlier replies from the model.
-*/
-generated_responses?: string[];
-/**
-* A list of strings corresponding to the earlier replies from the user. Should be of the same length of generated_responses.
-*/
-past_user_inputs?: string[];
-/**
-* The last input from the user in the conversation.
-*/
-text: string;
-};
-parameters?: {
-/**
-* (Default: None). Integer to define the maximum length in tokens of the output summary.
-*/
-max_length?: number;
-/**
-* (Default: None). Float (0-120.0). The amount of time in seconds that the query should take maximum. Network can cause some overhead so it will be a soft limit.
-*/
-max_time?: number;
-/**
-* (Default: None). Integer to define the minimum length in tokens of the output summary.
-*/
-min_length?: number;
-/**
-* (Default: None). Float (0.0-100.0). The more a token is used within generation the more it is penalized to not be picked in successive generation passes.
-*/
-repetition_penalty?: number;
-/**
-* (Default: 1.0). Float (0.0-100.0). The temperature of the sampling operation. 1 means regular sampling, 0 means always take the highest score, 100.0 is getting closer to uniform probability.
-*/
-temperature?: number;
-/**
-* (Default: None). Integer to define the top tokens considered within the sample operation to create new text.
-*/
-top_k?: number;
-/**
-* (Default: None). Float to define the tokens that are within the sample operation of text generation. Add tokens in the sample for more probable to least probable until the sum of the probabilities is greater than top_p.
-*/
-top_p?: number;
-};
-};
-export interface ConversationalOutput {
-conversation: {
-generated_responses: string[];
-past_user_inputs: string[];
-};
-generated_text: string;
-warnings: string[];
-}
-/**
-* This task corresponds to any chatbot like structure. Models tend to have shorter max_length, so please check with caution when using a given model if you need long range dependency or not. Recommended model: microsoft/DialoGPT-large.
-*
-*/
-export function conversational(args: ConversationalArgs, options?: Options): Promise<ConversationalOutput>;
 export type FeatureExtractionArgs = BaseArgs & {
 /**
 * The inputs is a string or a list of strings to get the features from.
@@ -740,64 +702,13 @@ export function textClassification(
 args: TextClassificationArgs,
 options?: Options
 ): Promise<TextClassificationOutput>;
-export type TextGenerationArgs = BaseArgs & {
-/**
-* A string to be generated from
-*/
-inputs: string;
-parameters?: {
-/**
-* (Optional: True). Bool. Whether or not to use sampling, use greedy decoding otherwise.
-*/
-do_sample?: boolean;
-/**
-* (Default: None). Int (0-250). The amount of new tokens to be generated, this does not include the input length it is a estimate of the size of generated text you want. Each new tokens slows down the request, so look for balance between response times and length of text generated.
-*/
-max_new_tokens?: number;
-/**
-* (Default: None). Float (0-120.0). The amount of time in seconds that the query should take maximum. Network can cause some overhead so it will be a soft limit. Use that in combination with max_new_tokens for best results.
-*/
-max_time?: number;
-/**
-* (Default: 1). Integer. The number of proposition you want to be returned.
-*/
-num_return_sequences?: number;
-/**
-* (Default: None). Float (0.0-100.0). The more a token is used within generation the more it is penalized to not be picked in successive generation passes.
-*/
-repetition_penalty?: number;
-/**
-* (Default: True). Bool. If set to False, the return results will not contain the original query making it easier for prompting.
-*/
-return_full_text?: boolean;
-/**
-* (Default: 1.0). Float (0.0-100.0). The temperature of the sampling operation. 1 means regular sampling, 0 means always take the highest score, 100.0 is getting closer to uniform probability.
-*/
-temperature?: number;
-/**
-* (Default: None). Integer to define the top tokens considered within the sample operation to create new text.
-*/
-top_k?: number;
-/**
-* (Default: None). Float to define the tokens that are within the sample operation of text generation. Add tokens in the sample for more probable to least probable until the sum of the probabilities is greater than top_p.
-*/
-top_p?: number;
-/**
-* (Default: None). Integer. The maximum number of tokens from the input.
-*/
-truncate?: number;
-};
-};
-export interface TextGenerationOutput {
-/**
-* The continuated string
-*/
-generated_text: string;
-}
 /**
 * Use to continue text from a prompt. This is a very generic task. Recommended model: gpt2 (it’s a simple model, but fun to play with).
 */
-export function textGeneration(
+export function textGeneration(
+args: BaseArgs & TextGenerationInput,
+options?: Options
+): Promise<TextGenerationOutput>;
 export type TextGenerationStreamFinishReason =
 /** number of generated tokens == `max_new_tokens` */
 | "length"
@@ -875,7 +786,7 @@ export interface TextGenerationStreamOutput {
 * Use to continue text from a prompt. Same as `textGeneration` but returns generator that can be read one token at a time
 */
 export function textGenerationStream(
-args:
+args: BaseArgs & TextGenerationInput,
 options?: Options
 ): AsyncGenerator<TextGenerationStreamOutput>;
 export type TokenClassificationArgs = BaseArgs & {
@@ -934,9 +845,10 @@ export type TranslationArgs = BaseArgs & {
 /**
 * A string to be translated
 */
-inputs: string;
+inputs: string | string[];
 };
-export
+export type TranslationOutput = TranslationOutputValue | TranslationOutputValue[];
+export interface TranslationOutputValue {
 /**
 * The string after translation
 */
@@ -1050,7 +962,7 @@ export class HfInference {
 */
 textToSpeech(args: Omit<TextToSpeechArgs, 'accessToken'>, options?: Options): Promise<TextToSpeechOutput>;
 /**
-* Primitive to make custom calls to
+* Primitive to make custom calls to Inference Endpoints
 */
 request<T>(
 args: Omit<RequestArgs, 'accessToken'>,
@@ -1130,11 +1042,6 @@ export class HfInference {
 args: Omit<VisualQuestionAnsweringArgs, 'accessToken'>,
 options?: Options
 ): Promise<VisualQuestionAnsweringOutput>;
-/**
-* This task corresponds to any chatbot like structure. Models tend to have shorter max_length, so please check with caution when using a given model if you need long range dependency or not. Recommended model: microsoft/DialoGPT-large.
-*
-*/
-conversational(args: Omit<ConversationalArgs, 'accessToken'>, options?: Options): Promise<ConversationalOutput>;
 /**
 * This task reads some text and outputs raw float values, that are usually consumed as part of a semantic database/semantic search.
 */
@@ -1181,12 +1088,15 @@ export class HfInference {
 /**
 * Use to continue text from a prompt. This is a very generic task. Recommended model: gpt2 (it’s a simple model, but fun to play with).
 */
-textGeneration(
+textGeneration(
+args: Omit<BaseArgs, 'accessToken'> & TextGenerationInput,
+options?: Options
+): Promise<TextGenerationOutput>;
 /**
 * Use to continue text from a prompt. Same as `textGeneration` but returns generator that can be read one token at a time
 */
 textGenerationStream(
-args: Omit<
+args: Omit<BaseArgs, 'accessToken'> & TextGenerationInput,
 options?: Options
 ): AsyncGenerator<TextGenerationStreamOutput>;
 /**
@@ -1255,7 +1165,7 @@ export class HfInferenceEndpoint {
 */
 textToSpeech(args: Omit<TextToSpeechArgs, 'accessToken' | 'model'>, options?: Options): Promise<TextToSpeechOutput>;
 /**
-* Primitive to make custom calls to
+* Primitive to make custom calls to Inference Endpoints
 */
 request<T>(
 args: Omit<RequestArgs, 'accessToken' | 'model'>,
@@ -1335,11 +1245,6 @@ export class HfInferenceEndpoint {
 args: Omit<VisualQuestionAnsweringArgs, 'accessToken' | 'model'>,
 options?: Options
 ): Promise<VisualQuestionAnsweringOutput>;
-/**
-* This task corresponds to any chatbot like structure. Models tend to have shorter max_length, so please check with caution when using a given model if you need long range dependency or not. Recommended model: microsoft/DialoGPT-large.
-*
-*/
-conversational(args: Omit<ConversationalArgs, 'accessToken' | 'model'>, options?: Options): Promise<ConversationalOutput>;
 /**
 * This task reads some text and outputs raw float values, that are usually consumed as part of a semantic database/semantic search.
 */
@@ -1386,12 +1291,15 @@ export class HfInferenceEndpoint {
 /**
 * Use to continue text from a prompt. This is a very generic task. Recommended model: gpt2 (it’s a simple model, but fun to play with).
 */
-textGeneration(
+textGeneration(
+args: Omit<BaseArgs, 'accessToken' | 'model'> & TextGenerationInput,
+options?: Options
+): Promise<TextGenerationOutput>;
 /**
 * Use to continue text from a prompt. Same as `textGeneration` but returns generator that can be read one token at a time
 */
 textGenerationStream(
-args: Omit<
+args: Omit<BaseArgs, 'accessToken' | 'model'> & TextGenerationInput,
 options?: Options
 ): AsyncGenerator<TextGenerationStreamOutput>;
 /**
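The new `signal` option added to `Options` above is forwarded to `fetch` (see the `makeRequestOptions` change in `dist/index.js` below), which makes in-flight requests cancellable. A minimal sketch of how a caller could use it; the access token and model name are placeholders, not part of the diff:

```ts
import { HfInference } from "@huggingface/inference";

const hf = new HfInference("hf_...");        // placeholder access token
const controller = new AbortController();

// Give up on the request after 5 seconds.
const timer = setTimeout(() => controller.abort(), 5_000);

try {
  const out = await hf.textGeneration(
    { model: "gpt2", inputs: "The capital of France is" }, // illustrative model
    { signal: controller.signal }
  );
  console.log(out.generated_text);
} catch (err) {
  // fetch rejects with an AbortError once the signal fires.
  console.error(err);
} finally {
  clearTimeout(timer);
}
```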
package/dist/index.js
CHANGED
@@ -1,61 +1,9 @@
 /// <reference path="./index.d.ts" />
-"use strict";
 var __defProp = Object.defineProperty;
-var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
-var __getOwnPropNames = Object.getOwnPropertyNames;
-var __hasOwnProp = Object.prototype.hasOwnProperty;
 var __export = (target, all) => {
 for (var name in all)
 __defProp(target, name, { get: all[name], enumerable: true });
 };
-var __copyProps = (to, from, except, desc) => {
-if (from && typeof from === "object" || typeof from === "function") {
-for (let key of __getOwnPropNames(from))
-if (!__hasOwnProp.call(to, key) && key !== except)
-__defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
-}
-return to;
-};
-var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
-
-// src/index.ts
-var src_exports = {};
-__export(src_exports, {
-HfInference: () => HfInference,
-HfInferenceEndpoint: () => HfInferenceEndpoint,
-InferenceOutputError: () => InferenceOutputError,
-audioClassification: () => audioClassification,
-audioToAudio: () => audioToAudio,
-automaticSpeechRecognition: () => automaticSpeechRecognition,
-conversational: () => conversational,
-documentQuestionAnswering: () => documentQuestionAnswering,
-featureExtraction: () => featureExtraction,
-fillMask: () => fillMask,
-imageClassification: () => imageClassification,
-imageSegmentation: () => imageSegmentation,
-imageToImage: () => imageToImage,
-imageToText: () => imageToText,
-objectDetection: () => objectDetection,
-questionAnswering: () => questionAnswering,
-request: () => request,
-sentenceSimilarity: () => sentenceSimilarity,
-streamingRequest: () => streamingRequest,
-summarization: () => summarization,
-tableQuestionAnswering: () => tableQuestionAnswering,
-tabularClassification: () => tabularClassification,
-tabularRegression: () => tabularRegression,
-textClassification: () => textClassification,
-textGeneration: () => textGeneration,
-textGenerationStream: () => textGenerationStream,
-textToImage: () => textToImage,
-textToSpeech: () => textToSpeech,
-tokenClassification: () => tokenClassification,
-translation: () => translation,
-visualQuestionAnswering: () => visualQuestionAnswering,
-zeroShotClassification: () => zeroShotClassification,
-zeroShotImageClassification: () => zeroShotImageClassification
-});
-module.exports = __toCommonJS(src_exports);
 
 // src/tasks/index.ts
 var tasks_exports = {};
@@ -63,7 +11,6 @@ __export(tasks_exports, {
 audioClassification: () => audioClassification,
 audioToAudio: () => audioToAudio,
 automaticSpeechRecognition: () => automaticSpeechRecognition,
-conversational: () => conversational,
 documentQuestionAnswering: () => documentQuestionAnswering,
 featureExtraction: () => featureExtraction,
 fillMask: () => fillMask,
@@ -192,7 +139,8 @@ async function makeRequestOptions(args, options) {
 ...otherArgs,
 options: options && otherOptions
 }),
-credentials
+credentials,
+signal: options?.signal
 };
 return { url, info };
 }
@@ -326,7 +274,7 @@ async function* streamingRequest(args, options) {
 const { url, info } = await makeRequestOptions({ ...args, stream: true }, options);
 const response = await (options?.fetch ?? fetch)(url, info);
 if (options?.retry_on_error !== false && response.status === 503 && !options?.wait_for_model) {
-return streamingRequest(args, {
+return yield* streamingRequest(args, {
 ...options,
 wait_for_model: true
 });
@@ -584,18 +532,6 @@ async function zeroShotImageClassification(args, options) {
 return res;
 }
 
-// src/tasks/nlp/conversational.ts
-async function conversational(args, options) {
-const res = await request(args, { ...options, taskHint: "conversational" });
-const isValidOutput = Array.isArray(res.conversation.generated_responses) && res.conversation.generated_responses.every((x) => typeof x === "string") && Array.isArray(res.conversation.past_user_inputs) && res.conversation.past_user_inputs.every((x) => typeof x === "string") && typeof res.generated_text === "string" && (typeof res.warnings === "undefined" || Array.isArray(res.warnings) && res.warnings.every((x) => typeof x === "string"));
-if (!isValidOutput) {
-throw new InferenceOutputError(
-"Expected {conversation: {generated_responses: string[], past_user_inputs: string[]}, generated_text: string, warnings: string[]}"
-);
-}
-return res;
-}
-
 // src/tasks/nlp/featureExtraction.ts
 async function featureExtraction(args, options) {
 const defaultTask = args.model ? await getDefaultTask(args.model, args.accessToken, options) : void 0;
@@ -765,7 +701,7 @@ async function translation(args, options) {
 if (!isValidOutput) {
 throw new InferenceOutputError("Expected type Array<{translation_text: string}>");
 }
-return res?.[0];
+return res?.length === 1 ? res?.[0] : res;
 }
 
 // src/tasks/nlp/zeroShotClassification.ts
@@ -902,15 +838,13 @@ var HfInferenceEndpoint = class {
 }
 }
 };
-
-0 && (module.exports = {
+export {
 HfInference,
 HfInferenceEndpoint,
 InferenceOutputError,
 audioClassification,
 audioToAudio,
 automaticSpeechRecognition,
-conversational,
 documentQuestionAnswering,
 featureExtraction,
 fillMask,
@@ -937,4 +871,4 @@ var HfInferenceEndpoint = class {
 visualQuestionAnswering,
 zeroShotClassification,
 zeroShotImageClassification
-}
+};
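The `translation` change above (`inputs: string | string[]` in the typings, `return res?.length === 1 ? res?.[0] : res` at runtime) means a single input keeps returning a single `{ translation_text }` value while an array of inputs now returns an array. A hedged sketch of both call shapes; the token and model name are placeholders:

```ts
import { translation } from "@huggingface/inference";

// Single input -> a single { translation_text } value, as before.
const one = await translation({
  accessToken: "hf_...",   // placeholder token
  model: "t5-base",        // illustrative model
  inputs: "My name is Wolfgang and I live in Berlin",
});

// Array input -> an array of { translation_text } values.
// Note: per the code above, a one-element array still comes back as a single value.
const many = await translation({
  accessToken: "hf_...",
  model: "t5-base",
  inputs: ["Hello world", "How are you today?"],
});
```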
package/package.json
CHANGED
@@ -1,10 +1,10 @@
 {
 "name": "@huggingface/inference",
-"version": "2.6.
-"packageManager": "pnpm@8.
+"version": "2.6.5",
+"packageManager": "pnpm@8.10.5",
 "license": "MIT",
 "author": "Tim Mikeladze <tim.mikeladze@gmail.com>",
-"description": "Typescript wrapper for the Hugging Face Inference API",
+"description": "Typescript wrapper for the Hugging Face Inference Endpoints & Inference API",
 "repository": {
 "type": "git",
 "url": "https://github.com/huggingface/huggingface.js.git"
@@ -30,30 +30,28 @@
 ],
 "source": "src/index.ts",
 "types": "./dist/index.d.ts",
-"main": "./dist/index.
-"module": "./dist/index.
+"main": "./dist/index.cjs",
+"module": "./dist/index.js",
 "exports": {
 "types": "./dist/index.d.ts",
-"require": "./dist/index.
-"import": "./dist/index.
+"require": "./dist/index.cjs",
+"import": "./dist/index.js"
 },
+"type": "module",
 "devDependencies": {
 "@types/node": "18.13.0",
-"
-"typescript": "^5.0.4",
-"vite": "^4.1.4",
-"vitest": "^0.29.8"
+"@huggingface/tasks": "^0.6.0"
 },
 "resolutions": {},
 "scripts": {
 "build": "tsup src/index.ts --format cjs,esm --clean && pnpm run dts",
-"dts": "
+"dts": "tsx scripts/generate-dts.ts",
 "lint": "eslint --quiet --fix --ext .cjs,.ts .",
 "lint:check": "eslint --ext .cjs,.ts .",
 "format": "prettier --write .",
 "format:check": "prettier --check .",
-"test": "vitest run --config vitest.config.
-"test:browser": "vitest run --browser.name=chrome --browser.headless --config vitest.config.
-"
+"test": "vitest run --config vitest.config.mts",
+"test:browser": "vitest run --browser.name=chrome --browser.headless --config vitest.config.mts",
+"check": "tsc"
 }
 }
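With `"type": "module"`, the CommonJS build renamed to `index.cjs`, and the `exports` map above, both module systems should keep resolving to a build of the same API. A quick sketch of the two consumption paths:

```ts
// ESM consumers resolve the "import" condition -> ./dist/index.js
import { HfInference } from "@huggingface/inference";

// CommonJS consumers resolve the "require" condition -> ./dist/index.cjs
// const { HfInference } = require("@huggingface/inference");
```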

package/src/lib/getDefaultTask.ts
CHANGED
@@ -2,7 +2,7 @@ import { isUrl } from "./isUrl";
 
 /**
 * We want to make calls to the huggingface hub the least possible, eg if
-* someone is calling
+* someone is calling Inference Endpoints 1000 times per second, we don't want
 * to make 1000 calls to the hub to get the task name.
 */
 const taskCache = new Map<string, { task: string; date: Date }>();

package/src/tasks/custom/request.ts
CHANGED
@@ -2,7 +2,7 @@ import type { InferenceTask, Options, RequestArgs } from "../../types";
 import { makeRequestOptions } from "../../lib/makeRequestOptions";
 
 /**
-* Primitive to make custom calls to
+* Primitive to make custom calls to Inference Endpoints
 */
 export async function request<T>(
 args: RequestArgs,

package/src/tasks/custom/streamingRequest.ts
CHANGED
@@ -19,7 +19,7 @@ export async function* streamingRequest<T>(
 const response = await (options?.fetch ?? fetch)(url, info);
 
 if (options?.retry_on_error !== false && response.status === 503 && !options?.wait_for_model) {
-return streamingRequest(args, {
+return yield* streamingRequest(args, {
 ...options,
 wait_for_model: true,
 });
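The small-looking change above fixes a real bug: inside an async generator, `return streamingRequest(...)` only sets the generator's return value, so the tokens produced by the retried request never reach the consumer's `for await` loop. `return yield*` delegates to the inner generator and re-yields its values. A stripped-down sketch of the difference (the names here are illustrative, not the library's code):

```ts
async function* inner(): AsyncGenerator<number> {
  yield 1;
  yield 2;
}

async function* broken(): AsyncGenerator<number> {
  // The inner generator is created, but its values are silently discarded.
  return inner();
}

async function* fixed(): AsyncGenerator<number> {
  // Delegation re-yields every value produced by inner().
  return yield* inner();
}

for await (const v of broken()) console.log("broken", v); // prints nothing
for await (const v of fixed()) console.log("fixed", v);   // prints 1, then 2
```
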
package/src/tasks/index.ts
CHANGED
@@ -18,7 +18,6 @@ export * from "./cv/imageToImage";
 export * from "./cv/zeroShotImageClassification";
 
 // Natural Language Processing tasks
-export * from "./nlp/conversational";
 export * from "./nlp/featureExtraction";
 export * from "./nlp/fillMask";
 export * from "./nlp/questionAnswering";

package/src/tasks/nlp/textGeneration.ts
CHANGED
@@ -1,67 +1,15 @@
+import type { TextGenerationInput, TextGenerationOutput } from "@huggingface/tasks/src/tasks/text-generation/inference";
 import { InferenceOutputError } from "../../lib/InferenceOutputError";
 import type { BaseArgs, Options } from "../../types";
 import { request } from "../custom/request";
 
-export type TextGenerationArgs = BaseArgs & {
-/**
-* A string to be generated from
-*/
-inputs: string;
-parameters?: {
-/**
-* (Optional: True). Bool. Whether or not to use sampling, use greedy decoding otherwise.
-*/
-do_sample?: boolean;
-/**
-* (Default: None). Int (0-250). The amount of new tokens to be generated, this does not include the input length it is a estimate of the size of generated text you want. Each new tokens slows down the request, so look for balance between response times and length of text generated.
-*/
-max_new_tokens?: number;
-/**
-* (Default: None). Float (0-120.0). The amount of time in seconds that the query should take maximum. Network can cause some overhead so it will be a soft limit. Use that in combination with max_new_tokens for best results.
-*/
-max_time?: number;
-/**
-* (Default: 1). Integer. The number of proposition you want to be returned.
-*/
-num_return_sequences?: number;
-/**
-* (Default: None). Float (0.0-100.0). The more a token is used within generation the more it is penalized to not be picked in successive generation passes.
-*/
-repetition_penalty?: number;
-/**
-* (Default: True). Bool. If set to False, the return results will not contain the original query making it easier for prompting.
-*/
-return_full_text?: boolean;
-/**
-* (Default: 1.0). Float (0.0-100.0). The temperature of the sampling operation. 1 means regular sampling, 0 means always take the highest score, 100.0 is getting closer to uniform probability.
-*/
-temperature?: number;
-/**
-* (Default: None). Integer to define the top tokens considered within the sample operation to create new text.
-*/
-top_k?: number;
-/**
-* (Default: None). Float to define the tokens that are within the sample operation of text generation. Add tokens in the sample for more probable to least probable until the sum of the probabilities is greater than top_p.
-*/
-top_p?: number;
-/**
-* (Default: None). Integer. The maximum number of tokens from the input.
-*/
-truncate?: number;
-};
-};
-
-export interface TextGenerationOutput {
-/**
-* The continuated string
-*/
-generated_text: string;
-}
-
 /**
 * Use to continue text from a prompt. This is a very generic task. Recommended model: gpt2 (it’s a simple model, but fun to play with).
 */
-export async function textGeneration(
+export async function textGeneration(
+args: BaseArgs & TextGenerationInput,
+options?: Options
+): Promise<TextGenerationOutput> {
 const res = await request<TextGenerationOutput[]>(args, {
 ...options,
 taskHint: "text-generation",
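With the hand-written `TextGenerationArgs` and local output interface removed, `textGeneration` is now typed by `TextGenerationInput`/`TextGenerationOutput` from `@huggingface/tasks`. Call sites keep the same shape; a hedged usage sketch, where the token and model are placeholders and the parameters shown are common fields from the shared task spec:

```ts
import { textGeneration } from "@huggingface/inference";

const out = await textGeneration({
  accessToken: "hf_...",                        // placeholder token
  model: "mistralai/Mistral-7B-Instruct-v0.2",  // illustrative model
  inputs: "The capital of France is",
  parameters: {
    max_new_tokens: 20,
    temperature: 0.7,
    return_full_text: false,
  },
});

console.log(out.generated_text);
```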

package/src/tasks/nlp/textGenerationStream.ts
CHANGED
@@ -1,6 +1,7 @@
-import type { Options } from "../../types";
+import type { BaseArgs, Options } from "../../types";
 import { streamingRequest } from "../custom/streamingRequest";
-
+
+import type { TextGenerationInput } from "@huggingface/tasks/src/tasks/text-generation/inference";
 
 export interface TextGenerationStreamToken {
 /** Token ID from the model tokenizer */
@@ -85,7 +86,7 @@ export interface TextGenerationStreamOutput {
 * Use to continue text from a prompt. Same as `textGeneration` but returns generator that can be read one token at a time
 */
 export async function* textGenerationStream(
-args:
+args: BaseArgs & TextGenerationInput,
 options?: Options
 ): AsyncGenerator<TextGenerationStreamOutput> {
 yield* streamingRequest<TextGenerationStreamOutput>(args, {
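`textGenerationStream` now takes the same `BaseArgs & TextGenerationInput` arguments and still yields `TextGenerationStreamOutput` chunks. A short sketch of streaming tokens as they arrive; the token and model name are placeholders:

```ts
import { textGenerationStream } from "@huggingface/inference";

for await (const chunk of textGenerationStream({
  accessToken: "hf_...",                        // placeholder token
  model: "mistralai/Mistral-7B-Instruct-v0.2",  // illustrative model
  inputs: "Write a haiku about version bumps:",
  parameters: { max_new_tokens: 48 },
})) {
  // Each chunk carries the newly generated token; the final chunk also sets generated_text.
  process.stdout.write(chunk.token.text);
}
```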