@huggingface/inference 2.6.7 → 2.7.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +126 -27
- package/dist/index.cjs +78 -12
- package/dist/index.js +76 -12
- package/dist/src/HfInference.d.ts +28 -0
- package/dist/src/HfInference.d.ts.map +1 -0
- package/dist/src/index.d.ts +5 -0
- package/dist/src/index.d.ts.map +1 -0
- package/dist/src/lib/InferenceOutputError.d.ts +4 -0
- package/dist/src/lib/InferenceOutputError.d.ts.map +1 -0
- package/dist/src/lib/getDefaultTask.d.ts +12 -0
- package/dist/src/lib/getDefaultTask.d.ts.map +1 -0
- package/dist/src/lib/isUrl.d.ts +2 -0
- package/dist/src/lib/isUrl.d.ts.map +1 -0
- package/dist/src/lib/makeRequestOptions.d.ts +18 -0
- package/dist/src/lib/makeRequestOptions.d.ts.map +1 -0
- package/dist/src/tasks/audio/audioClassification.d.ts +24 -0
- package/dist/src/tasks/audio/audioClassification.d.ts.map +1 -0
- package/dist/src/tasks/audio/audioToAudio.d.ts +28 -0
- package/dist/src/tasks/audio/audioToAudio.d.ts.map +1 -0
- package/dist/src/tasks/audio/automaticSpeechRecognition.d.ts +19 -0
- package/dist/src/tasks/audio/automaticSpeechRecognition.d.ts.map +1 -0
- package/dist/src/tasks/audio/textToSpeech.d.ts +14 -0
- package/dist/src/tasks/audio/textToSpeech.d.ts.map +1 -0
- package/dist/src/tasks/custom/request.d.ts +13 -0
- package/dist/src/tasks/custom/request.d.ts.map +1 -0
- package/dist/src/tasks/custom/streamingRequest.d.ts +13 -0
- package/dist/src/tasks/custom/streamingRequest.d.ts.map +1 -0
- package/dist/src/tasks/cv/imageClassification.d.ts +24 -0
- package/dist/src/tasks/cv/imageClassification.d.ts.map +1 -0
- package/dist/src/tasks/cv/imageSegmentation.d.ts +28 -0
- package/dist/src/tasks/cv/imageSegmentation.d.ts.map +1 -0
- package/dist/src/tasks/cv/imageToImage.d.ts +55 -0
- package/dist/src/tasks/cv/imageToImage.d.ts.map +1 -0
- package/dist/src/tasks/cv/imageToText.d.ts +18 -0
- package/dist/src/tasks/cv/imageToText.d.ts.map +1 -0
- package/dist/src/tasks/cv/objectDetection.d.ts +33 -0
- package/dist/src/tasks/cv/objectDetection.d.ts.map +1 -0
- package/dist/src/tasks/cv/textToImage.d.ts +36 -0
- package/dist/src/tasks/cv/textToImage.d.ts.map +1 -0
- package/dist/src/tasks/cv/zeroShotImageClassification.d.ts +26 -0
- package/dist/src/tasks/cv/zeroShotImageClassification.d.ts.map +1 -0
- package/dist/src/tasks/index.d.ts +32 -0
- package/dist/src/tasks/index.d.ts.map +1 -0
- package/dist/src/tasks/multimodal/documentQuestionAnswering.d.ts +35 -0
- package/dist/src/tasks/multimodal/documentQuestionAnswering.d.ts.map +1 -0
- package/dist/src/tasks/multimodal/visualQuestionAnswering.d.ts +27 -0
- package/dist/src/tasks/multimodal/visualQuestionAnswering.d.ts.map +1 -0
- package/dist/src/tasks/nlp/chatCompletion.d.ts +7 -0
- package/dist/src/tasks/nlp/chatCompletion.d.ts.map +1 -0
- package/dist/src/tasks/nlp/chatCompletionStream.d.ts +7 -0
- package/dist/src/tasks/nlp/chatCompletionStream.d.ts.map +1 -0
- package/dist/src/tasks/nlp/featureExtraction.d.ts +19 -0
- package/dist/src/tasks/nlp/featureExtraction.d.ts.map +1 -0
- package/dist/src/tasks/nlp/fillMask.d.ts +27 -0
- package/dist/src/tasks/nlp/fillMask.d.ts.map +1 -0
- package/dist/src/tasks/nlp/questionAnswering.d.ts +30 -0
- package/dist/src/tasks/nlp/questionAnswering.d.ts.map +1 -0
- package/dist/src/tasks/nlp/sentenceSimilarity.d.ts +19 -0
- package/dist/src/tasks/nlp/sentenceSimilarity.d.ts.map +1 -0
- package/dist/src/tasks/nlp/summarization.d.ts +48 -0
- package/dist/src/tasks/nlp/summarization.d.ts.map +1 -0
- package/dist/src/tasks/nlp/tableQuestionAnswering.d.ts +36 -0
- package/dist/src/tasks/nlp/tableQuestionAnswering.d.ts.map +1 -0
- package/dist/src/tasks/nlp/textClassification.d.ts +22 -0
- package/dist/src/tasks/nlp/textClassification.d.ts.map +1 -0
- package/dist/src/tasks/nlp/textGeneration.d.ts +8 -0
- package/dist/src/tasks/nlp/textGeneration.d.ts.map +1 -0
- package/dist/src/tasks/nlp/textGenerationStream.d.ts +81 -0
- package/dist/src/tasks/nlp/textGenerationStream.d.ts.map +1 -0
- package/dist/src/tasks/nlp/tokenClassification.d.ts +51 -0
- package/dist/src/tasks/nlp/tokenClassification.d.ts.map +1 -0
- package/dist/src/tasks/nlp/translation.d.ts +19 -0
- package/dist/src/tasks/nlp/translation.d.ts.map +1 -0
- package/dist/src/tasks/nlp/zeroShotClassification.d.ts +28 -0
- package/dist/src/tasks/nlp/zeroShotClassification.d.ts.map +1 -0
- package/dist/src/tasks/tabular/tabularClassification.d.ts +20 -0
- package/dist/src/tasks/tabular/tabularClassification.d.ts.map +1 -0
- package/dist/src/tasks/tabular/tabularRegression.d.ts +20 -0
- package/dist/src/tasks/tabular/tabularRegression.d.ts.map +1 -0
- package/dist/src/types.d.ts +69 -0
- package/dist/src/types.d.ts.map +1 -0
- package/dist/src/utils/base64FromBytes.d.ts +2 -0
- package/dist/src/utils/base64FromBytes.d.ts.map +1 -0
- package/dist/src/utils/distributive-omit.d.ts +9 -0
- package/dist/src/utils/distributive-omit.d.ts.map +1 -0
- package/dist/src/utils/isBackend.d.ts +2 -0
- package/dist/src/utils/isBackend.d.ts.map +1 -0
- package/dist/src/utils/isFrontend.d.ts +2 -0
- package/dist/src/utils/isFrontend.d.ts.map +1 -0
- package/dist/src/utils/omit.d.ts +5 -0
- package/dist/src/utils/omit.d.ts.map +1 -0
- package/dist/src/utils/pick.d.ts +5 -0
- package/dist/src/utils/pick.d.ts.map +1 -0
- package/dist/src/utils/toArray.d.ts +2 -0
- package/dist/src/utils/toArray.d.ts.map +1 -0
- package/dist/src/utils/typedInclude.d.ts +2 -0
- package/dist/src/utils/typedInclude.d.ts.map +1 -0
- package/dist/src/vendor/fetch-event-source/parse.d.ts +69 -0
- package/dist/src/vendor/fetch-event-source/parse.d.ts.map +1 -0
- package/dist/src/vendor/fetch-event-source/parse.spec.d.ts +2 -0
- package/dist/src/vendor/fetch-event-source/parse.spec.d.ts.map +1 -0
- package/dist/test/HfInference.spec.d.ts +2 -0
- package/dist/test/HfInference.spec.d.ts.map +1 -0
- package/dist/test/expect-closeto.d.ts +2 -0
- package/dist/test/expect-closeto.d.ts.map +1 -0
- package/dist/test/test-files.d.ts +2 -0
- package/dist/test/test-files.d.ts.map +1 -0
- package/dist/test/vcr.d.ts +2 -0
- package/dist/test/vcr.d.ts.map +1 -0
- package/package.json +9 -7
- package/src/HfInference.ts +4 -4
- package/src/lib/makeRequestOptions.ts +17 -7
- package/src/tasks/custom/request.ts +5 -0
- package/src/tasks/custom/streamingRequest.ts +8 -0
- package/src/tasks/cv/imageToImage.ts +1 -1
- package/src/tasks/cv/zeroShotImageClassification.ts +1 -1
- package/src/tasks/index.ts +2 -0
- package/src/tasks/multimodal/documentQuestionAnswering.ts +1 -1
- package/src/tasks/multimodal/visualQuestionAnswering.ts +1 -1
- package/src/tasks/nlp/chatCompletion.ts +32 -0
- package/src/tasks/nlp/chatCompletionStream.ts +17 -0
- package/src/tasks/nlp/textGeneration.ts +2 -202
- package/src/tasks/nlp/textGenerationStream.ts +2 -1
- package/src/types.ts +14 -3
- package/src/utils/base64FromBytes.ts +11 -0
- package/src/utils/{distributive-omit.d.ts → distributive-omit.ts} +0 -2
- package/src/utils/isBackend.ts +6 -0
- package/src/utils/isFrontend.ts +3 -0
- package/dist/index.d.ts +0 -1536
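The headline change in 2.7.0 is the new `chatCompletion` / `chatCompletionStream` tasks (see `package/src/tasks/nlp/chatCompletion.ts` and `chatCompletionStream.ts` in the list above), plus the move of shared task types to `@huggingface/tasks`. A hedged sketch of how the new chat entry points are typically called through the `HfInference` client; the model id is a placeholder and the message/choice field names follow the OpenAI-compatible `ChatCompletionInput`/`Output` shapes from `@huggingface/tasks`, which are not reproduced in this diff:

```ts
import { HfInference } from "@huggingface/inference";

const hf = new HfInference("hf_..."); // placeholder token from hf.co/settings/tokens

// Non-streaming chat completion (new in 2.7.0)
const res = await hf.chatCompletion({
  model: "mistralai/Mistral-7B-Instruct-v0.2", // illustrative model id
  messages: [{ role: "user", content: "What is the capital of France?" }],
  max_tokens: 100,
});
console.log(res.choices[0].message.content);

// Streaming variant: an async generator yielding OpenAI-style deltas
for await (const chunk of hf.chatCompletionStream({
  model: "mistralai/Mistral-7B-Instruct-v0.2",
  messages: [{ role: "user", content: "Count to three." }],
})) {
  process.stdout.write(chunk.choices[0]?.delta?.content ?? ""); // Node: print tokens as they arrive
}
```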
package/dist/src/tasks/nlp/questionAnswering.d.ts
@@ -0,0 +1,30 @@
+import type { BaseArgs, Options } from "../../types";
+export type QuestionAnsweringArgs = BaseArgs & {
+    inputs: {
+        context: string;
+        question: string;
+    };
+};
+export interface QuestionAnsweringOutput {
+    /**
+     * A string that’s the answer within the text.
+     */
+    answer: string;
+    /**
+     * The index (string wise) of the stop of the answer within context.
+     */
+    end: number;
+    /**
+     * A float that represents how likely that the answer is correct
+     */
+    score: number;
+    /**
+     * The index (string wise) of the start of the answer within context.
+     */
+    start: number;
+}
+/**
+ * Want to have a nice know-it-all bot that can answer any question?. Recommended model: deepset/roberta-base-squad2
+ */
+export declare function questionAnswering(args: QuestionAnsweringArgs, options?: Options): Promise<QuestionAnsweringOutput>;
+//# sourceMappingURL=questionAnswering.d.ts.map

package/dist/src/tasks/nlp/questionAnswering.d.ts.map
@@ -0,0 +1 @@
+{"version":3,"file":"questionAnswering.d.ts","sourceRoot":"","sources":["../../../../src/tasks/nlp/questionAnswering.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAGrD,MAAM,MAAM,qBAAqB,GAAG,QAAQ,GAAG;IAC9C,MAAM,EAAE;QACP,OAAO,EAAE,MAAM,CAAC;QAChB,QAAQ,EAAE,MAAM,CAAC;KACjB,CAAC;CACF,CAAC;AAEF,MAAM,WAAW,uBAAuB;IACvC;;OAEG;IACH,MAAM,EAAE,MAAM,CAAC;IACf;;OAEG;IACH,GAAG,EAAE,MAAM,CAAC;IACZ;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;CACd;AAED;;GAEG;AACH,wBAAsB,iBAAiB,CACtC,IAAI,EAAE,qBAAqB,EAC3B,OAAO,CAAC,EAAE,OAAO,GACf,OAAO,CAAC,uBAAuB,CAAC,CAgBlC"}
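The declarations above are the public surface for the question-answering task. A minimal usage sketch, assuming the `HfInference` client documented in the package README, a placeholder access token, and the recommended model named in the JSDoc:

```ts
import { HfInference } from "@huggingface/inference";

const hf = new HfInference("hf_..."); // placeholder token

const answer = await hf.questionAnswering({
  model: "deepset/roberta-base-squad2", // recommended model from the JSDoc above
  inputs: {
    context: "The Eiffel Tower was completed in 1889 and stands in Paris, France.",
    question: "Where is the Eiffel Tower?",
  },
});
// answer.answer is the extracted span; start/end index into the context string
console.log(answer.answer, answer.score, answer.start, answer.end);
```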
package/dist/src/tasks/nlp/sentenceSimilarity.d.ts
@@ -0,0 +1,19 @@
+import type { BaseArgs, Options } from "../../types";
+export type SentenceSimilarityArgs = BaseArgs & {
+    /**
+     * The inputs vary based on the model.
+     *
+     * For example when using sentence-transformers/paraphrase-xlm-r-multilingual-v1 the inputs will have a `source_sentence` string and
+     * a `sentences` array of strings
+     */
+    inputs: Record<string, unknown> | Record<string, unknown>[];
+};
+/**
+ * Returned values are a list of floats
+ */
+export type SentenceSimilarityOutput = number[];
+/**
+ * Calculate the semantic similarity between one text and a list of other sentences by comparing their embeddings.
+ */
+export declare function sentenceSimilarity(args: SentenceSimilarityArgs, options?: Options): Promise<SentenceSimilarityOutput>;
+//# sourceMappingURL=sentenceSimilarity.d.ts.map

package/dist/src/tasks/nlp/sentenceSimilarity.d.ts.map
@@ -0,0 +1 @@
+{"version":3,"file":"sentenceSimilarity.d.ts","sourceRoot":"","sources":["../../../../src/tasks/nlp/sentenceSimilarity.ts"],"names":[],"mappings":"AAEA,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAGrD,MAAM,MAAM,sBAAsB,GAAG,QAAQ,GAAG;IAC/C;;;;;OAKG;IACH,MAAM,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAAG,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,EAAE,CAAC;CAC5D,CAAC;AAEF;;GAEG;AACH,MAAM,MAAM,wBAAwB,GAAG,MAAM,EAAE,CAAC;AAEhD;;GAEG;AACH,wBAAsB,kBAAkB,CACvC,IAAI,EAAE,sBAAsB,EAC5B,OAAO,CAAC,EAAE,OAAO,GACf,OAAO,CAAC,wBAAwB,CAAC,CAanC"}
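A sketch of a sentence-similarity call, assuming the `source_sentence`/`sentences` input shape mentioned in the JSDoc above (the exact keys depend on the model):

```ts
import { HfInference } from "@huggingface/inference";

const hf = new HfInference("hf_..."); // placeholder token

const scores = await hf.sentenceSimilarity({
  model: "sentence-transformers/paraphrase-xlm-r-multilingual-v1", // model named in the JSDoc above
  inputs: {
    source_sentence: "That is a happy person",
    sentences: ["That is a happy dog", "That is a very happy person", "Today is a sunny day"],
  },
});
// scores is a number[], one similarity value per candidate sentence
console.log(scores);
```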
package/dist/src/tasks/nlp/summarization.d.ts
@@ -0,0 +1,48 @@
+import type { BaseArgs, Options } from "../../types";
+export type SummarizationArgs = BaseArgs & {
+    /**
+     * A string to be summarized
+     */
+    inputs: string;
+    parameters?: {
+        /**
+         * (Default: None). Integer to define the maximum length in tokens of the output summary.
+         */
+        max_length?: number;
+        /**
+         * (Default: None). Float (0-120.0). The amount of time in seconds that the query should take maximum. Network can cause some overhead so it will be a soft limit.
+         */
+        max_time?: number;
+        /**
+         * (Default: None). Integer to define the minimum length in tokens of the output summary.
+         */
+        min_length?: number;
+        /**
+         * (Default: None). Float (0.0-100.0). The more a token is used within generation the more it is penalized to not be picked in successive generation passes.
+         */
+        repetition_penalty?: number;
+        /**
+         * (Default: 1.0). Float (0.0-100.0). The temperature of the sampling operation. 1 means regular sampling, 0 means always take the highest score, 100.0 is getting closer to uniform probability.
+         */
+        temperature?: number;
+        /**
+         * (Default: None). Integer to define the top tokens considered within the sample operation to create new text.
+         */
+        top_k?: number;
+        /**
+         * (Default: None). Float to define the tokens that are within the sample operation of text generation. Add tokens in the sample for more probable to least probable until the sum of the probabilities is greater than top_p.
+         */
+        top_p?: number;
+    };
+};
+export interface SummarizationOutput {
+    /**
+     * The string after translation
+     */
+    summary_text: string;
+}
+/**
+ * This task is well known to summarize longer text into shorter text. Be careful, some models have a maximum length of input. That means that the summary cannot handle full books for instance. Be careful when choosing your model.
+ */
+export declare function summarization(args: SummarizationArgs, options?: Options): Promise<SummarizationOutput>;
+//# sourceMappingURL=summarization.d.ts.map

package/dist/src/tasks/nlp/summarization.d.ts.map
@@ -0,0 +1 @@
+{"version":3,"file":"summarization.d.ts","sourceRoot":"","sources":["../../../../src/tasks/nlp/summarization.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAGrD,MAAM,MAAM,iBAAiB,GAAG,QAAQ,GAAG;IAC1C;;OAEG;IACH,MAAM,EAAE,MAAM,CAAC;IACf,UAAU,CAAC,EAAE;QACZ;;WAEG;QACH,UAAU,CAAC,EAAE,MAAM,CAAC;QACpB;;WAEG;QACH,QAAQ,CAAC,EAAE,MAAM,CAAC;QAClB;;WAEG;QACH,UAAU,CAAC,EAAE,MAAM,CAAC;QACpB;;WAEG;QACH,kBAAkB,CAAC,EAAE,MAAM,CAAC;QAC5B;;WAEG;QACH,WAAW,CAAC,EAAE,MAAM,CAAC;QACrB;;WAEG;QACH,KAAK,CAAC,EAAE,MAAM,CAAC;QACf;;WAEG;QACH,KAAK,CAAC,EAAE,MAAM,CAAC;KACf,CAAC;CACF,CAAC;AAEF,MAAM,WAAW,mBAAmB;IACnC;;OAEG;IACH,YAAY,EAAE,MAAM,CAAC;CACrB;AAED;;GAEG;AACH,wBAAsB,aAAa,CAAC,IAAI,EAAE,iBAAiB,EAAE,OAAO,CAAC,EAAE,OAAO,GAAG,OAAO,CAAC,mBAAmB,CAAC,CAU5G"}
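A usage sketch for summarization; the model id here is illustrative (it is not named in this diff), and the parameters map to the optional block declared above:

```ts
import { HfInference } from "@huggingface/inference";

const hf = new HfInference("hf_..."); // placeholder token

const { summary_text } = await hf.summarization({
  model: "facebook/bart-large-cnn", // illustrative model id
  inputs:
    "The tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey building, " +
    "and the tallest structure in Paris. Its base is square, measuring 125 metres (410 ft) on each side.",
  parameters: { max_length: 60, min_length: 10 },
});
console.log(summary_text);
```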
package/dist/src/tasks/nlp/tableQuestionAnswering.d.ts
@@ -0,0 +1,36 @@
+import type { BaseArgs, Options } from "../../types";
+export type TableQuestionAnsweringArgs = BaseArgs & {
+    inputs: {
+        /**
+         * The query in plain text that you want to ask the table
+         */
+        query: string;
+        /**
+         * A table of data represented as a dict of list where entries are headers and the lists are all the values, all lists must have the same size.
+         */
+        table: Record<string, string[]>;
+    };
+};
+export interface TableQuestionAnsweringOutput {
+    /**
+     * The aggregator used to get the answer
+     */
+    aggregator: string;
+    /**
+     * The plaintext answer
+     */
+    answer: string;
+    /**
+     * A list of coordinates of the cells contents
+     */
+    cells: string[];
+    /**
+     * a list of coordinates of the cells referenced in the answer
+     */
+    coordinates: number[][];
+}
+/**
+ * Don’t know SQL? Don’t want to dive into a large spreadsheet? Ask questions in plain english! Recommended model: google/tapas-base-finetuned-wtq.
+ */
+export declare function tableQuestionAnswering(args: TableQuestionAnsweringArgs, options?: Options): Promise<TableQuestionAnsweringOutput>;
+//# sourceMappingURL=tableQuestionAnswering.d.ts.map

package/dist/src/tasks/nlp/tableQuestionAnswering.d.ts.map
@@ -0,0 +1 @@
+{"version":3,"file":"tableQuestionAnswering.d.ts","sourceRoot":"","sources":["../../../../src/tasks/nlp/tableQuestionAnswering.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAGrD,MAAM,MAAM,0BAA0B,GAAG,QAAQ,GAAG;IACnD,MAAM,EAAE;QACP;;WAEG;QACH,KAAK,EAAE,MAAM,CAAC;QACd;;WAEG;QACH,KAAK,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,EAAE,CAAC,CAAC;KAChC,CAAC;CACF,CAAC;AAEF,MAAM,WAAW,4BAA4B;IAC5C;;OAEG;IACH,UAAU,EAAE,MAAM,CAAC;IACnB;;OAEG;IACH,MAAM,EAAE,MAAM,CAAC;IACf;;OAEG;IACH,KAAK,EAAE,MAAM,EAAE,CAAC;IAChB;;OAEG;IACH,WAAW,EAAE,MAAM,EAAE,EAAE,CAAC;CACxB;AAED;;GAEG;AACH,wBAAsB,sBAAsB,CAC3C,IAAI,EAAE,0BAA0B,EAChC,OAAO,CAAC,EAAE,OAAO,GACf,OAAO,CAAC,4BAA4B,CAAC,CAkBvC"}
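A sketch of a table question-answering call with the recommended TAPAS model; the table is a dict of equally sized string columns, as documented above:

```ts
import { HfInference } from "@huggingface/inference";

const hf = new HfInference("hf_..."); // placeholder token

const result = await hf.tableQuestionAnswering({
  model: "google/tapas-base-finetuned-wtq", // recommended model from the JSDoc above
  inputs: {
    query: "How many stars does the transformers repository have?",
    table: {
      Repository: ["Transformers", "Datasets", "Tokenizers"],
      Stars: ["36542", "4512", "3934"],
    },
  },
});
// aggregator tells you how cells were combined; coordinates point into the table
console.log(result.answer, result.aggregator, result.cells, result.coordinates);
```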
package/dist/src/tasks/nlp/textClassification.d.ts
@@ -0,0 +1,22 @@
+import type { BaseArgs, Options } from "../../types";
+export type TextClassificationArgs = BaseArgs & {
+    /**
+     * A string to be classified
+     */
+    inputs: string;
+};
+export type TextClassificationOutput = {
+    /**
+     * The label for the class (model specific)
+     */
+    label: string;
+    /**
+     * A floats that represents how likely is that the text belongs to this class.
+     */
+    score: number;
+}[];
+/**
+ * Usually used for sentiment-analysis this will output the likelihood of classes of an input. Recommended model: distilbert-base-uncased-finetuned-sst-2-english
+ */
+export declare function textClassification(args: TextClassificationArgs, options?: Options): Promise<TextClassificationOutput>;
+//# sourceMappingURL=textClassification.d.ts.map

package/dist/src/tasks/nlp/textClassification.d.ts.map
@@ -0,0 +1 @@
+{"version":3,"file":"textClassification.d.ts","sourceRoot":"","sources":["../../../../src/tasks/nlp/textClassification.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAGrD,MAAM,MAAM,sBAAsB,GAAG,QAAQ,GAAG;IAC/C;;OAEG;IACH,MAAM,EAAE,MAAM,CAAC;CACf,CAAC;AAEF,MAAM,MAAM,wBAAwB,GAAG;IACtC;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;CACd,EAAE,CAAC;AAEJ;;GAEG;AACH,wBAAsB,kBAAkB,CACvC,IAAI,EAAE,sBAAsB,EAC5B,OAAO,CAAC,EAAE,OAAO,GACf,OAAO,CAAC,wBAAwB,CAAC,CAanC"}
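A sentiment-analysis sketch using the recommended model; the output is an array of `{ label, score }` pairs whose label names are model specific:

```ts
import { HfInference } from "@huggingface/inference";

const hf = new HfInference("hf_..."); // placeholder token

const labels = await hf.textClassification({
  model: "distilbert-base-uncased-finetuned-sst-2-english", // recommended model from the JSDoc above
  inputs: "I like you. I love you.",
});
// e.g. roughly [{ label: "POSITIVE", score: 0.99 }, { label: "NEGATIVE", score: 0.01 }] for this model
console.log(labels);
```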
package/dist/src/tasks/nlp/textGeneration.d.ts
@@ -0,0 +1,8 @@
+import type { TextGenerationInput, TextGenerationOutput } from "@huggingface/tasks";
+import type { BaseArgs, Options } from "../../types";
+export type { TextGenerationInput, TextGenerationOutput };
+/**
+ * Use to continue text from a prompt. This is a very generic task. Recommended model: gpt2 (it’s a simple model, but fun to play with).
+ */
+export declare function textGeneration(args: BaseArgs & TextGenerationInput, options?: Options): Promise<TextGenerationOutput>;
+//# sourceMappingURL=textGeneration.d.ts.map

package/dist/src/tasks/nlp/textGeneration.d.ts.map
@@ -0,0 +1 @@
+{"version":3,"file":"textGeneration.d.ts","sourceRoot":"","sources":["../../../../src/tasks/nlp/textGeneration.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,mBAAmB,EAAE,oBAAoB,EAAE,MAAM,oBAAoB,CAAC;AAEpF,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAGrD,YAAY,EAAE,mBAAmB,EAAE,oBAAoB,EAAE,CAAC;AAE1D;;GAEG;AACH,wBAAsB,cAAc,CACnC,IAAI,EAAE,QAAQ,GAAG,mBAAmB,EACpC,OAAO,CAAC,EAAE,OAAO,GACf,OAAO,CAAC,oBAAoB,CAAC,CAU/B"}
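Note that in 2.7.0 the `TextGenerationInput`/`TextGenerationOutput` types are re-exported from `@huggingface/tasks` instead of being defined locally (hence the `+2 -202` on `package/src/tasks/nlp/textGeneration.ts` in the file list above). A minimal usage sketch:

```ts
import { HfInference } from "@huggingface/inference";

const hf = new HfInference("hf_..."); // placeholder token

const out = await hf.textGeneration({
  model: "gpt2", // recommended model from the JSDoc above
  inputs: "The answer to the universe is",
  parameters: { max_new_tokens: 20 }, // parameters follow TextGenerationInput from @huggingface/tasks
});
console.log(out.generated_text);
```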
package/dist/src/tasks/nlp/textGenerationStream.d.ts
@@ -0,0 +1,81 @@
+import type { TextGenerationInput } from "@huggingface/tasks";
+import type { BaseArgs, Options } from "../../types";
+export interface TextGenerationStreamToken {
+    /** Token ID from the model tokenizer */
+    id: number;
+    /** Token text */
+    text: string;
+    /** Logprob */
+    logprob: number;
+    /**
+     * Is the token a special token
+     * Can be used to ignore tokens when concatenating
+     */
+    special: boolean;
+}
+export interface TextGenerationStreamPrefillToken {
+    /** Token ID from the model tokenizer */
+    id: number;
+    /** Token text */
+    text: string;
+    /**
+     * Logprob
+     * Optional since the logprob of the first token cannot be computed
+     */
+    logprob?: number;
+}
+export interface TextGenerationStreamBestOfSequence {
+    /** Generated text */
+    generated_text: string;
+    /** Generation finish reason */
+    finish_reason: TextGenerationStreamFinishReason;
+    /** Number of generated tokens */
+    generated_tokens: number;
+    /** Sampling seed if sampling was activated */
+    seed?: number;
+    /** Prompt tokens */
+    prefill: TextGenerationStreamPrefillToken[];
+    /** Generated tokens */
+    tokens: TextGenerationStreamToken[];
+}
+export type TextGenerationStreamFinishReason =
+/** number of generated tokens == `max_new_tokens` */
+"length"
+/** the model generated its end of sequence token */
+| "eos_token"
+/** the model generated a text included in `stop_sequences` */
+| "stop_sequence";
+export interface TextGenerationStreamDetails {
+    /** Generation finish reason */
+    finish_reason: TextGenerationStreamFinishReason;
+    /** Number of generated tokens */
+    generated_tokens: number;
+    /** Sampling seed if sampling was activated */
+    seed?: number;
+    /** Prompt tokens */
+    prefill: TextGenerationStreamPrefillToken[];
+    /** */
+    tokens: TextGenerationStreamToken[];
+    /** Additional sequences when using the `best_of` parameter */
+    best_of_sequences?: TextGenerationStreamBestOfSequence[];
+}
+export interface TextGenerationStreamOutput {
+    index?: number;
+    /** Generated token, one at a time */
+    token: TextGenerationStreamToken;
+    /**
+     * Complete generated text
+     * Only available when the generation is finished
+     */
+    generated_text: string | null;
+    /**
+     * Generation details
+     * Only available when the generation is finished
+     */
+    details: TextGenerationStreamDetails | null;
+}
+/**
+ * Use to continue text from a prompt. Same as `textGeneration` but returns generator that can be read one token at a time
+ */
+export declare function textGenerationStream(args: BaseArgs & TextGenerationInput, options?: Options): AsyncGenerator<TextGenerationStreamOutput>;
+//# sourceMappingURL=textGenerationStream.d.ts.map

package/dist/src/tasks/nlp/textGenerationStream.d.ts.map
@@ -0,0 +1 @@
+{"version":3,"file":"textGenerationStream.d.ts","sourceRoot":"","sources":["../../../../src/tasks/nlp/textGenerationStream.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,mBAAmB,EAAE,MAAM,oBAAoB,CAAC;AAC9D,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAGrD,MAAM,WAAW,yBAAyB;IACzC,wCAAwC;IACxC,EAAE,EAAE,MAAM,CAAC;IACX,iBAAiB;IACjB,IAAI,EAAE,MAAM,CAAC;IACb,cAAc;IACd,OAAO,EAAE,MAAM,CAAC;IAChB;;;OAGG;IACH,OAAO,EAAE,OAAO,CAAC;CACjB;AAED,MAAM,WAAW,gCAAgC;IAChD,wCAAwC;IACxC,EAAE,EAAE,MAAM,CAAC;IACX,iBAAiB;IACjB,IAAI,EAAE,MAAM,CAAC;IACb;;;OAGG;IACH,OAAO,CAAC,EAAE,MAAM,CAAC;CACjB;AAED,MAAM,WAAW,kCAAkC;IAClD,qBAAqB;IACrB,cAAc,EAAE,MAAM,CAAC;IACvB,+BAA+B;IAC/B,aAAa,EAAE,gCAAgC,CAAC;IAChD,iCAAiC;IACjC,gBAAgB,EAAE,MAAM,CAAC;IACzB,8CAA8C;IAC9C,IAAI,CAAC,EAAE,MAAM,CAAC;IACd,oBAAoB;IACpB,OAAO,EAAE,gCAAgC,EAAE,CAAC;IAC5C,uBAAuB;IACvB,MAAM,EAAE,yBAAyB,EAAE,CAAC;CACpC;AAED,MAAM,MAAM,gCAAgC;AAC3C,qDAAqD;AACnD,QAAQ;AACV,oDAAoD;GAClD,WAAW;AACb,8DAA8D;GAC5D,eAAe,CAAC;AAEnB,MAAM,WAAW,2BAA2B;IAC3C,+BAA+B;IAC/B,aAAa,EAAE,gCAAgC,CAAC;IAChD,iCAAiC;IACjC,gBAAgB,EAAE,MAAM,CAAC;IACzB,8CAA8C;IAC9C,IAAI,CAAC,EAAE,MAAM,CAAC;IACd,oBAAoB;IACpB,OAAO,EAAE,gCAAgC,EAAE,CAAC;IAC5C,MAAM;IACN,MAAM,EAAE,yBAAyB,EAAE,CAAC;IACpC,8DAA8D;IAC9D,iBAAiB,CAAC,EAAE,kCAAkC,EAAE,CAAC;CACzD;AAED,MAAM,WAAW,0BAA0B;IAC1C,KAAK,CAAC,EAAE,MAAM,CAAC;IACf,qCAAqC;IACrC,KAAK,EAAE,yBAAyB,CAAC;IACjC;;;OAGG;IACH,cAAc,EAAE,MAAM,GAAG,IAAI,CAAC;IAC9B;;;OAGG;IACH,OAAO,EAAE,2BAA2B,GAAG,IAAI,CAAC;CAC5C;AAED;;GAEG;AACH,wBAAuB,oBAAoB,CAC1C,IAAI,EAAE,QAAQ,GAAG,mBAAmB,EACpC,OAAO,CAAC,EAAE,OAAO,GACf,cAAc,CAAC,0BAA0B,CAAC,CAK5C"}
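The streaming variant returns an async generator. A sketch that prints tokens as they arrive; per the types above, `generated_text` and `details` are only populated on the final chunk:

```ts
import { HfInference } from "@huggingface/inference";

const hf = new HfInference("hf_..."); // placeholder token

for await (const chunk of hf.textGenerationStream({
  model: "gpt2", // illustrative; any text-generation model served with streaming support
  inputs: "The answer to the universe is",
  parameters: { max_new_tokens: 20 },
})) {
  process.stdout.write(chunk.token.text); // Node: write each token as it streams in
  if (chunk.details) {
    console.log("\nfinish_reason:", chunk.details.finish_reason);
  }
}
```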
package/dist/src/tasks/nlp/tokenClassification.d.ts
@@ -0,0 +1,51 @@
+import type { BaseArgs, Options } from "../../types";
+export type TokenClassificationArgs = BaseArgs & {
+    /**
+     * A string to be classified
+     */
+    inputs: string;
+    parameters?: {
+        /**
+         * (Default: simple). There are several aggregation strategies:
+         *
+         * none: Every token gets classified without further aggregation.
+         *
+         * simple: Entities are grouped according to the default schema (B-, I- tags get merged when the tag is similar).
+         *
+         * first: Same as the simple strategy except words cannot end up with different tags. Words will use the tag of the first token when there is ambiguity.
+         *
+         * average: Same as the simple strategy except words cannot end up with different tags. Scores are averaged across tokens and then the maximum label is applied.
+         *
+         * max: Same as the simple strategy except words cannot end up with different tags. Word entity will be the token with the maximum score.
+         */
+        aggregation_strategy?: "none" | "simple" | "first" | "average" | "max";
+    };
+};
+export interface TokenClassificationOutputValue {
+    /**
+     * The offset stringwise where the answer is located. Useful to disambiguate if word occurs multiple times.
+     */
+    end: number;
+    /**
+     * The type for the entity being recognized (model specific).
+     */
+    entity_group: string;
+    /**
+     * How likely the entity was recognized.
+     */
+    score: number;
+    /**
+     * The offset stringwise where the answer is located. Useful to disambiguate if word occurs multiple times.
+     */
+    start: number;
+    /**
+     * The string that was captured
+     */
+    word: string;
+}
+export type TokenClassificationOutput = TokenClassificationOutputValue[];
+/**
+ * Usually used for sentence parsing, either grammatical, or Named Entity Recognition (NER) to understand keywords contained within text. Recommended model: dbmdz/bert-large-cased-finetuned-conll03-english
+ */
+export declare function tokenClassification(args: TokenClassificationArgs, options?: Options): Promise<TokenClassificationOutput>;
+//# sourceMappingURL=tokenClassification.d.ts.map

package/dist/src/tasks/nlp/tokenClassification.d.ts.map
@@ -0,0 +1 @@
+{"version":3,"file":"tokenClassification.d.ts","sourceRoot":"","sources":["../../../../src/tasks/nlp/tokenClassification.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAIrD,MAAM,MAAM,uBAAuB,GAAG,QAAQ,GAAG;IAChD;;OAEG;IACH,MAAM,EAAE,MAAM,CAAC;IACf,UAAU,CAAC,EAAE;QACZ;;;;;;;;;;;;WAYG;QACH,oBAAoB,CAAC,EAAE,MAAM,GAAG,QAAQ,GAAG,OAAO,GAAG,SAAS,GAAG,KAAK,CAAC;KACvE,CAAC;CACF,CAAC;AAEF,MAAM,WAAW,8BAA8B;IAC9C;;OAEG;IACH,GAAG,EAAE,MAAM,CAAC;IACZ;;OAEG;IACH,YAAY,EAAE,MAAM,CAAC;IACrB;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd;;OAEG;IACH,IAAI,EAAE,MAAM,CAAC;CACb;AAED,MAAM,MAAM,yBAAyB,GAAG,8BAA8B,EAAE,CAAC;AAEzE;;GAEG;AACH,wBAAsB,mBAAmB,CACxC,IAAI,EAAE,uBAAuB,EAC7B,OAAO,CAAC,EAAE,OAAO,GACf,OAAO,CAAC,yBAAyB,CAAC,CAuBpC"}
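A named-entity recognition sketch with the recommended model and the `simple` aggregation strategy described above:

```ts
import { HfInference } from "@huggingface/inference";

const hf = new HfInference("hf_..."); // placeholder token

const entities = await hf.tokenClassification({
  model: "dbmdz/bert-large-cased-finetuned-conll03-english", // recommended model from the JSDoc above
  inputs: "My name is Sarah Jessica Parker but you can call me Jessica",
  parameters: { aggregation_strategy: "simple" },
});
for (const e of entities) {
  console.log(e.entity_group, e.word, e.score, e.start, e.end);
}
```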
package/dist/src/tasks/nlp/translation.d.ts
@@ -0,0 +1,19 @@
+import type { BaseArgs, Options } from "../../types";
+export type TranslationArgs = BaseArgs & {
+    /**
+     * A string to be translated
+     */
+    inputs: string | string[];
+};
+export interface TranslationOutputValue {
+    /**
+     * The string after translation
+     */
+    translation_text: string;
+}
+export type TranslationOutput = TranslationOutputValue | TranslationOutputValue[];
+/**
+ * This task is well known to translate text from one language to another. Recommended model: Helsinki-NLP/opus-mt-ru-en.
+ */
+export declare function translation(args: TranslationArgs, options?: Options): Promise<TranslationOutput>;
+//# sourceMappingURL=translation.d.ts.map

package/dist/src/tasks/nlp/translation.d.ts.map
@@ -0,0 +1 @@
+{"version":3,"file":"translation.d.ts","sourceRoot":"","sources":["../../../../src/tasks/nlp/translation.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAGrD,MAAM,MAAM,eAAe,GAAG,QAAQ,GAAG;IACxC;;OAEG;IACH,MAAM,EAAE,MAAM,GAAG,MAAM,EAAE,CAAC;CAC1B,CAAC;AAEF,MAAM,WAAW,sBAAsB;IACtC;;OAEG;IACH,gBAAgB,EAAE,MAAM,CAAC;CACzB;AAED,MAAM,MAAM,iBAAiB,GAAG,sBAAsB,GAAG,sBAAsB,EAAE,CAAC;AAElF;;GAEG;AACH,wBAAsB,WAAW,CAAC,IAAI,EAAE,eAAe,EAAE,OAAO,CAAC,EAAE,OAAO,GAAG,OAAO,CAAC,iBAAiB,CAAC,CAUtG"}
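A sketch with the recommended Russian-to-English model; per the output type above, a single string input yields a single `{ translation_text }` object, while a `string[]` input yields an array:

```ts
import { HfInference } from "@huggingface/inference";

const hf = new HfInference("hf_..."); // placeholder token

const res = await hf.translation({
  model: "Helsinki-NLP/opus-mt-ru-en", // recommended model from the JSDoc above
  inputs: "Меня зовут Вольфганг и я живу в Берлине",
});
console.log(res); // { translation_text: "..." } for a single string input
```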
package/dist/src/tasks/nlp/zeroShotClassification.d.ts
@@ -0,0 +1,28 @@
+import type { BaseArgs, Options } from "../../types";
+export type ZeroShotClassificationArgs = BaseArgs & {
+    /**
+     * a string or list of strings
+     */
+    inputs: string | string[];
+    parameters: {
+        /**
+         * a list of strings that are potential classes for inputs. (max 10 candidate_labels, for more, simply run multiple requests, results are going to be misleading if using too many candidate_labels anyway. If you want to keep the exact same, you can simply run multi_label=True and do the scaling on your end.
+         */
+        candidate_labels: string[];
+        /**
+         * (Default: false) Boolean that is set to True if classes can overlap
+         */
+        multi_label?: boolean;
+    };
+};
+export interface ZeroShotClassificationOutputValue {
+    labels: string[];
+    scores: number[];
+    sequence: string;
+}
+export type ZeroShotClassificationOutput = ZeroShotClassificationOutputValue[];
+/**
+ * This task is super useful to try out classification with zero code, you simply pass a sentence/paragraph and the possible labels for that sentence, and you get a result. Recommended model: facebook/bart-large-mnli.
+ */
+export declare function zeroShotClassification(args: ZeroShotClassificationArgs, options?: Options): Promise<ZeroShotClassificationOutput>;
+//# sourceMappingURL=zeroShotClassification.d.ts.map

package/dist/src/tasks/nlp/zeroShotClassification.d.ts.map
@@ -0,0 +1 @@
+{"version":3,"file":"zeroShotClassification.d.ts","sourceRoot":"","sources":["../../../../src/tasks/nlp/zeroShotClassification.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAIrD,MAAM,MAAM,0BAA0B,GAAG,QAAQ,GAAG;IACnD;;OAEG;IACH,MAAM,EAAE,MAAM,GAAG,MAAM,EAAE,CAAC;IAC1B,UAAU,EAAE;QACX;;WAEG;QACH,gBAAgB,EAAE,MAAM,EAAE,CAAC;QAC3B;;WAEG;QACH,WAAW,CAAC,EAAE,OAAO,CAAC;KACtB,CAAC;CACF,CAAC;AAEF,MAAM,WAAW,iCAAiC;IACjD,MAAM,EAAE,MAAM,EAAE,CAAC;IACjB,MAAM,EAAE,MAAM,EAAE,CAAC;IACjB,QAAQ,EAAE,MAAM,CAAC;CACjB;AAED,MAAM,MAAM,4BAA4B,GAAG,iCAAiC,EAAE,CAAC;AAE/E;;GAEG;AACH,wBAAsB,sBAAsB,CAC3C,IAAI,EAAE,0BAA0B,EAChC,OAAO,CAAC,EAAE,OAAO,GACf,OAAO,CAAC,4BAA4B,CAAC,CAqBvC"}
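A zero-shot classification sketch with the recommended model; in each output entry, `labels` and `scores` are parallel arrays for the corresponding input:

```ts
import { HfInference } from "@huggingface/inference";

const hf = new HfInference("hf_..."); // placeholder token

const out = await hf.zeroShotClassification({
  model: "facebook/bart-large-mnli", // recommended model from the JSDoc above
  inputs: "Hi, I recently bought a device from your company but it is not working as advertised.",
  parameters: { candidate_labels: ["refund", "legal", "faq"] },
});
// out[0].labels and out[0].scores line up index by index for the first input
console.log(out[0].labels, out[0].scores);
```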
package/dist/src/tasks/tabular/tabularClassification.d.ts
@@ -0,0 +1,20 @@
+import type { BaseArgs, Options } from "../../types";
+export type TabularClassificationArgs = BaseArgs & {
+    inputs: {
+        /**
+         * A table of data represented as a dict of list where entries are headers and the lists are all the values, all lists must have the same size.
+         */
+        data: Record<string, string[]>;
+    };
+};
+/**
+ * A list of predicted labels for each row
+ */
+export type TabularClassificationOutput = number[];
+/**
+ * Predicts target label for a given set of features in tabular form.
+ * Typically, you will want to train a classification model on your training data and use it with your new data of the same format.
+ * Example model: vvmnnnkv/wine-quality
+ */
+export declare function tabularClassification(args: TabularClassificationArgs, options?: Options): Promise<TabularClassificationOutput>;
+//# sourceMappingURL=tabularClassification.d.ts.map

package/dist/src/tasks/tabular/tabularClassification.d.ts.map
@@ -0,0 +1 @@
+{"version":3,"file":"tabularClassification.d.ts","sourceRoot":"","sources":["../../../../src/tasks/tabular/tabularClassification.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAGrD,MAAM,MAAM,yBAAyB,GAAG,QAAQ,GAAG;IAClD,MAAM,EAAE;QACP;;WAEG;QACH,IAAI,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,EAAE,CAAC,CAAC;KAC/B,CAAC;CACF,CAAC;AAEF;;GAEG;AACH,MAAM,MAAM,2BAA2B,GAAG,MAAM,EAAE,CAAC;AAEnD;;;;GAIG;AACH,wBAAsB,qBAAqB,CAC1C,IAAI,EAAE,yBAAyB,EAC/B,OAAO,CAAC,EAAE,OAAO,GACf,OAAO,CAAC,2BAA2B,CAAC,CAUtC"}
package/dist/src/tasks/tabular/tabularRegression.d.ts
@@ -0,0 +1,20 @@
+import type { BaseArgs, Options } from "../../types";
+export type TabularRegressionArgs = BaseArgs & {
+    inputs: {
+        /**
+         * A table of data represented as a dict of list where entries are headers and the lists are all the values, all lists must have the same size.
+         */
+        data: Record<string, string[]>;
+    };
+};
+/**
+ * a list of predicted values for each row
+ */
+export type TabularRegressionOutput = number[];
+/**
+ * Predicts target value for a given set of features in tabular form.
+ * Typically, you will want to train a regression model on your training data and use it with your new data of the same format.
+ * Example model: scikit-learn/Fish-Weight
+ */
+export declare function tabularRegression(args: TabularRegressionArgs, options?: Options): Promise<TabularRegressionOutput>;
+//# sourceMappingURL=tabularRegression.d.ts.map

package/dist/src/tasks/tabular/tabularRegression.d.ts.map
@@ -0,0 +1 @@
+{"version":3,"file":"tabularRegression.d.ts","sourceRoot":"","sources":["../../../../src/tasks/tabular/tabularRegression.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,aAAa,CAAC;AAGrD,MAAM,MAAM,qBAAqB,GAAG,QAAQ,GAAG;IAC9C,MAAM,EAAE;QACP;;WAEG;QACH,IAAI,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,EAAE,CAAC,CAAC;KAC/B,CAAC;CACF,CAAC;AAEF;;GAEG;AACH,MAAM,MAAM,uBAAuB,GAAG,MAAM,EAAE,CAAC;AAE/C;;;;GAIG;AACH,wBAAsB,iBAAiB,CACtC,IAAI,EAAE,qBAAqB,EAC3B,OAAO,CAAC,EAAE,OAAO,GACf,OAAO,CAAC,uBAAuB,CAAC,CAUlC"}
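A sketch for the two tabular tasks above (classification shown; `tabularRegression` takes the same `{ data }` shape and returns predicted values instead of labels). The column names are illustrative and must match the features the example model was trained on:

```ts
import { HfInference } from "@huggingface/inference";

const hf = new HfInference("hf_..."); // placeholder token

const predictions = await hf.tabularClassification({
  model: "vvmnnnkv/wine-quality", // example model from the JSDoc above
  inputs: {
    data: {
      fixed_acidity: ["7.4", "7.8"], // illustrative column names, two rows per column
      volatile_acidity: ["0.7", "0.88"],
      alcohol: ["9.4", "9.8"],
    },
  },
});
// predictions: number[], one predicted label per table row
console.log(predictions);
```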
package/dist/src/types.d.ts
@@ -0,0 +1,69 @@
+import type { PipelineType } from "@huggingface/tasks";
+import type { ChatCompletionInput } from "@huggingface/tasks";
+export interface Options {
+    /**
+     * (Default: true) Boolean. If a request 503s and wait_for_model is set to false, the request will be retried with the same parameters but with wait_for_model set to true.
+     */
+    retry_on_error?: boolean;
+    /**
+     * (Default: true). Boolean. There is a cache layer on Inference API (serverless) to speedup requests we have already seen. Most models can use those results as is as models are deterministic (meaning the results will be the same anyway). However if you use a non deterministic model, you can set this parameter to prevent the caching mechanism from being used resulting in a real new query.
+     */
+    use_cache?: boolean;
+    /**
+     * (Default: false). Boolean. Do not load the model if it's not already available.
+     */
+    dont_load_model?: boolean;
+    /**
+     * (Default: false). Boolean to use GPU instead of CPU for inference (requires Startup plan at least).
+     */
+    use_gpu?: boolean;
+    /**
+     * (Default: false) Boolean. If the model is not ready, wait for it instead of receiving 503. It limits the number of requests required to get your inference done. It is advised to only set this flag to true after receiving a 503 error as it will limit hanging in your application to known places.
+     */
+    wait_for_model?: boolean;
+    /**
+     * Custom fetch function to use instead of the default one, for example to use a proxy or edit headers.
+     */
+    fetch?: typeof fetch;
+    /**
+     * Abort Controller signal to use for request interruption.
+     */
+    signal?: AbortSignal;
+    /**
+     * (Default: "same-origin"). String | Boolean. Credentials to use for the request. If this is a string, it will be passed straight on. If it's a boolean, true will be "include" and false will not send credentials at all.
+     */
+    includeCredentials?: string | boolean;
+}
+export type InferenceTask = Exclude<PipelineType, "other">;
+export interface BaseArgs {
+    /**
+     * The access token to use. Without it, you'll get rate-limited quickly.
+     *
+     * Can be created for free in hf.co/settings/token
+     */
+    accessToken?: string;
+    /**
+     * The model to use.
+     *
+     * If not specified, will call huggingface.co/api/tasks to get the default model for the task.
+     *
+     * /!\ Legacy behavior allows this to be an URL, but this is deprecated and will be removed in the future.
+     * Use the `endpointUrl` parameter instead.
+     */
+    model?: string;
+    /**
+     * The URL of the endpoint to use. If not specified, will call huggingface.co/api/tasks to get the default endpoint for the task.
+     *
+     * If specified, will use this URL instead of the default one.
+     */
+    endpointUrl?: string;
+}
+export type RequestArgs = BaseArgs & ({
+    data: Blob | ArrayBuffer;
+} | {
+    inputs: unknown;
+} | ChatCompletionInput) & {
+    parameters?: Record<string, unknown>;
+    accessToken?: string;
+};
+//# sourceMappingURL=types.d.ts.map

package/dist/src/types.d.ts.map
@@ -0,0 +1 @@
+{"version":3,"file":"types.d.ts","sourceRoot":"","sources":["../../src/types.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,oBAAoB,CAAC;AACvD,OAAO,KAAK,EAAE,mBAAmB,EAAE,MAAM,oBAAoB,CAAC;AAE9D,MAAM,WAAW,OAAO;IACvB;;OAEG;IACH,cAAc,CAAC,EAAE,OAAO,CAAC;IACzB;;OAEG;IACH,SAAS,CAAC,EAAE,OAAO,CAAC;IACpB;;OAEG;IACH,eAAe,CAAC,EAAE,OAAO,CAAC;IAC1B;;OAEG;IACH,OAAO,CAAC,EAAE,OAAO,CAAC;IAElB;;OAEG;IACH,cAAc,CAAC,EAAE,OAAO,CAAC;IACzB;;OAEG;IACH,KAAK,CAAC,EAAE,OAAO,KAAK,CAAC;IACrB;;OAEG;IACH,MAAM,CAAC,EAAE,WAAW,CAAC;IAErB;;OAEG;IACH,kBAAkB,CAAC,EAAE,MAAM,GAAG,OAAO,CAAC;CACtC;AAED,MAAM,MAAM,aAAa,GAAG,OAAO,CAAC,YAAY,EAAE,OAAO,CAAC,CAAC;AAE3D,MAAM,WAAW,QAAQ;IACxB;;;;OAIG;IACH,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB;;;;;;;OAOG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;IAEf;;;;OAIG;IACH,WAAW,CAAC,EAAE,MAAM,CAAC;CACrB;AAED,MAAM,MAAM,WAAW,GAAG,QAAQ,GACjC,CAAC;IAAE,IAAI,EAAE,IAAI,GAAG,WAAW,CAAA;CAAE,GAAG;IAAE,MAAM,EAAE,OAAO,CAAA;CAAE,GAAG,mBAAmB,CAAC,GAAG;IAC5E,UAAU,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;IACrC,WAAW,CAAC,EAAE,MAAM,CAAC;CACrB,CAAC"}
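The `Options` interface above is the per-call second argument accepted by the task functions. A sketch, assuming the `HfInference` client forwards these options unchanged to the underlying request (model id repeated from an earlier example):

```ts
import { HfInference } from "@huggingface/inference";

const hf = new HfInference("hf_..."); // placeholder token

const labels = await hf.textClassification(
  { model: "distilbert-base-uncased-finetuned-sst-2-english", inputs: "Great release!" },
  {
    wait_for_model: true, // wait for a cold model instead of surfacing a 503
    use_cache: false, // force a fresh query past the serverless cache layer
    signal: AbortSignal.timeout(30_000), // abort the underlying fetch after 30 s
  }
);
console.log(labels);
```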
package/dist/src/utils/base64FromBytes.d.ts.map
@@ -0,0 +1 @@
+{"version":3,"file":"base64FromBytes.d.ts","sourceRoot":"","sources":["../../../src/utils/base64FromBytes.ts"],"names":[],"mappings":"AAAA,wBAAgB,eAAe,CAAC,GAAG,EAAE,UAAU,GAAG,MAAM,CAUvD"}
package/dist/src/utils/distributive-omit.d.ts
@@ -0,0 +1,9 @@
+/**
+ * This allows omitting keys from objects inside unions, without merging the individual components of the union.
+ */
+type Omit_<T, K> = Omit<T, Extract<keyof T, K>>;
+export type DistributiveOmit<T, K> = T extends unknown ? keyof Omit_<T, K> extends never ? never : {
+    [P in keyof Omit_<T, K>]: Omit_<T, K>[P];
+} : never;
+export {};
+//# sourceMappingURL=distributive-omit.d.ts.map

package/dist/src/utils/distributive-omit.d.ts.map
@@ -0,0 +1 @@
+{"version":3,"file":"distributive-omit.d.ts","sourceRoot":"","sources":["../../../src/utils/distributive-omit.ts"],"names":[],"mappings":"AAEA;;GAEG;AAEH,KAAK,KAAK,CAAC,CAAC,EAAE,CAAC,IAAI,IAAI,CAAC,CAAC,EAAE,OAAO,CAAC,MAAM,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC;AAEhD,MAAM,MAAM,gBAAgB,CAAC,CAAC,EAAE,CAAC,IAAI,CAAC,SAAS,OAAO,GACnD,MAAM,KAAK,CAAC,CAAC,EAAE,CAAC,CAAC,SAAS,KAAK,GAC9B,KAAK,GACL;KAAG,CAAC,IAAI,MAAM,KAAK,CAAC,CAAC,EAAE,CAAC,CAAC,GAAG,KAAK,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,CAAC;CAAE,GAC7C,KAAK,CAAC"}
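Illustrative only: what `DistributiveOmit` does compared to the built-in `Omit` when applied to a union. The helper is internal to the package, so its definition is repeated inline here rather than imported:

```ts
// Copied from the declaration above; internal helper, not part of the public API.
type Omit_<T, K> = Omit<T, Extract<keyof T, K>>;
type DistributiveOmit<T, K> = T extends unknown
  ? keyof Omit_<T, K> extends never
    ? never
    : { [P in keyof Omit_<T, K>]: Omit_<T, K>[P] }
  : never;

type A = { kind: "a"; shared: string; onlyA: number };
type B = { kind: "b"; shared: string; onlyB: boolean };

// The built-in Omit<A | B, "shared"> collapses the union to its common keys and loses onlyA/onlyB.
// DistributiveOmit applies the omission to each union member separately:
type WithoutShared = DistributiveOmit<A | B, "shared">;
// -> { kind: "a"; onlyA: number } | { kind: "b"; onlyB: boolean }
```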
package/dist/src/utils/isBackend.d.ts.map
@@ -0,0 +1 @@
+{"version":3,"file":"isBackend.d.ts","sourceRoot":"","sources":["../../../src/utils/isBackend.ts"],"names":[],"mappings":"AAKA,eAAO,MAAM,SAAS,SAA6B,CAAC"}

package/dist/src/utils/isFrontend.d.ts.map
@@ -0,0 +1 @@
+{"version":3,"file":"isFrontend.d.ts","sourceRoot":"","sources":["../../../src/utils/isFrontend.ts"],"names":[],"mappings":"AAEA,eAAO,MAAM,UAAU,SAAa,CAAC"}

package/dist/src/utils/omit.d.ts.map
@@ -0,0 +1 @@
+{"version":3,"file":"omit.d.ts","sourceRoot":"","sources":["../../../src/utils/omit.ts"],"names":[],"mappings":"AAGA;;GAEG;AACH,wBAAgB,IAAI,CAAC,CAAC,SAAS,MAAM,EAAE,CAAC,SAAS,MAAM,CAAC,EAAE,CAAC,EAAE,CAAC,EAAE,KAAK,EAAE,CAAC,EAAE,GAAG,CAAC,GAAG,IAAI,CAAC,CAAC,EAAE,OAAO,CAAC,MAAM,CAAC,EAAE,CAAC,CAAC,CAAC,CAI5G"}

package/dist/src/utils/pick.d.ts.map
@@ -0,0 +1 @@
+{"version":3,"file":"pick.d.ts","sourceRoot":"","sources":["../../../src/utils/pick.ts"],"names":[],"mappings":"AAAA;;GAEG;AACH,wBAAgB,IAAI,CAAC,CAAC,EAAE,CAAC,SAAS,MAAM,CAAC,EAAE,CAAC,EAAE,CAAC,EAAE,KAAK,EAAE,CAAC,EAAE,GAAG,aAAa,CAAC,CAAC,CAAC,GAAG,IAAI,CAAC,CAAC,EAAE,CAAC,CAAC,CAS1F"}

package/dist/src/utils/toArray.d.ts.map
@@ -0,0 +1 @@
+{"version":3,"file":"toArray.d.ts","sourceRoot":"","sources":["../../../src/utils/toArray.ts"],"names":[],"mappings":"AAAA,wBAAgB,OAAO,CAAC,CAAC,EAAE,GAAG,EAAE,CAAC,GAAG,CAAC,SAAS,OAAO,EAAE,GAAG,CAAC,GAAG,CAAC,EAAE,CAKhE"}

package/dist/src/utils/typedInclude.d.ts.map
@@ -0,0 +1 @@
+{"version":3,"file":"typedInclude.d.ts","sourceRoot":"","sources":["../../../src/utils/typedInclude.ts"],"names":[],"mappings":"AAAA,wBAAgB,YAAY,CAAC,CAAC,EAAE,CAAC,SAAS,CAAC,EAAE,GAAG,EAAE,SAAS,CAAC,EAAE,EAAE,CAAC,EAAE,CAAC,GAAG,CAAC,IAAI,CAAC,CAE5E"}