@huggingface/tasks 0.2.0 → 0.2.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/{index.mjs → index.cjs} +295 -134
- package/dist/index.d.ts +8 -6
- package/dist/index.js +260 -169
- package/package.json +13 -8
- package/src/library-to-tasks.ts +1 -1
- package/src/library-ui-elements.ts +24 -10
- package/src/model-data.ts +1 -1
- package/src/model-libraries.ts +3 -2
- package/src/pipelines.ts +1 -1
- package/src/tasks/audio-classification/about.md +1 -1
- package/src/tasks/audio-classification/inference.ts +51 -0
- package/src/tasks/audio-classification/spec/input.json +34 -0
- package/src/tasks/audio-classification/spec/output.json +21 -0
- package/src/tasks/audio-to-audio/about.md +1 -1
- package/src/tasks/automatic-speech-recognition/about.md +4 -2
- package/src/tasks/automatic-speech-recognition/inference.ts +154 -0
- package/src/tasks/automatic-speech-recognition/spec/input.json +34 -0
- package/src/tasks/automatic-speech-recognition/spec/output.json +36 -0
- package/src/tasks/common-definitions.json +109 -0
- package/src/tasks/depth-estimation/data.ts +8 -4
- package/src/tasks/depth-estimation/inference.ts +35 -0
- package/src/tasks/depth-estimation/spec/input.json +30 -0
- package/src/tasks/depth-estimation/spec/output.json +10 -0
- package/src/tasks/document-question-answering/inference.ts +102 -0
- package/src/tasks/document-question-answering/spec/input.json +85 -0
- package/src/tasks/document-question-answering/spec/output.json +36 -0
- package/src/tasks/feature-extraction/inference.ts +22 -0
- package/src/tasks/feature-extraction/spec/input.json +26 -0
- package/src/tasks/feature-extraction/spec/output.json +7 -0
- package/src/tasks/fill-mask/inference.ts +61 -0
- package/src/tasks/fill-mask/spec/input.json +38 -0
- package/src/tasks/fill-mask/spec/output.json +29 -0
- package/src/tasks/image-classification/inference.ts +51 -0
- package/src/tasks/image-classification/spec/input.json +34 -0
- package/src/tasks/image-classification/spec/output.json +10 -0
- package/src/tasks/image-segmentation/inference.ts +65 -0
- package/src/tasks/image-segmentation/spec/input.json +54 -0
- package/src/tasks/image-segmentation/spec/output.json +25 -0
- package/src/tasks/image-to-image/inference.ts +67 -0
- package/src/tasks/image-to-image/spec/input.json +52 -0
- package/src/tasks/image-to-image/spec/output.json +12 -0
- package/src/tasks/image-to-text/inference.ts +138 -0
- package/src/tasks/image-to-text/spec/input.json +34 -0
- package/src/tasks/image-to-text/spec/output.json +17 -0
- package/src/tasks/index.ts +5 -2
- package/src/tasks/mask-generation/about.md +65 -0
- package/src/tasks/mask-generation/data.ts +55 -0
- package/src/tasks/object-detection/inference.ts +62 -0
- package/src/tasks/object-detection/spec/input.json +30 -0
- package/src/tasks/object-detection/spec/output.json +46 -0
- package/src/tasks/placeholder/data.ts +3 -0
- package/src/tasks/placeholder/spec/input.json +35 -0
- package/src/tasks/placeholder/spec/output.json +17 -0
- package/src/tasks/question-answering/inference.ts +99 -0
- package/src/tasks/question-answering/spec/input.json +67 -0
- package/src/tasks/question-answering/spec/output.json +29 -0
- package/src/tasks/sentence-similarity/about.md +2 -2
- package/src/tasks/sentence-similarity/inference.ts +32 -0
- package/src/tasks/sentence-similarity/spec/input.json +40 -0
- package/src/tasks/sentence-similarity/spec/output.json +12 -0
- package/src/tasks/summarization/data.ts +1 -0
- package/src/tasks/summarization/inference.ts +58 -0
- package/src/tasks/summarization/spec/input.json +7 -0
- package/src/tasks/summarization/spec/output.json +7 -0
- package/src/tasks/table-question-answering/inference.ts +61 -0
- package/src/tasks/table-question-answering/spec/input.json +39 -0
- package/src/tasks/table-question-answering/spec/output.json +40 -0
- package/src/tasks/tabular-classification/about.md +1 -1
- package/src/tasks/tabular-regression/about.md +1 -1
- package/src/tasks/text-classification/about.md +1 -0
- package/src/tasks/text-classification/inference.ts +51 -0
- package/src/tasks/text-classification/spec/input.json +35 -0
- package/src/tasks/text-classification/spec/output.json +10 -0
- package/src/tasks/text-generation/about.md +24 -13
- package/src/tasks/text-generation/data.ts +22 -38
- package/src/tasks/text-generation/inference.ts +85 -0
- package/src/tasks/text-generation/spec/input.json +74 -0
- package/src/tasks/text-generation/spec/output.json +17 -0
- package/src/tasks/text-to-audio/inference.ts +138 -0
- package/src/tasks/text-to-audio/spec/input.json +31 -0
- package/src/tasks/text-to-audio/spec/output.json +20 -0
- package/src/tasks/text-to-image/about.md +11 -2
- package/src/tasks/text-to-image/data.ts +6 -2
- package/src/tasks/text-to-image/inference.ts +73 -0
- package/src/tasks/text-to-image/spec/input.json +57 -0
- package/src/tasks/text-to-image/spec/output.json +15 -0
- package/src/tasks/text-to-speech/about.md +4 -2
- package/src/tasks/text-to-speech/data.ts +1 -0
- package/src/tasks/text-to-speech/inference.ts +146 -0
- package/src/tasks/text-to-speech/spec/input.json +7 -0
- package/src/tasks/text-to-speech/spec/output.json +7 -0
- package/src/tasks/text2text-generation/inference.ts +53 -0
- package/src/tasks/text2text-generation/spec/input.json +55 -0
- package/src/tasks/text2text-generation/spec/output.json +17 -0
- package/src/tasks/token-classification/inference.ts +82 -0
- package/src/tasks/token-classification/spec/input.json +65 -0
- package/src/tasks/token-classification/spec/output.json +33 -0
- package/src/tasks/translation/data.ts +1 -0
- package/src/tasks/translation/inference.ts +58 -0
- package/src/tasks/translation/spec/input.json +7 -0
- package/src/tasks/translation/spec/output.json +7 -0
- package/src/tasks/video-classification/inference.ts +59 -0
- package/src/tasks/video-classification/spec/input.json +42 -0
- package/src/tasks/video-classification/spec/output.json +10 -0
- package/src/tasks/visual-question-answering/inference.ts +63 -0
- package/src/tasks/visual-question-answering/spec/input.json +41 -0
- package/src/tasks/visual-question-answering/spec/output.json +21 -0
- package/src/tasks/zero-shot-classification/inference.ts +67 -0
- package/src/tasks/zero-shot-classification/spec/input.json +50 -0
- package/src/tasks/zero-shot-classification/spec/output.json +10 -0
- package/src/tasks/zero-shot-image-classification/data.ts +8 -5
- package/src/tasks/zero-shot-image-classification/inference.ts +61 -0
- package/src/tasks/zero-shot-image-classification/spec/input.json +45 -0
- package/src/tasks/zero-shot-image-classification/spec/output.json +10 -0
- package/src/tasks/zero-shot-object-detection/about.md +45 -0
- package/src/tasks/zero-shot-object-detection/data.ts +62 -0
- package/src/tasks/zero-shot-object-detection/inference.ts +66 -0
- package/src/tasks/zero-shot-object-detection/spec/input.json +40 -0
- package/src/tasks/zero-shot-object-detection/spec/output.json +47 -0
- package/tsconfig.json +3 -3

package/src/tasks/depth-estimation/data.ts
@@ -28,8 +28,8 @@ const taskData: TaskDataCustom = {
             id: "Intel/dpt-large",
         },
         {
-            description: "Strong Depth Estimation model trained on
-            id: "
+            description: "Strong Depth Estimation model trained on a big compilation of datasets.",
+            id: "LiheYoung/depth-anything-large-hf",
         },
         {
             description: "A strong monocular depth estimation model.",
@@ -42,8 +42,12 @@ const taskData: TaskDataCustom = {
             id: "radames/dpt-depth-estimation-3d-voxels",
         },
         {
-            description: "An application
-            id: "
+            description: "An application to compare the outputs of different depth estimation models.",
+            id: "LiheYoung/Depth-Anything",
+        },
+        {
+            description: "An application to try state-of-the-art depth estimation.",
+            id: "merve/compare_depth_models",
         },
     ],
     summary: "Depth estimation is the task of predicting depth of the objects present in an image.",

package/src/tasks/depth-estimation/inference.ts
@@ -0,0 +1,35 @@
+/**
+ * Inference code generated from the JSON schema spec in ./spec
+ *
+ * Using src/scripts/inference-codegen
+ */
+
+export type DepthEstimationOutput = unknown[];
+
+/**
+ * Inputs for Depth Estimation inference
+ */
+export interface DepthEstimationInput {
+    /**
+     * The input image data
+     */
+    data: unknown;
+    /**
+     * Additional inference parameters
+     */
+    parameters?: DepthEstimationParameters;
+    [property: string]: unknown;
+}
+
+/**
+ * Additional inference parameters
+ *
+ * Additional inference parameters for Depth Estimation
+ */
+export interface DepthEstimationParameters {
+    /**
+     * When specified, limits the output to the top K most probable classes.
+     */
+    topK?: number;
+    [property: string]: unknown;
+}

package/src/tasks/depth-estimation/spec/input.json
@@ -0,0 +1,30 @@
+{
+  "$id": "/inference/schemas/depth-estimation/input.json",
+  "$schema": "http://json-schema.org/draft-06/schema#",
+  "description": "Inputs for Depth Estimation inference",
+  "title": "DepthEstimationInput",
+  "type": "object",
+  "properties": {
+    "data": {
+      "description": "The input image data"
+    },
+    "parameters": {
+      "description": "Additional inference parameters",
+      "$ref": "#/$defs/DepthEstimationParameters"
+    }
+  },
+  "$defs": {
+    "DepthEstimationParameters": {
+      "title": "DepthEstimationParameters",
+      "description": "Additional inference parameters for Depth Estimation",
+      "type": "object",
+      "properties": {
+        "topK": {
+          "type": "integer",
+          "description": "When specified, limits the output to the top K most probable classes."
+        }
+      }
+    }
+  },
+  "required": ["data"]
+}

package/src/tasks/depth-estimation/spec/output.json
@@ -0,0 +1,10 @@
+{
+  "$id": "/inference/schemas/depth-estimation/output.json",
+  "$schema": "http://json-schema.org/draft-06/schema#",
+  "description": "Outputs of inference for the Depth Estimation task",
+  "title": "DepthEstimationOutput",
+  "type": "array",
+  "items": {
+    "description": "The output depth labels"
+  }
+}
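
A minimal sketch of how the generated depth-estimation types above might be consumed from TypeScript. The interfaces are mirrored locally so the snippet stands alone (it does not assume anything about what the package re-exports from its root entry point), and the image bytes are placeholder data.

// Local mirror of DepthEstimationInput / DepthEstimationParameters from the hunk above.
interface DepthEstimationParameters {
    topK?: number;
    [property: string]: unknown;
}

interface DepthEstimationInput {
    data: unknown;
    parameters?: DepthEstimationParameters;
    [property: string]: unknown;
}

// Build a request payload: opaque image data plus the optional topK parameter.
const depthInput: DepthEstimationInput = {
    data: new Uint8Array([0x89, 0x50, 0x4e, 0x47]), // placeholder bytes, not a real image
    parameters: { topK: 1 },
};

console.log(Object.keys(depthInput)); // ["data", "parameters"]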

package/src/tasks/document-question-answering/inference.ts
@@ -0,0 +1,102 @@
+/**
+ * Inference code generated from the JSON schema spec in ./spec
+ *
+ * Using src/scripts/inference-codegen
+ */
+/**
+ * Inputs for Document Question Answering inference
+ */
+export interface DocumentQuestionAnsweringInput {
+    /**
+     * One (document, question) pair to answer
+     */
+    data: DocumentQuestionAnsweringInputData;
+    /**
+     * Additional inference parameters
+     */
+    parameters?: DocumentQuestionAnsweringParameters;
+    [property: string]: unknown;
+}
+/**
+ * One (document, question) pair to answer
+ */
+export interface DocumentQuestionAnsweringInputData {
+    /**
+     * The image on which the question is asked
+     */
+    image: unknown;
+    /**
+     * A question to ask of the document
+     */
+    question: string;
+    [property: string]: unknown;
+}
+/**
+ * Additional inference parameters
+ *
+ * Additional inference parameters for Document Question Answering
+ */
+export interface DocumentQuestionAnsweringParameters {
+    /**
+     * If the words in the document are too long to fit with the question for the model, it will
+     * be split in several chunks with some overlap. This argument controls the size of that
+     * overlap.
+     */
+    docStride?: number;
+    /**
+     * Whether to accept impossible as an answer
+     */
+    handleImpossibleAnswer?: boolean;
+    /**
+     * Language to use while running OCR. Defaults to english.
+     */
+    lang?: string;
+    /**
+     * The maximum length of predicted answers (e.g., only answers with a shorter length are
+     * considered).
+     */
+    maxAnswerLen?: number;
+    /**
+     * The maximum length of the question after tokenization. It will be truncated if needed.
+     */
+    maxQuestionLen?: number;
+    /**
+     * The maximum length of the total sentence (context + question) in tokens of each chunk
+     * passed to the model. The context will be split in several chunks (using doc_stride as
+     * overlap) if needed.
+     */
+    maxSeqLen?: number;
+    /**
+     * The number of answers to return (will be chosen by order of likelihood). Can return less
+     * than top_k answers if there are not enough options available within the context.
+     */
+    topK?: number;
+    /**
+     * A list of words and bounding boxes (normalized 0->1000). If provided, the inference will
+     * skip the OCR step and use the provided bounding boxes instead.
+     */
+    wordBoxes?: WordBox[];
+    [property: string]: unknown;
+}
+export type WordBox = number[] | string;
+export type DocumentQuestionAnsweringOutput = DocumentQuestionAnsweringOutputElement[];
+/**
+ * Outputs of inference for the Document Question Answering task
+ */
+export interface DocumentQuestionAnsweringOutputElement {
+    /**
+     * The answer to the question.
+     */
+    answer: string;
+    end: number;
+    /**
+     * The probability associated to the answer.
+     */
+    score: number;
+    start: number;
+    /**
+     * The index of each word/box pair that is in the answer
+     */
+    words: number[];
+    [property: string]: unknown;
+}

package/src/tasks/document-question-answering/spec/input.json
@@ -0,0 +1,85 @@
+{
+  "$id": "/inference/schemas/document-question-answering/input.json",
+  "$schema": "http://json-schema.org/draft-06/schema#",
+  "description": "Inputs for Document Question Answering inference",
+  "title": "DocumentQuestionAnsweringInput",
+  "type": "object",
+  "properties": {
+    "data": {
+      "description": "One (document, question) pair to answer",
+      "type": "object",
+      "title": "DocumentQuestionAnsweringInputData",
+      "properties": {
+        "image": {
+          "description": "The image on which the question is asked"
+        },
+        "question": {
+          "type": "string",
+          "description": "A question to ask of the document"
+        }
+      },
+      "required": ["image", "question"]
+    },
+    "parameters": {
+      "description": "Additional inference parameters",
+      "$ref": "#/$defs/DocumentQuestionAnsweringParameters"
+    }
+  },
+  "$defs": {
+    "DocumentQuestionAnsweringParameters": {
+      "title": "DocumentQuestionAnsweringParameters",
+      "description": "Additional inference parameters for Document Question Answering",
+      "type": "object",
+      "properties": {
+        "docStride": {
+          "type": "integer",
+          "description": "If the words in the document are too long to fit with the question for the model, it will be split in several chunks with some overlap. This argument controls the size of that overlap."
+        },
+        "handleImpossibleAnswer": {
+          "type": "boolean",
+          "description": "Whether to accept impossible as an answer"
+        },
+        "lang": {
+          "type": "string",
+          "description": "Language to use while running OCR. Defaults to english."
+        },
+        "maxAnswerLen": {
+          "type": "integer",
+          "description": "The maximum length of predicted answers (e.g., only answers with a shorter length are considered)."
+        },
+        "maxSeqLen": {
+          "type": "integer",
+          "description": "The maximum length of the total sentence (context + question) in tokens of each chunk passed to the model. The context will be split in several chunks (using doc_stride as overlap) if needed."
+        },
+        "maxQuestionLen": {
+          "type": "integer",
+          "description": "The maximum length of the question after tokenization. It will be truncated if needed."
+        },
+        "topK": {
+          "type": "integer",
+          "description": "The number of answers to return (will be chosen by order of likelihood). Can return less than top_k answers if there are not enough options available within the context."
+        },
+        "wordBoxes": {
+          "type": "array",
+          "description": "A list of words and bounding boxes (normalized 0->1000). If provided, the inference will skip the OCR step and use the provided bounding boxes instead.",
+          "items": {
+            "anyOf": [
+              {
+                "type": "string"
+              },
+              {
+                "type": "array",
+                "items": {
+                  "type": "number"
+                },
+                "maxLength": 4,
+                "minLength": 4
+              }
+            ]
+          }
+        }
+      }
+    }
+  },
+  "required": ["data"]
+}

package/src/tasks/document-question-answering/spec/output.json
@@ -0,0 +1,36 @@
+{
+  "$id": "/inference/schemas/document-question-answering/output.json",
+  "$schema": "http://json-schema.org/draft-06/schema#",
+  "description": "Outputs of inference for the Document Question Answering task",
+  "title": "DocumentQuestionAnsweringOutput",
+  "type": "array",
+  "items": {
+    "type": "object",
+    "properties": {
+      "answer": {
+        "type": "string",
+        "description": "The answer to the question."
+      },
+      "score": {
+        "type": "number",
+        "description": "The probability associated to the answer."
+      },
+      "start": {
+        "type": "integer",
+        "descrtiption": "The start word index of the answer (in the OCR’d version of the input or provided word boxes)."
+      },
+      "end": {
+        "type": "integer",
+        "descrtiption": "The end word index of the answer (in the OCR’d version of the input or provided word boxes)."
+      },
+      "words": {
+        "type": "array",
+        "items": {
+          "type": "integer"
+        },
+        "description": "The index of each word/box pair that is in the answer"
+      }
+    },
+    "required": ["answer", "score", "start", "end", "words"]
+  }
+}
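
A brief sketch of how the Document Question Answering shapes above fit together: a WordBox list mixes words and four-number boxes (so OCR can be skipped), and each output element pairs an answer with its score and word indices. The types are mirrored locally and all concrete values are made up for illustration.

// Local mirrors of the generated Document Question Answering types above.
type WordBox = number[] | string;

interface DocumentQuestionAnsweringParameters {
    docStride?: number;
    topK?: number;
    wordBoxes?: WordBox[];
    [property: string]: unknown;
}

interface DocumentQuestionAnsweringInput {
    data: { image: unknown; question: string; [property: string]: unknown };
    parameters?: DocumentQuestionAnsweringParameters;
    [property: string]: unknown;
}

interface DocumentQuestionAnsweringOutputElement {
    answer: string;
    score: number;
    start: number;
    end: number;
    words: number[];
    [property: string]: unknown;
}

// A (document, question) pair; wordBoxes alternates words and 0-1000-normalized boxes.
const dqaInput: DocumentQuestionAnsweringInput = {
    data: { image: new Uint8Array(), question: "What is the invoice total?" },
    parameters: { docStride: 128, topK: 2, wordBoxes: ["Total:", [700, 900, 780, 920]] },
};

// Made-up response shaped like DocumentQuestionAnsweringOutput.
const dqaOutput: DocumentQuestionAnsweringOutputElement[] = [
    { answer: "$42.00", score: 0.91, start: 12, end: 13, words: [12, 13] },
];

console.log(dqaInput.data.question, "->", dqaOutput[0].answer);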

package/src/tasks/feature-extraction/inference.ts
@@ -0,0 +1,22 @@
+/**
+ * Inference code generated from the JSON schema spec in ./spec
+ *
+ * Using src/scripts/inference-codegen
+ */
+
+export type FeatureExtractionOutput = unknown[];
+
+/**
+ * Inputs for Text Embedding inference
+ */
+export interface FeatureExtractionInput {
+    /**
+     * The text to get the embeddings of
+     */
+    data: string;
+    /**
+     * Additional inference parameters
+     */
+    parameters?: { [key: string]: unknown };
+    [property: string]: unknown;
+}

package/src/tasks/feature-extraction/spec/input.json
@@ -0,0 +1,26 @@
+{
+  "$id": "/inference/schemas/feature-extraction/input.json",
+  "$schema": "http://json-schema.org/draft-06/schema#",
+  "description": "Inputs for Text Embedding inference",
+  "title": "FeatureExtractionInput",
+  "type": "object",
+  "properties": {
+    "data": {
+      "description": "The text to get the embeddings of",
+      "type": "string"
+    },
+    "parameters": {
+      "description": "Additional inference parameters",
+      "$ref": "#/$defs/FeatureExtractionParameters"
+    }
+  },
+  "$defs": {
+    "FeatureExtractionParameters": {
+      "title": "FeatureExtractionParameters",
+      "description": "Additional inference parameters for Feature Extraction",
+      "type": "object",
+      "properties": {}
+    }
+  },
+  "required": ["data"]
+}

package/src/tasks/feature-extraction/spec/output.json
@@ -0,0 +1,7 @@
+{
+  "$id": "/inference/schemas/feature-extraction/output.json",
+  "$schema": "http://json-schema.org/draft-06/schema#",
+  "description": "The embedding for the input text, as a nested list (tensor) of floats",
+  "type": "array",
+  "title": "FeatureExtractionOutput"
+}
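
Since FeatureExtractionOutput is declared above as unknown[] (a nested list of floats per the schema description), callers typically narrow it themselves before using the embedding. A minimal sketch under that assumption; the narrowing, the cosine helper, and the sample vectors are illustrative and not part of the package.

// A caller-side narrowing of FeatureExtractionOutput to a flat float vector.
type Embedding = number[];

// Compare two embeddings with cosine similarity (a common use of this output).
function cosineSimilarity(a: Embedding, b: Embedding): number {
    let dot = 0, normA = 0, normB = 0;
    for (let i = 0; i < Math.min(a.length, b.length); i++) {
        dot += a[i] * b[i];
        normA += a[i] * a[i];
        normB += b[i] * b[i];
    }
    return dot / (Math.sqrt(normA) * Math.sqrt(normB));
}

// Made-up embeddings standing in for two FeatureExtractionOutput values.
const embeddingA: Embedding = [0.1, 0.3, -0.2];
const embeddingB: Embedding = [0.11, 0.28, -0.19];

console.log(cosineSimilarity(embeddingA, embeddingB).toFixed(3));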

package/src/tasks/fill-mask/inference.ts
@@ -0,0 +1,61 @@
+/**
+ * Inference code generated from the JSON schema spec in ./spec
+ *
+ * Using src/scripts/inference-codegen
+ */
+/**
+ * Inputs for Fill Mask inference
+ */
+export interface FillMaskInput {
+    /**
+     * The text with masked tokens
+     */
+    data: string;
+    /**
+     * Additional inference parameters
+     */
+    parameters?: FillMaskParameters;
+    [property: string]: unknown;
+}
+/**
+ * Additional inference parameters
+ *
+ * Additional inference parameters for Fill Mask
+ */
+export interface FillMaskParameters {
+    /**
+     * When passed, the model will limit the scores to the passed targets instead of looking up
+     * in the whole vocabulary. If the provided targets are not in the model vocab, they will be
+     * tokenized and the first resulting token will be used (with a warning, and that might be
+     * slower).
+     */
+    targets?: string[];
+    /**
+     * When passed, overrides the number of predictions to return.
+     */
+    topK?: number;
+    [property: string]: unknown;
+}
+export type FillMaskOutput = FillMaskOutputElement[];
+/**
+ * Outputs of inference for the Fill Mask task
+ */
+export interface FillMaskOutputElement {
+    /**
+     * The corresponding probability
+     */
+    score: number;
+    /**
+     * The corresponding input with the mask token prediction.
+     */
+    sequence: string;
+    /**
+     * The predicted token id (to replace the masked one).
+     */
+    token: number;
+    /**
+     * The predicted token (to replace the masked one).
+     */
+    tokenStr: string;
+    [property: string]: unknown;
+}

package/src/tasks/fill-mask/spec/input.json
@@ -0,0 +1,38 @@
+{
+  "$id": "/inference/schemas/fill-mask/input.json",
+  "$schema": "http://json-schema.org/draft-06/schema#",
+  "description": "Inputs for Fill Mask inference",
+  "title": "FillMaskInput",
+  "type": "object",
+  "properties": {
+    "data": {
+      "description": "The text with masked tokens",
+      "type": "string"
+    },
+    "parameters": {
+      "description": "Additional inference parameters",
+      "$ref": "#/$defs/FillMaskParameters"
+    }
+  },
+  "$defs": {
+    "FillMaskParameters": {
+      "title": "FillMaskParameters",
+      "description": "Additional inference parameters for Fill Mask",
+      "type": "object",
+      "properties": {
+        "topK": {
+          "type": "integer",
+          "description": "When passed, overrides the number of predictions to return."
+        },
+        "targets": {
+          "description": "When passed, the model will limit the scores to the passed targets instead of looking up in the whole vocabulary. If the provided targets are not in the model vocab, they will be tokenized and the first resulting token will be used (with a warning, and that might be slower).",
+          "type": "array",
+          "items": {
+            "type": "string"
+          }
+        }
+      }
+    }
+  },
+  "required": ["data"]
+}

package/src/tasks/fill-mask/spec/output.json
@@ -0,0 +1,29 @@
+{
+  "$id": "/inference/schemas/fill-mask/output.json",
+  "$schema": "http://json-schema.org/draft-06/schema#",
+  "description": "Outputs of inference for the Fill Mask task",
+  "title": "FillMaskOutput",
+  "type": "array",
+  "items": {
+    "type": "object",
+    "properties": {
+      "sequence": {
+        "type": "string",
+        "description": "The corresponding input with the mask token prediction."
+      },
+      "score": {
+        "type": "number",
+        "description": "The corresponding probability"
+      },
+      "token": {
+        "type": "integer",
+        "description": "The predicted token id (to replace the masked one)."
+      },
+      "tokenStr": {
+        "type": "string",
+        "description": "The predicted token (to replace the masked one)."
+      }
+    },
+    "required": ["sequence", "score", "token", "tokenStr"]
+  }
+}
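
A minimal sketch of the Fill Mask request/response shapes above: restricting scoring to a target list and picking the highest-scoring prediction. Types are mirrored locally; the mask token, candidate tokens, and scores are invented for illustration.

// Local mirrors of the generated Fill Mask types above.
interface FillMaskParameters {
    targets?: string[];
    topK?: number;
    [property: string]: unknown;
}

interface FillMaskInput {
    data: string;
    parameters?: FillMaskParameters;
    [property: string]: unknown;
}

interface FillMaskOutputElement {
    score: number;
    sequence: string;
    token: number;
    tokenStr: string;
    [property: string]: unknown;
}

// Restrict scoring to two candidate tokens and ask for both predictions back.
const fillMaskInput: FillMaskInput = {
    data: "The capital of France is [MASK].",
    parameters: { targets: ["Paris", "Lyon"], topK: 2 },
};

// Made-up predictions shaped like FillMaskOutput.
const predictions: FillMaskOutputElement[] = [
    { score: 0.97, sequence: "The capital of France is Paris.", token: 2123, tokenStr: "Paris" },
    { score: 0.01, sequence: "The capital of France is Lyon.", token: 9876, tokenStr: "Lyon" },
];

// Rank by probability and keep the best completion.
const best = [...predictions].sort((a, b) => b.score - a.score)[0];
console.log(best.sequence, fillMaskInput.parameters?.topK);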

package/src/tasks/image-classification/inference.ts
@@ -0,0 +1,51 @@
+/**
+ * Inference code generated from the JSON schema spec in ./spec
+ *
+ * Using src/scripts/inference-codegen
+ */
+/**
+ * Inputs for Image Classification inference
+ */
+export interface ImageClassificationInput {
+    /**
+     * The input image data
+     */
+    data: unknown;
+    /**
+     * Additional inference parameters
+     */
+    parameters?: ImageClassificationParameters;
+    [property: string]: unknown;
+}
+/**
+ * Additional inference parameters
+ *
+ * Additional inference parameters for Image Classification
+ */
+export interface ImageClassificationParameters {
+    functionToApply?: ClassificationOutputTransform;
+    /**
+     * When specified, limits the output to the top K most probable classes.
+     */
+    topK?: number;
+    [property: string]: unknown;
+}
+/**
+ * The function to apply to the model outputs in order to retrieve the scores.
+ */
+export type ClassificationOutputTransform = "sigmoid" | "softmax" | "none";
+export type ImageClassificationOutput = ImageClassificationOutputElement[];
+/**
+ * Outputs of inference for the Image Classification task
+ */
+export interface ImageClassificationOutputElement {
+    /**
+     * The predicted class label.
+     */
+    label: string;
+    /**
+     * The corresponding probability.
+     */
+    score: number;
+    [property: string]: unknown;
+}

package/src/tasks/image-classification/spec/input.json
@@ -0,0 +1,34 @@
+{
+  "$id": "/inference/schemas/image-classification/input.json",
+  "$schema": "http://json-schema.org/draft-06/schema#",
+  "description": "Inputs for Image Classification inference",
+  "title": "ImageClassificationInput",
+  "type": "object",
+  "properties": {
+    "data": {
+      "description": "The input image data"
+    },
+    "parameters": {
+      "description": "Additional inference parameters",
+      "$ref": "#/$defs/ImageClassificationParameters"
+    }
+  },
+  "$defs": {
+    "ImageClassificationParameters": {
+      "title": "ImageClassificationParameters",
+      "description": "Additional inference parameters for Image Classification",
+      "type": "object",
+      "properties": {
+        "functionToApply": {
+          "title": "ImageClassificationOutputTransform",
+          "$ref": "/inference/schemas/common-definitions.json#/definitions/ClassificationOutputTransform"
+        },
+        "topK": {
+          "type": "integer",
+          "description": "When specified, limits the output to the top K most probable classes."
+        }
+      }
+    }
+  },
+  "required": ["data"]
+}

package/src/tasks/image-classification/spec/output.json
@@ -0,0 +1,10 @@
+{
+  "$id": "/inference/schemas/image-classification/output.json",
+  "$schema": "http://json-schema.org/draft-06/schema#",
+  "description": "Outputs of inference for the Image Classification task",
+  "title": "ImageClassificationOutput",
+  "type": "array",
+  "items": {
+    "$ref": "/inference/schemas/common-definitions.json#/definitions/ClassificationOutput"
+  }
+}
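
A short sketch of the Image Classification shapes above: functionToApply takes one of "sigmoid" | "softmax" | "none", and each output element is a label/score pair (the shared ClassificationOutput definition referenced in common-definitions.json). Types are mirrored locally; the labels and scores are made up.

// Local mirrors of the generated Image Classification types above.
type ClassificationOutputTransform = "sigmoid" | "softmax" | "none";

interface ImageClassificationParameters {
    functionToApply?: ClassificationOutputTransform;
    topK?: number;
    [property: string]: unknown;
}

interface ImageClassificationInput {
    data: unknown;
    parameters?: ImageClassificationParameters;
    [property: string]: unknown;
}

interface ImageClassificationOutputElement {
    label: string;
    score: number;
    [property: string]: unknown;
}

// Ask for softmax scores and only the 3 most probable classes.
const imageInput: ImageClassificationInput = {
    data: new Uint8Array(), // placeholder image bytes
    parameters: { functionToApply: "softmax", topK: 3 },
};

// Made-up label/score pairs matching the shared ClassificationOutput shape.
const classes: ImageClassificationOutputElement[] = [
    { label: "tabby cat", score: 0.82 },
    { label: "tiger cat", score: 0.11 },
    { label: "lynx", score: 0.03 },
];

console.log(imageInput.parameters?.topK, classes.map((c) => `${c.label}: ${c.score}`).join(", "));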

package/src/tasks/image-segmentation/inference.ts
@@ -0,0 +1,65 @@
+/**
+ * Inference code generated from the JSON schema spec in ./spec
+ *
+ * Using src/scripts/inference-codegen
+ */
+/**
+ * Inputs for Image Segmentation inference
+ */
+export interface ImageSegmentationInput {
+    /**
+     * The input image data
+     */
+    data: unknown;
+    /**
+     * Additional inference parameters
+     */
+    parameters?: ImageSegmentationParameters;
+    [property: string]: unknown;
+}
+/**
+ * Additional inference parameters
+ *
+ * Additional inference parameters for Image Segmentation
+ */
+export interface ImageSegmentationParameters {
+    /**
+     * Threshold to use when turning the predicted masks into binary values.
+     */
+    maskThreshold?: number;
+    /**
+     * Mask overlap threshold to eliminate small, disconnected segments.
+     */
+    overlapMaskAreaThreshold?: number;
+    /**
+     * Segmentation task to be performed, depending on model capabilities.
+     */
+    subtask?: ImageSegmentationSubtask;
+    /**
+     * Probability threshold to filter out predicted masks.
+     */
+    threshold?: number;
+    [property: string]: unknown;
+}
+export type ImageSegmentationSubtask = "instance" | "panoptic" | "semantic";
+export type ImageSegmentationOutput = ImageSegmentationOutputElement[];
+/**
+ * Outputs of inference for the Image Segmentation task
+ *
+ * A predicted mask / segment
+ */
+export interface ImageSegmentationOutputElement {
+    /**
+     * The label of the predicted segment
+     */
+    label: string;
+    /**
+     * The corresponding mask as a black-and-white image
+     */
+    mask: unknown;
+    /**
+     * The score or confidence degreee the model has
+     */
+    score?: number;
+    [property: string]: unknown;
+}
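
A minimal sketch of how the Image Segmentation types above compose: the subtask union selects instance, panoptic, or semantic segmentation, and each output element carries a label, an opaque mask, and an optional score. Types are mirrored locally; the segment labels, thresholds, and placeholder masks are invented for illustration.

// Local mirrors of the generated Image Segmentation types above.
type ImageSegmentationSubtask = "instance" | "panoptic" | "semantic";

interface ImageSegmentationParameters {
    maskThreshold?: number;
    overlapMaskAreaThreshold?: number;
    subtask?: ImageSegmentationSubtask;
    threshold?: number;
    [property: string]: unknown;
}

interface ImageSegmentationInput {
    data: unknown;
    parameters?: ImageSegmentationParameters;
    [property: string]: unknown;
}

interface ImageSegmentationOutputElement {
    label: string;
    mask: unknown;
    score?: number;
    [property: string]: unknown;
}

// Request semantic segmentation, binarizing masks at 0.5 and dropping low-confidence ones.
const segmentationInput: ImageSegmentationInput = {
    data: new Uint8Array(), // placeholder image bytes
    parameters: { subtask: "semantic", maskThreshold: 0.5, threshold: 0.9 },
};

// Made-up segments; each mask would be a black-and-white image in a real response.
const segments: ImageSegmentationOutputElement[] = [
    { label: "road", mask: new Uint8Array(), score: 0.98 },
    { label: "sky", mask: new Uint8Array(), score: 0.95 },
];

const confident = segments.filter((s) => (s.score ?? 0) >= 0.9).map((s) => s.label);
console.log(segmentationInput.parameters?.subtask, confident);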