@huggingface/tasks 0.2.2 → 0.3.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (79)
  1. package/README.md +1 -1
  2. package/dist/index.cjs +3144 -3085
  3. package/dist/index.d.ts +441 -74
  4. package/dist/index.js +3143 -3084
  5. package/package.json +1 -1
  6. package/src/index.ts +2 -5
  7. package/src/library-to-tasks.ts +1 -1
  8. package/src/model-libraries-downloads.ts +20 -0
  9. package/src/{library-ui-elements.ts → model-libraries-snippets.ts} +46 -292
  10. package/src/model-libraries.ts +375 -44
  11. package/src/pipelines.ts +14 -8
  12. package/src/tasks/audio-classification/inference.ts +4 -4
  13. package/src/tasks/audio-classification/spec/input.json +4 -4
  14. package/src/tasks/audio-classification/spec/output.json +1 -12
  15. package/src/tasks/automatic-speech-recognition/inference.ts +35 -30
  16. package/src/tasks/automatic-speech-recognition/spec/input.json +3 -3
  17. package/src/tasks/automatic-speech-recognition/spec/output.json +30 -28
  18. package/src/tasks/common-definitions.json +25 -17
  19. package/src/tasks/depth-estimation/inference.ts +10 -10
  20. package/src/tasks/depth-estimation/spec/input.json +3 -8
  21. package/src/tasks/depth-estimation/spec/output.json +9 -3
  22. package/src/tasks/document-question-answering/inference.ts +16 -8
  23. package/src/tasks/document-question-answering/spec/input.json +9 -9
  24. package/src/tasks/document-question-answering/spec/output.json +2 -2
  25. package/src/tasks/feature-extraction/inference.ts +1 -1
  26. package/src/tasks/feature-extraction/spec/input.json +2 -2
  27. package/src/tasks/fill-mask/inference.ts +4 -3
  28. package/src/tasks/fill-mask/spec/input.json +3 -3
  29. package/src/tasks/fill-mask/spec/output.json +1 -1
  30. package/src/tasks/image-classification/inference.ts +3 -3
  31. package/src/tasks/image-classification/spec/input.json +4 -4
  32. package/src/tasks/image-segmentation/inference.ts +3 -3
  33. package/src/tasks/image-segmentation/spec/input.json +4 -4
  34. package/src/tasks/image-to-image/inference.ts +5 -5
  35. package/src/tasks/image-to-image/spec/input.json +9 -7
  36. package/src/tasks/image-to-text/inference.ts +25 -20
  37. package/src/tasks/image-to-text/spec/input.json +3 -3
  38. package/src/tasks/image-to-text/spec/output.json +8 -11
  39. package/src/tasks/index.ts +2 -0
  40. package/src/tasks/object-detection/inference.ts +1 -1
  41. package/src/tasks/object-detection/spec/input.json +2 -2
  42. package/src/tasks/placeholder/spec/input.json +4 -4
  43. package/src/tasks/placeholder/spec/output.json +1 -1
  44. package/src/tasks/question-answering/inference.ts +8 -8
  45. package/src/tasks/question-answering/spec/input.json +9 -9
  46. package/src/tasks/sentence-similarity/inference.ts +1 -1
  47. package/src/tasks/sentence-similarity/spec/input.json +2 -2
  48. package/src/tasks/summarization/inference.ts +5 -4
  49. package/src/tasks/table-question-answering/inference.ts +1 -1
  50. package/src/tasks/table-question-answering/spec/input.json +8 -3
  51. package/src/tasks/text-classification/inference.ts +3 -3
  52. package/src/tasks/text-classification/spec/input.json +4 -4
  53. package/src/tasks/text-generation/inference.ts +123 -14
  54. package/src/tasks/text-generation/spec/input.json +28 -12
  55. package/src/tasks/text-generation/spec/output.json +112 -9
  56. package/src/tasks/text-to-audio/inference.ts +24 -19
  57. package/src/tasks/text-to-audio/spec/input.json +2 -2
  58. package/src/tasks/text-to-audio/spec/output.json +10 -13
  59. package/src/tasks/text-to-image/inference.ts +6 -8
  60. package/src/tasks/text-to-image/spec/input.json +9 -7
  61. package/src/tasks/text-to-image/spec/output.json +7 -9
  62. package/src/tasks/text-to-speech/inference.ts +18 -17
  63. package/src/tasks/text2text-generation/inference.ts +10 -8
  64. package/src/tasks/text2text-generation/spec/input.json +4 -4
  65. package/src/tasks/text2text-generation/spec/output.json +8 -11
  66. package/src/tasks/token-classification/inference.ts +4 -4
  67. package/src/tasks/token-classification/spec/input.json +4 -4
  68. package/src/tasks/token-classification/spec/output.json +1 -1
  69. package/src/tasks/translation/inference.ts +5 -4
  70. package/src/tasks/video-classification/inference.ts +5 -5
  71. package/src/tasks/video-classification/spec/input.json +6 -6
  72. package/src/tasks/visual-question-answering/inference.ts +2 -2
  73. package/src/tasks/visual-question-answering/spec/input.json +3 -3
  74. package/src/tasks/zero-shot-classification/inference.ts +3 -3
  75. package/src/tasks/zero-shot-classification/spec/input.json +4 -4
  76. package/src/tasks/zero-shot-image-classification/inference.ts +2 -2
  77. package/src/tasks/zero-shot-image-classification/spec/input.json +3 -3
  78. package/src/tasks/zero-shot-object-detection/inference.ts +1 -1
  79. package/src/tasks/zero-shot-object-detection/spec/input.json +2 -2
package/src/tasks/automatic-speech-recognition/inference.ts

@@ -3,6 +3,7 @@
  *
  * Using src/scripts/inference-codegen
  */
+
  /**
  * Inputs for Automatic Speech Recognition inference
  */
@@ -10,13 +11,14 @@ export interface AutomaticSpeechRecognitionInput {
  /**
  * The input audio data
  */
- data: unknown;
+ inputs: unknown;
  /**
  * Additional inference parameters
  */
  parameters?: AutomaticSpeechRecognitionParameters;
  [property: string]: unknown;
  }
+
  /**
  * Additional inference parameters
  *
@@ -30,9 +32,10 @@ export interface AutomaticSpeechRecognitionParameters {
  /**
  * Whether to output corresponding timestamps with the generated text
  */
- returnTimestamps?: boolean;
+ return_timestamps?: boolean;
  [property: string]: unknown;
  }
+
  /**
  * Parametrization of the text generation process
  *
@@ -42,18 +45,18 @@ export interface GenerationParameters {
  /**
  * Whether to use sampling instead of greedy decoding when generating new tokens.
  */
- doSample?: boolean;
+ do_sample?: boolean;
  /**
  * Controls the stopping condition for beam-based methods.
  */
- earlyStopping?: EarlyStoppingUnion;
+ early_stopping?: EarlyStoppingUnion;
  /**
  * If set to float strictly between 0 and 1, only tokens with a conditional probability
  * greater than epsilon_cutoff will be sampled. In the paper, suggested values range from
  * 3e-4 to 9e-4, depending on the size of the model. See [Truncation Sampling as Language
  * Model Desmoothing](https://hf.co/papers/2210.15191) for more details.
  */
- epsilonCutoff?: number;
+ epsilon_cutoff?: number;
  /**
  * Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to
  * float strictly between 0 and 1, a token is only considered if it is greater than either
@@ -63,37 +66,37 @@ export interface GenerationParameters {
  * See [Truncation Sampling as Language Model Desmoothing](https://hf.co/papers/2210.15191)
  * for more details.
  */
- etaCutoff?: number;
+ eta_cutoff?: number;
  /**
  * The maximum length (in tokens) of the generated text, including the input.
  */
- maxLength?: number;
+ max_length?: number;
  /**
  * The maximum number of tokens to generate. Takes precedence over maxLength.
  */
- maxNewTokens?: number;
+ max_new_tokens?: number;
  /**
  * The minimum length (in tokens) of the generated text, including the input.
  */
- minLength?: number;
+ min_length?: number;
  /**
  * The minimum number of tokens to generate. Takes precedence over maxLength.
  */
- minNewTokens?: number;
+ min_new_tokens?: number;
  /**
  * Number of groups to divide num_beams into in order to ensure diversity among different
  * groups of beams. See [this paper](https://hf.co/papers/1610.02424) for more details.
  */
- numBeamGroups?: number;
+ num_beam_groups?: number;
  /**
  * Number of beams to use for beam search.
  */
- numBeams?: number;
+ num_beams?: number;
  /**
  * The value balances the model confidence and the degeneration penalty in contrastive
  * search decoding.
  */
- penaltyAlpha?: number;
+ penalty_alpha?: number;
  /**
  * The value used to modulate the next token probabilities.
  */
@@ -101,12 +104,12 @@ export interface GenerationParameters {
  /**
  * The number of highest probability vocabulary tokens to keep for top-k-filtering.
  */
- topK?: number;
+ top_k?: number;
  /**
  * If set to float < 1, only the smallest set of most probable tokens with probabilities
  * that add up to top_p or higher are kept for generation.
  */
- topP?: number;
+ top_p?: number;
  /**
  * Local typicality measures how similar the conditional probability of predicting a target
  * token next is to the expected conditional probability of predicting a random token next,
@@ -114,33 +117,23 @@ export interface GenerationParameters {
  * most locally typical tokens with probabilities that add up to typical_p or higher are
  * kept for generation. See [this paper](https://hf.co/papers/2202.00666) for more details.
  */
- typicalP?: number;
+ typical_p?: number;
  /**
  * Whether the model should use the past last key/values attentions to speed up decoding
  */
- useCache?: boolean;
+ use_cache?: boolean;
  [property: string]: unknown;
  }
+
  /**
  * Controls the stopping condition for beam-based methods.
  */
  export type EarlyStoppingUnion = boolean | "never";
- export interface AutomaticSpeechRecognitionOutputChunk {
- /**
- * A chunk of text identified by the model
- */
- text: string;
- /**
- * The start and end timestamps corresponding with the text
- */
- timestamps: number[];
- [property: string]: unknown;
- }
- export type AutomaticSpeechRecognitionOutput = AutomaticSpeechRecognitionOutputElement[];
+
  /**
  * Outputs of inference for the Automatic Speech Recognition task
  */
- export interface AutomaticSpeechRecognitionOutputElement {
+ export interface AutomaticSpeechRecognitionOutput {
  /**
  * When returnTimestamps is enabled, chunks contains a list of audio chunks identified by
  * the model.
@@ -152,3 +145,15 @@ export interface AutomaticSpeechRecognitionOutputElement {
  text: string;
  [property: string]: unknown;
  }
+
+ export interface AutomaticSpeechRecognitionOutputChunk {
+ /**
+ * A chunk of text identified by the model
+ */
+ text: string;
+ /**
+ * The start and end timestamps corresponding with the text
+ */
+ timestamps: number[];
+ [property: string]: unknown;
+ }
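Taken together these are breaking changes for consumers of the generated ASR types: the request key is now `inputs` (was `data`), parameter names are snake_case, and the output is a single object rather than an array of elements. A minimal sketch of the new shapes, assuming the types are re-exported from the package root as in 0.2.x (`audioBlob` is a hypothetical payload):

```ts
import type {
	AutomaticSpeechRecognitionInput,
	AutomaticSpeechRecognitionOutput,
} from "@huggingface/tasks";

declare const audioBlob: Blob; // hypothetical audio payload; the field is typed `unknown`

// Request: `inputs` replaces `data`, and parameters are snake_case.
const request: AutomaticSpeechRecognitionInput = {
	inputs: audioBlob,
	parameters: { return_timestamps: true },
};

// Response: a single object in 0.3.x, no longer an array.
function readTranscript(output: AutomaticSpeechRecognitionOutput): string {
	return output.text;
}
```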
package/src/tasks/automatic-speech-recognition/spec/input.json

@@ -5,7 +5,7 @@
  "title": "AutomaticSpeechRecognitionInput",
  "type": "object",
  "properties": {
- "data": {
+ "inputs": {
  "description": "The input audio data"
  },
  "parameters": {
@@ -19,7 +19,7 @@
  "description": "Additional inference parameters for Automatic Speech Recognition",
  "type": "object",
  "properties": {
- "returnTimestamps": {
+ "return_timestamps": {
  "type": "boolean",
  "description": "Whether to output corresponding timestamps with the generated text"
  },
@@ -30,5 +30,5 @@
  }
  }
  },
- "required": ["data"]
+ "required": ["inputs"]
  }
package/src/tasks/automatic-speech-recognition/spec/output.json

@@ -3,34 +3,36 @@
  "$schema": "http://json-schema.org/draft-06/schema#",
  "description": "Outputs of inference for the Automatic Speech Recognition task",
  "title": "AutomaticSpeechRecognitionOutput",
- "type": "array",
- "items": {
- "type": "object",
- "properties": {
- "text": {
- "type": "string",
- "description": "The recognized text."
- },
- "chunks": {
- "type": "array",
- "description": "When returnTimestamps is enabled, chunks contains a list of audio chunks identified by the model.",
- "items": {
- "type": "object",
- "title": "AutomaticSpeechRecognitionOutputChunk",
- "properties": {
- "text": { "type": "string", "description": "A chunk of text identified by the model" },
- "timestamps": {
- "type": "array",
- "description": "The start and end timestamps corresponding with the text",
- "items": { "type": "number" },
- "minLength": 2,
- "maxLength": 2
- }
+ "type": "object",
+ "properties": {
+ "text": {
+ "type": "string",
+ "description": "The recognized text."
+ },
+ "chunks": {
+ "type": "array",
+ "description": "When returnTimestamps is enabled, chunks contains a list of audio chunks identified by the model.",
+ "items": {
+ "type": "object",
+ "title": "AutomaticSpeechRecognitionOutputChunk",
+ "properties": {
+ "text": {
+ "type": "string",
+ "description": "A chunk of text identified by the model"
  },
- "required": ["text", "timestamps"]
- }
+ "timestamps": {
+ "type": "array",
+ "description": "The start and end timestamps corresponding with the text",
+ "items": {
+ "type": "number"
+ },
+ "minLength": 2,
+ "maxLength": 2
+ }
+ },
+ "required": ["text", "timestamps"]
  }
- },
- "required": ["text"]
- }
+ }
+ },
+ "required": ["text"]
  }
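Concretely, a response that validates against the new object-shaped schema would look like this (illustrative values; each chunk carries a [start, end] timestamp pair):

```ts
import type { AutomaticSpeechRecognitionOutput } from "@huggingface/tasks";

// `chunks` is only present when return_timestamps is enabled.
const response: AutomaticSpeechRecognitionOutput = {
	text: "hello world",
	chunks: [
		{ text: "hello", timestamps: [0.0, 0.6] },
		{ text: "world", timestamps: [0.6, 1.2] },
	],
};
```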
package/src/tasks/common-definitions.json

@@ -43,63 +43,71 @@
  "type": "number",
  "description": "The value used to modulate the next token probabilities."
  },
- "topK": {
+ "top_k": {
  "type": "integer",
  "description": "The number of highest probability vocabulary tokens to keep for top-k-filtering."
  },
- "topP": {
+ "top_p": {
  "type": "number",
  "description": "If set to float < 1, only the smallest set of most probable tokens with probabilities that add up to top_p or higher are kept for generation."
  },
- "typicalP": {
+ "typical_p": {
  "type": "number",
  "description": " Local typicality measures how similar the conditional probability of predicting a target token next is to the expected conditional probability of predicting a random token next, given the partial text already generated. If set to float < 1, the smallest set of the most locally typical tokens with probabilities that add up to typical_p or higher are kept for generation. See [this paper](https://hf.co/papers/2202.00666) for more details."
  },
- "epsilonCutoff": {
+ "epsilon_cutoff": {
  "type": "number",
  "description": "If set to float strictly between 0 and 1, only tokens with a conditional probability greater than epsilon_cutoff will be sampled. In the paper, suggested values range from 3e-4 to 9e-4, depending on the size of the model. See [Truncation Sampling as Language Model Desmoothing](https://hf.co/papers/2210.15191) for more details."
  },
- "etaCutoff": {
+ "eta_cutoff": {
  "type": "number",
  "description": "Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to float strictly between 0 and 1, a token is only considered if it is greater than either eta_cutoff or sqrt(eta_cutoff) * exp(-entropy(softmax(next_token_logits))). The latter term is intuitively the expected next token probability, scaled by sqrt(eta_cutoff). In the paper, suggested values range from 3e-4 to 2e-3, depending on the size of the model. See [Truncation Sampling as Language Model Desmoothing](https://hf.co/papers/2210.15191) for more details."
  },
- "maxLength": {
+ "max_length": {
  "type": "integer",
  "description": "The maximum length (in tokens) of the generated text, including the input."
  },
- "maxNewTokens": {
+ "max_new_tokens": {
  "type": "integer",
  "description": "The maximum number of tokens to generate. Takes precedence over maxLength."
  },
- "minLength": {
+ "min_length": {
  "type": "integer",
  "description": "The minimum length (in tokens) of the generated text, including the input."
  },
- "minNewTokens": {
+ "min_new_tokens": {
  "type": "integer",
  "description": "The minimum number of tokens to generate. Takes precedence over maxLength."
  },
- "doSample": {
+ "do_sample": {
  "type": "boolean",
  "description": "Whether to use sampling instead of greedy decoding when generating new tokens."
  },
- "earlyStopping": {
+ "early_stopping": {
  "description": "Controls the stopping condition for beam-based methods.",
- "oneOf": [{ "type": "boolean" }, { "const": "never", "type": "string" }]
- },
- "numBeams": {
+ "oneOf": [
+ {
+ "type": "boolean"
+ },
+ {
+ "const": "never",
+ "type": "string"
+ }
+ ]
+ },
+ "num_beams": {
  "type": "integer",
  "description": "Number of beams to use for beam search."
  },
- "numBeamGroups": {
+ "num_beam_groups": {
  "type": "integer",
  "description": "Number of groups to divide num_beams into in order to ensure diversity among different groups of beams. See [this paper](https://hf.co/papers/1610.02424) for more details."
  },
- "penaltyAlpha": {
+ "penalty_alpha": {
  "type": "number",
  "description": "The value balances the model confidence and the degeneration penalty in contrastive search decoding."
  },
- "useCache": {
+ "use_cache": {
  "type": "boolean",
  "description": "Whether the model should use the past last key/values attentions to speed up decoding"
  }
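The same camelCase → snake_case rename runs through the shared GenerationParameters definition, so the generated property names now match the underlying transformers generate() keyword arguments one-to-one. A hedged sketch, assuming GenerationParameters is re-exported alongside the task-level types:

```ts
import type { GenerationParameters } from "@huggingface/tasks";

// 0.2.x: { doSample, topK, topP, maxNewTokens, earlyStopping }
// 0.3.x: snake_case throughout, matching the generate() kwargs.
const generation: GenerationParameters = {
	do_sample: true,
	top_k: 50,
	top_p: 0.95,
	max_new_tokens: 256,
	early_stopping: "never", // EarlyStoppingUnion: boolean | "never"
};
```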
package/src/tasks/depth-estimation/inference.ts

@@ -4,8 +4,6 @@
  * Using src/scripts/inference-codegen
  */

- export type DepthEstimationOutput = unknown[];
-
  /**
  * Inputs for Depth Estimation inference
  */
@@ -13,23 +11,25 @@ export interface DepthEstimationInput {
  /**
  * The input image data
  */
- data: unknown;
+ inputs: unknown;
  /**
  * Additional inference parameters
  */
- parameters?: DepthEstimationParameters;
+ parameters?: { [key: string]: unknown };
  [property: string]: unknown;
  }

  /**
- * Additional inference parameters
- *
- * Additional inference parameters for Depth Estimation
+ * Outputs of inference for the Depth Estimation task
  */
- export interface DepthEstimationParameters {
+ export interface DepthEstimationOutput {
+ /**
+ * The predicted depth as an image
+ */
+ depth?: unknown;
  /**
- * When specified, limits the output to the top K most probable classes.
+ * The predicted depth as a tensor
  */
- topK?: number;
+ predicted_depth?: unknown;
  [property: string]: unknown;
  }
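Depth estimation changes in both directions: the input loses its only parameter (the classification-style topK, which did not apply to this task) and becomes an open key/value map, while the output goes from an untyped array to a structured object. A sketch of the new shapes; both output fields are typed `unknown`, so the narrowing below is illustrative:

```ts
import type { DepthEstimationInput, DepthEstimationOutput } from "@huggingface/tasks";

declare const imageBlob: Blob; // hypothetical image payload

// `inputs` replaces `data`; `parameters` is now an open map.
const request: DepthEstimationInput = { inputs: imageBlob };

function readDepth(output: DepthEstimationOutput): void {
	// Both fields are optional and `unknown`: `depth` is the predicted
	// depth as an image, `predicted_depth` as a tensor.
	if (output.depth !== undefined) {
		/* decode image */
	}
	if (output.predicted_depth !== undefined) {
		/* consume tensor */
	}
}
```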
package/src/tasks/depth-estimation/spec/input.json

@@ -5,7 +5,7 @@
  "title": "DepthEstimationInput",
  "type": "object",
  "properties": {
- "data": {
+ "inputs": {
  "description": "The input image data"
  },
  "parameters": {
@@ -18,13 +18,8 @@
  "title": "DepthEstimationParameters",
  "description": "Additional inference parameters for Depth Estimation",
  "type": "object",
- "properties": {
- "topK": {
- "type": "integer",
- "description": "When specified, limits the output to the top K most probable classes."
- }
- }
+ "properties": {}
  }
  },
- "required": ["data"]
+ "required": ["inputs"]
  }
package/src/tasks/depth-estimation/spec/output.json

@@ -3,8 +3,14 @@
  "$schema": "http://json-schema.org/draft-06/schema#",
  "description": "Outputs of inference for the Depth Estimation task",
  "title": "DepthEstimationOutput",
- "type": "array",
- "items": {
- "description": "The output depth labels"
+
+ "type": "object",
+ "properties": {
+ "predicted_depth": {
+ "description": "The predicted depth as a tensor"
+ },
+ "depth": {
+ "description": "The predicted depth as an image"
+ }
  }
  }
package/src/tasks/document-question-answering/inference.ts

@@ -10,7 +10,7 @@ export interface DocumentQuestionAnsweringInput {
  /**
  * One (document, question) pair to answer
  */
- data: DocumentQuestionAnsweringInputData;
+ inputs: DocumentQuestionAnsweringInputData;
  /**
  * Additional inference parameters
  */
@@ -42,11 +42,11 @@ export interface DocumentQuestionAnsweringParameters {
  * be split in several chunks with some overlap. This argument controls the size of that
  * overlap.
  */
- docStride?: number;
+ doc_stride?: number;
  /**
  * Whether to accept impossible as an answer
  */
- handleImpossibleAnswer?: boolean;
+ handle_impossible_answer?: boolean;
  /**
  * Language to use while running OCR. Defaults to english.
  */
@@ -55,27 +55,27 @@ export interface DocumentQuestionAnsweringParameters {
  * The maximum length of predicted answers (e.g., only answers with a shorter length are
  * considered).
  */
- maxAnswerLen?: number;
+ max_answer_len?: number;
  /**
  * The maximum length of the question after tokenization. It will be truncated if needed.
  */
- maxQuestionLen?: number;
+ max_question_len?: number;
  /**
  * The maximum length of the total sentence (context + question) in tokens of each chunk
  * passed to the model. The context will be split in several chunks (using doc_stride as
  * overlap) if needed.
  */
- maxSeqLen?: number;
+ max_seq_len?: number;
  /**
  * The number of answers to return (will be chosen by order of likelihood). Can return less
  * than top_k answers if there are not enough options available within the context.
  */
- topK?: number;
+ top_k?: number;
  /**
  * A list of words and bounding boxes (normalized 0->1000). If provided, the inference will
  * skip the OCR step and use the provided bounding boxes instead.
  */
- wordBoxes?: WordBox[];
+ word_boxes?: WordBox[];
  [property: string]: unknown;
  }
  export type WordBox = number[] | string;
@@ -88,11 +88,19 @@ export interface DocumentQuestionAnsweringOutputElement {
  * The answer to the question.
  */
  answer: string;
+ /**
+ * The end word index of the answer (in the OCR’d version of the input or provided word
+ * boxes).
+ */
  end: number;
  /**
  * The probability associated to the answer.
  */
  score: number;
+ /**
+ * The start word index of the answer (in the OCR’d version of the input or provided word
+ * boxes).
+ */
  start: number;
  /**
  * The index of each word/box pair that is in the answer
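As elsewhere, document question answering renames `data` to `inputs` and snake_cases its parameters, and the output gains documentation for the `start`/`end` word indices. A sketch of a request under the new names; the `{ image, question }` shape of `inputs` and the mixed word/box list are assumptions based on the spec's descriptions:

```ts
import type { DocumentQuestionAnsweringInput } from "@huggingface/tasks";

declare const documentImage: Blob; // hypothetical scanned document

const request: DocumentQuestionAnsweringInput = {
	// One (document, question) pair to answer.
	inputs: { image: documentImage, question: "What is the invoice total?" },
	parameters: {
		doc_stride: 128, // overlap between chunks when the document is split
		max_answer_len: 15,
		top_k: 3,
		// Words and bounding boxes (normalized 0->1000); providing them skips OCR.
		word_boxes: ["Total", [712, 920, 788, 952]],
	},
};
```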
package/src/tasks/document-question-answering/spec/input.json

@@ -5,7 +5,7 @@
  "title": "DocumentQuestionAnsweringInput",
  "type": "object",
  "properties": {
- "data": {
+ "inputs": {
  "description": "One (document, question) pair to answer",
  "type": "object",
  "title": "DocumentQuestionAnsweringInputData",
@@ -31,11 +31,11 @@
  "description": "Additional inference parameters for Document Question Answering",
  "type": "object",
  "properties": {
- "docStride": {
+ "doc_stride": {
  "type": "integer",
  "description": "If the words in the document are too long to fit with the question for the model, it will be split in several chunks with some overlap. This argument controls the size of that overlap."
  },
- "handleImpossibleAnswer": {
+ "handle_impossible_answer": {
  "type": "boolean",
  "description": "Whether to accept impossible as an answer"
  },
@@ -43,23 +43,23 @@
  "type": "string",
  "description": "Language to use while running OCR. Defaults to english."
  },
- "maxAnswerLen": {
+ "max_answer_len": {
  "type": "integer",
  "description": "The maximum length of predicted answers (e.g., only answers with a shorter length are considered)."
  },
- "maxSeqLen": {
+ "max_seq_len": {
  "type": "integer",
  "description": "The maximum length of the total sentence (context + question) in tokens of each chunk passed to the model. The context will be split in several chunks (using doc_stride as overlap) if needed."
  },
- "maxQuestionLen": {
+ "max_question_len": {
  "type": "integer",
  "description": "The maximum length of the question after tokenization. It will be truncated if needed."
  },
- "topK": {
+ "top_k": {
  "type": "integer",
  "description": "The number of answers to return (will be chosen by order of likelihood). Can return less than top_k answers if there are not enough options available within the context."
  },
- "wordBoxes": {
+ "word_boxes": {
  "type": "array",
  "description": "A list of words and bounding boxes (normalized 0->1000). If provided, the inference will skip the OCR step and use the provided bounding boxes instead.",
  "items": {
@@ -81,5 +81,5 @@
  }
  }
  },
- "required": ["data"]
+ "required": ["inputs"]
  }
package/src/tasks/document-question-answering/spec/output.json

@@ -17,11 +17,11 @@
  },
  "start": {
  "type": "integer",
- "descrtiption": "The start word index of the answer (in the OCR’d version of the input or provided word boxes)."
+ "description": "The start word index of the answer (in the OCR\u2019d version of the input or provided word boxes)."
  },
  "end": {
  "type": "integer",
- "descrtiption": "The end word index of the answer (in the OCR’d version of the input or provided word boxes)."
+ "description": "The end word index of the answer (in the OCR\u2019d version of the input or provided word boxes)."
  },
  "words": {
  "type": "array",
package/src/tasks/feature-extraction/inference.ts

@@ -13,7 +13,7 @@ export interface FeatureExtractionInput {
  /**
  * The text to get the embeddings of
  */
- data: string;
+ inputs: string;
  /**
  * Additional inference parameters
  */
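Feature extraction only renames the input key; for example:

```ts
import type { FeatureExtractionInput } from "@huggingface/tasks";

// 0.2.x: { data: "..." } — 0.3.x: { inputs: "..." }
const request: FeatureExtractionInput = { inputs: "Today is a sunny day" };
```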
package/src/tasks/feature-extraction/spec/input.json

@@ -5,7 +5,7 @@
  "title": "FeatureExtractionInput",
  "type": "object",
  "properties": {
- "data": {
+ "inputs": {
  "description": "The text to get the embeddings of",
  "type": "string"
  },
@@ -22,5 +22,5 @@
  "properties": {}
  }
  },
- "required": ["data"]
+ "required": ["inputs"]
  }
package/src/tasks/fill-mask/inference.ts

@@ -10,7 +10,7 @@ export interface FillMaskInput {
  /**
  * The text with masked tokens
  */
- data: string;
+ inputs: string;
  /**
  * Additional inference parameters
  */
@@ -33,7 +33,7 @@ export interface FillMaskParameters {
  /**
  * When passed, overrides the number of predictions to return.
  */
- topK?: number;
+ top_k?: number;
  [property: string]: unknown;
  }
  export type FillMaskOutput = FillMaskOutputElement[];
@@ -53,9 +53,10 @@ export interface FillMaskOutputElement {
  * The predicted token id (to replace the masked one).
  */
  token: number;
+ tokenStr: unknown;
  /**
  * The predicted token (to replace the masked one).
  */
- tokenStr: string;
+ token_str?: string;
  [property: string]: unknown;
  }
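For fill-mask, besides the `inputs` and `top_k` renames, the predicted token string moves from a required camelCase `tokenStr` to an optional snake_case `token_str` (the old key survives only as an `unknown`-typed placeholder). Reading the top prediction defensively, as a sketch:

```ts
import type { FillMaskOutput } from "@huggingface/tasks";

// `token_str` is optional in 0.3.x, so handle its absence explicitly.
function topPrediction(output: FillMaskOutput): string | undefined {
	return output[0]?.token_str;
}
```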
package/src/tasks/fill-mask/spec/input.json

@@ -5,7 +5,7 @@
  "title": "FillMaskInput",
  "type": "object",
  "properties": {
- "data": {
+ "inputs": {
  "description": "The text with masked tokens",
  "type": "string"
  },
@@ -20,7 +20,7 @@
  "description": "Additional inference parameters for Fill Mask",
  "type": "object",
  "properties": {
- "topK": {
+ "top_k": {
  "type": "integer",
  "description": "When passed, overrides the number of predictions to return."
  },
@@ -34,5 +34,5 @@
  }
  }
  },
- "required": ["data"]
+ "required": ["inputs"]
  }