@huggingface/tasks 0.2.1 → 0.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (123) hide show
  1. package/README.md +1 -1
  2. package/dist/{index.mjs → index.cjs} +2695 -2497
  3. package/dist/index.d.ts +427 -65
  4. package/dist/index.js +2660 -2532
  5. package/package.json +13 -8
  6. package/src/index.ts +2 -5
  7. package/src/library-to-tasks.ts +1 -1
  8. package/src/model-data.ts +1 -1
  9. package/src/model-libraries-downloads.ts +20 -0
  10. package/src/{library-ui-elements.ts → model-libraries-snippets.ts} +50 -296
  11. package/src/model-libraries.ts +375 -44
  12. package/src/pipelines.ts +1 -1
  13. package/src/tasks/audio-classification/about.md +1 -1
  14. package/src/tasks/audio-classification/inference.ts +51 -0
  15. package/src/tasks/audio-classification/spec/input.json +34 -0
  16. package/src/tasks/audio-classification/spec/output.json +10 -0
  17. package/src/tasks/audio-to-audio/about.md +1 -1
  18. package/src/tasks/automatic-speech-recognition/about.md +4 -2
  19. package/src/tasks/automatic-speech-recognition/inference.ts +159 -0
  20. package/src/tasks/automatic-speech-recognition/spec/input.json +34 -0
  21. package/src/tasks/automatic-speech-recognition/spec/output.json +38 -0
  22. package/src/tasks/common-definitions.json +117 -0
  23. package/src/tasks/depth-estimation/data.ts +8 -4
  24. package/src/tasks/depth-estimation/inference.ts +35 -0
  25. package/src/tasks/depth-estimation/spec/input.json +25 -0
  26. package/src/tasks/depth-estimation/spec/output.json +16 -0
  27. package/src/tasks/document-question-answering/inference.ts +110 -0
  28. package/src/tasks/document-question-answering/spec/input.json +85 -0
  29. package/src/tasks/document-question-answering/spec/output.json +36 -0
  30. package/src/tasks/feature-extraction/inference.ts +22 -0
  31. package/src/tasks/feature-extraction/spec/input.json +26 -0
  32. package/src/tasks/feature-extraction/spec/output.json +7 -0
  33. package/src/tasks/fill-mask/inference.ts +62 -0
  34. package/src/tasks/fill-mask/spec/input.json +38 -0
  35. package/src/tasks/fill-mask/spec/output.json +29 -0
  36. package/src/tasks/image-classification/inference.ts +51 -0
  37. package/src/tasks/image-classification/spec/input.json +34 -0
  38. package/src/tasks/image-classification/spec/output.json +10 -0
  39. package/src/tasks/image-segmentation/inference.ts +65 -0
  40. package/src/tasks/image-segmentation/spec/input.json +54 -0
  41. package/src/tasks/image-segmentation/spec/output.json +25 -0
  42. package/src/tasks/image-to-image/inference.ts +67 -0
  43. package/src/tasks/image-to-image/spec/input.json +54 -0
  44. package/src/tasks/image-to-image/spec/output.json +12 -0
  45. package/src/tasks/image-to-text/inference.ts +143 -0
  46. package/src/tasks/image-to-text/spec/input.json +34 -0
  47. package/src/tasks/image-to-text/spec/output.json +14 -0
  48. package/src/tasks/index.ts +5 -2
  49. package/src/tasks/mask-generation/about.md +65 -0
  50. package/src/tasks/mask-generation/data.ts +42 -5
  51. package/src/tasks/object-detection/inference.ts +62 -0
  52. package/src/tasks/object-detection/spec/input.json +30 -0
  53. package/src/tasks/object-detection/spec/output.json +46 -0
  54. package/src/tasks/placeholder/data.ts +3 -0
  55. package/src/tasks/placeholder/spec/input.json +35 -0
  56. package/src/tasks/placeholder/spec/output.json +17 -0
  57. package/src/tasks/question-answering/inference.ts +99 -0
  58. package/src/tasks/question-answering/spec/input.json +67 -0
  59. package/src/tasks/question-answering/spec/output.json +29 -0
  60. package/src/tasks/sentence-similarity/about.md +2 -2
  61. package/src/tasks/sentence-similarity/inference.ts +32 -0
  62. package/src/tasks/sentence-similarity/spec/input.json +40 -0
  63. package/src/tasks/sentence-similarity/spec/output.json +12 -0
  64. package/src/tasks/summarization/data.ts +1 -0
  65. package/src/tasks/summarization/inference.ts +59 -0
  66. package/src/tasks/summarization/spec/input.json +7 -0
  67. package/src/tasks/summarization/spec/output.json +7 -0
  68. package/src/tasks/table-question-answering/inference.ts +61 -0
  69. package/src/tasks/table-question-answering/spec/input.json +44 -0
  70. package/src/tasks/table-question-answering/spec/output.json +40 -0
  71. package/src/tasks/tabular-classification/about.md +1 -1
  72. package/src/tasks/tabular-regression/about.md +1 -1
  73. package/src/tasks/text-classification/about.md +1 -0
  74. package/src/tasks/text-classification/inference.ts +51 -0
  75. package/src/tasks/text-classification/spec/input.json +35 -0
  76. package/src/tasks/text-classification/spec/output.json +10 -0
  77. package/src/tasks/text-generation/about.md +24 -13
  78. package/src/tasks/text-generation/data.ts +22 -38
  79. package/src/tasks/text-generation/inference.ts +194 -0
  80. package/src/tasks/text-generation/spec/input.json +90 -0
  81. package/src/tasks/text-generation/spec/output.json +120 -0
  82. package/src/tasks/text-to-audio/inference.ts +143 -0
  83. package/src/tasks/text-to-audio/spec/input.json +31 -0
  84. package/src/tasks/text-to-audio/spec/output.json +17 -0
  85. package/src/tasks/text-to-image/about.md +11 -2
  86. package/src/tasks/text-to-image/data.ts +6 -2
  87. package/src/tasks/text-to-image/inference.ts +71 -0
  88. package/src/tasks/text-to-image/spec/input.json +59 -0
  89. package/src/tasks/text-to-image/spec/output.json +13 -0
  90. package/src/tasks/text-to-speech/about.md +4 -2
  91. package/src/tasks/text-to-speech/data.ts +1 -0
  92. package/src/tasks/text-to-speech/inference.ts +147 -0
  93. package/src/tasks/text-to-speech/spec/input.json +7 -0
  94. package/src/tasks/text-to-speech/spec/output.json +7 -0
  95. package/src/tasks/text2text-generation/inference.ts +55 -0
  96. package/src/tasks/text2text-generation/spec/input.json +55 -0
  97. package/src/tasks/text2text-generation/spec/output.json +14 -0
  98. package/src/tasks/token-classification/inference.ts +82 -0
  99. package/src/tasks/token-classification/spec/input.json +65 -0
  100. package/src/tasks/token-classification/spec/output.json +33 -0
  101. package/src/tasks/translation/data.ts +1 -0
  102. package/src/tasks/translation/inference.ts +59 -0
  103. package/src/tasks/translation/spec/input.json +7 -0
  104. package/src/tasks/translation/spec/output.json +7 -0
  105. package/src/tasks/video-classification/inference.ts +59 -0
  106. package/src/tasks/video-classification/spec/input.json +42 -0
  107. package/src/tasks/video-classification/spec/output.json +10 -0
  108. package/src/tasks/visual-question-answering/inference.ts +63 -0
  109. package/src/tasks/visual-question-answering/spec/input.json +41 -0
  110. package/src/tasks/visual-question-answering/spec/output.json +21 -0
  111. package/src/tasks/zero-shot-classification/inference.ts +67 -0
  112. package/src/tasks/zero-shot-classification/spec/input.json +50 -0
  113. package/src/tasks/zero-shot-classification/spec/output.json +10 -0
  114. package/src/tasks/zero-shot-image-classification/data.ts +8 -5
  115. package/src/tasks/zero-shot-image-classification/inference.ts +61 -0
  116. package/src/tasks/zero-shot-image-classification/spec/input.json +45 -0
  117. package/src/tasks/zero-shot-image-classification/spec/output.json +10 -0
  118. package/src/tasks/zero-shot-object-detection/about.md +6 -0
  119. package/src/tasks/zero-shot-object-detection/data.ts +6 -1
  120. package/src/tasks/zero-shot-object-detection/inference.ts +66 -0
  121. package/src/tasks/zero-shot-object-detection/spec/input.json +40 -0
  122. package/src/tasks/zero-shot-object-detection/spec/output.json +47 -0
  123. package/tsconfig.json +3 -3
@@ -0,0 +1,159 @@
1
+ /**
2
+ * Inference code generated from the JSON schema spec in ./spec
3
+ *
4
+ * Using src/scripts/inference-codegen
5
+ */
6
+
7
+ /**
8
+ * Inputs for Automatic Speech Recognition inference
9
+ */
10
+ export interface AutomaticSpeechRecognitionInput {
11
+ /**
12
+ * The input audio data
13
+ */
14
+ inputs: unknown;
15
+ /**
16
+ * Additional inference parameters
17
+ */
18
+ parameters?: AutomaticSpeechRecognitionParameters;
19
+ [property: string]: unknown;
20
+ }
21
+
22
+ /**
23
+ * Additional inference parameters
24
+ *
25
+ * Additional inference parameters for Automatic Speech Recognition
26
+ */
27
+ export interface AutomaticSpeechRecognitionParameters {
28
+ /**
29
+ * Parametrization of the text generation process
30
+ */
31
+ generate?: GenerationParameters;
32
+ /**
33
+ * Whether to output corresponding timestamps with the generated text
34
+ */
35
+ return_timestamps?: boolean;
36
+ [property: string]: unknown;
37
+ }
38
+
39
+ /**
40
+ * Parametrization of the text generation process
41
+ *
42
+ * Ad-hoc parametrization of the text generation process
43
+ */
44
+ export interface GenerationParameters {
45
+ /**
46
+ * Whether to use sampling instead of greedy decoding when generating new tokens.
47
+ */
48
+ do_sample?: boolean;
49
+ /**
50
+ * Controls the stopping condition for beam-based methods.
51
+ */
52
+ early_stopping?: EarlyStoppingUnion;
53
+ /**
54
+ * If set to float strictly between 0 and 1, only tokens with a conditional probability
55
+ * greater than epsilon_cutoff will be sampled. In the paper, suggested values range from
56
+ * 3e-4 to 9e-4, depending on the size of the model. See [Truncation Sampling as Language
57
+ * Model Desmoothing](https://hf.co/papers/2210.15191) for more details.
58
+ */
59
+ epsilon_cutoff?: number;
60
+ /**
61
+ * Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to
62
+ * float strictly between 0 and 1, a token is only considered if it is greater than either
63
+ * eta_cutoff or sqrt(eta_cutoff) * exp(-entropy(softmax(next_token_logits))). The latter
64
+ * term is intuitively the expected next token probability, scaled by sqrt(eta_cutoff). In
65
+ * the paper, suggested values range from 3e-4 to 2e-3, depending on the size of the model.
66
+ * See [Truncation Sampling as Language Model Desmoothing](https://hf.co/papers/2210.15191)
67
+ * for more details.
68
+ */
69
+ eta_cutoff?: number;
70
+ /**
71
+ * The maximum length (in tokens) of the generated text, including the input.
72
+ */
73
+ max_length?: number;
74
+ /**
75
+ * The maximum number of tokens to generate. Takes precedence over maxLength.
76
+ */
77
+ max_new_tokens?: number;
78
+ /**
79
+ * The minimum length (in tokens) of the generated text, including the input.
80
+ */
81
+ min_length?: number;
82
+ /**
83
+ * The minimum number of tokens to generate. Takes precedence over minLength.
84
+ */
85
+ min_new_tokens?: number;
86
+ /**
87
+ * Number of groups to divide num_beams into in order to ensure diversity among different
88
+ * groups of beams. See [this paper](https://hf.co/papers/1610.02424) for more details.
89
+ */
90
+ num_beam_groups?: number;
91
+ /**
92
+ * Number of beams to use for beam search.
93
+ */
94
+ num_beams?: number;
95
+ /**
96
+ * The value balances the model confidence and the degeneration penalty in contrastive
97
+ * search decoding.
98
+ */
99
+ penalty_alpha?: number;
100
+ /**
101
+ * The value used to modulate the next token probabilities.
102
+ */
103
+ temperature?: number;
104
+ /**
105
+ * The number of highest probability vocabulary tokens to keep for top-k-filtering.
106
+ */
107
+ top_k?: number;
108
+ /**
109
+ * If set to float < 1, only the smallest set of most probable tokens with probabilities
110
+ * that add up to top_p or higher are kept for generation.
111
+ */
112
+ top_p?: number;
113
+ /**
114
+ * Local typicality measures how similar the conditional probability of predicting a target
115
+ * token next is to the expected conditional probability of predicting a random token next,
116
+ * given the partial text already generated. If set to float < 1, the smallest set of the
117
+ * most locally typical tokens with probabilities that add up to typical_p or higher are
118
+ * kept for generation. See [this paper](https://hf.co/papers/2202.00666) for more details.
119
+ */
120
+ typical_p?: number;
121
+ /**
122
+ * Whether the model should use the past last key/values attentions to speed up decoding
123
+ */
124
+ use_cache?: boolean;
125
+ [property: string]: unknown;
126
+ }
127
+
128
+ /**
129
+ * Controls the stopping condition for beam-based methods.
130
+ */
131
+ export type EarlyStoppingUnion = boolean | "never";
132
+
133
+ /**
134
+ * Outputs of inference for the Automatic Speech Recognition task
135
+ */
136
+ export interface AutomaticSpeechRecognitionOutput {
137
+ /**
138
+ * When returnTimestamps is enabled, chunks contains a list of audio chunks identified by
139
+ * the model.
140
+ */
141
+ chunks?: AutomaticSpeechRecognitionOutputChunk[];
142
+ /**
143
+ * The recognized text.
144
+ */
145
+ text: string;
146
+ [property: string]: unknown;
147
+ }
148
+
149
+ export interface AutomaticSpeechRecognitionOutputChunk {
150
+ /**
151
+ * A chunk of text identified by the model
152
+ */
153
+ text: string;
154
+ /**
155
+ * The start and end timestamps corresponding with the text
156
+ */
157
+ timestamps: number[];
158
+ [property: string]: unknown;
159
+ }
@@ -0,0 +1,34 @@
1
+ {
2
+ "$id": "/inference/schemas/automatic-speech-recognition/input.json",
3
+ "$schema": "http://json-schema.org/draft-06/schema#",
4
+ "description": "Inputs for Automatic Speech Recognition inference",
5
+ "title": "AutomaticSpeechRecognitionInput",
6
+ "type": "object",
7
+ "properties": {
8
+ "inputs": {
9
+ "description": "The input audio data"
10
+ },
11
+ "parameters": {
12
+ "description": "Additional inference parameters",
13
+ "$ref": "#/$defs/AutomaticSpeechRecognitionParameters"
14
+ }
15
+ },
16
+ "$defs": {
17
+ "AutomaticSpeechRecognitionParameters": {
18
+ "title": "AutomaticSpeechRecognitionParameters",
19
+ "description": "Additional inference parameters for Automatic Speech Recognition",
20
+ "type": "object",
21
+ "properties": {
22
+ "return_timestamps": {
23
+ "type": "boolean",
24
+ "description": "Whether to output corresponding timestamps with the generated text"
25
+ },
26
+ "generate": {
27
+ "description": "Parametrization of the text generation process",
28
+ "$ref": "/inference/schemas/common-definitions.json#/definitions/GenerationParameters"
29
+ }
30
+ }
31
+ }
32
+ },
33
+ "required": ["inputs"]
34
+ }
@@ -0,0 +1,38 @@
1
+ {
2
+ "$id": "/inference/schemas/automatic-speech-recognition/output.json",
3
+ "$schema": "http://json-schema.org/draft-06/schema#",
4
+ "description": "Outputs of inference for the Automatic Speech Recognition task",
5
+ "title": "AutomaticSpeechRecognitionOutput",
6
+ "type": "object",
7
+ "properties": {
8
+ "text": {
9
+ "type": "string",
10
+ "description": "The recognized text."
11
+ },
12
+ "chunks": {
13
+ "type": "array",
14
+ "description": "When returnTimestamps is enabled, chunks contains a list of audio chunks identified by the model.",
15
+ "items": {
16
+ "type": "object",
17
+ "title": "AutomaticSpeechRecognitionOutputChunk",
18
+ "properties": {
19
+ "text": {
20
+ "type": "string",
21
+ "description": "A chunk of text identified by the model"
22
+ },
23
+ "timestamps": {
24
+ "type": "array",
25
+ "description": "The start and end timestamps corresponding with the text",
26
+ "items": {
27
+ "type": "number"
28
+ },
29
+ "minItems": 2,
30
+ "maxItems": 2
31
+ }
32
+ },
33
+ "required": ["text", "timestamps"]
34
+ }
35
+ }
36
+ },
37
+ "required": ["text"]
38
+ }
@@ -0,0 +1,117 @@
1
+ {
2
+ "$id": "/inference/schemas/common-definitions.json",
3
+ "$schema": "http://json-schema.org/draft-06/schema#",
4
+ "description": "(Incomplete!) Common type definitions shared by several tasks",
5
+ "definitions": {
6
+ "ClassificationOutputTransform": {
7
+ "title": "ClassificationOutputTransform",
8
+ "type": "string",
9
+ "description": "The function to apply to the model outputs in order to retrieve the scores.",
10
+ "oneOf": [
11
+ {
12
+ "const": "sigmoid"
13
+ },
14
+ {
15
+ "const": "softmax"
16
+ },
17
+ {
18
+ "const": "none"
19
+ }
20
+ ]
21
+ },
22
+ "ClassificationOutput": {
23
+ "title": "ClassificationOutput",
24
+ "type": "object",
25
+ "properties": {
26
+ "label": {
27
+ "type": "string",
28
+ "description": "The predicted class label."
29
+ },
30
+ "score": {
31
+ "type": "number",
32
+ "description": "The corresponding probability."
33
+ }
34
+ },
35
+ "required": ["label", "score"]
36
+ },
37
+ "GenerationParameters": {
38
+ "title": "GenerationParameters",
39
+ "description": "Ad-hoc parametrization of the text generation process",
40
+ "type": "object",
41
+ "properties": {
42
+ "temperature": {
43
+ "type": "number",
44
+ "description": "The value used to modulate the next token probabilities."
45
+ },
46
+ "top_k": {
47
+ "type": "integer",
48
+ "description": "The number of highest probability vocabulary tokens to keep for top-k-filtering."
49
+ },
50
+ "top_p": {
51
+ "type": "number",
52
+ "description": "If set to float < 1, only the smallest set of most probable tokens with probabilities that add up to top_p or higher are kept for generation."
53
+ },
54
+ "typical_p": {
55
+ "type": "number",
56
+ "description": "Local typicality measures how similar the conditional probability of predicting a target token next is to the expected conditional probability of predicting a random token next, given the partial text already generated. If set to float < 1, the smallest set of the most locally typical tokens with probabilities that add up to typical_p or higher are kept for generation. See [this paper](https://hf.co/papers/2202.00666) for more details."
57
+ },
58
+ "epsilon_cutoff": {
59
+ "type": "number",
60
+ "description": "If set to float strictly between 0 and 1, only tokens with a conditional probability greater than epsilon_cutoff will be sampled. In the paper, suggested values range from 3e-4 to 9e-4, depending on the size of the model. See [Truncation Sampling as Language Model Desmoothing](https://hf.co/papers/2210.15191) for more details."
61
+ },
62
+ "eta_cutoff": {
63
+ "type": "number",
64
+ "description": "Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to float strictly between 0 and 1, a token is only considered if it is greater than either eta_cutoff or sqrt(eta_cutoff) * exp(-entropy(softmax(next_token_logits))). The latter term is intuitively the expected next token probability, scaled by sqrt(eta_cutoff). In the paper, suggested values range from 3e-4 to 2e-3, depending on the size of the model. See [Truncation Sampling as Language Model Desmoothing](https://hf.co/papers/2210.15191) for more details."
65
+ },
66
+ "max_length": {
67
+ "type": "integer",
68
+ "description": "The maximum length (in tokens) of the generated text, including the input."
69
+ },
70
+ "max_new_tokens": {
71
+ "type": "integer",
72
+ "description": "The maximum number of tokens to generate. Takes precedence over maxLength."
73
+ },
74
+ "min_length": {
75
+ "type": "integer",
76
+ "description": "The minimum length (in tokens) of the generated text, including the input."
77
+ },
78
+ "min_new_tokens": {
79
+ "type": "integer",
80
+ "description": "The minimum number of tokens to generate. Takes precedence over minLength."
81
+ },
82
+ "do_sample": {
83
+ "type": "boolean",
84
+ "description": "Whether to use sampling instead of greedy decoding when generating new tokens."
85
+ },
86
+ "early_stopping": {
87
+ "description": "Controls the stopping condition for beam-based methods.",
88
+ "oneOf": [
89
+ {
90
+ "type": "boolean"
91
+ },
92
+ {
93
+ "const": "never",
94
+ "type": "string"
95
+ }
96
+ ]
97
+ },
98
+ "num_beams": {
99
+ "type": "integer",
100
+ "description": "Number of beams to use for beam search."
101
+ },
102
+ "num_beam_groups": {
103
+ "type": "integer",
104
+ "description": "Number of groups to divide num_beams into in order to ensure diversity among different groups of beams. See [this paper](https://hf.co/papers/1610.02424) for more details."
105
+ },
106
+ "penalty_alpha": {
107
+ "type": "number",
108
+ "description": "The value balances the model confidence and the degeneration penalty in contrastive search decoding."
109
+ },
110
+ "use_cache": {
111
+ "type": "boolean",
112
+ "description": "Whether the model should use the past last key/values attentions to speed up decoding"
113
+ }
114
+ }
115
+ }
116
+ }
117
+ }
@@ -28,8 +28,8 @@ const taskData: TaskDataCustom = {
28
28
  id: "Intel/dpt-large",
29
29
  },
30
30
  {
31
- description: "Strong Depth Estimation model trained on the KITTI dataset.",
32
- id: "facebook/dpt-dinov2-large-kitti",
31
+ description: "Strong Depth Estimation model trained on a big compilation of datasets.",
32
+ id: "LiheYoung/depth-anything-large-hf",
33
33
  },
34
34
  {
35
35
  description: "A strong monocular depth estimation model.",
@@ -42,8 +42,12 @@ const taskData: TaskDataCustom = {
42
42
  id: "radames/dpt-depth-estimation-3d-voxels",
43
43
  },
44
44
  {
45
- description: "An application that can estimate the depth in a given image.",
46
- id: "keras-io/Monocular-Depth-Estimation",
45
+ description: "An application to compare the outputs of different depth estimation models.",
46
+ id: "LiheYoung/Depth-Anything",
47
+ },
48
+ {
49
+ description: "An application to try state-of-the-art depth estimation.",
50
+ id: "merve/compare_depth_models",
47
51
  },
48
52
  ],
49
53
  summary: "Depth estimation is the task of predicting depth of the objects present in an image.",
@@ -0,0 +1,35 @@
1
+ /**
2
+ * Inference code generated from the JSON schema spec in ./spec
3
+ *
4
+ * Using src/scripts/inference-codegen
5
+ */
6
+
7
+ /**
8
+ * Inputs for Depth Estimation inference
9
+ */
10
+ export interface DepthEstimationInput {
11
+ /**
12
+ * The input image data
13
+ */
14
+ inputs: unknown;
15
+ /**
16
+ * Additional inference parameters
17
+ */
18
+ parameters?: { [key: string]: unknown };
19
+ [property: string]: unknown;
20
+ }
21
+
22
+ /**
23
+ * Outputs of inference for the Depth Estimation task
24
+ */
25
+ export interface DepthEstimationOutput {
26
+ /**
27
+ * The predicted depth as an image
28
+ */
29
+ depth?: unknown;
30
+ /**
31
+ * The predicted depth as a tensor
32
+ */
33
+ predicted_depth?: unknown;
34
+ [property: string]: unknown;
35
+ }
@@ -0,0 +1,25 @@
1
+ {
2
+ "$id": "/inference/schemas/depth-estimation/input.json",
3
+ "$schema": "http://json-schema.org/draft-06/schema#",
4
+ "description": "Inputs for Depth Estimation inference",
5
+ "title": "DepthEstimationInput",
6
+ "type": "object",
7
+ "properties": {
8
+ "inputs": {
9
+ "description": "The input image data"
10
+ },
11
+ "parameters": {
12
+ "description": "Additional inference parameters",
13
+ "$ref": "#/$defs/DepthEstimationParameters"
14
+ }
15
+ },
16
+ "$defs": {
17
+ "DepthEstimationParameters": {
18
+ "title": "DepthEstimationParameters",
19
+ "description": "Additional inference parameters for Depth Estimation",
20
+ "type": "object",
21
+ "properties": {}
22
+ }
23
+ },
24
+ "required": ["inputs"]
25
+ }
@@ -0,0 +1,16 @@
1
+ {
2
+ "$id": "/inference/schemas/depth-estimation/output.json",
3
+ "$schema": "http://json-schema.org/draft-06/schema#",
4
+ "description": "Outputs of inference for the Depth Estimation task",
5
+ "title": "DepthEstimationOutput",
6
+
7
+ "type": "object",
8
+ "properties": {
9
+ "predicted_depth": {
10
+ "description": "The predicted depth as a tensor"
11
+ },
12
+ "depth": {
13
+ "description": "The predicted depth as an image"
14
+ }
15
+ }
16
+ }
@@ -0,0 +1,110 @@
1
+ /**
2
+ * Inference code generated from the JSON schema spec in ./spec
3
+ *
4
+ * Using src/scripts/inference-codegen
5
+ */
6
+ /**
7
+ * Inputs for Document Question Answering inference
8
+ */
9
+ export interface DocumentQuestionAnsweringInput {
10
+ /**
11
+ * One (document, question) pair to answer
12
+ */
13
+ inputs: DocumentQuestionAnsweringInputData;
14
+ /**
15
+ * Additional inference parameters
16
+ */
17
+ parameters?: DocumentQuestionAnsweringParameters;
18
+ [property: string]: unknown;
19
+ }
20
+ /**
21
+ * One (document, question) pair to answer
22
+ */
23
+ export interface DocumentQuestionAnsweringInputData {
24
+ /**
25
+ * The image on which the question is asked
26
+ */
27
+ image: unknown;
28
+ /**
29
+ * A question to ask of the document
30
+ */
31
+ question: string;
32
+ [property: string]: unknown;
33
+ }
34
+ /**
35
+ * Additional inference parameters
36
+ *
37
+ * Additional inference parameters for Document Question Answering
38
+ */
39
+ export interface DocumentQuestionAnsweringParameters {
40
+ /**
41
+ * If the words in the document are too long to fit with the question for the model, it will
42
+ * be split in several chunks with some overlap. This argument controls the size of that
43
+ * overlap.
44
+ */
45
+ doc_stride?: number;
46
+ /**
47
+ * Whether to accept impossible as an answer
48
+ */
49
+ handle_impossible_answer?: boolean;
50
+ /**
51
+ * Language to use while running OCR. Defaults to english.
52
+ */
53
+ lang?: string;
54
+ /**
55
+ * The maximum length of predicted answers (e.g., only answers with a shorter length are
56
+ * considered).
57
+ */
58
+ max_answer_len?: number;
59
+ /**
60
+ * The maximum length of the question after tokenization. It will be truncated if needed.
61
+ */
62
+ max_question_len?: number;
63
+ /**
64
+ * The maximum length of the total sentence (context + question) in tokens of each chunk
65
+ * passed to the model. The context will be split in several chunks (using doc_stride as
66
+ * overlap) if needed.
67
+ */
68
+ max_seq_len?: number;
69
+ /**
70
+ * The number of answers to return (will be chosen by order of likelihood). Can return less
71
+ * than top_k answers if there are not enough options available within the context.
72
+ */
73
+ top_k?: number;
74
+ /**
75
+ * A list of words and bounding boxes (normalized 0->1000). If provided, the inference will
76
+ * skip the OCR step and use the provided bounding boxes instead.
77
+ */
78
+ word_boxes?: WordBox[];
79
+ [property: string]: unknown;
80
+ }
81
+ export type WordBox = number[] | string;
82
+ export type DocumentQuestionAnsweringOutput = DocumentQuestionAnsweringOutputElement[];
83
+ /**
84
+ * Outputs of inference for the Document Question Answering task
85
+ */
86
+ export interface DocumentQuestionAnsweringOutputElement {
87
+ /**
88
+ * The answer to the question.
89
+ */
90
+ answer: string;
91
+ /**
92
+ * The end word index of the answer (in the OCR’d version of the input or provided word
93
+ * boxes).
94
+ */
95
+ end: number;
96
+ /**
97
+ * The probability associated to the answer.
98
+ */
99
+ score: number;
100
+ /**
101
+ * The start word index of the answer (in the OCR’d version of the input or provided word
102
+ * boxes).
103
+ */
104
+ start: number;
105
+ /**
106
+ * The index of each word/box pair that is in the answer
107
+ */
108
+ words: number[];
109
+ [property: string]: unknown;
110
+ }
@@ -0,0 +1,85 @@
1
+ {
2
+ "$id": "/inference/schemas/document-question-answering/input.json",
3
+ "$schema": "http://json-schema.org/draft-06/schema#",
4
+ "description": "Inputs for Document Question Answering inference",
5
+ "title": "DocumentQuestionAnsweringInput",
6
+ "type": "object",
7
+ "properties": {
8
+ "inputs": {
9
+ "description": "One (document, question) pair to answer",
10
+ "type": "object",
11
+ "title": "DocumentQuestionAnsweringInputData",
12
+ "properties": {
13
+ "image": {
14
+ "description": "The image on which the question is asked"
15
+ },
16
+ "question": {
17
+ "type": "string",
18
+ "description": "A question to ask of the document"
19
+ }
20
+ },
21
+ "required": ["image", "question"]
22
+ },
23
+ "parameters": {
24
+ "description": "Additional inference parameters",
25
+ "$ref": "#/$defs/DocumentQuestionAnsweringParameters"
26
+ }
27
+ },
28
+ "$defs": {
29
+ "DocumentQuestionAnsweringParameters": {
30
+ "title": "DocumentQuestionAnsweringParameters",
31
+ "description": "Additional inference parameters for Document Question Answering",
32
+ "type": "object",
33
+ "properties": {
34
+ "doc_stride": {
35
+ "type": "integer",
36
+ "description": "If the words in the document are too long to fit with the question for the model, it will be split in several chunks with some overlap. This argument controls the size of that overlap."
37
+ },
38
+ "handle_impossible_answer": {
39
+ "type": "boolean",
40
+ "description": "Whether to accept impossible as an answer"
41
+ },
42
+ "lang": {
43
+ "type": "string",
44
+ "description": "Language to use while running OCR. Defaults to english."
45
+ },
46
+ "max_answer_len": {
47
+ "type": "integer",
48
+ "description": "The maximum length of predicted answers (e.g., only answers with a shorter length are considered)."
49
+ },
50
+ "max_seq_len": {
51
+ "type": "integer",
52
+ "description": "The maximum length of the total sentence (context + question) in tokens of each chunk passed to the model. The context will be split in several chunks (using doc_stride as overlap) if needed."
53
+ },
54
+ "max_question_len": {
55
+ "type": "integer",
56
+ "description": "The maximum length of the question after tokenization. It will be truncated if needed."
57
+ },
58
+ "top_k": {
59
+ "type": "integer",
60
+ "description": "The number of answers to return (will be chosen by order of likelihood). Can return less than top_k answers if there are not enough options available within the context."
61
+ },
62
+ "word_boxes": {
63
+ "type": "array",
64
+ "description": "A list of words and bounding boxes (normalized 0->1000). If provided, the inference will skip the OCR step and use the provided bounding boxes instead.",
65
+ "items": {
66
+ "anyOf": [
67
+ {
68
+ "type": "string"
69
+ },
70
+ {
71
+ "type": "array",
72
+ "items": {
73
+ "type": "number"
74
+ },
75
+ "maxItems": 4,
75
+ "minItems": 4
77
+ }
78
+ ]
79
+ }
80
+ }
81
+ }
82
+ }
83
+ },
84
+ "required": ["inputs"]
85
+ }
@@ -0,0 +1,36 @@
1
+ {
2
+ "$id": "/inference/schemas/document-question-answering/output.json",
3
+ "$schema": "http://json-schema.org/draft-06/schema#",
4
+ "description": "Outputs of inference for the Document Question Answering task",
5
+ "title": "DocumentQuestionAnsweringOutput",
6
+ "type": "array",
7
+ "items": {
8
+ "type": "object",
9
+ "properties": {
10
+ "answer": {
11
+ "type": "string",
12
+ "description": "The answer to the question."
13
+ },
14
+ "score": {
15
+ "type": "number",
16
+ "description": "The probability associated to the answer."
17
+ },
18
+ "start": {
19
+ "type": "integer",
20
+ "description": "The start word index of the answer (in the OCR\u2019d version of the input or provided word boxes)."
21
+ },
22
+ "end": {
23
+ "type": "integer",
24
+ "description": "The end word index of the answer (in the OCR\u2019d version of the input or provided word boxes)."
25
+ },
26
+ "words": {
27
+ "type": "array",
28
+ "items": {
29
+ "type": "integer"
30
+ },
31
+ "description": "The index of each word/box pair that is in the answer"
32
+ }
33
+ },
34
+ "required": ["answer", "score", "start", "end", "words"]
35
+ }
36
+ }