huggingface-hub 0.21.4__py3-none-any.whl → 0.22.0__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, exactly as they appear in their public registry. It is provided for informational purposes only.
Potentially problematic release.
This version of huggingface-hub might be problematic.
- huggingface_hub/__init__.py +217 -1
- huggingface_hub/_commit_api.py +14 -15
- huggingface_hub/_inference_endpoints.py +12 -11
- huggingface_hub/_login.py +1 -0
- huggingface_hub/_multi_commits.py +1 -0
- huggingface_hub/_snapshot_download.py +9 -1
- huggingface_hub/_tensorboard_logger.py +1 -0
- huggingface_hub/_webhooks_payload.py +1 -0
- huggingface_hub/_webhooks_server.py +1 -0
- huggingface_hub/commands/_cli_utils.py +1 -0
- huggingface_hub/commands/delete_cache.py +1 -0
- huggingface_hub/commands/download.py +1 -0
- huggingface_hub/commands/env.py +1 -0
- huggingface_hub/commands/scan_cache.py +1 -0
- huggingface_hub/commands/upload.py +1 -0
- huggingface_hub/community.py +1 -0
- huggingface_hub/constants.py +3 -1
- huggingface_hub/errors.py +38 -0
- huggingface_hub/file_download.py +102 -95
- huggingface_hub/hf_api.py +47 -35
- huggingface_hub/hf_file_system.py +77 -3
- huggingface_hub/hub_mixin.py +215 -54
- huggingface_hub/inference/_client.py +554 -239
- huggingface_hub/inference/_common.py +195 -41
- huggingface_hub/inference/_generated/_async_client.py +558 -239
- huggingface_hub/inference/_generated/types/__init__.py +115 -0
- huggingface_hub/inference/_generated/types/audio_classification.py +43 -0
- huggingface_hub/inference/_generated/types/audio_to_audio.py +31 -0
- huggingface_hub/inference/_generated/types/automatic_speech_recognition.py +116 -0
- huggingface_hub/inference/_generated/types/base.py +149 -0
- huggingface_hub/inference/_generated/types/chat_completion.py +106 -0
- huggingface_hub/inference/_generated/types/depth_estimation.py +29 -0
- huggingface_hub/inference/_generated/types/document_question_answering.py +85 -0
- huggingface_hub/inference/_generated/types/feature_extraction.py +19 -0
- huggingface_hub/inference/_generated/types/fill_mask.py +50 -0
- huggingface_hub/inference/_generated/types/image_classification.py +43 -0
- huggingface_hub/inference/_generated/types/image_segmentation.py +52 -0
- huggingface_hub/inference/_generated/types/image_to_image.py +55 -0
- huggingface_hub/inference/_generated/types/image_to_text.py +105 -0
- huggingface_hub/inference/_generated/types/object_detection.py +55 -0
- huggingface_hub/inference/_generated/types/question_answering.py +77 -0
- huggingface_hub/inference/_generated/types/sentence_similarity.py +28 -0
- huggingface_hub/inference/_generated/types/summarization.py +46 -0
- huggingface_hub/inference/_generated/types/table_question_answering.py +45 -0
- huggingface_hub/inference/_generated/types/text2text_generation.py +45 -0
- huggingface_hub/inference/_generated/types/text_classification.py +43 -0
- huggingface_hub/inference/_generated/types/text_generation.py +161 -0
- huggingface_hub/inference/_generated/types/text_to_audio.py +105 -0
- huggingface_hub/inference/_generated/types/text_to_image.py +57 -0
- huggingface_hub/inference/_generated/types/token_classification.py +53 -0
- huggingface_hub/inference/_generated/types/translation.py +46 -0
- huggingface_hub/inference/_generated/types/video_classification.py +47 -0
- huggingface_hub/inference/_generated/types/visual_question_answering.py +53 -0
- huggingface_hub/inference/_generated/types/zero_shot_classification.py +56 -0
- huggingface_hub/inference/_generated/types/zero_shot_image_classification.py +51 -0
- huggingface_hub/inference/_generated/types/zero_shot_object_detection.py +55 -0
- huggingface_hub/inference/_templating.py +105 -0
- huggingface_hub/inference/_types.py +4 -152
- huggingface_hub/keras_mixin.py +39 -17
- huggingface_hub/lfs.py +20 -8
- huggingface_hub/repocard.py +11 -3
- huggingface_hub/repocard_data.py +12 -2
- huggingface_hub/serialization/__init__.py +1 -0
- huggingface_hub/serialization/_base.py +1 -0
- huggingface_hub/serialization/_numpy.py +1 -0
- huggingface_hub/serialization/_tensorflow.py +1 -0
- huggingface_hub/serialization/_torch.py +1 -0
- huggingface_hub/utils/__init__.py +4 -1
- huggingface_hub/utils/_cache_manager.py +7 -0
- huggingface_hub/utils/_chunk_utils.py +1 -0
- huggingface_hub/utils/_datetime.py +1 -0
- huggingface_hub/utils/_errors.py +10 -1
- huggingface_hub/utils/_experimental.py +1 -0
- huggingface_hub/utils/_fixes.py +19 -3
- huggingface_hub/utils/_git_credential.py +1 -0
- huggingface_hub/utils/_headers.py +10 -3
- huggingface_hub/utils/_hf_folder.py +1 -0
- huggingface_hub/utils/_http.py +1 -0
- huggingface_hub/utils/_pagination.py +1 -0
- huggingface_hub/utils/_paths.py +1 -0
- huggingface_hub/utils/_runtime.py +22 -0
- huggingface_hub/utils/_subprocess.py +1 -0
- huggingface_hub/utils/_token.py +1 -0
- huggingface_hub/utils/_typing.py +29 -1
- huggingface_hub/utils/_validators.py +1 -0
- huggingface_hub/utils/endpoint_helpers.py +1 -0
- huggingface_hub/utils/logging.py +1 -1
- huggingface_hub/utils/sha.py +1 -0
- huggingface_hub/utils/tqdm.py +1 -0
- {huggingface_hub-0.21.4.dist-info → huggingface_hub-0.22.0.dist-info}/METADATA +14 -15
- huggingface_hub-0.22.0.dist-info/RECORD +113 -0
- {huggingface_hub-0.21.4.dist-info → huggingface_hub-0.22.0.dist-info}/WHEEL +1 -1
- huggingface_hub/inference/_text_generation.py +0 -551
- huggingface_hub-0.21.4.dist-info/RECORD +0 -81
- {huggingface_hub-0.21.4.dist-info → huggingface_hub-0.22.0.dist-info}/LICENSE +0 -0
- {huggingface_hub-0.21.4.dist-info → huggingface_hub-0.22.0.dist-info}/entry_points.txt +0 -0
- {huggingface_hub-0.21.4.dist-info → huggingface_hub-0.22.0.dist-info}/top_level.txt +0 -0
huggingface_hub/inference/_generated/types/feature_extraction.py
@@ -0,0 +1,19 @@
+# Inference code generated from the JSON schema spec in @huggingface/tasks.
+#
+# See:
+#   - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
+#   - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
+from dataclasses import dataclass
+from typing import Any, Dict, Optional
+
+from .base import BaseInferenceType
+
+
+@dataclass
+class FeatureExtractionInput(BaseInferenceType):
+    """Inputs for Text Embedding inference"""
+
+    inputs: str
+    """The text to get the embeddings of"""
+    parameters: Optional[Dict[str, Any]] = None
+    """Additional inference parameters"""
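For orientation, a minimal sketch of how one of these generated input types can be built and serialized into a request payload. It assumes the re-exports added by the new types/__init__.py (+115 above); the input string is illustrative:

from dataclasses import asdict

from huggingface_hub.inference._generated.types import FeatureExtractionInput

# Build a typed request body instead of a hand-written dict.
payload = FeatureExtractionInput(inputs="Today is a sunny day")

# asdict() yields the JSON-ready mapping; drop unset optional fields.
body = {k: v for k, v in asdict(payload).items() if v is not None}
print(body)  # {'inputs': 'Today is a sunny day'}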
huggingface_hub/inference/_generated/types/fill_mask.py
@@ -0,0 +1,50 @@
+# Inference code generated from the JSON schema spec in @huggingface/tasks.
+#
+# See:
+#   - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
+#   - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
+from dataclasses import dataclass
+from typing import Any, List, Optional
+
+from .base import BaseInferenceType
+
+
+@dataclass
+class FillMaskParameters(BaseInferenceType):
+    """Additional inference parameters
+    Additional inference parameters for Fill Mask
+    """
+
+    targets: Optional[List[str]] = None
+    """When passed, the model will limit the scores to the passed targets instead of looking up
+    in the whole vocabulary. If the provided targets are not in the model vocab, they will be
+    tokenized and the first resulting token will be used (with a warning, and that might be
+    slower).
+    """
+    top_k: Optional[int] = None
+    """When passed, overrides the number of predictions to return."""
+
+
+@dataclass
+class FillMaskInput(BaseInferenceType):
+    """Inputs for Fill Mask inference"""
+
+    inputs: str
+    """The text with masked tokens"""
+    parameters: Optional[FillMaskParameters] = None
+    """Additional inference parameters"""
+
+
+@dataclass
+class FillMaskOutputElement(BaseInferenceType):
+    """Outputs of inference for the Fill Mask task"""
+
+    score: float
+    """The corresponding probability"""
+    sequence: str
+    """The corresponding input with the mask token prediction."""
+    token: int
+    """The predicted token id (to replace the masked one)."""
+    token_str: Any
+    fill_mask_output_token_str: Optional[str] = None
+    """The predicted token (to replace the masked one)."""
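A sketch of the reverse direction, deserializing a raw server response into these dataclasses. It assumes the parse_obj_as_list helper defined in the new types/base.py (+149 above); the JSON literal stands in for a real fill-mask response:

from huggingface_hub.inference._generated.types import FillMaskOutputElement

# Stand-in for the raw bytes an inference server would return for fill-mask.
raw = b'[{"score": 0.92, "sequence": "Paris is the capital of France.", "token": 2201, "token_str": "Paris"}]'

# parse_obj_as_list() maps the JSON payload onto the dataclass fields.
elements = FillMaskOutputElement.parse_obj_as_list(raw)
for el in elements:
    print(f"{el.sequence!r} (score={el.score:.2f})")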
huggingface_hub/inference/_generated/types/image_classification.py
@@ -0,0 +1,43 @@
+# Inference code generated from the JSON schema spec in @huggingface/tasks.
+#
+# See:
+#   - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
+#   - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
+from dataclasses import dataclass
+from typing import Any, Literal, Optional
+
+from .base import BaseInferenceType
+
+
+ClassificationOutputTransform = Literal["sigmoid", "softmax", "none"]
+
+
+@dataclass
+class ImageClassificationParameters(BaseInferenceType):
+    """Additional inference parameters
+    Additional inference parameters for Image Classification
+    """
+
+    function_to_apply: Optional["ClassificationOutputTransform"] = None
+    top_k: Optional[int] = None
+    """When specified, limits the output to the top K most probable classes."""
+
+
+@dataclass
+class ImageClassificationInput(BaseInferenceType):
+    """Inputs for Image Classification inference"""
+
+    inputs: Any
+    """The input image data"""
+    parameters: Optional[ImageClassificationParameters] = None
+    """Additional inference parameters"""
+
+
+@dataclass
+class ImageClassificationOutputElement(BaseInferenceType):
+    """Outputs of inference for the Image Classification task"""
+
+    label: str
+    """The predicted class label."""
+    score: float
+    """The corresponding probability."""
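Because function_to_apply is typed with a Literal alias, a static type checker can reject an invalid transform name before any request is sent; a small sketch under the same import assumption as above:

from huggingface_hub.inference._generated.types import ImageClassificationParameters

# "softmax" is one of the three allowed ClassificationOutputTransform values;
# an invalid value such as "softmax2" would be flagged by mypy/pyright, not at runtime.
params = ImageClassificationParameters(function_to_apply="softmax", top_k=3)
print(params.top_k)  # 3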
huggingface_hub/inference/_generated/types/image_segmentation.py
@@ -0,0 +1,52 @@
+# Inference code generated from the JSON schema spec in @huggingface/tasks.
+#
+# See:
+#   - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
+#   - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
+from dataclasses import dataclass
+from typing import Any, Literal, Optional
+
+from .base import BaseInferenceType
+
+
+ImageSegmentationSubtask = Literal["instance", "panoptic", "semantic"]
+
+
+@dataclass
+class ImageSegmentationParameters(BaseInferenceType):
+    """Additional inference parameters
+    Additional inference parameters for Image Segmentation
+    """
+
+    mask_threshold: Optional[float] = None
+    """Threshold to use when turning the predicted masks into binary values."""
+    overlap_mask_area_threshold: Optional[float] = None
+    """Mask overlap threshold to eliminate small, disconnected segments."""
+    subtask: Optional["ImageSegmentationSubtask"] = None
+    """Segmentation task to be performed, depending on model capabilities."""
+    threshold: Optional[float] = None
+    """Probability threshold to filter out predicted masks."""
+
+
+@dataclass
+class ImageSegmentationInput(BaseInferenceType):
+    """Inputs for Image Segmentation inference"""
+
+    inputs: Any
+    """The input image data"""
+    parameters: Optional[ImageSegmentationParameters] = None
+    """Additional inference parameters"""
+
+
+@dataclass
+class ImageSegmentationOutputElement(BaseInferenceType):
+    """Outputs of inference for the Image Segmentation task
+    A predicted mask / segment
+    """
+
+    label: str
+    """The label of the predicted segment"""
+    mask: Any
+    """The corresponding mask as a black-and-white image"""
+    score: Optional[float] = None
+    """The score or confidence degreee the model has"""
huggingface_hub/inference/_generated/types/image_to_image.py
@@ -0,0 +1,55 @@
+# Inference code generated from the JSON schema spec in @huggingface/tasks.
+#
+# See:
+#   - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
+#   - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
+from dataclasses import dataclass
+from typing import Any, List, Optional
+
+from .base import BaseInferenceType
+
+
+@dataclass
+class ImageToImageTargetSize(BaseInferenceType):
+    """The size in pixel of the output image"""
+
+    height: int
+    width: int
+
+
+@dataclass
+class ImageToImageParameters(BaseInferenceType):
+    """Additional inference parameters
+    Additional inference parameters for Image To Image
+    """
+
+    guidance_scale: Optional[float] = None
+    """For diffusion models. A higher guidance scale value encourages the model to generate
+    images closely linked to the text prompt at the expense of lower image quality.
+    """
+    negative_prompt: Optional[List[str]] = None
+    """One or several prompt to guide what NOT to include in image generation."""
+    num_inference_steps: Optional[int] = None
+    """For diffusion models. The number of denoising steps. More denoising steps usually lead to
+    a higher quality image at the expense of slower inference.
+    """
+    target_size: Optional[ImageToImageTargetSize] = None
+    """The size in pixel of the output image"""
+
+
+@dataclass
+class ImageToImageInput(BaseInferenceType):
+    """Inputs for Image To Image inference"""
+
+    inputs: Any
+    """The input image data"""
+    parameters: Optional[ImageToImageParameters] = None
+    """Additional inference parameters"""
+
+
+@dataclass
+class ImageToImageOutput(BaseInferenceType):
+    """Outputs of inference for the Image To Image task"""
+
+    image: Any
+    """The output image"""
huggingface_hub/inference/_generated/types/image_to_text.py
@@ -0,0 +1,105 @@
+# Inference code generated from the JSON schema spec in @huggingface/tasks.
+#
+# See:
+#   - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
+#   - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
+from dataclasses import dataclass
+from typing import Any, Literal, Optional, Union
+
+from .base import BaseInferenceType
+
+
+EarlyStoppingEnum = Literal["never"]
+
+
+@dataclass
+class ImageToTextGenerationParameters(BaseInferenceType):
+    """Parametrization of the text generation process
+    Ad-hoc parametrization of the text generation process
+    """
+
+    do_sample: Optional[bool] = None
+    """Whether to use sampling instead of greedy decoding when generating new tokens."""
+    early_stopping: Optional[Union[bool, "EarlyStoppingEnum"]] = None
+    """Controls the stopping condition for beam-based methods."""
+    epsilon_cutoff: Optional[float] = None
+    """If set to float strictly between 0 and 1, only tokens with a conditional probability
+    greater than epsilon_cutoff will be sampled. In the paper, suggested values range from
+    3e-4 to 9e-4, depending on the size of the model. See [Truncation Sampling as Language
+    Model Desmoothing](https://hf.co/papers/2210.15191) for more details.
+    """
+    eta_cutoff: Optional[float] = None
+    """Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to
+    float strictly between 0 and 1, a token is only considered if it is greater than either
+    eta_cutoff or sqrt(eta_cutoff) * exp(-entropy(softmax(next_token_logits))). The latter
+    term is intuitively the expected next token probability, scaled by sqrt(eta_cutoff). In
+    the paper, suggested values range from 3e-4 to 2e-3, depending on the size of the model.
+    See [Truncation Sampling as Language Model Desmoothing](https://hf.co/papers/2210.15191)
+    for more details.
+    """
+    max_length: Optional[int] = None
+    """The maximum length (in tokens) of the generated text, including the input."""
+    max_new_tokens: Optional[int] = None
+    """The maximum number of tokens to generate. Takes precedence over maxLength."""
+    min_length: Optional[int] = None
+    """The minimum length (in tokens) of the generated text, including the input."""
+    min_new_tokens: Optional[int] = None
+    """The minimum number of tokens to generate. Takes precedence over maxLength."""
+    num_beam_groups: Optional[int] = None
+    """Number of groups to divide num_beams into in order to ensure diversity among different
+    groups of beams. See [this paper](https://hf.co/papers/1610.02424) for more details.
+    """
+    num_beams: Optional[int] = None
+    """Number of beams to use for beam search."""
+    penalty_alpha: Optional[float] = None
+    """The value balances the model confidence and the degeneration penalty in contrastive
+    search decoding.
+    """
+    temperature: Optional[float] = None
+    """The value used to modulate the next token probabilities."""
+    top_k: Optional[int] = None
+    """The number of highest probability vocabulary tokens to keep for top-k-filtering."""
+    top_p: Optional[float] = None
+    """If set to float < 1, only the smallest set of most probable tokens with probabilities
+    that add up to top_p or higher are kept for generation.
+    """
+    typical_p: Optional[float] = None
+    """Local typicality measures how similar the conditional probability of predicting a target
+    token next is to the expected conditional probability of predicting a random token next,
+    given the partial text already generated. If set to float < 1, the smallest set of the
+    most locally typical tokens with probabilities that add up to typical_p or higher are
+    kept for generation. See [this paper](https://hf.co/papers/2202.00666) for more details.
+    """
+    use_cache: Optional[bool] = None
+    """Whether the model should use the past last key/values attentions to speed up decoding"""
+
+
+@dataclass
+class ImageToTextParameters(BaseInferenceType):
+    """Additional inference parameters
+    Additional inference parameters for Image To Text
+    """
+
+    generate: Optional[ImageToTextGenerationParameters] = None
+    """Parametrization of the text generation process"""
+    max_new_tokens: Optional[int] = None
+    """The amount of maximum tokens to generate."""
+
+
+@dataclass
+class ImageToTextInput(BaseInferenceType):
+    """Inputs for Image To Text inference"""
+
+    inputs: Any
+    """The input image data"""
+    parameters: Optional[ImageToTextParameters] = None
+    """Additional inference parameters"""
+
+
+@dataclass
+class ImageToTextOutput(BaseInferenceType):
+    """Outputs of inference for the Image To Text task"""
+
+    generated_text: Any
+    image_to_text_output_generated_text: Optional[str] = None
+    """The generated text."""
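The image-to-text types nest: generation settings live in their own object under parameters.generate. A sketch, again assuming the package-level re-exports; the parameter values are illustrative:

from huggingface_hub.inference._generated.types import (
    ImageToTextGenerationParameters,
    ImageToTextParameters,
)

# Beam search with early stopping; early_stopping also accepts the literal "never".
generation = ImageToTextGenerationParameters(num_beams=4, early_stopping=True, max_new_tokens=30)
params = ImageToTextParameters(generate=generation)
print(params.generate.num_beams)  # 4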
huggingface_hub/inference/_generated/types/object_detection.py
@@ -0,0 +1,55 @@
+# Inference code generated from the JSON schema spec in @huggingface/tasks.
+#
+# See:
+#   - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
+#   - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
+from dataclasses import dataclass
+from typing import Any, Optional
+
+from .base import BaseInferenceType
+
+
+@dataclass
+class ObjectDetectionParameters(BaseInferenceType):
+    """Additional inference parameters
+    Additional inference parameters for Object Detection
+    """
+
+    threshold: Optional[float] = None
+    """The probability necessary to make a prediction."""
+
+
+@dataclass
+class ObjectDetectionInput(BaseInferenceType):
+    """Inputs for Object Detection inference"""
+
+    inputs: Any
+    """The input image data"""
+    parameters: Optional[ObjectDetectionParameters] = None
+    """Additional inference parameters"""
+
+
+@dataclass
+class ObjectDetectionBoundingBox(BaseInferenceType):
+    """The predicted bounding box. Coordinates are relative to the top left corner of the input
+    image.
+    """
+
+    xmax: int
+    xmin: int
+    ymax: int
+    ymin: int
+
+
+@dataclass
+class ObjectDetectionOutputElement(BaseInferenceType):
+    """Outputs of inference for the Object Detection task"""
+
+    box: ObjectDetectionBoundingBox
+    """The predicted bounding box. Coordinates are relative to the top left corner of the input
+    image.
+    """
+    label: str
+    """The predicted label for the bounding box"""
+    score: float
+    """The associated score / probability"""
huggingface_hub/inference/_generated/types/question_answering.py
@@ -0,0 +1,77 @@
+# Inference code generated from the JSON schema spec in @huggingface/tasks.
+#
+# See:
+#   - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
+#   - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
+from dataclasses import dataclass
+from typing import Optional
+
+from .base import BaseInferenceType
+
+
+@dataclass
+class QuestionAnsweringInputData(BaseInferenceType):
+    """One (context, question) pair to answer"""
+
+    context: str
+    """The context to be used for answering the question"""
+    question: str
+    """The question to be answered"""
+
+
+@dataclass
+class QuestionAnsweringParameters(BaseInferenceType):
+    """Additional inference parameters
+    Additional inference parameters for Question Answering
+    """
+
+    align_to_words: Optional[bool] = None
+    """Attempts to align the answer to real words. Improves quality on space separated
+    languages. Might hurt on non-space-separated languages (like Japanese or Chinese)
+    """
+    doc_stride: Optional[int] = None
+    """If the context is too long to fit with the question for the model, it will be split in
+    several chunks with some overlap. This argument controls the size of that overlap.
+    """
+    handle_impossible_answer: Optional[bool] = None
+    """Whether to accept impossible as an answer."""
+    max_answer_len: Optional[int] = None
+    """The maximum length of predicted answers (e.g., only answers with a shorter length are
+    considered).
+    """
+    max_question_len: Optional[int] = None
+    """The maximum length of the question after tokenization. It will be truncated if needed."""
+    max_seq_len: Optional[int] = None
+    """The maximum length of the total sentence (context + question) in tokens of each chunk
+    passed to the model. The context will be split in several chunks (using docStride as
+    overlap) if needed.
+    """
+    top_k: Optional[int] = None
+    """The number of answers to return (will be chosen by order of likelihood). Note that we
+    return less than topk answers if there are not enough options available within the
+    context.
+    """
+
+
+@dataclass
+class QuestionAnsweringInput(BaseInferenceType):
+    """Inputs for Question Answering inference"""
+
+    inputs: QuestionAnsweringInputData
+    """One (context, question) pair to answer"""
+    parameters: Optional[QuestionAnsweringParameters] = None
+    """Additional inference parameters"""
+
+
+@dataclass
+class QuestionAnsweringOutputElement(BaseInferenceType):
+    """Outputs of inference for the Question Answering task"""
+
+    answer: str
+    """The answer to the question."""
+    end: int
+    """The character position in the input where the answer ends."""
+    score: float
+    """The probability associated to the answer."""
+    start: int
+    """The character position in the input where the answer begins."""
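Question answering illustrates the nested-input pattern: the (context, question) pair is itself a typed object rather than a bare dict. A sketch with illustrative values, under the same import assumption:

from huggingface_hub.inference._generated.types import (
    QuestionAnsweringInput,
    QuestionAnsweringInputData,
    QuestionAnsweringParameters,
)

# The pair is wrapped in its own dataclass under `inputs`.
qa = QuestionAnsweringInput(
    inputs=QuestionAnsweringInputData(
        context="The tower is 330 metres tall.",
        question="How tall is the tower?",
    ),
    parameters=QuestionAnsweringParameters(top_k=1, max_answer_len=15),
)
print(qa.inputs.question)  # How tall is the tower?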
huggingface_hub/inference/_generated/types/sentence_similarity.py
@@ -0,0 +1,28 @@
+# Inference code generated from the JSON schema spec in @huggingface/tasks.
+#
+# See:
+#   - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
+#   - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
+from dataclasses import dataclass
+from typing import Any, Dict, List, Optional
+
+from .base import BaseInferenceType
+
+
+@dataclass
+class SentenceSimilarityInputData(BaseInferenceType):
+    sentences: List[str]
+    """A list of strings which will be compared against the source_sentence."""
+    source_sentence: str
+    """The string that you wish to compare the other strings with. This can be a phrase,
+    sentence, or longer passage, depending on the model being used.
+    """
+
+
+@dataclass
+class SentenceSimilarityInput(BaseInferenceType):
+    """Inputs for Sentence similarity inference"""
+
+    inputs: SentenceSimilarityInputData
+    parameters: Optional[Dict[str, Any]] = None
+    """Additional inference parameters"""
huggingface_hub/inference/_generated/types/summarization.py
@@ -0,0 +1,46 @@
+# Inference code generated from the JSON schema spec in @huggingface/tasks.
+#
+# See:
+#   - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
+#   - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
+from dataclasses import dataclass
+from typing import Any, Dict, Literal, Optional
+
+from .base import BaseInferenceType
+
+
+SummarizationGenerationTruncationStrategy = Literal["do_not_truncate", "longest_first", "only_first", "only_second"]
+
+
+@dataclass
+class SummarizationGenerationParameters(BaseInferenceType):
+    """Additional inference parameters
+    Additional inference parameters for Text2text Generation
+    """
+
+    clean_up_tokenization_spaces: Optional[bool] = None
+    """Whether to clean up the potential extra spaces in the text output."""
+    generate_parameters: Optional[Dict[str, Any]] = None
+    """Additional parametrization of the text generation algorithm"""
+    truncation: Optional["SummarizationGenerationTruncationStrategy"] = None
+    """The truncation strategy to use"""
+
+
+@dataclass
+class SummarizationInput(BaseInferenceType):
+    """Inputs for Summarization inference
+    Inputs for Text2text Generation inference
+    """
+
+    inputs: str
+    """The input text data"""
+    parameters: Optional[SummarizationGenerationParameters] = None
+    """Additional inference parameters"""
+
+
+@dataclass
+class SummarizationOutput(BaseInferenceType):
+    """Outputs of inference for the Summarization task"""
+
+    summary_text: str
+    """The summarized text."""
huggingface_hub/inference/_generated/types/table_question_answering.py
@@ -0,0 +1,45 @@
+# Inference code generated from the JSON schema spec in @huggingface/tasks.
+#
+# See:
+#   - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
+#   - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
+from dataclasses import dataclass
+from typing import Any, Dict, List, Optional
+
+from .base import BaseInferenceType
+
+
+@dataclass
+class TableQuestionAnsweringInputData(BaseInferenceType):
+    """One (table, question) pair to answer"""
+
+    question: str
+    """The question to be answered about the table"""
+    table: Dict[str, List[str]]
+    """The table to serve as context for the questions"""
+
+
+@dataclass
+class TableQuestionAnsweringInput(BaseInferenceType):
+    """Inputs for Table Question Answering inference"""
+
+    inputs: TableQuestionAnsweringInputData
+    """One (table, question) pair to answer"""
+    parameters: Optional[Dict[str, Any]] = None
+    """Additional inference parameters"""
+
+
+@dataclass
+class TableQuestionAnsweringOutputElement(BaseInferenceType):
+    """Outputs of inference for the Table Question Answering task"""
+
+    answer: str
+    """The answer of the question given the table. If there is an aggregator, the answer will be
+    preceded by `AGGREGATOR >`.
+    """
+    cells: List[str]
+    """List of strings made up of the answer cell values."""
+    coordinates: List[List[int]]
+    """Coordinates of the cells of the answers."""
+    aggregator: Optional[str] = None
+    """If the model has an aggregator, this returns the aggregator."""
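Table question answering follows the same pattern, with the table encoded as a mapping from column name to string values. A sketch; the repositories and counts below are illustrative:

from huggingface_hub.inference._generated.types import (
    TableQuestionAnsweringInput,
    TableQuestionAnsweringInputData,
)

# The table is Dict[str, List[str]]: column name -> column values, all strings.
tqa = TableQuestionAnsweringInput(
    inputs=TableQuestionAnsweringInputData(
        question="How many stars does the transformers repository have?",
        table={"Repository": ["transformers", "datasets"], "Stars": ["36542", "4512"]},
    ),
)
print(tqa.inputs.table["Stars"])  # ['36542', '4512']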
huggingface_hub/inference/_generated/types/text2text_generation.py
@@ -0,0 +1,45 @@
+# Inference code generated from the JSON schema spec in @huggingface/tasks.
+#
+# See:
+#   - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
+#   - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
+from dataclasses import dataclass
+from typing import Any, Dict, Literal, Optional
+
+from .base import BaseInferenceType
+
+
+Text2TextGenerationTruncationStrategy = Literal["do_not_truncate", "longest_first", "only_first", "only_second"]
+
+
+@dataclass
+class Text2TextGenerationParameters(BaseInferenceType):
+    """Additional inference parameters
+    Additional inference parameters for Text2text Generation
+    """
+
+    clean_up_tokenization_spaces: Optional[bool] = None
+    """Whether to clean up the potential extra spaces in the text output."""
+    generate_parameters: Optional[Dict[str, Any]] = None
+    """Additional parametrization of the text generation algorithm"""
+    truncation: Optional["Text2TextGenerationTruncationStrategy"] = None
+    """The truncation strategy to use"""
+
+
+@dataclass
+class Text2TextGenerationInput(BaseInferenceType):
+    """Inputs for Text2text Generation inference"""
+
+    inputs: str
+    """The input text data"""
+    parameters: Optional[Text2TextGenerationParameters] = None
+    """Additional inference parameters"""
+
+
+@dataclass
+class Text2TextGenerationOutput(BaseInferenceType):
+    """Outputs of inference for the Text2text Generation task"""
+
+    generated_text: Any
+    text2_text_generation_output_generated_text: Optional[str] = None
+    """The generated text."""
huggingface_hub/inference/_generated/types/text_classification.py
@@ -0,0 +1,43 @@
+# Inference code generated from the JSON schema spec in @huggingface/tasks.
+#
+# See:
+#   - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
+#   - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
+from dataclasses import dataclass
+from typing import Literal, Optional
+
+from .base import BaseInferenceType
+
+
+ClassificationOutputTransform = Literal["sigmoid", "softmax", "none"]
+
+
+@dataclass
+class TextClassificationParameters(BaseInferenceType):
+    """Additional inference parameters
+    Additional inference parameters for Text Classification
+    """
+
+    function_to_apply: Optional["ClassificationOutputTransform"] = None
+    top_k: Optional[int] = None
+    """When specified, limits the output to the top K most probable classes."""
+
+
+@dataclass
+class TextClassificationInput(BaseInferenceType):
+    """Inputs for Text Classification inference"""
+
+    inputs: str
+    """The text to classify"""
+    parameters: Optional[TextClassificationParameters] = None
+    """Additional inference parameters"""
+
+
+@dataclass
+class TextClassificationOutputElement(BaseInferenceType):
+    """Outputs of inference for the Text Classification task"""
+
+    label: str
+    """The predicted class label."""
+    score: float
+    """The corresponding probability."""
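Taken together with the inference/_client.py changes listed at the top of this diff, the practical effect is that InferenceClient methods return these typed elements instead of plain dicts. A hedged end-to-end sketch (requires network access and possibly an auth token; the model id is illustrative):

from huggingface_hub import InferenceClient

client = InferenceClient()
# As of 0.22.0 this returns a list of TextClassificationOutputElement objects.
results = client.text_classification(
    "I love this movie!",
    model="distilbert-base-uncased-finetuned-sst-2-english",
)
for r in results:
    print(r.label, r.score)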