huggingface-hub 0.21.4__py3-none-any.whl → 0.22.0rc0__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
Note: this version of huggingface-hub has been flagged as a potentially problematic release.
- huggingface_hub/__init__.py +217 -1
- huggingface_hub/_commit_api.py +14 -15
- huggingface_hub/_inference_endpoints.py +12 -11
- huggingface_hub/_login.py +1 -0
- huggingface_hub/_multi_commits.py +1 -0
- huggingface_hub/_snapshot_download.py +9 -1
- huggingface_hub/_tensorboard_logger.py +1 -0
- huggingface_hub/_webhooks_payload.py +1 -0
- huggingface_hub/_webhooks_server.py +1 -0
- huggingface_hub/commands/_cli_utils.py +1 -0
- huggingface_hub/commands/delete_cache.py +1 -0
- huggingface_hub/commands/download.py +1 -0
- huggingface_hub/commands/env.py +1 -0
- huggingface_hub/commands/scan_cache.py +1 -0
- huggingface_hub/commands/upload.py +1 -0
- huggingface_hub/community.py +1 -0
- huggingface_hub/constants.py +3 -1
- huggingface_hub/errors.py +38 -0
- huggingface_hub/file_download.py +24 -24
- huggingface_hub/hf_api.py +47 -35
- huggingface_hub/hub_mixin.py +210 -54
- huggingface_hub/inference/_client.py +554 -239
- huggingface_hub/inference/_common.py +195 -41
- huggingface_hub/inference/_generated/_async_client.py +558 -239
- huggingface_hub/inference/_generated/types/__init__.py +115 -0
- huggingface_hub/inference/_generated/types/audio_classification.py +43 -0
- huggingface_hub/inference/_generated/types/audio_to_audio.py +31 -0
- huggingface_hub/inference/_generated/types/automatic_speech_recognition.py +116 -0
- huggingface_hub/inference/_generated/types/base.py +149 -0
- huggingface_hub/inference/_generated/types/chat_completion.py +106 -0
- huggingface_hub/inference/_generated/types/depth_estimation.py +29 -0
- huggingface_hub/inference/_generated/types/document_question_answering.py +85 -0
- huggingface_hub/inference/_generated/types/feature_extraction.py +19 -0
- huggingface_hub/inference/_generated/types/fill_mask.py +50 -0
- huggingface_hub/inference/_generated/types/image_classification.py +43 -0
- huggingface_hub/inference/_generated/types/image_segmentation.py +52 -0
- huggingface_hub/inference/_generated/types/image_to_image.py +55 -0
- huggingface_hub/inference/_generated/types/image_to_text.py +105 -0
- huggingface_hub/inference/_generated/types/object_detection.py +55 -0
- huggingface_hub/inference/_generated/types/question_answering.py +77 -0
- huggingface_hub/inference/_generated/types/sentence_similarity.py +28 -0
- huggingface_hub/inference/_generated/types/summarization.py +46 -0
- huggingface_hub/inference/_generated/types/table_question_answering.py +45 -0
- huggingface_hub/inference/_generated/types/text2text_generation.py +45 -0
- huggingface_hub/inference/_generated/types/text_classification.py +43 -0
- huggingface_hub/inference/_generated/types/text_generation.py +161 -0
- huggingface_hub/inference/_generated/types/text_to_audio.py +105 -0
- huggingface_hub/inference/_generated/types/text_to_image.py +57 -0
- huggingface_hub/inference/_generated/types/token_classification.py +53 -0
- huggingface_hub/inference/_generated/types/translation.py +46 -0
- huggingface_hub/inference/_generated/types/video_classification.py +47 -0
- huggingface_hub/inference/_generated/types/visual_question_answering.py +53 -0
- huggingface_hub/inference/_generated/types/zero_shot_classification.py +56 -0
- huggingface_hub/inference/_generated/types/zero_shot_image_classification.py +51 -0
- huggingface_hub/inference/_generated/types/zero_shot_object_detection.py +55 -0
- huggingface_hub/inference/_templating.py +105 -0
- huggingface_hub/inference/_types.py +4 -152
- huggingface_hub/keras_mixin.py +39 -17
- huggingface_hub/lfs.py +20 -8
- huggingface_hub/repocard.py +11 -3
- huggingface_hub/repocard_data.py +12 -2
- huggingface_hub/serialization/__init__.py +1 -0
- huggingface_hub/serialization/_base.py +1 -0
- huggingface_hub/serialization/_numpy.py +1 -0
- huggingface_hub/serialization/_tensorflow.py +1 -0
- huggingface_hub/serialization/_torch.py +1 -0
- huggingface_hub/utils/__init__.py +4 -1
- huggingface_hub/utils/_cache_manager.py +7 -0
- huggingface_hub/utils/_chunk_utils.py +1 -0
- huggingface_hub/utils/_datetime.py +1 -0
- huggingface_hub/utils/_errors.py +10 -1
- huggingface_hub/utils/_experimental.py +1 -0
- huggingface_hub/utils/_fixes.py +19 -3
- huggingface_hub/utils/_git_credential.py +1 -0
- huggingface_hub/utils/_headers.py +10 -3
- huggingface_hub/utils/_hf_folder.py +1 -0
- huggingface_hub/utils/_http.py +1 -0
- huggingface_hub/utils/_pagination.py +1 -0
- huggingface_hub/utils/_paths.py +1 -0
- huggingface_hub/utils/_runtime.py +22 -0
- huggingface_hub/utils/_subprocess.py +1 -0
- huggingface_hub/utils/_token.py +1 -0
- huggingface_hub/utils/_typing.py +29 -1
- huggingface_hub/utils/_validators.py +1 -0
- huggingface_hub/utils/endpoint_helpers.py +1 -0
- huggingface_hub/utils/logging.py +1 -1
- huggingface_hub/utils/sha.py +1 -0
- huggingface_hub/utils/tqdm.py +1 -0
- {huggingface_hub-0.21.4.dist-info → huggingface_hub-0.22.0rc0.dist-info}/METADATA +14 -15
- huggingface_hub-0.22.0rc0.dist-info/RECORD +113 -0
- {huggingface_hub-0.21.4.dist-info → huggingface_hub-0.22.0rc0.dist-info}/WHEEL +1 -1
- huggingface_hub/inference/_text_generation.py +0 -551
- huggingface_hub-0.21.4.dist-info/RECORD +0 -81
- {huggingface_hub-0.21.4.dist-info → huggingface_hub-0.22.0rc0.dist-info}/LICENSE +0 -0
- {huggingface_hub-0.21.4.dist-info → huggingface_hub-0.22.0rc0.dist-info}/entry_points.txt +0 -0
- {huggingface_hub-0.21.4.dist-info → huggingface_hub-0.22.0rc0.dist-info}/top_level.txt +0 -0
huggingface_hub/inference/_generated/types/text_generation.py
@@ -0,0 +1,161 @@
+# Inference code generated from the JSON schema spec in @huggingface/tasks.
+#
+# See:
+#   - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
+#   - specs:  https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
+from dataclasses import dataclass
+from typing import List, Literal, Optional
+
+from .base import BaseInferenceType
+
+
+@dataclass
+class TextGenerationParameters(BaseInferenceType):
+    """Additional inference parameters
+    Additional inference parameters for Text Generation
+    """
+
+    best_of: Optional[int] = None
+    """The number of sampling queries to run. Only the best one (in terms of total logprob) will
+    be returned.
+    """
+    decoder_input_details: Optional[bool] = None
+    """Whether or not to output decoder input details"""
+    details: Optional[bool] = None
+    """Whether or not to output details"""
+    do_sample: Optional[bool] = None
+    """Whether to use logits sampling instead of greedy decoding when generating new tokens."""
+    max_new_tokens: Optional[int] = None
+    """The maximum number of tokens to generate."""
+    repetition_penalty: Optional[float] = None
+    """The parameter for repetition penalty. A value of 1.0 means no penalty. See [this
+    paper](https://hf.co/papers/1909.05858) for more details.
+    """
+    return_full_text: Optional[bool] = None
+    """Whether to prepend the prompt to the generated text."""
+    seed: Optional[int] = None
+    """The random sampling seed."""
+    stop_sequences: Optional[List[str]] = None
+    """Stop generating tokens if a member of `stop_sequences` is generated."""
+    temperature: Optional[float] = None
+    """The value used to modulate the logits distribution."""
+    top_k: Optional[int] = None
+    """The number of highest probability vocabulary tokens to keep for top-k-filtering."""
+    top_p: Optional[float] = None
+    """If set to < 1, only the smallest set of most probable tokens with probabilities that add
+    up to `top_p` or higher are kept for generation.
+    """
+    truncate: Optional[int] = None
+    """Truncate input tokens to the given size."""
+    typical_p: Optional[float] = None
+    """Typical Decoding mass. See [Typical Decoding for Natural Language
+    Generation](https://hf.co/papers/2202.00666) for more information
+    """
+    watermark: Optional[bool] = None
+    """Watermarking with [A Watermark for Large Language Models](https://hf.co/papers/2301.10226)"""
+
+
+@dataclass
+class TextGenerationInput(BaseInferenceType):
+    """Inputs for Text Generation inference"""
+
+    inputs: str
+    """The text to initialize generation with"""
+    parameters: Optional[TextGenerationParameters] = None
+    """Additional inference parameters"""
+    stream: Optional[bool] = None
+    """Whether to stream output tokens"""
+
+
+TextGenerationFinishReason = Literal["length", "eos_token", "stop_sequence"]
+
+
+@dataclass
+class TextGenerationPrefillToken(BaseInferenceType):
+    id: int
+    logprob: float
+    text: str
+    """The text associated with that token"""
+
+
+@dataclass
+class TextGenerationOutputToken(BaseInferenceType):
+    """Generated token."""
+
+    id: int
+    special: bool
+    """Whether or not that token is a special one"""
+    text: str
+    """The text associated with that token"""
+    logprob: Optional[float] = None
+
+
+@dataclass
+class TextGenerationOutputSequenceDetails(BaseInferenceType):
+    finish_reason: "TextGenerationFinishReason"
+    generated_text: str
+    """The generated text"""
+    generated_tokens: int
+    """The number of generated tokens"""
+    prefill: List[TextGenerationPrefillToken]
+    tokens: List[TextGenerationOutputToken]
+    """The generated tokens and associated details"""
+    seed: Optional[int] = None
+    """The random seed used for generation"""
+    top_tokens: Optional[List[List[TextGenerationOutputToken]]] = None
+    """Most likely tokens"""
+
+
+@dataclass
+class TextGenerationOutputDetails(BaseInferenceType):
+    """When enabled, details about the generation"""
+
+    finish_reason: "TextGenerationFinishReason"
+    """The reason why the generation was stopped."""
+    generated_tokens: int
+    """The number of generated tokens"""
+    prefill: List[TextGenerationPrefillToken]
+    tokens: List[TextGenerationOutputToken]
+    """The generated tokens and associated details"""
+    best_of_sequences: Optional[List[TextGenerationOutputSequenceDetails]] = None
+    """Details about additional sequences when best_of is provided"""
+    seed: Optional[int] = None
+    """The random seed used for generation"""
+    top_tokens: Optional[List[List[TextGenerationOutputToken]]] = None
+    """Most likely tokens"""
+
+
+@dataclass
+class TextGenerationOutput(BaseInferenceType):
+    """Outputs for Text Generation inference"""
+
+    generated_text: str
+    """The generated text"""
+    details: Optional[TextGenerationOutputDetails] = None
+    """When enabled, details about the generation"""
+
+
+@dataclass
+class TextGenerationStreamDetails(BaseInferenceType):
+    """Generation details. Only available when the generation is finished."""
+
+    finish_reason: "TextGenerationFinishReason"
+    """The reason why the generation was stopped."""
+    generated_tokens: int
+    """The number of generated tokens"""
+    seed: int
+    """The random seed used for generation"""
+
+
+@dataclass
+class TextGenerationStreamOutput(BaseInferenceType):
+    """Text Generation Stream Output"""
+
+    token: TextGenerationOutputToken
+    """Generated token."""
+    details: Optional[TextGenerationStreamDetails] = None
+    """Generation details. Only available when the generation is finished."""
+    generated_text: Optional[str] = None
+    """The complete generated text. Only available when the generation is finished."""
+    index: Optional[int] = None
+    """The token index within the stream. Optional to support older clients that omit it."""
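These dataclasses double as plain request/response schemas. As a minimal sketch (mine, not part of the diff), and assuming the names are re-exported by the new types/__init__.py listed above, a request body can be assembled and serialized with the standard-library asdict; prune_none is a hypothetical helper, not a huggingface_hub API:

from dataclasses import asdict

# Import path taken from the file list above; assumes the new types/__init__.py
# (+115 lines) re-exports these names.
from huggingface_hub.inference._generated.types import (
    TextGenerationInput,
    TextGenerationParameters,
)

payload = TextGenerationInput(
    inputs="Once upon a time",
    parameters=TextGenerationParameters(do_sample=True, max_new_tokens=64, temperature=0.7),
    stream=False,
)

def prune_none(value):
    """Hypothetical helper: recursively drop unset (None) fields before sending JSON."""
    if isinstance(value, dict):
        return {k: prune_none(v) for k, v in value.items() if v is not None}
    return value

body = prune_none(asdict(payload))
# {'inputs': 'Once upon a time',
#  'parameters': {'do_sample': True, 'max_new_tokens': 64, 'temperature': 0.7},
#  'stream': False}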
huggingface_hub/inference/_generated/types/text_to_audio.py
@@ -0,0 +1,105 @@
+# Inference code generated from the JSON schema spec in @huggingface/tasks.
+#
+# See:
+#   - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
+#   - specs:  https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
+from dataclasses import dataclass
+from typing import Any, Literal, Optional, Union
+
+from .base import BaseInferenceType
+
+
+EarlyStoppingEnum = Literal["never"]
+
+
+@dataclass
+class TextToAudioGenerationParameters(BaseInferenceType):
+    """Parametrization of the text generation process
+    Ad-hoc parametrization of the text generation process
+    """
+
+    do_sample: Optional[bool] = None
+    """Whether to use sampling instead of greedy decoding when generating new tokens."""
+    early_stopping: Optional[Union[bool, "EarlyStoppingEnum"]] = None
+    """Controls the stopping condition for beam-based methods."""
+    epsilon_cutoff: Optional[float] = None
+    """If set to float strictly between 0 and 1, only tokens with a conditional probability
+    greater than epsilon_cutoff will be sampled. In the paper, suggested values range from
+    3e-4 to 9e-4, depending on the size of the model. See [Truncation Sampling as Language
+    Model Desmoothing](https://hf.co/papers/2210.15191) for more details.
+    """
+    eta_cutoff: Optional[float] = None
+    """Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to
+    float strictly between 0 and 1, a token is only considered if it is greater than either
+    eta_cutoff or sqrt(eta_cutoff) * exp(-entropy(softmax(next_token_logits))). The latter
+    term is intuitively the expected next token probability, scaled by sqrt(eta_cutoff). In
+    the paper, suggested values range from 3e-4 to 2e-3, depending on the size of the model.
+    See [Truncation Sampling as Language Model Desmoothing](https://hf.co/papers/2210.15191)
+    for more details.
+    """
+    max_length: Optional[int] = None
+    """The maximum length (in tokens) of the generated text, including the input."""
+    max_new_tokens: Optional[int] = None
+    """The maximum number of tokens to generate. Takes precedence over maxLength."""
+    min_length: Optional[int] = None
+    """The minimum length (in tokens) of the generated text, including the input."""
+    min_new_tokens: Optional[int] = None
+    """The minimum number of tokens to generate. Takes precedence over maxLength."""
+    num_beam_groups: Optional[int] = None
+    """Number of groups to divide num_beams into in order to ensure diversity among different
+    groups of beams. See [this paper](https://hf.co/papers/1610.02424) for more details.
+    """
+    num_beams: Optional[int] = None
+    """Number of beams to use for beam search."""
+    penalty_alpha: Optional[float] = None
+    """The value balances the model confidence and the degeneration penalty in contrastive
+    search decoding.
+    """
+    temperature: Optional[float] = None
+    """The value used to modulate the next token probabilities."""
+    top_k: Optional[int] = None
+    """The number of highest probability vocabulary tokens to keep for top-k-filtering."""
+    top_p: Optional[float] = None
+    """If set to float < 1, only the smallest set of most probable tokens with probabilities
+    that add up to top_p or higher are kept for generation.
+    """
+    typical_p: Optional[float] = None
+    """Local typicality measures how similar the conditional probability of predicting a target
+    token next is to the expected conditional probability of predicting a random token next,
+    given the partial text already generated. If set to float < 1, the smallest set of the
+    most locally typical tokens with probabilities that add up to typical_p or higher are
+    kept for generation. See [this paper](https://hf.co/papers/2202.00666) for more details.
+    """
+    use_cache: Optional[bool] = None
+    """Whether the model should use the past last key/values attentions to speed up decoding"""
+
+
+@dataclass
+class TextToAudioParameters(BaseInferenceType):
+    """Additional inference parameters
+    Additional inference parameters for Text To Audio
+    """
+
+    generate: Optional[TextToAudioGenerationParameters] = None
+    """Parametrization of the text generation process"""
+
+
+@dataclass
+class TextToAudioInput(BaseInferenceType):
+    """Inputs for Text To Audio inference"""
+
+    inputs: str
+    """The input text data"""
+    parameters: Optional[TextToAudioParameters] = None
+    """Additional inference parameters"""
+
+
+@dataclass
+class TextToAudioOutput(BaseInferenceType):
+    """Outputs of inference for the Text To Audio task"""
+
+    audio: Any
+    """The generated audio waveform."""
+    sampling_rate: Any
+    text_to_audio_output_sampling_rate: Optional[float] = None
+    """The sampling rate of the generated audio waveform."""
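Note the two-level parametrization here: the decoding options live on TextToAudioGenerationParameters, which nests under the generate field of TextToAudioParameters. A hedged construction sketch (same re-export assumption as the example above):

from huggingface_hub.inference._generated.types import (
    TextToAudioGenerationParameters,
    TextToAudioInput,
    TextToAudioParameters,
)

# Beam-search decoding settings, nested one level down under parameters.generate.
request = TextToAudioInput(
    inputs="Read this sentence aloud.",
    parameters=TextToAudioParameters(
        generate=TextToAudioGenerationParameters(
            do_sample=False,
            num_beams=4,
            early_stopping=True,  # EarlyStoppingEnum also allows the literal "never"
            max_new_tokens=256,
        )
    ),
)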
huggingface_hub/inference/_generated/types/text_to_image.py
@@ -0,0 +1,57 @@
+# Inference code generated from the JSON schema spec in @huggingface/tasks.
+#
+# See:
+#   - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
+#   - specs:  https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
+from dataclasses import dataclass
+from typing import Any, List, Optional
+
+from .base import BaseInferenceType
+
+
+@dataclass
+class TextToImageTargetSize(BaseInferenceType):
+    """The size in pixel of the output image"""
+
+    height: int
+    width: int
+
+
+@dataclass
+class TextToImageParameters(BaseInferenceType):
+    """Additional inference parameters
+    Additional inference parameters for Text To Image
+    """
+
+    guidance_scale: Optional[float] = None
+    """For diffusion models. A higher guidance scale value encourages the model to generate
+    images closely linked to the text prompt at the expense of lower image quality.
+    """
+    negative_prompt: Optional[List[str]] = None
+    """One or several prompt to guide what NOT to include in image generation."""
+    num_inference_steps: Optional[int] = None
+    """For diffusion models. The number of denoising steps. More denoising steps usually lead to
+    a higher quality image at the expense of slower inference.
+    """
+    scheduler: Optional[str] = None
+    """For diffusion models. Override the scheduler with a compatible one"""
+    target_size: Optional[TextToImageTargetSize] = None
+    """The size in pixel of the output image"""
+
+
+@dataclass
+class TextToImageInput(BaseInferenceType):
+    """Inputs for Text To Image inference"""
+
+    inputs: str
+    """The input text data (sometimes called "prompt\""""
+    parameters: Optional[TextToImageParameters] = None
+    """Additional inference parameters"""
+
+
+@dataclass
+class TextToImageOutput(BaseInferenceType):
+    """Outputs of inference for the Text To Image task"""
+
+    image: Any
+    """The generated image"""
huggingface_hub/inference/_generated/types/token_classification.py
@@ -0,0 +1,53 @@
+# Inference code generated from the JSON schema spec in @huggingface/tasks.
+#
+# See:
+#   - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
+#   - specs:  https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
+from dataclasses import dataclass
+from typing import Any, List, Literal, Optional
+
+from .base import BaseInferenceType
+
+
+TokenClassificationAggregationStrategy = Literal["none", "simple", "first", "average", "max"]
+
+
+@dataclass
+class TokenClassificationParameters(BaseInferenceType):
+    """Additional inference parameters
+    Additional inference parameters for Token Classification
+    """
+
+    aggregation_strategy: Optional["TokenClassificationAggregationStrategy"] = None
+    """The strategy used to fuse tokens based on model predictions"""
+    ignore_labels: Optional[List[str]] = None
+    """A list of labels to ignore"""
+    stride: Optional[int] = None
+    """The number of overlapping tokens between chunks when splitting the input text."""
+
+
+@dataclass
+class TokenClassificationInput(BaseInferenceType):
+    """Inputs for Token Classification inference"""
+
+    inputs: str
+    """The input text data"""
+    parameters: Optional[TokenClassificationParameters] = None
+    """Additional inference parameters"""
+
+
+@dataclass
+class TokenClassificationOutputElement(BaseInferenceType):
+    """Outputs of inference for the Token Classification task"""
+
+    label: Any
+    score: float
+    """The associated score / probability"""
+    end: Optional[int] = None
+    """The character position in the input where this group ends."""
+    entity_group: Optional[str] = None
+    """The predicted label for that group of tokens"""
+    start: Optional[int] = None
+    """The character position in the input where this group begins."""
+    word: Optional[str] = None
+    """The corresponding text"""
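Since start and end are character offsets into the input, each output element maps directly back onto the original text. A small illustration (hand-built elements standing in for a real server response; same import assumption as above):

from huggingface_hub.inference._generated.types import TokenClassificationOutputElement

text = "Hugging Face is based in New York."
entities = [
    TokenClassificationOutputElement(label=None, score=0.998, entity_group="ORG", start=0, end=12, word="Hugging Face"),
    TokenClassificationOutputElement(label=None, score=0.995, entity_group="LOC", start=25, end=33, word="New York"),
]

for entity in entities:
    # The offsets index the original string, so slices match the reported word.
    assert text[entity.start : entity.end] == entity.word
    print(f"{entity.entity_group}: {entity.word!r} (score={entity.score:.3f})")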
huggingface_hub/inference/_generated/types/translation.py
@@ -0,0 +1,46 @@
+# Inference code generated from the JSON schema spec in @huggingface/tasks.
+#
+# See:
+#   - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
+#   - specs:  https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
+from dataclasses import dataclass
+from typing import Any, Dict, Literal, Optional
+
+from .base import BaseInferenceType
+
+
+TranslationGenerationTruncationStrategy = Literal["do_not_truncate", "longest_first", "only_first", "only_second"]
+
+
+@dataclass
+class TranslationGenerationParameters(BaseInferenceType):
+    """Additional inference parameters
+    Additional inference parameters for Text2text Generation
+    """
+
+    clean_up_tokenization_spaces: Optional[bool] = None
+    """Whether to clean up the potential extra spaces in the text output."""
+    generate_parameters: Optional[Dict[str, Any]] = None
+    """Additional parametrization of the text generation algorithm"""
+    truncation: Optional["TranslationGenerationTruncationStrategy"] = None
+    """The truncation strategy to use"""
+
+
+@dataclass
+class TranslationInput(BaseInferenceType):
+    """Inputs for Translation inference
+    Inputs for Text2text Generation inference
+    """
+
+    inputs: str
+    """The input text data"""
+    parameters: Optional[TranslationGenerationParameters] = None
+    """Additional inference parameters"""
+
+
+@dataclass
+class TranslationOutput(BaseInferenceType):
+    """Outputs of inference for the Translation task"""
+
+    translation_text: str
+    """The translated text."""
huggingface_hub/inference/_generated/types/video_classification.py
@@ -0,0 +1,47 @@
+# Inference code generated from the JSON schema spec in @huggingface/tasks.
+#
+# See:
+#   - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
+#   - specs:  https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
+from dataclasses import dataclass
+from typing import Any, Literal, Optional
+
+from .base import BaseInferenceType
+
+
+ClassificationOutputTransform = Literal["sigmoid", "softmax", "none"]
+
+
+@dataclass
+class VideoClassificationParameters(BaseInferenceType):
+    """Additional inference parameters
+    Additional inference parameters for Video Classification
+    """
+
+    frame_sampling_rate: Optional[int] = None
+    """The sampling rate used to select frames from the video."""
+    function_to_apply: Optional["ClassificationOutputTransform"] = None
+    num_frames: Optional[int] = None
+    """The number of sampled frames to consider for classification."""
+    top_k: Optional[int] = None
+    """When specified, limits the output to the top K most probable classes."""
+
+
+@dataclass
+class VideoClassificationInput(BaseInferenceType):
+    """Inputs for Video Classification inference"""
+
+    inputs: Any
+    """The input video data"""
+    parameters: Optional[VideoClassificationParameters] = None
+    """Additional inference parameters"""
+
+
+@dataclass
+class VideoClassificationOutputElement(BaseInferenceType):
+    """Outputs of inference for the Video Classification task"""
+
+    label: str
+    """The predicted class label."""
+    score: float
+    """The corresponding probability."""
huggingface_hub/inference/_generated/types/visual_question_answering.py
@@ -0,0 +1,53 @@
+# Inference code generated from the JSON schema spec in @huggingface/tasks.
+#
+# See:
+#   - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
+#   - specs:  https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
+from dataclasses import dataclass
+from typing import Any, Optional
+
+from .base import BaseInferenceType
+
+
+@dataclass
+class VisualQuestionAnsweringInputData(BaseInferenceType):
+    """One (image, question) pair to answer"""
+
+    image: Any
+    """The image."""
+    question: Any
+    """The question to answer based on the image."""
+
+
+@dataclass
+class VisualQuestionAnsweringParameters(BaseInferenceType):
+    """Additional inference parameters
+    Additional inference parameters for Visual Question Answering
+    """
+
+    top_k: Optional[int] = None
+    """The number of answers to return (will be chosen by order of likelihood). Note that we
+    return less than topk answers if there are not enough options available within the
+    context.
+    """
+
+
+@dataclass
+class VisualQuestionAnsweringInput(BaseInferenceType):
+    """Inputs for Visual Question Answering inference"""
+
+    inputs: VisualQuestionAnsweringInputData
+    """One (image, question) pair to answer"""
+    parameters: Optional[VisualQuestionAnsweringParameters] = None
+    """Additional inference parameters"""
+
+
+@dataclass
+class VisualQuestionAnsweringOutputElement(BaseInferenceType):
+    """Outputs of inference for the Visual Question Answering task"""
+
+    label: Any
+    score: float
+    """The associated score / probability"""
+    answer: Optional[str] = None
+    """The answer to the question"""
huggingface_hub/inference/_generated/types/zero_shot_classification.py
@@ -0,0 +1,56 @@
+# Inference code generated from the JSON schema spec in @huggingface/tasks.
+#
+# See:
+#   - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
+#   - specs:  https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
+from dataclasses import dataclass
+from typing import List, Optional
+
+from .base import BaseInferenceType
+
+
+@dataclass
+class ZeroShotClassificationInputData(BaseInferenceType):
+    """The input text data, with candidate labels"""
+
+    candidate_labels: List[str]
+    """The set of possible class labels to classify the text into."""
+    text: str
+    """The text to classify"""
+
+
+@dataclass
+class ZeroShotClassificationParameters(BaseInferenceType):
+    """Additional inference parameters
+    Additional inference parameters for Zero Shot Classification
+    """
+
+    hypothesis_template: Optional[str] = None
+    """The sentence used in conjunction with candidateLabels to attempt the text classification
+    by replacing the placeholder with the candidate labels.
+    """
+    multi_label: Optional[bool] = None
+    """Whether multiple candidate labels can be true. If false, the scores are normalized such
+    that the sum of the label likelihoods for each sequence is 1. If true, the labels are
+    considered independent and probabilities are normalized for each candidate.
+    """
+
+
+@dataclass
+class ZeroShotClassificationInput(BaseInferenceType):
+    """Inputs for Zero Shot Classification inference"""
+
+    inputs: ZeroShotClassificationInputData
+    """The input text data, with candidate labels"""
+    parameters: Optional[ZeroShotClassificationParameters] = None
+    """Additional inference parameters"""
+
+
+@dataclass
+class ZeroShotClassificationOutputElement(BaseInferenceType):
+    """Outputs of inference for the Zero Shot Classification task"""
+
+    label: str
+    """The predicted class label."""
+    score: float
+    """The corresponding probability."""
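The multi_label flag controls score normalization: when false, the scores across all candidate labels sum to 1; when true, each label is scored independently. A request sketch using the nested input type (same import assumption as above):

from huggingface_hub.inference._generated.types import (
    ZeroShotClassificationInput,
    ZeroShotClassificationInputData,
    ZeroShotClassificationParameters,
)

request = ZeroShotClassificationInput(
    inputs=ZeroShotClassificationInputData(
        candidate_labels=["hardware", "politics", "sports"],
        text="The new GPU doubles training throughput.",
    ),
    parameters=ZeroShotClassificationParameters(
        hypothesis_template="This text is about {}.",
        multi_label=False,  # scores over the three labels will sum to 1
    ),
)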
huggingface_hub/inference/_generated/types/zero_shot_image_classification.py
@@ -0,0 +1,51 @@
+# Inference code generated from the JSON schema spec in @huggingface/tasks.
+#
+# See:
+#   - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
+#   - specs:  https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
+from dataclasses import dataclass
+from typing import Any, List, Optional
+
+from .base import BaseInferenceType
+
+
+@dataclass
+class ZeroShotImageClassificationInputData(BaseInferenceType):
+    """The input image data, with candidate labels"""
+
+    candidate_labels: List[str]
+    """The candidate labels for this image"""
+    image: Any
+    """The image data to classify"""
+
+
+@dataclass
+class ZeroShotImageClassificationParameters(BaseInferenceType):
+    """Additional inference parameters
+    Additional inference parameters for Zero Shot Image Classification
+    """
+
+    hypothesis_template: Optional[str] = None
+    """The sentence used in conjunction with candidateLabels to attempt the text classification
+    by replacing the placeholder with the candidate labels.
+    """
+
+
+@dataclass
+class ZeroShotImageClassificationInput(BaseInferenceType):
+    """Inputs for Zero Shot Image Classification inference"""
+
+    inputs: ZeroShotImageClassificationInputData
+    """The input image data, with candidate labels"""
+    parameters: Optional[ZeroShotImageClassificationParameters] = None
+    """Additional inference parameters"""
+
+
+@dataclass
+class ZeroShotImageClassificationOutputElement(BaseInferenceType):
+    """Outputs of inference for the Zero Shot Image Classification task"""
+
+    label: str
+    """The predicted class label."""
+    score: float
+    """The corresponding probability."""