huggingface-hub 0.21.4__py3-none-any.whl → 0.22.0__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only.

Potentially problematic release: this version of huggingface-hub might be problematic.
Files changed (97)
  1. huggingface_hub/__init__.py +217 -1
  2. huggingface_hub/_commit_api.py +14 -15
  3. huggingface_hub/_inference_endpoints.py +12 -11
  4. huggingface_hub/_login.py +1 -0
  5. huggingface_hub/_multi_commits.py +1 -0
  6. huggingface_hub/_snapshot_download.py +9 -1
  7. huggingface_hub/_tensorboard_logger.py +1 -0
  8. huggingface_hub/_webhooks_payload.py +1 -0
  9. huggingface_hub/_webhooks_server.py +1 -0
  10. huggingface_hub/commands/_cli_utils.py +1 -0
  11. huggingface_hub/commands/delete_cache.py +1 -0
  12. huggingface_hub/commands/download.py +1 -0
  13. huggingface_hub/commands/env.py +1 -0
  14. huggingface_hub/commands/scan_cache.py +1 -0
  15. huggingface_hub/commands/upload.py +1 -0
  16. huggingface_hub/community.py +1 -0
  17. huggingface_hub/constants.py +3 -1
  18. huggingface_hub/errors.py +38 -0
  19. huggingface_hub/file_download.py +102 -95
  20. huggingface_hub/hf_api.py +47 -35
  21. huggingface_hub/hf_file_system.py +77 -3
  22. huggingface_hub/hub_mixin.py +215 -54
  23. huggingface_hub/inference/_client.py +554 -239
  24. huggingface_hub/inference/_common.py +195 -41
  25. huggingface_hub/inference/_generated/_async_client.py +558 -239
  26. huggingface_hub/inference/_generated/types/__init__.py +115 -0
  27. huggingface_hub/inference/_generated/types/audio_classification.py +43 -0
  28. huggingface_hub/inference/_generated/types/audio_to_audio.py +31 -0
  29. huggingface_hub/inference/_generated/types/automatic_speech_recognition.py +116 -0
  30. huggingface_hub/inference/_generated/types/base.py +149 -0
  31. huggingface_hub/inference/_generated/types/chat_completion.py +106 -0
  32. huggingface_hub/inference/_generated/types/depth_estimation.py +29 -0
  33. huggingface_hub/inference/_generated/types/document_question_answering.py +85 -0
  34. huggingface_hub/inference/_generated/types/feature_extraction.py +19 -0
  35. huggingface_hub/inference/_generated/types/fill_mask.py +50 -0
  36. huggingface_hub/inference/_generated/types/image_classification.py +43 -0
  37. huggingface_hub/inference/_generated/types/image_segmentation.py +52 -0
  38. huggingface_hub/inference/_generated/types/image_to_image.py +55 -0
  39. huggingface_hub/inference/_generated/types/image_to_text.py +105 -0
  40. huggingface_hub/inference/_generated/types/object_detection.py +55 -0
  41. huggingface_hub/inference/_generated/types/question_answering.py +77 -0
  42. huggingface_hub/inference/_generated/types/sentence_similarity.py +28 -0
  43. huggingface_hub/inference/_generated/types/summarization.py +46 -0
  44. huggingface_hub/inference/_generated/types/table_question_answering.py +45 -0
  45. huggingface_hub/inference/_generated/types/text2text_generation.py +45 -0
  46. huggingface_hub/inference/_generated/types/text_classification.py +43 -0
  47. huggingface_hub/inference/_generated/types/text_generation.py +161 -0
  48. huggingface_hub/inference/_generated/types/text_to_audio.py +105 -0
  49. huggingface_hub/inference/_generated/types/text_to_image.py +57 -0
  50. huggingface_hub/inference/_generated/types/token_classification.py +53 -0
  51. huggingface_hub/inference/_generated/types/translation.py +46 -0
  52. huggingface_hub/inference/_generated/types/video_classification.py +47 -0
  53. huggingface_hub/inference/_generated/types/visual_question_answering.py +53 -0
  54. huggingface_hub/inference/_generated/types/zero_shot_classification.py +56 -0
  55. huggingface_hub/inference/_generated/types/zero_shot_image_classification.py +51 -0
  56. huggingface_hub/inference/_generated/types/zero_shot_object_detection.py +55 -0
  57. huggingface_hub/inference/_templating.py +105 -0
  58. huggingface_hub/inference/_types.py +4 -152
  59. huggingface_hub/keras_mixin.py +39 -17
  60. huggingface_hub/lfs.py +20 -8
  61. huggingface_hub/repocard.py +11 -3
  62. huggingface_hub/repocard_data.py +12 -2
  63. huggingface_hub/serialization/__init__.py +1 -0
  64. huggingface_hub/serialization/_base.py +1 -0
  65. huggingface_hub/serialization/_numpy.py +1 -0
  66. huggingface_hub/serialization/_tensorflow.py +1 -0
  67. huggingface_hub/serialization/_torch.py +1 -0
  68. huggingface_hub/utils/__init__.py +4 -1
  69. huggingface_hub/utils/_cache_manager.py +7 -0
  70. huggingface_hub/utils/_chunk_utils.py +1 -0
  71. huggingface_hub/utils/_datetime.py +1 -0
  72. huggingface_hub/utils/_errors.py +10 -1
  73. huggingface_hub/utils/_experimental.py +1 -0
  74. huggingface_hub/utils/_fixes.py +19 -3
  75. huggingface_hub/utils/_git_credential.py +1 -0
  76. huggingface_hub/utils/_headers.py +10 -3
  77. huggingface_hub/utils/_hf_folder.py +1 -0
  78. huggingface_hub/utils/_http.py +1 -0
  79. huggingface_hub/utils/_pagination.py +1 -0
  80. huggingface_hub/utils/_paths.py +1 -0
  81. huggingface_hub/utils/_runtime.py +22 -0
  82. huggingface_hub/utils/_subprocess.py +1 -0
  83. huggingface_hub/utils/_token.py +1 -0
  84. huggingface_hub/utils/_typing.py +29 -1
  85. huggingface_hub/utils/_validators.py +1 -0
  86. huggingface_hub/utils/endpoint_helpers.py +1 -0
  87. huggingface_hub/utils/logging.py +1 -1
  88. huggingface_hub/utils/sha.py +1 -0
  89. huggingface_hub/utils/tqdm.py +1 -0
  90. {huggingface_hub-0.21.4.dist-info → huggingface_hub-0.22.0.dist-info}/METADATA +14 -15
  91. huggingface_hub-0.22.0.dist-info/RECORD +113 -0
  92. {huggingface_hub-0.21.4.dist-info → huggingface_hub-0.22.0.dist-info}/WHEEL +1 -1
  93. huggingface_hub/inference/_text_generation.py +0 -551
  94. huggingface_hub-0.21.4.dist-info/RECORD +0 -81
  95. {huggingface_hub-0.21.4.dist-info → huggingface_hub-0.22.0.dist-info}/LICENSE +0 -0
  96. {huggingface_hub-0.21.4.dist-info → huggingface_hub-0.22.0.dist-info}/entry_points.txt +0 -0
  97. {huggingface_hub-0.21.4.dist-info → huggingface_hub-0.22.0.dist-info}/top_level.txt +0 -0
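Reading the list: the headline change is the removal of the hand-written, pydantic-backed inference/_text_generation.py module (item 93) in favor of auto-generated types under inference/_generated/types/ (items 26-56), including a new chat_completion.py, plus a new _templating.py helper. A minimal sketch of what this looks like from user code, assuming the 0.22.0 InferenceClient exposes the chat_completion task backed by those generated types (the model name and prompts below are illustrative only):

from huggingface_hub import InferenceClient

client = InferenceClient(model="HuggingFaceH4/zephyr-7b-beta")  # illustrative model

# New chat-completion task; its output types come from
# inference/_generated/types/chat_completion.py (item 31 above).
response = client.chat_completion(
    messages=[{"role": "user", "content": "What is a wheel file?"}],
    max_tokens=100,
)
print(response.choices[0].message.content)

# text_generation() remains, but its detailed output is now built from the
# generated dataclasses rather than the removed pydantic-based models.
output = client.text_generation("A wheel file is", details=True, max_new_tokens=20)
print(output.generated_text, output.details.finish_reason)

The new types/base.py (item 30) suggests the generated types share a plain dataclass base, so pydantic stops being a soft dependency of the inference client.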
{huggingface_hub-0.21.4.dist-info → huggingface_hub-0.22.0.dist-info}/WHEEL
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: bdist_wheel (0.42.0)
+ Generator: bdist_wheel (0.43.0)
  Root-Is-Purelib: true
  Tag: py3-none-any
huggingface_hub/inference/_text_generation.py
@@ -1,551 +0,0 @@
- # coding=utf-8
- # Copyright 2023-present, the HuggingFace Inc. team.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- #
- # Original implementation taken from the `text-generation` Python client (see https://pypi.org/project/text-generation/
- # and https://github.com/huggingface/text-generation-inference/tree/main/clients/python)
- #
- # Changes compared to original implementation:
- # - use pydantic.dataclasses instead of BaseModel
- # - default to Python's dataclasses if Pydantic is not installed (same implementation but no validation)
- # - added default values for all parameters (not needed in BaseModel but dataclasses yes)
- # - integrated in `huggingface_hub.InferenceClient``
- # - added `stream: bool` and `details: bool` in the `text_generation` method instead of having different methods for each use case
- import warnings
- from dataclasses import field
- from enum import Enum
- from typing import List, NoReturn, Optional
-
- from requests import HTTPError
-
- from ..utils import is_pydantic_available
-
-
- if is_pydantic_available():
-     from pydantic import validator as pydantic_validator
-     from pydantic.dataclasses import dataclass
-
-     def validator(*args, **kwargs):
-         # Pydantic v1's `@validator` is deprecated in favor of `@field_validator`. In order to support both pydantic v1
-         # and v2 without changing the logic, we catch the warning message in pydantic v2 and ignore it. If we want to
-         # support pydantic v3 in the future, we will drop support for pydantic v1 and use `pydantic.field_validator`
-         # correctly.
-         #
-         # Related:
-         # - https://docs.pydantic.dev/latest/migration/#changes-to-validators
-         # - https://github.com/huggingface/huggingface_hub/pull/1837
-         with warnings.catch_warnings():
-             warnings.filterwarnings("ignore", message="Pydantic V1 style `@validator` validators are deprecated.")
-             return pydantic_validator(*args, **kwargs)
- else:
-     # No validation if Pydantic is not installed
-     from dataclasses import dataclass  # type: ignore
-
-     def validator(x):  # type: ignore
-         return lambda y: y
-
-
- @dataclass
- class TextGenerationParameters:
-     """
-     Parameters for text generation.
-
-     Args:
-         do_sample (`bool`, *optional*):
-             Activate logits sampling. Defaults to False.
-         max_new_tokens (`int`, *optional*):
-             Maximum number of generated tokens. Defaults to 20.
-         repetition_penalty (`Optional[float]`, *optional*):
-             The parameter for repetition penalty. A value of 1.0 means no penalty. See [this paper](https://arxiv.org/pdf/1909.05858.pdf)
-             for more details. Defaults to None.
-         return_full_text (`bool`, *optional*):
-             Whether to prepend the prompt to the generated text. Defaults to False.
-         stop (`List[str]`, *optional*):
-             Stop generating tokens if a member of `stop_sequences` is generated. Defaults to an empty list.
-         seed (`Optional[int]`, *optional*):
-             Random sampling seed. Defaults to None.
-         temperature (`Optional[float]`, *optional*):
-             The value used to modulate the logits distribution. Defaults to None.
-         top_k (`Optional[int]`, *optional*):
-             The number of highest probability vocabulary tokens to keep for top-k-filtering. Defaults to None.
-         top_p (`Optional[float]`, *optional*):
-             If set to a value less than 1, only the smallest set of most probable tokens with probabilities that add up
-             to `top_p` or higher are kept for generation. Defaults to None.
-         truncate (`Optional[int]`, *optional*):
-             Truncate input tokens to the given size. Defaults to None.
-         typical_p (`Optional[float]`, *optional*):
-             Typical Decoding mass. See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666)
-             for more information. Defaults to None.
-         best_of (`Optional[int]`, *optional*):
-             Generate `best_of` sequences and return the one with the highest token logprobs. Defaults to None.
-         watermark (`bool`, *optional*):
-             Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226). Defaults to False.
-         details (`bool`, *optional*):
-             Get generation details. Defaults to False.
-         decoder_input_details (`bool`, *optional*):
-             Get decoder input token logprobs and ids. Defaults to False.
-     """
-
-     # Activate logits sampling
-     do_sample: bool = False
-     # Maximum number of generated tokens
-     max_new_tokens: int = 20
-     # The parameter for repetition penalty. 1.0 means no penalty.
-     # See [this paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
-     repetition_penalty: Optional[float] = None
-     # Whether to prepend the prompt to the generated text
-     return_full_text: bool = False
-     # Stop generating tokens if a member of `stop_sequences` is generated
-     stop: List[str] = field(default_factory=lambda: [])
-     # Random sampling seed
-     seed: Optional[int] = None
-     # The value used to module the logits distribution.
-     temperature: Optional[float] = None
-     # The number of highest probability vocabulary tokens to keep for top-k-filtering.
-     top_k: Optional[int] = None
-     # If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or
-     # higher are kept for generation.
-     top_p: Optional[float] = None
-     # truncate inputs tokens to the given size
-     truncate: Optional[int] = None
-     # Typical Decoding mass
-     # See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information
-     typical_p: Optional[float] = None
-     # Generate best_of sequences and return the one if the highest token logprobs
-     best_of: Optional[int] = None
-     # Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226)
-     watermark: bool = False
-     # Get generation details
-     details: bool = False
-     # Get decoder input token logprobs and ids
-     decoder_input_details: bool = False
-
-     @validator("best_of")
-     def valid_best_of(cls, field_value, values):
-         if field_value is not None:
-             if field_value <= 0:
-                 raise ValueError("`best_of` must be strictly positive")
-             if field_value > 1 and values["seed"] is not None:
-                 raise ValueError("`seed` must not be set when `best_of` is > 1")
-             sampling = (
-                 values["do_sample"]
-                 | (values["temperature"] is not None)
-                 | (values["top_k"] is not None)
-                 | (values["top_p"] is not None)
-                 | (values["typical_p"] is not None)
-             )
-             if field_value > 1 and not sampling:
-                 raise ValueError("you must use sampling when `best_of` is > 1")
-
-         return field_value
-
-     @validator("repetition_penalty")
-     def valid_repetition_penalty(cls, v):
-         if v is not None and v <= 0:
-             raise ValueError("`repetition_penalty` must be strictly positive")
-         return v
-
-     @validator("seed")
-     def valid_seed(cls, v):
-         if v is not None and v < 0:
-             raise ValueError("`seed` must be positive")
-         return v
-
-     @validator("temperature")
-     def valid_temp(cls, v):
-         if v is not None and v <= 0:
-             raise ValueError("`temperature` must be strictly positive")
-         return v
-
-     @validator("top_k")
-     def valid_top_k(cls, v):
-         if v is not None and v <= 0:
-             raise ValueError("`top_k` must be strictly positive")
-         return v
-
-     @validator("top_p")
-     def valid_top_p(cls, v):
-         if v is not None and (v <= 0 or v >= 1.0):
-             raise ValueError("`top_p` must be > 0.0 and < 1.0")
-         return v
-
-     @validator("truncate")
-     def valid_truncate(cls, v):
-         if v is not None and v <= 0:
-             raise ValueError("`truncate` must be strictly positive")
-         return v
-
-     @validator("typical_p")
-     def valid_typical_p(cls, v):
-         if v is not None and (v <= 0 or v >= 1.0):
-             raise ValueError("`typical_p` must be > 0.0 and < 1.0")
-         return v
-
-
- @dataclass
- class TextGenerationRequest:
-     """
-     Request object for text generation (only for internal use).
-
-     Args:
-         inputs (`str`):
-             The prompt for text generation.
-         parameters (`Optional[TextGenerationParameters]`, *optional*):
-             Generation parameters.
-         stream (`bool`, *optional*):
-             Whether to stream output tokens. Defaults to False.
-     """
-
-     # Prompt
-     inputs: str
-     # Generation parameters
-     parameters: Optional[TextGenerationParameters] = None
-     # Whether to stream output tokens
-     stream: bool = False
-
-     @validator("inputs")
-     def valid_input(cls, v):
-         if not v:
-             raise ValueError("`inputs` cannot be empty")
-         return v
-
-     @validator("stream")
-     def valid_best_of_stream(cls, field_value, values):
-         parameters = values["parameters"]
-         if parameters is not None and parameters.best_of is not None and parameters.best_of > 1 and field_value:
-             raise ValueError("`best_of` != 1 is not supported when `stream` == True")
-         return field_value
-
-     def __post_init__(self):
-         if not is_pydantic_available():
-             # If pydantic is not installed, we need to instantiate the nested dataclasses manually
-             if self.parameters is not None and isinstance(self.parameters, dict):
-                 self.parameters = TextGenerationParameters(**self.parameters)
-
-
- # Decoder input tokens
- @dataclass
- class InputToken:
-     """
-     Represents an input token.
-
-     Args:
-         id (`int`):
-             Token ID from the model tokenizer.
-         text (`str`):
-             Token text.
-         logprob (`float` or `None`):
-             Log probability of the token. Optional since the logprob of the first token cannot be computed.
-     """
-
-     # Token ID from the model tokenizer
-     id: int
-     # Token text
-     text: str
-     # Logprob
-     # Optional since the logprob of the first token cannot be computed
-     logprob: Optional[float] = None
-
-
- # Generated tokens
- @dataclass
- class Token:
-     """
-     Represents a token.
-
-     Args:
-         id (`int`):
-             Token ID from the model tokenizer.
-         text (`str`):
-             Token text.
-         logprob (`float`):
-             Log probability of the token.
-         special (`bool`):
-             Indicates whether the token is a special token. It can be used to ignore
-             tokens when concatenating.
-     """
-
-     # Token ID from the model tokenizer
-     id: int
-     # Token text
-     text: str
-     # Logprob
-     logprob: float
-     # Is the token a special token
-     # Can be used to ignore tokens when concatenating
-     special: bool
-
-
- # Generation finish reason
- class FinishReason(str, Enum):
-     # number of generated tokens == `max_new_tokens`
-     Length = "length"
-     # the model generated its end of sequence token
-     EndOfSequenceToken = "eos_token"
-     # the model generated a text included in `stop_sequences`
-     StopSequence = "stop_sequence"
-
-
- # Additional sequences when using the `best_of` parameter
- @dataclass
- class BestOfSequence:
-     """
-     Represents a best-of sequence generated during text generation.
-
-     Args:
-         generated_text (`str`):
-             The generated text.
-         finish_reason (`FinishReason`):
-             The reason for the generation to finish, represented by a `FinishReason` value.
-         generated_tokens (`int`):
-             The number of generated tokens in the sequence.
-         seed (`Optional[int]`):
-             The sampling seed if sampling was activated.
-         prefill (`List[InputToken]`):
-             The decoder input tokens. Empty if `decoder_input_details` is False. Defaults to an empty list.
-         tokens (`List[Token]`):
-             The generated tokens. Defaults to an empty list.
-     """
-
-     # Generated text
-     generated_text: str
-     # Generation finish reason
-     finish_reason: FinishReason
-     # Number of generated tokens
-     generated_tokens: int
-     # Sampling seed if sampling was activated
-     seed: Optional[int] = None
-     # Decoder input tokens, empty if decoder_input_details is False
-     prefill: List[InputToken] = field(default_factory=lambda: [])
-     # Generated tokens
-     tokens: List[Token] = field(default_factory=lambda: [])
-
-     def __post_init__(self):
-         if not is_pydantic_available():
-             # If pydantic is not installed, we need to instantiate the nested dataclasses manually
-             self.prefill = [
-                 InputToken(**input_token) if isinstance(input_token, dict) else input_token
-                 for input_token in self.prefill
-             ]
-             self.tokens = [Token(**token) if isinstance(token, dict) else token for token in self.tokens]
-
-
- # `generate` details
- @dataclass
- class Details:
-     """
-     Represents details of a text generation.
-
-     Args:
-         finish_reason (`FinishReason`):
-             The reason for the generation to finish, represented by a `FinishReason` value.
-         generated_tokens (`int`):
-             The number of generated tokens.
-         seed (`Optional[int]`):
-             The sampling seed if sampling was activated.
-         prefill (`List[InputToken]`, *optional*):
-             The decoder input tokens. Empty if `decoder_input_details` is False. Defaults to an empty list.
-         tokens (`List[Token]`):
-             The generated tokens. Defaults to an empty list.
-         best_of_sequences (`Optional[List[BestOfSequence]]`):
-             Additional sequences when using the `best_of` parameter.
-     """
-
-     # Generation finish reason
-     finish_reason: FinishReason
-     # Number of generated tokens
-     generated_tokens: int
-     # Sampling seed if sampling was activated
-     seed: Optional[int] = None
-     # Decoder input tokens, empty if decoder_input_details is False
-     prefill: List[InputToken] = field(default_factory=lambda: [])
-     # Generated tokens
-     tokens: List[Token] = field(default_factory=lambda: [])
-     # Additional sequences when using the `best_of` parameter
-     best_of_sequences: Optional[List[BestOfSequence]] = None
-
-     def __post_init__(self):
-         if not is_pydantic_available():
-             # If pydantic is not installed, we need to instantiate the nested dataclasses manually
-             self.prefill = [
-                 InputToken(**input_token) if isinstance(input_token, dict) else input_token
-                 for input_token in self.prefill
-             ]
-             self.tokens = [Token(**token) if isinstance(token, dict) else token for token in self.tokens]
-             if self.best_of_sequences is not None:
-                 self.best_of_sequences = [
-                     BestOfSequence(**best_of_sequence) if isinstance(best_of_sequence, dict) else best_of_sequence
-                     for best_of_sequence in self.best_of_sequences
-                 ]
-
-
- # `generate` return value
- @dataclass
- class TextGenerationResponse:
-     """
-     Represents a response for text generation.
-
-     Only returned when `details=True`, otherwise a string is returned.
-
-     Args:
-         generated_text (`str`):
-             The generated text.
-         details (`Optional[Details]`):
-             Generation details. Returned only if `details=True` is sent to the server.
-     """
-
-     # Generated text
-     generated_text: str
-     # Generation details
-     details: Optional[Details] = None
-
-     def __post_init__(self):
-         if not is_pydantic_available():
-             # If pydantic is not installed, we need to instantiate the nested dataclasses manually
-             if self.details is not None and isinstance(self.details, dict):
-                 self.details = Details(**self.details)
-
-
- # `generate_stream` details
- @dataclass
- class StreamDetails:
-     """
-     Represents details of a text generation stream.
-
-     Args:
-         finish_reason (`FinishReason`):
-             The reason for the generation to finish, represented by a `FinishReason` value.
-         generated_tokens (`int`):
-             The number of generated tokens.
-         seed (`Optional[int]`):
-             The sampling seed if sampling was activated.
-     """
-
-     # Generation finish reason
-     finish_reason: FinishReason
-     # Number of generated tokens
-     generated_tokens: int
-     # Sampling seed if sampling was activated
-     seed: Optional[int] = None
-
-
- # `generate_stream` return value
- @dataclass
- class TextGenerationStreamResponse:
-     """
-     Represents a response for streaming text generation.
-
-     Only returned when `details=True` and `stream=True`.
-
-     Args:
-         token (`Token`):
-             The generated token.
-         index (`Optional[int]`, *optional*):
-             The token index within the stream. Optional to support older clients that omit it.
-         generated_text (`Optional[str]`, *optional*):
-             The complete generated text. Only available when the generation is finished.
-         details (`Optional[StreamDetails]`, *optional*):
-             Generation details. Only available when the generation is finished.
-     """
-
-     # Generated token
-     token: Token
-     # The token index within the stream
-     # Optional to support older clients that omit it.
-     index: Optional[int] = None
-     # Complete generated text
-     # Only available when the generation is finished
-     generated_text: Optional[str] = None
-     # Generation details
-     # Only available when the generation is finished
-     details: Optional[StreamDetails] = None
-
-     def __post_init__(self):
-         if not is_pydantic_available():
-             # If pydantic is not installed, we need to instantiate the nested dataclasses manually
-             if isinstance(self.token, dict):
-                 self.token = Token(**self.token)
-             if self.details is not None and isinstance(self.details, dict):
-                 self.details = StreamDetails(**self.details)
-
-
- # TEXT GENERATION ERRORS
- # ----------------------
- # Text-generation errors are parsed separately to handle as much as possible the errors returned by the text generation
- # inference project (https://github.com/huggingface/text-generation-inference).
- # ----------------------
-
-
- class TextGenerationError(HTTPError):
-     """Generic error raised if text-generation went wrong."""
-
-
- # Text Generation Inference Errors
- class ValidationError(TextGenerationError):
-     """Server-side validation error."""
-
-
- class GenerationError(TextGenerationError):
-     pass
-
-
- class OverloadedError(TextGenerationError):
-     pass
-
-
- class IncompleteGenerationError(TextGenerationError):
-     pass
-
-
- class UnknownError(TextGenerationError):
-     pass
-
-
- def raise_text_generation_error(http_error: HTTPError) -> NoReturn:
-     """
-     Try to parse text-generation-inference error message and raise HTTPError in any case.
-
-     Args:
-         error (`HTTPError`):
-             The HTTPError that have been raised.
-     """
-     # Try to parse a Text Generation Inference error
-
-     try:
-         # Hacky way to retrieve payload in case of aiohttp error
-         payload = getattr(http_error, "response_error_payload", None) or http_error.response.json()
-         error = payload.get("error")
-         error_type = payload.get("error_type")
-     except Exception:  # no payload
-         raise http_error
-
-     # If error_type => more information than `hf_raise_for_status`
-     if error_type is not None:
-         exception = _parse_text_generation_error(error, error_type)
-         raise exception from http_error
-
-     # Otherwise, fallback to default error
-     raise http_error
-
-
- def _parse_text_generation_error(error: Optional[str], error_type: Optional[str]) -> TextGenerationError:
-     if error_type == "generation":
-         return GenerationError(error)  # type: ignore
-     if error_type == "incomplete_generation":
-         return IncompleteGenerationError(error)  # type: ignore
-     if error_type == "overloaded":
-         return OverloadedError(error)  # type: ignore
-     if error_type == "validation":
-         return ValidationError(error)  # type: ignore
-     return UnknownError(error)  # type: ignore
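Before it disappears, note the removed module's one reusable trick: it imports pydantic.dataclasses.dataclass when pydantic is installed and silently falls back to the stdlib dataclass (same fields, no validation) otherwise. A condensed, self-contained sketch of that pattern; the Greeting class and the find_spec probe are illustrative, not huggingface_hub API:

import importlib.util

if importlib.util.find_spec("pydantic") is not None:
    # Validating dataclass: type-checks fields at instantiation time.
    from pydantic.dataclasses import dataclass
else:
    # Same decorator surface, but no runtime validation.
    from dataclasses import dataclass  # type: ignore


@dataclass
class Greeting:
    name: str
    repeat: int = 1


greeting = Greeting(name="hub", repeat=2)  # validated only if pydantic is present
print(greeting)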
huggingface_hub-0.21.4.dist-info/RECORD
@@ -1,81 +0,0 @@
- huggingface_hub/__init__.py,sha256=nu0b7XY9x7XElBvxPdCmxewlejCC_OeLt7uR1_priWE,21502
- huggingface_hub/_commit_api.py,sha256=ShZvHuBuARsZqzDVSzH07_E1M4LYk7dekGng9CYO0B0,29194
- huggingface_hub/_commit_scheduler.py,sha256=FgfjYv3E0oK3iBxDdy45Y7t78FWkmjnBR4dRd5aZviU,13653
- huggingface_hub/_inference_endpoints.py,sha256=HLsc3vr1EIBxB5zLtJvJh02ahew-0tXuEEWlpvzOjY4,15426
- huggingface_hub/_login.py,sha256=t5R3PXZN6loc27SJF9f8_d5Q4whhcd3ftHwDEhN-cPI,15363
- huggingface_hub/_multi_commits.py,sha256=xEiS4N8ZmIrrDxVGS93mA33VoVrbhlAp5T8M_XQTMiA,12518
- huggingface_hub/_snapshot_download.py,sha256=-0Kpp1gG1m2Slkbyj4GhvdOZeqzDTChqMQ1TGFHNjnA,15377
- huggingface_hub/_space_api.py,sha256=Mae_lqTRyTWyszI5mlObJ2fn9slPxkFPcFTEVADoNQM,5255
- huggingface_hub/_tensorboard_logger.py,sha256=3W8eUS3jhZ3WyVG9Hi6rVwZC-jcCyRunBns5vIniGfA,7165
- huggingface_hub/_webhooks_payload.py,sha256=raqcWHIw98bsFvam5kkqiSEbDA8W0TU055ZlpmbUrWs,2831
- huggingface_hub/_webhooks_server.py,sha256=u3Kua_O4okXPgnH60GDIhJTDEEyIiVX6Go4CUNX4ifE,15196
- huggingface_hub/community.py,sha256=SHc_LOZYSzCuk5aA73InLObrrnW-0MJuv2e63FXwg50,12202
- huggingface_hub/constants.py,sha256=fYBWYCLnyFOOldgN6a8E8PLmDVuGNdnKhfm5Qu8SLfE,7717
- huggingface_hub/fastai_utils.py,sha256=5I7zAfgHJU_mZnxnf9wgWTHrCRu_EAV8VTangDVfE_o,16676
- huggingface_hub/file_download.py,sha256=1_7W6f0CmQYlRbmIrFNsIcHsnqHJ4UIhE-XS9cmm9WY,77558
- huggingface_hub/hf_api.py,sha256=ebPm0KHK4V2OJLZW6t_tMSacegDhA6dQTXqbRpmwG8M,367012
- huggingface_hub/hf_file_system.py,sha256=8-gNR_BsZccS2yfgub4D0GuIk75G4Tu40AJXmdnsOLg,34180
- huggingface_hub/hub_mixin.py,sha256=1latp7B3le4uLUrrZByc3ym1t1AbeejMzedM1Ssphkw,23521
- huggingface_hub/inference_api.py,sha256=UXOKu_Ez2I3hDsjguqCcCrj03WFDndehpngYiIAucdg,8331
- huggingface_hub/keras_mixin.py,sha256=fxVjwm742fwsLwbuNVt7Slo3KAjEX7sCcTudKnolPZM,18741
- huggingface_hub/lfs.py,sha256=_YA93hK_R2j8TUnMnGk2CanYty-hTPBZX0MJDHbzUqc,19333
- huggingface_hub/repocard.py,sha256=rHmWR1YJzzwJk_MS1arcqLLcpkrOrD6RrNGb87tfCHU,34291
- huggingface_hub/repocard_data.py,sha256=wPTeJX2w5dGoFFZEp2Y1tYtogrwtH2p85_A3Gq511QA,31583
- huggingface_hub/repository.py,sha256=8oNhKNvJRye3dr67cTn8faKkBSiWFgvj7bIBlOpI-8U,54489
- huggingface_hub/commands/__init__.py,sha256=AkbM2a-iGh0Vq_xAWhK3mu3uZ44km8-X5uWjKcvcrUQ,928
- huggingface_hub/commands/_cli_utils.py,sha256=VA_3cHzIlsEQmKPnfNTgJNI36UtcrxRmfB44RdbP1LA,1970
- huggingface_hub/commands/delete_cache.py,sha256=9Nn2ihdORPpkULkhAzju6aYar2rsa4laSE38rt8645I,16130
- huggingface_hub/commands/download.py,sha256=YnwGiL0--tq65pfLWKI5G5N3HGOovhIb7t2AI0vU1yM,9166
- huggingface_hub/commands/env.py,sha256=LJjOxo-m0DrvQdyhWGjnLGtWt91ec63BMI4FQ-5bWXQ,1225
- huggingface_hub/commands/huggingface_cli.py,sha256=o862C98OcZoyqCzY7mNpia1h0KaLJUgSb0y10ot8sxA,1924
- huggingface_hub/commands/lfs.py,sha256=6E769AoRxUDiIOapn1_QvTbNtdUnUiouu2F4Gopp4do,7318
- huggingface_hub/commands/scan_cache.py,sha256=nMEJxBScezxs00EWyAvJtWCjhwxCL1YlBE6qNfiT3RY,5182
- huggingface_hub/commands/upload.py,sha256=vrac37T3sYwzaf6gpVR5qWzwh4fOhqakRvDUrLEx4Kg,13621
- huggingface_hub/commands/user.py,sha256=QApZJOCQEHADhjunM3hlQ72uqHsearCiCE4SdpzGdcc,6893
- huggingface_hub/inference/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- huggingface_hub/inference/_client.py,sha256=bR8BqBKuCavMCjdadn0rsa-pvxHp1hWK3-RjBpmv3hU,88740
- huggingface_hub/inference/_common.py,sha256=7FejyCmwnVDaDCkgvgIjpDNJVs-2cBbFsPBIJDXCJSQ,11374
- huggingface_hub/inference/_text_generation.py,sha256=LErz7X6YQwrJBuEVWSI_VXlQkVfmjl73enjd0YDwkhg,20616
- huggingface_hub/inference/_types.py,sha256=qmMF3Z_Rft5FkHZ0Ij5aLkvpHP8WqfXC3SGs1dFHCwY,6068
- huggingface_hub/inference/_generated/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- huggingface_hub/inference/_generated/_async_client.py,sha256=wCh0352cLysg5TR9Rbwzui8CWvqr8mwjd7lRB_ngbqI,91618
- huggingface_hub/serialization/__init__.py,sha256=xGIpRYLosQ5MZl1Ht3_SJNCM37rKSkWLaFZrSWApyaw,909
- huggingface_hub/serialization/_base.py,sha256=6_lA2BzLDjZoBLvfzUBNws2G77FUz8VSClN7DxC9c8U,7132
- huggingface_hub/serialization/_numpy.py,sha256=aWbqBD56mDPgiLGWFdJDZcGwRm6Tt1i55nq941vzq1o,2670
- huggingface_hub/serialization/_tensorflow.py,sha256=TRYyqo0IZwPEGClu2UTRpXRAyYTj3uPqrPvnhLuWdAk,3558
- huggingface_hub/serialization/_torch.py,sha256=UtqRNhyhiRsgrsGbIXYzG_1nQVjEfd8L9JIDgg-0PJM,7686
- huggingface_hub/templates/datasetcard_template.md,sha256=W-EMqR6wndbrnZorkVv56URWPG49l7MATGeI015kTvs,5503
- huggingface_hub/templates/modelcard_template.md,sha256=4AqArS3cqdtbit5Bo-DhjcnDFR-pza5hErLLTPM4Yuc,6870
- huggingface_hub/utils/__init__.py,sha256=OrAjppTGd2eaaVJ0jix4vnjSJx9IGznkL_r2K5GZcvE,3323
- huggingface_hub/utils/_cache_assets.py,sha256=kai77HPQMfYpROouMBQCr_gdBCaeTm996Sqj0dExbNg,5728
- huggingface_hub/utils/_cache_manager.py,sha256=zRBo37DaHS6IDWyQ_53oCL4-U6p--inAGsC8DLtAQ_I,29103
- huggingface_hub/utils/_chunk_utils.py,sha256=6VRyjiGr2bPupPl1azSUTxKuJ51wdgELipwJ2YRfH5U,2129
- huggingface_hub/utils/_datetime.py,sha256=vrR5-HN19j8bg2wxWfWzIU_7fRF_zzLyxhqGZWmTYH0,2553
- huggingface_hub/utils/_deprecation.py,sha256=HZhRGGUX_QMKBBBwHHlffLtmCSK01TOpeXHefZbPfwI,4872
- huggingface_hub/utils/_errors.py,sha256=nGtrPcPSMgtgy4klg52wjozAqwxLX8Wx6i7Wg1PhAb4,14733
- huggingface_hub/utils/_experimental.py,sha256=rBx4gV2NU1dT_OfeRzsCmCWyIF4Wxcf0PdkmIASoT6o,2394
- huggingface_hub/utils/_fixes.py,sha256=wFvfTYj62Il2OwkQB_Qp0xONG6SARQ5oEkT3_FhB4rc,2437
- huggingface_hub/utils/_git_credential.py,sha256=NMfMmuqdub_QX3T2d32Jhpf3RBnf2eh4VnDhHoqyZRA,4595
- huggingface_hub/utils/_headers.py,sha256=wz0kPrpu9PHpeCIJAq8MBiHuR2HbNWGukd0QgWS6lWo,9344
- huggingface_hub/utils/_hf_folder.py,sha256=5fxKNZ8y12szgmLhxZWJsjK_zx-wopMtVoFPCuwI1VI,3612
- huggingface_hub/utils/_http.py,sha256=qJ9wlsv-SU9L4Epr8FLHznY3COIcOrUUmGMjJXfrQvI,12889
- huggingface_hub/utils/_pagination.py,sha256=VfpmMLyNCRo24fw0o_yWysMK69d9M6sSg2-nWtuypO4,1840
- huggingface_hub/utils/_paths.py,sha256=nUaxXN-R2EcWfHE8ivFWfHqEKMIvXEdUeCGDC_QHMqc,4397
- huggingface_hub/utils/_runtime.py,sha256=SkZmZuFLcpeMv1sTn6_YwnmWz592riCJGPS_-bgGMOs,10430
- huggingface_hub/utils/_safetensors.py,sha256=EE9v9HflWBUqIegn0dCGHgNu9G9Db3v2aszvG4ldPF8,4876
- huggingface_hub/utils/_subprocess.py,sha256=LW9b8TWh9rsm3pW9_5b-mVV_AtYNyLXgC6e09SthkWI,4616
- huggingface_hub/utils/_telemetry.py,sha256=jHAdgWNcL9nVvMT3ec3i78O-cwL09GnlifuokzpQjMI,4641
- huggingface_hub/utils/_token.py,sha256=e3GGABkd6zPYLE4-RdUxnH6vyen4vsvNxEl2PgStiTA,5475
- huggingface_hub/utils/_typing.py,sha256=zTA0nTJAILGveXbJKyeh6u9uIagrFgPoRqr-uCEGDQI,921
- huggingface_hub/utils/_validators.py,sha256=3ZmHubjslDRwFYe1oKyaUw6DZrc3DsuV2gABPrx7PTw,9358
- huggingface_hub/utils/endpoint_helpers.py,sha256=Q9YpLXVkMVaGEZb4PAMioghxFcepEUictZ1LBe9Uxyk,9535
- huggingface_hub/utils/insecure_hashlib.py,sha256=OjxlvtSQHpbLp9PWSrXBDJ0wHjxCBU-SQJgucEEXDbU,1058
- huggingface_hub/utils/logging.py,sha256=mARNwc5gY6apMQ9IM5zymn-RsYnFbYW3b0HDMYXmBS0,4729
- huggingface_hub/utils/sha.py,sha256=IVi7CfBthfu-ExLduY_CQltTy-tVGTbrvURCTOWKcLA,901
- huggingface_hub/utils/tqdm.py,sha256=zBWgoxxwHooOceABVREVqSNpJGcMpaByKFVDU8VbuUQ,6334
- huggingface_hub-0.21.4.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
- huggingface_hub-0.21.4.dist-info/METADATA,sha256=D8616jH4iiuDT94lfbzrnmcGfQUAaCEWoDNzu2-De0I,13176
- huggingface_hub-0.21.4.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
- huggingface_hub-0.21.4.dist-info/entry_points.txt,sha256=Y3Z2L02rBG7va_iE6RPXolIgwOdwUFONyRN3kXMxZ0g,131
- huggingface_hub-0.21.4.dist-info/top_level.txt,sha256=8KzlQJAY4miUvjAssOAJodqKOw3harNzuiwGQ9qLSSk,16
- huggingface_hub-0.21.4.dist-info/RECORD,,