huggingface-hub 0.35.1__py3-none-any.whl → 1.0.0rc1__py3-none-any.whl
This diff compares two publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
Potentially problematic release.
- huggingface_hub/__init__.py +28 -45
- huggingface_hub/_commit_api.py +28 -28
- huggingface_hub/_commit_scheduler.py +11 -8
- huggingface_hub/_inference_endpoints.py +8 -8
- huggingface_hub/_jobs_api.py +20 -20
- huggingface_hub/_login.py +13 -39
- huggingface_hub/_oauth.py +8 -8
- huggingface_hub/_snapshot_download.py +14 -28
- huggingface_hub/_space_api.py +4 -4
- huggingface_hub/_tensorboard_logger.py +5 -5
- huggingface_hub/_upload_large_folder.py +15 -15
- huggingface_hub/_webhooks_payload.py +3 -3
- huggingface_hub/_webhooks_server.py +2 -2
- huggingface_hub/cli/__init__.py +0 -14
- huggingface_hub/cli/_cli_utils.py +80 -3
- huggingface_hub/cli/auth.py +104 -150
- huggingface_hub/cli/cache.py +102 -126
- huggingface_hub/cli/download.py +93 -110
- huggingface_hub/cli/hf.py +37 -41
- huggingface_hub/cli/jobs.py +689 -1017
- huggingface_hub/cli/lfs.py +120 -143
- huggingface_hub/cli/repo.py +158 -216
- huggingface_hub/cli/repo_files.py +50 -84
- huggingface_hub/cli/system.py +6 -25
- huggingface_hub/cli/upload.py +198 -212
- huggingface_hub/cli/upload_large_folder.py +90 -105
- huggingface_hub/commands/_cli_utils.py +2 -2
- huggingface_hub/commands/delete_cache.py +11 -11
- huggingface_hub/commands/download.py +4 -13
- huggingface_hub/commands/lfs.py +4 -4
- huggingface_hub/commands/repo_files.py +2 -2
- huggingface_hub/commands/tag.py +1 -3
- huggingface_hub/commands/upload.py +4 -4
- huggingface_hub/commands/upload_large_folder.py +3 -3
- huggingface_hub/commands/user.py +4 -5
- huggingface_hub/community.py +5 -5
- huggingface_hub/constants.py +3 -41
- huggingface_hub/dataclasses.py +16 -22
- huggingface_hub/errors.py +43 -30
- huggingface_hub/fastai_utils.py +8 -9
- huggingface_hub/file_download.py +154 -253
- huggingface_hub/hf_api.py +329 -558
- huggingface_hub/hf_file_system.py +104 -62
- huggingface_hub/hub_mixin.py +32 -54
- huggingface_hub/inference/_client.py +178 -163
- huggingface_hub/inference/_common.py +38 -54
- huggingface_hub/inference/_generated/_async_client.py +219 -259
- huggingface_hub/inference/_generated/types/automatic_speech_recognition.py +3 -3
- huggingface_hub/inference/_generated/types/base.py +10 -7
- huggingface_hub/inference/_generated/types/chat_completion.py +16 -16
- huggingface_hub/inference/_generated/types/depth_estimation.py +2 -2
- huggingface_hub/inference/_generated/types/document_question_answering.py +2 -2
- huggingface_hub/inference/_generated/types/feature_extraction.py +2 -2
- huggingface_hub/inference/_generated/types/fill_mask.py +2 -2
- huggingface_hub/inference/_generated/types/sentence_similarity.py +3 -3
- huggingface_hub/inference/_generated/types/summarization.py +2 -2
- huggingface_hub/inference/_generated/types/table_question_answering.py +4 -4
- huggingface_hub/inference/_generated/types/text2text_generation.py +2 -2
- huggingface_hub/inference/_generated/types/text_generation.py +10 -10
- huggingface_hub/inference/_generated/types/text_to_video.py +2 -2
- huggingface_hub/inference/_generated/types/token_classification.py +2 -2
- huggingface_hub/inference/_generated/types/translation.py +2 -2
- huggingface_hub/inference/_generated/types/zero_shot_classification.py +2 -2
- huggingface_hub/inference/_generated/types/zero_shot_image_classification.py +2 -2
- huggingface_hub/inference/_generated/types/zero_shot_object_detection.py +1 -3
- huggingface_hub/inference/_mcp/agent.py +3 -3
- huggingface_hub/inference/_mcp/constants.py +1 -2
- huggingface_hub/inference/_mcp/mcp_client.py +33 -22
- huggingface_hub/inference/_mcp/types.py +10 -10
- huggingface_hub/inference/_mcp/utils.py +4 -4
- huggingface_hub/inference/_providers/__init__.py +2 -13
- huggingface_hub/inference/_providers/_common.py +24 -25
- huggingface_hub/inference/_providers/black_forest_labs.py +6 -6
- huggingface_hub/inference/_providers/cohere.py +3 -3
- huggingface_hub/inference/_providers/fal_ai.py +25 -25
- huggingface_hub/inference/_providers/featherless_ai.py +4 -4
- huggingface_hub/inference/_providers/fireworks_ai.py +3 -3
- huggingface_hub/inference/_providers/hf_inference.py +13 -13
- huggingface_hub/inference/_providers/hyperbolic.py +4 -4
- huggingface_hub/inference/_providers/nebius.py +10 -10
- huggingface_hub/inference/_providers/novita.py +5 -5
- huggingface_hub/inference/_providers/nscale.py +4 -4
- huggingface_hub/inference/_providers/replicate.py +15 -15
- huggingface_hub/inference/_providers/sambanova.py +6 -6
- huggingface_hub/inference/_providers/together.py +7 -7
- huggingface_hub/lfs.py +24 -33
- huggingface_hub/repocard.py +16 -17
- huggingface_hub/repocard_data.py +56 -56
- huggingface_hub/serialization/__init__.py +0 -1
- huggingface_hub/serialization/_base.py +9 -9
- huggingface_hub/serialization/_dduf.py +7 -7
- huggingface_hub/serialization/_torch.py +28 -28
- huggingface_hub/utils/__init__.py +10 -4
- huggingface_hub/utils/_auth.py +5 -5
- huggingface_hub/utils/_cache_manager.py +31 -31
- huggingface_hub/utils/_deprecation.py +1 -1
- huggingface_hub/utils/_dotenv.py +3 -3
- huggingface_hub/utils/_fixes.py +0 -10
- huggingface_hub/utils/_git_credential.py +3 -3
- huggingface_hub/utils/_headers.py +7 -29
- huggingface_hub/utils/_http.py +369 -209
- huggingface_hub/utils/_pagination.py +4 -4
- huggingface_hub/utils/_paths.py +5 -5
- huggingface_hub/utils/_runtime.py +15 -13
- huggingface_hub/utils/_safetensors.py +21 -21
- huggingface_hub/utils/_subprocess.py +9 -9
- huggingface_hub/utils/_telemetry.py +3 -3
- huggingface_hub/utils/_typing.py +3 -3
- huggingface_hub/utils/_validators.py +53 -72
- huggingface_hub/utils/_xet.py +16 -16
- huggingface_hub/utils/_xet_progress_reporting.py +1 -1
- huggingface_hub/utils/insecure_hashlib.py +3 -9
- huggingface_hub/utils/tqdm.py +3 -3
- {huggingface_hub-0.35.1.dist-info → huggingface_hub-1.0.0rc1.dist-info}/METADATA +17 -26
- huggingface_hub-1.0.0rc1.dist-info/RECORD +161 -0
- huggingface_hub/inference/_providers/publicai.py +0 -6
- huggingface_hub/inference/_providers/scaleway.py +0 -28
- huggingface_hub/inference_api.py +0 -217
- huggingface_hub/keras_mixin.py +0 -500
- huggingface_hub/repository.py +0 -1477
- huggingface_hub/serialization/_tensorflow.py +0 -95
- huggingface_hub/utils/_hf_folder.py +0 -68
- huggingface_hub-0.35.1.dist-info/RECORD +0 -168
- {huggingface_hub-0.35.1.dist-info → huggingface_hub-1.0.0rc1.dist-info}/LICENSE +0 -0
- {huggingface_hub-0.35.1.dist-info → huggingface_hub-1.0.0rc1.dist-info}/WHEEL +0 -0
- {huggingface_hub-0.35.1.dist-info → huggingface_hub-1.0.0rc1.dist-info}/entry_points.txt +0 -0
- {huggingface_hub-0.35.1.dist-info → huggingface_hub-1.0.0rc1.dist-info}/top_level.txt +0 -0
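
The hunks reproduced below are from `huggingface_hub/inference/_common.py` (+38 -54 in the list above). They capture the release's central migration: the `requests`/`aiohttp` HTTP stack is replaced by `httpx`, `typing.Dict`/`typing.List` aliases give way to built-in `dict`/`list` generics, and the SSE streaming helpers now operate on decoded `str` lines rather than raw `bytes`. As a rough, self-contained sketch of the `httpx` streaming pattern the rewritten `_async_yield_from` builds on (the function name and URL here are invented for illustration, not taken from the package):

```python
import asyncio

import httpx


async def print_stream_lines(url: str) -> None:
    # Open a streaming response and iterate over decoded text lines using
    # the same httpx primitives (`AsyncClient`, `Response.aiter_lines`)
    # that the new `_async_yield_from` helper receives.
    async with httpx.AsyncClient() as client:
        async with client.stream("GET", url) as response:
            async for line in response.aiter_lines():
                print(line.strip())


# Hypothetical endpoint, for illustration only:
# asyncio.run(print_stream_lines("https://example.com/events"))
```

Note one behavioral difference visible in the diff: the old `aiohttp` helper closed the client in a `finally:` block inside the generator, while the new `httpx` version leaves client lifetime to the caller.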
```diff
--- huggingface_hub/inference/_common.py  (0.35.1)
+++ huggingface_hub/inference/_common.py  (1.0.0rc1)
@@ -21,25 +21,13 @@ import logging
 import mimetypes
 from dataclasses import dataclass
 from pathlib import Path
-from typing import (
-    TYPE_CHECKING,
-    Any,
-    AsyncIterable,
-    BinaryIO,
-    Dict,
-    Iterable,
-    List,
-    Literal,
-    NoReturn,
-    Optional,
-    Union,
-    overload,
-)
+from typing import TYPE_CHECKING, Any, AsyncIterable, BinaryIO, Iterable, Literal, NoReturn, Optional, Union, overload
 
-
+import httpx
 
 from huggingface_hub.errors import (
     GenerationError,
+    HfHubHTTPError,
     IncompleteGenerationError,
     OverloadedError,
     TextGenerationError,
@@ -52,7 +40,6 @@ from ._generated.types import ChatCompletionStreamOutput, TextGenerationStreamOutput
 
 
 if TYPE_CHECKING:
-    from aiohttp import ClientResponse, ClientSession
     from PIL.Image import Image
 
 # TYPES
@@ -71,9 +58,9 @@ class RequestParameters:
     url: str
     task: str
     model: Optional[str]
-    json: Optional[Union[str, Dict, List]]
+    json: Optional[Union[str, dict, list]]
     data: Optional[bytes]
-    headers: Dict[str, Any]
+    headers: dict[str, Any]
 
 
 class MimeBytes(bytes):
@@ -240,7 +227,7 @@ def _b64_to_image(encoded_image: str) -> "Image":
     return Image.open(io.BytesIO(base64.b64decode(encoded_image)))
 
 
-def _bytes_to_list(content: bytes) -> List:
+def _bytes_to_list(content: bytes) -> list:
     """Parse bytes from a Response object into a Python list.
 
     Expects the response body to be JSON-encoded data.
@@ -251,7 +238,7 @@ def _bytes_to_list(content: bytes) -> List:
     return json.loads(content.decode())
 
 
-def _bytes_to_dict(content: bytes) -> Dict:
+def _bytes_to_dict(content: bytes) -> dict:
     """Parse bytes from a Response object into a Python dictionary.
 
     Expects the response body to be JSON-encoded data.
@@ -271,7 +258,7 @@ def _bytes_to_image(content: bytes) -> "Image":
     return Image.open(io.BytesIO(content))
 
 
-def _as_dict(response: Union[bytes, Dict]) -> Dict:
+def _as_dict(response: Union[bytes, dict]) -> dict:
     return json.loads(response) if isinstance(response, bytes) else response
 
 
@@ -279,13 +266,13 @@ def _as_dict(response: Union[bytes, Dict]) -> Dict:
 
 
 def _stream_text_generation_response(
-    bytes_output_as_lines: Iterable[bytes], details: bool
+    output_lines: Iterable[str], details: bool
 ) -> Union[Iterable[str], Iterable[TextGenerationStreamOutput]]:
     """Used in `InferenceClient.text_generation`."""
     # Parse ServerSentEvents
-    for byte_payload in bytes_output_as_lines:
+    for line in output_lines:
         try:
-            output = _format_text_generation_stream_output(byte_payload, details)
+            output = _format_text_generation_stream_output(line, details)
         except StopIteration:
             break
         if output is not None:
@@ -293,13 +280,13 @@ def _stream_text_generation_response(
 
 
 async def _async_stream_text_generation_response(
-    bytes_output_as_lines: AsyncIterable[bytes], details: bool
+    output_lines: AsyncIterable[str], details: bool
 ) -> Union[AsyncIterable[str], AsyncIterable[TextGenerationStreamOutput]]:
     """Used in `AsyncInferenceClient.text_generation`."""
     # Parse ServerSentEvents
-    async for byte_payload in bytes_output_as_lines:
+    async for line in output_lines:
         try:
-            output = _format_text_generation_stream_output(byte_payload, details)
+            output = _format_text_generation_stream_output(line, details)
         except StopIteration:
             break
         if output is not None:
@@ -307,17 +294,17 @@ async def _async_stream_text_generation_response(
 
 
 def _format_text_generation_stream_output(
-    byte_payload: bytes, details: bool
+    line: str, details: bool
 ) -> Optional[Union[str, TextGenerationStreamOutput]]:
-    if not byte_payload.startswith(b"data:"):
+    if not line.startswith("data:"):
         return None  # empty line
 
-    if byte_payload.strip() == b"data: [DONE]":
+    if line.strip() == "data: [DONE]":
         raise StopIteration("[DONE] signal received.")
 
     # Decode payload
-    payload = byte_payload.decode("utf-8")
-    json_payload = json.loads(payload.lstrip("data:").rstrip("/n"))
+    payload = line.lstrip("data:").rstrip("/n")
+    json_payload = json.loads(payload)
 
     # Either an error as being returned
     if json_payload.get("error") is not None:
@@ -329,12 +316,12 @@ def _format_text_generation_stream_output(
 
 
 def _stream_chat_completion_response(
-    bytes_lines: Iterable[bytes],
+    lines: Iterable[str],
 ) -> Iterable[ChatCompletionStreamOutput]:
     """Used in `InferenceClient.chat_completion` if model is served with TGI."""
-    for item in bytes_lines:
+    for line in lines:
         try:
-            output = _format_chat_completion_stream_output(item)
+            output = _format_chat_completion_stream_output(line)
         except StopIteration:
             break
         if output is not None:
@@ -342,12 +329,12 @@ def _stream_chat_completion_response(
 
 
 async def _async_stream_chat_completion_response(
-    bytes_lines: AsyncIterable[bytes],
+    lines: AsyncIterable[str],
 ) -> AsyncIterable[ChatCompletionStreamOutput]:
     """Used in `AsyncInferenceClient.chat_completion`."""
-    async for item in bytes_lines:
+    async for line in lines:
         try:
-            output = _format_chat_completion_stream_output(item)
+            output = _format_chat_completion_stream_output(line)
         except StopIteration:
             break
         if output is not None:
@@ -355,17 +342,16 @@ async def _async_stream_chat_completion_response(
 
 
 def _format_chat_completion_stream_output(
-    byte_payload: bytes,
+    line: str,
 ) -> Optional[ChatCompletionStreamOutput]:
-    if not byte_payload.startswith(b"data:"):
+    if not line.startswith("data:"):
         return None  # empty line
 
-    if byte_payload.strip() == b"data: [DONE]":
+    if line.strip() == "data: [DONE]":
         raise StopIteration("[DONE] signal received.")
 
     # Decode payload
-    payload = byte_payload.decode("utf-8")
-    json_payload = json.loads(payload.lstrip("data:").rstrip("/n"))
+    json_payload = json.loads(line.lstrip("data:").strip())
 
     # Either an error as being returned
     if json_payload.get("error") is not None:
@@ -375,13 +361,9 @@ def _format_chat_completion_stream_output(
     return ChatCompletionStreamOutput.parse_obj_as_instance(json_payload)
 
 
-async def _async_yield_from(client: "ClientSession", response: "ClientResponse") -> AsyncIterable[bytes]:
-    try:
-        async for byte_payload in response.content:
-            yield byte_payload.strip()
-    finally:
-        # Always close the underlying HTTP session to avoid resource leaks
-        await client.close()
+async def _async_yield_from(client: httpx.AsyncClient, response: httpx.Response) -> AsyncIterable[str]:
+    async for line in response.aiter_lines():
+        yield line.strip()
 
 
 # "TGI servers" are servers running with the `text-generation-inference` backend.
@@ -402,14 +384,14 @@ async def _async_yield_from(client: "ClientSession", response: "ClientResponse")
 # For more details, see https://github.com/huggingface/text-generation-inference and
 # https://huggingface.co/docs/api-inference/detailed_parameters#text-generation-task.
 
-_UNSUPPORTED_TEXT_GENERATION_KWARGS: Dict[Optional[str], List[str]] = {}
+_UNSUPPORTED_TEXT_GENERATION_KWARGS: dict[Optional[str], list[str]] = {}
 
 
-def _set_unsupported_text_generation_kwargs(model: Optional[str], unsupported_kwargs: List[str]) -> None:
+def _set_unsupported_text_generation_kwargs(model: Optional[str], unsupported_kwargs: list[str]) -> None:
     _UNSUPPORTED_TEXT_GENERATION_KWARGS.setdefault(model, []).extend(unsupported_kwargs)
 
 
-def _get_unsupported_text_generation_kwargs(model: Optional[str]) -> List[str]:
+def _get_unsupported_text_generation_kwargs(model: Optional[str]) -> list[str]:
     return _UNSUPPORTED_TEXT_GENERATION_KWARGS.get(model, [])
 
 
@@ -420,7 +402,7 @@ def _get_unsupported_text_generation_kwargs(model: Optional[str]) -> List[str]:
 # ----------------------
 
 
-def raise_text_generation_error(http_error: HTTPError) -> NoReturn:
+def raise_text_generation_error(http_error: HfHubHTTPError) -> NoReturn:
     """
     Try to parse text-generation-inference error message and raise HTTPError in any case.
 
@@ -429,6 +411,8 @@ def raise_text_generation_error(http_error: HTTPError) -> NoReturn:
             The HTTPError that have been raised.
     """
     # Try to parse a Text Generation Inference error
+    if http_error.response is None:
+        raise http_error
 
     try:
         # Hacky way to retrieve payload in case of aiohttp error
```