huggingface-hub 1.0.0rc1__py3-none-any.whl → 1.0.0rc2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of huggingface-hub might be problematic.
Files changed (34)
  1. huggingface_hub/__init__.py +1 -1
  2. huggingface_hub/_commit_api.py +126 -66
  3. huggingface_hub/_commit_scheduler.py +4 -7
  4. huggingface_hub/_login.py +9 -15
  5. huggingface_hub/_tensorboard_logger.py +2 -5
  6. huggingface_hub/_webhooks_server.py +8 -20
  7. huggingface_hub/cli/repo.py +137 -5
  8. huggingface_hub/dataclasses.py +3 -12
  9. huggingface_hub/fastai_utils.py +22 -32
  10. huggingface_hub/file_download.py +18 -21
  11. huggingface_hub/hf_api.py +258 -410
  12. huggingface_hub/hf_file_system.py +17 -44
  13. huggingface_hub/inference/_client.py +25 -47
  14. huggingface_hub/inference/_generated/_async_client.py +25 -47
  15. huggingface_hub/inference/_mcp/agent.py +2 -5
  16. huggingface_hub/inference/_mcp/mcp_client.py +2 -5
  17. huggingface_hub/inference/_providers/__init__.py +11 -0
  18. huggingface_hub/inference/_providers/_common.py +1 -0
  19. huggingface_hub/inference/_providers/publicai.py +6 -0
  20. huggingface_hub/inference/_providers/scaleway.py +28 -0
  21. huggingface_hub/lfs.py +14 -8
  22. huggingface_hub/repocard.py +12 -16
  23. huggingface_hub/serialization/_base.py +3 -6
  24. huggingface_hub/serialization/_torch.py +16 -34
  25. huggingface_hub/utils/_cache_manager.py +41 -71
  26. huggingface_hub/utils/_chunk_utils.py +2 -3
  27. huggingface_hub/utils/_http.py +27 -30
  28. huggingface_hub/utils/logging.py +8 -11
  29. {huggingface_hub-1.0.0rc1.dist-info → huggingface_hub-1.0.0rc2.dist-info}/METADATA +2 -2
  30. {huggingface_hub-1.0.0rc1.dist-info → huggingface_hub-1.0.0rc2.dist-info}/RECORD +34 -32
  31. {huggingface_hub-1.0.0rc1.dist-info → huggingface_hub-1.0.0rc2.dist-info}/LICENSE +0 -0
  32. {huggingface_hub-1.0.0rc1.dist-info → huggingface_hub-1.0.0rc2.dist-info}/WHEEL +0 -0
  33. {huggingface_hub-1.0.0rc1.dist-info → huggingface_hub-1.0.0rc2.dist-info}/entry_points.txt +0 -0
  34. {huggingface_hub-1.0.0rc1.dist-info → huggingface_hub-1.0.0rc2.dist-info}/top_level.txt +0 -0
huggingface_hub/hf_file_system.py CHANGED
@@ -60,13 +60,10 @@ class HfFileSystem(fsspec.AbstractFileSystem):
  """
  Access a remote Hugging Face Hub repository as if were a local file system.

- <Tip warning={true}>
-
- [`HfFileSystem`] provides fsspec compatibility, which is useful for libraries that require it (e.g., reading
- Hugging Face datasets directly with `pandas`). However, it introduces additional overhead due to this compatibility
- layer. For better performance and reliability, it's recommended to use `HfApi` methods when possible.
-
- </Tip>
+ > [!WARNING]
+ > [`HfFileSystem`] provides fsspec compatibility, which is useful for libraries that require it (e.g., reading
+ > Hugging Face datasets directly with `pandas`). However, it introduces additional overhead due to this compatibility
+ > layer. For better performance and reliability, it's recommended to use `HfApi` methods when possible.

  Args:
  token (`str` or `bool`, *optional*):
@@ -301,11 +298,8 @@ class HfFileSystem(fsspec.AbstractFileSystem):

  For more details, refer to [fsspec documentation](https://filesystem-spec.readthedocs.io/en/latest/api.html#fsspec.spec.AbstractFileSystem.rm).

- <Tip warning={true}>
-
- Note: When possible, use `HfApi.delete_file()` for better performance.
-
- </Tip>
+ > [!WARNING]
+ > Note: When possible, use `HfApi.delete_file()` for better performance.

  Args:
  path (`str`):
@@ -345,11 +339,8 @@ class HfFileSystem(fsspec.AbstractFileSystem):

  For more details, refer to [fsspec documentation](https://filesystem-spec.readthedocs.io/en/latest/api.html#fsspec.spec.AbstractFileSystem.ls).

- <Tip warning={true}>
-
- Note: When possible, use `HfApi.list_repo_tree()` for better performance.
-
- </Tip>
+ > [!WARNING]
+ > Note: When possible, use `HfApi.list_repo_tree()` for better performance.

  Args:
  path (`str`):
@@ -580,11 +571,8 @@ class HfFileSystem(fsspec.AbstractFileSystem):
  """
  Copy a file within or between repositories.

- <Tip warning={true}>
-
- Note: When possible, use `HfApi.upload_file()` for better performance.
-
- </Tip>
+ > [!WARNING]
+ > Note: When possible, use `HfApi.upload_file()` for better performance.

  Args:
  path1 (`str`):
@@ -657,11 +645,8 @@ class HfFileSystem(fsspec.AbstractFileSystem):

  For more details, refer to [fsspec documentation](https://filesystem-spec.readthedocs.io/en/latest/api.html#fsspec.spec.AbstractFileSystem.info).

- <Tip warning={true}>
-
- Note: When possible, use `HfApi.get_paths_info()` or `HfApi.repo_info()` for better performance.
-
- </Tip>
+ > [!WARNING]
+ > Note: When possible, use `HfApi.get_paths_info()` or `HfApi.repo_info()` for better performance.

  Args:
  path (`str`):
@@ -758,11 +743,8 @@ class HfFileSystem(fsspec.AbstractFileSystem):

  For more details, refer to [fsspec documentation](https://filesystem-spec.readthedocs.io/en/latest/api.html#fsspec.spec.AbstractFileSystem.exists).

- <Tip warning={true}>
-
- Note: When possible, use `HfApi.file_exists()` for better performance.
-
- </Tip>
+ > [!WARNING]
+ > Note: When possible, use `HfApi.file_exists()` for better performance.

  Args:
  path (`str`):
@@ -843,11 +825,8 @@ class HfFileSystem(fsspec.AbstractFileSystem):
  """
  Copy single remote file to local.

- <Tip warning={true}>
-
- Note: When possible, use `HfApi.hf_hub_download()` for better performance.
-
- </Tip>
+ > [!WARNING]
+ > Note: When possible, use `HfApi.hf_hub_download()` for better performance.

  Args:
  rpath (`str`):
@@ -959,13 +938,7 @@ class HfFileSystemFile(fsspec.spec.AbstractBufferedFile):
  repo_type=self.resolved_path.repo_type,
  endpoint=self.fs.endpoint,
  )
- r = http_backoff(
- "GET",
- url,
- headers=headers,
- retry_on_status_codes=(500, 502, 503, 504),
- timeout=constants.HF_HUB_DOWNLOAD_TIMEOUT,
- )
+ r = http_backoff("GET", url, headers=headers, timeout=constants.HF_HUB_DOWNLOAD_TIMEOUT)
  hf_raise_for_status(r)
  return r.content
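A quick sketch (not part of the diff) of the two access styles the recurring warning contrasts. The repo id and file name below are placeholders, not real identifiers:

```python
from huggingface_hub import HfFileSystem, hf_hub_download

# fsspec-compatible layer: convenient for pandas & friends, but with extra overhead.
fs = HfFileSystem()
with fs.open("datasets/SOME_USER/SOME_DATASET/data.csv", "r") as f:  # placeholder path
    header = f.readline()

# HfApi-level helper: the route the docstrings now recommend for performance.
local_path = hf_hub_download(
    repo_id="SOME_USER/SOME_DATASET",  # placeholder repo id
    filename="data.csv",
    repo_type="dataset",
)
```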
huggingface_hub/inference/_client.py CHANGED
@@ -135,7 +135,7 @@ class InferenceClient:
  Note: for better compatibility with OpenAI's client, `model` has been aliased as `base_url`. Those 2
  arguments are mutually exclusive. If a URL is passed as `model` or `base_url` for chat completion, the `(/v1)/chat/completions` suffix path will be appended to the URL.
  provider (`str`, *optional*):
- Name of the provider to use for inference. Can be `"black-forest-labs"`, `"cerebras"`, `"cohere"`, `"fal-ai"`, `"featherless-ai"`, `"fireworks-ai"`, `"groq"`, `"hf-inference"`, `"hyperbolic"`, `"nebius"`, `"novita"`, `"nscale"`, `"openai"`, `"replicate"`, "sambanova"` or `"together"`.
+ Name of the provider to use for inference. Can be `"black-forest-labs"`, `"cerebras"`, `"cohere"`, `"fal-ai"`, `"featherless-ai"`, `"fireworks-ai"`, `"groq"`, `"hf-inference"`, `"hyperbolic"`, `"nebius"`, `"novita"`, `"nscale"`, `"openai"`, `publicai`, `"replicate"`, `"sambanova"`, `"scaleway"` or `"together"`.
  Defaults to "auto" i.e. the first of the providers available for the model, sorted by the user's order in https://hf.co/settings/inference-providers.
  If model is a URL or `base_url` is passed, then `provider` is not used.
  token (`str`, *optional*):
@@ -560,18 +560,14 @@ class InferenceClient:
  """
  A method for completing conversations using a specified language model.

- <Tip>
+ > [!TIP]
+ > The `client.chat_completion` method is aliased as `client.chat.completions.create` for compatibility with OpenAI's client.
+ > Inputs and outputs are strictly the same and using either syntax will yield the same results.
+ > Check out the [Inference guide](https://huggingface.co/docs/huggingface_hub/guides/inference#openai-compatibility)
+ > for more details about OpenAI's compatibility.

- The `client.chat_completion` method is aliased as `client.chat.completions.create` for compatibility with OpenAI's client.
- Inputs and outputs are strictly the same and using either syntax will yield the same results.
- Check out the [Inference guide](https://huggingface.co/docs/huggingface_hub/guides/inference#openai-compatibility)
- for more details about OpenAI's compatibility.
-
- </Tip>
-
- <Tip>
- You can pass provider-specific parameters to the model by using the `extra_body` argument.
- </Tip>
+ > [!TIP]
+ > You can pass provider-specific parameters to the model by using the `extra_body` argument.

  Args:
  messages (List of [`ChatCompletionInputMessage`]):
@@ -1217,11 +1213,8 @@ class InferenceClient:
  """
  Perform image segmentation on the given image using the specified model.

- <Tip warning={true}>
-
- You must have `PIL` installed if you want to work with images (`pip install Pillow`).
-
- </Tip>
+ > [!WARNING]
+ > You must have `PIL` installed if you want to work with images (`pip install Pillow`).

  Args:
  image (`Union[str, Path, bytes, BinaryIO, PIL.Image.Image]`):
@@ -1289,11 +1282,8 @@ class InferenceClient:
  """
  Perform image-to-image translation using a specified model.

- <Tip warning={true}>
-
- You must have `PIL` installed if you want to work with images (`pip install Pillow`).
-
- </Tip>
+ > [!WARNING]
+ > You must have `PIL` installed if you want to work with images (`pip install Pillow`).

  Args:
  image (`Union[str, Path, bytes, BinaryIO, PIL.Image.Image]`):
@@ -1482,11 +1472,8 @@ class InferenceClient:
  """
  Perform object detection on the given image using the specified model.

- <Tip warning={true}>
-
- You must have `PIL` installed if you want to work with images (`pip install Pillow`).
-
- </Tip>
+ > [!WARNING]
+ > You must have `PIL` installed if you want to work with images (`pip install Pillow`).

  Args:
  image (`Union[str, Path, bytes, BinaryIO, PIL.Image.Image]`):
@@ -2128,12 +2115,9 @@ class InferenceClient:
  """
  Given a prompt, generate the following text.

- <Tip>
-
- If you want to generate a response from chat messages, you should use the [`InferenceClient.chat_completion`] method.
- It accepts a list of messages instead of a single text prompt and handles the chat templating for you.
-
- </Tip>
+ > [!TIP]
+ > If you want to generate a response from chat messages, you should use the [`InferenceClient.chat_completion`] method.
+ > It accepts a list of messages instead of a single text prompt and handles the chat templating for you.

  Args:
  prompt (`str`):
@@ -2456,15 +2440,11 @@ class InferenceClient:
  """
  Generate an image based on a given text using a specified model.

- <Tip warning={true}>
-
- You must have `PIL` installed if you want to work with images (`pip install Pillow`).
-
- </Tip>
+ > [!WARNING]
+ > You must have `PIL` installed if you want to work with images (`pip install Pillow`).

- <Tip>
- You can pass provider-specific parameters to the model by using the `extra_body` argument.
- </Tip>
+ > [!TIP]
+ > You can pass provider-specific parameters to the model by using the `extra_body` argument.

  Args:
  prompt (`str`):
@@ -2597,9 +2577,8 @@ class InferenceClient:
  """
  Generate a video based on a given text.

- <Tip>
- You can pass provider-specific parameters to the model by using the `extra_body` argument.
- </Tip>
+ > [!TIP]
+ > You can pass provider-specific parameters to the model by using the `extra_body` argument.

  Args:
  prompt (`str`):
@@ -2705,9 +2684,8 @@ class InferenceClient:
  """
  Synthesize an audio of a voice pronouncing a given text.

- <Tip>
- You can pass provider-specific parameters to the model by using the `extra_body` argument.
- </Tip>
+ > [!TIP]
+ > You can pass provider-specific parameters to the model by using the `extra_body` argument.

  Args:
  text (`str`):
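A minimal sketch (not part of the diff) of the two tips above, the OpenAI-style alias and `extra_body`. The model id is only an example, and the `extra_body` field is hypothetical since valid fields depend on the chosen provider:

```python
from huggingface_hub import InferenceClient

client = InferenceClient(provider="auto")  # picks the first provider configured for the model
response = client.chat.completions.create(  # alias of client.chat_completion
    model="meta-llama/Llama-3.1-8B-Instruct",  # example model id
    messages=[{"role": "user", "content": "Summarize Git LFS in one sentence."}],
    extra_body={"provider_specific_flag": True},  # hypothetical provider-specific field
)
print(response.choices[0].message.content)
```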
huggingface_hub/inference/_generated/_async_client.py CHANGED
@@ -126,7 +126,7 @@ class AsyncInferenceClient:
  Note: for better compatibility with OpenAI's client, `model` has been aliased as `base_url`. Those 2
  arguments are mutually exclusive. If a URL is passed as `model` or `base_url` for chat completion, the `(/v1)/chat/completions` suffix path will be appended to the URL.
  provider (`str`, *optional*):
- Name of the provider to use for inference. Can be `"black-forest-labs"`, `"cerebras"`, `"cohere"`, `"fal-ai"`, `"featherless-ai"`, `"fireworks-ai"`, `"groq"`, `"hf-inference"`, `"hyperbolic"`, `"nebius"`, `"novita"`, `"nscale"`, `"openai"`, `"replicate"`, "sambanova"` or `"together"`.
+ Name of the provider to use for inference. Can be `"black-forest-labs"`, `"cerebras"`, `"cohere"`, `"fal-ai"`, `"featherless-ai"`, `"fireworks-ai"`, `"groq"`, `"hf-inference"`, `"hyperbolic"`, `"nebius"`, `"novita"`, `"nscale"`, `"openai"`, `publicai`, `"replicate"`, `"sambanova"`, `"scaleway"` or `"together"`.
  Defaults to "auto" i.e. the first of the providers available for the model, sorted by the user's order in https://hf.co/settings/inference-providers.
  If model is a URL or `base_url` is passed, then `provider` is not used.
  token (`str`, *optional*):
@@ -580,18 +580,14 @@ class AsyncInferenceClient:
  """
  A method for completing conversations using a specified language model.

- <Tip>
+ > [!TIP]
+ > The `client.chat_completion` method is aliased as `client.chat.completions.create` for compatibility with OpenAI's client.
+ > Inputs and outputs are strictly the same and using either syntax will yield the same results.
+ > Check out the [Inference guide](https://huggingface.co/docs/huggingface_hub/guides/inference#openai-compatibility)
+ > for more details about OpenAI's compatibility.

- The `client.chat_completion` method is aliased as `client.chat.completions.create` for compatibility with OpenAI's client.
- Inputs and outputs are strictly the same and using either syntax will yield the same results.
- Check out the [Inference guide](https://huggingface.co/docs/huggingface_hub/guides/inference#openai-compatibility)
- for more details about OpenAI's compatibility.
-
- </Tip>
-
- <Tip>
- You can pass provider-specific parameters to the model by using the `extra_body` argument.
- </Tip>
+ > [!TIP]
+ > You can pass provider-specific parameters to the model by using the `extra_body` argument.

  Args:
  messages (List of [`ChatCompletionInputMessage`]):
@@ -1247,11 +1243,8 @@ class AsyncInferenceClient:
  """
  Perform image segmentation on the given image using the specified model.

- <Tip warning={true}>
-
- You must have `PIL` installed if you want to work with images (`pip install Pillow`).
-
- </Tip>
+ > [!WARNING]
+ > You must have `PIL` installed if you want to work with images (`pip install Pillow`).

  Args:
  image (`Union[str, Path, bytes, BinaryIO, PIL.Image.Image]`):
@@ -1320,11 +1313,8 @@ class AsyncInferenceClient:
  """
  Perform image-to-image translation using a specified model.

- <Tip warning={true}>
-
- You must have `PIL` installed if you want to work with images (`pip install Pillow`).
-
- </Tip>
+ > [!WARNING]
+ > You must have `PIL` installed if you want to work with images (`pip install Pillow`).

  Args:
  image (`Union[str, Path, bytes, BinaryIO, PIL.Image.Image]`):
@@ -1516,11 +1506,8 @@ class AsyncInferenceClient:
  """
  Perform object detection on the given image using the specified model.

- <Tip warning={true}>
-
- You must have `PIL` installed if you want to work with images (`pip install Pillow`).
-
- </Tip>
+ > [!WARNING]
+ > You must have `PIL` installed if you want to work with images (`pip install Pillow`).

  Args:
  image (`Union[str, Path, bytes, BinaryIO, PIL.Image.Image]`):
@@ -2170,12 +2157,9 @@ class AsyncInferenceClient:
  """
  Given a prompt, generate the following text.

- <Tip>
-
- If you want to generate a response from chat messages, you should use the [`InferenceClient.chat_completion`] method.
- It accepts a list of messages instead of a single text prompt and handles the chat templating for you.
-
- </Tip>
+ > [!TIP]
+ > If you want to generate a response from chat messages, you should use the [`InferenceClient.chat_completion`] method.
+ > It accepts a list of messages instead of a single text prompt and handles the chat templating for you.

  Args:
  prompt (`str`):
@@ -2499,15 +2483,11 @@ class AsyncInferenceClient:
  """
  Generate an image based on a given text using a specified model.

- <Tip warning={true}>
-
- You must have `PIL` installed if you want to work with images (`pip install Pillow`).
-
- </Tip>
+ > [!WARNING]
+ > You must have `PIL` installed if you want to work with images (`pip install Pillow`).

- <Tip>
- You can pass provider-specific parameters to the model by using the `extra_body` argument.
- </Tip>
+ > [!TIP]
+ > You can pass provider-specific parameters to the model by using the `extra_body` argument.

  Args:
  prompt (`str`):
@@ -2641,9 +2621,8 @@ class AsyncInferenceClient:
  """
  Generate a video based on a given text.

- <Tip>
- You can pass provider-specific parameters to the model by using the `extra_body` argument.
- </Tip>
+ > [!TIP]
+ > You can pass provider-specific parameters to the model by using the `extra_body` argument.

  Args:
  prompt (`str`):
@@ -2749,9 +2728,8 @@ class AsyncInferenceClient:
  """
  Synthesize an audio of a voice pronouncing a given text.

- <Tip>
- You can pass provider-specific parameters to the model by using the `extra_body` argument.
- </Tip>
+ > [!TIP]
+ > You can pass provider-specific parameters to the model by using the `extra_body` argument.

  Args:
  text (`str`):
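The async client mirrors the same docstring changes. A minimal sketch (not part of the diff), assuming a configured token and a model reachable through one of the listed providers:

```python
import asyncio

from huggingface_hub import AsyncInferenceClient


async def main() -> None:
    client = AsyncInferenceClient()  # provider defaults to "auto"
    response = await client.chat_completion(
        model="meta-llama/Llama-3.1-8B-Instruct",  # example model id
        messages=[{"role": "user", "content": "Hello!"}],
        max_tokens=16,
    )
    print(response.choices[0].message.content)


asyncio.run(main())
```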
huggingface_hub/inference/_mcp/agent.py CHANGED
@@ -14,11 +14,8 @@ class Agent(MCPClient):
  """
  Implementation of a Simple Agent, which is a simple while loop built right on top of an [`MCPClient`].

- <Tip warning={true}>
-
- This class is experimental and might be subject to breaking changes in the future without prior notice.
-
- </Tip>
+ > [!WARNING]
+ > This class is experimental and might be subject to breaking changes in the future without prior notice.

  Args:
  model (`str`, *optional*):
huggingface_hub/inference/_mcp/mcp_client.py CHANGED
@@ -56,11 +56,8 @@ class MCPClient:
  """
  Client for connecting to one or more MCP servers and processing chat completions with tools.

- <Tip warning={true}>
-
- This class is experimental and might be subject to breaking changes in the future without prior notice.
-
- </Tip>
+ > [!WARNING]
+ > This class is experimental and might be subject to breaking changes in the future without prior notice.

  Args:
  model (`str`, `optional`):
huggingface_hub/inference/_providers/__init__.py CHANGED
@@ -36,8 +36,10 @@ from .nebius import (
  from .novita import NovitaConversationalTask, NovitaTextGenerationTask, NovitaTextToVideoTask
  from .nscale import NscaleConversationalTask, NscaleTextToImageTask
  from .openai import OpenAIConversationalTask
+ from .publicai import PublicAIConversationalTask
  from .replicate import ReplicateImageToImageTask, ReplicateTask, ReplicateTextToImageTask, ReplicateTextToSpeechTask
  from .sambanova import SambanovaConversationalTask, SambanovaFeatureExtractionTask
+ from .scaleway import ScalewayConversationalTask, ScalewayFeatureExtractionTask
  from .together import TogetherConversationalTask, TogetherTextGenerationTask, TogetherTextToImageTask


@@ -58,8 +60,10 @@ PROVIDER_T = Literal[
  "novita",
  "nscale",
  "openai",
+ "publicai",
  "replicate",
  "sambanova",
+ "scaleway",
  "together",
  ]

@@ -144,6 +148,9 @@ PROVIDERS: dict[PROVIDER_T, dict[str, TaskProviderHelper]] = {
  "openai": {
  "conversational": OpenAIConversationalTask(),
  },
+ "publicai": {
+ "conversational": PublicAIConversationalTask(),
+ },
  "replicate": {
  "image-to-image": ReplicateImageToImageTask(),
  "text-to-image": ReplicateTextToImageTask(),
@@ -154,6 +161,10 @@ PROVIDERS: dict[PROVIDER_T, dict[str, TaskProviderHelper]] = {
  "conversational": SambanovaConversationalTask(),
  "feature-extraction": SambanovaFeatureExtractionTask(),
  },
+ "scaleway": {
+ "conversational": ScalewayConversationalTask(),
+ "feature-extraction": ScalewayFeatureExtractionTask(),
+ },
  "together": {
  "text-to-image": TogetherTextToImageTask(),
  "conversational": TogetherConversationalTask(),
huggingface_hub/inference/_providers/_common.py CHANGED
@@ -33,6 +33,7 @@ HARDCODED_MODEL_INFERENCE_MAPPING: dict[str, dict[str, InferenceProviderMapping]
  "nscale": {},
  "replicate": {},
  "sambanova": {},
+ "scaleway": {},
  "together": {},
  }
huggingface_hub/inference/_providers/publicai.py ADDED
@@ -0,0 +1,6 @@
+from ._common import BaseConversationalTask
+
+
+class PublicAIConversationalTask(BaseConversationalTask):
+    def __init__(self):
+        super().__init__(provider="publicai", base_url="https://api.publicai.co")
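The new provider registers only a conversational task. A usage sketch (not part of the diff), assuming a valid token and a chat model actually routed to `publicai`; the model id below is a placeholder:

```python
from huggingface_hub import InferenceClient

client = InferenceClient(provider="publicai")  # provider name introduced in this release
response = client.chat_completion(
    model="some-org/some-chat-model",  # placeholder model id
    messages=[{"role": "user", "content": "Say hello in French."}],
    max_tokens=32,
)
print(response.choices[0].message.content)
```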
huggingface_hub/inference/_providers/scaleway.py ADDED
@@ -0,0 +1,28 @@
+from typing import Any, Dict, Optional, Union
+
+from huggingface_hub.inference._common import RequestParameters, _as_dict
+
+from ._common import BaseConversationalTask, InferenceProviderMapping, TaskProviderHelper, filter_none
+
+
+class ScalewayConversationalTask(BaseConversationalTask):
+    def __init__(self):
+        super().__init__(provider="scaleway", base_url="https://api.scaleway.ai")
+
+
+class ScalewayFeatureExtractionTask(TaskProviderHelper):
+    def __init__(self):
+        super().__init__(provider="scaleway", base_url="https://api.scaleway.ai", task="feature-extraction")
+
+    def _prepare_route(self, mapped_model: str, api_key: str) -> str:
+        return "/v1/embeddings"
+
+    def _prepare_payload_as_dict(
+        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[Dict]:
+        parameters = filter_none(parameters)
+        return {"input": inputs, "model": provider_mapping_info.provider_id, **parameters}
+
+    def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any:
+        embeddings = _as_dict(response)["data"]
+        return [embedding["embedding"] for embedding in embeddings]
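Scaleway registers both a conversational task and a feature-extraction task routed to `/v1/embeddings`. A sketch of the embedding path (not part of the diff), with a placeholder model id:

```python
from huggingface_hub import InferenceClient

client = InferenceClient(provider="scaleway")
embedding = client.feature_extraction(
    "Hello world",
    model="some-org/some-embedding-model",  # placeholder model id
)
print(len(embedding))  # embedding vector(s) for the input text
```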
huggingface_hub/lfs.py CHANGED
@@ -107,7 +107,8 @@ def post_lfs_batch_info(
  revision: Optional[str] = None,
  endpoint: Optional[str] = None,
  headers: Optional[dict[str, str]] = None,
- ) -> tuple[list[dict], list[dict]]:
+ transfers: Optional[list[str]] = None,
+ ) -> tuple[list[dict], list[dict], Optional[str]]:
  """
  Requests the LFS batch endpoint to retrieve upload instructions

@@ -126,11 +127,14 @@
  The git revision to upload to.
  headers (`dict`, *optional*):
  Additional headers to include in the request
+ transfers (`list`, *optional*):
+ List of transfer methods to use. Defaults to ["basic", "multipart"].

  Returns:
- `LfsBatchInfo`: 2-tuple:
+ `LfsBatchInfo`: 3-tuple:
  - First element is the list of upload instructions from the server
- - Second element is an list of errors, if any
+ - Second element is a list of errors, if any
+ - Third element is the chosen transfer adapter if provided by the server (e.g. "basic", "multipart", "xet")

  Raises:
  [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
@@ -145,7 +149,7 @@
  batch_url = f"{endpoint}/{url_prefix}{repo_id}.git/info/lfs/objects/batch"
  payload: dict = {
  "operation": "upload",
- "transfers": ["basic", "multipart"],
+ "transfers": transfers if transfers is not None else ["basic", "multipart"],
  "objects": [
  {
  "oid": upload.sha256.hex(),
@@ -171,9 +175,13 @@
  if not isinstance(objects, list):
  raise ValueError("Malformed response from server")

+ chosen_transfer = batch_info.get("transfer")
+ chosen_transfer = chosen_transfer if isinstance(chosen_transfer, str) else None
+
  return (
  [_validate_batch_actions(obj) for obj in objects if "error" not in obj],
  [_validate_batch_error(obj) for obj in objects if "error" in obj],
+ chosen_transfer,
  )
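The new third return element comes from the `transfer` field of the LFS batch response. A self-contained sketch of that negotiation logic (mirroring the lines added above, not the full `post_lfs_batch_info` call):

```python
from typing import Optional


def pick_transfer(batch_info: dict) -> Optional[str]:
    # The server may echo the transfer adapter it selected, e.g. "basic",
    # "multipart" or "xet"; anything missing or non-string is treated as None.
    chosen = batch_info.get("transfer")
    return chosen if isinstance(chosen, str) else None


print(pick_transfer({"transfer": "xet", "objects": []}))  # -> xet
print(pick_transfer({"objects": []}))                     # -> None
```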
@@ -313,7 +321,7 @@ def _upload_single_part(operation: "CommitOperationAdd", upload_url: str) -> None
  """
  with operation.as_file(with_tqdm=True) as fileobj:
  # S3 might raise a transient 500 error -> let's retry if that happens
- response = http_backoff("PUT", upload_url, data=fileobj, retry_on_status_codes=(500, 502, 503, 504))
+ response = http_backoff("PUT", upload_url, data=fileobj)
  hf_raise_for_status(response)


@@ -397,9 +405,7 @@ def _upload_parts_iteratively(
  read_limit=chunk_size,
  ) as fileobj_slice:
  # S3 might raise a transient 500 error -> let's retry if that happens
- part_upload_res = http_backoff(
- "PUT", part_upload_url, data=fileobj_slice, retry_on_status_codes=(500, 502, 503, 504)
- )
+ part_upload_res = http_backoff("PUT", part_upload_url, data=fileobj_slice)
  hf_raise_for_status(part_upload_res)
  headers.append(part_upload_res.headers)
  return headers # type: ignore
huggingface_hub/repocard.py CHANGED
@@ -64,13 +64,11 @@ class RepoCard:
  '\\n# My repo\\n'

  ```
- <Tip>
- Raises the following error:
-
- - [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
- when the content of the repo card metadata is not a dictionary.
-
- </Tip>
+ > [!TIP]
+ > Raises the following error:
+ >
+ > - [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
+ > when the content of the repo card metadata is not a dictionary.
  """

  # Set the content of the RepoCard, as well as underlying .data and .text attributes.
@@ -198,15 +196,13 @@ class RepoCard:
  The type of Hugging Face repo to push to. Options are "model", "dataset", and "space".
  If this function is called from a child class, the default will be the child class's `repo_type`.

- <Tip>
- Raises the following errors:
-
- - [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
- if the card fails validation checks.
- - [`HfHubHTTPError`]
- if the request to the Hub API fails for any other reason.
-
- </Tip>
+ > [!TIP]
+ > Raises the following errors:
+ >
+ > - [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
+ > if the card fails validation checks.
+ > - [`HTTPError`](https://requests.readthedocs.io/en/latest/api/#requests.HTTPError)
+ > if the request to the Hub API fails for any other reason.
  """

  # If repo type is provided, otherwise, use the repo type of the card.
huggingface_hub/serialization/_base.py CHANGED
@@ -62,12 +62,9 @@ def split_state_dict_into_shards_factory(
  have tensors of sizes [6GB, 6GB, 2GB, 6GB, 2GB, 2GB] they will get sharded as [6GB], [6+2GB], [6+2+2GB] and not
  [6+2+2GB], [6+2GB], [6GB].

- <Tip warning={true}>
-
- If one of the model's tensor is bigger than `max_shard_size`, it will end up in its own shard which will have a
- size greater than `max_shard_size`.
-
- </Tip>
+ > [!WARNING]
+ > If one of the model's tensor is bigger than `max_shard_size`, it will end up in its own shard which will have a
+ > size greater than `max_shard_size`.

  Args:
  state_dict (`dict[str, Tensor]`):
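A small sketch of the greedy sharding behaviour described in that docstring, using the public torch wrapper built on this factory (not part of the diff; assumes `torch` is installed, with tiny tensor sizes for illustration):

```python
import torch

from huggingface_hub import split_torch_state_dict_into_shards

state_dict = {
    "layer1.weight": torch.zeros(3_000_000),  # ~12 MB each in float32
    "layer2.weight": torch.zeros(3_000_000),
    "layer3.weight": torch.zeros(3_000_000),
}

# Tensors are assigned greedily in order; a single tensor larger than
# max_shard_size would still end up alone in an oversized shard.
split = split_torch_state_dict_into_shards(state_dict, max_shard_size="20MB")
print(split.is_sharded)
print({name: len(tensors) for name, tensors in split.filename_to_tensors.items()})
```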