huggingface-hub 0.35.3__py3-none-any.whl → 0.36.0rc0__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to a supported public registry. It is provided for informational purposes only.

Files changed (42)
  1. huggingface_hub/__init__.py +7 -1
  2. huggingface_hub/_commit_api.py +125 -65
  3. huggingface_hub/_commit_scheduler.py +4 -7
  4. huggingface_hub/_jobs_api.py +1 -1
  5. huggingface_hub/_login.py +9 -15
  6. huggingface_hub/_tensorboard_logger.py +2 -5
  7. huggingface_hub/_webhooks_server.py +9 -21
  8. huggingface_hub/cli/download.py +2 -2
  9. huggingface_hub/cli/repo_files.py +1 -1
  10. huggingface_hub/cli/upload.py +1 -1
  11. huggingface_hub/cli/upload_large_folder.py +1 -1
  12. huggingface_hub/community.py +16 -8
  13. huggingface_hub/fastai_utils.py +22 -32
  14. huggingface_hub/file_download.py +17 -20
  15. huggingface_hub/hf_api.py +514 -541
  16. huggingface_hub/hf_file_system.py +45 -40
  17. huggingface_hub/inference/_client.py +28 -49
  18. huggingface_hub/inference/_generated/_async_client.py +28 -49
  19. huggingface_hub/inference/_generated/types/image_to_image.py +6 -2
  20. huggingface_hub/inference/_mcp/agent.py +2 -5
  21. huggingface_hub/inference/_mcp/mcp_client.py +2 -5
  22. huggingface_hub/inference/_providers/__init__.py +5 -0
  23. huggingface_hub/inference/_providers/_common.py +1 -0
  24. huggingface_hub/inference/_providers/clarifai.py +13 -0
  25. huggingface_hub/keras_mixin.py +3 -6
  26. huggingface_hub/lfs.py +12 -4
  27. huggingface_hub/repocard.py +12 -16
  28. huggingface_hub/repository.py +15 -21
  29. huggingface_hub/serialization/_base.py +3 -6
  30. huggingface_hub/serialization/_tensorflow.py +3 -6
  31. huggingface_hub/serialization/_torch.py +17 -35
  32. huggingface_hub/utils/_cache_manager.py +41 -71
  33. huggingface_hub/utils/_chunk_utils.py +2 -3
  34. huggingface_hub/utils/_http.py +29 -34
  35. huggingface_hub/utils/_validators.py +2 -2
  36. huggingface_hub/utils/logging.py +8 -11
  37. {huggingface_hub-0.35.3.dist-info → huggingface_hub-0.36.0rc0.dist-info}/METADATA +2 -2
  38. {huggingface_hub-0.35.3.dist-info → huggingface_hub-0.36.0rc0.dist-info}/RECORD +42 -41
  39. {huggingface_hub-0.35.3.dist-info → huggingface_hub-0.36.0rc0.dist-info}/LICENSE +0 -0
  40. {huggingface_hub-0.35.3.dist-info → huggingface_hub-0.36.0rc0.dist-info}/WHEEL +0 -0
  41. {huggingface_hub-0.35.3.dist-info → huggingface_hub-0.36.0rc0.dist-info}/entry_points.txt +0 -0
  42. {huggingface_hub-0.35.3.dist-info → huggingface_hub-0.36.0rc0.dist-info}/top_level.txt +0 -0

huggingface_hub/hf_file_system.py CHANGED
@@ -59,13 +59,10 @@ class HfFileSystem(fsspec.AbstractFileSystem):
     """
     Access a remote Hugging Face Hub repository as if were a local file system.
 
-    <Tip warning={true}>
-
-    [`HfFileSystem`] provides fsspec compatibility, which is useful for libraries that require it (e.g., reading
-    Hugging Face datasets directly with `pandas`). However, it introduces additional overhead due to this compatibility
-    layer. For better performance and reliability, it's recommended to use `HfApi` methods when possible.
-
-    </Tip>
+    > [!WARNING]
+    > [`HfFileSystem`] provides fsspec compatibility, which is useful for libraries that require it (e.g., reading
+    > Hugging Face datasets directly with `pandas`). However, it introduces additional overhead due to this compatibility
+    > layer. For better performance and reliability, it's recommended to use `HfApi` methods when possible.
 
     Args:
         token (`str` or `bool`, *optional*):
@@ -104,18 +101,22 @@ class HfFileSystem(fsspec.AbstractFileSystem):
         *args,
         endpoint: Optional[str] = None,
         token: Union[bool, str, None] = None,
+        block_size: Optional[int] = None,
         **storage_options,
     ):
         super().__init__(*args, **storage_options)
         self.endpoint = endpoint or constants.ENDPOINT
         self.token = token
         self._api = HfApi(endpoint=endpoint, token=token)
+        self.block_size = block_size
         # Maps (repo_type, repo_id, revision) to a 2-tuple with:
         # * the 1st element indicating whether the repositoy and the revision exist
         # * the 2nd element being the exception raised if the repository or revision doesn't exist
         self._repo_and_revision_exists_cache: Dict[
             Tuple[str, str, Optional[str]], Tuple[bool, Optional[Exception]]
         ] = {}
+        # Maps parent directory path to path infos
+        self.dircache: Dict[str, List[Dict[str, Any]]] = {}
 
     def _repo_and_revision_exist(
         self, repo_type: str, repo_id: str, revision: Optional[str]
@@ -267,12 +268,15 @@ class HfFileSystem(fsspec.AbstractFileSystem):
         block_size: Optional[int] = None,
         **kwargs,
     ) -> "HfFileSystemFile":
+        block_size = block_size if block_size is not None else self.block_size
+        if block_size is not None:
+            kwargs["block_size"] = block_size
         if "a" in mode:
             raise NotImplementedError("Appending to remote files is not yet supported.")
         if block_size == 0:
-            return HfFileSystemStreamFile(self, path, mode=mode, revision=revision, block_size=block_size, **kwargs)
+            return HfFileSystemStreamFile(self, path, mode=mode, revision=revision, **kwargs)
         else:
-            return HfFileSystemFile(self, path, mode=mode, revision=revision, block_size=block_size, **kwargs)
+            return HfFileSystemFile(self, path, mode=mode, revision=revision, **kwargs)
 
     def _rm(self, path: str, revision: Optional[str] = None, **kwargs) -> None:
         resolved_path = self.resolve_path(path, revision=revision)
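
A minimal sketch of how the new filesystem-level `block_size` default could be used; per the hunk above, a value of `0` makes `open()` fall back to the streaming file implementation. The dataset path is a placeholder:

```python
from huggingface_hub import HfFileSystem

# block_size=0 -> open() returns the streaming file implementation by default
fs = HfFileSystem(block_size=0)

# hypothetical repo/file path, for illustration only
with fs.open("datasets/some-user/some-dataset/data.csv", "r") as f:
    first_line = f.readline()
```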
@@ -300,11 +304,8 @@ class HfFileSystem(fsspec.AbstractFileSystem):
 
         For more details, refer to [fsspec documentation](https://filesystem-spec.readthedocs.io/en/latest/api.html#fsspec.spec.AbstractFileSystem.rm).
 
-        <Tip warning={true}>
-
-        Note: When possible, use `HfApi.delete_file()` for better performance.
-
-        </Tip>
+        > [!WARNING]
+        > Note: When possible, use `HfApi.delete_file()` for better performance.
 
         Args:
             path (`str`):
@@ -344,11 +345,8 @@ class HfFileSystem(fsspec.AbstractFileSystem):
 
         For more details, refer to [fsspec documentation](https://filesystem-spec.readthedocs.io/en/latest/api.html#fsspec.spec.AbstractFileSystem.ls).
 
-        <Tip warning={true}>
-
-        Note: When possible, use `HfApi.list_repo_tree()` for better performance.
-
-        </Tip>
+        > [!WARNING]
+        > Note: When possible, use `HfApi.list_repo_tree()` for better performance.
 
         Args:
             path (`str`):
@@ -447,7 +445,7 @@ class HfFileSystem(fsspec.AbstractFileSystem):
             common_path_depth = common_path[len(path) :].count("/")
             maxdepth -= common_path_depth
         out = [o for o in out if not o["name"].startswith(common_path + "/")]
-        for cached_path in self.dircache:
+        for cached_path in list(self.dircache):
            if cached_path.startswith(common_path + "/"):
                self.dircache.pop(cached_path, None)
        self.dircache.pop(common_path, None)
@@ -596,11 +594,8 @@ class HfFileSystem(fsspec.AbstractFileSystem):
         """
         Copy a file within or between repositories.
 
-        <Tip warning={true}>
-
-        Note: When possible, use `HfApi.upload_file()` for better performance.
-
-        </Tip>
+        > [!WARNING]
+        > Note: When possible, use `HfApi.upload_file()` for better performance.
 
         Args:
             path1 (`str`):
@@ -673,11 +668,8 @@ class HfFileSystem(fsspec.AbstractFileSystem):
 
         For more details, refer to [fsspec documentation](https://filesystem-spec.readthedocs.io/en/latest/api.html#fsspec.spec.AbstractFileSystem.info).
 
-        <Tip warning={true}>
-
-        Note: When possible, use `HfApi.get_paths_info()` or `HfApi.repo_info()` for better performance.
-
-        </Tip>
+        > [!WARNING]
+        > Note: When possible, use `HfApi.get_paths_info()` or `HfApi.repo_info()` for better performance.
 
         Args:
             path (`str`):
@@ -774,11 +766,8 @@ class HfFileSystem(fsspec.AbstractFileSystem):
 
         For more details, refer to [fsspec documentation](https://filesystem-spec.readthedocs.io/en/latest/api.html#fsspec.spec.AbstractFileSystem.exists).
 
-        <Tip warning={true}>
-
-        Note: When possible, use `HfApi.file_exists()` for better performance.
-
-        </Tip>
+        > [!WARNING]
+        > Note: When possible, use `HfApi.file_exists()` for better performance.
 
         Args:
             path (`str`):
@@ -859,11 +848,8 @@ class HfFileSystem(fsspec.AbstractFileSystem):
         """
         Copy single remote file to local.
 
-        <Tip warning={true}>
-
-        Note: When possible, use `HfApi.hf_hub_download()` for better performance.
-
-        </Tip>
+        > [!WARNING]
+        > Note: When possible, use `HfApi.hf_hub_download()` for better performance.
 
         Args:
             rpath (`str`):
@@ -943,6 +929,18 @@ class HfFileSystem(fsspec.AbstractFileSystem):
         # See https://github.com/huggingface/huggingface_hub/issues/1733
         raise NotImplementedError("Transactional commits are not supported.")
 
+    def __reduce__(self):
+        # re-populate the instance cache at HfFileSystem._cache and re-populate the cache attributes of every instance
+        return make_instance, (
+            type(self),
+            self.storage_args,
+            self.storage_options,
+            {
+                "dircache": self.dircache,
+                "_repo_and_revision_exists_cache": self._repo_and_revision_exists_cache,
+            },
+        )
+
 
 class HfFileSystemFile(fsspec.spec.AbstractBufferedFile):
     def __init__(self, fs: HfFileSystem, path: str, revision: Optional[str] = None, **kwargs):
@@ -1143,3 +1141,10 @@ def _raise_file_not_found(path: str, err: Optional[Exception]) -> NoReturn:
 
 def reopen(fs: HfFileSystem, path: str, mode: str, block_size: int, cache_type: str):
     return fs.open(path, mode=mode, block_size=block_size, cache_type=cache_type)
+
+
+def make_instance(cls, args, kwargs, instance_cache_attributes_dict):
+    fs = cls(*args, **kwargs)
+    for attr, cached_value in instance_cache_attributes_dict.items():
+        setattr(fs, attr, cached_value)
+    return fs
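
A minimal sketch of what the new `__reduce__`/`make_instance` pair enables: instances can be pickled (e.g. handed to worker processes) with their per-instance caches carried along. This assumes network access to the Hub; `gpt2` is just an example repo:

```python
import pickle

from huggingface_hub import HfFileSystem

fs = HfFileSystem()
fs.ls("gpt2", detail=False)  # populates fs.dircache

restored = pickle.loads(pickle.dumps(fs))
# the dircache and the repo/revision existence cache survive the round trip
assert restored.dircache == fs.dircache
```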

huggingface_hub/inference/_client.py CHANGED
@@ -130,7 +130,7 @@ class InferenceClient:
             Note: for better compatibility with OpenAI's client, `model` has been aliased as `base_url`. Those 2
             arguments are mutually exclusive. If a URL is passed as `model` or `base_url` for chat completion, the `(/v1)/chat/completions` suffix path will be appended to the URL.
         provider (`str`, *optional*):
-            Name of the provider to use for inference. Can be `"black-forest-labs"`, `"cerebras"`, `"cohere"`, `"fal-ai"`, `"featherless-ai"`, `"fireworks-ai"`, `"groq"`, `"hf-inference"`, `"hyperbolic"`, `"nebius"`, `"novita"`, `"nscale"`, `"openai"`, `publicai`, `"replicate"`, `"sambanova"`, `"scaleway"`, `"together"` or `"zai-org"`.
+            Name of the provider to use for inference. Can be `"black-forest-labs"`, `"cerebras"`, `"clarifai"`, `"cohere"`, `"fal-ai"`, `"featherless-ai"`, `"fireworks-ai"`, `"groq"`, `"hf-inference"`, `"hyperbolic"`, `"nebius"`, `"novita"`, `"nscale"`, `"openai"`, `publicai`, `"replicate"`, `"sambanova"`, `"scaleway"`, `"together"` or `"zai-org"`.
             Defaults to "auto" i.e. the first of the providers available for the model, sorted by the user's order in https://hf.co/settings/inference-providers.
             If model is a URL or `base_url` is passed, then `provider` is not used.
         token (`str`, *optional*):
@@ -545,18 +545,14 @@ class InferenceClient:
         """
         A method for completing conversations using a specified language model.
 
-        <Tip>
+        > [!TIP]
+        > The `client.chat_completion` method is aliased as `client.chat.completions.create` for compatibility with OpenAI's client.
+        > Inputs and outputs are strictly the same and using either syntax will yield the same results.
+        > Check out the [Inference guide](https://huggingface.co/docs/huggingface_hub/guides/inference#openai-compatibility)
+        > for more details about OpenAI's compatibility.
 
-        The `client.chat_completion` method is aliased as `client.chat.completions.create` for compatibility with OpenAI's client.
-        Inputs and outputs are strictly the same and using either syntax will yield the same results.
-        Check out the [Inference guide](https://huggingface.co/docs/huggingface_hub/guides/inference#openai-compatibility)
-        for more details about OpenAI's compatibility.
-
-        </Tip>
-
-        <Tip>
-        You can pass provider-specific parameters to the model by using the `extra_body` argument.
-        </Tip>
+        > [!TIP]
+        > You can pass provider-specific parameters to the model by using the `extra_body` argument.
 
         Args:
             messages (List of [`ChatCompletionInputMessage`]):
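
A minimal sketch of the OpenAI-style alias and the `extra_body` passthrough described in the tips above. The model ID is a placeholder and the `extra_body` key is a hypothetical provider-specific parameter:

```python
from huggingface_hub import InferenceClient

client = InferenceClient()
response = client.chat.completions.create(           # alias of client.chat_completion
    model="meta-llama/Llama-3.1-8B-Instruct",         # placeholder model ID
    messages=[{"role": "user", "content": "Say hello."}],
    extra_body={"some_provider_option": True},        # hypothetical provider-specific parameter
)
print(response.choices[0].message.content)
```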
@@ -1202,11 +1198,8 @@ class InferenceClient:
         """
         Perform image segmentation on the given image using the specified model.
 
-        <Tip warning={true}>
-
-        You must have `PIL` installed if you want to work with images (`pip install Pillow`).
-
-        </Tip>
+        > [!WARNING]
+        > You must have `PIL` installed if you want to work with images (`pip install Pillow`).
 
         Args:
             image (`Union[str, Path, bytes, BinaryIO, PIL.Image.Image]`):
@@ -1274,11 +1267,8 @@ class InferenceClient:
         """
         Perform image-to-image translation using a specified model.
 
-        <Tip warning={true}>
-
-        You must have `PIL` installed if you want to work with images (`pip install Pillow`).
-
-        </Tip>
+        > [!WARNING]
+        > You must have `PIL` installed if you want to work with images (`pip install Pillow`).
 
         Args:
             image (`Union[str, Path, bytes, BinaryIO, PIL.Image.Image]`):
@@ -1297,7 +1287,8 @@ class InferenceClient:
                 The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
                 Inference Endpoint. This parameter overrides the model defined at the instance level. Defaults to None.
             target_size (`ImageToImageTargetSize`, *optional*):
-                The size in pixel of the output image. This parameter is only supported by some providers and for specific models. It will be ignored when unsupported.
+                The size in pixels of the output image. This parameter is only supported by some providers and for
+                specific models. It will be ignored when unsupported.
 
         Returns:
             `Image`: The translated image.
@@ -1467,11 +1458,8 @@ class InferenceClient:
         """
         Perform object detection on the given image using the specified model.
 
-        <Tip warning={true}>
-
-        You must have `PIL` installed if you want to work with images (`pip install Pillow`).
-
-        </Tip>
+        > [!WARNING]
+        > You must have `PIL` installed if you want to work with images (`pip install Pillow`).
 
         Args:
             image (`Union[str, Path, bytes, BinaryIO, PIL.Image.Image]`):
@@ -1613,7 +1601,7 @@ class InferenceClient:
                 Defaults to None.
 
         Returns:
-            `List[float]`: The embedding representing the input text.
+            `List[float]`: The similarity scores between the main sentence and the given comparison sentences.
 
         Raises:
             [`InferenceTimeoutError`]:
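
A minimal sketch illustrating the corrected return value: one similarity score per comparison sentence rather than an embedding. The model ID is a placeholder:

```python
from huggingface_hub import InferenceClient

client = InferenceClient(model="sentence-transformers/all-MiniLM-L6-v2")  # placeholder model ID
scores = client.sentence_similarity(
    "The weather is nice today.",
    other_sentences=["It is sunny outside.", "I dislike broccoli."],
)
# scores is a List[float] with one entry per comparison sentence, e.g. [0.82, 0.03] (illustrative values)
```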
@@ -2113,12 +2101,9 @@ class InferenceClient:
         """
         Given a prompt, generate the following text.
 
-        <Tip>
-
-        If you want to generate a response from chat messages, you should use the [`InferenceClient.chat_completion`] method.
-        It accepts a list of messages instead of a single text prompt and handles the chat templating for you.
-
-        </Tip>
+        > [!TIP]
+        > If you want to generate a response from chat messages, you should use the [`InferenceClient.chat_completion`] method.
+        > It accepts a list of messages instead of a single text prompt and handles the chat templating for you.
 
         Args:
             prompt (`str`):
@@ -2441,15 +2426,11 @@ class InferenceClient:
         """
         Generate an image based on a given text using a specified model.
 
-        <Tip warning={true}>
-
-        You must have `PIL` installed if you want to work with images (`pip install Pillow`).
-
-        </Tip>
+        > [!WARNING]
+        > You must have `PIL` installed if you want to work with images (`pip install Pillow`).
 
-        <Tip>
-        You can pass provider-specific parameters to the model by using the `extra_body` argument.
-        </Tip>
+        > [!TIP]
+        > You can pass provider-specific parameters to the model by using the `extra_body` argument.
 
         Args:
             prompt (`str`):
@@ -2582,9 +2563,8 @@ class InferenceClient:
         """
         Generate a video based on a given text.
 
-        <Tip>
-        You can pass provider-specific parameters to the model by using the `extra_body` argument.
-        </Tip>
+        > [!TIP]
+        > You can pass provider-specific parameters to the model by using the `extra_body` argument.
 
         Args:
             prompt (`str`):
@@ -2690,9 +2670,8 @@ class InferenceClient:
         """
         Synthesize an audio of a voice pronouncing a given text.
 
-        <Tip>
-        You can pass provider-specific parameters to the model by using the `extra_body` argument.
-        </Tip>
+        > [!TIP]
+        > You can pass provider-specific parameters to the model by using the `extra_body` argument.
 
         Args:
             text (`str`):

huggingface_hub/inference/_generated/_async_client.py CHANGED
@@ -118,7 +118,7 @@ class AsyncInferenceClient:
             Note: for better compatibility with OpenAI's client, `model` has been aliased as `base_url`. Those 2
             arguments are mutually exclusive. If a URL is passed as `model` or `base_url` for chat completion, the `(/v1)/chat/completions` suffix path will be appended to the URL.
         provider (`str`, *optional*):
-            Name of the provider to use for inference. Can be `"black-forest-labs"`, `"cerebras"`, `"cohere"`, `"fal-ai"`, `"featherless-ai"`, `"fireworks-ai"`, `"groq"`, `"hf-inference"`, `"hyperbolic"`, `"nebius"`, `"novita"`, `"nscale"`, `"openai"`, `publicai`, `"replicate"`, `"sambanova"`, `"scaleway"`, `"together"` or `"zai-org"`.
+            Name of the provider to use for inference. Can be `"black-forest-labs"`, `"cerebras"`, `"clarifai"`, `"cohere"`, `"fal-ai"`, `"featherless-ai"`, `"fireworks-ai"`, `"groq"`, `"hf-inference"`, `"hyperbolic"`, `"nebius"`, `"novita"`, `"nscale"`, `"openai"`, `publicai`, `"replicate"`, `"sambanova"`, `"scaleway"`, `"together"` or `"zai-org"`.
             Defaults to "auto" i.e. the first of the providers available for the model, sorted by the user's order in https://hf.co/settings/inference-providers.
             If model is a URL or `base_url` is passed, then `provider` is not used.
         token (`str`, *optional*):
@@ -579,18 +579,14 @@ class AsyncInferenceClient:
         """
         A method for completing conversations using a specified language model.
 
-        <Tip>
+        > [!TIP]
+        > The `client.chat_completion` method is aliased as `client.chat.completions.create` for compatibility with OpenAI's client.
+        > Inputs and outputs are strictly the same and using either syntax will yield the same results.
+        > Check out the [Inference guide](https://huggingface.co/docs/huggingface_hub/guides/inference#openai-compatibility)
+        > for more details about OpenAI's compatibility.
 
-        The `client.chat_completion` method is aliased as `client.chat.completions.create` for compatibility with OpenAI's client.
-        Inputs and outputs are strictly the same and using either syntax will yield the same results.
-        Check out the [Inference guide](https://huggingface.co/docs/huggingface_hub/guides/inference#openai-compatibility)
-        for more details about OpenAI's compatibility.
-
-        </Tip>
-
-        <Tip>
-        You can pass provider-specific parameters to the model by using the `extra_body` argument.
-        </Tip>
+        > [!TIP]
+        > You can pass provider-specific parameters to the model by using the `extra_body` argument.
 
         Args:
             messages (List of [`ChatCompletionInputMessage`]):
@@ -1246,11 +1242,8 @@ class AsyncInferenceClient:
         """
         Perform image segmentation on the given image using the specified model.
 
-        <Tip warning={true}>
-
-        You must have `PIL` installed if you want to work with images (`pip install Pillow`).
-
-        </Tip>
+        > [!WARNING]
+        > You must have `PIL` installed if you want to work with images (`pip install Pillow`).
 
         Args:
             image (`Union[str, Path, bytes, BinaryIO, PIL.Image.Image]`):
@@ -1319,11 +1312,8 @@ class AsyncInferenceClient:
         """
         Perform image-to-image translation using a specified model.
 
-        <Tip warning={true}>
-
-        You must have `PIL` installed if you want to work with images (`pip install Pillow`).
-
-        </Tip>
+        > [!WARNING]
+        > You must have `PIL` installed if you want to work with images (`pip install Pillow`).
 
         Args:
             image (`Union[str, Path, bytes, BinaryIO, PIL.Image.Image]`):
@@ -1342,7 +1332,8 @@ class AsyncInferenceClient:
                 The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
                 Inference Endpoint. This parameter overrides the model defined at the instance level. Defaults to None.
             target_size (`ImageToImageTargetSize`, *optional*):
-                The size in pixel of the output image. This parameter is only supported by some providers and for specific models. It will be ignored when unsupported.
+                The size in pixels of the output image. This parameter is only supported by some providers and for
+                specific models. It will be ignored when unsupported.
 
         Returns:
             `Image`: The translated image.
@@ -1515,11 +1506,8 @@ class AsyncInferenceClient:
         """
         Perform object detection on the given image using the specified model.
 
-        <Tip warning={true}>
-
-        You must have `PIL` installed if you want to work with images (`pip install Pillow`).
-
-        </Tip>
+        > [!WARNING]
+        > You must have `PIL` installed if you want to work with images (`pip install Pillow`).
 
         Args:
             image (`Union[str, Path, bytes, BinaryIO, PIL.Image.Image]`):
@@ -1663,7 +1651,7 @@ class AsyncInferenceClient:
                 Defaults to None.
 
         Returns:
-            `List[float]`: The embedding representing the input text.
+            `List[float]`: The similarity scores between the main sentence and the given comparison sentences.
 
         Raises:
             [`InferenceTimeoutError`]:
@@ -2169,12 +2157,9 @@ class AsyncInferenceClient:
         """
         Given a prompt, generate the following text.
 
-        <Tip>
-
-        If you want to generate a response from chat messages, you should use the [`InferenceClient.chat_completion`] method.
-        It accepts a list of messages instead of a single text prompt and handles the chat templating for you.
-
-        </Tip>
+        > [!TIP]
+        > If you want to generate a response from chat messages, you should use the [`InferenceClient.chat_completion`] method.
+        > It accepts a list of messages instead of a single text prompt and handles the chat templating for you.
 
         Args:
             prompt (`str`):
@@ -2498,15 +2483,11 @@ class AsyncInferenceClient:
         """
         Generate an image based on a given text using a specified model.
 
-        <Tip warning={true}>
-
-        You must have `PIL` installed if you want to work with images (`pip install Pillow`).
-
-        </Tip>
+        > [!WARNING]
+        > You must have `PIL` installed if you want to work with images (`pip install Pillow`).
 
-        <Tip>
-        You can pass provider-specific parameters to the model by using the `extra_body` argument.
-        </Tip>
+        > [!TIP]
+        > You can pass provider-specific parameters to the model by using the `extra_body` argument.
 
         Args:
             prompt (`str`):
@@ -2640,9 +2621,8 @@ class AsyncInferenceClient:
         """
         Generate a video based on a given text.
 
-        <Tip>
-        You can pass provider-specific parameters to the model by using the `extra_body` argument.
-        </Tip>
+        > [!TIP]
+        > You can pass provider-specific parameters to the model by using the `extra_body` argument.
 
         Args:
             prompt (`str`):
@@ -2748,9 +2728,8 @@ class AsyncInferenceClient:
         """
         Synthesize an audio of a voice pronouncing a given text.
 
-        <Tip>
-        You can pass provider-specific parameters to the model by using the `extra_body` argument.
-        </Tip>
+        > [!TIP]
+        > You can pass provider-specific parameters to the model by using the `extra_body` argument.
 
         Args:
             text (`str`):

huggingface_hub/inference/_generated/types/image_to_image.py CHANGED
@@ -10,7 +10,9 @@ from .base import BaseInferenceType, dataclass_with_extra
 
 @dataclass_with_extra
 class ImageToImageTargetSize(BaseInferenceType):
-    """The size in pixel of the output image."""
+    """The size in pixels of the output image. This parameter is only supported by some
+    providers and for specific models. It will be ignored when unsupported.
+    """
 
     height: int
     width: int
@@ -33,7 +35,9 @@ class ImageToImageParameters(BaseInferenceType):
     prompt: Optional[str] = None
     """The text prompt to guide the image generation."""
     target_size: Optional[ImageToImageTargetSize] = None
-    """The size in pixel of the output image. This parameter is only supported by some providers and for specific models. It will be ignored when unsupported."""
+    """The size in pixels of the output image. This parameter is only supported by some
+    providers and for specific models. It will be ignored when unsupported.
+    """
 
 
 @dataclass_with_extra
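
A minimal sketch of requesting an output size via `target_size`, which, as the updated docstring notes, some providers and models simply ignore. The model ID and input image are placeholders:

```python
from huggingface_hub import InferenceClient
from huggingface_hub.inference._generated.types import ImageToImageTargetSize

client = InferenceClient()
result = client.image_to_image(
    "cat.png",                                              # placeholder input image
    prompt="Turn the cat into a tiger.",
    model="stabilityai/stable-diffusion-xl-refiner-1.0",    # placeholder model ID
    target_size=ImageToImageTargetSize(height=512, width=512),
)
```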

huggingface_hub/inference/_mcp/agent.py CHANGED
@@ -14,11 +14,8 @@ class Agent(MCPClient):
     """
     Implementation of a Simple Agent, which is a simple while loop built right on top of an [`MCPClient`].
 
-    <Tip warning={true}>
-
-    This class is experimental and might be subject to breaking changes in the future without prior notice.
-
-    </Tip>
+    > [!WARNING]
+    > This class is experimental and might be subject to breaking changes in the future without prior notice.
 
     Args:
         model (`str`, *optional*):

huggingface_hub/inference/_mcp/mcp_client.py CHANGED
@@ -56,11 +56,8 @@ class MCPClient:
     """
     Client for connecting to one or more MCP servers and processing chat completions with tools.
 
-    <Tip warning={true}>
-
-    This class is experimental and might be subject to breaking changes in the future without prior notice.
-
-    </Tip>
+    > [!WARNING]
+    > This class is experimental and might be subject to breaking changes in the future without prior notice.
 
     Args:
         model (`str`, `optional`):

huggingface_hub/inference/_providers/__init__.py CHANGED
@@ -9,6 +9,7 @@ from huggingface_hub.utils import logging
 from ._common import TaskProviderHelper, _fetch_inference_provider_mapping
 from .black_forest_labs import BlackForestLabsTextToImageTask
 from .cerebras import CerebrasConversationalTask
+from .clarifai import ClarifaiConversationalTask
 from .cohere import CohereConversationalTask
 from .fal_ai import (
     FalAIAutomaticSpeechRecognitionTask,
@@ -50,6 +51,7 @@ logger = logging.get_logger(__name__)
 PROVIDER_T = Literal[
     "black-forest-labs",
     "cerebras",
+    "clarifai",
     "cohere",
     "fal-ai",
     "featherless-ai",
@@ -78,6 +80,9 @@ PROVIDERS: Dict[PROVIDER_T, Dict[str, TaskProviderHelper]] = {
     "cerebras": {
         "conversational": CerebrasConversationalTask(),
     },
+    "clarifai": {
+        "conversational": ClarifaiConversationalTask(),
+    },
     "cohere": {
         "conversational": CohereConversationalTask(),
     },

huggingface_hub/inference/_providers/_common.py CHANGED
@@ -24,6 +24,7 @@ HARDCODED_MODEL_INFERENCE_MAPPING: Dict[str, Dict[str, InferenceProviderMapping]
     #     status="live")
     "cerebras": {},
     "cohere": {},
+    "clarifai": {},
     "fal-ai": {},
     "fireworks-ai": {},
     "groq": {},

huggingface_hub/inference/_providers/clarifai.py ADDED
@@ -0,0 +1,13 @@
+from ._common import BaseConversationalTask
+
+
+_PROVIDER = "clarifai"
+_BASE_URL = "https://api.clarifai.com"
+
+
+class ClarifaiConversationalTask(BaseConversationalTask):
+    def __init__(self):
+        super().__init__(provider=_PROVIDER, base_url=_BASE_URL)
+
+    def _prepare_route(self, mapped_model: str, api_key: str) -> str:
+        return "/v2/ext/openai/v1/chat/completions"
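
A minimal sketch of selecting the newly registered provider; the model ID and token are placeholders, and the model must actually be served by Clarifai:

```python
from huggingface_hub import InferenceClient

client = InferenceClient(provider="clarifai", api_key="hf_xxx")  # placeholder token
completion = client.chat_completion(
    model="openai/gpt-oss-120b",                                 # placeholder model ID
    messages=[{"role": "user", "content": "Hello!"}],
)
print(completion.choices[0].message.content)
```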

huggingface_hub/keras_mixin.py CHANGED
@@ -283,12 +283,9 @@ def from_pretrained_keras(*args, **kwargs) -> "KerasModelHubMixin":
         model_kwargs (`Dict`, *optional*):
             model_kwargs will be passed to the model during initialization
 
-    <Tip>
-
-    Passing `token=True` is required when you want to use a private
-    model.
-
-    </Tip>
+    > [!TIP]
+    > Passing `token=True` is required when you want to use a private
+    > model.
     """
     return KerasModelHubMixin.from_pretrained(*args, **kwargs)
 
huggingface_hub/lfs.py CHANGED
@@ -108,7 +108,8 @@ def post_lfs_batch_info(
     revision: Optional[str] = None,
     endpoint: Optional[str] = None,
     headers: Optional[Dict[str, str]] = None,
-) -> Tuple[List[dict], List[dict]]:
+    transfers: Optional[List[str]] = None,
+) -> Tuple[List[dict], List[dict], Optional[str]]:
     """
     Requests the LFS batch endpoint to retrieve upload instructions
 
@@ -127,11 +128,14 @@ def post_lfs_batch_info(
            The git revision to upload to.
        headers (`dict`, *optional*):
            Additional headers to include in the request
+       transfers (`list`, *optional*):
+           List of transfer methods to use. Defaults to ["basic", "multipart"].
 
    Returns:
-       `LfsBatchInfo`: 2-tuple:
+       `LfsBatchInfo`: 3-tuple:
            - First element is the list of upload instructions from the server
-           - Second element is an list of errors, if any
+           - Second element is a list of errors, if any
+           - Third element is the chosen transfer adapter if provided by the server (e.g. "basic", "multipart", "xet")
 
    Raises:
        [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
@@ -146,7 +150,7 @@ def post_lfs_batch_info(
    batch_url = f"{endpoint}/{url_prefix}{repo_id}.git/info/lfs/objects/batch"
    payload: Dict = {
        "operation": "upload",
-       "transfers": ["basic", "multipart"],
+       "transfers": transfers if transfers is not None else ["basic", "multipart"],
        "objects": [
            {
                "oid": upload.sha256.hex(),
@@ -172,9 +176,13 @@ def post_lfs_batch_info(
    if not isinstance(objects, list):
        raise ValueError("Malformed response from server")
 
+   chosen_transfer = batch_info.get("transfer")
+   chosen_transfer = chosen_transfer if isinstance(chosen_transfer, str) else None
+
    return (
        [_validate_batch_actions(obj) for obj in objects if "error" not in obj],
        [_validate_batch_error(obj) for obj in objects if "error" in obj],
+       chosen_transfer,
    )
 
 
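
A minimal sketch of the updated `post_lfs_batch_info` contract: callers may advertise extra transfer adapters and now get the server's chosen adapter back as a third element. The repo ID and token are placeholders, `UploadInfo.from_bytes` is assumed for building a small in-memory object, and in practice this helper is driven by the upload machinery rather than called directly:

```python
from huggingface_hub.lfs import UploadInfo, post_lfs_batch_info

info = UploadInfo.from_bytes(b"example LFS payload")  # small in-memory example object
actions, errors, chosen_transfer = post_lfs_batch_info(
    upload_infos=[info],
    token="hf_xxx",                  # placeholder token
    repo_type="model",
    repo_id="username/my-model",     # placeholder repo
    transfers=["basic", "multipart", "xet"],
)
if chosen_transfer == "xet":
    ...  # route uploads through the xet transfer adapter
```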