huggingface-hub 1.0.0rc1__py3-none-any.whl → 1.0.0rc3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of huggingface-hub might be problematic. Click here for more details.

Files changed (59) hide show
  1. huggingface_hub/__init__.py +4 -7
  2. huggingface_hub/_commit_api.py +126 -66
  3. huggingface_hub/_commit_scheduler.py +4 -7
  4. huggingface_hub/_login.py +10 -16
  5. huggingface_hub/_snapshot_download.py +119 -21
  6. huggingface_hub/_tensorboard_logger.py +2 -5
  7. huggingface_hub/_upload_large_folder.py +1 -2
  8. huggingface_hub/_webhooks_server.py +8 -20
  9. huggingface_hub/cli/_cli_utils.py +12 -6
  10. huggingface_hub/cli/download.py +32 -7
  11. huggingface_hub/cli/repo.py +137 -5
  12. huggingface_hub/dataclasses.py +122 -2
  13. huggingface_hub/errors.py +4 -0
  14. huggingface_hub/fastai_utils.py +22 -32
  15. huggingface_hub/file_download.py +234 -38
  16. huggingface_hub/hf_api.py +385 -424
  17. huggingface_hub/hf_file_system.py +55 -65
  18. huggingface_hub/inference/_client.py +27 -48
  19. huggingface_hub/inference/_generated/_async_client.py +27 -48
  20. huggingface_hub/inference/_generated/types/image_to_image.py +6 -2
  21. huggingface_hub/inference/_mcp/agent.py +2 -5
  22. huggingface_hub/inference/_mcp/mcp_client.py +6 -8
  23. huggingface_hub/inference/_providers/__init__.py +16 -0
  24. huggingface_hub/inference/_providers/_common.py +2 -0
  25. huggingface_hub/inference/_providers/fal_ai.py +2 -0
  26. huggingface_hub/inference/_providers/publicai.py +6 -0
  27. huggingface_hub/inference/_providers/scaleway.py +28 -0
  28. huggingface_hub/inference/_providers/zai_org.py +17 -0
  29. huggingface_hub/lfs.py +14 -8
  30. huggingface_hub/repocard.py +12 -16
  31. huggingface_hub/serialization/_base.py +3 -6
  32. huggingface_hub/serialization/_torch.py +16 -34
  33. huggingface_hub/utils/__init__.py +1 -2
  34. huggingface_hub/utils/_cache_manager.py +42 -72
  35. huggingface_hub/utils/_chunk_utils.py +2 -3
  36. huggingface_hub/utils/_http.py +37 -68
  37. huggingface_hub/utils/_validators.py +2 -2
  38. huggingface_hub/utils/logging.py +8 -11
  39. {huggingface_hub-1.0.0rc1.dist-info → huggingface_hub-1.0.0rc3.dist-info}/METADATA +2 -2
  40. {huggingface_hub-1.0.0rc1.dist-info → huggingface_hub-1.0.0rc3.dist-info}/RECORD +44 -56
  41. {huggingface_hub-1.0.0rc1.dist-info → huggingface_hub-1.0.0rc3.dist-info}/entry_points.txt +0 -1
  42. huggingface_hub/commands/__init__.py +0 -27
  43. huggingface_hub/commands/_cli_utils.py +0 -74
  44. huggingface_hub/commands/delete_cache.py +0 -476
  45. huggingface_hub/commands/download.py +0 -195
  46. huggingface_hub/commands/env.py +0 -39
  47. huggingface_hub/commands/huggingface_cli.py +0 -65
  48. huggingface_hub/commands/lfs.py +0 -200
  49. huggingface_hub/commands/repo.py +0 -151
  50. huggingface_hub/commands/repo_files.py +0 -132
  51. huggingface_hub/commands/scan_cache.py +0 -183
  52. huggingface_hub/commands/tag.py +0 -159
  53. huggingface_hub/commands/upload.py +0 -318
  54. huggingface_hub/commands/upload_large_folder.py +0 -131
  55. huggingface_hub/commands/user.py +0 -207
  56. huggingface_hub/commands/version.py +0 -40
  57. {huggingface_hub-1.0.0rc1.dist-info → huggingface_hub-1.0.0rc3.dist-info}/LICENSE +0 -0
  58. {huggingface_hub-1.0.0rc1.dist-info → huggingface_hub-1.0.0rc3.dist-info}/WHEEL +0 -0
  59. {huggingface_hub-1.0.0rc1.dist-info → huggingface_hub-1.0.0rc3.dist-info}/top_level.txt +0 -0
@@ -60,13 +60,10 @@ class HfFileSystem(fsspec.AbstractFileSystem):
60
60
  """
61
61
  Access a remote Hugging Face Hub repository as if it were a local file system.
62
62
 
63
- <Tip warning={true}>
64
-
65
- [`HfFileSystem`] provides fsspec compatibility, which is useful for libraries that require it (e.g., reading
66
- Hugging Face datasets directly with `pandas`). However, it introduces additional overhead due to this compatibility
67
- layer. For better performance and reliability, it's recommended to use `HfApi` methods when possible.
68
-
69
- </Tip>
63
+ > [!WARNING]
64
+ > [`HfFileSystem`] provides fsspec compatibility, which is useful for libraries that require it (e.g., reading
65
+ > Hugging Face datasets directly with `pandas`). However, it introduces additional overhead due to this compatibility
66
+ > layer. For better performance and reliability, it's recommended to use `HfApi` methods when possible.
70
67
 
71
68
  Args:
72
69
  token (`str` or `bool`, *optional*):
@@ -301,11 +298,8 @@ class HfFileSystem(fsspec.AbstractFileSystem):
301
298
 
302
299
  For more details, refer to [fsspec documentation](https://filesystem-spec.readthedocs.io/en/latest/api.html#fsspec.spec.AbstractFileSystem.rm).
303
300
 
304
- <Tip warning={true}>
305
-
306
- Note: When possible, use `HfApi.delete_file()` for better performance.
307
-
308
- </Tip>
301
+ > [!WARNING]
302
+ > Note: When possible, use `HfApi.delete_file()` for better performance.
309
303
 
310
304
  Args:
311
305
  path (`str`):
@@ -345,11 +339,8 @@ class HfFileSystem(fsspec.AbstractFileSystem):
345
339
 
346
340
  For more details, refer to [fsspec documentation](https://filesystem-spec.readthedocs.io/en/latest/api.html#fsspec.spec.AbstractFileSystem.ls).
347
341
 
348
- <Tip warning={true}>
349
-
350
- Note: When possible, use `HfApi.list_repo_tree()` for better performance.
351
-
352
- </Tip>
342
+ > [!WARNING]
343
+ > Note: When possible, use `HfApi.list_repo_tree()` for better performance.
353
344
 
354
345
  Args:
355
346
  path (`str`):
@@ -387,6 +378,7 @@ class HfFileSystem(fsspec.AbstractFileSystem):
387
378
  refresh: bool = False,
388
379
  revision: Optional[str] = None,
389
380
  expand_info: bool = False,
381
+ maxdepth: Optional[int] = None,
390
382
  ):
391
383
  resolved_path = self.resolve_path(path, revision=revision)
392
384
  path = resolved_path.unresolve()
@@ -406,19 +398,25 @@ class HfFileSystem(fsspec.AbstractFileSystem):
406
398
  if recursive:
407
399
  # Use BFS to traverse the cache and build the "recursive" output
408
400
  # (The Hub uses a so-called "tree first" strategy for the tree endpoint but we sort the output to follow the spec so the result is (eventually) the same)
401
+ depth = 2
409
402
  dirs_to_visit = deque(
410
- [path_info for path_info in cached_path_infos if path_info["type"] == "directory"]
403
+ [(depth, path_info) for path_info in cached_path_infos if path_info["type"] == "directory"]
411
404
  )
412
405
  while dirs_to_visit:
413
- dir_info = dirs_to_visit.popleft()
414
- if dir_info["name"] not in self.dircache:
415
- dirs_not_in_dircache.append(dir_info["name"])
416
- else:
417
- cached_path_infos = self.dircache[dir_info["name"]]
418
- out.extend(cached_path_infos)
419
- dirs_to_visit.extend(
420
- [path_info for path_info in cached_path_infos if path_info["type"] == "directory"]
421
- )
406
+ depth, dir_info = dirs_to_visit.popleft()
407
+ if maxdepth is None or depth <= maxdepth:
408
+ if dir_info["name"] not in self.dircache:
409
+ dirs_not_in_dircache.append(dir_info["name"])
410
+ else:
411
+ cached_path_infos = self.dircache[dir_info["name"]]
412
+ out.extend(cached_path_infos)
413
+ dirs_to_visit.extend(
414
+ [
415
+ (depth + 1, path_info)
416
+ for path_info in cached_path_infos
417
+ if path_info["type"] == "directory"
418
+ ]
419
+ )
422
420
 
423
421
  dirs_not_expanded = []
424
422
  if expand_info:
@@ -437,6 +435,9 @@ class HfFileSystem(fsspec.AbstractFileSystem):
437
435
  or common_prefix in chain(dirs_not_in_dircache, dirs_not_expanded)
438
436
  else self._parent(common_prefix)
439
437
  )
438
+ if maxdepth is not None:
439
+ common_path_depth = common_path[len(path) :].count("/")
440
+ maxdepth -= common_path_depth
440
441
  out = [o for o in out if not o["name"].startswith(common_path + "/")]
441
442
  for cached_path in self.dircache:
442
443
  if cached_path.startswith(common_path + "/"):
@@ -449,6 +450,7 @@ class HfFileSystem(fsspec.AbstractFileSystem):
449
450
  refresh=True,
450
451
  revision=revision,
451
452
  expand_info=expand_info,
453
+ maxdepth=maxdepth,
452
454
  )
453
455
  )
454
456
  else:
@@ -461,9 +463,10 @@ class HfFileSystem(fsspec.AbstractFileSystem):
461
463
  repo_type=resolved_path.repo_type,
462
464
  )
463
465
  for path_info in tree:
466
+ cache_path = root_path + "/" + path_info.path
464
467
  if isinstance(path_info, RepoFile):
465
468
  cache_path_info = {
466
- "name": root_path + "/" + path_info.path,
469
+ "name": cache_path,
467
470
  "size": path_info.size,
468
471
  "type": "file",
469
472
  "blob_id": path_info.blob_id,
@@ -473,7 +476,7 @@ class HfFileSystem(fsspec.AbstractFileSystem):
473
476
  }
474
477
  else:
475
478
  cache_path_info = {
476
- "name": root_path + "/" + path_info.path,
479
+ "name": cache_path,
477
480
  "size": 0,
478
481
  "type": "directory",
479
482
  "tree_id": path_info.tree_id,
@@ -481,7 +484,9 @@ class HfFileSystem(fsspec.AbstractFileSystem):
481
484
  }
482
485
  parent_path = self._parent(cache_path_info["name"])
483
486
  self.dircache.setdefault(parent_path, []).append(cache_path_info)
484
- out.append(cache_path_info)
487
+ depth = cache_path[len(path) :].count("/")
488
+ if maxdepth is None or depth <= maxdepth:
489
+ out.append(cache_path_info)
485
490
  return out
486
491
 
487
492
  def walk(self, path: str, *args, **kwargs) -> Iterator[tuple[str, list[str], list[str]]]:
@@ -548,19 +553,22 @@ class HfFileSystem(fsspec.AbstractFileSystem):
548
553
  Returns:
549
554
  `Union[list[str], dict[str, dict[str, Any]]]`: List of paths or dict of file information.
550
555
  """
551
- if maxdepth:
552
- return super().find(
553
- path, maxdepth=maxdepth, withdirs=withdirs, detail=detail, refresh=refresh, revision=revision, **kwargs
554
- )
556
+ if maxdepth is not None and maxdepth < 1:
557
+ raise ValueError("maxdepth must be at least 1")
555
558
  resolved_path = self.resolve_path(path, revision=revision)
556
559
  path = resolved_path.unresolve()
557
560
  try:
558
- out = self._ls_tree(path, recursive=True, refresh=refresh, revision=resolved_path.revision, **kwargs)
561
+ out = self._ls_tree(
562
+ path, recursive=True, refresh=refresh, revision=resolved_path.revision, maxdepth=maxdepth, **kwargs
563
+ )
559
564
  except EntryNotFoundError:
560
565
  # Path could be a file
561
- if self.info(path, revision=revision, **kwargs)["type"] == "file":
562
- out = {path: {}}
563
- else:
566
+ try:
567
+ if self.info(path, revision=revision, **kwargs)["type"] == "file":
568
+ out = {path: {}}
569
+ else:
570
+ out = {}
571
+ except FileNotFoundError:
564
572
  out = {}
565
573
  else:
566
574
  if not withdirs:
@@ -580,11 +588,8 @@ class HfFileSystem(fsspec.AbstractFileSystem):
580
588
  """
581
589
  Copy a file within or between repositories.
582
590
 
583
- <Tip warning={true}>
584
-
585
- Note: When possible, use `HfApi.upload_file()` for better performance.
586
-
587
- </Tip>
591
+ > [!WARNING]
592
+ > Note: When possible, use `HfApi.upload_file()` for better performance.
588
593
 
589
594
  Args:
590
595
  path1 (`str`):
@@ -657,11 +662,8 @@ class HfFileSystem(fsspec.AbstractFileSystem):
657
662
 
658
663
  For more details, refer to [fsspec documentation](https://filesystem-spec.readthedocs.io/en/latest/api.html#fsspec.spec.AbstractFileSystem.info).
659
664
 
660
- <Tip warning={true}>
661
-
662
- Note: When possible, use `HfApi.get_paths_info()` or `HfApi.repo_info()` for better performance.
663
-
664
- </Tip>
665
+ > [!WARNING]
666
+ > Note: When possible, use `HfApi.get_paths_info()` or `HfApi.repo_info()` for better performance.
665
667
 
666
668
  Args:
667
669
  path (`str`):
@@ -758,11 +760,8 @@ class HfFileSystem(fsspec.AbstractFileSystem):
758
760
 
759
761
  For more details, refer to [fsspec documentation](https://filesystem-spec.readthedocs.io/en/latest/api.html#fsspec.spec.AbstractFileSystem.exists).
760
762
 
761
- <Tip warning={true}>
762
-
763
- Note: When possible, use `HfApi.file_exists()` for better performance.
764
-
765
- </Tip>
763
+ > [!WARNING]
764
+ > Note: When possible, use `HfApi.file_exists()` for better performance.
766
765
 
767
766
  Args:
768
767
  path (`str`):
@@ -843,11 +842,8 @@ class HfFileSystem(fsspec.AbstractFileSystem):
843
842
  """
844
843
  Copy single remote file to local.
845
844
 
846
- <Tip warning={true}>
847
-
848
- Note: When possible, use `HfApi.hf_hub_download()` for better performance.
849
-
850
- </Tip>
845
+ > [!WARNING]
846
+ > Note: When possible, use `HfApi.hf_hub_download()` for better performance.
851
847
 
852
848
  Args:
853
849
  rpath (`str`):
@@ -959,13 +955,7 @@ class HfFileSystemFile(fsspec.spec.AbstractBufferedFile):
959
955
  repo_type=self.resolved_path.repo_type,
960
956
  endpoint=self.fs.endpoint,
961
957
  )
962
- r = http_backoff(
963
- "GET",
964
- url,
965
- headers=headers,
966
- retry_on_status_codes=(500, 502, 503, 504),
967
- timeout=constants.HF_HUB_DOWNLOAD_TIMEOUT,
968
- )
958
+ r = http_backoff("GET", url, headers=headers, timeout=constants.HF_HUB_DOWNLOAD_TIMEOUT)
969
959
  hf_raise_for_status(r)
970
960
  return r.content
971
961
 
@@ -135,7 +135,7 @@ class InferenceClient:
135
135
  Note: for better compatibility with OpenAI's client, `model` has been aliased as `base_url`. Those 2
136
136
  arguments are mutually exclusive. If a URL is passed as `model` or `base_url` for chat completion, the `(/v1)/chat/completions` suffix path will be appended to the URL.
137
137
  provider (`str`, *optional*):
138
- Name of the provider to use for inference. Can be `"black-forest-labs"`, `"cerebras"`, `"cohere"`, `"fal-ai"`, `"featherless-ai"`, `"fireworks-ai"`, `"groq"`, `"hf-inference"`, `"hyperbolic"`, `"nebius"`, `"novita"`, `"nscale"`, `"openai"`, `"replicate"`, "sambanova"` or `"together"`.
138
+ Name of the provider to use for inference. Can be `"black-forest-labs"`, `"cerebras"`, `"cohere"`, `"fal-ai"`, `"featherless-ai"`, `"fireworks-ai"`, `"groq"`, `"hf-inference"`, `"hyperbolic"`, `"nebius"`, `"novita"`, `"nscale"`, `"openai"`, `"publicai"`, `"replicate"`, `"sambanova"`, `"scaleway"`, `"together"` or `"zai-org"`.
139
139
  Defaults to "auto" i.e. the first of the providers available for the model, sorted by the user's order in https://hf.co/settings/inference-providers.
140
140
  If model is a URL or `base_url` is passed, then `provider` is not used.
141
141
  token (`str`, *optional*):
@@ -560,18 +560,14 @@ class InferenceClient:
560
560
  """
561
561
  A method for completing conversations using a specified language model.
562
562
 
563
- <Tip>
563
+ > [!TIP]
564
+ > The `client.chat_completion` method is aliased as `client.chat.completions.create` for compatibility with OpenAI's client.
565
+ > Inputs and outputs are strictly the same and using either syntax will yield the same results.
566
+ > Check out the [Inference guide](https://huggingface.co/docs/huggingface_hub/guides/inference#openai-compatibility)
567
+ > for more details about OpenAI's compatibility.
564
568
 
565
- The `client.chat_completion` method is aliased as `client.chat.completions.create` for compatibility with OpenAI's client.
566
- Inputs and outputs are strictly the same and using either syntax will yield the same results.
567
- Check out the [Inference guide](https://huggingface.co/docs/huggingface_hub/guides/inference#openai-compatibility)
568
- for more details about OpenAI's compatibility.
569
-
570
- </Tip>
571
-
572
- <Tip>
573
- You can pass provider-specific parameters to the model by using the `extra_body` argument.
574
- </Tip>
569
+ > [!TIP]
570
+ > You can pass provider-specific parameters to the model by using the `extra_body` argument.
575
571
 
576
572
  Args:
577
573
  messages (List of [`ChatCompletionInputMessage`]):
@@ -1217,11 +1213,8 @@ class InferenceClient:
1217
1213
  """
1218
1214
  Perform image segmentation on the given image using the specified model.
1219
1215
 
1220
- <Tip warning={true}>
1221
-
1222
- You must have `PIL` installed if you want to work with images (`pip install Pillow`).
1223
-
1224
- </Tip>
1216
+ > [!WARNING]
1217
+ > You must have `PIL` installed if you want to work with images (`pip install Pillow`).
1225
1218
 
1226
1219
  Args:
1227
1220
  image (`Union[str, Path, bytes, BinaryIO, PIL.Image.Image]`):
@@ -1289,11 +1282,8 @@ class InferenceClient:
1289
1282
  """
1290
1283
  Perform image-to-image translation using a specified model.
1291
1284
 
1292
- <Tip warning={true}>
1293
-
1294
- You must have `PIL` installed if you want to work with images (`pip install Pillow`).
1295
-
1296
- </Tip>
1285
+ > [!WARNING]
1286
+ > You must have `PIL` installed if you want to work with images (`pip install Pillow`).
1297
1287
 
1298
1288
  Args:
1299
1289
  image (`Union[str, Path, bytes, BinaryIO, PIL.Image.Image]`):
@@ -1312,7 +1302,8 @@ class InferenceClient:
1312
1302
  The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
1313
1303
  Inference Endpoint. This parameter overrides the model defined at the instance level. Defaults to None.
1314
1304
  target_size (`ImageToImageTargetSize`, *optional*):
1315
- The size in pixel of the output image.
1305
+ The size in pixels of the output image. This parameter is only supported by some providers and for
1306
+ specific models. It will be ignored when unsupported.
1316
1307
 
1317
1308
  Returns:
1318
1309
  `Image`: The translated image.
@@ -1482,11 +1473,8 @@ class InferenceClient:
1482
1473
  """
1483
1474
  Perform object detection on the given image using the specified model.
1484
1475
 
1485
- <Tip warning={true}>
1486
-
1487
- You must have `PIL` installed if you want to work with images (`pip install Pillow`).
1488
-
1489
- </Tip>
1476
+ > [!WARNING]
1477
+ > You must have `PIL` installed if you want to work with images (`pip install Pillow`).
1490
1478
 
1491
1479
  Args:
1492
1480
  image (`Union[str, Path, bytes, BinaryIO, PIL.Image.Image]`):
@@ -2128,12 +2116,9 @@ class InferenceClient:
2128
2116
  """
2129
2117
  Given a prompt, generate the following text.
2130
2118
 
2131
- <Tip>
2132
-
2133
- If you want to generate a response from chat messages, you should use the [`InferenceClient.chat_completion`] method.
2134
- It accepts a list of messages instead of a single text prompt and handles the chat templating for you.
2135
-
2136
- </Tip>
2119
+ > [!TIP]
2120
+ > If you want to generate a response from chat messages, you should use the [`InferenceClient.chat_completion`] method.
2121
+ > It accepts a list of messages instead of a single text prompt and handles the chat templating for you.
2137
2122
 
2138
2123
  Args:
2139
2124
  prompt (`str`):
@@ -2456,15 +2441,11 @@ class InferenceClient:
2456
2441
  """
2457
2442
  Generate an image based on a given text using a specified model.
2458
2443
 
2459
- <Tip warning={true}>
2460
-
2461
- You must have `PIL` installed if you want to work with images (`pip install Pillow`).
2462
-
2463
- </Tip>
2444
+ > [!WARNING]
2445
+ > You must have `PIL` installed if you want to work with images (`pip install Pillow`).
2464
2446
 
2465
- <Tip>
2466
- You can pass provider-specific parameters to the model by using the `extra_body` argument.
2467
- </Tip>
2447
+ > [!TIP]
2448
+ > You can pass provider-specific parameters to the model by using the `extra_body` argument.
2468
2449
 
2469
2450
  Args:
2470
2451
  prompt (`str`):
@@ -2597,9 +2578,8 @@ class InferenceClient:
2597
2578
  """
2598
2579
  Generate a video based on a given text.
2599
2580
 
2600
- <Tip>
2601
- You can pass provider-specific parameters to the model by using the `extra_body` argument.
2602
- </Tip>
2581
+ > [!TIP]
2582
+ > You can pass provider-specific parameters to the model by using the `extra_body` argument.
2603
2583
 
2604
2584
  Args:
2605
2585
  prompt (`str`):
@@ -2705,9 +2685,8 @@ class InferenceClient:
2705
2685
  """
2706
2686
  Synthesize an audio of a voice pronouncing a given text.
2707
2687
 
2708
- <Tip>
2709
- You can pass provider-specific parameters to the model by using the `extra_body` argument.
2710
- </Tip>
2688
+ > [!TIP]
2689
+ > You can pass provider-specific parameters to the model by using the `extra_body` argument.
2711
2690
 
2712
2691
  Args:
2713
2692
  text (`str`):
@@ -126,7 +126,7 @@ class AsyncInferenceClient:
126
126
  Note: for better compatibility with OpenAI's client, `model` has been aliased as `base_url`. Those 2
127
127
  arguments are mutually exclusive. If a URL is passed as `model` or `base_url` for chat completion, the `(/v1)/chat/completions` suffix path will be appended to the URL.
128
128
  provider (`str`, *optional*):
129
- Name of the provider to use for inference. Can be `"black-forest-labs"`, `"cerebras"`, `"cohere"`, `"fal-ai"`, `"featherless-ai"`, `"fireworks-ai"`, `"groq"`, `"hf-inference"`, `"hyperbolic"`, `"nebius"`, `"novita"`, `"nscale"`, `"openai"`, `"replicate"`, "sambanova"` or `"together"`.
129
+ Name of the provider to use for inference. Can be `"black-forest-labs"`, `"cerebras"`, `"cohere"`, `"fal-ai"`, `"featherless-ai"`, `"fireworks-ai"`, `"groq"`, `"hf-inference"`, `"hyperbolic"`, `"nebius"`, `"novita"`, `"nscale"`, `"openai"`, `"publicai"`, `"replicate"`, `"sambanova"`, `"scaleway"`, `"together"` or `"zai-org"`.
130
130
  Defaults to "auto" i.e. the first of the providers available for the model, sorted by the user's order in https://hf.co/settings/inference-providers.
131
131
  If model is a URL or `base_url` is passed, then `provider` is not used.
132
132
  token (`str`, *optional*):
@@ -580,18 +580,14 @@ class AsyncInferenceClient:
580
580
  """
581
581
  A method for completing conversations using a specified language model.
582
582
 
583
- <Tip>
583
+ > [!TIP]
584
+ > The `client.chat_completion` method is aliased as `client.chat.completions.create` for compatibility with OpenAI's client.
585
+ > Inputs and outputs are strictly the same and using either syntax will yield the same results.
586
+ > Check out the [Inference guide](https://huggingface.co/docs/huggingface_hub/guides/inference#openai-compatibility)
587
+ > for more details about OpenAI's compatibility.
584
588
 
585
- The `client.chat_completion` method is aliased as `client.chat.completions.create` for compatibility with OpenAI's client.
586
- Inputs and outputs are strictly the same and using either syntax will yield the same results.
587
- Check out the [Inference guide](https://huggingface.co/docs/huggingface_hub/guides/inference#openai-compatibility)
588
- for more details about OpenAI's compatibility.
589
-
590
- </Tip>
591
-
592
- <Tip>
593
- You can pass provider-specific parameters to the model by using the `extra_body` argument.
594
- </Tip>
589
+ > [!TIP]
590
+ > You can pass provider-specific parameters to the model by using the `extra_body` argument.
595
591
 
596
592
  Args:
597
593
  messages (List of [`ChatCompletionInputMessage`]):
@@ -1247,11 +1243,8 @@ class AsyncInferenceClient:
1247
1243
  """
1248
1244
  Perform image segmentation on the given image using the specified model.
1249
1245
 
1250
- <Tip warning={true}>
1251
-
1252
- You must have `PIL` installed if you want to work with images (`pip install Pillow`).
1253
-
1254
- </Tip>
1246
+ > [!WARNING]
1247
+ > You must have `PIL` installed if you want to work with images (`pip install Pillow`).
1255
1248
 
1256
1249
  Args:
1257
1250
  image (`Union[str, Path, bytes, BinaryIO, PIL.Image.Image]`):
@@ -1320,11 +1313,8 @@ class AsyncInferenceClient:
1320
1313
  """
1321
1314
  Perform image-to-image translation using a specified model.
1322
1315
 
1323
- <Tip warning={true}>
1324
-
1325
- You must have `PIL` installed if you want to work with images (`pip install Pillow`).
1326
-
1327
- </Tip>
1316
+ > [!WARNING]
1317
+ > You must have `PIL` installed if you want to work with images (`pip install Pillow`).
1328
1318
 
1329
1319
  Args:
1330
1320
  image (`Union[str, Path, bytes, BinaryIO, PIL.Image.Image]`):
@@ -1343,7 +1333,8 @@ class AsyncInferenceClient:
1343
1333
  The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
1344
1334
  Inference Endpoint. This parameter overrides the model defined at the instance level. Defaults to None.
1345
1335
  target_size (`ImageToImageTargetSize`, *optional*):
1346
- The size in pixel of the output image.
1336
+ The size in pixels of the output image. This parameter is only supported by some providers and for
1337
+ specific models. It will be ignored when unsupported.
1347
1338
 
1348
1339
  Returns:
1349
1340
  `Image`: The translated image.
@@ -1516,11 +1507,8 @@ class AsyncInferenceClient:
1516
1507
  """
1517
1508
  Perform object detection on the given image using the specified model.
1518
1509
 
1519
- <Tip warning={true}>
1520
-
1521
- You must have `PIL` installed if you want to work with images (`pip install Pillow`).
1522
-
1523
- </Tip>
1510
+ > [!WARNING]
1511
+ > You must have `PIL` installed if you want to work with images (`pip install Pillow`).
1524
1512
 
1525
1513
  Args:
1526
1514
  image (`Union[str, Path, bytes, BinaryIO, PIL.Image.Image]`):
@@ -2170,12 +2158,9 @@ class AsyncInferenceClient:
2170
2158
  """
2171
2159
  Given a prompt, generate the following text.
2172
2160
 
2173
- <Tip>
2174
-
2175
- If you want to generate a response from chat messages, you should use the [`InferenceClient.chat_completion`] method.
2176
- It accepts a list of messages instead of a single text prompt and handles the chat templating for you.
2177
-
2178
- </Tip>
2161
+ > [!TIP]
2162
+ > If you want to generate a response from chat messages, you should use the [`InferenceClient.chat_completion`] method.
2163
+ > It accepts a list of messages instead of a single text prompt and handles the chat templating for you.
2179
2164
 
2180
2165
  Args:
2181
2166
  prompt (`str`):
@@ -2499,15 +2484,11 @@ class AsyncInferenceClient:
2499
2484
  """
2500
2485
  Generate an image based on a given text using a specified model.
2501
2486
 
2502
- <Tip warning={true}>
2503
-
2504
- You must have `PIL` installed if you want to work with images (`pip install Pillow`).
2505
-
2506
- </Tip>
2487
+ > [!WARNING]
2488
+ > You must have `PIL` installed if you want to work with images (`pip install Pillow`).
2507
2489
 
2508
- <Tip>
2509
- You can pass provider-specific parameters to the model by using the `extra_body` argument.
2510
- </Tip>
2490
+ > [!TIP]
2491
+ > You can pass provider-specific parameters to the model by using the `extra_body` argument.
2511
2492
 
2512
2493
  Args:
2513
2494
  prompt (`str`):
@@ -2641,9 +2622,8 @@ class AsyncInferenceClient:
2641
2622
  """
2642
2623
  Generate a video based on a given text.
2643
2624
 
2644
- <Tip>
2645
- You can pass provider-specific parameters to the model by using the `extra_body` argument.
2646
- </Tip>
2625
+ > [!TIP]
2626
+ > You can pass provider-specific parameters to the model by using the `extra_body` argument.
2647
2627
 
2648
2628
  Args:
2649
2629
  prompt (`str`):
@@ -2749,9 +2729,8 @@ class AsyncInferenceClient:
2749
2729
  """
2750
2730
  Synthesize an audio of a voice pronouncing a given text.
2751
2731
 
2752
- <Tip>
2753
- You can pass provider-specific parameters to the model by using the `extra_body` argument.
2754
- </Tip>
2732
+ > [!TIP]
2733
+ > You can pass provider-specific parameters to the model by using the `extra_body` argument.
2755
2734
 
2756
2735
  Args:
2757
2736
  text (`str`):
@@ -10,7 +10,9 @@ from .base import BaseInferenceType, dataclass_with_extra
10
10
 
11
11
  @dataclass_with_extra
12
12
  class ImageToImageTargetSize(BaseInferenceType):
13
- """The size in pixel of the output image."""
13
+ """The size in pixels of the output image. This parameter is only supported by some
14
+ providers and for specific models. It will be ignored when unsupported.
15
+ """
14
16
 
15
17
  height: int
16
18
  width: int
@@ -33,7 +35,9 @@ class ImageToImageParameters(BaseInferenceType):
33
35
  prompt: Optional[str] = None
34
36
  """The text prompt to guide the image generation."""
35
37
  target_size: Optional[ImageToImageTargetSize] = None
36
- """The size in pixel of the output image."""
38
+ """The size in pixels of the output image. This parameter is only supported by some
39
+ providers and for specific models. It will be ignored when unsupported.
40
+ """
37
41
 
38
42
 
39
43
  @dataclass_with_extra
@@ -14,11 +14,8 @@ class Agent(MCPClient):
14
14
  """
15
15
  Implementation of a Simple Agent, which is a simple while loop built right on top of an [`MCPClient`].
16
16
 
17
- <Tip warning={true}>
18
-
19
- This class is experimental and might be subject to breaking changes in the future without prior notice.
20
-
21
- </Tip>
17
+ > [!WARNING]
18
+ > This class is experimental and might be subject to breaking changes in the future without prior notice.
22
19
 
23
20
  Args:
24
21
  model (`str`, *optional*):
@@ -56,11 +56,8 @@ class MCPClient:
56
56
  """
57
57
  Client for connecting to one or more MCP servers and processing chat completions with tools.
58
58
 
59
- <Tip warning={true}>
60
-
61
- This class is experimental and might be subject to breaking changes in the future without prior notice.
62
-
63
- </Tip>
59
+ > [!WARNING]
60
+ > This class is experimental and might be subject to breaking changes in the future without prior notice.
64
61
 
65
62
  Args:
66
63
  model (`str`, `optional`):
@@ -158,7 +155,7 @@ class MCPClient:
158
155
  from mcp import types as mcp_types
159
156
 
160
157
  # Extract allowed_tools configuration if provided
161
- allowed_tools = params.pop("allowed_tools", [])
158
+ allowed_tools = params.pop("allowed_tools", None)
162
159
 
163
160
  # Determine server type and create appropriate parameters
164
161
  if type == "stdio":
@@ -218,9 +215,10 @@ class MCPClient:
218
215
  logger.debug("Connected to server with tools:", [tool.name for tool in response.tools])
219
216
 
220
217
  # Filter tools based on allowed_tools configuration
221
- filtered_tools = [tool for tool in response.tools if tool.name in allowed_tools]
218
+ filtered_tools = response.tools
222
219
 
223
- if allowed_tools:
220
+ if allowed_tools is not None:
221
+ filtered_tools = [tool for tool in response.tools if tool.name in allowed_tools]
224
222
  logger.debug(
225
223
  f"Tool filtering applied. Using {len(filtered_tools)} of {len(response.tools)} available tools: {[tool.name for tool in filtered_tools]}"
226
224
  )
@@ -36,9 +36,12 @@ from .nebius import (
36
36
  from .novita import NovitaConversationalTask, NovitaTextGenerationTask, NovitaTextToVideoTask
37
37
  from .nscale import NscaleConversationalTask, NscaleTextToImageTask
38
38
  from .openai import OpenAIConversationalTask
39
+ from .publicai import PublicAIConversationalTask
39
40
  from .replicate import ReplicateImageToImageTask, ReplicateTask, ReplicateTextToImageTask, ReplicateTextToSpeechTask
40
41
  from .sambanova import SambanovaConversationalTask, SambanovaFeatureExtractionTask
42
+ from .scaleway import ScalewayConversationalTask, ScalewayFeatureExtractionTask
41
43
  from .together import TogetherConversationalTask, TogetherTextGenerationTask, TogetherTextToImageTask
44
+ from .zai_org import ZaiConversationalTask
42
45
 
43
46
 
44
47
  logger = logging.get_logger(__name__)
@@ -58,9 +61,12 @@ PROVIDER_T = Literal[
58
61
  "novita",
59
62
  "nscale",
60
63
  "openai",
64
+ "publicai",
61
65
  "replicate",
62
66
  "sambanova",
67
+ "scaleway",
63
68
  "together",
69
+ "zai-org",
64
70
  ]
65
71
 
66
72
  PROVIDER_OR_POLICY_T = Union[PROVIDER_T, Literal["auto"]]
@@ -144,6 +150,9 @@ PROVIDERS: dict[PROVIDER_T, dict[str, TaskProviderHelper]] = {
144
150
  "openai": {
145
151
  "conversational": OpenAIConversationalTask(),
146
152
  },
153
+ "publicai": {
154
+ "conversational": PublicAIConversationalTask(),
155
+ },
147
156
  "replicate": {
148
157
  "image-to-image": ReplicateImageToImageTask(),
149
158
  "text-to-image": ReplicateTextToImageTask(),
@@ -154,11 +163,18 @@ PROVIDERS: dict[PROVIDER_T, dict[str, TaskProviderHelper]] = {
154
163
  "conversational": SambanovaConversationalTask(),
155
164
  "feature-extraction": SambanovaFeatureExtractionTask(),
156
165
  },
166
+ "scaleway": {
167
+ "conversational": ScalewayConversationalTask(),
168
+ "feature-extraction": ScalewayFeatureExtractionTask(),
169
+ },
157
170
  "together": {
158
171
  "text-to-image": TogetherTextToImageTask(),
159
172
  "conversational": TogetherConversationalTask(),
160
173
  "text-generation": TogetherTextGenerationTask(),
161
174
  },
175
+ "zai-org": {
176
+ "conversational": ZaiConversationalTask(),
177
+ },
162
178
  }
163
179
 
164
180