huggingface-hub 0.32.5__py3-none-any.whl → 0.33.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of huggingface-hub might be problematic.
- huggingface_hub/__init__.py +1 -1
- huggingface_hub/_local_folder.py +1 -0
- huggingface_hub/_snapshot_download.py +2 -2
- huggingface_hub/file_download.py +1 -1
- huggingface_hub/hf_api.py +74 -46
- huggingface_hub/inference/_client.py +4 -5
- huggingface_hub/inference/_generated/_async_client.py +4 -5
- huggingface_hub/inference/_mcp/mcp_client.py +19 -5
- huggingface_hub/inference/_providers/__init__.py +15 -1
- huggingface_hub/inference/_providers/_common.py +15 -3
- huggingface_hub/inference/_providers/featherless_ai.py +38 -0
- huggingface_hub/inference/_providers/groq.py +9 -0
- huggingface_hub/inference/_providers/hf_inference.py +6 -2
- huggingface_hub/inference/_providers/openai.py +3 -1
- {huggingface_hub-0.32.5.dist-info → huggingface_hub-0.33.0.dist-info}/METADATA +1 -1
- {huggingface_hub-0.32.5.dist-info → huggingface_hub-0.33.0.dist-info}/RECORD +20 -18
- {huggingface_hub-0.32.5.dist-info → huggingface_hub-0.33.0.dist-info}/LICENSE +0 -0
- {huggingface_hub-0.32.5.dist-info → huggingface_hub-0.33.0.dist-info}/WHEEL +0 -0
- {huggingface_hub-0.32.5.dist-info → huggingface_hub-0.33.0.dist-info}/entry_points.txt +0 -0
- {huggingface_hub-0.32.5.dist-info → huggingface_hub-0.33.0.dist-info}/top_level.txt +0 -0
huggingface_hub/__init__.py
CHANGED
huggingface_hub/_local_folder.py
CHANGED
huggingface_hub/_snapshot_download.py
CHANGED
@@ -1,6 +1,6 @@
 import os
 from pathlib import Path
-from typing import Dict, Iterable, List, Literal, Optional, Union
+from typing import Dict, Iterable, List, Literal, Optional, Type, Union

 import requests
 from tqdm.auto import tqdm as base_tqdm
@@ -44,7 +44,7 @@ def snapshot_download(
     allow_patterns: Optional[Union[List[str], str]] = None,
     ignore_patterns: Optional[Union[List[str], str]] = None,
     max_workers: int = 8,
-    tqdm_class: Optional[base_tqdm] = None,
+    tqdm_class: Optional[Type[base_tqdm]] = None,
     headers: Optional[Dict[str, str]] = None,
     endpoint: Optional[str] = None,
     # Deprecated args
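The change above tightens the `tqdm_class` annotation: the parameter takes a tqdm class (a subclass of `tqdm.auto.tqdm`), not an instance. A minimal sketch of passing a custom progress-bar class (the repo ID below is only an illustration):

from tqdm.auto import tqdm
from huggingface_hub import snapshot_download

class QuietTqdm(tqdm):
    # Progress bar subclass that is disabled unless explicitly enabled.
    def __init__(self, *args, **kwargs):
        kwargs.setdefault("disable", True)
        super().__init__(*args, **kwargs)

# Pass the class itself (matching Optional[Type[base_tqdm]]), not an instance.
snapshot_download("gpt2", tqdm_class=QuietTqdm)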
huggingface_hub/file_download.py
CHANGED
@@ -1706,7 +1706,7 @@ def _download_to_tmp_and_move(
         _check_disk_space(expected_size, destination_path.parent)

     if xet_file_data is not None and is_xet_available():
-        logger.
+        logger.debug("Xet Storage is enabled for this repo. Downloading file from Xet Storage..")
         xet_get(
             incomplete_path=incomplete_path,
             xet_file_data=xet_file_data,
huggingface_hub/hf_api.py
CHANGED
@@ -28,6 +28,7 @@ from functools import wraps
 from itertools import islice
 from pathlib import Path
 from typing import (
+    TYPE_CHECKING,
     Any,
     BinaryIO,
     Callable,
@@ -38,6 +39,7 @@ from typing import (
     Literal,
     Optional,
     Tuple,
+    Type,
     TypeVar,
     Union,
     overload,
@@ -134,8 +136,11 @@ from .utils._typing import CallableT
 from .utils.endpoint_helpers import _is_emission_within_threshold


+if TYPE_CHECKING:
+    from .inference._providers import PROVIDER_T
+
 R = TypeVar("R")  # Return type
-CollectionItemType_T = Literal["model", "dataset", "space", "paper"]
+CollectionItemType_T = Literal["model", "dataset", "space", "paper", "collection"]

 ExpandModelProperty_T = Literal[
     "author",
@@ -708,21 +713,26 @@ class RepoFolder:

 @dataclass
 class InferenceProviderMapping:
-
-
-    provider_id: str
+    provider: "PROVIDER_T"  # Provider name
+    hf_model_id: str  # ID of the model on the Hugging Face Hub
+    provider_id: str  # ID of the model on the provider's side
+    status: Literal["error", "live", "staging"]
     task: str

     adapter: Optional[str] = None
     adapter_weights_path: Optional[str] = None
+    type: Optional[Literal["single-model", "tag-filter"]] = None

     def __init__(self, **kwargs):
+        self.provider = kwargs.pop("provider")
         self.hf_model_id = kwargs.pop("hf_model_id")
-        self.status = kwargs.pop("status")
         self.provider_id = kwargs.pop("providerId")
+        self.status = kwargs.pop("status")
         self.task = kwargs.pop("task")
+
         self.adapter = kwargs.pop("adapter", None)
         self.adapter_weights_path = kwargs.pop("adapterWeightsPath", None)
+        self.type = kwargs.pop("type", None)
         self.__dict__.update(**kwargs)


@@ -764,12 +774,10 @@ class ModelInfo:
             If so, whether there is manual or automatic approval.
         gguf (`Dict`, *optional*):
             GGUF information of the model.
-        inference (`Literal["
-            Status of the model on the
-
-
-        inference_provider_mapping (`Dict`, *optional*):
-            Model's inference provider mapping.
+        inference (`Literal["warm"]`, *optional*):
+            Status of the model on Inference Providers. Warm if the model is served by at least one provider.
+        inference_provider_mapping (`List[InferenceProviderMapping]`, *optional*):
+            A list of [`InferenceProviderMapping`] ordered after the user's provider order.
         likes (`int`):
             Number of likes of the model.
         library_name (`str`, *optional*):
@@ -814,8 +822,8 @@ class ModelInfo:
     downloads_all_time: Optional[int]
     gated: Optional[Literal["auto", "manual", False]]
     gguf: Optional[Dict]
-    inference: Optional[Literal["warm"
-    inference_provider_mapping: Optional[
+    inference: Optional[Literal["warm"]]
+    inference_provider_mapping: Optional[List[InferenceProviderMapping]]
     likes: Optional[int]
     library_name: Optional[str]
    tags: Optional[List[str]]
@@ -851,14 +859,25 @@ class ModelInfo:
         self.gguf = kwargs.pop("gguf", None)

         self.inference = kwargs.pop("inference", None)
-
-
-
-
-
-
-        for
-
+
+        # little hack to simplify Inference Providers logic and make it backward and forward compatible
+        # right now, API returns a dict on model_info and a list on list_models. Let's harmonize to list.
+        mapping = kwargs.pop("inferenceProviderMapping", None)
+        if isinstance(mapping, list):
+            self.inference_provider_mapping = [
+                InferenceProviderMapping(**{**value, "hf_model_id": self.id}) for value in mapping
+            ]
+        elif isinstance(mapping, dict):
+            self.inference_provider_mapping = [
+                InferenceProviderMapping(**{**value, "hf_model_id": self.id, "provider": provider})
+                for provider, value in mapping.items()
+            ]
+        elif mapping is None:
+            self.inference_provider_mapping = None
+        else:
+            raise ValueError(
+                f"Unexpected type for `inferenceProviderMapping`. Expecting `dict` or `list`. Got {mapping}."
+            )

         self.tags = kwargs.pop("tags", None)
         self.pipeline_tag = kwargs.pop("pipeline_tag", None)
@@ -1169,16 +1188,16 @@ class SpaceInfo:
 @dataclass
 class CollectionItem:
     """
-    Contains information about an item of a Collection (model, dataset, Space or
+    Contains information about an item of a Collection (model, dataset, Space, paper or collection).

     Attributes:
         item_object_id (`str`):
             Unique ID of the item in the collection.
         item_id (`str`):
-            ID of the underlying object on the Hub. Can be either a repo_id or a
-            e.g. `"jbilcke-hf/ai-comic-factory"`, `"2307.09288"`.
+            ID of the underlying object on the Hub. Can be either a repo_id, a paper id or a collection slug.
+            e.g. `"jbilcke-hf/ai-comic-factory"`, `"2307.09288"`, `"celinah/cerebras-function-calling-682607169c35fbfa98b30b9a"`.
         item_type (`str`):
-            Type of the underlying object. Can be one of `"model"`, `"dataset"`, `"space"` or `"
+            Type of the underlying object. Can be one of `"model"`, `"dataset"`, `"space"`, `"paper"` or `"collection"`.
         position (`int`):
             Position of the item in the collection.
         note (`str`, *optional*):
@@ -1192,10 +1211,20 @@ class CollectionItem:
     note: Optional[str] = None

     def __init__(
-        self,
+        self,
+        _id: str,
+        id: str,
+        type: CollectionItemType_T,
+        position: int,
+        note: Optional[Dict] = None,
+        **kwargs,
     ) -> None:
         self.item_object_id: str = _id  # id in database
         self.item_id: str = id  # repo_id or paper id
+        # if the item is a collection, override item_id with the slug
+        slug = kwargs.get("slug")
+        if slug is not None:
+            self.item_id = slug  # collection slug
         self.item_type: CollectionItemType_T = type
         self.position: int = position
         self.note: str = note["text"] if note is not None else None
@@ -1825,7 +1854,8 @@ class HfApi:
         filter: Union[str, Iterable[str], None] = None,
         author: Optional[str] = None,
         gated: Optional[bool] = None,
-        inference: Optional[Literal["
+        inference: Optional[Literal["warm"]] = None,
+        inference_provider: Optional[Union[Literal["all"], "PROVIDER_T", List["PROVIDER_T"]]] = None,
         library: Optional[Union[str, List[str]]] = None,
         language: Optional[Union[str, List[str]]] = None,
         model_name: Optional[str] = None,
@@ -1859,10 +1889,11 @@ class HfApi:
             A boolean to filter models on the Hub that are gated or not. By default, all models are returned.
             If `gated=True` is passed, only gated models are returned.
             If `gated=False` is passed, only non-gated models are returned.
-            inference (`Literal["
-
-
-
+            inference (`Literal["warm"]`, *optional*):
+                If "warm", filter models on the Hub currently served by at least one provider.
+            inference_provider (`Literal["all"]` or `str`, *optional*):
+                A string to filter models on the Hub that are served by a specific provider.
+                Pass `"all"` to get all models served by at least one provider.
             library (`str` or `List`, *optional*):
                 A string or list of strings of foundational libraries models were
                 originally trained from, such as pytorch, tensorflow, or allennlp.
@@ -1922,7 +1953,7 @@ class HfApi:
         Returns:
             `Iterable[ModelInfo]`: an iterable of [`huggingface_hub.hf_api.ModelInfo`] objects.

-        Example
+        Example:

         ```python
         >>> from huggingface_hub import HfApi
@@ -1932,24 +1963,19 @@ class HfApi:
         # List all models
         >>> api.list_models()

-        # List
+        # List text classification models
         >>> api.list_models(filter="text-classification")

-        # List
-        >>> api.list_models(filter="
-        ```
-
-        Example usage with the `search` argument:
+        # List models from the KerasHub library
+        >>> api.list_models(filter="keras-hub")

-
-        >>>
-
-        >>> api = HfApi()
+        # List models served by Cohere
+        >>> api.list_models(inference_provider="cohere")

-        # List
+        # List models with "bert" in their name
         >>> api.list_models(search="bert")

-        # List
+        # List models with "bert" in their name and pushed by google
         >>> api.list_models(search="bert", author="google")
         ```
         """
@@ -1992,6 +2018,8 @@ class HfApi:
             params["gated"] = gated
         if inference is not None:
             params["inference"] = inference
+        if inference_provider is not None:
+            params["inference_provider"] = inference_provider
         if pipeline_tag:
             params["pipeline_tag"] = pipeline_tag
         search_list = []
@@ -4482,7 +4510,7 @@ class HfApi:
             isinstance(addition.path_or_fileobj, io.BufferedIOBase) for addition in new_lfs_additions_to_upload
         )
         if xet_enabled and not has_buffered_io_data and is_xet_available():
-            logger.
+            logger.debug("Uploading files using Xet Storage..")
             _upload_xet_files(**upload_kwargs, create_pr=create_pr)  # type: ignore [arg-type]
         else:
             if xet_enabled and is_xet_available():
@@ -5523,7 +5551,7 @@ class HfApi:
         allow_patterns: Optional[Union[List[str], str]] = None,
         ignore_patterns: Optional[Union[List[str], str]] = None,
         max_workers: int = 8,
-        tqdm_class: Optional[base_tqdm] = None,
+        tqdm_class: Optional[Type[base_tqdm]] = None,
         # Deprecated args
         local_dir_use_symlinks: Union[bool, Literal["auto"]] = "auto",
         resume_download: Optional[bool] = None,
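Taken together, the hf_api.py changes add an `inference_provider` filter to `list_models` and turn `ModelInfo.inference_provider_mapping` into a list of `InferenceProviderMapping` objects. A rough usage sketch (the provider name and model ID below are only illustrative):

from huggingface_hub import HfApi

api = HfApi()

# New in 0.33.0: filter models by the provider currently serving them.
for model in api.list_models(inference_provider="groq", limit=5):
    print(model.id)

# The provider mapping is now a list, whichever endpoint it came from;
# it may be None if the Hub returns no mapping for the model.
info = api.model_info("meta-llama/Llama-3.1-8B-Instruct")
for mapping in info.inference_provider_mapping or []:
    print(mapping.provider, mapping.provider_id, mapping.status, mapping.task)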
huggingface_hub/inference/_client.py
CHANGED
@@ -134,7 +134,7 @@ class InferenceClient:
             path will be appended to the base URL (see the [TGI Messages API](https://huggingface.co/docs/text-generation-inference/en/messages_api)
             documentation for details). When passing a URL as `model`, the client will not append any suffix path to it.
         provider (`str`, *optional*):
-            Name of the provider to use for inference. Can be `"black-forest-labs"`, `"cerebras"`, `"cohere"`, `"fal-ai"`, `"fireworks-ai"`, `"hf-inference"`, `"hyperbolic"`, `"nebius"`, `"novita"`, `"nscale"`, `"openai"`, `"replicate"`, "sambanova"` or `"together"`.
+            Name of the provider to use for inference. Can be `"black-forest-labs"`, `"cerebras"`, `"cohere"`, `"fal-ai"`, `"featherless-ai"`, `"fireworks-ai"`, `"groq"`, `"hf-inference"`, `"hyperbolic"`, `"nebius"`, `"novita"`, `"nscale"`, `"openai"`, `"replicate"`, "sambanova"` or `"together"`.
             Defaults to "auto" i.e. the first of the providers available for the model, sorted by the user's order in https://hf.co/settings/inference-providers.
             If model is a URL or `base_url` is passed, then `provider` is not used.
         token (`str`, *optional*):
@@ -1685,9 +1685,8 @@ class InferenceClient:
         model_id = model or self.model
         provider_helper = get_provider_helper(self.provider, task="table-question-answering", model=model_id)
         request_parameters = provider_helper.prepare_request(
-            inputs=
+            inputs={"query": query, "table": table},
             parameters={"model": model, "padding": padding, "sequential": sequential, "truncation": truncation},
-            extra_payload={"query": query, "table": table},
             headers=self.headers,
             model=model_id,
             api_key=self.token,
@@ -3196,7 +3195,7 @@ class InferenceClient:
         return ZeroShotImageClassificationOutputElement.parse_obj_as_list(response)

     @_deprecate_method(
-        version="0.
+        version="0.35.0",
         message=(
             "HF Inference API is getting revamped and will only support warm models in the future (no cold start allowed)."
             " Use `HfApi.list_models(..., inference_provider='...')` to list warm models per provider."
@@ -3386,7 +3385,7 @@ class InferenceClient:
         return response.status_code == 200

     @_deprecate_method(
-        version="0.
+        version="0.35.0",
         message=(
             "HF Inference API is getting revamped and will only support warm models in the future (no cold start allowed)."
             " Use `HfApi.model_info` to get the model status both with HF Inference API and external providers."
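The provider docstring now lists `"featherless-ai"` and `"groq"` among the supported values; selecting a provider on the client is otherwise unchanged. A short sketch (the model ID is an assumption, and "auto" resolves through the provider order configured at https://hf.co/settings/inference-providers):

from huggingface_hub import InferenceClient

client = InferenceClient(provider="auto")  # or e.g. provider="groq"

response = client.chat_completion(
    model="meta-llama/Llama-3.1-8B-Instruct",
    messages=[{"role": "user", "content": "Say hello in one word."}],
    max_tokens=16,
)
print(response.choices[0].message.content)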
huggingface_hub/inference/_generated/_async_client.py
CHANGED
@@ -122,7 +122,7 @@ class AsyncInferenceClient:
             path will be appended to the base URL (see the [TGI Messages API](https://huggingface.co/docs/text-generation-inference/en/messages_api)
             documentation for details). When passing a URL as `model`, the client will not append any suffix path to it.
         provider (`str`, *optional*):
-            Name of the provider to use for inference. Can be `"black-forest-labs"`, `"cerebras"`, `"cohere"`, `"fal-ai"`, `"fireworks-ai"`, `"hf-inference"`, `"hyperbolic"`, `"nebius"`, `"novita"`, `"nscale"`, `"openai"`, `"replicate"`, "sambanova"` or `"together"`.
+            Name of the provider to use for inference. Can be `"black-forest-labs"`, `"cerebras"`, `"cohere"`, `"fal-ai"`, `"featherless-ai"`, `"fireworks-ai"`, `"groq"`, `"hf-inference"`, `"hyperbolic"`, `"nebius"`, `"novita"`, `"nscale"`, `"openai"`, `"replicate"`, "sambanova"` or `"together"`.
             Defaults to "auto" i.e. the first of the providers available for the model, sorted by the user's order in https://hf.co/settings/inference-providers.
             If model is a URL or `base_url` is passed, then `provider` is not used.
         token (`str`, *optional*):
@@ -1737,9 +1737,8 @@ class AsyncInferenceClient:
         model_id = model or self.model
         provider_helper = get_provider_helper(self.provider, task="table-question-answering", model=model_id)
         request_parameters = provider_helper.prepare_request(
-            inputs=
+            inputs={"query": query, "table": table},
             parameters={"model": model, "padding": padding, "sequential": sequential, "truncation": truncation},
-            extra_payload={"query": query, "table": table},
             headers=self.headers,
             model=model_id,
             api_key=self.token,
@@ -3260,7 +3259,7 @@ class AsyncInferenceClient:
         return ZeroShotImageClassificationOutputElement.parse_obj_as_list(response)

     @_deprecate_method(
-        version="0.
+        version="0.35.0",
         message=(
             "HF Inference API is getting revamped and will only support warm models in the future (no cold start allowed)."
             " Use `HfApi.list_models(..., inference_provider='...')` to list warm models per provider."
@@ -3496,7 +3495,7 @@ class AsyncInferenceClient:
         return response.status == 200

     @_deprecate_method(
-        version="0.
+        version="0.35.0",
         message=(
             "HF Inference API is getting revamped and will only support warm models in the future (no cold start allowed)."
             " Use `HfApi.model_info` to get the model status both with HF Inference API and external providers."
huggingface_hub/inference/_mcp/mcp_client.py
CHANGED
@@ -310,7 +310,19 @@ class MCPClient:
         # Process tool calls one by one
         for tool_call in final_tool_calls.values():
             function_name = tool_call.function.name
-
+            try:
+                function_args = json.loads(tool_call.function.arguments or "{}")
+            except json.JSONDecodeError as err:
+                tool_message = {
+                    "role": "tool",
+                    "tool_call_id": tool_call.id,
+                    "name": function_name,
+                    "content": f"Invalid JSON generated by the model: {err}",
+                }
+                tool_message_as_obj = ChatCompletionInputMessage.parse_obj_as_instance(tool_message)
+                messages.append(tool_message_as_obj)
+                yield tool_message_as_obj
+                continue  # move to next tool call

             tool_message = {"role": "tool", "tool_call_id": tool_call.id, "content": "", "name": function_name}

@@ -324,11 +336,13 @@ class MCPClient:
             # Execute tool call with the appropriate session
             session = self.sessions.get(function_name)
             if session is not None:
-
-
+                try:
+                    result = await session.call_tool(function_name, function_args)
+                    tool_message["content"] = format_result(result)
+                except Exception as err:
+                    tool_message["content"] = f"Error: MCP tool call failed with error message: {err}"
             else:
-
-                tool_message["content"] = error_msg
+                tool_message["content"] = f"Error: No session found for tool: {function_name}"

             # Yield tool message
             tool_message_as_obj = ChatCompletionInputMessage.parse_obj_as_instance(tool_message)
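The MCPClient change wraps tool-argument parsing and tool execution in try/except so a malformed tool call is reported back to the model as a tool message instead of crashing the agent loop. The standalone helper below is hypothetical and only mirrors that pattern:

import json
from typing import Any, Tuple

def parse_tool_arguments(raw_arguments: str) -> Tuple[bool, Any]:
    # Hypothetical helper: never raise on bad JSON, return a readable error instead.
    try:
        return True, json.loads(raw_arguments or "{}")
    except json.JSONDecodeError as err:
        return False, f"Invalid JSON generated by the model: {err}"

ok, parsed = parse_tool_arguments('{"city": "Paris"')  # missing brace -> (False, error text)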
huggingface_hub/inference/_providers/__init__.py
CHANGED
@@ -1,5 +1,9 @@
 from typing import Dict, Literal, Optional, Union

+from huggingface_hub.inference._providers.featherless_ai import (
+    FeatherlessConversationalTask,
+    FeatherlessTextGenerationTask,
+)
 from huggingface_hub.utils import logging

 from ._common import TaskProviderHelper, _fetch_inference_provider_mapping
@@ -13,6 +17,7 @@ from .fal_ai import (
     FalAITextToVideoTask,
 )
 from .fireworks_ai import FireworksAIConversationalTask
+from .groq import GroqConversationalTask
 from .hf_inference import (
     HFInferenceBinaryInputTask,
     HFInferenceConversational,
@@ -42,7 +47,9 @@ PROVIDER_T = Literal[
     "cerebras",
     "cohere",
     "fal-ai",
+    "featherless-ai",
     "fireworks-ai",
+    "groq",
     "hf-inference",
     "hyperbolic",
     "nebius",
@@ -72,9 +79,16 @@ PROVIDERS: Dict[PROVIDER_T, Dict[str, TaskProviderHelper]] = {
         "text-to-speech": FalAITextToSpeechTask(),
         "text-to-video": FalAITextToVideoTask(),
     },
+    "featherless-ai": {
+        "conversational": FeatherlessConversationalTask(),
+        "text-generation": FeatherlessTextGenerationTask(),
+    },
     "fireworks-ai": {
         "conversational": FireworksAIConversationalTask(),
     },
+    "groq": {
+        "conversational": GroqConversationalTask(),
+    },
     "hf-inference": {
         "text-to-image": HFInferenceTask("text-to-image"),
         "conversational": HFInferenceConversational(),
@@ -174,7 +188,7 @@ def get_provider_helper(
         if model is None:
             raise ValueError("Specifying a model is required when provider is 'auto'")
         provider_mapping = _fetch_inference_provider_mapping(model)
-        provider = next(iter(provider_mapping))
+        provider = next(iter(provider_mapping)).provider

         provider_tasks = PROVIDERS.get(provider)  # type: ignore
         if provider_tasks is None:
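The registry is a nested mapping from provider name to task name to a `TaskProviderHelper` instance, and with the `.provider` fix `get_provider_helper("auto", ...)` resolves through the first entry of the model's provider mapping. A hedged sketch of both lookups (this is an internal, underscore-prefixed module, and the model ID is illustrative):

from huggingface_hub.inference._providers import PROVIDERS, get_provider_helper

# Static lookup: provider -> task -> helper instance.
groq_chat_helper = PROVIDERS["groq"]["conversational"]

# Dynamic lookup: "auto" fetches the model's provider mapping from the Hub
# and picks the first provider in the user's preference order.
helper = get_provider_helper("auto", task="conversational", model="meta-llama/Llama-3.1-8B-Instruct")
print(type(helper).__name__)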
huggingface_hub/inference/_providers/_common.py
CHANGED
@@ -1,5 +1,5 @@
 from functools import lru_cache
-from typing import Any, Dict, Optional, Union
+from typing import Any, Dict, List, Optional, Union

 from huggingface_hub import constants
 from huggingface_hub.hf_api import InferenceProviderMapping
@@ -9,6 +9,7 @@ from huggingface_hub.utils import build_hf_headers, get_token, logging

 logger = logging.get_logger(__name__)

+
 # Dev purposes only.
 # If you want to try to run inference for a new model locally before it's registered on huggingface.co
 # for a given Inference Provider, you can add it to the following dictionary.
@@ -24,6 +25,7 @@ HARDCODED_MODEL_INFERENCE_MAPPING: Dict[str, Dict[str, InferenceProviderMapping]
     "cohere": {},
     "fal-ai": {},
     "fireworks-ai": {},
+    "groq": {},
     "hf-inference": {},
     "hyperbolic": {},
     "nebius": {},
@@ -124,7 +126,12 @@ class TaskProviderHelper:
         if HARDCODED_MODEL_INFERENCE_MAPPING.get(self.provider, {}).get(model):
             return HARDCODED_MODEL_INFERENCE_MAPPING[self.provider][model]

-        provider_mapping =
+        provider_mapping = None
+        for mapping in _fetch_inference_provider_mapping(model):
+            if mapping.provider == self.provider:
+                provider_mapping = mapping
+                break
+
         if provider_mapping is None:
             raise ValueError(f"Model {model} is not supported by provider {self.provider}.")

@@ -138,6 +145,11 @@ class TaskProviderHelper:
             logger.warning(
                 f"Model {model} is in staging mode for provider {self.provider}. Meant for test purposes only."
             )
+        if provider_mapping.status == "error":
+            logger.warning(
+                f"Our latest automated health check on model '{model}' for provider '{self.provider}' did not complete successfully. "
+                "Inference call might fail."
+            )
         return provider_mapping

     def _prepare_headers(self, headers: Dict, api_key: str) -> Dict:
@@ -236,7 +248,7 @@ class BaseTextGenerationTask(TaskProviderHelper):


 @lru_cache(maxsize=None)
-def _fetch_inference_provider_mapping(model: str) ->
+def _fetch_inference_provider_mapping(model: str) -> List["InferenceProviderMapping"]:
     """
     Fetch provider mappings for a model from the Hub.
     """
huggingface_hub/inference/_providers/featherless_ai.py
ADDED
@@ -0,0 +1,38 @@
+from typing import Any, Dict, Optional, Union
+
+from huggingface_hub.hf_api import InferenceProviderMapping
+from huggingface_hub.inference._common import RequestParameters, _as_dict
+
+from ._common import BaseConversationalTask, BaseTextGenerationTask, filter_none
+
+
+_PROVIDER = "featherless-ai"
+_BASE_URL = "https://api.featherless.ai"
+
+
+class FeatherlessTextGenerationTask(BaseTextGenerationTask):
+    def __init__(self):
+        super().__init__(provider=_PROVIDER, base_url=_BASE_URL)
+
+    def _prepare_payload_as_dict(
+        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[Dict]:
+        params = filter_none(parameters.copy())
+        params["max_tokens"] = params.pop("max_new_tokens", None)
+
+        return {"prompt": inputs, **params, "model": provider_mapping_info.provider_id}
+
+    def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any:
+        output = _as_dict(response)["choices"][0]
+        return {
+            "generated_text": output["text"],
+            "details": {
+                "finish_reason": output.get("finish_reason"),
+                "seed": output.get("seed"),
+            },
+        }
+
+
+class FeatherlessConversationalTask(BaseConversationalTask):
+    def __init__(self):
+        super().__init__(provider=_PROVIDER, base_url=_BASE_URL)
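The new file wires Featherless AI in through the existing base classes: conversational requests reuse the OpenAI-style chat route, while text generation rewrites `max_new_tokens` to `max_tokens` and maps the completion response back to the `generated_text` / `details` shape. Calling it goes through `InferenceClient` as usual; a sketch (the model ID is an assumption):

from huggingface_hub import InferenceClient

client = InferenceClient(provider="featherless-ai")
text = client.text_generation(
    "Once upon a time,",
    model="meta-llama/Llama-3.1-8B-Instruct",
    max_new_tokens=32,
)
print(text)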
huggingface_hub/inference/_providers/groq.py
ADDED
@@ -0,0 +1,9 @@
+from ._common import BaseConversationalTask
+
+
+class GroqConversationalTask(BaseConversationalTask):
+    def __init__(self):
+        super().__init__(provider="groq", base_url="https://api.groq.com")
+
+    def _prepare_route(self, mapped_model: str, api_key: str) -> str:
+        return "/openai/v1/chat/completions"
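Groq registers only a conversational task and routes it to the provider's OpenAI-compatible endpoint. A sketch of using it from the client (the model ID is an assumption):

from huggingface_hub import InferenceClient

client = InferenceClient(provider="groq")
response = client.chat_completion(
    model="meta-llama/Llama-3.1-8B-Instruct",
    messages=[{"role": "user", "content": "What is the capital of France?"}],
)
print(response.choices[0].message.content)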
huggingface_hub/inference/_providers/hf_inference.py
CHANGED
@@ -26,7 +26,9 @@ class HFInferenceTask(TaskProviderHelper):

     def _prepare_mapping_info(self, model: Optional[str]) -> InferenceProviderMapping:
         if model is not None and model.startswith(("http://", "https://")):
-            return InferenceProviderMapping(
+            return InferenceProviderMapping(
+                provider="hf-inference", providerId=model, hf_model_id=model, task=self.task, status="live"
+            )
         model_id = model if model is not None else _fetch_recommended_models().get(self.task)
         if model_id is None:
             raise ValueError(
@@ -34,7 +36,9 @@ class HFInferenceTask(TaskProviderHelper):
                 " explicitly. Visit https://huggingface.co/tasks for more info."
             )
         _check_supported_task(model_id, self.task)
-        return InferenceProviderMapping(
+        return InferenceProviderMapping(
+            provider="hf-inference", providerId=model_id, hf_model_id=model_id, task=self.task, status="live"
+        )

     def _prepare_url(self, api_key: str, mapped_model: str) -> str:
         # hf-inference provider can handle URLs (e.g. Inference Endpoints or TGI deployment)
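With this change, passing a URL as `model` makes the hf-inference helper synthesize a `"live"` `InferenceProviderMapping` instead of querying the Hub, which is what lets the client target a self-hosted TGI server or Inference Endpoint directly. A sketch (the localhost URL is an assumption):

from huggingface_hub import InferenceClient

# Point the client at a TGI deployment or Inference Endpoint URL;
# no provider mapping lookup happens for URL models.
client = InferenceClient(model="http://localhost:8080")
print(client.text_generation("The answer to life is", max_new_tokens=8))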
huggingface_hub/inference/_providers/openai.py
CHANGED
@@ -20,4 +20,6 @@ class OpenAIConversationalTask(BaseConversationalTask):
     def _prepare_mapping_info(self, model: Optional[str]) -> InferenceProviderMapping:
         if model is None:
             raise ValueError("Please provide an OpenAI model ID, e.g. `gpt-4o` or `o1`.")
-        return InferenceProviderMapping(
+        return InferenceProviderMapping(
+            provider="openai", providerId=model, task="conversational", status="live", hf_model_id=model
+        )
{huggingface_hub-0.32.5.dist-info → huggingface_hub-0.33.0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: huggingface-hub
-Version: 0.32.5
+Version: 0.33.0
 Summary: Client library to download and publish models, datasets and other repos on the huggingface.co hub
 Home-page: https://github.com/huggingface/huggingface_hub
 Author: Hugging Face, Inc.
{huggingface_hub-0.32.5.dist-info → huggingface_hub-0.33.0.dist-info}/RECORD
CHANGED
@@ -1,11 +1,11 @@
-huggingface_hub/__init__.py,sha256=
+huggingface_hub/__init__.py,sha256=jv6NeJ3utSer-d4lltLZJ1kWnseFXgt7KBH-BnKY2K8,50644
 huggingface_hub/_commit_api.py,sha256=ZbmuIhFdF8B3F_cvGtxorka7MmIQOk8oBkCtYltnCvI,39456
 huggingface_hub/_commit_scheduler.py,sha256=tfIoO1xWHjTJ6qy6VS6HIoymDycFPg0d6pBSZprrU2U,14679
 huggingface_hub/_inference_endpoints.py,sha256=qXR0utAYRaEWTI8EXzAsDpVDcYpp8bJPEBbcOxRS52E,17413
-huggingface_hub/_local_folder.py,sha256=
+huggingface_hub/_local_folder.py,sha256=9NkNGsyEfTtopfhXbicS2TFIcm9lAzLFqItzYy2h0D4,16915
 huggingface_hub/_login.py,sha256=ssf4viT5BhHI2ZidnSuAZcrwSxzaLOrf8xgRVKuvu_A,20298
 huggingface_hub/_oauth.py,sha256=YNbSSZCNZLiCqwMoYboSAfI3XjEsbyAADJcwgRAdhBc,18802
-huggingface_hub/_snapshot_download.py,sha256=
+huggingface_hub/_snapshot_download.py,sha256=6XR6z_BWVP484pUX6hzX8JgsqIrKMFGDBqT97qArPS4,16090
 huggingface_hub/_space_api.py,sha256=jb6rF8qLtjaNU12D-8ygAPM26xDiHCu8CHXHowhGTmg,5470
 huggingface_hub/_tensorboard_logger.py,sha256=ZkYcAUiRC8RGL214QUYtp58O8G5tn-HF6DCWha9imcA,8358
 huggingface_hub/_upload_large_folder.py,sha256=elY5Rv2YVJECVpdZ9PM1zdO8kG-jmi8DifLOa7aC3EU,24178
@@ -16,8 +16,8 @@ huggingface_hub/constants.py,sha256=1RdXbeORR-21auyKLsLbOJDIC9Cd70tYEAVWzP64BJc,
 huggingface_hub/dataclasses.py,sha256=sgPdEi2UDprhNPP2PPkiSlzsHdC1WcpwVTLwlHAEcr0,17224
 huggingface_hub/errors.py,sha256=D7Lw0Jjrf8vfmD0B26LEvg-JWkU8Zq0KDPJOzFY4QLw,11201
 huggingface_hub/fastai_utils.py,sha256=DpeH9d-6ut2k_nCAAwglM51XmRmgfbRe2SPifpVL5Yk,16745
-huggingface_hub/file_download.py,sha256=
-huggingface_hub/hf_api.py,sha256=
+huggingface_hub/file_download.py,sha256=qXPRmGRTv1qAA_QwU7CHYusFGCME32ox1yQ6X62_5O8,78542
+huggingface_hub/hf_api.py,sha256=LSteoR6ndvbkS1GortgAlfIZh2HNTZBOhiKUJ4pwgdY,444347
 huggingface_hub/hf_file_system.py,sha256=U6IY_QLNzZfvpsbvKEiakOBS2U6cduZw5t0x8wBPUn4,47531
 huggingface_hub/hub_mixin.py,sha256=LpbggOPIlr7L2QVi3DOfWsGYsde9OMlwxT5LZfcSdSQ,38115
 huggingface_hub/inference_api.py,sha256=b4-NhPSn9b44nYKV8tDKXodmE4JVdEymMWL4CVGkzlE,8323
@@ -43,10 +43,10 @@ huggingface_hub/commands/upload_large_folder.py,sha256=P-EO44JWVl39Ax4b0E0Z873d0
 huggingface_hub/commands/user.py,sha256=_4rjCrP84KqtqCMn-r3YWLuGLrnklOWTdJFVTNFMLuU,7096
 huggingface_hub/commands/version.py,sha256=vfCJn7GO1m-DtDmbdsty8_RTVtnZ7lX6MJsx0Bf4e-s,1266
 huggingface_hub/inference/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-huggingface_hub/inference/_client.py,sha256=
+huggingface_hub/inference/_client.py,sha256=HQgPsM6LSYXULCzq6z-S0utdijbswhwcwV2kuq4YxkE,161536
 huggingface_hub/inference/_common.py,sha256=iwCkq2fWE1MVoPTeeXN7UN5FZi7g5fZ3K8PHSOCi5dU,14591
 huggingface_hub/inference/_generated/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-huggingface_hub/inference/_generated/_async_client.py,sha256=
+huggingface_hub/inference/_generated/_async_client.py,sha256=D3Kv_EId7tMoU6ED41_2SHLrw2jlJIW2q0vHx0SxTpk,167696
 huggingface_hub/inference/_generated/types/__init__.py,sha256=qI8Eu9WcBcKhVkLli6YniGHpfiJ9MLqtzmwXX35E7bA,6443
 huggingface_hub/inference/_generated/types/audio_classification.py,sha256=Jg3mzfGhCSH6CfvVvgJSiFpkz6v4nNA0G4LJXacEgNc,1573
 huggingface_hub/inference/_generated/types/audio_to_audio.py,sha256=2Ep4WkePL7oJwcp5nRJqApwviumGHbft9HhXE9XLHj4,891
@@ -85,22 +85,24 @@ huggingface_hub/inference/_mcp/_cli_hacks.py,sha256=cMZirVFe4N0EM9Nzzs9aEmzUBUEB
 huggingface_hub/inference/_mcp/agent.py,sha256=azX9_lsFjNlgsEvRYdKgsmOmpNReWIcbuMeIVWc852k,4264
 huggingface_hub/inference/_mcp/cli.py,sha256=9IKItC1XJ4yzQAKP1iZwpYL1BA56bem2AQlKlB0SGdc,9251
 huggingface_hub/inference/_mcp/constants.py,sha256=tE_V6qcvsmvVoJa4eg04jhoTR2Cx1cNHieY2ENrm1_M,2511
-huggingface_hub/inference/_mcp/mcp_client.py,sha256=
+huggingface_hub/inference/_mcp/mcp_client.py,sha256=qefnsJOv2B7YS9k68pLRrsEZXF_N09yjuP_5S_kBv68,14839
 huggingface_hub/inference/_mcp/types.py,sha256=JPK7rC9j-abot8pN3xw1UbSv9S2OBSRStjl_cidWs1Q,1247
 huggingface_hub/inference/_mcp/utils.py,sha256=VsRWl0fuSZDS0zNT9n7FOMSlzA0UBbP8p8xWKWDt2Pc,4093
-huggingface_hub/inference/_providers/__init__.py,sha256=
-huggingface_hub/inference/_providers/_common.py,sha256=
+huggingface_hub/inference/_providers/__init__.py,sha256=rOaUL8zXKazYMgnPMDxEN7Y3nZwaKsA0gkILLWN1HLg,8116
+huggingface_hub/inference/_providers/_common.py,sha256=V4oDtWGeihEOSBbcn1zWvYmOAyG7ZVZvvAUVE_RFA2Q,10578
 huggingface_hub/inference/_providers/black_forest_labs.py,sha256=wO7qgRyNyrIKlZtvL3vJEbS4-D19kfoXZk6PDh1dTis,2842
 huggingface_hub/inference/_providers/cerebras.py,sha256=QOJ-1U-os7uE7p6eUnn_P_APq-yQhx28be7c3Tq2EuA,210
 huggingface_hub/inference/_providers/cohere.py,sha256=O3tC-qIUL91mx_mE8bOHCtDWcQuKOUauhUoXSUBUCZ8,1253
 huggingface_hub/inference/_providers/fal_ai.py,sha256=gGWPsvQIsuk3kTIXHwpOqA0R1ZsPEo5MYc7OwUoFjxY,7162
+huggingface_hub/inference/_providers/featherless_ai.py,sha256=QxBz-32O4PztxixrIjrfKuTOzvfqyUi-cVsw0Hf_zlY,1382
 huggingface_hub/inference/_providers/fireworks_ai.py,sha256=Id226ITfPkOcFMFzly3MW9l-dZl9l4qizL4JEHWkBFk,1215
-huggingface_hub/inference/_providers/
+huggingface_hub/inference/_providers/groq.py,sha256=JTk2JV4ZOlaohho7zLAFQtk92kGVsPmLJ1hmzcwsqvQ,315
+huggingface_hub/inference/_providers/hf_inference.py,sha256=LRQ_FzmDOCOY0NWIbDHM87DnyE1aVFZuxJleAiAnSvM,8508
 huggingface_hub/inference/_providers/hyperbolic.py,sha256=OQIBi2j3aNvuaSQ8BUK1K1PVeRXdrxc80G-6YmBa-ns,1985
 huggingface_hub/inference/_providers/nebius.py,sha256=VJpTF2JZ58rznc9wxdk-57vwF8sV2vESw_WkXjXqCho,3580
 huggingface_hub/inference/_providers/novita.py,sha256=HGVC8wPraRQUuI5uBoye1Y4Wqe4X116B71GhhbWy5yM,2514
 huggingface_hub/inference/_providers/nscale.py,sha256=qWUsWinQmUbNUqehyKn34tVoWehu8gd-OZ2F4uj2SWM,1802
-huggingface_hub/inference/_providers/openai.py,sha256=
+huggingface_hub/inference/_providers/openai.py,sha256=GCVYeNdjWIgpQQ7E_Xv8IebmdhTi0S6WfFosz3nLtps,1089
 huggingface_hub/inference/_providers/replicate.py,sha256=zFQnnAaNmRruqTvZUG_8It8xkKePHLGKRomSkwjrUuk,3157
 huggingface_hub/inference/_providers/sambanova.py,sha256=Unt3H3jr_kgI9vzRjmmW1DFyoEuPkKCcgIIloiOj3j8,2037
 huggingface_hub/inference/_providers/together.py,sha256=KHF19CS3qXS7G1-CwcMiD8Z5wzPKEKi4F2DzqAthbBE,3439
@@ -139,9 +141,9 @@ huggingface_hub/utils/insecure_hashlib.py,sha256=iAaepavFZ5Dhfa5n8KozRfQprKmvcjS
 huggingface_hub/utils/logging.py,sha256=0A8fF1yh3L9Ka_bCDX2ml4U5Ht0tY8Dr3JcbRvWFuwo,4909
 huggingface_hub/utils/sha.py,sha256=OFnNGCba0sNcT2gUwaVCJnldxlltrHHe0DS_PCpV3C4,2134
 huggingface_hub/utils/tqdm.py,sha256=xAKcyfnNHsZ7L09WuEM5Ew5-MDhiahLACbbN2zMmcLs,10671
-huggingface_hub-0.
-huggingface_hub-0.
-huggingface_hub-0.
-huggingface_hub-0.
-huggingface_hub-0.
-huggingface_hub-0.
+huggingface_hub-0.33.0.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+huggingface_hub-0.33.0.dist-info/METADATA,sha256=W36QzG9PlzKu3fSJWL_qWux7SIG-kb0OECNBHNiSWsE,14777
+huggingface_hub-0.33.0.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
+huggingface_hub-0.33.0.dist-info/entry_points.txt,sha256=uelw0-fu0kd-CxIuOsR1bsjLIFnAaMQ6AIqluJYDhQw,184
+huggingface_hub-0.33.0.dist-info/top_level.txt,sha256=8KzlQJAY4miUvjAssOAJodqKOw3harNzuiwGQ9qLSSk,16
+huggingface_hub-0.33.0.dist-info/RECORD,,
{huggingface_hub-0.32.5.dist-info → huggingface_hub-0.33.0.dist-info}/LICENSE
File without changes
{huggingface_hub-0.32.5.dist-info → huggingface_hub-0.33.0.dist-info}/WHEEL
File without changes
{huggingface_hub-0.32.5.dist-info → huggingface_hub-0.33.0.dist-info}/entry_points.txt
File without changes
{huggingface_hub-0.32.5.dist-info → huggingface_hub-0.33.0.dist-info}/top_level.txt
File without changes