huggingface-hub 0.28.0rc5__py3-none-any.whl → 0.29.0rc0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of huggingface-hub might be problematic.

Files changed (63)
  1. huggingface_hub/__init__.py +1 -4
  2. huggingface_hub/constants.py +16 -11
  3. huggingface_hub/file_download.py +10 -6
  4. huggingface_hub/hf_api.py +53 -23
  5. huggingface_hub/inference/_client.py +151 -84
  6. huggingface_hub/inference/_common.py +3 -27
  7. huggingface_hub/inference/_generated/_async_client.py +147 -83
  8. huggingface_hub/inference/_generated/types/__init__.py +1 -1
  9. huggingface_hub/inference/_generated/types/audio_classification.py +4 -5
  10. huggingface_hub/inference/_generated/types/audio_to_audio.py +3 -4
  11. huggingface_hub/inference/_generated/types/automatic_speech_recognition.py +7 -8
  12. huggingface_hub/inference/_generated/types/base.py +21 -0
  13. huggingface_hub/inference/_generated/types/chat_completion.py +29 -30
  14. huggingface_hub/inference/_generated/types/depth_estimation.py +3 -4
  15. huggingface_hub/inference/_generated/types/document_question_answering.py +5 -6
  16. huggingface_hub/inference/_generated/types/feature_extraction.py +5 -6
  17. huggingface_hub/inference/_generated/types/fill_mask.py +4 -5
  18. huggingface_hub/inference/_generated/types/image_classification.py +4 -5
  19. huggingface_hub/inference/_generated/types/image_segmentation.py +4 -5
  20. huggingface_hub/inference/_generated/types/image_to_image.py +5 -6
  21. huggingface_hub/inference/_generated/types/image_to_text.py +5 -6
  22. huggingface_hub/inference/_generated/types/object_detection.py +5 -6
  23. huggingface_hub/inference/_generated/types/question_answering.py +5 -6
  24. huggingface_hub/inference/_generated/types/sentence_similarity.py +3 -4
  25. huggingface_hub/inference/_generated/types/summarization.py +4 -5
  26. huggingface_hub/inference/_generated/types/table_question_answering.py +5 -6
  27. huggingface_hub/inference/_generated/types/text2text_generation.py +4 -5
  28. huggingface_hub/inference/_generated/types/text_classification.py +4 -5
  29. huggingface_hub/inference/_generated/types/text_generation.py +12 -13
  30. huggingface_hub/inference/_generated/types/text_to_audio.py +5 -6
  31. huggingface_hub/inference/_generated/types/text_to_image.py +8 -15
  32. huggingface_hub/inference/_generated/types/text_to_speech.py +5 -6
  33. huggingface_hub/inference/_generated/types/text_to_video.py +4 -5
  34. huggingface_hub/inference/_generated/types/token_classification.py +4 -5
  35. huggingface_hub/inference/_generated/types/translation.py +4 -5
  36. huggingface_hub/inference/_generated/types/video_classification.py +4 -5
  37. huggingface_hub/inference/_generated/types/visual_question_answering.py +5 -6
  38. huggingface_hub/inference/_generated/types/zero_shot_classification.py +4 -5
  39. huggingface_hub/inference/_generated/types/zero_shot_image_classification.py +4 -5
  40. huggingface_hub/inference/_generated/types/zero_shot_object_detection.py +5 -6
  41. huggingface_hub/inference/_providers/__init__.py +44 -8
  42. huggingface_hub/inference/_providers/_common.py +239 -0
  43. huggingface_hub/inference/_providers/black_forest_labs.py +66 -0
  44. huggingface_hub/inference/_providers/fal_ai.py +31 -100
  45. huggingface_hub/inference/_providers/fireworks_ai.py +6 -0
  46. huggingface_hub/inference/_providers/hf_inference.py +58 -142
  47. huggingface_hub/inference/_providers/hyperbolic.py +43 -0
  48. huggingface_hub/inference/_providers/nebius.py +41 -0
  49. huggingface_hub/inference/_providers/novita.py +26 -0
  50. huggingface_hub/inference/_providers/replicate.py +24 -119
  51. huggingface_hub/inference/_providers/sambanova.py +3 -86
  52. huggingface_hub/inference/_providers/together.py +36 -130
  53. huggingface_hub/utils/_headers.py +5 -0
  54. huggingface_hub/utils/_hf_folder.py +4 -32
  55. huggingface_hub/utils/_http.py +85 -2
  56. huggingface_hub/utils/_typing.py +1 -1
  57. huggingface_hub/utils/logging.py +6 -0
  58. {huggingface_hub-0.28.0rc5.dist-info → huggingface_hub-0.29.0rc0.dist-info}/METADATA +1 -1
  59. {huggingface_hub-0.28.0rc5.dist-info → huggingface_hub-0.29.0rc0.dist-info}/RECORD +63 -57
  60. {huggingface_hub-0.28.0rc5.dist-info → huggingface_hub-0.29.0rc0.dist-info}/LICENSE +0 -0
  61. {huggingface_hub-0.28.0rc5.dist-info → huggingface_hub-0.29.0rc0.dist-info}/WHEEL +0 -0
  62. {huggingface_hub-0.28.0rc5.dist-info → huggingface_hub-0.29.0rc0.dist-info}/entry_points.txt +0 -0
  63. {huggingface_hub-0.28.0rc5.dist-info → huggingface_hub-0.29.0rc0.dist-info}/top_level.txt +0 -0
huggingface_hub/inference/_providers/fal_ai.py
@@ -1,108 +1,31 @@
 import base64
-from abc import ABC, abstractmethod
+from abc import ABC
 from typing import Any, Dict, Optional, Union
 
-from huggingface_hub import constants
-from huggingface_hub.inference._common import RequestParameters, TaskProviderHelper, _as_dict
-from huggingface_hub.utils import build_hf_headers, get_session, get_token, logging
-
-
-logger = logging.get_logger(__name__)
-
-
-BASE_URL = "https://fal.run"
-
-SUPPORTED_MODELS = {
-    "automatic-speech-recognition": {
-        "openai/whisper-large-v3": "fal-ai/whisper",
-    },
-    "text-to-image": {
-        "black-forest-labs/FLUX.1-schnell": "fal-ai/flux/schnell",
-        "black-forest-labs/FLUX.1-dev": "fal-ai/flux/dev",
-        "playgroundai/playground-v2.5-1024px-aesthetic": "fal-ai/playground-v25",
-        "ByteDance/SDXL-Lightning": "fal-ai/lightning-models",
-        "PixArt-alpha/PixArt-Sigma-XL-2-1024-MS": "fal-ai/pixart-sigma",
-        "stabilityai/stable-diffusion-3-medium": "fal-ai/stable-diffusion-v3-medium",
-        "Warlord-K/Sana-1024": "fal-ai/sana",
-        "fal/AuraFlow-v0.2": "fal-ai/aura-flow",
-        "stabilityai/stable-diffusion-3.5-large": "fal-ai/stable-diffusion-v35-large",
-        "Kwai-Kolors/Kolors": "fal-ai/kolors",
-    },
-    "text-to-video": {
-        "genmo/mochi-1-preview": "fal-ai/mochi-v1",
-        "tencent/HunyuanVideo": "fal-ai/hunyuan-video",
-    },
-}
+from huggingface_hub.inference._common import _as_dict
+from huggingface_hub.inference._providers._common import TaskProviderHelper, filter_none
+from huggingface_hub.utils import get_session
 
 
 class FalAITask(TaskProviderHelper, ABC):
-    """Base class for FalAI API tasks."""
-
     def __init__(self, task: str):
-        self.task = task
-
-    def prepare_request(
-        self,
-        *,
-        inputs: Any,
-        parameters: Dict[str, Any],
-        headers: Dict,
-        model: Optional[str],
-        api_key: Optional[str],
-        extra_payload: Optional[Dict[str, Any]] = None,
-    ) -> RequestParameters:
-        if api_key is None:
-            api_key = get_token()
-        if api_key is None:
-            raise ValueError(
-                "You must provide an api_key to work with fal.ai API or log in with `huggingface-cli login`."
-            )
-
-        mapped_model = self._map_model(model)
-        headers = {
-            **build_hf_headers(token=api_key),
-            **headers,
-        }
-
-        # Route to the proxy if the api_key is a HF TOKEN
-        if api_key.startswith("hf_"):
-            base_url = constants.INFERENCE_PROXY_TEMPLATE.format(provider="fal-ai")
-            logger.info("Calling fal.ai provider through Hugging Face proxy.")
-        else:
-            base_url = BASE_URL
-            headers["authorization"] = f"Key {api_key}"
-            logger.info("Calling fal.ai provider directly.")
-
-        payload = self._prepare_payload(inputs, parameters=parameters)
-
-        return RequestParameters(
-            url=f"{base_url}/{mapped_model}",
-            task=self.task,
-            model=mapped_model,
-            json=payload,
-            data=None,
-            headers=headers,
-        )
+        super().__init__(provider="fal-ai", base_url="https://fal.run", task=task)
 
-    def _map_model(self, model: Optional[str]) -> str:
-        if model is None:
-            raise ValueError("Please provide a model available on FalAI.")
-        if self.task not in SUPPORTED_MODELS:
-            raise ValueError(f"Task {self.task} not supported with FalAI.")
-        mapped_model = SUPPORTED_MODELS[self.task].get(model)
-        if mapped_model is None:
-            raise ValueError(f"Model {model} is not supported with FalAI for task {self.task}.")
-        return mapped_model
+    def _prepare_headers(self, headers: Dict, api_key: str) -> Dict:
+        headers = super()._prepare_headers(headers, api_key)
+        if not api_key.startswith("hf_"):
+            headers["authorization"] = f"Key {api_key}"
+        return headers
 
-    @abstractmethod
-    def _prepare_payload(self, inputs: Any, parameters: Dict[str, Any]) -> Dict[str, Any]: ...
+    def _prepare_route(self, mapped_model: str) -> str:
+        return f"/{mapped_model}"
 
 
 class FalAIAutomaticSpeechRecognitionTask(FalAITask):
     def __init__(self):
         super().__init__("automatic-speech-recognition")
 
-    def _prepare_payload(self, inputs: Any, parameters: Dict[str, Any]) -> Dict[str, Any]:
+    def _prepare_payload_as_dict(self, inputs: Any, parameters: Dict, mapped_model: str) -> Optional[Dict]:
         if isinstance(inputs, str) and inputs.startswith(("http://", "https://")):
             # If input is a URL, pass it directly
             audio_url = inputs
@@ -116,10 +39,7 @@ class FalAIAutomaticSpeechRecognitionTask(FalAITask):
             content_type = "audio/mpeg"
             audio_url = f"data:{content_type};base64,{audio_b64}"
 
-        return {
-            "audio_url": audio_url,
-            **{k: v for k, v in parameters.items() if v is not None},
-        }
+        return {"audio_url": audio_url, **filter_none(parameters)}
 
     def get_response(self, response: Union[bytes, Dict]) -> Any:
         text = _as_dict(response)["text"]
@@ -132,9 +52,9 @@ class FalAITextToImageTask(FalAITask):
     def __init__(self):
         super().__init__("text-to-image")
 
-    def _prepare_payload(self, inputs: Any, parameters: Dict[str, Any]) -> Dict[str, Any]:
-        parameters = {k: v for k, v in parameters.items() if v is not None}
-        if "image_size" not in parameters and "width" in parameters and "height" in parameters:
+    def _prepare_payload_as_dict(self, inputs: Any, parameters: Dict, mapped_model: str) -> Optional[Dict]:
+        parameters = filter_none(parameters)
+        if "width" in parameters and "height" in parameters:
             parameters["image_size"] = {
                 "width": parameters.pop("width"),
                 "height": parameters.pop("height"),
@@ -146,13 +66,24 @@ class FalAITextToImageTask(FalAITask):
         return get_session().get(url).content
 
 
+class FalAITextToSpeechTask(FalAITask):
+    def __init__(self):
+        super().__init__("text-to-speech")
+
+    def _prepare_payload_as_dict(self, inputs: Any, parameters: Dict, mapped_model: str) -> Optional[Dict]:
+        return {"lyrics": inputs, **filter_none(parameters)}
+
+    def get_response(self, response: Union[bytes, Dict]) -> Any:
+        url = _as_dict(response)["audio"]["url"]
+        return get_session().get(url).content
+
+
 class FalAITextToVideoTask(FalAITask):
     def __init__(self):
         super().__init__("text-to-video")
 
-    def _prepare_payload(self, inputs: Any, parameters: Dict[str, Any]) -> Dict[str, Any]:
-        parameters = {k: v for k, v in parameters.items() if v is not None}
-        return {"prompt": inputs, **parameters}
+    def _prepare_payload_as_dict(self, inputs: Any, parameters: Dict, mapped_model: str) -> Optional[Dict]:
+        return {"prompt": inputs, **filter_none(parameters)}
 
     def get_response(self, response: Union[bytes, Dict]) -> Any:
         url = _as_dict(response)["video"]["url"]
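
Note on the refactor above: the hand-rolled prepare_request and the SUPPORTED_MODELS mapping move into the shared TaskProviderHelper base class in huggingface_hub/inference/_providers/_common.py (+239 lines, not shown in this diff). The following is a minimal sketch of how that base class plausibly composes the hooks the subclasses override; the class body and the filter_none helper are reconstructions inferred from the call sites, not the actual implementation:

    from typing import Any, Dict, Optional


    def filter_none(d: Dict[str, Any]) -> Dict[str, Any]:
        # Assumed behavior of the new `filter_none` helper: drop unset parameters.
        return {k: v for k, v in d.items() if v is not None}


    class TaskProviderHelperSketch:
        # Hypothetical reconstruction of the hook-based request flow.
        def __init__(self, provider: str, base_url: str, task: str):
            self.provider, self.base_url, self.task = provider, base_url, task

        def _prepare_api_key(self, api_key: Optional[str]) -> str:
            if api_key is None:
                raise ValueError(f"An api_key is required for provider '{self.provider}'.")
            return api_key

        def _prepare_headers(self, headers: Dict, api_key: str) -> Dict:
            # Subclasses like FalAITask extend this (e.g. `Key {api_key}` auth).
            return {"authorization": f"Bearer {api_key}", **headers}

        def _prepare_route(self, mapped_model: str) -> str:
            return ""

        def _prepare_url(self, api_key: str, mapped_model: str) -> str:
            return f"{self.base_url}{self._prepare_route(mapped_model)}"

        def _prepare_payload_as_dict(self, inputs: Any, parameters: Dict, mapped_model: str) -> Optional[Dict]:
            return {"inputs": inputs, **filter_none(parameters)}

        def prepare_request(self, *, inputs: Any, parameters: Dict, headers: Dict,
                            mapped_model: str, api_key: Optional[str]) -> Dict[str, Any]:
            # Each provider overrides only the hooks it needs; the flow itself stays here.
            api_key = self._prepare_api_key(api_key)
            return {
                "url": self._prepare_url(api_key, mapped_model),
                "headers": self._prepare_headers(headers, api_key),
                "json": self._prepare_payload_as_dict(inputs, parameters, mapped_model),
            }

Under this reading, FalAITask only customizes the headers (Key-based auth when the token is not an hf_ token) and the route, and each concrete task customizes the payload.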
huggingface_hub/inference/_providers/fireworks_ai.py
@@ -0,0 +1,6 @@
+from ._common import BaseConversationalTask
+
+
+class FireworksAIConversationalTask(BaseConversationalTask):
+    def __init__(self):
+        super().__init__(provider="fireworks-ai", base_url="https://api.fireworks.ai/inference")
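
The new fireworks-ai provider is six lines because BaseConversationalTask (also in _common.py) supplies the OpenAI-compatible /v1/chat/completions route and payload handling. A hedged usage sketch; the API key and model id below are placeholders:

    from huggingface_hub import InferenceClient

    # Placeholder key; requires a Fireworks AI key (or an hf_ token routed
    # through the Hugging Face proxy). Model id is illustrative.
    client = InferenceClient(provider="fireworks-ai", api_key="...")
    completion = client.chat_completion(
        model="meta-llama/Llama-3.1-8B-Instruct",
        messages=[{"role": "user", "content": "Say hello in one word."}],
        max_tokens=16,
    )
    print(completion.choices[0].message.content)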
huggingface_hub/inference/_providers/hf_inference.py
@@ -1,134 +1,68 @@
+import json
+from functools import lru_cache
 from pathlib import Path
-from typing import Any, Dict, List, Optional, Tuple, Union
+from typing import Any, Dict, Optional
 
-from huggingface_hub.constants import ENDPOINT
-from huggingface_hub.inference._common import RequestParameters, TaskProviderHelper, _b64_encode, _open_as_binary
-from huggingface_hub.utils import build_hf_headers, get_session, hf_raise_for_status
-
-
-## RECOMMENDED MODELS
-
-# Will be globally fetched only once (see '_fetch_recommended_models')
-_RECOMMENDED_MODELS: Optional[Dict[str, Optional[str]]] = None
-
-BASE_URL = "https://api-inference.huggingface.co"
-
-
-def _first_or_none(items: List[Any]) -> Optional[Any]:
-    try:
-        return items[0] or None
-    except IndexError:
-        return None
-
-
-def _fetch_recommended_models() -> Dict[str, Optional[str]]:
-    global _RECOMMENDED_MODELS
-    if _RECOMMENDED_MODELS is None:
-        response = get_session().get(f"{ENDPOINT}/api/tasks", headers=build_hf_headers())
-        hf_raise_for_status(response)
-        _RECOMMENDED_MODELS = {
-            task: _first_or_none(details["widgetModels"]) for task, details in response.json().items()
-        }
-    return _RECOMMENDED_MODELS
-
-
-def get_recommended_model(task: str) -> str:
-    """
-    Get the model Hugging Face recommends for the input task.
-
-    Args:
-        task (`str`):
-            The Hugging Face task to get which model Hugging Face recommends.
-            All available tasks can be found [here](https://huggingface.co/tasks).
-
-    Returns:
-        `str`: Name of the model recommended for the input task.
-
-    Raises:
-        `ValueError`: If Hugging Face has no recommendation for the input task.
-    """
-    model = _fetch_recommended_models().get(task)
-    if model is None:
-        raise ValueError(
-            f"Task {task} has no recommended model. Please specify a model"
-            " explicitly. Visit https://huggingface.co/tasks for more info."
-        )
-    return model
+from huggingface_hub import constants
+from huggingface_hub.inference._common import _b64_encode, _open_as_binary
+from huggingface_hub.inference._providers._common import TaskProviderHelper, filter_none
+from huggingface_hub.utils import build_hf_headers, get_session, get_token, hf_raise_for_status
 
 
 class HFInferenceTask(TaskProviderHelper):
     """Base class for HF Inference API tasks."""
 
     def __init__(self, task: str):
-        self.task = task
-
-    def prepare_request(
-        self,
-        *,
-        inputs: Any,
-        parameters: Dict[str, Any],
-        headers: Dict,
-        model: Optional[str],
-        api_key: Optional[str],
-        extra_payload: Optional[Dict[str, Any]] = None,
-    ) -> RequestParameters:
-        if extra_payload is None:
-            extra_payload = {}
-        mapped_model = self.map_model(model)
-        url = self.build_url(mapped_model)
-        data, json = self._prepare_payload(inputs, parameters=parameters, model=model, extra_payload=extra_payload)
-        headers = self.prepare_headers(headers=headers, api_key=api_key)
-
-        return RequestParameters(
-            url=url,
-            task=self.task,
-            model=mapped_model,
-            json=json,
-            data=data,
-            headers=headers,
+        super().__init__(
+            provider="hf-inference",
+            base_url=constants.INFERENCE_PROXY_TEMPLATE.format(provider="hf-inference"),
+            task=task,
         )
 
-    def map_model(self, model: Optional[str]) -> str:
-        return model if model is not None else get_recommended_model(self.task)
+    def _prepare_api_key(self, api_key: Optional[str]) -> str:
+        # special case: for HF Inference we allow not providing an API key
+        return api_key or get_token()  # type: ignore[return-value]
 
-    def build_url(self, model: str) -> str:
-        # hf-inference provider can handle URLs (e.g. Inference Endpoints or TGI deployment)
-        if model.startswith(("http://", "https://")):
+    def _prepare_mapped_model(self, model: Optional[str]) -> str:
+        if model is not None:
             return model
-
+        model = _fetch_recommended_models().get(self.task)
+        if model is None:
+            raise ValueError(
+                f"Task {self.task} has no recommended model for HF Inference. Please specify a model"
+                " explicitly. Visit https://huggingface.co/tasks for more info."
+            )
+        return model
+
+    def _prepare_url(self, api_key: str, mapped_model: str) -> str:
+        # hf-inference provider can handle URLs (e.g. Inference Endpoints or TGI deployment)
+        if mapped_model.startswith(("http://", "https://")):
+            return mapped_model
         return (
             # Feature-extraction and sentence-similarity are the only cases where we handle models with several tasks.
-            f"{BASE_URL}/pipeline/{self.task}/{model}"
+            f"{self.base_url}/pipeline/{self.task}/{mapped_model}"
            if self.task in ("feature-extraction", "sentence-similarity")
             # Otherwise, we use the default endpoint
-            else f"{BASE_URL}/models/{model}"
+            else f"{self.base_url}/models/{mapped_model}"
         )
 
-    def prepare_headers(self, headers: Dict, *, api_key: Optional[Union[bool, str]] = None) -> Dict:
-        return {**build_hf_headers(token=api_key), **headers}
-
-    def _prepare_payload(
-        self, inputs: Any, parameters: Dict[str, Any], model: Optional[str], extra_payload: Dict[str, Any]
-    ) -> Tuple[Any, Any]:
+    def _prepare_payload_as_dict(self, inputs: Any, parameters: Dict, mapped_model: str) -> Optional[Dict]:
         if isinstance(inputs, bytes):
             raise ValueError(f"Unexpected binary input for task {self.task}.")
         if isinstance(inputs, Path):
             raise ValueError(f"Unexpected path input for task {self.task} (got {inputs})")
-        return None, {
-            "inputs": inputs,
-            "parameters": {k: v for k, v in parameters.items() if v is not None},
-            **extra_payload,
-        }
-
-    def get_response(self, response: Union[bytes, Dict]) -> Any:
-        return response
+        return {"inputs": inputs, "parameters": filter_none(parameters)}
 
 
 class HFInferenceBinaryInputTask(HFInferenceTask):
-    def _prepare_payload(
-        self, inputs: Any, parameters: Dict[str, Any], model: Optional[str], extra_payload: Dict[str, Any]
-    ) -> Tuple[Any, Any]:
-        parameters = {k: v for k, v in parameters.items() if v is not None}
+    def _prepare_payload_as_dict(self, inputs: Any, parameters: Dict, mapped_model: str) -> Optional[Dict]:
+        return None
+
+    def _prepare_payload_as_bytes(
+        self, inputs: Any, parameters: Dict, mapped_model: str, extra_payload: Optional[Dict]
+    ) -> Optional[bytes]:
+        parameters = filter_none({k: v for k, v in parameters.items() if v is not None})
+        extra_payload = extra_payload or {}
         has_parameters = len(parameters) > 0 or len(extra_payload) > 0
 
         # Raise if not a binary object or a local path or a URL.
@@ -139,51 +73,26 @@ class HFInferenceBinaryInputTask(HFInferenceTask):
         if not has_parameters:
             with _open_as_binary(inputs) as data:
                 data_as_bytes = data if isinstance(data, bytes) else data.read()
-                return data_as_bytes, None
+                return data_as_bytes
 
         # Otherwise encode as b64
-        return None, {"inputs": _b64_encode(inputs), "parameters": parameters, **extra_payload}
+        return json.dumps({"inputs": _b64_encode(inputs), "parameters": parameters, **extra_payload}).encode("utf-8")
 
 
 class HFInferenceConversational(HFInferenceTask):
     def __init__(self):
         super().__init__("text-generation")
 
-    def prepare_request(
-        self,
-        *,
-        inputs: Any,
-        parameters: Dict[str, Any],
-        headers: Dict,
-        model: Optional[str],
-        api_key: Optional[str],
-        extra_payload: Optional[Dict[str, Any]] = None,
-    ) -> RequestParameters:
-        model = self.map_model(model)
-        payload_model = parameters.get("model") or model
-
-        if payload_model is None or payload_model.startswith(("http://", "https://")):
-            payload_model = "tgi"  # use a random string if not provided
-
-        json = {
-            **{key: value for key, value in parameters.items() if value is not None},
-            "model": payload_model,
-            "messages": inputs,
-            **(extra_payload or {}),
-        }
-        headers = self.prepare_headers(headers=headers, api_key=api_key)
-
-        return RequestParameters(
-            url=self.build_url(model),
-            task=self.task,
-            model=model,
-            json=json,
-            data=None,
-            headers=headers,
-        )
+    def _prepare_payload_as_dict(self, inputs: Any, parameters: Dict, mapped_model: str) -> Optional[Dict]:
+        payload_model = "tgi" if mapped_model.startswith(("http://", "https://")) else mapped_model
+        return {**filter_none(parameters), "model": payload_model, "messages": inputs}
 
-    def build_url(self, model: str) -> str:
-        base_url = model if model.startswith(("http://", "https://")) else f"{BASE_URL}/models/{model}"
+    def _prepare_url(self, api_key: str, mapped_model: str) -> str:
+        base_url = (
+            mapped_model
+            if mapped_model.startswith(("http://", "https://"))
+            else f"{constants.INFERENCE_PROXY_TEMPLATE.format(provider='hf-inference')}/models/{mapped_model}"
+        )
         return _build_chat_completion_url(base_url)
 
 
@@ -200,3 +109,10 @@ def _build_chat_completion_url(model_url: str) -> str:
         model_url += "/v1/chat/completions"
 
     return model_url
+
+
+@lru_cache(maxsize=1)
+def _fetch_recommended_models() -> Dict[str, Optional[str]]:
+    response = get_session().get(f"{constants.ENDPOINT}/api/tasks", headers=build_hf_headers())
+    hf_raise_for_status(response)
+    return {task: next(iter(details["widgetModels"]), None) for task, details in response.json().items()}
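
Two details worth noting in the rewrite above: @lru_cache(maxsize=1) replaces the old module-global _RECOMMENDED_MODELS cache while keeping the fetch-once behavior, and only the tail of _build_chat_completion_url is visible in this diff. A plausible reconstruction of that helper, assuming it normalizes trailing slashes and URLs that already end in /v1 or /chat/completions (an inference from the visible lines, not the confirmed implementation):

    def _build_chat_completion_url(model_url: str) -> str:
        # Strip any trailing slash so the suffix checks below are reliable.
        model_url = model_url.rstrip("/")

        # A URL already ending in /v1 only needs the /chat/completions segment.
        if model_url.endswith("/v1"):
            model_url += "/chat/completions"

        # Otherwise append the full suffix (this tail is visible in the diff).
        if not model_url.endswith("/chat/completions"):
            model_url += "/v1/chat/completions"

        return model_url

Under this reading, https://my-endpoint.com, https://my-endpoint.com/, and https://my-endpoint.com/v1 all resolve to https://my-endpoint.com/v1/chat/completions.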
huggingface_hub/inference/_providers/hyperbolic.py
@@ -0,0 +1,43 @@
+import base64
+from typing import Any, Dict, Optional, Union
+
+from huggingface_hub.inference._common import _as_dict
+from huggingface_hub.inference._providers._common import BaseConversationalTask, TaskProviderHelper, filter_none
+
+
+class HyperbolicTextToImageTask(TaskProviderHelper):
+    def __init__(self):
+        super().__init__(provider="hyperbolic", base_url="https://api.hyperbolic.xyz", task="text-to-image")
+
+    def _prepare_route(self, mapped_model: str) -> str:
+        return "/v1/images/generations"
+
+    def _prepare_payload_as_dict(self, inputs: Any, parameters: Dict, mapped_model: str) -> Optional[Dict]:
+        parameters = filter_none(parameters)
+        if "num_inference_steps" in parameters:
+            parameters["steps"] = parameters.pop("num_inference_steps")
+        if "guidance_scale" in parameters:
+            parameters["cfg_scale"] = parameters.pop("guidance_scale")
+        # For Hyperbolic, the width and height are required parameters
+        if "width" not in parameters:
+            parameters["width"] = 512
+        if "height" not in parameters:
+            parameters["height"] = 512
+        return {"prompt": inputs, "model_name": mapped_model, **parameters}
+
+    def get_response(self, response: Union[bytes, Dict]) -> Any:
+        response_dict = _as_dict(response)
+        return base64.b64decode(response_dict["images"][0]["image"])
+
+
+class HyperbolicTextGenerationTask(BaseConversationalTask):
+    """
+    Special case for Hyperbolic, where text-generation task is handled as a conversational task.
+    """
+
+    def __init__(self, task: str):
+        super().__init__(
+            provider="hyperbolic",
+            base_url="https://api.hyperbolic.xyz",
+        )
+        self.task = task
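
HyperbolicTextToImageTask._prepare_payload_as_dict translates the library's generic diffusion parameters into Hyperbolic's names (num_inference_steps → steps, guidance_scale → cfg_scale) and fills in the required 512x512 default size, so callers stay provider-agnostic. A usage sketch with a placeholder key and an illustrative model id:

    from huggingface_hub import InferenceClient

    client = InferenceClient(provider="hyperbolic", api_key="...")  # placeholder key
    # num_inference_steps/guidance_scale are renamed to steps/cfg_scale in the payload.
    image = client.text_to_image(
        "a watercolor fox in the snow",
        model="stabilityai/stable-diffusion-2",  # illustrative model id
        num_inference_steps=25,
        guidance_scale=6.5,
    )
    image.save("fox.png")  # text_to_image returns a PIL image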
huggingface_hub/inference/_providers/nebius.py
@@ -0,0 +1,41 @@
+import base64
+from typing import Any, Dict, Optional, Union
+
+from huggingface_hub.inference._common import _as_dict
+from huggingface_hub.inference._providers._common import (
+    BaseConversationalTask,
+    BaseTextGenerationTask,
+    TaskProviderHelper,
+    filter_none,
+)
+
+
+class NebiusTextGenerationTask(BaseTextGenerationTask):
+    def __init__(self):
+        super().__init__(provider="nebius", base_url="https://api.studio.nebius.ai")
+
+
+class NebiusConversationalTask(BaseConversationalTask):
+    def __init__(self):
+        super().__init__(provider="nebius", base_url="https://api.studio.nebius.ai")
+
+
+class NebiusTextToImageTask(TaskProviderHelper):
+    def __init__(self):
+        super().__init__(task="text-to-image", provider="nebius", base_url="https://api.studio.nebius.ai")
+
+    def _prepare_route(self, mapped_model: str) -> str:
+        return "/v1/images/generations"
+
+    def _prepare_payload_as_dict(self, inputs: Any, parameters: Dict, mapped_model: str) -> Optional[Dict]:
+        parameters = filter_none(parameters)
+        if "guidance_scale" in parameters:
+            parameters.pop("guidance_scale")
+        if parameters.get("response_format") not in ("b64_json", "url"):
+            parameters["response_format"] = "b64_json"
+
+        return {"prompt": inputs, **parameters, "model": mapped_model}
+
+    def get_response(self, response: Union[bytes, Dict]) -> Any:
+        response_dict = _as_dict(response)
+        return base64.b64decode(response_dict["data"][0]["b64_json"])
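
NebiusTextToImageTask strips guidance_scale from the payload, forces an OpenAI-style b64_json response format, and decodes the first returned image. A small illustration of the decode path in get_response, using a fabricated response shaped like the one it expects:

    import base64

    # Fabricated response in the shape get_response expects (OpenAI images API style).
    fake_png = b"\x89PNG\r\n\x1a\n"  # placeholder bytes, not a real image
    response = {"data": [{"b64_json": base64.b64encode(fake_png).decode()}]}

    image_bytes = base64.b64decode(response["data"][0]["b64_json"])
    assert image_bytes == fake_png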
huggingface_hub/inference/_providers/novita.py
@@ -0,0 +1,26 @@
+from huggingface_hub.inference._providers._common import (
+    BaseConversationalTask,
+    BaseTextGenerationTask,
+)
+
+
+_PROVIDER = "novita"
+_BASE_URL = "https://api.novita.ai/v3/openai"
+
+
+class NovitaTextGenerationTask(BaseTextGenerationTask):
+    def __init__(self):
+        super().__init__(provider=_PROVIDER, base_url=_BASE_URL)
+
+    def _prepare_route(self, mapped_model: str) -> str:
+        # there is no v1/ route for novita
+        return "/completions"
+
+
+class NovitaConversationalTask(BaseConversationalTask):
+    def __init__(self):
+        super().__init__(provider=_PROVIDER, base_url=_BASE_URL)
+
+    def _prepare_route(self, mapped_model: str) -> str:
+        # there is no v1/ route for novita
+        return "/chat/completions"