huggingface-hub 0.31.0rc0__py3-none-any.whl → 1.1.3__py3-none-any.whl

This diff compares the contents of two publicly available package versions as they were released to their respective public registries. It is provided for informational purposes only.
Files changed (150)
  1. huggingface_hub/__init__.py +145 -46
  2. huggingface_hub/_commit_api.py +168 -119
  3. huggingface_hub/_commit_scheduler.py +15 -15
  4. huggingface_hub/_inference_endpoints.py +15 -12
  5. huggingface_hub/_jobs_api.py +301 -0
  6. huggingface_hub/_local_folder.py +18 -3
  7. huggingface_hub/_login.py +31 -63
  8. huggingface_hub/_oauth.py +460 -0
  9. huggingface_hub/_snapshot_download.py +239 -80
  10. huggingface_hub/_space_api.py +5 -5
  11. huggingface_hub/_tensorboard_logger.py +15 -19
  12. huggingface_hub/_upload_large_folder.py +172 -76
  13. huggingface_hub/_webhooks_payload.py +3 -3
  14. huggingface_hub/_webhooks_server.py +13 -25
  15. huggingface_hub/{commands → cli}/__init__.py +1 -15
  16. huggingface_hub/cli/_cli_utils.py +173 -0
  17. huggingface_hub/cli/auth.py +147 -0
  18. huggingface_hub/cli/cache.py +841 -0
  19. huggingface_hub/cli/download.py +189 -0
  20. huggingface_hub/cli/hf.py +60 -0
  21. huggingface_hub/cli/inference_endpoints.py +377 -0
  22. huggingface_hub/cli/jobs.py +772 -0
  23. huggingface_hub/cli/lfs.py +175 -0
  24. huggingface_hub/cli/repo.py +315 -0
  25. huggingface_hub/cli/repo_files.py +94 -0
  26. huggingface_hub/{commands/env.py → cli/system.py} +10 -13
  27. huggingface_hub/cli/upload.py +294 -0
  28. huggingface_hub/cli/upload_large_folder.py +117 -0
  29. huggingface_hub/community.py +20 -12
  30. huggingface_hub/constants.py +38 -53
  31. huggingface_hub/dataclasses.py +609 -0
  32. huggingface_hub/errors.py +80 -30
  33. huggingface_hub/fastai_utils.py +30 -41
  34. huggingface_hub/file_download.py +435 -351
  35. huggingface_hub/hf_api.py +2050 -1124
  36. huggingface_hub/hf_file_system.py +269 -152
  37. huggingface_hub/hub_mixin.py +43 -63
  38. huggingface_hub/inference/_client.py +347 -434
  39. huggingface_hub/inference/_common.py +133 -121
  40. huggingface_hub/inference/_generated/_async_client.py +397 -541
  41. huggingface_hub/inference/_generated/types/__init__.py +5 -1
  42. huggingface_hub/inference/_generated/types/automatic_speech_recognition.py +3 -3
  43. huggingface_hub/inference/_generated/types/base.py +10 -7
  44. huggingface_hub/inference/_generated/types/chat_completion.py +59 -23
  45. huggingface_hub/inference/_generated/types/depth_estimation.py +2 -2
  46. huggingface_hub/inference/_generated/types/document_question_answering.py +2 -2
  47. huggingface_hub/inference/_generated/types/feature_extraction.py +2 -2
  48. huggingface_hub/inference/_generated/types/fill_mask.py +2 -2
  49. huggingface_hub/inference/_generated/types/image_to_image.py +6 -2
  50. huggingface_hub/inference/_generated/types/image_to_video.py +60 -0
  51. huggingface_hub/inference/_generated/types/sentence_similarity.py +3 -3
  52. huggingface_hub/inference/_generated/types/summarization.py +2 -2
  53. huggingface_hub/inference/_generated/types/table_question_answering.py +5 -5
  54. huggingface_hub/inference/_generated/types/text2text_generation.py +2 -2
  55. huggingface_hub/inference/_generated/types/text_generation.py +10 -10
  56. huggingface_hub/inference/_generated/types/text_to_video.py +2 -2
  57. huggingface_hub/inference/_generated/types/token_classification.py +2 -2
  58. huggingface_hub/inference/_generated/types/translation.py +2 -2
  59. huggingface_hub/inference/_generated/types/zero_shot_classification.py +2 -2
  60. huggingface_hub/inference/_generated/types/zero_shot_image_classification.py +2 -2
  61. huggingface_hub/inference/_generated/types/zero_shot_object_detection.py +1 -3
  62. huggingface_hub/inference/_mcp/__init__.py +0 -0
  63. huggingface_hub/inference/_mcp/_cli_hacks.py +88 -0
  64. huggingface_hub/inference/_mcp/agent.py +100 -0
  65. huggingface_hub/inference/_mcp/cli.py +247 -0
  66. huggingface_hub/inference/_mcp/constants.py +81 -0
  67. huggingface_hub/inference/_mcp/mcp_client.py +395 -0
  68. huggingface_hub/inference/_mcp/types.py +45 -0
  69. huggingface_hub/inference/_mcp/utils.py +128 -0
  70. huggingface_hub/inference/_providers/__init__.py +82 -7
  71. huggingface_hub/inference/_providers/_common.py +129 -27
  72. huggingface_hub/inference/_providers/black_forest_labs.py +6 -6
  73. huggingface_hub/inference/_providers/cerebras.py +1 -1
  74. huggingface_hub/inference/_providers/clarifai.py +13 -0
  75. huggingface_hub/inference/_providers/cohere.py +20 -3
  76. huggingface_hub/inference/_providers/fal_ai.py +183 -56
  77. huggingface_hub/inference/_providers/featherless_ai.py +38 -0
  78. huggingface_hub/inference/_providers/fireworks_ai.py +18 -0
  79. huggingface_hub/inference/_providers/groq.py +9 -0
  80. huggingface_hub/inference/_providers/hf_inference.py +69 -30
  81. huggingface_hub/inference/_providers/hyperbolic.py +4 -4
  82. huggingface_hub/inference/_providers/nebius.py +33 -5
  83. huggingface_hub/inference/_providers/novita.py +5 -5
  84. huggingface_hub/inference/_providers/nscale.py +44 -0
  85. huggingface_hub/inference/_providers/openai.py +3 -1
  86. huggingface_hub/inference/_providers/publicai.py +6 -0
  87. huggingface_hub/inference/_providers/replicate.py +31 -13
  88. huggingface_hub/inference/_providers/sambanova.py +18 -4
  89. huggingface_hub/inference/_providers/scaleway.py +28 -0
  90. huggingface_hub/inference/_providers/together.py +20 -5
  91. huggingface_hub/inference/_providers/wavespeed.py +138 -0
  92. huggingface_hub/inference/_providers/zai_org.py +17 -0
  93. huggingface_hub/lfs.py +33 -100
  94. huggingface_hub/repocard.py +34 -38
  95. huggingface_hub/repocard_data.py +57 -57
  96. huggingface_hub/serialization/__init__.py +0 -1
  97. huggingface_hub/serialization/_base.py +12 -15
  98. huggingface_hub/serialization/_dduf.py +8 -8
  99. huggingface_hub/serialization/_torch.py +69 -69
  100. huggingface_hub/utils/__init__.py +19 -8
  101. huggingface_hub/utils/_auth.py +7 -7
  102. huggingface_hub/utils/_cache_manager.py +92 -147
  103. huggingface_hub/utils/_chunk_utils.py +2 -3
  104. huggingface_hub/utils/_deprecation.py +1 -1
  105. huggingface_hub/utils/_dotenv.py +55 -0
  106. huggingface_hub/utils/_experimental.py +7 -5
  107. huggingface_hub/utils/_fixes.py +0 -10
  108. huggingface_hub/utils/_git_credential.py +5 -5
  109. huggingface_hub/utils/_headers.py +8 -30
  110. huggingface_hub/utils/_http.py +398 -239
  111. huggingface_hub/utils/_pagination.py +4 -4
  112. huggingface_hub/utils/_parsing.py +98 -0
  113. huggingface_hub/utils/_paths.py +5 -5
  114. huggingface_hub/utils/_runtime.py +61 -24
  115. huggingface_hub/utils/_safetensors.py +21 -21
  116. huggingface_hub/utils/_subprocess.py +9 -9
  117. huggingface_hub/utils/_telemetry.py +4 -4
  118. huggingface_hub/{commands/_cli_utils.py → utils/_terminal.py} +4 -4
  119. huggingface_hub/utils/_typing.py +25 -5
  120. huggingface_hub/utils/_validators.py +55 -74
  121. huggingface_hub/utils/_verification.py +167 -0
  122. huggingface_hub/utils/_xet.py +64 -17
  123. huggingface_hub/utils/_xet_progress_reporting.py +162 -0
  124. huggingface_hub/utils/insecure_hashlib.py +3 -5
  125. huggingface_hub/utils/logging.py +8 -11
  126. huggingface_hub/utils/tqdm.py +5 -4
  127. {huggingface_hub-0.31.0rc0.dist-info → huggingface_hub-1.1.3.dist-info}/METADATA +94 -85
  128. huggingface_hub-1.1.3.dist-info/RECORD +155 -0
  129. {huggingface_hub-0.31.0rc0.dist-info → huggingface_hub-1.1.3.dist-info}/WHEEL +1 -1
  130. huggingface_hub-1.1.3.dist-info/entry_points.txt +6 -0
  131. huggingface_hub/commands/delete_cache.py +0 -474
  132. huggingface_hub/commands/download.py +0 -200
  133. huggingface_hub/commands/huggingface_cli.py +0 -61
  134. huggingface_hub/commands/lfs.py +0 -200
  135. huggingface_hub/commands/repo_files.py +0 -128
  136. huggingface_hub/commands/scan_cache.py +0 -181
  137. huggingface_hub/commands/tag.py +0 -159
  138. huggingface_hub/commands/upload.py +0 -314
  139. huggingface_hub/commands/upload_large_folder.py +0 -129
  140. huggingface_hub/commands/user.py +0 -304
  141. huggingface_hub/commands/version.py +0 -37
  142. huggingface_hub/inference_api.py +0 -217
  143. huggingface_hub/keras_mixin.py +0 -500
  144. huggingface_hub/repository.py +0 -1477
  145. huggingface_hub/serialization/_tensorflow.py +0 -95
  146. huggingface_hub/utils/_hf_folder.py +0 -68
  147. huggingface_hub-0.31.0rc0.dist-info/RECORD +0 -135
  148. huggingface_hub-0.31.0rc0.dist-info/entry_points.txt +0 -6
  149. {huggingface_hub-0.31.0rc0.dist-info → huggingface_hub-1.1.3.dist-info/licenses}/LICENSE +0 -0
  150. {huggingface_hub-0.31.0rc0.dist-info → huggingface_hub-1.1.3.dist-info}/top_level.txt +0 -0
huggingface_hub/inference/_providers/nebius.py
@@ -1,5 +1,5 @@
 import base64
-from typing import Any, Dict, Optional, Union
+from typing import Any, Optional, Union

 from huggingface_hub.hf_api import InferenceProviderMapping
 from huggingface_hub.inference._common import RequestParameters, _as_dict
@@ -15,7 +15,7 @@ class NebiusTextGenerationTask(BaseTextGenerationTask):
     def __init__(self):
         super().__init__(provider="nebius", base_url="https://api.studio.nebius.ai")

-    def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any:
+    def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any:
         output = _as_dict(response)["choices"][0]
         return {
             "generated_text": output["text"],
@@ -30,6 +30,17 @@ class NebiusConversationalTask(BaseConversationalTask):
     def __init__(self):
         super().__init__(provider="nebius", base_url="https://api.studio.nebius.ai")

+    def _prepare_payload_as_dict(
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
+        payload = super()._prepare_payload_as_dict(inputs, parameters, provider_mapping_info)
+        response_format = parameters.get("response_format")
+        if isinstance(response_format, dict) and response_format.get("type") == "json_schema":
+            json_schema_details = response_format.get("json_schema")
+            if isinstance(json_schema_details, dict) and "schema" in json_schema_details:
+                payload["guided_json"] = json_schema_details["schema"]  # type: ignore [index]
+        return payload
+

 class NebiusTextToImageTask(TaskProviderHelper):
     def __init__(self):
@@ -39,8 +50,8 @@ class NebiusTextToImageTask(TaskProviderHelper):
         return "/v1/images/generations"

     def _prepare_payload_as_dict(
-        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
-    ) -> Optional[Dict]:
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
         mapped_model = provider_mapping_info.provider_id
         parameters = filter_none(parameters)
         if "guidance_scale" in parameters:
@@ -50,6 +61,23 @@ class NebiusTextToImageTask(TaskProviderHelper):

         return {"prompt": inputs, **parameters, "model": mapped_model}

-    def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any:
+    def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any:
         response_dict = _as_dict(response)
         return base64.b64decode(response_dict["data"][0]["b64_json"])
+
+
+class NebiusFeatureExtractionTask(TaskProviderHelper):
+    def __init__(self):
+        super().__init__(task="feature-extraction", provider="nebius", base_url="https://api.studio.nebius.ai")
+
+    def _prepare_route(self, mapped_model: str, api_key: str) -> str:
+        return "/v1/embeddings"
+
+    def _prepare_payload_as_dict(
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
+        return {"input": inputs, "model": provider_mapping_info.provider_id}
+
+    def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any:
+        embeddings = _as_dict(response)["data"]
+        return [embedding["embedding"] for embedding in embeddings]
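With the new `_prepare_payload_as_dict` override above, an OpenAI-style `response_format` of type `json_schema` is rewritten client-side into Nebius's `guided_json` field. A minimal usage sketch (assumes a valid HF token and that the model is served by Nebius; the model id is illustrative only):

from huggingface_hub import InferenceClient

client = InferenceClient(provider="nebius")  # routes requests through api.studio.nebius.ai
completion = client.chat_completion(
    messages=[{"role": "user", "content": "Describe a person as JSON."}],
    model="meta-llama/Meta-Llama-3.1-8B-Instruct",  # illustrative model id
    # OpenAI-style schema request; translated into `guided_json` by the override above
    response_format={
        "type": "json_schema",
        "json_schema": {
            "name": "person",
            "schema": {
                "type": "object",
                "properties": {"name": {"type": "string"}, "age": {"type": "integer"}},
                "required": ["name", "age"],
            },
        },
    },
)
print(completion.choices[0].message.content)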
huggingface_hub/inference/_providers/novita.py
@@ -1,4 +1,4 @@
-from typing import Any, Dict, Optional, Union
+from typing import Any, Optional, Union

 from huggingface_hub.hf_api import InferenceProviderMapping
 from huggingface_hub.inference._common import RequestParameters, _as_dict
@@ -23,7 +23,7 @@ class NovitaTextGenerationTask(BaseTextGenerationTask):
         # there is no v1/ route for novita
         return "/v3/openai/completions"

-    def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any:
+    def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any:
         output = _as_dict(response)["choices"][0]
         return {
             "generated_text": output["text"],
@@ -51,11 +51,11 @@ class NovitaTextToVideoTask(TaskProviderHelper):
         return f"/v3/hf/{mapped_model}"

     def _prepare_payload_as_dict(
-        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
-    ) -> Optional[Dict]:
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
         return {"prompt": inputs, **filter_none(parameters)}

-    def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any:
+    def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any:
         response_dict = _as_dict(response)
         if not (
             isinstance(response_dict, dict)
huggingface_hub/inference/_providers/nscale.py (new file)
@@ -0,0 +1,44 @@
+import base64
+from typing import Any, Optional, Union
+
+from huggingface_hub.hf_api import InferenceProviderMapping
+from huggingface_hub.inference._common import RequestParameters, _as_dict
+
+from ._common import BaseConversationalTask, TaskProviderHelper, filter_none
+
+
+class NscaleConversationalTask(BaseConversationalTask):
+    def __init__(self):
+        super().__init__(provider="nscale", base_url="https://inference.api.nscale.com")
+
+
+class NscaleTextToImageTask(TaskProviderHelper):
+    def __init__(self):
+        super().__init__(provider="nscale", base_url="https://inference.api.nscale.com", task="text-to-image")
+
+    def _prepare_route(self, mapped_model: str, api_key: str) -> str:
+        return "/v1/images/generations"
+
+    def _prepare_payload_as_dict(
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
+        mapped_model = provider_mapping_info.provider_id
+        # Map HF-style parameters onto Nscale's image generation API
+        parameters = filter_none(parameters)
+        if "width" in parameters and "height" in parameters:
+            parameters["size"] = f"{parameters.pop('width')}x{parameters.pop('height')}"
+        if "num_inference_steps" in parameters:
+            parameters.pop("num_inference_steps")
+        if "cfg_scale" in parameters:
+            parameters.pop("cfg_scale")
+        payload = {
+            "response_format": "b64_json",
+            "prompt": inputs,
+            "model": mapped_model,
+            **parameters,
+        }
+        return payload
+
+    def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any:
+        response_dict = _as_dict(response)
+        return base64.b64decode(response_dict["data"][0]["b64_json"])
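The text-to-image helper folds HF-style `width`/`height` into the single `size` string Nscale expects and drops parameters its API does not accept. A standalone sketch of that mapping (hypothetical input values):

params = {"width": 1024, "height": 768, "num_inference_steps": 30, "cfg_scale": 7.0}
if "width" in params and "height" in params:
    params["size"] = f"{params.pop('width')}x{params.pop('height')}"
params.pop("num_inference_steps", None)  # dropped, mirroring the helper above
params.pop("cfg_scale", None)            # dropped, mirroring the helper above
assert params == {"size": "1024x768"}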
huggingface_hub/inference/_providers/openai.py
@@ -20,4 +20,6 @@ class OpenAIConversationalTask(BaseConversationalTask):
     def _prepare_mapping_info(self, model: Optional[str]) -> InferenceProviderMapping:
         if model is None:
             raise ValueError("Please provide an OpenAI model ID, e.g. `gpt-4o` or `o1`.")
-        return InferenceProviderMapping(providerId=model, task="conversational", status="live", hf_model_id=model)
+        return InferenceProviderMapping(
+            provider="openai", providerId=model, task="conversational", status="live", hf_model_id=model
+        )
huggingface_hub/inference/_providers/publicai.py (new file)
@@ -0,0 +1,6 @@
+from ._common import BaseConversationalTask
+
+
+class PublicAIConversationalTask(BaseConversationalTask):
+    def __init__(self):
+        super().__init__(provider="publicai", base_url="https://api.publicai.co")
huggingface_hub/inference/_providers/replicate.py
@@ -1,7 +1,7 @@
-from typing import Any, Dict, Optional, Union
+from typing import Any, Optional, Union

 from huggingface_hub.hf_api import InferenceProviderMapping
-from huggingface_hub.inference._common import RequestParameters, _as_dict
+from huggingface_hub.inference._common import RequestParameters, _as_dict, _as_url
 from huggingface_hub.inference._providers._common import TaskProviderHelper, filter_none
 from huggingface_hub.utils import get_session

@@ -14,7 +14,7 @@ class ReplicateTask(TaskProviderHelper):
     def __init__(self, task: str):
         super().__init__(provider=_PROVIDER, base_url=_BASE_URL, task=task)

-    def _prepare_headers(self, headers: Dict, api_key: str) -> Dict:
+    def _prepare_headers(self, headers: dict, api_key: str) -> dict[str, Any]:
         headers = super()._prepare_headers(headers, api_key)
         headers["Prefer"] = "wait"
         return headers
@@ -25,16 +25,16 @@ class ReplicateTask(TaskProviderHelper):
         return f"/v1/models/{mapped_model}/predictions"

     def _prepare_payload_as_dict(
-        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
-    ) -> Optional[Dict]:
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
         mapped_model = provider_mapping_info.provider_id
-        payload: Dict[str, Any] = {"input": {"prompt": inputs, **filter_none(parameters)}}
+        payload: dict[str, Any] = {"input": {"prompt": inputs, **filter_none(parameters)}}
         if ":" in mapped_model:
             version = mapped_model.split(":", 1)[1]
             payload["version"] = version
         return payload

-    def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any:
+    def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any:
         response_dict = _as_dict(response)
         if response_dict.get("output") is None:
             raise TimeoutError(
@@ -52,9 +52,9 @@ class ReplicateTextToImageTask(ReplicateTask):
         super().__init__("text-to-image")

     def _prepare_payload_as_dict(
-        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
-    ) -> Optional[Dict]:
-        payload: Dict = super()._prepare_payload_as_dict(inputs, parameters, provider_mapping_info)  # type: ignore[assignment]
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
+        payload: dict = super()._prepare_payload_as_dict(inputs, parameters, provider_mapping_info)  # type: ignore[assignment]
         if provider_mapping_info.adapter_weights_path is not None:
             payload["input"]["lora_weights"] = f"https://huggingface.co/{provider_mapping_info.hf_model_id}"
         return payload
@@ -65,8 +65,26 @@ class ReplicateTextToSpeechTask(ReplicateTask):
         super().__init__("text-to-speech")

     def _prepare_payload_as_dict(
-        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
-    ) -> Optional[Dict]:
-        payload: Dict = super()._prepare_payload_as_dict(inputs, parameters, provider_mapping_info)  # type: ignore[assignment]
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
+        payload: dict = super()._prepare_payload_as_dict(inputs, parameters, provider_mapping_info)  # type: ignore[assignment]
         payload["input"]["text"] = payload["input"].pop("prompt")  # rename "prompt" to "text" for TTS
         return payload
+
+
+class ReplicateImageToImageTask(ReplicateTask):
+    def __init__(self):
+        super().__init__("image-to-image")
+
+    def _prepare_payload_as_dict(
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
+        image_url = _as_url(inputs, default_mime_type="image/jpeg")
+
+        payload: dict[str, Any] = {"input": {"input_image": image_url, **filter_none(parameters)}}
+
+        mapped_model = provider_mapping_info.provider_id
+        if ":" in mapped_model:
+            version = mapped_model.split(":", 1)[1]
+            payload["version"] = version
+        return payload
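The new `ReplicateImageToImageTask` passes the input image through `_as_url` (URL or data-URI) and, like the other Replicate tasks, pins an explicit `version` when the provider id has the `model:version` form. A minimal usage sketch (assumes a valid HF token; the model id is illustrative, and `image_to_image` returns a PIL image):

from huggingface_hub import InferenceClient

client = InferenceClient(provider="replicate")
result = client.image_to_image(
    "cat.png",  # local path, URL, or raw bytes
    prompt="Turn the cat into a tiger",
    model="black-forest-labs/FLUX.1-Kontext-dev",  # illustrative model id
)
result.save("tiger.png")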
huggingface_hub/inference/_providers/sambanova.py
@@ -1,4 +1,4 @@
-from typing import Any, Dict, Optional, Union
+from typing import Any, Optional, Union

 from huggingface_hub.hf_api import InferenceProviderMapping
 from huggingface_hub.inference._common import RequestParameters, _as_dict
@@ -9,6 +9,20 @@ class SambanovaConversationalTask(BaseConversationalTask):
     def __init__(self):
         super().__init__(provider="sambanova", base_url="https://api.sambanova.ai")

+    def _prepare_payload_as_dict(
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
+        response_format_config = parameters.get("response_format")
+        if isinstance(response_format_config, dict):
+            if response_format_config.get("type") == "json_schema":
+                json_schema_config = response_format_config.get("json_schema", {})
+                strict = json_schema_config.get("strict")
+                if isinstance(json_schema_config, dict) and (strict is True or strict is None):
+                    json_schema_config["strict"] = False
+
+        payload = super()._prepare_payload_as_dict(inputs, parameters, provider_mapping_info)
+        return payload
+

 class SambanovaFeatureExtractionTask(TaskProviderHelper):
     def __init__(self):
@@ -18,11 +32,11 @@ class SambanovaFeatureExtractionTask(TaskProviderHelper):
         return "/v1/embeddings"

     def _prepare_payload_as_dict(
-        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
-    ) -> Optional[Dict]:
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
         parameters = filter_none(parameters)
         return {"input": inputs, "model": provider_mapping_info.provider_id, **parameters}

-    def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any:
+    def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any:
         embeddings = _as_dict(response)["data"]
         return [embedding["embedding"] for embedding in embeddings]
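The conversational override only touches the `strict` flag: `strict: true` (or an unset flag) on a `json_schema` response format is forced to `false` before the payload is built. A standalone sketch of that rule (hypothetical input):

response_format = {
    "type": "json_schema",
    "json_schema": {"name": "person", "schema": {"type": "object"}, "strict": True},
}
config = response_format.get("json_schema", {})
if isinstance(config, dict) and config.get("strict") in (True, None):
    config["strict"] = False  # forced off, mirroring the override above
assert response_format["json_schema"]["strict"] is False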
huggingface_hub/inference/_providers/scaleway.py (new file)
@@ -0,0 +1,28 @@
+from typing import Any, Dict, Optional, Union
+
+from huggingface_hub.inference._common import RequestParameters, _as_dict
+
+from ._common import BaseConversationalTask, InferenceProviderMapping, TaskProviderHelper, filter_none
+
+
+class ScalewayConversationalTask(BaseConversationalTask):
+    def __init__(self):
+        super().__init__(provider="scaleway", base_url="https://api.scaleway.ai")
+
+
+class ScalewayFeatureExtractionTask(TaskProviderHelper):
+    def __init__(self):
+        super().__init__(provider="scaleway", base_url="https://api.scaleway.ai", task="feature-extraction")
+
+    def _prepare_route(self, mapped_model: str, api_key: str) -> str:
+        return "/v1/embeddings"
+
+    def _prepare_payload_as_dict(
+        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[Dict]:
+        parameters = filter_none(parameters)
+        return {"input": inputs, "model": provider_mapping_info.provider_id, **parameters}
+
+    def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any:
+        embeddings = _as_dict(response)["data"]
+        return [embedding["embedding"] for embedding in embeddings]
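A minimal usage sketch for the new Scaleway feature-extraction route (assumes a valid HF token and an embedding model served by Scaleway; the model id is illustrative only):

from huggingface_hub import InferenceClient

client = InferenceClient(provider="scaleway")
embedding = client.feature_extraction(
    "Hello world",
    model="BAAI/bge-multilingual-gemma2",  # illustrative model id
)
print(embedding.shape)  # feature_extraction returns a numpy array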
huggingface_hub/inference/_providers/together.py
@@ -1,6 +1,6 @@
 import base64
 from abc import ABC
-from typing import Any, Dict, Optional, Union
+from typing import Any, Optional, Union

 from huggingface_hub.hf_api import InferenceProviderMapping
 from huggingface_hub.inference._common import RequestParameters, _as_dict
@@ -36,7 +36,7 @@ class TogetherTextGenerationTask(BaseTextGenerationTask):
     def __init__(self):
         super().__init__(provider=_PROVIDER, base_url=_BASE_URL)

-    def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any:
+    def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any:
         output = _as_dict(response)["choices"][0]
         return {
             "generated_text": output["text"],
@@ -51,14 +51,29 @@ class TogetherConversationalTask(BaseConversationalTask):
     def __init__(self):
         super().__init__(provider=_PROVIDER, base_url=_BASE_URL)

+    def _prepare_payload_as_dict(
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
+        payload = super()._prepare_payload_as_dict(inputs, parameters, provider_mapping_info)
+        response_format = parameters.get("response_format")
+        if isinstance(response_format, dict) and response_format.get("type") == "json_schema":
+            json_schema_details = response_format.get("json_schema")
+            if isinstance(json_schema_details, dict) and "schema" in json_schema_details:
+                payload["response_format"] = {  # type: ignore [index]
+                    "type": "json_object",
+                    "schema": json_schema_details["schema"],
+                }
+
+        return payload
+

 class TogetherTextToImageTask(TogetherTask):
     def __init__(self):
         super().__init__("text-to-image")

     def _prepare_payload_as_dict(
-        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
-    ) -> Optional[Dict]:
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
         mapped_model = provider_mapping_info.provider_id
         parameters = filter_none(parameters)
         if "num_inference_steps" in parameters:
@@ -68,6 +83,6 @@ class TogetherTextToImageTask(TogetherTask):

         return {"prompt": inputs, "response_format": "base64", **parameters, "model": mapped_model}

-    def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any:
+    def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any:
         response_dict = _as_dict(response)
         return base64.b64decode(response_dict["data"][0]["b64_json"])
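Together's conversational override rewrites the OpenAI-style `json_schema` wrapper into the `{"type": "json_object", "schema": ...}` shape its API expects. A standalone sketch of the rewrite (hypothetical input):

response_format = {
    "type": "json_schema",
    "json_schema": {"name": "person", "schema": {"type": "object"}},
}
details = response_format.get("json_schema")
if response_format.get("type") == "json_schema" and isinstance(details, dict) and "schema" in details:
    response_format = {"type": "json_object", "schema": details["schema"]}
assert response_format == {"type": "json_object", "schema": {"type": "object"}}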
huggingface_hub/inference/_providers/wavespeed.py (new file)
@@ -0,0 +1,138 @@
+import base64
+import time
+from abc import ABC
+from typing import Any, Optional, Union
+from urllib.parse import urlparse
+
+from huggingface_hub.hf_api import InferenceProviderMapping
+from huggingface_hub.inference._common import RequestParameters, _as_dict
+from huggingface_hub.inference._providers._common import TaskProviderHelper, filter_none
+from huggingface_hub.utils import get_session, hf_raise_for_status
+from huggingface_hub.utils.logging import get_logger
+
+
+logger = get_logger(__name__)
+
+# Polling interval (in seconds)
+_POLLING_INTERVAL = 0.5
+
+
+class WavespeedAITask(TaskProviderHelper, ABC):
+    def __init__(self, task: str):
+        super().__init__(provider="wavespeed", base_url="https://api.wavespeed.ai", task=task)
+
+    def _prepare_route(self, mapped_model: str, api_key: str) -> str:
+        return f"/api/v3/{mapped_model}"
+
+    def get_response(
+        self,
+        response: Union[bytes, dict],
+        request_params: Optional[RequestParameters] = None,
+    ) -> Any:
+        response_dict = _as_dict(response)
+        data = response_dict.get("data", {})
+        result_path = data.get("urls", {}).get("get")
+
+        if not result_path:
+            raise ValueError("No result URL found in the response")
+        if request_params is None:
+            raise ValueError("A `RequestParameters` object should be provided to get responses with WaveSpeed AI.")
+
+        # Parse the request URL to determine base URL
+        parsed_url = urlparse(request_params.url)
+        # Add /wavespeed to base URL if going through HF router
+        if parsed_url.netloc == "router.huggingface.co":
+            base_url = f"{parsed_url.scheme}://{parsed_url.netloc}/wavespeed"
+        else:
+            base_url = f"{parsed_url.scheme}://{parsed_url.netloc}"
+
+        # Extract path from result_path URL
+        if isinstance(result_path, str):
+            result_url_path = urlparse(result_path).path
+        else:
+            result_url_path = result_path
+
+        result_url = f"{base_url}{result_url_path}"
+
+        logger.info("Processing request, polling for results...")
+
+        # Poll until task is completed
+        while True:
+            time.sleep(_POLLING_INTERVAL)
+            result_response = get_session().get(result_url, headers=request_params.headers)
+            hf_raise_for_status(result_response)
+
+            result = result_response.json()
+            task_result = result.get("data", {})
+            status = task_result.get("status")
+
+            if status == "completed":
+                # Get content from the first output URL
+                if not task_result.get("outputs") or len(task_result["outputs"]) == 0:
+                    raise ValueError("No output URL in completed response")
+
+                output_url = task_result["outputs"][0]
+                return get_session().get(output_url).content
+            elif status == "failed":
+                error_msg = task_result.get("error", "Task failed with no specific error message")
+                raise ValueError(f"WaveSpeed AI task failed: {error_msg}")
+            elif status in ["processing", "created"]:
+                continue
+            else:
+                raise ValueError(f"Unknown status: {status}")
+
+
+class WavespeedAITextToImageTask(WavespeedAITask):
+    def __init__(self):
+        super().__init__("text-to-image")
+
+    def _prepare_payload_as_dict(
+        self,
+        inputs: Any,
+        parameters: dict,
+        provider_mapping_info: InferenceProviderMapping,
+    ) -> Optional[dict]:
+        return {"prompt": inputs, **filter_none(parameters)}
+
+
+class WavespeedAITextToVideoTask(WavespeedAITextToImageTask):
+    def __init__(self):
+        WavespeedAITask.__init__(self, "text-to-video")
+
+
+class WavespeedAIImageToImageTask(WavespeedAITask):
+    def __init__(self):
+        super().__init__("image-to-image")
+
+    def _prepare_payload_as_dict(
+        self,
+        inputs: Any,
+        parameters: dict,
+        provider_mapping_info: InferenceProviderMapping,
+    ) -> Optional[dict]:
+        # Convert inputs to image (URL or base64)
+        if isinstance(inputs, str) and inputs.startswith(("http://", "https://")):
+            image = inputs
+        elif isinstance(inputs, str):
+            # If input is a file path, read it first
+            with open(inputs, "rb") as f:
+                file_content = f.read()
+            image_b64 = base64.b64encode(file_content).decode("utf-8")
+            image = f"data:image/jpeg;base64,{image_b64}"
+        else:
+            # If input is binary data
+            image_b64 = base64.b64encode(inputs).decode("utf-8")
+            image = f"data:image/jpeg;base64,{image_b64}"
+
+        # Extract prompt from parameters if present
+        prompt = parameters.pop("prompt", None)
+        payload = {"image": image, **filter_none(parameters)}
+        if prompt is not None:
+            payload["prompt"] = prompt
+
+        return payload
+
+
+class WavespeedAIImageToVideoTask(WavespeedAIImageToImageTask):
+    def __init__(self):
+        WavespeedAITask.__init__(self, "image-to-video")
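Unlike the other providers here, WaveSpeed returns a task handle rather than the artifact itself: `get_response` reads the `data.urls.get` pointer from the submission response, then polls it every 0.5 seconds until the status leaves `processing`/`created`, downloading the first entry of `outputs` on `completed` and raising on `failed` or any unknown status. A minimal usage sketch (assumes a valid HF token; the model id is illustrative, and the polling happens inside the client call):

from huggingface_hub import InferenceClient

client = InferenceClient(provider="wavespeed")
image = client.text_to_image(
    "an astronaut riding a horse on the moon",
    model="wavespeed-ai/flux-dev",  # illustrative model id
)
image.save("astronaut.png")  # text_to_image returns a PIL image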
huggingface_hub/inference/_providers/zai_org.py (new file)
@@ -0,0 +1,17 @@
+from typing import Any, Dict
+
+from huggingface_hub.inference._providers._common import BaseConversationalTask
+
+
+class ZaiConversationalTask(BaseConversationalTask):
+    def __init__(self):
+        super().__init__(provider="zai-org", base_url="https://api.z.ai")
+
+    def _prepare_headers(self, headers: Dict, api_key: str) -> Dict[str, Any]:
+        headers = super()._prepare_headers(headers, api_key)
+        headers["Accept-Language"] = "en-US,en"
+        headers["x-source-channel"] = "hugging_face"
+        return headers
+
+    def _prepare_route(self, mapped_model: str, api_key: str) -> str:
+        return "/api/paas/v4/chat/completions"