huggingface_hub-0.29.0rc2-py3-none-any.whl → huggingface_hub-1.1.3-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- huggingface_hub/__init__.py +160 -46
- huggingface_hub/_commit_api.py +277 -71
- huggingface_hub/_commit_scheduler.py +15 -15
- huggingface_hub/_inference_endpoints.py +33 -22
- huggingface_hub/_jobs_api.py +301 -0
- huggingface_hub/_local_folder.py +18 -3
- huggingface_hub/_login.py +31 -63
- huggingface_hub/_oauth.py +460 -0
- huggingface_hub/_snapshot_download.py +241 -81
- huggingface_hub/_space_api.py +18 -10
- huggingface_hub/_tensorboard_logger.py +15 -19
- huggingface_hub/_upload_large_folder.py +196 -76
- huggingface_hub/_webhooks_payload.py +3 -3
- huggingface_hub/_webhooks_server.py +15 -25
- huggingface_hub/{commands → cli}/__init__.py +1 -15
- huggingface_hub/cli/_cli_utils.py +173 -0
- huggingface_hub/cli/auth.py +147 -0
- huggingface_hub/cli/cache.py +841 -0
- huggingface_hub/cli/download.py +189 -0
- huggingface_hub/cli/hf.py +60 -0
- huggingface_hub/cli/inference_endpoints.py +377 -0
- huggingface_hub/cli/jobs.py +772 -0
- huggingface_hub/cli/lfs.py +175 -0
- huggingface_hub/cli/repo.py +315 -0
- huggingface_hub/cli/repo_files.py +94 -0
- huggingface_hub/{commands/env.py → cli/system.py} +10 -13
- huggingface_hub/cli/upload.py +294 -0
- huggingface_hub/cli/upload_large_folder.py +117 -0
- huggingface_hub/community.py +20 -12
- huggingface_hub/constants.py +83 -59
- huggingface_hub/dataclasses.py +609 -0
- huggingface_hub/errors.py +99 -30
- huggingface_hub/fastai_utils.py +30 -41
- huggingface_hub/file_download.py +606 -346
- huggingface_hub/hf_api.py +2445 -1132
- huggingface_hub/hf_file_system.py +269 -152
- huggingface_hub/hub_mixin.py +61 -66
- huggingface_hub/inference/_client.py +501 -630
- huggingface_hub/inference/_common.py +133 -121
- huggingface_hub/inference/_generated/_async_client.py +536 -722
- huggingface_hub/inference/_generated/types/__init__.py +6 -1
- huggingface_hub/inference/_generated/types/automatic_speech_recognition.py +5 -6
- huggingface_hub/inference/_generated/types/base.py +10 -7
- huggingface_hub/inference/_generated/types/chat_completion.py +77 -31
- huggingface_hub/inference/_generated/types/depth_estimation.py +2 -2
- huggingface_hub/inference/_generated/types/document_question_answering.py +2 -2
- huggingface_hub/inference/_generated/types/feature_extraction.py +2 -2
- huggingface_hub/inference/_generated/types/fill_mask.py +2 -2
- huggingface_hub/inference/_generated/types/image_to_image.py +8 -2
- huggingface_hub/inference/_generated/types/image_to_text.py +2 -3
- huggingface_hub/inference/_generated/types/image_to_video.py +60 -0
- huggingface_hub/inference/_generated/types/sentence_similarity.py +3 -3
- huggingface_hub/inference/_generated/types/summarization.py +2 -2
- huggingface_hub/inference/_generated/types/table_question_answering.py +5 -5
- huggingface_hub/inference/_generated/types/text2text_generation.py +2 -2
- huggingface_hub/inference/_generated/types/text_generation.py +11 -11
- huggingface_hub/inference/_generated/types/text_to_audio.py +1 -2
- huggingface_hub/inference/_generated/types/text_to_speech.py +1 -2
- huggingface_hub/inference/_generated/types/text_to_video.py +2 -2
- huggingface_hub/inference/_generated/types/token_classification.py +2 -2
- huggingface_hub/inference/_generated/types/translation.py +2 -2
- huggingface_hub/inference/_generated/types/zero_shot_classification.py +2 -2
- huggingface_hub/inference/_generated/types/zero_shot_image_classification.py +2 -2
- huggingface_hub/inference/_generated/types/zero_shot_object_detection.py +1 -3
- huggingface_hub/inference/_mcp/__init__.py +0 -0
- huggingface_hub/inference/_mcp/_cli_hacks.py +88 -0
- huggingface_hub/inference/_mcp/agent.py +100 -0
- huggingface_hub/inference/_mcp/cli.py +247 -0
- huggingface_hub/inference/_mcp/constants.py +81 -0
- huggingface_hub/inference/_mcp/mcp_client.py +395 -0
- huggingface_hub/inference/_mcp/types.py +45 -0
- huggingface_hub/inference/_mcp/utils.py +128 -0
- huggingface_hub/inference/_providers/__init__.py +149 -20
- huggingface_hub/inference/_providers/_common.py +160 -37
- huggingface_hub/inference/_providers/black_forest_labs.py +12 -9
- huggingface_hub/inference/_providers/cerebras.py +6 -0
- huggingface_hub/inference/_providers/clarifai.py +13 -0
- huggingface_hub/inference/_providers/cohere.py +32 -0
- huggingface_hub/inference/_providers/fal_ai.py +231 -22
- huggingface_hub/inference/_providers/featherless_ai.py +38 -0
- huggingface_hub/inference/_providers/fireworks_ai.py +22 -1
- huggingface_hub/inference/_providers/groq.py +9 -0
- huggingface_hub/inference/_providers/hf_inference.py +143 -33
- huggingface_hub/inference/_providers/hyperbolic.py +9 -5
- huggingface_hub/inference/_providers/nebius.py +47 -5
- huggingface_hub/inference/_providers/novita.py +48 -5
- huggingface_hub/inference/_providers/nscale.py +44 -0
- huggingface_hub/inference/_providers/openai.py +25 -0
- huggingface_hub/inference/_providers/publicai.py +6 -0
- huggingface_hub/inference/_providers/replicate.py +46 -9
- huggingface_hub/inference/_providers/sambanova.py +37 -1
- huggingface_hub/inference/_providers/scaleway.py +28 -0
- huggingface_hub/inference/_providers/together.py +34 -5
- huggingface_hub/inference/_providers/wavespeed.py +138 -0
- huggingface_hub/inference/_providers/zai_org.py +17 -0
- huggingface_hub/lfs.py +33 -100
- huggingface_hub/repocard.py +34 -38
- huggingface_hub/repocard_data.py +79 -59
- huggingface_hub/serialization/__init__.py +0 -1
- huggingface_hub/serialization/_base.py +12 -15
- huggingface_hub/serialization/_dduf.py +8 -8
- huggingface_hub/serialization/_torch.py +69 -69
- huggingface_hub/utils/__init__.py +27 -8
- huggingface_hub/utils/_auth.py +7 -7
- huggingface_hub/utils/_cache_manager.py +92 -147
- huggingface_hub/utils/_chunk_utils.py +2 -3
- huggingface_hub/utils/_deprecation.py +1 -1
- huggingface_hub/utils/_dotenv.py +55 -0
- huggingface_hub/utils/_experimental.py +7 -5
- huggingface_hub/utils/_fixes.py +0 -10
- huggingface_hub/utils/_git_credential.py +5 -5
- huggingface_hub/utils/_headers.py +8 -30
- huggingface_hub/utils/_http.py +399 -237
- huggingface_hub/utils/_pagination.py +6 -6
- huggingface_hub/utils/_parsing.py +98 -0
- huggingface_hub/utils/_paths.py +5 -5
- huggingface_hub/utils/_runtime.py +74 -22
- huggingface_hub/utils/_safetensors.py +21 -21
- huggingface_hub/utils/_subprocess.py +13 -11
- huggingface_hub/utils/_telemetry.py +4 -4
- huggingface_hub/{commands/_cli_utils.py → utils/_terminal.py} +4 -4
- huggingface_hub/utils/_typing.py +25 -5
- huggingface_hub/utils/_validators.py +55 -74
- huggingface_hub/utils/_verification.py +167 -0
- huggingface_hub/utils/_xet.py +235 -0
- huggingface_hub/utils/_xet_progress_reporting.py +162 -0
- huggingface_hub/utils/insecure_hashlib.py +3 -5
- huggingface_hub/utils/logging.py +8 -11
- huggingface_hub/utils/tqdm.py +33 -4
- {huggingface_hub-0.29.0rc2.dist-info → huggingface_hub-1.1.3.dist-info}/METADATA +94 -82
- huggingface_hub-1.1.3.dist-info/RECORD +155 -0
- {huggingface_hub-0.29.0rc2.dist-info → huggingface_hub-1.1.3.dist-info}/WHEEL +1 -1
- huggingface_hub-1.1.3.dist-info/entry_points.txt +6 -0
- huggingface_hub/commands/delete_cache.py +0 -428
- huggingface_hub/commands/download.py +0 -200
- huggingface_hub/commands/huggingface_cli.py +0 -61
- huggingface_hub/commands/lfs.py +0 -200
- huggingface_hub/commands/repo_files.py +0 -128
- huggingface_hub/commands/scan_cache.py +0 -181
- huggingface_hub/commands/tag.py +0 -159
- huggingface_hub/commands/upload.py +0 -299
- huggingface_hub/commands/upload_large_folder.py +0 -129
- huggingface_hub/commands/user.py +0 -304
- huggingface_hub/commands/version.py +0 -37
- huggingface_hub/inference_api.py +0 -217
- huggingface_hub/keras_mixin.py +0 -500
- huggingface_hub/repository.py +0 -1477
- huggingface_hub/serialization/_tensorflow.py +0 -95
- huggingface_hub/utils/_hf_folder.py +0 -68
- huggingface_hub-0.29.0rc2.dist-info/RECORD +0 -131
- huggingface_hub-0.29.0rc2.dist-info/entry_points.txt +0 -6
- {huggingface_hub-0.29.0rc2.dist-info → huggingface_hub-1.1.3.dist-info/licenses}/LICENSE +0 -0
- {huggingface_hub-0.29.0rc2.dist-info → huggingface_hub-1.1.3.dist-info}/top_level.txt +0 -0
Note: some removed (left-hand) lines below are truncated, as rendered by the original diff viewer.

huggingface_hub/inference/_providers/hyperbolic.py

@@ -1,7 +1,8 @@
 import base64
-from typing import Any,
+from typing import Any, Optional, Union

-from huggingface_hub.
+from huggingface_hub.hf_api import InferenceProviderMapping
+from huggingface_hub.inference._common import RequestParameters, _as_dict
 from huggingface_hub.inference._providers._common import BaseConversationalTask, TaskProviderHelper, filter_none


@@ -9,10 +10,13 @@ class HyperbolicTextToImageTask(TaskProviderHelper):
     def __init__(self):
         super().__init__(provider="hyperbolic", base_url="https://api.hyperbolic.xyz", task="text-to-image")

-    def _prepare_route(self, mapped_model: str) -> str:
+    def _prepare_route(self, mapped_model: str, api_key: str) -> str:
         return "/v1/images/generations"

-    def _prepare_payload_as_dict(
+    def _prepare_payload_as_dict(
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
+        mapped_model = provider_mapping_info.provider_id
         parameters = filter_none(parameters)
         if "num_inference_steps" in parameters:
             parameters["steps"] = parameters.pop("num_inference_steps")
@@ -25,7 +29,7 @@ class HyperbolicTextToImageTask(TaskProviderHelper):
         parameters["height"] = 512
         return {"prompt": inputs, "model_name": mapped_model, **parameters}

-    def get_response(self, response: Union[bytes,
+    def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any:
         response_dict = _as_dict(response)
         return base64.b64decode(response_dict["images"][0]["image"])
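Across the provider hunks in this release, the same interface change repeats: `_prepare_route` now also receives the caller's `api_key`, `_prepare_payload_as_dict` takes the full `InferenceProviderMapping` (the provider-side model ID is read from its `provider_id` field) instead of a pre-resolved `mapped_model` string, and `get_response` accepts the originating `RequestParameters`. A minimal sketch of a helper written against the new hooks; the provider name, endpoint, and response shape are invented for illustration:

```python
from typing import Any, Optional, Union

from huggingface_hub.hf_api import InferenceProviderMapping
from huggingface_hub.inference._common import RequestParameters, _as_dict
from huggingface_hub.inference._providers._common import TaskProviderHelper, filter_none


class ExampleTextToImageTask(TaskProviderHelper):
    def __init__(self):
        # "example" and the base URL are placeholders, not a real provider
        super().__init__(provider="example", base_url="https://api.example.com", task="text-to-image")

    def _prepare_route(self, mapped_model: str, api_key: str) -> str:
        # the API key is now available when building the route
        return "/v1/images/generations"

    def _prepare_payload_as_dict(
        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
    ) -> Optional[dict]:
        # the provider-side model ID comes from the mapping object
        return {"prompt": inputs, "model": provider_mapping_info.provider_id, **filter_none(parameters)}

    def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any:
        # hypothetical response shape: {"data": [...]}
        return _as_dict(response)["data"][0]
```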
huggingface_hub/inference/_providers/nebius.py

@@ -1,7 +1,8 @@
 import base64
-from typing import Any,
+from typing import Any, Optional, Union

-from huggingface_hub.
+from huggingface_hub.hf_api import InferenceProviderMapping
+from huggingface_hub.inference._common import RequestParameters, _as_dict
 from huggingface_hub.inference._providers._common import (
     BaseConversationalTask,
     BaseTextGenerationTask,
@@ -14,20 +15,44 @@ class NebiusTextGenerationTask(BaseTextGenerationTask):
     def __init__(self):
         super().__init__(provider="nebius", base_url="https://api.studio.nebius.ai")

+    def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any:
+        output = _as_dict(response)["choices"][0]
+        return {
+            "generated_text": output["text"],
+            "details": {
+                "finish_reason": output.get("finish_reason"),
+                "seed": output.get("seed"),
+            },
+        }
+

 class NebiusConversationalTask(BaseConversationalTask):
     def __init__(self):
         super().__init__(provider="nebius", base_url="https://api.studio.nebius.ai")

+    def _prepare_payload_as_dict(
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
+        payload = super()._prepare_payload_as_dict(inputs, parameters, provider_mapping_info)
+        response_format = parameters.get("response_format")
+        if isinstance(response_format, dict) and response_format.get("type") == "json_schema":
+            json_schema_details = response_format.get("json_schema")
+            if isinstance(json_schema_details, dict) and "schema" in json_schema_details:
+                payload["guided_json"] = json_schema_details["schema"]  # type: ignore [index]
+        return payload
+

 class NebiusTextToImageTask(TaskProviderHelper):
     def __init__(self):
         super().__init__(task="text-to-image", provider="nebius", base_url="https://api.studio.nebius.ai")

-    def _prepare_route(self, mapped_model: str) -> str:
+    def _prepare_route(self, mapped_model: str, api_key: str) -> str:
         return "/v1/images/generations"

-    def _prepare_payload_as_dict(
+    def _prepare_payload_as_dict(
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
+        mapped_model = provider_mapping_info.provider_id
         parameters = filter_none(parameters)
         if "guidance_scale" in parameters:
             parameters.pop("guidance_scale")
@@ -36,6 +61,23 @@ class NebiusTextToImageTask(TaskProviderHelper):

         return {"prompt": inputs, **parameters, "model": mapped_model}

-    def get_response(self, response: Union[bytes,
+    def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any:
         response_dict = _as_dict(response)
         return base64.b64decode(response_dict["data"][0]["b64_json"])
+
+
+class NebiusFeatureExtractionTask(TaskProviderHelper):
+    def __init__(self):
+        super().__init__(task="feature-extraction", provider="nebius", base_url="https://api.studio.nebius.ai")
+
+    def _prepare_route(self, mapped_model: str, api_key: str) -> str:
+        return "/v1/embeddings"
+
+    def _prepare_payload_as_dict(
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
+        return {"input": inputs, "model": provider_mapping_info.provider_id}
+
+    def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any:
+        embeddings = _as_dict(response)["data"]
+        return [embedding["embedding"] for embedding in embeddings]
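Nebius's conversational task now maps an OpenAI-style `json_schema` response format onto the `guided_json` field its API expects. A hedged usage sketch (the model ID is illustrative):

```python
from huggingface_hub import InferenceClient

client = InferenceClient(provider="nebius")
completion = client.chat_completion(
    model="meta-llama/Meta-Llama-3.1-8B-Instruct",  # illustrative model ID
    messages=[{"role": "user", "content": "Name one EU capital, as JSON."}],
    response_format={
        "type": "json_schema",
        "json_schema": {
            "name": "capital",
            "schema": {"type": "object", "properties": {"city": {"type": "string"}}},
        },
    },
)
# NebiusConversationalTask copies the inner schema into payload["guided_json"] before sending
```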
huggingface_hub/inference/_providers/novita.py

@@ -1,26 +1,69 @@
+from typing import Any, Optional, Union
+
+from huggingface_hub.hf_api import InferenceProviderMapping
+from huggingface_hub.inference._common import RequestParameters, _as_dict
 from huggingface_hub.inference._providers._common import (
     BaseConversationalTask,
     BaseTextGenerationTask,
+    TaskProviderHelper,
+    filter_none,
 )
+from huggingface_hub.utils import get_session


 _PROVIDER = "novita"
-_BASE_URL = "https://api.novita.ai
+_BASE_URL = "https://api.novita.ai"


 class NovitaTextGenerationTask(BaseTextGenerationTask):
     def __init__(self):
         super().__init__(provider=_PROVIDER, base_url=_BASE_URL)

-    def _prepare_route(self, mapped_model: str) -> str:
+    def _prepare_route(self, mapped_model: str, api_key: str) -> str:
         # there is no v1/ route for novita
-        return "/completions"
+        return "/v3/openai/completions"
+
+    def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any:
+        output = _as_dict(response)["choices"][0]
+        return {
+            "generated_text": output["text"],
+            "details": {
+                "finish_reason": output.get("finish_reason"),
+                "seed": output.get("seed"),
+            },
+        }


 class NovitaConversationalTask(BaseConversationalTask):
     def __init__(self):
         super().__init__(provider=_PROVIDER, base_url=_BASE_URL)

-    def _prepare_route(self, mapped_model: str) -> str:
+    def _prepare_route(self, mapped_model: str, api_key: str) -> str:
         # there is no v1/ route for novita
-        return "/chat/completions"
+        return "/v3/openai/chat/completions"
+
+
+class NovitaTextToVideoTask(TaskProviderHelper):
+    def __init__(self):
+        super().__init__(provider=_PROVIDER, base_url=_BASE_URL, task="text-to-video")
+
+    def _prepare_route(self, mapped_model: str, api_key: str) -> str:
+        return f"/v3/hf/{mapped_model}"
+
+    def _prepare_payload_as_dict(
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
+        return {"prompt": inputs, **filter_none(parameters)}
+
+    def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any:
+        response_dict = _as_dict(response)
+        if not (
+            isinstance(response_dict, dict)
+            and "video" in response_dict
+            and isinstance(response_dict["video"], dict)
+            and "video_url" in response_dict["video"]
+        ):
+            raise ValueError("Expected response format: { 'video': { 'video_url': string } }")
+
+        video_url = response_dict["video"]["video_url"]
+        return get_session().get(video_url).content
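Besides the route change from `/chat/completions` to `/v3/openai/chat/completions`, Novita gains a text-to-video task that posts to a model-specific `/v3/hf/{model}` route, validates the `{"video": {"video_url": ...}}` response shape, and downloads the video bytes. A usage sketch, assuming `text_to_video` routes through this helper (model ID illustrative):

```python
from huggingface_hub import InferenceClient

client = InferenceClient(provider="novita")
video = client.text_to_video(
    "A timelapse of clouds drifting over mountains",
    model="Wan-AI/Wan2.1-T2V-14B",  # illustrative model ID
)
with open("clouds.mp4", "wb") as f:
    f.write(video)
```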
huggingface_hub/inference/_providers/nscale.py (new file)

@@ -0,0 +1,44 @@
+import base64
+from typing import Any, Optional, Union
+
+from huggingface_hub.hf_api import InferenceProviderMapping
+from huggingface_hub.inference._common import RequestParameters, _as_dict
+
+from ._common import BaseConversationalTask, TaskProviderHelper, filter_none
+
+
+class NscaleConversationalTask(BaseConversationalTask):
+    def __init__(self):
+        super().__init__(provider="nscale", base_url="https://inference.api.nscale.com")
+
+
+class NscaleTextToImageTask(TaskProviderHelper):
+    def __init__(self):
+        super().__init__(provider="nscale", base_url="https://inference.api.nscale.com", task="text-to-image")
+
+    def _prepare_route(self, mapped_model: str, api_key: str) -> str:
+        return "/v1/images/generations"
+
+    def _prepare_payload_as_dict(
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
+        mapped_model = provider_mapping_info.provider_id
+        # Combine all parameters except inputs and parameters
+        parameters = filter_none(parameters)
+        if "width" in parameters and "height" in parameters:
+            parameters["size"] = f"{parameters.pop('width')}x{parameters.pop('height')}"
+        if "num_inference_steps" in parameters:
+            parameters.pop("num_inference_steps")
+        if "cfg_scale" in parameters:
+            parameters.pop("cfg_scale")
+        payload = {
+            "response_format": "b64_json",
+            "prompt": inputs,
+            "model": mapped_model,
+            **parameters,
+        }
+        return payload
+
+    def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any:
+        response_dict = _as_dict(response)
+        return base64.b64decode(response_dict["data"][0]["b64_json"])
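Worth noting in the payload builder above: nscale's image endpoint is OpenAI-shaped, so the helper folds `width`/`height` into a single `size` string and silently drops `num_inference_steps` and `cfg_scale` rather than forwarding unsupported fields. A small illustration of the mapping (values are made up):

```python
# caller-side parameters
parameters = {"width": 1024, "height": 768, "num_inference_steps": 30, "cfg_scale": 7.5}

# approximate request body after NscaleTextToImageTask._prepare_payload_as_dict
payload = {
    "response_format": "b64_json",
    "prompt": "a lighthouse at dusk",
    "model": "provider-model-id",  # stands in for provider_mapping_info.provider_id
    "size": "1024x768",  # width/height folded together; steps and cfg_scale dropped
}
```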
huggingface_hub/inference/_providers/openai.py (new file)

@@ -0,0 +1,25 @@
+from typing import Optional
+
+from huggingface_hub.hf_api import InferenceProviderMapping
+from huggingface_hub.inference._providers._common import BaseConversationalTask
+
+
+class OpenAIConversationalTask(BaseConversationalTask):
+    def __init__(self):
+        super().__init__(provider="openai", base_url="https://api.openai.com")
+
+    def _prepare_api_key(self, api_key: Optional[str]) -> str:
+        if api_key is None:
+            raise ValueError("You must provide an api_key to work with OpenAI API.")
+        if api_key.startswith("hf_"):
+            raise ValueError(
+                "OpenAI provider is not available through Hugging Face routing, please use your own OpenAI API key."
+            )
+        return api_key
+
+    def _prepare_mapping_info(self, model: Optional[str]) -> InferenceProviderMapping:
+        if model is None:
+            raise ValueError("Please provide an OpenAI model ID, e.g. `gpt-4o` or `o1`.")
+        return InferenceProviderMapping(
+            provider="openai", providerId=model, task="conversational", status="live", hf_model_id=model
+        )
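Unlike the other providers, OpenAI is direct-calls-only: `_prepare_api_key` rejects a missing key and any `hf_`-prefixed token, and `_prepare_mapping_info` skips Hub model mapping entirely, treating the given ID as an OpenAI model name. A usage sketch:

```python
from huggingface_hub import InferenceClient

# must be a real OpenAI key; an hf_... token raises ValueError
client = InferenceClient(provider="openai", api_key="sk-...")
completion = client.chat_completion(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Say hi."}],
)
```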
huggingface_hub/inference/_providers/replicate.py

@@ -1,6 +1,7 @@
-from typing import Any,
+from typing import Any, Optional, Union

-from huggingface_hub.
+from huggingface_hub.hf_api import InferenceProviderMapping
+from huggingface_hub.inference._common import RequestParameters, _as_dict, _as_url
 from huggingface_hub.inference._providers._common import TaskProviderHelper, filter_none
 from huggingface_hub.utils import get_session

@@ -13,24 +14,27 @@ class ReplicateTask(TaskProviderHelper):
     def __init__(self, task: str):
         super().__init__(provider=_PROVIDER, base_url=_BASE_URL, task=task)

-    def _prepare_headers(self, headers:
+    def _prepare_headers(self, headers: dict, api_key: str) -> dict[str, Any]:
         headers = super()._prepare_headers(headers, api_key)
         headers["Prefer"] = "wait"
         return headers

-    def _prepare_route(self, mapped_model: str) -> str:
+    def _prepare_route(self, mapped_model: str, api_key: str) -> str:
         if ":" in mapped_model:
             return "/v1/predictions"
         return f"/v1/models/{mapped_model}/predictions"

-    def _prepare_payload_as_dict(
-
+    def _prepare_payload_as_dict(
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
+        mapped_model = provider_mapping_info.provider_id
+        payload: dict[str, Any] = {"input": {"prompt": inputs, **filter_none(parameters)}}
         if ":" in mapped_model:
             version = mapped_model.split(":", 1)[1]
             payload["version"] = version
         return payload

-    def get_response(self, response: Union[bytes,
+    def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any:
         response_dict = _as_dict(response)
         if response_dict.get("output") is None:
             raise TimeoutError(
@@ -43,11 +47,44 @@ class ReplicateTask(TaskProviderHelper):
         return get_session().get(output_url).content


+class ReplicateTextToImageTask(ReplicateTask):
+    def __init__(self):
+        super().__init__("text-to-image")
+
+    def _prepare_payload_as_dict(
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
+        payload: dict = super()._prepare_payload_as_dict(inputs, parameters, provider_mapping_info)  # type: ignore[assignment]
+        if provider_mapping_info.adapter_weights_path is not None:
+            payload["input"]["lora_weights"] = f"https://huggingface.co/{provider_mapping_info.hf_model_id}"
+        return payload
+
+
 class ReplicateTextToSpeechTask(ReplicateTask):
     def __init__(self):
         super().__init__("text-to-speech")

-    def _prepare_payload_as_dict(
-
+    def _prepare_payload_as_dict(
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
+        payload: dict = super()._prepare_payload_as_dict(inputs, parameters, provider_mapping_info)  # type: ignore[assignment]
         payload["input"]["text"] = payload["input"].pop("prompt")  # rename "prompt" to "text" for TTS
         return payload
+
+
+class ReplicateImageToImageTask(ReplicateTask):
+    def __init__(self):
+        super().__init__("image-to-image")
+
+    def _prepare_payload_as_dict(
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
+        image_url = _as_url(inputs, default_mime_type="image/jpeg")
+
+        payload: dict[str, Any] = {"input": {"input_image": image_url, **filter_none(parameters)}}
+
+        mapped_model = provider_mapping_info.provider_id
+        if ":" in mapped_model:
+            version = mapped_model.split(":", 1)[1]
+            payload["version"] = version
+        return payload
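The new `ReplicateTextToImageTask` adds LoRA support: when the provider mapping carries an `adapter_weights_path`, the helper points Replicate's `lora_weights` input at the adapter's Hub repo. A rough sketch of the resulting request body (field values are placeholders):

```python
# approximate JSON sent to Replicate for a LoRA-backed text-to-image model
payload = {
    "input": {
        "prompt": "an astronaut riding a horse",
        # added only when provider_mapping_info.adapter_weights_path is set;
        # hf_model_id is the Hub repo hosting the adapter weights
        "lora_weights": "https://huggingface.co/<hf_model_id>",
    },
    # "version": "<version>" is added when the mapped model ID contains ":"
}
```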
huggingface_hub/inference/_providers/sambanova.py

@@ -1,6 +1,42 @@
-from
+from typing import Any, Optional, Union
+
+from huggingface_hub.hf_api import InferenceProviderMapping
+from huggingface_hub.inference._common import RequestParameters, _as_dict
+from huggingface_hub.inference._providers._common import BaseConversationalTask, TaskProviderHelper, filter_none


 class SambanovaConversationalTask(BaseConversationalTask):
     def __init__(self):
         super().__init__(provider="sambanova", base_url="https://api.sambanova.ai")
+
+    def _prepare_payload_as_dict(
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
+        response_format_config = parameters.get("response_format")
+        if isinstance(response_format_config, dict):
+            if response_format_config.get("type") == "json_schema":
+                json_schema_config = response_format_config.get("json_schema", {})
+                strict = json_schema_config.get("strict")
+                if isinstance(json_schema_config, dict) and (strict is True or strict is None):
+                    json_schema_config["strict"] = False
+
+        payload = super()._prepare_payload_as_dict(inputs, parameters, provider_mapping_info)
+        return payload
+
+
+class SambanovaFeatureExtractionTask(TaskProviderHelper):
+    def __init__(self):
+        super().__init__(provider="sambanova", base_url="https://api.sambanova.ai", task="feature-extraction")
+
+    def _prepare_route(self, mapped_model: str, api_key: str) -> str:
+        return "/v1/embeddings"
+
+    def _prepare_payload_as_dict(
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
+        parameters = filter_none(parameters)
+        return {"input": inputs, "model": provider_mapping_info.provider_id, **parameters}
+
+    def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any:
+        embeddings = _as_dict(response)["data"]
+        return [embedding["embedding"] for embedding in embeddings]
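The conversational change force-disables strict JSON-schema mode: if `response_format["json_schema"]["strict"]` is `True` or unset, it is rewritten to `False` in place before the payload is built. A before/after sketch:

```python
# what the caller passes
response_format = {
    "type": "json_schema",
    "json_schema": {"name": "answer", "schema": {"type": "object"}, "strict": True},
}
# after SambanovaConversationalTask._prepare_payload_as_dict, the same dict reads:
# {"type": "json_schema", "json_schema": {"name": "answer", "schema": {"type": "object"}, "strict": False}}
```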
huggingface_hub/inference/_providers/scaleway.py (new file)

@@ -0,0 +1,28 @@
+from typing import Any, Dict, Optional, Union
+
+from huggingface_hub.inference._common import RequestParameters, _as_dict
+
+from ._common import BaseConversationalTask, InferenceProviderMapping, TaskProviderHelper, filter_none
+
+
+class ScalewayConversationalTask(BaseConversationalTask):
+    def __init__(self):
+        super().__init__(provider="scaleway", base_url="https://api.scaleway.ai")
+
+
+class ScalewayFeatureExtractionTask(TaskProviderHelper):
+    def __init__(self):
+        super().__init__(provider="scaleway", base_url="https://api.scaleway.ai", task="feature-extraction")
+
+    def _prepare_route(self, mapped_model: str, api_key: str) -> str:
+        return "/v1/embeddings"
+
+    def _prepare_payload_as_dict(
+        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[Dict]:
+        parameters = filter_none(parameters)
+        return {"input": inputs, "model": provider_mapping_info.provider_id, **parameters}
+
+    def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any:
+        embeddings = _as_dict(response)["data"]
+        return [embedding["embedding"] for embedding in embeddings]
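Scaleway's embedding helper follows the same OpenAI-compatible `/v1/embeddings` pattern as the new Nebius and SambaNova tasks, unwrapping `data[*].embedding` from the response. A usage sketch (model ID illustrative):

```python
from huggingface_hub import InferenceClient

client = InferenceClient(provider="scaleway")
embedding = client.feature_extraction(
    "Hello world",
    model="BAAI/bge-multilingual-gemma2",  # illustrative model ID
)
```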
huggingface_hub/inference/_providers/together.py

@@ -1,8 +1,9 @@
 import base64
 from abc import ABC
-from typing import Any,
+from typing import Any, Optional, Union

-from huggingface_hub.
+from huggingface_hub.hf_api import InferenceProviderMapping
+from huggingface_hub.inference._common import RequestParameters, _as_dict
 from huggingface_hub.inference._providers._common import (
     BaseConversationalTask,
     BaseTextGenerationTask,
@@ -21,7 +22,7 @@ class TogetherTask(TaskProviderHelper, ABC):
     def __init__(self, task: str):
         super().__init__(provider=_PROVIDER, base_url=_BASE_URL, task=task)

-    def _prepare_route(self, mapped_model: str) -> str:
+    def _prepare_route(self, mapped_model: str, api_key: str) -> str:
         if self.task == "text-to-image":
             return "/v1/images/generations"
         elif self.task == "conversational":
@@ -35,17 +36,45 @@ class TogetherTextGenerationTask(BaseTextGenerationTask):
     def __init__(self):
         super().__init__(provider=_PROVIDER, base_url=_BASE_URL)

+    def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any:
+        output = _as_dict(response)["choices"][0]
+        return {
+            "generated_text": output["text"],
+            "details": {
+                "finish_reason": output.get("finish_reason"),
+                "seed": output.get("seed"),
+            },
+        }
+

 class TogetherConversationalTask(BaseConversationalTask):
     def __init__(self):
         super().__init__(provider=_PROVIDER, base_url=_BASE_URL)

+    def _prepare_payload_as_dict(
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
+        payload = super()._prepare_payload_as_dict(inputs, parameters, provider_mapping_info)
+        response_format = parameters.get("response_format")
+        if isinstance(response_format, dict) and response_format.get("type") == "json_schema":
+            json_schema_details = response_format.get("json_schema")
+            if isinstance(json_schema_details, dict) and "schema" in json_schema_details:
+                payload["response_format"] = {  # type: ignore [index]
+                    "type": "json_object",
+                    "schema": json_schema_details["schema"],
+                }
+
+        return payload
+

 class TogetherTextToImageTask(TogetherTask):
     def __init__(self):
         super().__init__("text-to-image")

-    def _prepare_payload_as_dict(
+    def _prepare_payload_as_dict(
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
+        mapped_model = provider_mapping_info.provider_id
         parameters = filter_none(parameters)
         if "num_inference_steps" in parameters:
             parameters["steps"] = parameters.pop("num_inference_steps")
@@ -54,6 +83,6 @@ class TogetherTextToImageTask(TogetherTask):

         return {"prompt": inputs, "response_format": "base64", **parameters, "model": mapped_model}

-    def get_response(self, response: Union[bytes,
+    def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any:
         response_dict = _as_dict(response)
         return base64.b64decode(response_dict["data"][0]["b64_json"])
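Together handles structured output differently from Nebius: instead of a `guided_json` field, it rewrites the `response_format` into its own `{"type": "json_object", "schema": ...}` convention. A comparison sketch:

```python
# caller-side (OpenAI-style) response format
response_format = {
    "type": "json_schema",
    "json_schema": {"name": "answer", "schema": {"type": "object"}},
}

# what TogetherConversationalTask puts in the payload instead
together_response_format = {"type": "json_object", "schema": {"type": "object"}}
```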
huggingface_hub/inference/_providers/wavespeed.py (new file)

@@ -0,0 +1,138 @@
+import base64
+import time
+from abc import ABC
+from typing import Any, Optional, Union
+from urllib.parse import urlparse
+
+from huggingface_hub.hf_api import InferenceProviderMapping
+from huggingface_hub.inference._common import RequestParameters, _as_dict
+from huggingface_hub.inference._providers._common import TaskProviderHelper, filter_none
+from huggingface_hub.utils import get_session, hf_raise_for_status
+from huggingface_hub.utils.logging import get_logger
+
+
+logger = get_logger(__name__)
+
+# Polling interval (in seconds)
+_POLLING_INTERVAL = 0.5
+
+
+class WavespeedAITask(TaskProviderHelper, ABC):
+    def __init__(self, task: str):
+        super().__init__(provider="wavespeed", base_url="https://api.wavespeed.ai", task=task)
+
+    def _prepare_route(self, mapped_model: str, api_key: str) -> str:
+        return f"/api/v3/{mapped_model}"
+
+    def get_response(
+        self,
+        response: Union[bytes, dict],
+        request_params: Optional[RequestParameters] = None,
+    ) -> Any:
+        response_dict = _as_dict(response)
+        data = response_dict.get("data", {})
+        result_path = data.get("urls", {}).get("get")
+
+        if not result_path:
+            raise ValueError("No result URL found in the response")
+        if request_params is None:
+            raise ValueError("A `RequestParameters` object should be provided to get responses with WaveSpeed AI.")
+
+        # Parse the request URL to determine base URL
+        parsed_url = urlparse(request_params.url)
+        # Add /wavespeed to base URL if going through HF router
+        if parsed_url.netloc == "router.huggingface.co":
+            base_url = f"{parsed_url.scheme}://{parsed_url.netloc}/wavespeed"
+        else:
+            base_url = f"{parsed_url.scheme}://{parsed_url.netloc}"
+
+        # Extract path from result_path URL
+        if isinstance(result_path, str):
+            result_url_path = urlparse(result_path).path
+        else:
+            result_url_path = result_path
+
+        result_url = f"{base_url}{result_url_path}"
+
+        logger.info("Processing request, polling for results...")
+
+        # Poll until task is completed
+        while True:
+            time.sleep(_POLLING_INTERVAL)
+            result_response = get_session().get(result_url, headers=request_params.headers)
+            hf_raise_for_status(result_response)
+
+            result = result_response.json()
+            task_result = result.get("data", {})
+            status = task_result.get("status")
+
+            if status == "completed":
+                # Get content from the first output URL
+                if not task_result.get("outputs") or len(task_result["outputs"]) == 0:
+                    raise ValueError("No output URL in completed response")
+
+                output_url = task_result["outputs"][0]
+                return get_session().get(output_url).content
+            elif status == "failed":
+                error_msg = task_result.get("error", "Task failed with no specific error message")
+                raise ValueError(f"WaveSpeed AI task failed: {error_msg}")
+            elif status in ["processing", "created"]:
+                continue
+            else:
+                raise ValueError(f"Unknown status: {status}")
+
+
+class WavespeedAITextToImageTask(WavespeedAITask):
+    def __init__(self):
+        super().__init__("text-to-image")
+
+    def _prepare_payload_as_dict(
+        self,
+        inputs: Any,
+        parameters: dict,
+        provider_mapping_info: InferenceProviderMapping,
+    ) -> Optional[dict]:
+        return {"prompt": inputs, **filter_none(parameters)}
+
+
+class WavespeedAITextToVideoTask(WavespeedAITextToImageTask):
+    def __init__(self):
+        WavespeedAITask.__init__(self, "text-to-video")
+
+
+class WavespeedAIImageToImageTask(WavespeedAITask):
+    def __init__(self):
+        super().__init__("image-to-image")
+
+    def _prepare_payload_as_dict(
+        self,
+        inputs: Any,
+        parameters: dict,
+        provider_mapping_info: InferenceProviderMapping,
+    ) -> Optional[dict]:
+        # Convert inputs to image (URL or base64)
+        if isinstance(inputs, str) and inputs.startswith(("http://", "https://")):
+            image = inputs
+        elif isinstance(inputs, str):
+            # If input is a file path, read it first
+            with open(inputs, "rb") as f:
+                file_content = f.read()
+            image_b64 = base64.b64encode(file_content).decode("utf-8")
+            image = f"data:image/jpeg;base64,{image_b64}"
+        else:
+            # If input is binary data
+            image_b64 = base64.b64encode(inputs).decode("utf-8")
+            image = f"data:image/jpeg;base64,{image_b64}"
+
+        # Extract prompt from parameters if present
+        prompt = parameters.pop("prompt", None)
+        payload = {"image": image, **filter_none(parameters)}
+        if prompt is not None:
+            payload["prompt"] = prompt
+
+        return payload
+
+
+class WavespeedAIImageToVideoTask(WavespeedAIImageToImageTask):
+    def __init__(self):
+        WavespeedAITask.__init__(self, "image-to-video")
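WaveSpeed is the only provider here with an asynchronous job API: the initial response carries only a result URL under `data.urls.get`, and `get_response` polls it every 0.5 seconds until the job reports `completed` or `failed`, then downloads the first output. Note the loop has no timeout, so a job stuck in `processing` would poll indefinitely. A usage sketch (model ID illustrative):

```python
from huggingface_hub import InferenceClient

client = InferenceClient(provider="wavespeed")
image = client.text_to_image(
    "a watercolor fox",
    model="wavespeed-ai/flux-dev",  # illustrative model ID
)
# under the hood: POST /api/v3/{model}, then poll data.urls.get until status == "completed"
```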
huggingface_hub/inference/_providers/zai_org.py (new file)

@@ -0,0 +1,17 @@
+from typing import Any, Dict
+
+from huggingface_hub.inference._providers._common import BaseConversationalTask
+
+
+class ZaiConversationalTask(BaseConversationalTask):
+    def __init__(self):
+        super().__init__(provider="zai-org", base_url="https://api.z.ai")
+
+    def _prepare_headers(self, headers: Dict, api_key: str) -> Dict[str, Any]:
+        headers = super()._prepare_headers(headers, api_key)
+        headers["Accept-Language"] = "en-US,en"
+        headers["x-source-channel"] = "hugging_face"
+        return headers
+
+    def _prepare_route(self, mapped_model: str, api_key: str) -> str:
+        return "/api/paas/v4/chat/completions"
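The Z.ai helper only customizes headers (adding an `Accept-Language` and an `x-source-channel` attribution tag) and the non-`/v1` chat route. A usage sketch (model ID illustrative):

```python
from huggingface_hub import InferenceClient

client = InferenceClient(provider="zai-org")
completion = client.chat_completion(
    model="zai-org/GLM-4.5",  # illustrative model ID
    messages=[{"role": "user", "content": "Hello!"}],
)
# requests carry "x-source-channel: hugging_face" and hit /api/paas/v4/chat/completions
```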