huggingface-hub 0.35.1 → 1.0.0rc1 (py3-none-any.whl)

This diff compares the contents of two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of huggingface-hub has been flagged by the registry as possibly problematic.

Files changed (127)
  1. huggingface_hub/__init__.py +28 -45
  2. huggingface_hub/_commit_api.py +28 -28
  3. huggingface_hub/_commit_scheduler.py +11 -8
  4. huggingface_hub/_inference_endpoints.py +8 -8
  5. huggingface_hub/_jobs_api.py +20 -20
  6. huggingface_hub/_login.py +13 -39
  7. huggingface_hub/_oauth.py +8 -8
  8. huggingface_hub/_snapshot_download.py +14 -28
  9. huggingface_hub/_space_api.py +4 -4
  10. huggingface_hub/_tensorboard_logger.py +5 -5
  11. huggingface_hub/_upload_large_folder.py +15 -15
  12. huggingface_hub/_webhooks_payload.py +3 -3
  13. huggingface_hub/_webhooks_server.py +2 -2
  14. huggingface_hub/cli/__init__.py +0 -14
  15. huggingface_hub/cli/_cli_utils.py +80 -3
  16. huggingface_hub/cli/auth.py +104 -150
  17. huggingface_hub/cli/cache.py +102 -126
  18. huggingface_hub/cli/download.py +93 -110
  19. huggingface_hub/cli/hf.py +37 -41
  20. huggingface_hub/cli/jobs.py +689 -1017
  21. huggingface_hub/cli/lfs.py +120 -143
  22. huggingface_hub/cli/repo.py +158 -216
  23. huggingface_hub/cli/repo_files.py +50 -84
  24. huggingface_hub/cli/system.py +6 -25
  25. huggingface_hub/cli/upload.py +198 -212
  26. huggingface_hub/cli/upload_large_folder.py +90 -105
  27. huggingface_hub/commands/_cli_utils.py +2 -2
  28. huggingface_hub/commands/delete_cache.py +11 -11
  29. huggingface_hub/commands/download.py +4 -13
  30. huggingface_hub/commands/lfs.py +4 -4
  31. huggingface_hub/commands/repo_files.py +2 -2
  32. huggingface_hub/commands/tag.py +1 -3
  33. huggingface_hub/commands/upload.py +4 -4
  34. huggingface_hub/commands/upload_large_folder.py +3 -3
  35. huggingface_hub/commands/user.py +4 -5
  36. huggingface_hub/community.py +5 -5
  37. huggingface_hub/constants.py +3 -41
  38. huggingface_hub/dataclasses.py +16 -22
  39. huggingface_hub/errors.py +43 -30
  40. huggingface_hub/fastai_utils.py +8 -9
  41. huggingface_hub/file_download.py +154 -253
  42. huggingface_hub/hf_api.py +329 -558
  43. huggingface_hub/hf_file_system.py +104 -62
  44. huggingface_hub/hub_mixin.py +32 -54
  45. huggingface_hub/inference/_client.py +178 -163
  46. huggingface_hub/inference/_common.py +38 -54
  47. huggingface_hub/inference/_generated/_async_client.py +219 -259
  48. huggingface_hub/inference/_generated/types/automatic_speech_recognition.py +3 -3
  49. huggingface_hub/inference/_generated/types/base.py +10 -7
  50. huggingface_hub/inference/_generated/types/chat_completion.py +16 -16
  51. huggingface_hub/inference/_generated/types/depth_estimation.py +2 -2
  52. huggingface_hub/inference/_generated/types/document_question_answering.py +2 -2
  53. huggingface_hub/inference/_generated/types/feature_extraction.py +2 -2
  54. huggingface_hub/inference/_generated/types/fill_mask.py +2 -2
  55. huggingface_hub/inference/_generated/types/sentence_similarity.py +3 -3
  56. huggingface_hub/inference/_generated/types/summarization.py +2 -2
  57. huggingface_hub/inference/_generated/types/table_question_answering.py +4 -4
  58. huggingface_hub/inference/_generated/types/text2text_generation.py +2 -2
  59. huggingface_hub/inference/_generated/types/text_generation.py +10 -10
  60. huggingface_hub/inference/_generated/types/text_to_video.py +2 -2
  61. huggingface_hub/inference/_generated/types/token_classification.py +2 -2
  62. huggingface_hub/inference/_generated/types/translation.py +2 -2
  63. huggingface_hub/inference/_generated/types/zero_shot_classification.py +2 -2
  64. huggingface_hub/inference/_generated/types/zero_shot_image_classification.py +2 -2
  65. huggingface_hub/inference/_generated/types/zero_shot_object_detection.py +1 -3
  66. huggingface_hub/inference/_mcp/agent.py +3 -3
  67. huggingface_hub/inference/_mcp/constants.py +1 -2
  68. huggingface_hub/inference/_mcp/mcp_client.py +33 -22
  69. huggingface_hub/inference/_mcp/types.py +10 -10
  70. huggingface_hub/inference/_mcp/utils.py +4 -4
  71. huggingface_hub/inference/_providers/__init__.py +2 -13
  72. huggingface_hub/inference/_providers/_common.py +24 -25
  73. huggingface_hub/inference/_providers/black_forest_labs.py +6 -6
  74. huggingface_hub/inference/_providers/cohere.py +3 -3
  75. huggingface_hub/inference/_providers/fal_ai.py +25 -25
  76. huggingface_hub/inference/_providers/featherless_ai.py +4 -4
  77. huggingface_hub/inference/_providers/fireworks_ai.py +3 -3
  78. huggingface_hub/inference/_providers/hf_inference.py +13 -13
  79. huggingface_hub/inference/_providers/hyperbolic.py +4 -4
  80. huggingface_hub/inference/_providers/nebius.py +10 -10
  81. huggingface_hub/inference/_providers/novita.py +5 -5
  82. huggingface_hub/inference/_providers/nscale.py +4 -4
  83. huggingface_hub/inference/_providers/replicate.py +15 -15
  84. huggingface_hub/inference/_providers/sambanova.py +6 -6
  85. huggingface_hub/inference/_providers/together.py +7 -7
  86. huggingface_hub/lfs.py +24 -33
  87. huggingface_hub/repocard.py +16 -17
  88. huggingface_hub/repocard_data.py +56 -56
  89. huggingface_hub/serialization/__init__.py +0 -1
  90. huggingface_hub/serialization/_base.py +9 -9
  91. huggingface_hub/serialization/_dduf.py +7 -7
  92. huggingface_hub/serialization/_torch.py +28 -28
  93. huggingface_hub/utils/__init__.py +10 -4
  94. huggingface_hub/utils/_auth.py +5 -5
  95. huggingface_hub/utils/_cache_manager.py +31 -31
  96. huggingface_hub/utils/_deprecation.py +1 -1
  97. huggingface_hub/utils/_dotenv.py +3 -3
  98. huggingface_hub/utils/_fixes.py +0 -10
  99. huggingface_hub/utils/_git_credential.py +3 -3
  100. huggingface_hub/utils/_headers.py +7 -29
  101. huggingface_hub/utils/_http.py +369 -209
  102. huggingface_hub/utils/_pagination.py +4 -4
  103. huggingface_hub/utils/_paths.py +5 -5
  104. huggingface_hub/utils/_runtime.py +15 -13
  105. huggingface_hub/utils/_safetensors.py +21 -21
  106. huggingface_hub/utils/_subprocess.py +9 -9
  107. huggingface_hub/utils/_telemetry.py +3 -3
  108. huggingface_hub/utils/_typing.py +3 -3
  109. huggingface_hub/utils/_validators.py +53 -72
  110. huggingface_hub/utils/_xet.py +16 -16
  111. huggingface_hub/utils/_xet_progress_reporting.py +1 -1
  112. huggingface_hub/utils/insecure_hashlib.py +3 -9
  113. huggingface_hub/utils/tqdm.py +3 -3
  114. {huggingface_hub-0.35.1.dist-info → huggingface_hub-1.0.0rc1.dist-info}/METADATA +17 -26
  115. huggingface_hub-1.0.0rc1.dist-info/RECORD +161 -0
  116. huggingface_hub/inference/_providers/publicai.py +0 -6
  117. huggingface_hub/inference/_providers/scaleway.py +0 -28
  118. huggingface_hub/inference_api.py +0 -217
  119. huggingface_hub/keras_mixin.py +0 -500
  120. huggingface_hub/repository.py +0 -1477
  121. huggingface_hub/serialization/_tensorflow.py +0 -95
  122. huggingface_hub/utils/_hf_folder.py +0 -68
  123. huggingface_hub-0.35.1.dist-info/RECORD +0 -168
  124. {huggingface_hub-0.35.1.dist-info → huggingface_hub-1.0.0rc1.dist-info}/LICENSE +0 -0
  125. {huggingface_hub-0.35.1.dist-info → huggingface_hub-1.0.0rc1.dist-info}/WHEEL +0 -0
  126. {huggingface_hub-0.35.1.dist-info → huggingface_hub-1.0.0rc1.dist-info}/entry_points.txt +0 -0
  127. {huggingface_hub-0.35.1.dist-info → huggingface_hub-1.0.0rc1.dist-info}/top_level.txt +0 -0
huggingface_hub/inference/_mcp/mcp_client.py

@@ -3,9 +3,9 @@ import logging
 from contextlib import AsyncExitStack
 from datetime import timedelta
 from pathlib import Path
-from typing import TYPE_CHECKING, Any, AsyncIterable, Dict, List, Literal, Optional, Union, overload
+from typing import TYPE_CHECKING, Any, AsyncIterable, Literal, Optional, TypedDict, Union, overload

-from typing_extensions import NotRequired, TypeAlias, TypedDict, Unpack
+from typing_extensions import NotRequired, TypeAlias, Unpack

 from ...utils._runtime import get_hf_hub_version
 from .._generated._async_client import AsyncInferenceClient
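The dominant change across this release is mechanical: `typing.Dict`, `typing.List`, and `typing.Tuple` annotations are replaced everywhere by the built-in generics standardized in PEP 585, which implies a minimum supported Python of 3.9. A minimal before/after sketch (the function and its names are illustrative, not taken from the diff):

    # Before (0.35.x style): aliases imported from `typing`
    from typing import Dict, List, Optional

    def pick_tag(scores: Dict[str, float], tags: List[str]) -> Optional[str]:
        return tags[0] if tags else None

    # After (1.0 style): built-in generics, PEP 585 (Python >= 3.9)
    from typing import Optional

    def pick_tag(scores: dict[str, float], tags: list[str]) -> Optional[str]:
        return tags[0] if tags else None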
@@ -32,14 +32,14 @@ ServerType: TypeAlias = Literal["stdio", "sse", "http"]

 class StdioServerParameters_T(TypedDict):
     command: str
-    args: NotRequired[List[str]]
-    env: NotRequired[Dict[str, str]]
+    args: NotRequired[list[str]]
+    env: NotRequired[dict[str, str]]
     cwd: NotRequired[Union[str, Path, None]]


 class SSEServerParameters_T(TypedDict):
     url: str
-    headers: NotRequired[Dict[str, Any]]
+    headers: NotRequired[dict[str, Any]]
     timeout: NotRequired[float]
     sse_read_timeout: NotRequired[float]

@@ -84,9 +84,9 @@ class MCPClient:
         api_key: Optional[str] = None,
     ):
         # Initialize MCP sessions as a dictionary of ClientSession objects
-        self.sessions: Dict[ToolName, "ClientSession"] = {}
+        self.sessions: dict[ToolName, "ClientSession"] = {}
         self.exit_stack = AsyncExitStack()
-        self.available_tools: List[ChatCompletionInputTool] = []
+        self.available_tools: list[ChatCompletionInputTool] = []
         # To be able to send the model in the payload if `base_url` is provided
         if model is None and base_url is None:
             raise ValueError("At least one of `model` or `base_url` should be set in `MCPClient`.")
@@ -132,27 +132,27 @@ class MCPClient:
                 - "stdio": Standard input/output server (local)
                 - "sse": Server-sent events (SSE) server
                 - "http": StreamableHTTP server
-            **params (`Dict[str, Any]`):
+            **params (`dict[str, Any]`):
                 Server parameters that can be either:
                 - For stdio servers:
                     - command (str): The command to run the MCP server
-                    - args (List[str], optional): Arguments for the command
-                    - env (Dict[str, str], optional): Environment variables for the command
+                    - args (list[str], optional): Arguments for the command
+                    - env (dict[str, str], optional): Environment variables for the command
                     - cwd (Union[str, Path, None], optional): Working directory for the command
-                    - allowed_tools (List[str], optional): List of tool names to allow from this server
+                    - allowed_tools (list[str], optional): List of tool names to allow from this server
                 - For SSE servers:
                     - url (str): The URL of the SSE server
-                    - headers (Dict[str, Any], optional): Headers for the SSE connection
+                    - headers (dict[str, Any], optional): Headers for the SSE connection
                     - timeout (float, optional): Connection timeout
                     - sse_read_timeout (float, optional): SSE read timeout
-                    - allowed_tools (List[str], optional): List of tool names to allow from this server
+                    - allowed_tools (list[str], optional): List of tool names to allow from this server
                 - For StreamableHTTP servers:
                     - url (str): The URL of the StreamableHTTP server
-                    - headers (Dict[str, Any], optional): Headers for the StreamableHTTP connection
+                    - headers (dict[str, Any], optional): Headers for the StreamableHTTP connection
                     - timeout (timedelta, optional): Connection timeout
                     - sse_read_timeout (timedelta, optional): SSE read timeout
                     - terminate_on_close (bool, optional): Whether to terminate on close
-                    - allowed_tools (List[str], optional): List of tool names to allow from this server
+                    - allowed_tools (list[str], optional): List of tool names to allow from this server
         """
         from mcp import ClientSession, StdioServerParameters
         from mcp import types as mcp_types
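To make the parameter lists above concrete, here is a hedged sketch of registering a stdio server with `MCPClient`; the model name, command, and module are placeholder values, and cleanup via the exit stack is omitted:

    import asyncio

    from huggingface_hub import MCPClient

    async def main() -> None:
        client = MCPClient(model="Qwen/Qwen2.5-72B-Instruct")  # placeholder model
        # Mirrors the "stdio" parameters documented in the docstring above.
        await client.add_mcp_server(
            type="stdio",
            command="python",                # hypothetical local MCP server
            args=["-m", "my_mcp_server"],    # placeholder module name
            env={"LOG_LEVEL": "info"},
            allowed_tools=["search"],        # only expose this tool
        )

    asyncio.run(main())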
@@ -249,16 +249,16 @@ class MCPClient:

     async def process_single_turn_with_tools(
         self,
-        messages: List[Union[Dict, ChatCompletionInputMessage]],
-        exit_loop_tools: Optional[List[ChatCompletionInputTool]] = None,
+        messages: list[Union[dict, ChatCompletionInputMessage]],
+        exit_loop_tools: Optional[list[ChatCompletionInputTool]] = None,
         exit_if_first_chunk_no_tool: bool = False,
     ) -> AsyncIterable[Union[ChatCompletionStreamOutput, ChatCompletionInputMessage]]:
         """Process a query using `self.model` and available tools, yielding chunks and tool outputs.

         Args:
-            messages (`List[Dict]`):
+            messages (`list[dict]`):
                 List of message objects representing the conversation history
-            exit_loop_tools (`List[ChatCompletionInputTool]`, *optional*):
+            exit_loop_tools (`list[ChatCompletionInputTool]`, *optional*):
                 List of tools that should exit the generator when called
             exit_if_first_chunk_no_tool (`bool`, *optional*):
                 Exit if no tool is present in the first chunks. Default to False.
@@ -280,8 +280,8 @@ class MCPClient:
             stream=True,
         )

-        message: Dict[str, Any] = {"role": "unknown", "content": ""}
-        final_tool_calls: Dict[int, ChatCompletionStreamOutputDeltaToolCall] = {}
+        message: dict[str, Any] = {"role": "unknown", "content": ""}
+        final_tool_calls: dict[int, ChatCompletionStreamOutputDeltaToolCall] = {}
         num_of_chunks = 0

         # Read from stream
@@ -328,7 +328,7 @@ class MCPClient:
         message["role"] = "assistant"
         # Convert final_tool_calls to the format expected by OpenAI
         if final_tool_calls:
-            tool_calls_list: List[Dict[str, Any]] = []
+            tool_calls_list: list[dict[str, Any]] = []
             for tc in final_tool_calls.values():
                 tool_calls_list.append(
                     {
@@ -346,6 +346,17 @@ class MCPClient:
         # Process tool calls one by one
         for tool_call in final_tool_calls.values():
             function_name = tool_call.function.name
+            if function_name is None:
+                message = ChatCompletionInputMessage.parse_obj_as_instance(
+                    {
+                        "role": "tool",
+                        "tool_call_id": tool_call.id,
+                        "content": "Invalid tool call with no function name.",
+                    }
+                )
+                messages.append(message)
+                yield message
+                continue  # move to next tool call
             try:
                 function_args = json.loads(tool_call.function.arguments or "{}")
             except json.JSONDecodeError as err:
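The new block above changes failure behavior: a streamed tool call that never received a function name now produces a `role="tool"` error message, appended to the history and yielded, instead of raising. A rough consumer-side sketch, based only on the signature shown in this file (the user message is illustrative):

    from huggingface_hub import MCPClient

    async def run_turn(client: MCPClient) -> None:
        messages = [{"role": "user", "content": "What's the weather in Paris?"}]
        # Yields ChatCompletionStreamOutput chunks and ChatCompletionInputMessage
        # tool results, including the new "Invalid tool call" error messages.
        async for item in client.process_single_turn_with_tools(messages):
            print(type(item).__name__)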
huggingface_hub/inference/_mcp/types.py

@@ -1,4 +1,4 @@
-from typing import Dict, List, Literal, TypedDict, Union
+from typing import Literal, TypedDict, Union

 from typing_extensions import NotRequired

@@ -13,24 +13,24 @@ class InputConfig(TypedDict, total=False):
 class StdioServerConfig(TypedDict):
     type: Literal["stdio"]
     command: str
-    args: List[str]
-    env: Dict[str, str]
+    args: list[str]
+    env: dict[str, str]
     cwd: str
-    allowed_tools: NotRequired[List[str]]
+    allowed_tools: NotRequired[list[str]]


 class HTTPServerConfig(TypedDict):
     type: Literal["http"]
     url: str
-    headers: Dict[str, str]
-    allowed_tools: NotRequired[List[str]]
+    headers: dict[str, str]
+    allowed_tools: NotRequired[list[str]]


 class SSEServerConfig(TypedDict):
     type: Literal["sse"]
     url: str
-    headers: Dict[str, str]
-    allowed_tools: NotRequired[List[str]]
+    headers: dict[str, str]
+    allowed_tools: NotRequired[list[str]]


 ServerConfig = Union[StdioServerConfig, HTTPServerConfig, SSEServerConfig]
@@ -41,5 +41,5 @@ class AgentConfig(TypedDict):
     model: str
     provider: str
     apiKey: NotRequired[str]
-    inputs: List[InputConfig]
-    servers: List[ServerConfig]
+    inputs: list[InputConfig]
+    servers: list[ServerConfig]
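Taken together, these TypedDicts describe the agent configuration. A sketch of a conforming config object (all values are placeholders):

    from huggingface_hub.inference._mcp.types import AgentConfig

    config: AgentConfig = {
        "model": "Qwen/Qwen2.5-72B-Instruct",  # placeholder model
        "provider": "auto",
        "inputs": [],
        "servers": [
            {  # StdioServerConfig
                "type": "stdio",
                "command": "python",
                "args": ["-m", "my_mcp_server"],  # hypothetical server module
                "env": {},
                "cwd": ".",
            },
            {  # HTTPServerConfig
                "type": "http",
                "url": "https://example.com/mcp",  # placeholder endpoint
                "headers": {},
            },
        ],
    }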
huggingface_hub/inference/_mcp/utils.py

@@ -6,7 +6,7 @@ Formatting utilities taken from the JS SDK: https://github.com/huggingface/huggi

 import json
 from pathlib import Path
-from typing import TYPE_CHECKING, List, Optional, Tuple
+from typing import TYPE_CHECKING, Optional

 from huggingface_hub import snapshot_download
 from huggingface_hub.errors import EntryNotFoundError
@@ -36,7 +36,7 @@ def format_result(result: "mcp_types.CallToolResult") -> str:
     if len(content) == 0:
         return "[No content]"

-    formatted_parts: List[str] = []
+    formatted_parts: list[str] = []

     for item in content:
         if item.type == "text":
@@ -84,10 +84,10 @@ def _get_base64_size(base64_str: str) -> int:
     return (len(base64_str) * 3) // 4 - padding


-def _load_agent_config(agent_path: Optional[str]) -> Tuple[AgentConfig, Optional[str]]:
+def _load_agent_config(agent_path: Optional[str]) -> tuple[AgentConfig, Optional[str]]:
    """Load server config and prompt."""

-    def _read_dir(directory: Path) -> Tuple[AgentConfig, Optional[str]]:
+    def _read_dir(directory: Path) -> tuple[AgentConfig, Optional[str]]:
         cfg_file = directory / FILENAME_CONFIG
         if not cfg_file.exists():
             raise FileNotFoundError(f" Config file not found in {directory}! Please make sure it exists locally")
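The `_get_base64_size` arithmetic in the hunk above follows from base64 mapping every 3 input bytes to 4 output characters, minus one byte per `=` padding character. A quick self-check:

    import base64

    encoded = base64.b64encode(b"hello").decode()  # "aGVsbG8=", one '=' of padding
    padding = encoded.count("=")                   # 1
    size = (len(encoded) * 3) // 4 - padding       # (8 * 3) // 4 - 1 == 5
    assert size == len(b"hello")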
huggingface_hub/inference/_providers/__init__.py

@@ -1,4 +1,4 @@
-from typing import Dict, Literal, Optional, Union
+from typing import Literal, Optional, Union

 from huggingface_hub.inference._providers.featherless_ai import (
     FeatherlessConversationalTask,
@@ -36,10 +36,8 @@ from .nebius import (
 from .novita import NovitaConversationalTask, NovitaTextGenerationTask, NovitaTextToVideoTask
 from .nscale import NscaleConversationalTask, NscaleTextToImageTask
 from .openai import OpenAIConversationalTask
-from .publicai import PublicAIConversationalTask
 from .replicate import ReplicateImageToImageTask, ReplicateTask, ReplicateTextToImageTask, ReplicateTextToSpeechTask
 from .sambanova import SambanovaConversationalTask, SambanovaFeatureExtractionTask
-from .scaleway import ScalewayConversationalTask, ScalewayFeatureExtractionTask
 from .together import TogetherConversationalTask, TogetherTextGenerationTask, TogetherTextToImageTask


@@ -60,16 +58,14 @@ PROVIDER_T = Literal[
     "novita",
     "nscale",
     "openai",
-    "publicai",
     "replicate",
     "sambanova",
-    "scaleway",
     "together",
 ]

 PROVIDER_OR_POLICY_T = Union[PROVIDER_T, Literal["auto"]]

-PROVIDERS: Dict[PROVIDER_T, Dict[str, TaskProviderHelper]] = {
+PROVIDERS: dict[PROVIDER_T, dict[str, TaskProviderHelper]] = {
     "black-forest-labs": {
         "text-to-image": BlackForestLabsTextToImageTask(),
     },
@@ -148,9 +144,6 @@ PROVIDERS: Dict[PROVIDER_T, Dict[str, TaskProviderHelper]] = {
     "openai": {
         "conversational": OpenAIConversationalTask(),
     },
-    "publicai": {
-        "conversational": PublicAIConversationalTask(),
-    },
     "replicate": {
         "image-to-image": ReplicateImageToImageTask(),
         "text-to-image": ReplicateTextToImageTask(),
@@ -161,10 +154,6 @@ PROVIDERS: Dict[PROVIDER_T, Dict[str, TaskProviderHelper]] = {
         "conversational": SambanovaConversationalTask(),
         "feature-extraction": SambanovaFeatureExtractionTask(),
     },
-    "scaleway": {
-        "conversational": ScalewayConversationalTask(),
-        "feature-extraction": ScalewayFeatureExtractionTask(),
-    },
     "together": {
         "text-to-image": TogetherTextToImageTask(),
         "conversational": TogetherConversationalTask(),
huggingface_hub/inference/_providers/_common.py

@@ -1,5 +1,5 @@
 from functools import lru_cache
-from typing import Any, Dict, List, Optional, Union, overload
+from typing import Any, Optional, Union, overload

 from huggingface_hub import constants
 from huggingface_hub.hf_api import InferenceProviderMapping
@@ -14,7 +14,7 @@ logger = logging.get_logger(__name__)
 # Dev purposes only.
 # If you want to try to run inference for a new model locally before it's registered on huggingface.co
 # for a given Inference Provider, you can add it to the following dictionary.
-HARDCODED_MODEL_INFERENCE_MAPPING: Dict[str, Dict[str, InferenceProviderMapping]] = {
+HARDCODED_MODEL_INFERENCE_MAPPING: dict[str, dict[str, InferenceProviderMapping]] = {
     # "HF model ID" => InferenceProviderMapping object initialized with "Model ID on Inference Provider's side"
     #
     # Example:
@@ -33,20 +33,19 @@ HARDCODED_MODEL_INFERENCE_MAPPING: Dict[str, Dict[str, InferenceProviderMapping]
     "nscale": {},
     "replicate": {},
     "sambanova": {},
-    "scaleway": {},
     "together": {},
 }


 @overload
-def filter_none(obj: Dict[str, Any]) -> Dict[str, Any]: ...
+def filter_none(obj: dict[str, Any]) -> dict[str, Any]: ...
 @overload
-def filter_none(obj: List[Any]) -> List[Any]: ...
+def filter_none(obj: list[Any]) -> list[Any]: ...


-def filter_none(obj: Union[Dict[str, Any], List[Any]]) -> Union[Dict[str, Any], List[Any]]:
+def filter_none(obj: Union[dict[str, Any], list[Any]]) -> Union[dict[str, Any], list[Any]]:
     if isinstance(obj, dict):
-        cleaned: Dict[str, Any] = {}
+        cleaned: dict[str, Any] = {}
         for k, v in obj.items():
             if v is None:
                 continue
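`filter_none` is what the provider helpers use to drop unset parameters before building a payload. Only the start of the dict branch is visible in this hunk, so the example below sticks to top-level behavior, which the shown lines do establish:

    from huggingface_hub.inference._providers._common import filter_none

    payload = filter_none({"prompt": "a cat", "seed": None, "steps": 20})
    assert payload == {"prompt": "a cat", "steps": 20}  # None entries dropped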
@@ -73,11 +72,11 @@ class TaskProviderHelper:
         self,
         *,
         inputs: Any,
-        parameters: Dict[str, Any],
-        headers: Dict,
+        parameters: dict[str, Any],
+        headers: dict,
         model: Optional[str],
         api_key: Optional[str],
-        extra_payload: Optional[Dict[str, Any]] = None,
+        extra_payload: Optional[dict[str, Any]] = None,
     ) -> RequestParameters:
         """
         Prepare the request to be sent to the provider.
@@ -124,7 +123,7 @@ class TaskProviderHelper:

     def get_response(
         self,
-        response: Union[bytes, Dict],
+        response: Union[bytes, dict],
         request_params: Optional[RequestParameters] = None,
     ) -> Any:
         """
@@ -183,8 +182,8 @@ class TaskProviderHelper:
         return provider_mapping

     def _normalize_headers(
-        self, headers: Dict[str, Any], payload: Optional[Dict[str, Any]], data: Optional[MimeBytes]
-    ) -> Dict[str, Any]:
+        self, headers: dict[str, Any], payload: Optional[dict[str, Any]], data: Optional[MimeBytes]
+    ) -> dict[str, Any]:
         """Normalize the headers to use for the request.

        Override this method in subclasses for customized headers.
@@ -197,7 +196,7 @@ class TaskProviderHelper:
             normalized_headers["content-type"] = "application/json"
         return normalized_headers

-    def _prepare_headers(self, headers: Dict, api_key: str) -> Dict[str, Any]:
+    def _prepare_headers(self, headers: dict, api_key: str) -> dict[str, Any]:
         """Return the headers to use for the request.

        Override this method in subclasses for customized headers.
@@ -232,8 +231,8 @@ class TaskProviderHelper:
         return ""

     def _prepare_payload_as_dict(
-        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
-    ) -> Optional[Dict]:
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
         """Return the payload to use for the request, as a dict.

        Override this method in subclasses for customized payloads.
@@ -244,9 +243,9 @@ class TaskProviderHelper:
     def _prepare_payload_as_bytes(
         self,
         inputs: Any,
-        parameters: Dict,
+        parameters: dict,
         provider_mapping_info: InferenceProviderMapping,
-        extra_payload: Optional[Dict],
+        extra_payload: Optional[dict],
     ) -> Optional[MimeBytes]:
         """Return the body to use for the request, as bytes.

@@ -270,10 +269,10 @@ class BaseConversationalTask(TaskProviderHelper):

     def _prepare_payload_as_dict(
         self,
-        inputs: List[Union[Dict, ChatCompletionInputMessage]],
-        parameters: Dict,
+        inputs: list[Union[dict, ChatCompletionInputMessage]],
+        parameters: dict,
         provider_mapping_info: InferenceProviderMapping,
-    ) -> Optional[Dict]:
+    ) -> Optional[dict]:
         return filter_none({"messages": inputs, **parameters, "model": provider_mapping_info.provider_id})


@@ -290,13 +289,13 @@ class BaseTextGenerationTask(TaskProviderHelper):
         return "/v1/completions"

     def _prepare_payload_as_dict(
-        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
-    ) -> Optional[Dict]:
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
         return filter_none({"prompt": inputs, **parameters, "model": provider_mapping_info.provider_id})


 @lru_cache(maxsize=None)
-def _fetch_inference_provider_mapping(model: str) -> List["InferenceProviderMapping"]:
+def _fetch_inference_provider_mapping(model: str) -> list["InferenceProviderMapping"]:
     """
     Fetch provider mappings for a model from the Hub.
     """
@@ -309,7 +308,7 @@ def _fetch_inference_provider_mapping(model: str) -> List["InferenceProviderMapp
     return provider_mapping


-def recursive_merge(dict1: Dict, dict2: Dict) -> Dict:
+def recursive_merge(dict1: dict, dict2: dict) -> dict:
     return {
         **dict1,
         **{
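The `recursive_merge` body is cut off by the diff viewer here. Judging from the visible `{**dict1, **{...}}` shape and the name, nested dicts are presumably merged key by key with `dict2` taking precedence; the sketch below is assumed behavior, not confirmed by this hunk:

    from huggingface_hub.inference._providers._common import recursive_merge

    merged = recursive_merge(
        {"generation": {"steps": 20, "seed": 1}, "model": "base"},
        {"generation": {"steps": 30}},
    )
    # Assumed result: {"generation": {"steps": 30, "seed": 1}, "model": "base"}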
huggingface_hub/inference/_providers/black_forest_labs.py

@@ -1,5 +1,5 @@
 import time
-from typing import Any, Dict, Optional, Union
+from typing import Any, Optional, Union

 from huggingface_hub.hf_api import InferenceProviderMapping
 from huggingface_hub.inference._common import RequestParameters, _as_dict
@@ -18,7 +18,7 @@ class BlackForestLabsTextToImageTask(TaskProviderHelper):
     def __init__(self):
         super().__init__(provider="black-forest-labs", base_url="https://api.us1.bfl.ai", task="text-to-image")

-    def _prepare_headers(self, headers: Dict, api_key: str) -> Dict[str, Any]:
+    def _prepare_headers(self, headers: dict, api_key: str) -> dict[str, Any]:
         headers = super()._prepare_headers(headers, api_key)
         if not api_key.startswith("hf_"):
             _ = headers.pop("authorization")
@@ -29,8 +29,8 @@ class BlackForestLabsTextToImageTask(TaskProviderHelper):
         return f"/v1/{mapped_model}"

     def _prepare_payload_as_dict(
-        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
-    ) -> Optional[Dict]:
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
         parameters = filter_none(parameters)
         if "num_inference_steps" in parameters:
             parameters["steps"] = parameters.pop("num_inference_steps")
@@ -39,7 +39,7 @@ class BlackForestLabsTextToImageTask(TaskProviderHelper):

         return {"prompt": inputs, **parameters}

-    def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any:
+    def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any:
         """
         Polling mechanism for Black Forest Labs since the API is asynchronous.
         """
@@ -50,7 +50,7 @@ class BlackForestLabsTextToImageTask(TaskProviderHelper):

         response = session.get(url, headers={"Content-Type": "application/json"})  # type: ignore
         response.raise_for_status()  # type: ignore
-        response_json: Dict = response.json()  # type: ignore
+        response_json: dict = response.json()  # type: ignore
         status = response_json.get("status")
         logger.info(
             f"Polling generation result from {url}. Current status: {status}. "
huggingface_hub/inference/_providers/cohere.py

@@ -1,4 +1,4 @@
-from typing import Any, Dict, Optional
+from typing import Any, Optional

 from huggingface_hub.hf_api import InferenceProviderMapping

@@ -17,8 +17,8 @@ class CohereConversationalTask(BaseConversationalTask):
         return "/compatibility/v1/chat/completions"

     def _prepare_payload_as_dict(
-        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
-    ) -> Optional[Dict]:
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
         payload = super()._prepare_payload_as_dict(inputs, parameters, provider_mapping_info)
         response_format = parameters.get("response_format")
         if isinstance(response_format, dict) and response_format.get("type") == "json_schema":
huggingface_hub/inference/_providers/fal_ai.py

@@ -1,7 +1,7 @@
 import base64
 import time
 from abc import ABC
-from typing import Any, Dict, Optional, Union
+from typing import Any, Optional, Union
 from urllib.parse import urlparse

 from huggingface_hub import constants
@@ -22,7 +22,7 @@ class FalAITask(TaskProviderHelper, ABC):
     def __init__(self, task: str):
         super().__init__(provider="fal-ai", base_url="https://fal.run", task=task)

-    def _prepare_headers(self, headers: Dict, api_key: str) -> Dict[str, Any]:
+    def _prepare_headers(self, headers: dict, api_key: str) -> dict[str, Any]:
         headers = super()._prepare_headers(headers, api_key)
         if not api_key.startswith("hf_"):
             headers["authorization"] = f"Key {api_key}"
@@ -36,7 +36,7 @@ class FalAIQueueTask(TaskProviderHelper, ABC):
     def __init__(self, task: str):
         super().__init__(provider="fal-ai", base_url="https://queue.fal.run", task=task)

-    def _prepare_headers(self, headers: Dict, api_key: str) -> Dict[str, Any]:
+    def _prepare_headers(self, headers: dict, api_key: str) -> dict[str, Any]:
         headers = super()._prepare_headers(headers, api_key)
         if not api_key.startswith("hf_"):
             headers["authorization"] = f"Key {api_key}"
@@ -50,7 +50,7 @@ class FalAIQueueTask(TaskProviderHelper, ABC):

     def get_response(
         self,
-        response: Union[bytes, Dict],
+        response: Union[bytes, dict],
         request_params: Optional[RequestParameters] = None,
     ) -> Any:
         response_dict = _as_dict(response)
@@ -91,8 +91,8 @@ class FalAIAutomaticSpeechRecognitionTask(FalAITask):
         super().__init__("automatic-speech-recognition")

     def _prepare_payload_as_dict(
-        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
-    ) -> Optional[Dict]:
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
         if isinstance(inputs, str) and inputs.startswith(("http://", "https://")):
             # If input is a URL, pass it directly
             audio_url = inputs
@@ -108,7 +108,7 @@ class FalAIAutomaticSpeechRecognitionTask(FalAITask):

         return {"audio_url": audio_url, **filter_none(parameters)}

-    def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any:
+    def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any:
         text = _as_dict(response)["text"]
         if not isinstance(text, str):
             raise ValueError(f"Unexpected output format from FalAI API. Expected string, got {type(text)}.")
@@ -120,9 +120,9 @@ class FalAITextToImageTask(FalAITask):
         super().__init__("text-to-image")

     def _prepare_payload_as_dict(
-        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
-    ) -> Optional[Dict]:
-        payload: Dict[str, Any] = {
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
+        payload: dict[str, Any] = {
             "prompt": inputs,
             **filter_none(parameters),
         }
@@ -145,7 +145,7 @@ class FalAITextToImageTask(FalAITask):

         return payload

-    def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any:
+    def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any:
         url = _as_dict(response)["images"][0]["url"]
         return get_session().get(url).content

@@ -155,11 +155,11 @@ class FalAITextToSpeechTask(FalAITask):
         super().__init__("text-to-speech")

     def _prepare_payload_as_dict(
-        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
-    ) -> Optional[Dict]:
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
         return {"text": inputs, **filter_none(parameters)}

-    def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any:
+    def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any:
         url = _as_dict(response)["audio"]["url"]
         return get_session().get(url).content

@@ -169,13 +169,13 @@ class FalAITextToVideoTask(FalAIQueueTask):
         super().__init__("text-to-video")

     def _prepare_payload_as_dict(
-        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
-    ) -> Optional[Dict]:
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
         return {"prompt": inputs, **filter_none(parameters)}

     def get_response(
         self,
-        response: Union[bytes, Dict],
+        response: Union[bytes, dict],
         request_params: Optional[RequestParameters] = None,
     ) -> Any:
         output = super().get_response(response, request_params)
@@ -188,10 +188,10 @@ class FalAIImageToImageTask(FalAIQueueTask):
         super().__init__("image-to-image")

     def _prepare_payload_as_dict(
-        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
-    ) -> Optional[Dict]:
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
         image_url = _as_url(inputs, default_mime_type="image/jpeg")
-        payload: Dict[str, Any] = {
+        payload: dict[str, Any] = {
             "image_url": image_url,
             **filter_none(parameters),
         }
@@ -207,7 +207,7 @@ class FalAIImageToImageTask(FalAIQueueTask):

     def get_response(
         self,
-        response: Union[bytes, Dict],
+        response: Union[bytes, dict],
         request_params: Optional[RequestParameters] = None,
     ) -> Any:
         output = super().get_response(response, request_params)
@@ -220,10 +220,10 @@ class FalAIImageToVideoTask(FalAIQueueTask):
         super().__init__("image-to-video")

     def _prepare_payload_as_dict(
-        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
-    ) -> Optional[Dict]:
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
         image_url = _as_url(inputs, default_mime_type="image/jpeg")
-        payload: Dict[str, Any] = {
+        payload: dict[str, Any] = {
             "image_url": image_url,
             **filter_none(parameters),
         }
@@ -238,7 +238,7 @@ class FalAIImageToVideoTask(FalAIQueueTask):

     def get_response(
         self,
-        response: Union[bytes, Dict],
+        response: Union[bytes, dict],
         request_params: Optional[RequestParameters] = None,
     ) -> Any:
         output = super().get_response(response, request_params)
huggingface_hub/inference/_providers/featherless_ai.py

@@ -1,4 +1,4 @@
-from typing import Any, Dict, Optional, Union
+from typing import Any, Optional, Union

 from huggingface_hub.hf_api import InferenceProviderMapping
 from huggingface_hub.inference._common import RequestParameters, _as_dict
@@ -15,14 +15,14 @@ class FeatherlessTextGenerationTask(BaseTextGenerationTask):
         super().__init__(provider=_PROVIDER, base_url=_BASE_URL)

     def _prepare_payload_as_dict(
-        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
-    ) -> Optional[Dict]:
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
         params = filter_none(parameters.copy())
         params["max_tokens"] = params.pop("max_new_tokens", None)

         return {"prompt": inputs, **params, "model": provider_mapping_info.provider_id}

-    def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any:
+    def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any:
         output = _as_dict(response)["choices"][0]
         return {
             "generated_text": output["text"],