huggingface-hub 0.32.1__py3-none-any.whl → 0.32.3__py3-none-any.whl

This diff compares the contents of two publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions exactly as they appear in the public registry.

This version of huggingface-hub has been flagged as potentially problematic.

huggingface_hub/__init__.py
@@ -46,7 +46,7 @@ import sys
 from typing import TYPE_CHECKING


-__version__ = "0.32.1"
+__version__ = "0.32.3"

 # Alphabetical order of definitions is ensured in tests
 # WARNING: any comment added in this dictionary definition will be lost when

huggingface_hub/_snapshot_download.py
@@ -1,6 +1,6 @@
 import os
 from pathlib import Path
-from typing import Dict, List, Literal, Optional, Union
+from typing import Dict, Iterable, List, Literal, Optional, Union

 import requests
 from tqdm.auto import tqdm as base_tqdm
@@ -15,13 +15,15 @@ from .errors import (
     RevisionNotFoundError,
 )
 from .file_download import REGEX_COMMIT_HASH, hf_hub_download, repo_folder_name
-from .hf_api import DatasetInfo, HfApi, ModelInfo, SpaceInfo
+from .hf_api import DatasetInfo, HfApi, ModelInfo, RepoFile, SpaceInfo
 from .utils import OfflineModeIsEnabled, filter_repo_objects, logging, validate_hf_hub_args
 from .utils import tqdm as hf_tqdm


 logger = logging.get_logger(__name__)

+VERY_LARGE_REPO_THRESHOLD = 50000  # After this limit, we don't consider `repo_info.siblings` to be reliable enough
+

 @validate_hf_hub_args
 def snapshot_download(
@@ -145,20 +147,22 @@ def snapshot_download(

     storage_folder = os.path.join(cache_dir, repo_folder_name(repo_id=repo_id, repo_type=repo_type))

+    api = HfApi(
+        library_name=library_name,
+        library_version=library_version,
+        user_agent=user_agent,
+        endpoint=endpoint,
+        headers=headers,
+        token=token,
+    )
+
     repo_info: Union[ModelInfo, DatasetInfo, SpaceInfo, None] = None
     api_call_error: Optional[Exception] = None
     if not local_files_only:
         # try/except logic to handle different errors => taken from `hf_hub_download`
         try:
             # if we have internet connection we want to list files to download
-            api = HfApi(
-                library_name=library_name,
-                library_version=library_version,
-                user_agent=user_agent,
-                endpoint=endpoint,
-                headers=headers,
-            )
-            repo_info = api.repo_info(repo_id=repo_id, repo_type=repo_type, revision=revision, token=token)
+            repo_info = api.repo_info(repo_id=repo_id, repo_type=repo_type, revision=revision)
         except (requests.exceptions.SSLError, requests.exceptions.ProxyError):
             # Actually raise for those subclasses of ConnectionError
             raise
@@ -251,13 +255,31 @@ def snapshot_download(
     # => let's download the files!
     assert repo_info.sha is not None, "Repo info returned from server must have a revision sha."
     assert repo_info.siblings is not None, "Repo info returned from server must have a siblings list."
-    filtered_repo_files = list(
-        filter_repo_objects(
-            items=[f.rfilename for f in repo_info.siblings],
-            allow_patterns=allow_patterns,
-            ignore_patterns=ignore_patterns,
+
+    # Corner case: on very large repos, the siblings list in `repo_info` might not contain all files.
+    # In that case, we need to use the `list_repo_tree` method to prevent caching issues.
+    repo_files: Iterable[str] = [f.rfilename for f in repo_info.siblings]
+    has_many_files = len(repo_info.siblings) > VERY_LARGE_REPO_THRESHOLD
+    if has_many_files:
+        logger.info("The repo has more than 50,000 files. Using `list_repo_tree` to ensure all files are listed.")
+        repo_files = (
+            f.rfilename
+            for f in api.list_repo_tree(repo_id=repo_id, recursive=True, revision=revision, repo_type=repo_type)
+            if isinstance(f, RepoFile)
         )
+
+    filtered_repo_files: Iterable[str] = filter_repo_objects(
+        items=repo_files,
+        allow_patterns=allow_patterns,
+        ignore_patterns=ignore_patterns,
     )
+
+    if not has_many_files:
+        filtered_repo_files = list(filtered_repo_files)
+        tqdm_desc = f"Fetching {len(filtered_repo_files)} files"
+    else:
+        tqdm_desc = "Fetching ... files"
+
     commit_hash = repo_info.sha
     snapshot_folder = os.path.join(storage_folder, "snapshots", commit_hash)
     # if passed revision is not identical to commit_hash
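
For context, here is a minimal sketch (not part of the diff) of the listing strategy this hunk introduces: below the 50,000-file threshold the eager `siblings` list is used, while above it file names are streamed lazily from `HfApi.list_repo_tree`. The repo id "gpt2" is only a placeholder.

    from huggingface_hub import HfApi
    from huggingface_hub.hf_api import RepoFile

    api = HfApi()
    info = api.repo_info(repo_id="gpt2", repo_type="model")  # placeholder repo
    if len(info.siblings) > 50_000:
        # Very large repo: stream file names from the paginated tree endpoint
        files = (
            entry.rfilename
            for entry in api.list_repo_tree("gpt2", recursive=True)
            if isinstance(entry, RepoFile)  # skip RepoFolder entries
        )
    else:
        # Small repo: the eager siblings list is complete and cheap to use
        files = [s.rfilename for s in info.siblings]
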
@@ -305,7 +327,7 @@ def snapshot_download(
         thread_map(
             _inner_hf_hub_download,
             filtered_repo_files,
-            desc=f"Fetching {len(filtered_repo_files)} files",
+            desc=tqdm_desc,
             max_workers=max_workers,
             # User can use its own tqdm class or the default one from `huggingface_hub.utils`
             tqdm_class=tqdm_class or hf_tqdm,

huggingface_hub/inference/_mcp/_cli_hacks.py (new file)
@@ -0,0 +1,87 @@
+import asyncio
+import sys
+from functools import partial
+
+import typer
+
+
+def _patch_anyio_open_process():
+    """
+    Patch anyio.open_process to allow detached processes on Windows and Unix-like systems.
+
+    This is necessary to prevent the MCP client from being interrupted by Ctrl+C when running in the CLI.
+    """
+    import subprocess
+
+    import anyio
+
+    if getattr(anyio, "_tiny_agents_patched", False):
+        return
+    anyio._tiny_agents_patched = True
+
+    original_open_process = anyio.open_process
+
+    if sys.platform == "win32":
+        # On Windows, we need to set the creation flags to create a new process group
+
+        async def open_process_in_new_group(*args, **kwargs):
+            """
+            Wrapper for open_process to handle Windows-specific process creation flags.
+            """
+            # Ensure we pass the creation flags for Windows
+            kwargs.setdefault("creationflags", subprocess.CREATE_NEW_PROCESS_GROUP)
+            return await original_open_process(*args, **kwargs)
+
+        anyio.open_process = open_process_in_new_group
+    else:
+        # For Unix-like systems, we can use setsid to create a new session
+        async def open_process_in_new_group(*args, **kwargs):
+            """
+            Wrapper for open_process to handle Unix-like systems with start_new_session=True.
+            """
+            kwargs.setdefault("start_new_session", True)
+            return await original_open_process(*args, **kwargs)
+
+        anyio.open_process = open_process_in_new_group
+
+
+async def _async_prompt(exit_event: asyncio.Event, prompt: str = "» ") -> str:
+    """
+    Asynchronous prompt function that reads input from stdin without blocking.
+
+    This function is designed to work in an asynchronous context, allowing the event loop to gracefully stop it (e.g. on Ctrl+C).
+
+    Alternatively, we could use https://github.com/vxgmichel/aioconsole but that would be an additional dependency.
+    """
+    loop = asyncio.get_event_loop()
+
+    if sys.platform == "win32":
+        # Windows: Use run_in_executor to avoid blocking the event loop
+        # Degraded solution: this is not ideal as user will have to CTRL+C once more to stop the prompt (and it'll not be graceful)
+        return await loop.run_in_executor(None, partial(typer.prompt, prompt, prompt_suffix=" "))
+    else:
+        # UNIX-like: Use loop.add_reader for non-blocking stdin read
+        future = loop.create_future()
+
+        def on_input():
+            line = sys.stdin.readline()
+            loop.remove_reader(sys.stdin)
+            future.set_result(line)
+
+        print(prompt, end=" ", flush=True)
+        loop.add_reader(sys.stdin, on_input)  # not supported on Windows
+
+        # Wait for user input or exit event
+        # Wait until either the user hits enter or exit_event is set
+        await asyncio.wait(
+            [future, exit_event.wait()],
+            return_when=asyncio.FIRST_COMPLETED,
+        )
+
+        # Check which one has been triggered
+        if exit_event.is_set():
+            future.cancel()
+            return ""
+
+        line = await future
+        return line.strip()
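
A minimal usage sketch for the new `_async_prompt` helper (these are private helpers, so the import path is internal and may change; illustration only):

    import asyncio

    from huggingface_hub.inference._mcp._cli_hacks import _async_prompt

    async def main() -> None:
        exit_event = asyncio.Event()
        # Elsewhere a SIGINT handler would call exit_event.set(); here we
        # simply read one line without blocking the event loop.
        line = await _async_prompt(exit_event=exit_event)
        print(f"you typed: {line!r}")

    asyncio.run(main())
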

huggingface_hub/inference/_mcp/agent.py
@@ -20,7 +20,7 @@ class Agent(MCPClient):
     </Tip>

    Args:
-        model (`str`):
+        model (`str`, *optional*):
            The model to run inference with. Can be a model id hosted on the Hugging Face Hub, e.g. `meta-llama/Meta-Llama-3-8B-Instruct`
            or a URL to a deployed Inference Endpoint or other local or remote endpoint.
        servers (`Iterable[Dict]`):
@@ -28,6 +28,8 @@ class Agent(MCPClient):
         provider (`str`, *optional*):
            Name of the provider to use for inference. Defaults to "auto" i.e. the first of the providers available for the model, sorted by the user's order in https://hf.co/settings/inference-providers.
            If model is a URL or `base_url` is passed, then `provider` is not used.
+        base_url (`str`, *optional*):
+            The base URL to run inference. Defaults to None.
        api_key (`str`, *optional*):
            Token to use for authentication. Will default to the locally Hugging Face saved token if not provided. You can also use your own provider API key to interact directly with the provider's service.
        prompt (`str`, *optional*):
@@ -37,13 +39,14 @@ class Agent(MCPClient):
     def __init__(
        self,
        *,
-        model: str,
+        model: Optional[str] = None,
        servers: Iterable[Dict],
        provider: Optional[PROVIDER_OR_POLICY_T] = None,
+        base_url: Optional[str] = None,
        api_key: Optional[str] = None,
        prompt: Optional[str] = None,
    ):
-        super().__init__(model=model, provider=provider, api_key=api_key)
+        super().__init__(model=model, provider=provider, base_url=base_url, api_key=api_key)
        self._servers_cfg = list(servers)
        self.messages: List[Union[Dict, ChatCompletionInputMessage]] = [
            {"role": "system", "content": prompt or DEFAULT_SYSTEM_PROMPT}

huggingface_hub/inference/_mcp/cli.py
@@ -1,12 +1,13 @@
 import asyncio
 import os
 import signal
-from functools import partial
+import traceback
 from typing import Any, Dict, List, Optional

 import typer
 from rich import print

+from ._cli_hacks import _async_prompt, _patch_anyio_open_process
 from .agent import Agent
 from .utils import _load_agent_config

@@ -24,11 +25,6 @@ run_cli = typer.Typer(
 app.add_typer(run_cli, name="run")


-async def _ainput(prompt: str = "» ") -> str:
-    loop = asyncio.get_running_loop()
-    return await loop.run_in_executor(None, partial(typer.prompt, prompt, prompt_suffix=" "))
-
-
 async def run_agent(
    agent_path: Optional[str],
 ) -> None:
@@ -40,11 +36,15 @@ async def run_agent(
             Path to a local folder containing an `agent.json` and optionally a custom `PROMPT.md` file or a built-in agent stored in a Hugging Face dataset.

    """
+    _patch_anyio_open_process()  # Hacky way to prevent stdio connections to be stopped by Ctrl+C
+
    config, prompt = _load_agent_config(agent_path)

+    inputs: List[Dict[str, Any]] = config.get("inputs", [])
    servers: List[Dict[str, Any]] = config.get("servers", [])

    abort_event = asyncio.Event()
+    exit_event = asyncio.Event()
    first_sigint = True

    loop = asyncio.get_running_loop()
@@ -59,8 +59,7 @@ async def run_agent(
             return

        print("\n[red]Exiting...[/red]", flush=True)
-
-        os._exit(130)
+        exit_event.set()

    try:
        sigint_registered_in_loop = False
@@ -70,9 +69,65 @@ async def run_agent(
         except (AttributeError, NotImplementedError):
            # Windows (or any loop that doesn't support it) : fall back to sync
            signal.signal(signal.SIGINT, lambda *_: _sigint_handler())
+
+        # Handle inputs (i.e. env variables injection)
+        if len(inputs) > 0:
+            print(
+                "[bold blue]Some initial inputs are required by the agent. "
+                "Please provide a value or leave empty to load from env.[/bold blue]"
+            )
+            for input_item in inputs:
+                input_id = input_item["id"]
+                description = input_item["description"]
+                env_special_value = "${input:" + input_id + "}"  # Special value to indicate env variable injection
+
+                # Check env variables that will use this input
+                input_vars = list(
+                    {
+                        key
+                        for server in servers
+                        for key, value in server.get("config", {}).get("env", {}).items()
+                        if value == env_special_value
+                    }
+                )
+
+                if not input_vars:
+                    print(f"[yellow]Input {input_id} defined in config but not used by any server.[/yellow]")
+                    continue
+
+                # Prompt user for input
+                print(
+                    f"[blue] • {input_id}[/blue]: {description}. (default: load from {', '.join(input_vars)}).",
+                    end=" ",
+                )
+                user_input = (await _async_prompt(exit_event=exit_event)).strip()
+                if exit_event.is_set():
+                    return
+
+                # Inject user input (or env variable) into servers' env
+                for server in servers:
+                    env = server.get("config", {}).get("env", {})
+                    for key, value in env.items():
+                        if value == env_special_value:
+                            if user_input:
+                                env[key] = user_input
+                            else:
+                                value_from_env = os.getenv(key, "")
+                                env[key] = value_from_env
+                                if value_from_env:
+                                    print(f"[green]Value successfully loaded from '{key}'[/green]")
+                                else:
+                                    print(
+                                        f"[yellow]No value found for '{key}' in environment variables. Continuing.[/yellow]"
+                                    )
+
+            print()
+
+        # Main agent loop
        async with Agent(
-            provider=config["provider"],
-            model=config["model"],
+            provider=config.get("provider"),
+            model=config.get("model"),
+            base_url=config.get("endpointUrl"),
            servers=servers,
            prompt=prompt,
        ) as agent:
@@ -84,8 +139,12 @@ async def run_agent(
             while True:
                abort_event.clear()

+                # Check if we should exit
+                if exit_event.is_set():
+                    return
+
                try:
-                    user_input = await _ainput()
+                    user_input = await _async_prompt(exit_event=exit_event)
                    first_sigint = True
                except EOFError:
                    print("\n[red]EOF received, exiting.[/red]", flush=True)
@@ -101,6 +160,8 @@ async def run_agent(
                 async for chunk in agent.run(user_input, abort_event=abort_event):
                        if abort_event.is_set() and not first_sigint:
                            break
+                        if exit_event.is_set():
+                            return

                        if hasattr(chunk, "choices"):
                            delta = chunk.choices[0].delta
@@ -123,9 +184,15 @@ async def run_agent(
                 print()

                except Exception as e:
-                    print(f"\n[bold red]Error during agent run: {e}[/bold red]", flush=True)
+                    tb_str = traceback.format_exc()
+                    print(f"\n[bold red]Error during agent run: {e}\n{tb_str}[/bold red]", flush=True)
                    first_sigint = True  # Allow graceful interrupt for the next command

+    except Exception as e:
+        tb_str = traceback.format_exc()
+        print(f"\n[bold red]An unexpected error occurred: {e}\n{tb_str}[/bold red]", flush=True)
+        raise e
+
    finally:
        if sigint_registered_in_loop:
            try:
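
The Ctrl+C rework above replaces the hard `os._exit(130)` with an `exit_event` that the main loop checks, so `finally` blocks and async context managers still run on shutdown. A standalone sketch of the pattern (not part of the diff), simplified to a single event and Unix-only signal handling:

    import asyncio
    import signal

    async def main() -> None:
        exit_event = asyncio.Event()
        loop = asyncio.get_running_loop()
        # In the CLI the first Ctrl+C aborts the current turn; the second one
        # sets the exit event instead of killing the process outright.
        loop.add_signal_handler(signal.SIGINT, exit_event.set)  # raises on Windows
        while not exit_event.is_set():
            await asyncio.sleep(0.1)  # stand-in for one agent turn
        print("Exiting...")  # cleanup code after this point still runs

    asyncio.run(main())
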

huggingface_hub/inference/_mcp/mcp_client.py
@@ -69,6 +69,8 @@ class MCPClient:
         provider (`str`, *optional*):
            Name of the provider to use for inference. Defaults to "auto" i.e. the first of the providers available for the model, sorted by the user's order in https://hf.co/settings/inference-providers.
            If model is a URL or `base_url` is passed, then `provider` is not used.
+        base_url (`str`, *optional*):
+            The base URL to run inference. Defaults to None.
        api_key (`str`, `optional`):
            Token to use for authentication. Will default to the locally Hugging Face saved token if not provided. You can also use your own provider API key to interact directly with the provider's service.
    """
@@ -76,17 +78,25 @@ class MCPClient:
     def __init__(
        self,
        *,
-        model: str,
+        model: Optional[str] = None,
        provider: Optional[PROVIDER_OR_POLICY_T] = None,
+        base_url: Optional[str] = None,
        api_key: Optional[str] = None,
    ):
        # Initialize MCP sessions as a dictionary of ClientSession objects
        self.sessions: Dict[ToolName, "ClientSession"] = {}
        self.exit_stack = AsyncExitStack()
        self.available_tools: List[ChatCompletionInputTool] = []
-
-        # Initialize the AsyncInferenceClient
-        self.client = AsyncInferenceClient(model=model, provider=provider, api_key=api_key)
+        # To be able to send the model in the payload if `base_url` is provided
+        if model is None and base_url is None:
+            raise ValueError("At least one of `model` or `base_url` should be set in `MCPClient`.")
+        self.payload_model = model
+        self.client = AsyncInferenceClient(
+            model=None if base_url is not None else model,
+            provider=provider,
+            api_key=api_key,
+            base_url=base_url,
+        )

    async def __aenter__(self):
        """Enter the context manager"""
@@ -99,6 +109,11 @@ class MCPClient:
         await self.client.__aexit__(exc_type, exc_val, exc_tb)
        await self.cleanup()

+    async def cleanup(self):
+        """Clean up resources"""
+        await self.client.close()
+        await self.exit_stack.aclose()
+
    @overload
    async def add_mcp_server(self, type: Literal["stdio"], **params: Unpack[StdioServerParameters_T]): ...

@@ -244,6 +259,7 @@ class MCPClient:

        # Create the streaming request
        response = await self.client.chat.completions.create(
+            model=self.payload_model,
            messages=messages,
            tools=tools,
            tool_choice="auto",
@@ -275,11 +291,13 @@ class MCPClient:
                 for tool_call in delta.tool_calls:
                    # Aggregate chunks into tool calls
                    if tool_call.index not in final_tool_calls:
-                        if tool_call.function.arguments is None:  # Corner case (depends on provider)
+                        if (
+                            tool_call.function.arguments is None or tool_call.function.arguments == "{}"
+                        ):  # Corner case (depends on provider)
                            tool_call.function.arguments = ""
                        final_tool_calls[tool_call.index] = tool_call

-                    if tool_call.function.arguments:
+                    elif tool_call.function.arguments:
                        final_tool_calls[tool_call.index].function.arguments += tool_call.function.arguments

                # Optionally exit early if no tools in first chunks
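
The switch to `elif` plus the extra `== "{}"` check tightens how streamed tool-call argument deltas are merged: a first chunk carrying `None` or a bare `"{}"` is normalized to an empty string, and a first chunk's own arguments are no longer appended a second time. A self-contained sketch of the rule, with made-up chunk values:

    from typing import Dict, Optional

    final_args: Dict[int, str] = {}

    def aggregate(index: int, arguments: Optional[str]) -> None:
        if index not in final_args:
            # First chunk for this tool call: normalize empty placeholders
            final_args[index] = "" if arguments in (None, "{}") else arguments
        elif arguments:
            # Later chunks only ever append new argument text
            final_args[index] += arguments

    for index, arguments in [(0, "{}"), (0, '{"city": '), (0, '"Paris"}')]:
        aggregate(index, arguments)

    assert final_args[0] == '{"city": "Paris"}'
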
@@ -316,7 +334,3 @@ class MCPClient:
             tool_message_as_obj = ChatCompletionInputMessage.parse_obj_as_instance(tool_message)
            messages.append(tool_message_as_obj)
            yield tool_message_as_obj
-
-    async def cleanup(self):
-        """Clean up resources"""
-        await self.exit_stack.aclose()

huggingface_hub/serialization/_torch.py
@@ -246,7 +246,7 @@ def save_torch_state_dict(
             shared_tensors_to_discard=shared_tensors_to_discard,
        )
    else:
-        from torch import save as save_file_fn  # type: ignore[assignment]
+        from torch import save as save_file_fn  # type: ignore[assignment, no-redef]

        logger.warning(
            "You are using unsafe serialization. Due to security reasons, it is recommended not to load "

huggingface_hub-0.32.3.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: huggingface-hub
-Version: 0.32.1
+Version: 0.32.3
 Summary: Client library to download and publish models, datasets and other repos on the huggingface.co hub
 Home-page: https://github.com/huggingface/huggingface_hub
 Author: Hugging Face, Inc.

huggingface_hub-0.32.3.dist-info/RECORD
@@ -1,11 +1,11 @@
-huggingface_hub/__init__.py,sha256=MraPcpVeTtz7kvp75UQIcplF8NG871xwGxDI4QHg_Mg,50644
+huggingface_hub/__init__.py,sha256=cK2MmEMaR4mvhSeDsQFuY2MLlfw8A3lKZbrwTzs-V9E,50644
 huggingface_hub/_commit_api.py,sha256=ZbmuIhFdF8B3F_cvGtxorka7MmIQOk8oBkCtYltnCvI,39456
 huggingface_hub/_commit_scheduler.py,sha256=tfIoO1xWHjTJ6qy6VS6HIoymDycFPg0d6pBSZprrU2U,14679
 huggingface_hub/_inference_endpoints.py,sha256=qXR0utAYRaEWTI8EXzAsDpVDcYpp8bJPEBbcOxRS52E,17413
 huggingface_hub/_local_folder.py,sha256=7Uce_z51D7ZZ58GF7eUOtcq1cCuYQMOEF-W4p85iQTo,16885
 huggingface_hub/_login.py,sha256=ssf4viT5BhHI2ZidnSuAZcrwSxzaLOrf8xgRVKuvu_A,20298
 huggingface_hub/_oauth.py,sha256=YNbSSZCNZLiCqwMoYboSAfI3XjEsbyAADJcwgRAdhBc,18802
-huggingface_hub/_snapshot_download.py,sha256=RqhfsESBHwXAoZxVvw68W7vGhmXSbl7RoEFOPLvw3Ls,15186
+huggingface_hub/_snapshot_download.py,sha256=5BvLNm_1DgdBNXNWOP8omK-9unIyqNSVpDNmxfClxFk,16078
 huggingface_hub/_space_api.py,sha256=jb6rF8qLtjaNU12D-8ygAPM26xDiHCu8CHXHowhGTmg,5470
 huggingface_hub/_tensorboard_logger.py,sha256=ZkYcAUiRC8RGL214QUYtp58O8G5tn-HF6DCWha9imcA,8358
 huggingface_hub/_upload_large_folder.py,sha256=elY5Rv2YVJECVpdZ9PM1zdO8kG-jmi8DifLOa7aC3EU,24178
@@ -81,10 +81,11 @@ huggingface_hub/inference/_generated/types/zero_shot_classification.py,sha256=BA
 huggingface_hub/inference/_generated/types/zero_shot_image_classification.py,sha256=8J9n6VqFARkWvPfAZNWEG70AlrMGldU95EGQQwn06zI,1487
 huggingface_hub/inference/_generated/types/zero_shot_object_detection.py,sha256=GUd81LIV7oEbRWayDlAVgyLmY596r1M3AW0jXDp1yTA,1630
 huggingface_hub/inference/_mcp/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-huggingface_hub/inference/_mcp/agent.py,sha256=RDpqy2rcVKy0euIK43OAAczLlx0UH1QIACHMFjTItq0,4077
-huggingface_hub/inference/_mcp/cli.py,sha256=Pw6nZP57uzDWZGXRVCAqLHsLYO_WiQd4UPcQdXiCa-o,5557
+huggingface_hub/inference/_mcp/_cli_hacks.py,sha256=NP8xA-7-3kJIzLHg3FuHXdnpli89OAMPILPrTk60lwU,3131
+huggingface_hub/inference/_mcp/agent.py,sha256=azX9_lsFjNlgsEvRYdKgsmOmpNReWIcbuMeIVWc852k,4264
+huggingface_hub/inference/_mcp/cli.py,sha256=6WP2bqdfSj80IYdwnzwO4ilEb7PgghgctD4iLV4ydqg,8588
 huggingface_hub/inference/_mcp/constants.py,sha256=tE_V6qcvsmvVoJa4eg04jhoTR2Cx1cNHieY2ENrm1_M,2511
-huggingface_hub/inference/_mcp/mcp_client.py,sha256=MwoovV2UTf7F8rxxQucAd4WSnIiJ71wx09QCrXDrP0Q,13477
+huggingface_hub/inference/_mcp/mcp_client.py,sha256=yHfpfztIepARqD_3bFSFWOn402BWO1tptqlIVGR7zJk,14130
 huggingface_hub/inference/_mcp/utils.py,sha256=K7rr4FxCh9OYWwYNlnvQraNLy9y3z-5yVMBIaoCQMjA,4052
 huggingface_hub/inference/_providers/__init__.py,sha256=IrLTMERrbRuPiVdBQEMK9TMvXrsGId4-u2ucMkG-vTU,7671
 huggingface_hub/inference/_providers/_common.py,sha256=Octgz-PbHw62iW3Oa8rF7rxvBJR0ZmL4ouv3NoX-weE,10131
@@ -106,7 +107,7 @@ huggingface_hub/serialization/__init__.py,sha256=kn-Fa-m4FzMnN8lNsF-SwFcfzug4Cuc
 huggingface_hub/serialization/_base.py,sha256=Df3GwGR9NzeK_SD75prXLucJAzPiNPgHbgXSw-_LTk8,8126
 huggingface_hub/serialization/_dduf.py,sha256=s42239rLiHwaJE36QDEmS5GH7DSmQ__BffiHJO5RjIg,15424
 huggingface_hub/serialization/_tensorflow.py,sha256=zHOvEMg-JHC55Fm4roDT3LUCDO5zB9qtXZffG065RAM,3625
-huggingface_hub/serialization/_torch.py,sha256=hJglq5F56s3k06GfLuYKQV4bSSjXQLuk1CC9l1M3Fmo,45191
+huggingface_hub/serialization/_torch.py,sha256=jpBmuSZJymMpvLcDcMaNxDu_fE5VkY_pAVH8e8stYIo,45201
 huggingface_hub/templates/datasetcard_template.md,sha256=W-EMqR6wndbrnZorkVv56URWPG49l7MATGeI015kTvs,5503
 huggingface_hub/templates/modelcard_template.md,sha256=4AqArS3cqdtbit5Bo-DhjcnDFR-pza5hErLLTPM4Yuc,6870
 huggingface_hub/utils/__init__.py,sha256=ORfVkn5D0wuLIq12jjhTzn5_c4F8fRPxB7TG-iednuQ,3722
@@ -137,9 +138,9 @@ huggingface_hub/utils/insecure_hashlib.py,sha256=iAaepavFZ5Dhfa5n8KozRfQprKmvcjS
 huggingface_hub/utils/logging.py,sha256=0A8fF1yh3L9Ka_bCDX2ml4U5Ht0tY8Dr3JcbRvWFuwo,4909
 huggingface_hub/utils/sha.py,sha256=OFnNGCba0sNcT2gUwaVCJnldxlltrHHe0DS_PCpV3C4,2134
 huggingface_hub/utils/tqdm.py,sha256=xAKcyfnNHsZ7L09WuEM5Ew5-MDhiahLACbbN2zMmcLs,10671
-huggingface_hub-0.32.1.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-huggingface_hub-0.32.1.dist-info/METADATA,sha256=y3e9ulpqrSxlIWNwdsA0aXk-ZK9nVDChQ5fjApF_hNA,14777
-huggingface_hub-0.32.1.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
-huggingface_hub-0.32.1.dist-info/entry_points.txt,sha256=uelw0-fu0kd-CxIuOsR1bsjLIFnAaMQ6AIqluJYDhQw,184
-huggingface_hub-0.32.1.dist-info/top_level.txt,sha256=8KzlQJAY4miUvjAssOAJodqKOw3harNzuiwGQ9qLSSk,16
-huggingface_hub-0.32.1.dist-info/RECORD,,
+huggingface_hub-0.32.3.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+huggingface_hub-0.32.3.dist-info/METADATA,sha256=YG4e05qfvJNzzMc6QTzFoDez-KfYpn3acXE_LmzpllY,14777
+huggingface_hub-0.32.3.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
+huggingface_hub-0.32.3.dist-info/entry_points.txt,sha256=uelw0-fu0kd-CxIuOsR1bsjLIFnAaMQ6AIqluJYDhQw,184
+huggingface_hub-0.32.3.dist-info/top_level.txt,sha256=8KzlQJAY4miUvjAssOAJodqKOw3harNzuiwGQ9qLSSk,16
+huggingface_hub-0.32.3.dist-info/RECORD,,