huggingface-hub 0.32.3__py3-none-any.whl → 0.32.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of huggingface-hub has been flagged as potentially problematic; consult the package registry's advisory page for details.

@@ -46,7 +46,7 @@ import sys
46
46
  from typing import TYPE_CHECKING
47
47
 
48
48
 
49
- __version__ = "0.32.3"
49
+ __version__ = "0.32.5"
50
50
 
51
51
  # Alphabetical order of definitions is ensured in tests
52
52
  # WARNING: any comment added in this dictionary definition will be lost when
@@ -1502,7 +1502,7 @@ class InferenceClient:
1502
1502
  model_id = model or self.model
1503
1503
  provider_helper = get_provider_helper(self.provider, task="question-answering", model=model_id)
1504
1504
  request_parameters = provider_helper.prepare_request(
1505
- inputs=None,
1505
+ inputs={"question": question, "context": context},
1506
1506
  parameters={
1507
1507
  "align_to_words": align_to_words,
1508
1508
  "doc_stride": doc_stride,
@@ -1512,7 +1512,6 @@ class InferenceClient:
1512
1512
  "max_seq_len": max_seq_len,
1513
1513
  "top_k": top_k,
1514
1514
  },
1515
- extra_payload={"question": question, "context": context},
1516
1515
  headers=self.headers,
1517
1516
  model=model_id,
1518
1517
  api_key=self.token,
@@ -1551,7 +1551,7 @@ class AsyncInferenceClient:
1551
1551
  model_id = model or self.model
1552
1552
  provider_helper = get_provider_helper(self.provider, task="question-answering", model=model_id)
1553
1553
  request_parameters = provider_helper.prepare_request(
1554
- inputs=None,
1554
+ inputs={"question": question, "context": context},
1555
1555
  parameters={
1556
1556
  "align_to_words": align_to_words,
1557
1557
  "doc_stride": doc_stride,
@@ -1561,7 +1561,6 @@ class AsyncInferenceClient:
1561
1561
  "max_seq_len": max_seq_len,
1562
1562
  "top_k": top_k,
1563
1563
  },
1564
- extra_payload={"question": question, "context": context},
1565
1564
  headers=self.headers,
1566
1565
  model=model_id,
1567
1566
  api_key=self.token,
@@ -73,8 +73,9 @@ async def _async_prompt(exit_event: asyncio.Event, prompt: str = "» ") -> str:
73
73
 
74
74
  # Wait for user input or exit event
75
75
  # Wait until either the user hits enter or exit_event is set
76
+ exit_task = asyncio.create_task(exit_event.wait())
76
77
  await asyncio.wait(
77
- [future, exit_event.wait()],
78
+ [future, exit_task],
78
79
  return_when=asyncio.FIRST_COMPLETED,
79
80
  )
80
81
 
@@ -2,7 +2,7 @@ import asyncio
2
2
  import os
3
3
  import signal
4
4
  import traceback
5
- from typing import Any, Dict, List, Optional
5
+ from typing import Optional
6
6
 
7
7
  import typer
8
8
  from rich import print
@@ -40,8 +40,8 @@ async def run_agent(
40
40
 
41
41
  config, prompt = _load_agent_config(agent_path)
42
42
 
43
- inputs: List[Dict[str, Any]] = config.get("inputs", [])
44
- servers: List[Dict[str, Any]] = config.get("servers", [])
43
+ inputs = config.get("inputs", [])
44
+ servers = config.get("servers", [])
45
45
 
46
46
  abort_event = asyncio.Event()
47
47
  exit_event = asyncio.Event()
@@ -82,14 +82,17 @@ async def run_agent(
82
82
  env_special_value = "${input:" + input_id + "}" # Special value to indicate env variable injection
83
83
 
84
84
  # Check env variables that will use this input
85
- input_vars = list(
86
- {
87
- key
88
- for server in servers
89
- for key, value in server.get("config", {}).get("env", {}).items()
90
- if value == env_special_value
91
- }
92
- )
85
+ input_vars = set()
86
+ for server in servers:
87
+ # Check stdio's "env" and http/sse's "headers" mappings
88
+ env_or_headers = (
89
+ server["config"].get("env", {})
90
+ if server["type"] == "stdio"
91
+ else server["config"].get("options", {}).get("requestInit", {}).get("headers", {})
92
+ )
93
+ for key, value in env_or_headers.items():
94
+ if env_special_value in value:
95
+ input_vars.add(key)
93
96
 
94
97
  if not input_vars:
95
98
  print(f"[yellow]Input {input_id} defined in config but not used by any server.[/yellow]")
@@ -97,23 +100,27 @@ async def run_agent(
97
100
 
98
101
  # Prompt user for input
99
102
  print(
100
- f"[blue] • {input_id}[/blue]: {description}. (default: load from {', '.join(input_vars)}).",
103
+ f"[blue] • {input_id}[/blue]: {description}. (default: load from {', '.join(sorted(input_vars))}).",
101
104
  end=" ",
102
105
  )
103
106
  user_input = (await _async_prompt(exit_event=exit_event)).strip()
104
107
  if exit_event.is_set():
105
108
  return
106
109
 
107
- # Inject user input (or env variable) into servers' env
110
+ # Inject user input (or env variable) into stdio's env or http/sse's headers
108
111
  for server in servers:
109
- env = server.get("config", {}).get("env", {})
110
- for key, value in env.items():
111
- if value == env_special_value:
112
+ env_or_headers = (
113
+ server["config"].get("env", {})
114
+ if server["type"] == "stdio"
115
+ else server["config"].get("options", {}).get("requestInit", {}).get("headers", {})
116
+ )
117
+ for key, value in env_or_headers.items():
118
+ if env_special_value in value:
112
119
  if user_input:
113
- env[key] = user_input
120
+ env_or_headers[key] = env_or_headers[key].replace(env_special_value, user_input)
114
121
  else:
115
122
  value_from_env = os.getenv(key, "")
116
- env[key] = value_from_env
123
+ env_or_headers[key] = env_or_headers[key].replace(env_special_value, value_from_env)
117
124
  if value_from_env:
118
125
  print(f"[green]Value successfully loaded from '{key}'[/green]")
119
126
  else:
@@ -125,10 +132,10 @@ async def run_agent(
125
132
 
126
133
  # Main agent loop
127
134
  async with Agent(
128
- provider=config.get("provider"),
135
+ provider=config.get("provider"), # type: ignore[arg-type]
129
136
  model=config.get("model"),
130
- base_url=config.get("endpointUrl"),
131
- servers=servers,
137
+ base_url=config.get("endpointUrl"), # type: ignore[arg-type]
138
+ servers=servers, # type: ignore[arg-type]
132
139
  prompt=prompt,
133
140
  ) as agent:
134
141
  await agent.load_tools()
@@ -272,9 +272,6 @@ class MCPClient:
272
272
 
273
273
  # Read from stream
274
274
  async for chunk in response:
275
- # Yield each chunk to caller
276
- yield chunk
277
-
278
275
  num_of_chunks += 1
279
276
  delta = chunk.choices[0].delta if chunk.choices and len(chunk.choices) > 0 else None
280
277
  if not delta:
@@ -304,6 +301,9 @@ class MCPClient:
304
301
  if exit_if_first_chunk_no_tool and num_of_chunks <= 2 and len(final_tool_calls) == 0:
305
302
  return
306
303
 
304
+ # Yield each chunk to caller
305
+ yield chunk
306
+
307
307
  if message["content"]:
308
308
  messages.append(message)
309
309
 
@@ -0,0 +1,65 @@
1
+ from typing import Dict, List, Literal, TypedDict, Union
2
+
3
+
4
+ # Input config
5
+ class InputConfig(TypedDict, total=False):
6
+ id: str
7
+ description: str
8
+ type: str
9
+ password: bool
10
+
11
+
12
+ # stdio server config
13
+ class StdioServerConfig(TypedDict, total=False):
14
+ command: str
15
+ args: List[str]
16
+ env: Dict[str, str]
17
+ cwd: str
18
+
19
+
20
+ class StdioServer(TypedDict):
21
+ type: Literal["stdio"]
22
+ config: StdioServerConfig
23
+
24
+
25
+ # http server config
26
+ class HTTPRequestInit(TypedDict, total=False):
27
+ headers: Dict[str, str]
28
+
29
+
30
+ class HTTPServerOptions(TypedDict, total=False):
31
+ requestInit: HTTPRequestInit
32
+ sessionId: str
33
+
34
+
35
+ class HTTPServerConfig(TypedDict, total=False):
36
+ url: str
37
+ options: HTTPServerOptions
38
+
39
+
40
+ class HTTPServer(TypedDict):
41
+ type: Literal["http"]
42
+ config: HTTPServerConfig
43
+
44
+
45
+ # sse server config
46
+ class SSEServerOptions(TypedDict, total=False):
47
+ requestInit: HTTPRequestInit
48
+
49
+
50
+ class SSEServerConfig(TypedDict):
51
+ url: str
52
+ options: SSEServerOptions
53
+
54
+
55
+ class SSEServer(TypedDict):
56
+ type: Literal["sse"]
57
+ config: SSEServerConfig
58
+
59
+
60
+ # AgentConfig root object
61
+ class AgentConfig(TypedDict):
62
+ model: str
63
+ provider: str
64
+ inputs: List[InputConfig]
65
+ servers: List[Union[StdioServer, HTTPServer, SSEServer]]
@@ -6,12 +6,13 @@ Formatting utilities taken from the JS SDK: https://github.com/huggingface/huggi
6
6
 
7
7
  import json
8
8
  from pathlib import Path
9
- from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
9
+ from typing import TYPE_CHECKING, List, Optional, Tuple
10
10
 
11
11
  from huggingface_hub import snapshot_download
12
12
  from huggingface_hub.errors import EntryNotFoundError
13
13
 
14
14
  from .constants import DEFAULT_AGENT, DEFAULT_REPO_ID, FILENAME_CONFIG, FILENAME_PROMPT
15
+ from .types import AgentConfig
15
16
 
16
17
 
17
18
  if TYPE_CHECKING:
@@ -83,21 +84,21 @@ def _get_base64_size(base64_str: str) -> int:
83
84
  return (len(base64_str) * 3) // 4 - padding
84
85
 
85
86
 
86
- def _load_agent_config(agent_path: Optional[str]) -> Tuple[Dict[str, Any], Optional[str]]:
87
+ def _load_agent_config(agent_path: Optional[str]) -> Tuple[AgentConfig, Optional[str]]:
87
88
  """Load server config and prompt."""
88
89
 
89
- def _read_dir(directory: Path) -> Tuple[Dict[str, Any], Optional[str]]:
90
+ def _read_dir(directory: Path) -> Tuple[AgentConfig, Optional[str]]:
90
91
  cfg_file = directory / FILENAME_CONFIG
91
92
  if not cfg_file.exists():
92
93
  raise FileNotFoundError(f" Config file not found in {directory}! Please make sure it exists locally")
93
94
 
94
- config: Dict[str, Any] = json.loads(cfg_file.read_text(encoding="utf-8"))
95
+ config: AgentConfig = json.loads(cfg_file.read_text(encoding="utf-8"))
95
96
  prompt_file = directory / FILENAME_PROMPT
96
97
  prompt: Optional[str] = prompt_file.read_text(encoding="utf-8") if prompt_file.exists() else None
97
98
  return config, prompt
98
99
 
99
100
  if agent_path is None:
100
- return DEFAULT_AGENT, None
101
+ return DEFAULT_AGENT, None # type: ignore[return-value]
101
102
 
102
103
  path = Path(agent_path).expanduser()
103
104
 
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: huggingface-hub
3
- Version: 0.32.3
3
+ Version: 0.32.5
4
4
  Summary: Client library to download and publish models, datasets and other repos on the huggingface.co hub
5
5
  Home-page: https://github.com/huggingface/huggingface_hub
6
6
  Author: Hugging Face, Inc.
@@ -1,4 +1,4 @@
1
- huggingface_hub/__init__.py,sha256=cK2MmEMaR4mvhSeDsQFuY2MLlfw8A3lKZbrwTzs-V9E,50644
1
+ huggingface_hub/__init__.py,sha256=KPUAhpuW3bgEl-TXncI_3l_Vt3qwj4KLyt9Rw8a0LGs,50644
2
2
  huggingface_hub/_commit_api.py,sha256=ZbmuIhFdF8B3F_cvGtxorka7MmIQOk8oBkCtYltnCvI,39456
3
3
  huggingface_hub/_commit_scheduler.py,sha256=tfIoO1xWHjTJ6qy6VS6HIoymDycFPg0d6pBSZprrU2U,14679
4
4
  huggingface_hub/_inference_endpoints.py,sha256=qXR0utAYRaEWTI8EXzAsDpVDcYpp8bJPEBbcOxRS52E,17413
@@ -43,10 +43,10 @@ huggingface_hub/commands/upload_large_folder.py,sha256=P-EO44JWVl39Ax4b0E0Z873d0
43
43
  huggingface_hub/commands/user.py,sha256=_4rjCrP84KqtqCMn-r3YWLuGLrnklOWTdJFVTNFMLuU,7096
44
44
  huggingface_hub/commands/version.py,sha256=vfCJn7GO1m-DtDmbdsty8_RTVtnZ7lX6MJsx0Bf4e-s,1266
45
45
  huggingface_hub/inference/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
46
- huggingface_hub/inference/_client.py,sha256=o0R0Nkz11vhCjBWqDvMbGXU_MqZS1RHQOYr7QZDgML8,161570
46
+ huggingface_hub/inference/_client.py,sha256=9XhzTsC-87iGfRLW0grvGp53f4tE2QvvNCmOsrd1vJU,161538
47
47
  huggingface_hub/inference/_common.py,sha256=iwCkq2fWE1MVoPTeeXN7UN5FZi7g5fZ3K8PHSOCi5dU,14591
48
48
  huggingface_hub/inference/_generated/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
49
- huggingface_hub/inference/_generated/_async_client.py,sha256=CjGkQrFu4LX9JA056rEXmdokFmO7iZk98ND1D9ib1hg,167730
49
+ huggingface_hub/inference/_generated/_async_client.py,sha256=1rFYv_FcjaDW4F7L-QIYgkbWeFKgW6x3UdcZp1HYhZk,167698
50
50
  huggingface_hub/inference/_generated/types/__init__.py,sha256=qI8Eu9WcBcKhVkLli6YniGHpfiJ9MLqtzmwXX35E7bA,6443
51
51
  huggingface_hub/inference/_generated/types/audio_classification.py,sha256=Jg3mzfGhCSH6CfvVvgJSiFpkz6v4nNA0G4LJXacEgNc,1573
52
52
  huggingface_hub/inference/_generated/types/audio_to_audio.py,sha256=2Ep4WkePL7oJwcp5nRJqApwviumGHbft9HhXE9XLHj4,891
@@ -81,12 +81,13 @@ huggingface_hub/inference/_generated/types/zero_shot_classification.py,sha256=BA
81
81
  huggingface_hub/inference/_generated/types/zero_shot_image_classification.py,sha256=8J9n6VqFARkWvPfAZNWEG70AlrMGldU95EGQQwn06zI,1487
82
82
  huggingface_hub/inference/_generated/types/zero_shot_object_detection.py,sha256=GUd81LIV7oEbRWayDlAVgyLmY596r1M3AW0jXDp1yTA,1630
83
83
  huggingface_hub/inference/_mcp/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
84
- huggingface_hub/inference/_mcp/_cli_hacks.py,sha256=NP8xA-7-3kJIzLHg3FuHXdnpli89OAMPILPrTk60lwU,3131
84
+ huggingface_hub/inference/_mcp/_cli_hacks.py,sha256=cMZirVFe4N0EM9Nzzs9aEmzUBUEBYR4oYZpByTWlZCM,3182
85
85
  huggingface_hub/inference/_mcp/agent.py,sha256=azX9_lsFjNlgsEvRYdKgsmOmpNReWIcbuMeIVWc852k,4264
86
- huggingface_hub/inference/_mcp/cli.py,sha256=6WP2bqdfSj80IYdwnzwO4ilEb7PgghgctD4iLV4ydqg,8588
86
+ huggingface_hub/inference/_mcp/cli.py,sha256=9IKItC1XJ4yzQAKP1iZwpYL1BA56bem2AQlKlB0SGdc,9251
87
87
  huggingface_hub/inference/_mcp/constants.py,sha256=tE_V6qcvsmvVoJa4eg04jhoTR2Cx1cNHieY2ENrm1_M,2511
88
- huggingface_hub/inference/_mcp/mcp_client.py,sha256=yHfpfztIepARqD_3bFSFWOn402BWO1tptqlIVGR7zJk,14130
89
- huggingface_hub/inference/_mcp/utils.py,sha256=K7rr4FxCh9OYWwYNlnvQraNLy9y3z-5yVMBIaoCQMjA,4052
88
+ huggingface_hub/inference/_mcp/mcp_client.py,sha256=jLIw_fkIPCGYbKI-T2C5UkYmCECJRGNGGw7gnVmXQ_s,14130
89
+ huggingface_hub/inference/_mcp/types.py,sha256=JPK7rC9j-abot8pN3xw1UbSv9S2OBSRStjl_cidWs1Q,1247
90
+ huggingface_hub/inference/_mcp/utils.py,sha256=VsRWl0fuSZDS0zNT9n7FOMSlzA0UBbP8p8xWKWDt2Pc,4093
90
91
  huggingface_hub/inference/_providers/__init__.py,sha256=IrLTMERrbRuPiVdBQEMK9TMvXrsGId4-u2ucMkG-vTU,7671
91
92
  huggingface_hub/inference/_providers/_common.py,sha256=Octgz-PbHw62iW3Oa8rF7rxvBJR0ZmL4ouv3NoX-weE,10131
92
93
  huggingface_hub/inference/_providers/black_forest_labs.py,sha256=wO7qgRyNyrIKlZtvL3vJEbS4-D19kfoXZk6PDh1dTis,2842
@@ -138,9 +139,9 @@ huggingface_hub/utils/insecure_hashlib.py,sha256=iAaepavFZ5Dhfa5n8KozRfQprKmvcjS
138
139
  huggingface_hub/utils/logging.py,sha256=0A8fF1yh3L9Ka_bCDX2ml4U5Ht0tY8Dr3JcbRvWFuwo,4909
139
140
  huggingface_hub/utils/sha.py,sha256=OFnNGCba0sNcT2gUwaVCJnldxlltrHHe0DS_PCpV3C4,2134
140
141
  huggingface_hub/utils/tqdm.py,sha256=xAKcyfnNHsZ7L09WuEM5Ew5-MDhiahLACbbN2zMmcLs,10671
141
- huggingface_hub-0.32.3.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
142
- huggingface_hub-0.32.3.dist-info/METADATA,sha256=YG4e05qfvJNzzMc6QTzFoDez-KfYpn3acXE_LmzpllY,14777
143
- huggingface_hub-0.32.3.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
144
- huggingface_hub-0.32.3.dist-info/entry_points.txt,sha256=uelw0-fu0kd-CxIuOsR1bsjLIFnAaMQ6AIqluJYDhQw,184
145
- huggingface_hub-0.32.3.dist-info/top_level.txt,sha256=8KzlQJAY4miUvjAssOAJodqKOw3harNzuiwGQ9qLSSk,16
146
- huggingface_hub-0.32.3.dist-info/RECORD,,
142
+ huggingface_hub-0.32.5.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
143
+ huggingface_hub-0.32.5.dist-info/METADATA,sha256=M-4wlI4JyB4h2UR7CXNVvKr-sMarVD1JZhAU_ue_nt0,14777
144
+ huggingface_hub-0.32.5.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
145
+ huggingface_hub-0.32.5.dist-info/entry_points.txt,sha256=uelw0-fu0kd-CxIuOsR1bsjLIFnAaMQ6AIqluJYDhQw,184
146
+ huggingface_hub-0.32.5.dist-info/top_level.txt,sha256=8KzlQJAY4miUvjAssOAJodqKOw3harNzuiwGQ9qLSSk,16
147
+ huggingface_hub-0.32.5.dist-info/RECORD,,