huggingface-hub 0.29.0rc2__py3-none-any.whl → 1.1.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (153)
  1. huggingface_hub/__init__.py +160 -46
  2. huggingface_hub/_commit_api.py +277 -71
  3. huggingface_hub/_commit_scheduler.py +15 -15
  4. huggingface_hub/_inference_endpoints.py +33 -22
  5. huggingface_hub/_jobs_api.py +301 -0
  6. huggingface_hub/_local_folder.py +18 -3
  7. huggingface_hub/_login.py +31 -63
  8. huggingface_hub/_oauth.py +460 -0
  9. huggingface_hub/_snapshot_download.py +241 -81
  10. huggingface_hub/_space_api.py +18 -10
  11. huggingface_hub/_tensorboard_logger.py +15 -19
  12. huggingface_hub/_upload_large_folder.py +196 -76
  13. huggingface_hub/_webhooks_payload.py +3 -3
  14. huggingface_hub/_webhooks_server.py +15 -25
  15. huggingface_hub/{commands → cli}/__init__.py +1 -15
  16. huggingface_hub/cli/_cli_utils.py +173 -0
  17. huggingface_hub/cli/auth.py +147 -0
  18. huggingface_hub/cli/cache.py +841 -0
  19. huggingface_hub/cli/download.py +189 -0
  20. huggingface_hub/cli/hf.py +60 -0
  21. huggingface_hub/cli/inference_endpoints.py +377 -0
  22. huggingface_hub/cli/jobs.py +772 -0
  23. huggingface_hub/cli/lfs.py +175 -0
  24. huggingface_hub/cli/repo.py +315 -0
  25. huggingface_hub/cli/repo_files.py +94 -0
  26. huggingface_hub/{commands/env.py → cli/system.py} +10 -13
  27. huggingface_hub/cli/upload.py +294 -0
  28. huggingface_hub/cli/upload_large_folder.py +117 -0
  29. huggingface_hub/community.py +20 -12
  30. huggingface_hub/constants.py +83 -59
  31. huggingface_hub/dataclasses.py +609 -0
  32. huggingface_hub/errors.py +99 -30
  33. huggingface_hub/fastai_utils.py +30 -41
  34. huggingface_hub/file_download.py +606 -346
  35. huggingface_hub/hf_api.py +2445 -1132
  36. huggingface_hub/hf_file_system.py +269 -152
  37. huggingface_hub/hub_mixin.py +61 -66
  38. huggingface_hub/inference/_client.py +501 -630
  39. huggingface_hub/inference/_common.py +133 -121
  40. huggingface_hub/inference/_generated/_async_client.py +536 -722
  41. huggingface_hub/inference/_generated/types/__init__.py +6 -1
  42. huggingface_hub/inference/_generated/types/automatic_speech_recognition.py +5 -6
  43. huggingface_hub/inference/_generated/types/base.py +10 -7
  44. huggingface_hub/inference/_generated/types/chat_completion.py +77 -31
  45. huggingface_hub/inference/_generated/types/depth_estimation.py +2 -2
  46. huggingface_hub/inference/_generated/types/document_question_answering.py +2 -2
  47. huggingface_hub/inference/_generated/types/feature_extraction.py +2 -2
  48. huggingface_hub/inference/_generated/types/fill_mask.py +2 -2
  49. huggingface_hub/inference/_generated/types/image_to_image.py +8 -2
  50. huggingface_hub/inference/_generated/types/image_to_text.py +2 -3
  51. huggingface_hub/inference/_generated/types/image_to_video.py +60 -0
  52. huggingface_hub/inference/_generated/types/sentence_similarity.py +3 -3
  53. huggingface_hub/inference/_generated/types/summarization.py +2 -2
  54. huggingface_hub/inference/_generated/types/table_question_answering.py +5 -5
  55. huggingface_hub/inference/_generated/types/text2text_generation.py +2 -2
  56. huggingface_hub/inference/_generated/types/text_generation.py +11 -11
  57. huggingface_hub/inference/_generated/types/text_to_audio.py +1 -2
  58. huggingface_hub/inference/_generated/types/text_to_speech.py +1 -2
  59. huggingface_hub/inference/_generated/types/text_to_video.py +2 -2
  60. huggingface_hub/inference/_generated/types/token_classification.py +2 -2
  61. huggingface_hub/inference/_generated/types/translation.py +2 -2
  62. huggingface_hub/inference/_generated/types/zero_shot_classification.py +2 -2
  63. huggingface_hub/inference/_generated/types/zero_shot_image_classification.py +2 -2
  64. huggingface_hub/inference/_generated/types/zero_shot_object_detection.py +1 -3
  65. huggingface_hub/inference/_mcp/__init__.py +0 -0
  66. huggingface_hub/inference/_mcp/_cli_hacks.py +88 -0
  67. huggingface_hub/inference/_mcp/agent.py +100 -0
  68. huggingface_hub/inference/_mcp/cli.py +247 -0
  69. huggingface_hub/inference/_mcp/constants.py +81 -0
  70. huggingface_hub/inference/_mcp/mcp_client.py +395 -0
  71. huggingface_hub/inference/_mcp/types.py +45 -0
  72. huggingface_hub/inference/_mcp/utils.py +128 -0
  73. huggingface_hub/inference/_providers/__init__.py +149 -20
  74. huggingface_hub/inference/_providers/_common.py +160 -37
  75. huggingface_hub/inference/_providers/black_forest_labs.py +12 -9
  76. huggingface_hub/inference/_providers/cerebras.py +6 -0
  77. huggingface_hub/inference/_providers/clarifai.py +13 -0
  78. huggingface_hub/inference/_providers/cohere.py +32 -0
  79. huggingface_hub/inference/_providers/fal_ai.py +231 -22
  80. huggingface_hub/inference/_providers/featherless_ai.py +38 -0
  81. huggingface_hub/inference/_providers/fireworks_ai.py +22 -1
  82. huggingface_hub/inference/_providers/groq.py +9 -0
  83. huggingface_hub/inference/_providers/hf_inference.py +143 -33
  84. huggingface_hub/inference/_providers/hyperbolic.py +9 -5
  85. huggingface_hub/inference/_providers/nebius.py +47 -5
  86. huggingface_hub/inference/_providers/novita.py +48 -5
  87. huggingface_hub/inference/_providers/nscale.py +44 -0
  88. huggingface_hub/inference/_providers/openai.py +25 -0
  89. huggingface_hub/inference/_providers/publicai.py +6 -0
  90. huggingface_hub/inference/_providers/replicate.py +46 -9
  91. huggingface_hub/inference/_providers/sambanova.py +37 -1
  92. huggingface_hub/inference/_providers/scaleway.py +28 -0
  93. huggingface_hub/inference/_providers/together.py +34 -5
  94. huggingface_hub/inference/_providers/wavespeed.py +138 -0
  95. huggingface_hub/inference/_providers/zai_org.py +17 -0
  96. huggingface_hub/lfs.py +33 -100
  97. huggingface_hub/repocard.py +34 -38
  98. huggingface_hub/repocard_data.py +79 -59
  99. huggingface_hub/serialization/__init__.py +0 -1
  100. huggingface_hub/serialization/_base.py +12 -15
  101. huggingface_hub/serialization/_dduf.py +8 -8
  102. huggingface_hub/serialization/_torch.py +69 -69
  103. huggingface_hub/utils/__init__.py +27 -8
  104. huggingface_hub/utils/_auth.py +7 -7
  105. huggingface_hub/utils/_cache_manager.py +92 -147
  106. huggingface_hub/utils/_chunk_utils.py +2 -3
  107. huggingface_hub/utils/_deprecation.py +1 -1
  108. huggingface_hub/utils/_dotenv.py +55 -0
  109. huggingface_hub/utils/_experimental.py +7 -5
  110. huggingface_hub/utils/_fixes.py +0 -10
  111. huggingface_hub/utils/_git_credential.py +5 -5
  112. huggingface_hub/utils/_headers.py +8 -30
  113. huggingface_hub/utils/_http.py +399 -237
  114. huggingface_hub/utils/_pagination.py +6 -6
  115. huggingface_hub/utils/_parsing.py +98 -0
  116. huggingface_hub/utils/_paths.py +5 -5
  117. huggingface_hub/utils/_runtime.py +74 -22
  118. huggingface_hub/utils/_safetensors.py +21 -21
  119. huggingface_hub/utils/_subprocess.py +13 -11
  120. huggingface_hub/utils/_telemetry.py +4 -4
  121. huggingface_hub/{commands/_cli_utils.py → utils/_terminal.py} +4 -4
  122. huggingface_hub/utils/_typing.py +25 -5
  123. huggingface_hub/utils/_validators.py +55 -74
  124. huggingface_hub/utils/_verification.py +167 -0
  125. huggingface_hub/utils/_xet.py +235 -0
  126. huggingface_hub/utils/_xet_progress_reporting.py +162 -0
  127. huggingface_hub/utils/insecure_hashlib.py +3 -5
  128. huggingface_hub/utils/logging.py +8 -11
  129. huggingface_hub/utils/tqdm.py +33 -4
  130. {huggingface_hub-0.29.0rc2.dist-info → huggingface_hub-1.1.3.dist-info}/METADATA +94 -82
  131. huggingface_hub-1.1.3.dist-info/RECORD +155 -0
  132. {huggingface_hub-0.29.0rc2.dist-info → huggingface_hub-1.1.3.dist-info}/WHEEL +1 -1
  133. huggingface_hub-1.1.3.dist-info/entry_points.txt +6 -0
  134. huggingface_hub/commands/delete_cache.py +0 -428
  135. huggingface_hub/commands/download.py +0 -200
  136. huggingface_hub/commands/huggingface_cli.py +0 -61
  137. huggingface_hub/commands/lfs.py +0 -200
  138. huggingface_hub/commands/repo_files.py +0 -128
  139. huggingface_hub/commands/scan_cache.py +0 -181
  140. huggingface_hub/commands/tag.py +0 -159
  141. huggingface_hub/commands/upload.py +0 -299
  142. huggingface_hub/commands/upload_large_folder.py +0 -129
  143. huggingface_hub/commands/user.py +0 -304
  144. huggingface_hub/commands/version.py +0 -37
  145. huggingface_hub/inference_api.py +0 -217
  146. huggingface_hub/keras_mixin.py +0 -500
  147. huggingface_hub/repository.py +0 -1477
  148. huggingface_hub/serialization/_tensorflow.py +0 -95
  149. huggingface_hub/utils/_hf_folder.py +0 -68
  150. huggingface_hub-0.29.0rc2.dist-info/RECORD +0 -131
  151. huggingface_hub-0.29.0rc2.dist-info/entry_points.txt +0 -6
  152. {huggingface_hub-0.29.0rc2.dist-info → huggingface_hub-1.1.3.dist-info/licenses}/LICENSE +0 -0
  153. {huggingface_hub-0.29.0rc2.dist-info → huggingface_hub-1.1.3.dist-info}/top_level.txt +0 -0
huggingface_hub/inference/_mcp/constants.py
@@ -0,0 +1,81 @@
+ from __future__ import annotations
+
+ import sys
+ from pathlib import Path
+
+ from huggingface_hub import ChatCompletionInputTool
+
+
+ FILENAME_CONFIG = "agent.json"
+ PROMPT_FILENAMES = ("PROMPT.md", "AGENTS.md")
+
+ DEFAULT_AGENT = {
+     "model": "Qwen/Qwen2.5-72B-Instruct",
+     "provider": "nebius",
+     "servers": [
+         {
+             "type": "stdio",
+             "command": "npx",
+             "args": [
+                 "-y",
+                 "@modelcontextprotocol/server-filesystem",
+                 str(Path.home() / ("Desktop" if sys.platform == "darwin" else "")),
+             ],
+         },
+         {
+             "type": "stdio",
+             "command": "npx",
+             "args": ["@playwright/mcp@latest"],
+         },
+     ],
+ }
+
+
+ DEFAULT_SYSTEM_PROMPT = """
+ You are an agent - please keep going until the user’s query is completely
+ resolved, before ending your turn and yielding back to the user. Only terminate
+ your turn when you are sure that the problem is solved, or if you need more
+ info from the user to solve the problem.
+ If you are not sure about anything pertaining to the user’s request, use your
+ tools to read files and gather the relevant information: do NOT guess or make
+ up an answer.
+ You MUST plan extensively before each function call, and reflect extensively
+ on the outcomes of the previous function calls. DO NOT do this entire process
+ by making function calls only, as this can impair your ability to solve the
+ problem and think insightfully.
+ """.strip()
+
+ MAX_NUM_TURNS = 10
+
+ TASK_COMPLETE_TOOL: ChatCompletionInputTool = ChatCompletionInputTool.parse_obj(  # type: ignore[assignment]
+     {
+         "type": "function",
+         "function": {
+             "name": "task_complete",
+             "description": "Call this tool when the task given by the user is complete",
+             "parameters": {
+                 "type": "object",
+                 "properties": {},
+             },
+         },
+     }
+ )
+
+ ASK_QUESTION_TOOL: ChatCompletionInputTool = ChatCompletionInputTool.parse_obj(  # type: ignore[assignment]
+     {
+         "type": "function",
+         "function": {
+             "name": "ask_question",
+             "description": "Ask the user for more info required to solve or clarify their problem.",
+             "parameters": {
+                 "type": "object",
+                 "properties": {},
+             },
+         },
+     }
+ )
+
+ EXIT_LOOP_TOOLS: list[ChatCompletionInputTool] = [TASK_COMPLETE_TOOL, ASK_QUESTION_TOOL]
+
+
+ DEFAULT_REPO_ID = "tiny-agents/tiny-agents"
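
For context, these constants define the on-disk agent layout that `_load_agent_config` in `utils.py` (later in this diff) reads back. A minimal sketch, not part of the diff, of materializing such a config; the `my-agent` directory name and prompt text are hypothetical:

```python
import json
from pathlib import Path

# Hypothetical local agent directory; FILENAME_CONFIG ("agent.json") and
# PROMPT_FILENAMES above define the file names the loader looks for.
agent_dir = Path("my-agent")
agent_dir.mkdir(exist_ok=True)

config = {
    "model": "Qwen/Qwen2.5-72B-Instruct",  # same defaults as DEFAULT_AGENT
    "provider": "nebius",
    "servers": [
        {"type": "stdio", "command": "npx", "args": ["@playwright/mcp@latest"]},
    ],
}
(agent_dir / "agent.json").write_text(json.dumps(config, indent=2), encoding="utf-8")
(agent_dir / "PROMPT.md").write_text("You are a helpful agent.\n", encoding="utf-8")
```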
huggingface_hub/inference/_mcp/mcp_client.py
@@ -0,0 +1,395 @@
+ import json
+ import logging
+ from contextlib import AsyncExitStack
+ from datetime import timedelta
+ from pathlib import Path
+ from typing import TYPE_CHECKING, Any, AsyncIterable, Literal, Optional, TypedDict, Union, overload
+
+ from typing_extensions import NotRequired, TypeAlias, Unpack
+
+ from ...utils._runtime import get_hf_hub_version
+ from .._generated._async_client import AsyncInferenceClient
+ from .._generated.types import (
+     ChatCompletionInputMessage,
+     ChatCompletionInputTool,
+     ChatCompletionStreamOutput,
+     ChatCompletionStreamOutputDeltaToolCall,
+ )
+ from .._providers import PROVIDER_OR_POLICY_T
+ from .utils import format_result
+
+
+ if TYPE_CHECKING:
+     from mcp import ClientSession
+
+ logger = logging.getLogger(__name__)
+
+ # Type alias for tool names
+ ToolName: TypeAlias = str
+
+ ServerType: TypeAlias = Literal["stdio", "sse", "http"]
+
+
+ class StdioServerParameters_T(TypedDict):
+     command: str
+     args: NotRequired[list[str]]
+     env: NotRequired[dict[str, str]]
+     cwd: NotRequired[Union[str, Path, None]]
+
+
+ class SSEServerParameters_T(TypedDict):
+     url: str
+     headers: NotRequired[dict[str, Any]]
+     timeout: NotRequired[float]
+     sse_read_timeout: NotRequired[float]
+
+
+ class StreamableHTTPParameters_T(TypedDict):
+     url: str
+     headers: NotRequired[dict[str, Any]]
+     timeout: NotRequired[timedelta]
+     sse_read_timeout: NotRequired[timedelta]
+     terminate_on_close: NotRequired[bool]
+
+
+ class MCPClient:
+     """
+     Client for connecting to one or more MCP servers and processing chat completions with tools.
+
+     > [!WARNING]
+     > This class is experimental and might be subject to breaking changes in the future without prior notice.
+
+     Args:
+         model (`str`, *optional*):
+             The model to run inference with. Can be a model id hosted on the Hugging Face Hub, e.g. `meta-llama/Meta-Llama-3-8B-Instruct`,
+             or a URL to a deployed Inference Endpoint or other local or remote endpoint.
+         provider (`str`, *optional*):
+             Name of the provider to use for inference. Defaults to "auto", i.e. the first of the providers available for the model, sorted by the user's order in https://hf.co/settings/inference-providers.
+             If model is a URL or `base_url` is passed, then `provider` is not used.
+         base_url (`str`, *optional*):
+             The base URL to run inference. Defaults to None.
+         api_key (`str`, *optional*):
+             Token to use for authentication. Will default to the locally saved Hugging Face token if not provided. You can also use your own provider API key to interact directly with the provider's service.
+     """
+
+     def __init__(
+         self,
+         *,
+         model: Optional[str] = None,
+         provider: Optional[PROVIDER_OR_POLICY_T] = None,
+         base_url: Optional[str] = None,
+         api_key: Optional[str] = None,
+     ):
+         # Initialize MCP sessions as a dictionary of ClientSession objects
+         self.sessions: dict[ToolName, "ClientSession"] = {}
+         self.exit_stack = AsyncExitStack()
+         self.available_tools: list[ChatCompletionInputTool] = []
+         # To be able to send the model in the payload if `base_url` is provided
+         if model is None and base_url is None:
+             raise ValueError("At least one of `model` or `base_url` should be set in `MCPClient`.")
+         self.payload_model = model
+         self.client = AsyncInferenceClient(
+             model=None if base_url is not None else model,
+             provider=provider,
+             api_key=api_key,
+             base_url=base_url,
+         )
+
+     async def __aenter__(self):
+         """Enter the context manager"""
+         await self.client.__aenter__()
+         await self.exit_stack.__aenter__()
+         return self
+
+     async def __aexit__(self, exc_type, exc_val, exc_tb):
+         """Exit the context manager"""
+         await self.client.__aexit__(exc_type, exc_val, exc_tb)
+         await self.cleanup()
+
+     async def cleanup(self):
+         """Clean up resources"""
+         await self.client.close()
+         await self.exit_stack.aclose()
+
+     @overload
+     async def add_mcp_server(self, type: Literal["stdio"], **params: Unpack[StdioServerParameters_T]): ...
+
+     @overload
+     async def add_mcp_server(self, type: Literal["sse"], **params: Unpack[SSEServerParameters_T]): ...
+
+     @overload
+     async def add_mcp_server(self, type: Literal["http"], **params: Unpack[StreamableHTTPParameters_T]): ...
+
+     async def add_mcp_server(self, type: ServerType, **params: Any):
+         """Connect to an MCP server
+
+         Args:
+             type (`str`):
+                 Type of the server to connect to. Can be one of:
+                 - "stdio": Standard input/output server (local)
+                 - "sse": Server-sent events (SSE) server
+                 - "http": StreamableHTTP server
+             **params (`dict[str, Any]`):
+                 Server parameters that can be either:
+                 - For stdio servers:
+                     - command (str): The command to run the MCP server
+                     - args (list[str], optional): Arguments for the command
+                     - env (dict[str, str], optional): Environment variables for the command
+                     - cwd (Union[str, Path, None], optional): Working directory for the command
+                     - allowed_tools (list[str], optional): List of tool names to allow from this server
+                 - For SSE servers:
+                     - url (str): The URL of the SSE server
+                     - headers (dict[str, Any], optional): Headers for the SSE connection
+                     - timeout (float, optional): Connection timeout
+                     - sse_read_timeout (float, optional): SSE read timeout
+                     - allowed_tools (list[str], optional): List of tool names to allow from this server
+                 - For StreamableHTTP servers:
+                     - url (str): The URL of the StreamableHTTP server
+                     - headers (dict[str, Any], optional): Headers for the StreamableHTTP connection
+                     - timeout (timedelta, optional): Connection timeout
+                     - sse_read_timeout (timedelta, optional): SSE read timeout
+                     - terminate_on_close (bool, optional): Whether to terminate on close
+                     - allowed_tools (list[str], optional): List of tool names to allow from this server
+         """
+         from mcp import ClientSession, StdioServerParameters
+         from mcp import types as mcp_types
+
+         # Extract allowed_tools configuration if provided
+         allowed_tools = params.pop("allowed_tools", None)
+
+         # Determine server type and create appropriate parameters
+         if type == "stdio":
+             # Handle stdio server
+             from mcp.client.stdio import stdio_client
+
+             logger.info(f"Connecting to stdio MCP server with command: {params['command']} {params.get('args', [])}")
+
+             client_kwargs = {"command": params["command"]}
+             for key in ["args", "env", "cwd"]:
+                 if params.get(key) is not None:
+                     client_kwargs[key] = params[key]
+             server_params = StdioServerParameters(**client_kwargs)
+             read, write = await self.exit_stack.enter_async_context(stdio_client(server_params))
+         elif type == "sse":
+             # Handle SSE server
+             from mcp.client.sse import sse_client
+
+             logger.info(f"Connecting to SSE MCP server at: {params['url']}")
+
+             client_kwargs = {"url": params["url"]}
+             for key in ["headers", "timeout", "sse_read_timeout"]:
+                 if params.get(key) is not None:
+                     client_kwargs[key] = params[key]
+             read, write = await self.exit_stack.enter_async_context(sse_client(**client_kwargs))
+         elif type == "http":
+             # Handle StreamableHTTP server
+             from mcp.client.streamable_http import streamablehttp_client
+
+             logger.info(f"Connecting to StreamableHTTP MCP server at: {params['url']}")
+
+             client_kwargs = {"url": params["url"]}
+             for key in ["headers", "timeout", "sse_read_timeout", "terminate_on_close"]:
+                 if params.get(key) is not None:
+                     client_kwargs[key] = params[key]
+             read, write, _ = await self.exit_stack.enter_async_context(streamablehttp_client(**client_kwargs))
+             # ^ TODO: should we handle `get_session_id_callback`? (function to retrieve the current session ID)
+         else:
+             raise ValueError(f"Unsupported server type: {type}")
+
+         session = await self.exit_stack.enter_async_context(
+             ClientSession(
+                 read_stream=read,
+                 write_stream=write,
+                 client_info=mcp_types.Implementation(
+                     name="huggingface_hub.MCPClient",
+                     version=get_hf_hub_version(),
+                 ),
+             )
+         )
+
+         logger.debug("Initializing session...")
+         await session.initialize()
+
+         # List available tools
+         response = await session.list_tools()
+         logger.debug("Connected to server with tools: %s", [tool.name for tool in response.tools])
+
+         # Filter tools based on allowed_tools configuration
+         filtered_tools = response.tools
+
+         if allowed_tools is not None:
+             filtered_tools = [tool for tool in response.tools if tool.name in allowed_tools]
+             logger.debug(
+                 f"Tool filtering applied. Using {len(filtered_tools)} of {len(response.tools)} available tools: {[tool.name for tool in filtered_tools]}"
+             )
+
+         for tool in filtered_tools:
+             if tool.name in self.sessions:
+                 logger.warning(f"Tool '{tool.name}' already defined by another server. Skipping.")
+                 continue
+
+             # Map tool names to their server for later lookup
+             self.sessions[tool.name] = session
+
+             # Add tool to the list of available tools (for use in chat completions)
+             self.available_tools.append(
+                 ChatCompletionInputTool.parse_obj_as_instance(
+                     {
+                         "type": "function",
+                         "function": {
+                             "name": tool.name,
+                             "description": tool.description,
+                             "parameters": tool.inputSchema,
+                         },
+                     }
+                 )
+             )
+
+     async def process_single_turn_with_tools(
+         self,
+         messages: list[Union[dict, ChatCompletionInputMessage]],
+         exit_loop_tools: Optional[list[ChatCompletionInputTool]] = None,
+         exit_if_first_chunk_no_tool: bool = False,
+     ) -> AsyncIterable[Union[ChatCompletionStreamOutput, ChatCompletionInputMessage]]:
+         """Process a query using `self.payload_model` and available tools, yielding chunks and tool outputs.
+
+         Args:
+             messages (`list[dict]`):
+                 List of message objects representing the conversation history
+             exit_loop_tools (`list[ChatCompletionInputTool]`, *optional*):
+                 List of tools that should exit the generator when called
+             exit_if_first_chunk_no_tool (`bool`, *optional*):
+                 Exit if no tool is present in the first chunks. Defaults to False.
+
+         Yields:
+             [`ChatCompletionStreamOutput`] chunks or [`ChatCompletionInputMessage`] objects
+         """
+         # Prepare tools list based on options
+         tools = self.available_tools
+         if exit_loop_tools is not None:
+             tools = [*exit_loop_tools, *self.available_tools]
+
+         # Create the streaming request
+         response = await self.client.chat.completions.create(
+             model=self.payload_model,
+             messages=messages,
+             tools=tools,
+             tool_choice="auto",
+             stream=True,
+         )
+
+         message: dict[str, Any] = {"role": "unknown", "content": ""}
+         final_tool_calls: dict[int, ChatCompletionStreamOutputDeltaToolCall] = {}
+         num_of_chunks = 0
+
+         # Read from stream
+         async for chunk in response:
+             num_of_chunks += 1
+             delta = chunk.choices[0].delta if chunk.choices and len(chunk.choices) > 0 else None
+             if not delta:
+                 continue
+
+             # Process message
+             if delta.role:
+                 message["role"] = delta.role
+             if delta.content:
+                 message["content"] += delta.content
+
+             # Process tool calls
+             if delta.tool_calls:
+                 for tool_call in delta.tool_calls:
+                     idx = tool_call.index
+                     # first chunk for this tool call
+                     if idx not in final_tool_calls:
+                         final_tool_calls[idx] = tool_call
+                         if final_tool_calls[idx].function.arguments is None:
+                             final_tool_calls[idx].function.arguments = ""
+                         continue
+                     # safety before concatenating text to .function.arguments
+                     if final_tool_calls[idx].function.arguments is None:
+                         final_tool_calls[idx].function.arguments = ""
+
+                     if tool_call.function.arguments:
+                         final_tool_calls[idx].function.arguments += tool_call.function.arguments
+
+             # Optionally exit early if no tools in first chunks
+             if exit_if_first_chunk_no_tool and num_of_chunks <= 2 and len(final_tool_calls) == 0:
+                 return
+
+             # Yield each chunk to caller
+             yield chunk
+
+         # Add the assistant message with tool calls (if any) to messages
+         if message["content"] or final_tool_calls:
+             # if the role is unknown, set it to assistant
+             if message.get("role") == "unknown":
+                 message["role"] = "assistant"
+             # Convert final_tool_calls to the format expected by OpenAI
+             if final_tool_calls:
+                 tool_calls_list: list[dict[str, Any]] = []
+                 for tc in final_tool_calls.values():
+                     tool_calls_list.append(
+                         {
+                             "id": tc.id,
+                             "type": "function",
+                             "function": {
+                                 "name": tc.function.name,
+                                 "arguments": tc.function.arguments or "{}",
+                             },
+                         }
+                     )
+                 message["tool_calls"] = tool_calls_list
+             messages.append(message)
+
+         # Process tool calls one by one
+         for tool_call in final_tool_calls.values():
+             function_name = tool_call.function.name
+             if function_name is None:
+                 message = ChatCompletionInputMessage.parse_obj_as_instance(
+                     {
+                         "role": "tool",
+                         "tool_call_id": tool_call.id,
+                         "content": "Invalid tool call with no function name.",
+                     }
+                 )
+                 messages.append(message)
+                 yield message
+                 continue  # move to next tool call
+             try:
+                 function_args = json.loads(tool_call.function.arguments or "{}")
+             except json.JSONDecodeError as err:
+                 tool_message = {
+                     "role": "tool",
+                     "tool_call_id": tool_call.id,
+                     "name": function_name,
+                     "content": f"Invalid JSON generated by the model: {err}",
+                 }
+                 tool_message_as_obj = ChatCompletionInputMessage.parse_obj_as_instance(tool_message)
+                 messages.append(tool_message_as_obj)
+                 yield tool_message_as_obj
+                 continue  # move to next tool call
+
+             tool_message = {"role": "tool", "tool_call_id": tool_call.id, "content": "", "name": function_name}
+
+             # Check if this is an exit loop tool
+             if exit_loop_tools and function_name in [t.function.name for t in exit_loop_tools]:
+                 tool_message_as_obj = ChatCompletionInputMessage.parse_obj_as_instance(tool_message)
+                 messages.append(tool_message_as_obj)
+                 yield tool_message_as_obj
+                 return
+
+             # Execute tool call with the appropriate session
+             session = self.sessions.get(function_name)
+             if session is not None:
+                 try:
+                     result = await session.call_tool(function_name, function_args)
+                     tool_message["content"] = format_result(result)
+                 except Exception as err:
+                     tool_message["content"] = f"Error: MCP tool call failed with error message: {err}"
+             else:
+                 tool_message["content"] = f"Error: No session found for tool: {function_name}"
+
+             # Yield tool message
+             tool_message_as_obj = ChatCompletionInputMessage.parse_obj_as_instance(tool_message)
+             messages.append(tool_message_as_obj)
+             yield tool_message_as_obj
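
Taken together, the class above supports the following usage pattern. This is a minimal sketch based only on the signatures shown in this hunk, not code from the diff; it assumes the `mcp` dependency is installed and a Hugging Face token is configured, and the model/server choices mirror `DEFAULT_AGENT`:

```python
import asyncio

from huggingface_hub.inference._mcp.mcp_client import MCPClient


async def main() -> None:
    async with MCPClient(model="Qwen/Qwen2.5-72B-Instruct", provider="nebius") as client:
        # Register a local stdio MCP server; its tools become chat-completion tools.
        await client.add_mcp_server(type="stdio", command="npx", args=["@playwright/mcp@latest"])

        messages = [{"role": "user", "content": "Open huggingface.co and describe the page."}]
        # One model turn: yields streamed completion chunks and tool-result messages.
        async for item in client.process_single_turn_with_tools(messages):
            print(type(item).__name__)


asyncio.run(main())
```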
huggingface_hub/inference/_mcp/types.py
@@ -0,0 +1,45 @@
+ from typing import Literal, TypedDict, Union
+
+ from typing_extensions import NotRequired
+
+
+ class InputConfig(TypedDict, total=False):
+     id: str
+     description: str
+     type: str
+     password: bool
+
+
+ class StdioServerConfig(TypedDict):
+     type: Literal["stdio"]
+     command: str
+     args: list[str]
+     env: dict[str, str]
+     cwd: str
+     allowed_tools: NotRequired[list[str]]
+
+
+ class HTTPServerConfig(TypedDict):
+     type: Literal["http"]
+     url: str
+     headers: dict[str, str]
+     allowed_tools: NotRequired[list[str]]
+
+
+ class SSEServerConfig(TypedDict):
+     type: Literal["sse"]
+     url: str
+     headers: dict[str, str]
+     allowed_tools: NotRequired[list[str]]
+
+
+ ServerConfig = Union[StdioServerConfig, HTTPServerConfig, SSEServerConfig]
+
+
+ # AgentConfig root object
+ class AgentConfig(TypedDict):
+     model: str
+     provider: str
+     apiKey: NotRequired[str]
+     inputs: list[InputConfig]
+     servers: list[ServerConfig]
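
For illustration, a config literal that satisfies these TypedDicts. This is a hypothetical example (the URL is a placeholder) rather than code from the diff; a static type checker such as mypy will verify it against `AgentConfig`:

```python
from huggingface_hub.inference._mcp.types import AgentConfig

config: AgentConfig = {
    "model": "Qwen/Qwen2.5-72B-Instruct",
    "provider": "nebius",
    "inputs": [],  # required key; may be empty
    "servers": [
        # HTTPServerConfig: "type", "url" and "headers" are all required keys.
        {"type": "http", "url": "https://example.com/mcp", "headers": {}},
    ],
}
```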
huggingface_hub/inference/_mcp/utils.py
@@ -0,0 +1,128 @@
+ """
+ Utility functions for MCPClient and Tiny Agents.
+
+ Formatting utilities taken from the JS SDK: https://github.com/huggingface/huggingface.js/blob/main/packages/mcp-client/src/ResultFormatter.ts.
+ """
+
+ import json
+ from pathlib import Path
+ from typing import TYPE_CHECKING, Optional
+
+ from huggingface_hub import snapshot_download
+ from huggingface_hub.errors import EntryNotFoundError
+
+ from .constants import DEFAULT_AGENT, DEFAULT_REPO_ID, FILENAME_CONFIG, PROMPT_FILENAMES
+ from .types import AgentConfig
+
+
+ if TYPE_CHECKING:
+     from mcp import types as mcp_types
+
+
+ def format_result(result: "mcp_types.CallToolResult") -> str:
+     """
+     Formats a mcp.types.CallToolResult content into a human-readable string.
+
+     Args:
+         result (CallToolResult):
+             Object returned by mcp.ClientSession.call_tool.
+
+     Returns:
+         str:
+             A formatted string representing the content of the result.
+     """
+     content = result.content
+
+     if len(content) == 0:
+         return "[No content]"
+
+     formatted_parts: list[str] = []
+
+     for item in content:
+         if item.type == "text":
+             formatted_parts.append(item.text)
+
+         elif item.type == "image":
+             formatted_parts.append(
+                 f"[Binary Content: Image {item.mimeType}, {_get_base64_size(item.data)} bytes]\n"
+                 f"The task is complete and the content accessible to the User"
+             )
+
+         elif item.type == "audio":
+             formatted_parts.append(
+                 f"[Binary Content: Audio {item.mimeType}, {_get_base64_size(item.data)} bytes]\n"
+                 f"The task is complete and the content accessible to the User"
+             )
+
+         elif item.type == "resource":
+             resource = item.resource
+
+             if hasattr(resource, "text"):
+                 formatted_parts.append(resource.text)
+
+             elif hasattr(resource, "blob"):
+                 formatted_parts.append(
+                     f"[Binary Content ({resource.uri}): {resource.mimeType}, {_get_base64_size(resource.blob)} bytes]\n"
+                     f"The task is complete and the content accessible to the User"
+                 )
+
+     return "\n".join(formatted_parts)
+
+
+ def _get_base64_size(base64_str: str) -> int:
+     """Estimate the byte size of a base64-encoded string."""
+     # Remove any prefix like "data:image/png;base64,"
+     if "," in base64_str:
+         base64_str = base64_str.split(",")[1]
+
+     padding = 0
+     if base64_str.endswith("=="):
+         padding = 2
+     elif base64_str.endswith("="):
+         padding = 1
+
+     return (len(base64_str) * 3) // 4 - padding
+
+
+ def _load_agent_config(agent_path: Optional[str]) -> tuple[AgentConfig, Optional[str]]:
+     """Load server config and prompt."""
+
+     def _read_dir(directory: Path) -> tuple[AgentConfig, Optional[str]]:
+         cfg_file = directory / FILENAME_CONFIG
+         if not cfg_file.exists():
+             raise FileNotFoundError(f"Config file not found in {directory}! Please make sure it exists locally.")
+
+         config: AgentConfig = json.loads(cfg_file.read_text(encoding="utf-8"))
+         prompt: Optional[str] = None
+         for filename in PROMPT_FILENAMES:
+             prompt_file = directory / filename
+             if prompt_file.exists():
+                 prompt = prompt_file.read_text(encoding="utf-8")
+                 break
+         return config, prompt
+
+     if agent_path is None:
+         return DEFAULT_AGENT, None  # type: ignore[return-value]
+
+     path = Path(agent_path).expanduser()
+
+     if path.is_file():
+         return json.loads(path.read_text(encoding="utf-8")), None
+
+     if path.is_dir():
+         return _read_dir(path)
+
+     # fetch from the Hub
+     try:
+         repo_dir = Path(
+             snapshot_download(
+                 repo_id=DEFAULT_REPO_ID,
+                 allow_patterns=f"{agent_path}/*",
+                 repo_type="dataset",
+             )
+         )
+         return _read_dir(repo_dir / agent_path)
+     except Exception as err:
+         raise EntryNotFoundError(
+             f"Agent {agent_path} not found in tiny-agents/tiny-agents! Please make sure it exists in https://huggingface.co/datasets/tiny-agents/tiny-agents."
+         ) from err
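
A quick worked check of the `_get_base64_size` arithmetic, assuming the module is importable from an installed huggingface_hub: base64 encodes 3 bytes per 4 characters, minus one byte per padding character.

```python
from base64 import b64encode

from huggingface_hub.inference._mcp.utils import _get_base64_size

payload = b64encode(b"hello").decode()  # "aGVsbG8=": 8 chars, one '=' padding
assert _get_base64_size(payload) == 5  # (8 * 3) // 4 - 1 == 5 bytes
# A data-URL prefix is stripped before estimating:
assert _get_base64_size("data:text/plain;base64," + payload) == 5
```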