huggingface-hub 0.31.0rc0__py3-none-any.whl → 1.1.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- huggingface_hub/__init__.py +145 -46
- huggingface_hub/_commit_api.py +168 -119
- huggingface_hub/_commit_scheduler.py +15 -15
- huggingface_hub/_inference_endpoints.py +15 -12
- huggingface_hub/_jobs_api.py +301 -0
- huggingface_hub/_local_folder.py +18 -3
- huggingface_hub/_login.py +31 -63
- huggingface_hub/_oauth.py +460 -0
- huggingface_hub/_snapshot_download.py +239 -80
- huggingface_hub/_space_api.py +5 -5
- huggingface_hub/_tensorboard_logger.py +15 -19
- huggingface_hub/_upload_large_folder.py +172 -76
- huggingface_hub/_webhooks_payload.py +3 -3
- huggingface_hub/_webhooks_server.py +13 -25
- huggingface_hub/{commands → cli}/__init__.py +1 -15
- huggingface_hub/cli/_cli_utils.py +173 -0
- huggingface_hub/cli/auth.py +147 -0
- huggingface_hub/cli/cache.py +841 -0
- huggingface_hub/cli/download.py +189 -0
- huggingface_hub/cli/hf.py +60 -0
- huggingface_hub/cli/inference_endpoints.py +377 -0
- huggingface_hub/cli/jobs.py +772 -0
- huggingface_hub/cli/lfs.py +175 -0
- huggingface_hub/cli/repo.py +315 -0
- huggingface_hub/cli/repo_files.py +94 -0
- huggingface_hub/{commands/env.py → cli/system.py} +10 -13
- huggingface_hub/cli/upload.py +294 -0
- huggingface_hub/cli/upload_large_folder.py +117 -0
- huggingface_hub/community.py +20 -12
- huggingface_hub/constants.py +38 -53
- huggingface_hub/dataclasses.py +609 -0
- huggingface_hub/errors.py +80 -30
- huggingface_hub/fastai_utils.py +30 -41
- huggingface_hub/file_download.py +435 -351
- huggingface_hub/hf_api.py +2050 -1124
- huggingface_hub/hf_file_system.py +269 -152
- huggingface_hub/hub_mixin.py +43 -63
- huggingface_hub/inference/_client.py +347 -434
- huggingface_hub/inference/_common.py +133 -121
- huggingface_hub/inference/_generated/_async_client.py +397 -541
- huggingface_hub/inference/_generated/types/__init__.py +5 -1
- huggingface_hub/inference/_generated/types/automatic_speech_recognition.py +3 -3
- huggingface_hub/inference/_generated/types/base.py +10 -7
- huggingface_hub/inference/_generated/types/chat_completion.py +59 -23
- huggingface_hub/inference/_generated/types/depth_estimation.py +2 -2
- huggingface_hub/inference/_generated/types/document_question_answering.py +2 -2
- huggingface_hub/inference/_generated/types/feature_extraction.py +2 -2
- huggingface_hub/inference/_generated/types/fill_mask.py +2 -2
- huggingface_hub/inference/_generated/types/image_to_image.py +6 -2
- huggingface_hub/inference/_generated/types/image_to_video.py +60 -0
- huggingface_hub/inference/_generated/types/sentence_similarity.py +3 -3
- huggingface_hub/inference/_generated/types/summarization.py +2 -2
- huggingface_hub/inference/_generated/types/table_question_answering.py +5 -5
- huggingface_hub/inference/_generated/types/text2text_generation.py +2 -2
- huggingface_hub/inference/_generated/types/text_generation.py +10 -10
- huggingface_hub/inference/_generated/types/text_to_video.py +2 -2
- huggingface_hub/inference/_generated/types/token_classification.py +2 -2
- huggingface_hub/inference/_generated/types/translation.py +2 -2
- huggingface_hub/inference/_generated/types/zero_shot_classification.py +2 -2
- huggingface_hub/inference/_generated/types/zero_shot_image_classification.py +2 -2
- huggingface_hub/inference/_generated/types/zero_shot_object_detection.py +1 -3
- huggingface_hub/inference/_mcp/__init__.py +0 -0
- huggingface_hub/inference/_mcp/_cli_hacks.py +88 -0
- huggingface_hub/inference/_mcp/agent.py +100 -0
- huggingface_hub/inference/_mcp/cli.py +247 -0
- huggingface_hub/inference/_mcp/constants.py +81 -0
- huggingface_hub/inference/_mcp/mcp_client.py +395 -0
- huggingface_hub/inference/_mcp/types.py +45 -0
- huggingface_hub/inference/_mcp/utils.py +128 -0
- huggingface_hub/inference/_providers/__init__.py +82 -7
- huggingface_hub/inference/_providers/_common.py +129 -27
- huggingface_hub/inference/_providers/black_forest_labs.py +6 -6
- huggingface_hub/inference/_providers/cerebras.py +1 -1
- huggingface_hub/inference/_providers/clarifai.py +13 -0
- huggingface_hub/inference/_providers/cohere.py +20 -3
- huggingface_hub/inference/_providers/fal_ai.py +183 -56
- huggingface_hub/inference/_providers/featherless_ai.py +38 -0
- huggingface_hub/inference/_providers/fireworks_ai.py +18 -0
- huggingface_hub/inference/_providers/groq.py +9 -0
- huggingface_hub/inference/_providers/hf_inference.py +69 -30
- huggingface_hub/inference/_providers/hyperbolic.py +4 -4
- huggingface_hub/inference/_providers/nebius.py +33 -5
- huggingface_hub/inference/_providers/novita.py +5 -5
- huggingface_hub/inference/_providers/nscale.py +44 -0
- huggingface_hub/inference/_providers/openai.py +3 -1
- huggingface_hub/inference/_providers/publicai.py +6 -0
- huggingface_hub/inference/_providers/replicate.py +31 -13
- huggingface_hub/inference/_providers/sambanova.py +18 -4
- huggingface_hub/inference/_providers/scaleway.py +28 -0
- huggingface_hub/inference/_providers/together.py +20 -5
- huggingface_hub/inference/_providers/wavespeed.py +138 -0
- huggingface_hub/inference/_providers/zai_org.py +17 -0
- huggingface_hub/lfs.py +33 -100
- huggingface_hub/repocard.py +34 -38
- huggingface_hub/repocard_data.py +57 -57
- huggingface_hub/serialization/__init__.py +0 -1
- huggingface_hub/serialization/_base.py +12 -15
- huggingface_hub/serialization/_dduf.py +8 -8
- huggingface_hub/serialization/_torch.py +69 -69
- huggingface_hub/utils/__init__.py +19 -8
- huggingface_hub/utils/_auth.py +7 -7
- huggingface_hub/utils/_cache_manager.py +92 -147
- huggingface_hub/utils/_chunk_utils.py +2 -3
- huggingface_hub/utils/_deprecation.py +1 -1
- huggingface_hub/utils/_dotenv.py +55 -0
- huggingface_hub/utils/_experimental.py +7 -5
- huggingface_hub/utils/_fixes.py +0 -10
- huggingface_hub/utils/_git_credential.py +5 -5
- huggingface_hub/utils/_headers.py +8 -30
- huggingface_hub/utils/_http.py +398 -239
- huggingface_hub/utils/_pagination.py +4 -4
- huggingface_hub/utils/_parsing.py +98 -0
- huggingface_hub/utils/_paths.py +5 -5
- huggingface_hub/utils/_runtime.py +61 -24
- huggingface_hub/utils/_safetensors.py +21 -21
- huggingface_hub/utils/_subprocess.py +9 -9
- huggingface_hub/utils/_telemetry.py +4 -4
- huggingface_hub/{commands/_cli_utils.py → utils/_terminal.py} +4 -4
- huggingface_hub/utils/_typing.py +25 -5
- huggingface_hub/utils/_validators.py +55 -74
- huggingface_hub/utils/_verification.py +167 -0
- huggingface_hub/utils/_xet.py +64 -17
- huggingface_hub/utils/_xet_progress_reporting.py +162 -0
- huggingface_hub/utils/insecure_hashlib.py +3 -5
- huggingface_hub/utils/logging.py +8 -11
- huggingface_hub/utils/tqdm.py +5 -4
- {huggingface_hub-0.31.0rc0.dist-info → huggingface_hub-1.1.3.dist-info}/METADATA +94 -85
- huggingface_hub-1.1.3.dist-info/RECORD +155 -0
- {huggingface_hub-0.31.0rc0.dist-info → huggingface_hub-1.1.3.dist-info}/WHEEL +1 -1
- huggingface_hub-1.1.3.dist-info/entry_points.txt +6 -0
- huggingface_hub/commands/delete_cache.py +0 -474
- huggingface_hub/commands/download.py +0 -200
- huggingface_hub/commands/huggingface_cli.py +0 -61
- huggingface_hub/commands/lfs.py +0 -200
- huggingface_hub/commands/repo_files.py +0 -128
- huggingface_hub/commands/scan_cache.py +0 -181
- huggingface_hub/commands/tag.py +0 -159
- huggingface_hub/commands/upload.py +0 -314
- huggingface_hub/commands/upload_large_folder.py +0 -129
- huggingface_hub/commands/user.py +0 -304
- huggingface_hub/commands/version.py +0 -37
- huggingface_hub/inference_api.py +0 -217
- huggingface_hub/keras_mixin.py +0 -500
- huggingface_hub/repository.py +0 -1477
- huggingface_hub/serialization/_tensorflow.py +0 -95
- huggingface_hub/utils/_hf_folder.py +0 -68
- huggingface_hub-0.31.0rc0.dist-info/RECORD +0 -135
- huggingface_hub-0.31.0rc0.dist-info/entry_points.txt +0 -6
- {huggingface_hub-0.31.0rc0.dist-info → huggingface_hub-1.1.3.dist-info/licenses}/LICENSE +0 -0
- {huggingface_hub-0.31.0rc0.dist-info → huggingface_hub-1.1.3.dist-info}/top_level.txt +0 -0
```diff
--- a/huggingface_hub/inference/_generated/types/zero_shot_classification.py
+++ b/huggingface_hub/inference/_generated/types/zero_shot_classification.py
@@ -3,7 +3,7 @@
 # See:
 # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
 # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
-from typing import List, Optional
+from typing import Optional
 
 from .base import BaseInferenceType, dataclass_with_extra
 
@@ -12,7 +12,7 @@ from .base import BaseInferenceType, dataclass_with_extra
 class ZeroShotClassificationParameters(BaseInferenceType):
     """Additional inference parameters for Zero Shot Classification"""
 
-    candidate_labels: List[str]
+    candidate_labels: list[str]
     """The set of possible class labels to classify the text into."""
     hypothesis_template: Optional[str] = None
     """The sentence used in conjunction with `candidate_labels` to attempt the text
```
```diff
--- a/huggingface_hub/inference/_generated/types/zero_shot_image_classification.py
+++ b/huggingface_hub/inference/_generated/types/zero_shot_image_classification.py
@@ -3,7 +3,7 @@
 # See:
 # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
 # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
-from typing import List, Optional
+from typing import Optional
 
 from .base import BaseInferenceType, dataclass_with_extra
 
@@ -12,7 +12,7 @@ from .base import BaseInferenceType, dataclass_with_extra
 class ZeroShotImageClassificationParameters(BaseInferenceType):
     """Additional inference parameters for Zero Shot Image Classification"""
 
-    candidate_labels: List[str]
+    candidate_labels: list[str]
     """The candidate labels for this image"""
     hypothesis_template: Optional[str] = None
     """The sentence used in conjunction with `candidate_labels` to attempt the image
```
```diff
--- a/huggingface_hub/inference/_generated/types/zero_shot_object_detection.py
+++ b/huggingface_hub/inference/_generated/types/zero_shot_object_detection.py
@@ -3,8 +3,6 @@
 # See:
 # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
 # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
-from typing import List
-
 from .base import BaseInferenceType, dataclass_with_extra
 
 
@@ -12,7 +10,7 @@ from .base import BaseInferenceType, dataclass_with_extra
 class ZeroShotObjectDetectionParameters(BaseInferenceType):
     """Additional inference parameters for Zero Shot Object Detection"""
 
-    candidate_labels: List[str]
+    candidate_labels: list[str]
     """The candidate labels for this image"""
 
 
```
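Across all three generated type modules the change is identical: `typing.List[str]` annotations become the built-in `list[str]` generic (PEP 585) and the now-unused `List` import is dropped; built-in generics require Python 3.9+, which presumably matches the minimum Python version of the 1.x line. A minimal usage sketch of a migrated dataclass; the class, module path, and field names come from the diff above, while the label values are made up:

```python
# Hypothetical usage of the migrated parameters class (label values are illustrative).
from huggingface_hub.inference._generated.types.zero_shot_classification import (
    ZeroShotClassificationParameters,
)

params = ZeroShotClassificationParameters(
    candidate_labels=["positive", "negative"],  # annotated as list[str] after this diff
    hypothesis_template="This example is {}.",
)
print(params.candidate_labels)
```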
huggingface_hub/inference/_mcp/__init__.py: file without changes (empty file).
```diff
--- /dev/null
+++ b/huggingface_hub/inference/_mcp/_cli_hacks.py
@@ -0,0 +1,88 @@
+import asyncio
+import sys
+from functools import partial
+
+import typer
+
+
+def _patch_anyio_open_process():
+    """
+    Patch anyio.open_process to allow detached processes on Windows and Unix-like systems.
+
+    This is necessary to prevent the MCP client from being interrupted by Ctrl+C when running in the CLI.
+    """
+    import subprocess
+
+    import anyio
+
+    if getattr(anyio, "_tiny_agents_patched", False):
+        return
+    anyio._tiny_agents_patched = True  # ty: ignore[invalid-assignment]
+
+    original_open_process = anyio.open_process
+
+    if sys.platform == "win32":
+        # On Windows, we need to set the creation flags to create a new process group
+
+        async def open_process_in_new_group(*args, **kwargs):
+            """
+            Wrapper for open_process to handle Windows-specific process creation flags.
+            """
+            # Ensure we pass the creation flags for Windows
+            kwargs.setdefault("creationflags", subprocess.CREATE_NEW_PROCESS_GROUP)
+            return await original_open_process(*args, **kwargs)
+
+        anyio.open_process = open_process_in_new_group  # ty: ignore[invalid-assignment]
+    else:
+        # For Unix-like systems, we can use setsid to create a new session
+        async def open_process_in_new_group(*args, **kwargs):
+            """
+            Wrapper for open_process to handle Unix-like systems with start_new_session=True.
+            """
+            kwargs.setdefault("start_new_session", True)
+            return await original_open_process(*args, **kwargs)
+
+        anyio.open_process = open_process_in_new_group  # ty: ignore[invalid-assignment]
+
+
+async def _async_prompt(exit_event: asyncio.Event, prompt: str = "» ") -> str:
+    """
+    Asynchronous prompt function that reads input from stdin without blocking.
+
+    This function is designed to work in an asynchronous context, allowing the event loop to gracefully stop it (e.g. on Ctrl+C).
+
+    Alternatively, we could use https://github.com/vxgmichel/aioconsole but that would be an additional dependency.
+    """
+    loop = asyncio.get_event_loop()
+
+    if sys.platform == "win32":
+        # Windows: Use run_in_executor to avoid blocking the event loop
+        # Degraded solution: this is not ideal as user will have to CTRL+C once more to stop the prompt (and it'll not be graceful)
+        return await loop.run_in_executor(None, partial(typer.prompt, prompt, prompt_suffix=" "))
+    else:
+        # UNIX-like: Use loop.add_reader for non-blocking stdin read
+        future = loop.create_future()
+
+        def on_input():
+            line = sys.stdin.readline()
+            loop.remove_reader(sys.stdin)
+            future.set_result(line)
+
+        print(prompt, end=" ", flush=True)
+        loop.add_reader(sys.stdin, on_input)  # not supported on Windows
+
+        # Wait for user input or exit event
+        # Wait until either the user hits enter or exit_event is set
+        exit_task = asyncio.create_task(exit_event.wait())
+        await asyncio.wait(
+            [future, exit_task],
+            return_when=asyncio.FIRST_COMPLETED,
+        )
+
+        # Check which one has been triggered
+        if exit_event.is_set():
+            future.cancel()
+            return ""
+
+        line = await future
+        return line.strip()
```
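On Unix-like systems, `_async_prompt` keeps the event loop responsive by registering stdin with `loop.add_reader` instead of blocking in `input()`. Below is a self-contained sketch of the same pattern, assuming a POSIX event loop; the function name and timeout are illustrative, not part of huggingface_hub:

```python
# Standalone sketch of the non-blocking stdin pattern used by _async_prompt
# (Unix-like systems only; names here are illustrative, not huggingface_hub API).
import asyncio
import sys


async def read_line_or_timeout(timeout: float = 5.0) -> str:
    loop = asyncio.get_running_loop()
    future: asyncio.Future = loop.create_future()

    def on_input() -> None:
        # Invoked by the event loop as soon as stdin becomes readable.
        line = sys.stdin.readline()
        loop.remove_reader(sys.stdin)
        future.set_result(line.strip())

    loop.add_reader(sys.stdin, on_input)  # not supported on Windows event loops
    try:
        return await asyncio.wait_for(future, timeout)
    except asyncio.TimeoutError:
        loop.remove_reader(sys.stdin)
        return ""


if __name__ == "__main__":
    print(asyncio.run(read_line_or_timeout()))
```

Because `add_reader` is unavailable for console handles on Windows, the module falls back there to `run_in_executor` plus `typer.prompt`, at the cost of the less graceful Ctrl+C behavior noted in its comments.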
```diff
--- /dev/null
+++ b/huggingface_hub/inference/_mcp/agent.py
@@ -0,0 +1,100 @@
+from __future__ import annotations
+
+import asyncio
+from typing import AsyncGenerator, Iterable, Optional, Union
+
+from huggingface_hub import ChatCompletionInputMessage, ChatCompletionStreamOutput, MCPClient
+
+from .._providers import PROVIDER_OR_POLICY_T
+from .constants import DEFAULT_SYSTEM_PROMPT, EXIT_LOOP_TOOLS, MAX_NUM_TURNS
+from .types import ServerConfig
+
+
+class Agent(MCPClient):
+    """
+    Implementation of a Simple Agent, which is a simple while loop built right on top of an [`MCPClient`].
+
+    > [!WARNING]
+    > This class is experimental and might be subject to breaking changes in the future without prior notice.
+
+    Args:
+        model (`str`, *optional*):
+            The model to run inference with. Can be a model id hosted on the Hugging Face Hub, e.g. `meta-llama/Meta-Llama-3-8B-Instruct`
+            or a URL to a deployed Inference Endpoint or other local or remote endpoint.
+        servers (`Iterable[dict]`):
+            MCP servers to connect to. Each server is a dictionary containing a `type` key and a `config` key. The `type` key can be `"stdio"` or `"sse"`, and the `config` key is a dictionary of arguments for the server.
+        provider (`str`, *optional*):
+            Name of the provider to use for inference. Defaults to "auto" i.e. the first of the providers available for the model, sorted by the user's order in https://hf.co/settings/inference-providers.
+            If model is a URL or `base_url` is passed, then `provider` is not used.
+        base_url (`str`, *optional*):
+            The base URL to run inference. Defaults to None.
+        api_key (`str`, *optional*):
+            Token to use for authentication. Will default to the locally Hugging Face saved token if not provided. You can also use your own provider API key to interact directly with the provider's service.
+        prompt (`str`, *optional*):
+            The system prompt to use for the agent. Defaults to the default system prompt in `constants.py`.
+    """
+
+    def __init__(
+        self,
+        *,
+        model: Optional[str] = None,
+        servers: Iterable[ServerConfig],
+        provider: Optional[PROVIDER_OR_POLICY_T] = None,
+        base_url: Optional[str] = None,
+        api_key: Optional[str] = None,
+        prompt: Optional[str] = None,
+    ):
+        super().__init__(model=model, provider=provider, base_url=base_url, api_key=api_key)
+        self._servers_cfg = list(servers)
+        self.messages: list[Union[dict, ChatCompletionInputMessage]] = [
+            {"role": "system", "content": prompt or DEFAULT_SYSTEM_PROMPT}
+        ]
+
+    async def load_tools(self) -> None:
+        for cfg in self._servers_cfg:
+            await self.add_mcp_server(**cfg)
+
+    async def run(
+        self,
+        user_input: str,
+        *,
+        abort_event: Optional[asyncio.Event] = None,
+    ) -> AsyncGenerator[Union[ChatCompletionStreamOutput, ChatCompletionInputMessage], None]:
+        """
+        Run the agent with the given user input.
+
+        Args:
+            user_input (`str`):
+                The user input to run the agent with.
+            abort_event (`asyncio.Event`, *optional*):
+                An event that can be used to abort the agent. If the event is set, the agent will stop running.
+        """
+        self.messages.append({"role": "user", "content": user_input})
+
+        num_turns: int = 0
+        next_turn_should_call_tools = True
+
+        while True:
+            if abort_event and abort_event.is_set():
+                return
+
+            async for item in self.process_single_turn_with_tools(
+                self.messages,
+                exit_loop_tools=EXIT_LOOP_TOOLS,
+                exit_if_first_chunk_no_tool=(num_turns > 0 and next_turn_should_call_tools),
+            ):
+                yield item
+
+            num_turns += 1
+            last = self.messages[-1]
+
+            if last.get("role") == "tool" and last.get("name") in {t.function.name for t in EXIT_LOOP_TOOLS}:
+                return
+
+            if last.get("role") != "tool" and num_turns > MAX_NUM_TURNS:
+                return
+
+            if last.get("role") != "tool" and next_turn_should_call_tools:
+                return
+
+            next_turn_should_call_tools = last.get("role") != "tool"
```
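For orientation, here is a minimal, hypothetical driver for this class, mirroring how `cli.py` (next diff) consumes it: `Agent` works as an async context manager (inherited from `MCPClient`), `load_tools()` connects the configured servers, and `run()` is an async generator yielding stream chunks and tool messages. The model, provider, and server values below are illustrative only:

```python
import asyncio

from huggingface_hub.inference._mcp.agent import Agent


async def main() -> None:
    # Server entry mirrors a DEFAULT_AGENT entry in constants.py (last diff below);
    # the exact per-server schema is whatever add_mcp_server(**cfg) accepts.
    servers = [{"type": "stdio", "command": "npx", "args": ["@playwright/mcp@latest"]}]
    async with Agent(
        model="Qwen/Qwen2.5-72B-Instruct",  # illustrative model id
        provider="nebius",
        servers=servers,
    ) as agent:
        await agent.load_tools()
        async for item in agent.run("List the tools you have access to"):
            print(item)


asyncio.run(main())
```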
```diff
--- /dev/null
+++ b/huggingface_hub/inference/_mcp/cli.py
@@ -0,0 +1,247 @@
+import asyncio
+import os
+import signal
+import traceback
+from typing import Optional
+
+import typer
+from rich import print
+
+from ._cli_hacks import _async_prompt, _patch_anyio_open_process
+from .agent import Agent
+from .utils import _load_agent_config
+
+
+app = typer.Typer(
+    rich_markup_mode="rich",
+    help="A squad of lightweight composable AI applications built on Hugging Face's Inference Client and MCP stack.",
+)
+
+run_cli = typer.Typer(
+    name="run",
+    help="Run the Agent in the CLI",
+    invoke_without_command=True,
+)
+app.add_typer(run_cli, name="run")
+
+
+async def run_agent(
+    agent_path: Optional[str],
+) -> None:
+    """
+    Tiny Agent loop.
+
+    Args:
+        agent_path (`str`, *optional*):
+            Path to a local folder containing an `agent.json` and optionally a custom `PROMPT.md` or `AGENTS.md` file or a built-in agent stored in a Hugging Face dataset.
+
+    """
+    _patch_anyio_open_process()  # Hacky way to prevent stdio connections to be stopped by Ctrl+C
+
+    config, prompt = _load_agent_config(agent_path)
+
+    inputs = config.get("inputs", [])
+    servers = config.get("servers", [])
+
+    abort_event = asyncio.Event()
+    exit_event = asyncio.Event()
+    first_sigint = True
+
+    loop = asyncio.get_running_loop()
+    original_sigint_handler = signal.getsignal(signal.SIGINT)
+
+    def _sigint_handler() -> None:
+        nonlocal first_sigint
+        if first_sigint:
+            first_sigint = False
+            abort_event.set()
+            print("\n[red]Interrupted. Press Ctrl+C again to quit.[/red]", flush=True)
+            return
+
+        print("\n[red]Exiting...[/red]", flush=True)
+        exit_event.set()
+
+    try:
+        sigint_registered_in_loop = False
+        try:
+            loop.add_signal_handler(signal.SIGINT, _sigint_handler)
+            sigint_registered_in_loop = True
+        except (AttributeError, NotImplementedError):
+            # Windows (or any loop that doesn't support it) : fall back to sync
+            signal.signal(signal.SIGINT, lambda *_: _sigint_handler())
+
+        # Handle inputs (i.e. env variables injection)
+        resolved_inputs: dict[str, str] = {}
+
+        if len(inputs) > 0:
+            print(
+                "[bold blue]Some initial inputs are required by the agent. "
+                "Please provide a value or leave empty to load from env.[/bold blue]"
+            )
+            for input_item in inputs:
+                input_id = input_item["id"]
+                description = input_item["description"]
+                env_special_value = f"${{input:{input_id}}}"
+
+                # Check if the input is used by any server or as an apiKey
+                input_usages = set()
+                for server in servers:
+                    # Check stdio's "env" and http/sse's "headers" mappings
+                    env_or_headers = server.get("env", {}) if server["type"] == "stdio" else server.get("headers", {})
+                    for key, value in env_or_headers.items():
+                        if env_special_value in value:
+                            input_usages.add(key)
+
+                raw_api_key = config.get("apiKey")
+                if isinstance(raw_api_key, str) and env_special_value in raw_api_key:
+                    input_usages.add("apiKey")
+
+                if not input_usages:
+                    print(
+                        f"[yellow]Input '{input_id}' defined in config but not used by any server or as an API key."
+                        " Skipping.[/yellow]"
+                    )
+                    continue
+
+                # Prompt user for input
+                env_variable_key = input_id.replace("-", "_").upper()
+                print(
+                    f"[blue] • {input_id}[/blue]: {description}. (default: load from {env_variable_key}).",
+                    end=" ",
+                )
+                user_input = (await _async_prompt(exit_event=exit_event)).strip()
+                if exit_event.is_set():
+                    return
+
+                # Fallback to environment variable when user left blank
+                final_value = user_input
+                if not final_value:
+                    final_value = os.getenv(env_variable_key, "")
+                    if final_value:
+                        print(f"[green]Value successfully loaded from '{env_variable_key}'[/green]")
+                    else:
+                        print(
+                            f"[yellow]No value found for '{env_variable_key}' in environment variables. Continuing.[/yellow]"
+                        )
+                resolved_inputs[input_id] = final_value
+
+                # Inject resolved value (can be empty) into stdio's env or http/sse's headers
+                for server in servers:
+                    env_or_headers = server.get("env", {}) if server["type"] == "stdio" else server.get("headers", {})
+                    for key, value in env_or_headers.items():
+                        if env_special_value in value:
+                            env_or_headers[key] = env_or_headers[key].replace(env_special_value, final_value)
+
+            print()
+
+        raw_api_key = config.get("apiKey")
+        if isinstance(raw_api_key, str):
+            substituted_api_key = raw_api_key
+            for input_id, val in resolved_inputs.items():
+                substituted_api_key = substituted_api_key.replace(f"${{input:{input_id}}}", val)
+            config["apiKey"] = substituted_api_key
+        # Main agent loop
+        async with Agent(
+            provider=config.get("provider"),  # type: ignore[arg-type]
+            model=config.get("model"),
+            base_url=config.get("endpointUrl"),  # type: ignore[arg-type]
+            api_key=config.get("apiKey"),
+            servers=servers,  # type: ignore[arg-type]
+            prompt=prompt,
+        ) as agent:
+            await agent.load_tools()
+            print(f"[bold blue]Agent loaded with {len(agent.available_tools)} tools:[/bold blue]")
+            for t in agent.available_tools:
+                print(f"[blue] • {t.function.name}[/blue]")
+
+            while True:
+                abort_event.clear()
+
+                # Check if we should exit
+                if exit_event.is_set():
+                    return
+
+                try:
+                    user_input = await _async_prompt(exit_event=exit_event)
+                    first_sigint = True
+                except EOFError:
+                    print("\n[red]EOF received, exiting.[/red]", flush=True)
+                    break
+                except KeyboardInterrupt:
+                    if not first_sigint and abort_event.is_set():
+                        continue
+                    else:
+                        print("\n[red]Keyboard interrupt during input processing.[/red]", flush=True)
+                        break
+
+                try:
+                    async for chunk in agent.run(user_input, abort_event=abort_event):
+                        if abort_event.is_set() and not first_sigint:
+                            break
+                        if exit_event.is_set():
+                            return
+
+                        if hasattr(chunk, "choices"):
+                            delta = chunk.choices[0].delta
+                            if delta.content:
+                                print(delta.content, end="", flush=True)
+                            if delta.tool_calls:
+                                for call in delta.tool_calls:
+                                    if call.id:
+                                        print(f"<Tool {call.id}>", end="")
+                                    if call.function.name:
+                                        print(f"{call.function.name}", end=" ")
+                                    if call.function.arguments:
+                                        print(f"{call.function.arguments}", end="")
+                        else:
+                            print(
+                                f"\n\n[green]Tool[{chunk.name}] {chunk.tool_call_id}\n{chunk.content}[/green]\n",
+                                flush=True,
+                            )
+
+                    print()
+
+                except Exception as e:
+                    tb_str = traceback.format_exc()
+                    print(f"\n[bold red]Error during agent run: {e}\n{tb_str}[/bold red]", flush=True)
+                    first_sigint = True  # Allow graceful interrupt for the next command
+
+    except Exception as e:
+        tb_str = traceback.format_exc()
+        print(f"\n[bold red]An unexpected error occurred: {e}\n{tb_str}[/bold red]", flush=True)
+        raise e
+
+    finally:
+        if sigint_registered_in_loop:
+            try:
+                loop.remove_signal_handler(signal.SIGINT)
+            except (AttributeError, NotImplementedError):
+                pass
+        else:
+            signal.signal(signal.SIGINT, original_sigint_handler)
+
+
+@run_cli.callback()
+def run(
+    path: Optional[str] = typer.Argument(
+        None,
+        help=(
+            "Path to a local folder containing an agent.json file or a built-in agent "
+            "stored in the 'tiny-agents/tiny-agents' Hugging Face dataset "
+            "(https://huggingface.co/datasets/tiny-agents/tiny-agents)"
+        ),
+        show_default=False,
+    ),
+):
+    try:
+        asyncio.run(run_agent(path))
+    except KeyboardInterrupt:
+        print("\n[red]Application terminated by KeyboardInterrupt.[/red]", flush=True)
+        raise typer.Exit(code=130)
+    except Exception as e:
+        print(f"\n[bold red]An unexpected error occurred: {e}[/bold red]", flush=True)
+        raise e
+
+
+if __name__ == "__main__":
+    app()
```
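The most intricate part of `run_agent` is resolving `${input:<id>}` placeholders: each declared input is prompted for (falling back to the environment variable derived from its id, e.g. `hf-token` → `HF_TOKEN`), and the resolved value is substituted into stdio `env` maps, http/sse `headers` maps, and the top-level `apiKey`. A stripped-down sketch of that substitution logic, with hypothetical values:

```python
# Stripped-down sketch of the `${input:<id>}` substitution performed in run_agent
# above. Values are hypothetical; the real CLI prompts interactively first and
# only then falls back to the derived environment variable.
import os

resolved_inputs = {"hf-token": os.getenv("HF_TOKEN", "hf_dummy")}

server = {"type": "sse", "headers": {"Authorization": "Bearer ${input:hf-token}"}}
env_or_headers = server.get("env", {}) if server["type"] == "stdio" else server.get("headers", {})

for input_id, final_value in resolved_inputs.items():
    placeholder = f"${{input:{input_id}}}"
    for key, value in env_or_headers.items():
        if placeholder in value:
            env_or_headers[key] = value.replace(placeholder, final_value)

print(server["headers"]["Authorization"])  # -> "Bearer hf_dummy" (if HF_TOKEN is unset)
```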
```diff
--- /dev/null
+++ b/huggingface_hub/inference/_mcp/constants.py
@@ -0,0 +1,81 @@
+from __future__ import annotations
+
+import sys
+from pathlib import Path
+
+from huggingface_hub import ChatCompletionInputTool
+
+
+FILENAME_CONFIG = "agent.json"
+PROMPT_FILENAMES = ("PROMPT.md", "AGENTS.md")
+
+DEFAULT_AGENT = {
+    "model": "Qwen/Qwen2.5-72B-Instruct",
+    "provider": "nebius",
+    "servers": [
+        {
+            "type": "stdio",
+            "command": "npx",
+            "args": [
+                "-y",
+                "@modelcontextprotocol/server-filesystem",
+                str(Path.home() / ("Desktop" if sys.platform == "darwin" else "")),
+            ],
+        },
+        {
+            "type": "stdio",
+            "command": "npx",
+            "args": ["@playwright/mcp@latest"],
+        },
+    ],
+}
+
+
+DEFAULT_SYSTEM_PROMPT = """
+You are an agent - please keep going until the user’s query is completely
+resolved, before ending your turn and yielding back to the user. Only terminate
+your turn when you are sure that the problem is solved, or if you need more
+info from the user to solve the problem.
+If you are not sure about anything pertaining to the user’s request, use your
+tools to read files and gather the relevant information: do NOT guess or make
+up an answer.
+You MUST plan extensively before each function call, and reflect extensively
+on the outcomes of the previous function calls. DO NOT do this entire process
+by making function calls only, as this can impair your ability to solve the
+problem and think insightfully.
+""".strip()
+
+MAX_NUM_TURNS = 10
+
+TASK_COMPLETE_TOOL: ChatCompletionInputTool = ChatCompletionInputTool.parse_obj(  # type: ignore[assignment]
+    {
+        "type": "function",
+        "function": {
+            "name": "task_complete",
+            "description": "Call this tool when the task given by the user is complete",
+            "parameters": {
+                "type": "object",
+                "properties": {},
+            },
+        },
+    }
+)
+
+ASK_QUESTION_TOOL: ChatCompletionInputTool = ChatCompletionInputTool.parse_obj(  # type: ignore[assignment]
+    {
+        "type": "function",
+        "function": {
+            "name": "ask_question",
+            "description": "Ask the user for more info required to solve or clarify their problem.",
+            "parameters": {
+                "type": "object",
+                "properties": {},
+            },
+        },
+    }
+)
+
+EXIT_LOOP_TOOLS: list[ChatCompletionInputTool] = [TASK_COMPLETE_TOOL, ASK_QUESTION_TOOL]
+
+
+DEFAULT_REPO_ID = "tiny-agents/tiny-agents"
```