huggingface-hub 0.35.0rc0-py3-none-any.whl → 1.0.0rc0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of huggingface-hub might be problematic.
- huggingface_hub/__init__.py +46 -45
- huggingface_hub/_commit_api.py +28 -28
- huggingface_hub/_commit_scheduler.py +11 -8
- huggingface_hub/_inference_endpoints.py +8 -8
- huggingface_hub/_jobs_api.py +176 -20
- huggingface_hub/_local_folder.py +1 -1
- huggingface_hub/_login.py +13 -39
- huggingface_hub/_oauth.py +10 -14
- huggingface_hub/_snapshot_download.py +14 -28
- huggingface_hub/_space_api.py +4 -4
- huggingface_hub/_tensorboard_logger.py +13 -14
- huggingface_hub/_upload_large_folder.py +120 -13
- huggingface_hub/_webhooks_payload.py +3 -3
- huggingface_hub/_webhooks_server.py +2 -2
- huggingface_hub/cli/_cli_utils.py +2 -2
- huggingface_hub/cli/auth.py +8 -6
- huggingface_hub/cli/cache.py +18 -20
- huggingface_hub/cli/download.py +4 -4
- huggingface_hub/cli/hf.py +2 -5
- huggingface_hub/cli/jobs.py +599 -22
- huggingface_hub/cli/lfs.py +4 -4
- huggingface_hub/cli/repo.py +11 -7
- huggingface_hub/cli/repo_files.py +2 -2
- huggingface_hub/cli/upload.py +4 -4
- huggingface_hub/cli/upload_large_folder.py +3 -3
- huggingface_hub/commands/_cli_utils.py +2 -2
- huggingface_hub/commands/delete_cache.py +13 -13
- huggingface_hub/commands/download.py +4 -13
- huggingface_hub/commands/lfs.py +4 -4
- huggingface_hub/commands/repo_files.py +2 -2
- huggingface_hub/commands/scan_cache.py +1 -1
- huggingface_hub/commands/tag.py +1 -3
- huggingface_hub/commands/upload.py +4 -4
- huggingface_hub/commands/upload_large_folder.py +3 -3
- huggingface_hub/commands/user.py +4 -5
- huggingface_hub/community.py +5 -5
- huggingface_hub/constants.py +3 -41
- huggingface_hub/dataclasses.py +16 -19
- huggingface_hub/errors.py +42 -29
- huggingface_hub/fastai_utils.py +8 -9
- huggingface_hub/file_download.py +162 -259
- huggingface_hub/hf_api.py +841 -616
- huggingface_hub/hf_file_system.py +98 -62
- huggingface_hub/hub_mixin.py +37 -57
- huggingface_hub/inference/_client.py +257 -325
- huggingface_hub/inference/_common.py +110 -124
- huggingface_hub/inference/_generated/_async_client.py +307 -432
- huggingface_hub/inference/_generated/types/automatic_speech_recognition.py +3 -3
- huggingface_hub/inference/_generated/types/base.py +10 -7
- huggingface_hub/inference/_generated/types/chat_completion.py +18 -16
- huggingface_hub/inference/_generated/types/depth_estimation.py +2 -2
- huggingface_hub/inference/_generated/types/document_question_answering.py +2 -2
- huggingface_hub/inference/_generated/types/feature_extraction.py +2 -2
- huggingface_hub/inference/_generated/types/fill_mask.py +2 -2
- huggingface_hub/inference/_generated/types/sentence_similarity.py +3 -3
- huggingface_hub/inference/_generated/types/summarization.py +2 -2
- huggingface_hub/inference/_generated/types/table_question_answering.py +4 -4
- huggingface_hub/inference/_generated/types/text2text_generation.py +2 -2
- huggingface_hub/inference/_generated/types/text_generation.py +10 -10
- huggingface_hub/inference/_generated/types/text_to_video.py +2 -2
- huggingface_hub/inference/_generated/types/token_classification.py +2 -2
- huggingface_hub/inference/_generated/types/translation.py +2 -2
- huggingface_hub/inference/_generated/types/zero_shot_classification.py +2 -2
- huggingface_hub/inference/_generated/types/zero_shot_image_classification.py +2 -2
- huggingface_hub/inference/_generated/types/zero_shot_object_detection.py +1 -3
- huggingface_hub/inference/_mcp/_cli_hacks.py +3 -3
- huggingface_hub/inference/_mcp/agent.py +3 -3
- huggingface_hub/inference/_mcp/cli.py +1 -1
- huggingface_hub/inference/_mcp/constants.py +2 -3
- huggingface_hub/inference/_mcp/mcp_client.py +58 -30
- huggingface_hub/inference/_mcp/types.py +10 -7
- huggingface_hub/inference/_mcp/utils.py +11 -7
- huggingface_hub/inference/_providers/__init__.py +4 -2
- huggingface_hub/inference/_providers/_common.py +49 -25
- huggingface_hub/inference/_providers/black_forest_labs.py +6 -6
- huggingface_hub/inference/_providers/cohere.py +3 -3
- huggingface_hub/inference/_providers/fal_ai.py +52 -21
- huggingface_hub/inference/_providers/featherless_ai.py +4 -4
- huggingface_hub/inference/_providers/fireworks_ai.py +3 -3
- huggingface_hub/inference/_providers/hf_inference.py +28 -20
- huggingface_hub/inference/_providers/hyperbolic.py +4 -4
- huggingface_hub/inference/_providers/nebius.py +10 -10
- huggingface_hub/inference/_providers/novita.py +5 -5
- huggingface_hub/inference/_providers/nscale.py +4 -4
- huggingface_hub/inference/_providers/replicate.py +15 -15
- huggingface_hub/inference/_providers/sambanova.py +6 -6
- huggingface_hub/inference/_providers/together.py +7 -7
- huggingface_hub/lfs.py +20 -31
- huggingface_hub/repocard.py +18 -18
- huggingface_hub/repocard_data.py +56 -56
- huggingface_hub/serialization/__init__.py +0 -1
- huggingface_hub/serialization/_base.py +9 -9
- huggingface_hub/serialization/_dduf.py +7 -7
- huggingface_hub/serialization/_torch.py +28 -28
- huggingface_hub/utils/__init__.py +10 -4
- huggingface_hub/utils/_auth.py +5 -5
- huggingface_hub/utils/_cache_manager.py +31 -31
- huggingface_hub/utils/_deprecation.py +1 -1
- huggingface_hub/utils/_dotenv.py +25 -21
- huggingface_hub/utils/_fixes.py +0 -10
- huggingface_hub/utils/_git_credential.py +4 -4
- huggingface_hub/utils/_headers.py +7 -29
- huggingface_hub/utils/_http.py +366 -208
- huggingface_hub/utils/_pagination.py +4 -4
- huggingface_hub/utils/_paths.py +5 -5
- huggingface_hub/utils/_runtime.py +16 -13
- huggingface_hub/utils/_safetensors.py +21 -21
- huggingface_hub/utils/_subprocess.py +9 -9
- huggingface_hub/utils/_telemetry.py +3 -3
- huggingface_hub/utils/_typing.py +25 -5
- huggingface_hub/utils/_validators.py +53 -72
- huggingface_hub/utils/_xet.py +16 -16
- huggingface_hub/utils/_xet_progress_reporting.py +32 -11
- huggingface_hub/utils/insecure_hashlib.py +3 -9
- huggingface_hub/utils/tqdm.py +3 -3
- {huggingface_hub-0.35.0rc0.dist-info → huggingface_hub-1.0.0rc0.dist-info}/METADATA +18 -29
- huggingface_hub-1.0.0rc0.dist-info/RECORD +161 -0
- huggingface_hub/inference_api.py +0 -217
- huggingface_hub/keras_mixin.py +0 -500
- huggingface_hub/repository.py +0 -1477
- huggingface_hub/serialization/_tensorflow.py +0 -95
- huggingface_hub/utils/_hf_folder.py +0 -68
- huggingface_hub-0.35.0rc0.dist-info/RECORD +0 -166
- {huggingface_hub-0.35.0rc0.dist-info → huggingface_hub-1.0.0rc0.dist-info}/LICENSE +0 -0
- {huggingface_hub-0.35.0rc0.dist-info → huggingface_hub-1.0.0rc0.dist-info}/WHEEL +0 -0
- {huggingface_hub-0.35.0rc0.dist-info → huggingface_hub-1.0.0rc0.dist-info}/entry_points.txt +0 -0
- {huggingface_hub-0.35.0rc0.dist-info → huggingface_hub-1.0.0rc0.dist-info}/top_level.txt +0 -0
huggingface_hub/inference/_generated/types/automatic_speech_recognition.py

@@ -3,7 +3,7 @@
 # See:
 # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
 # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
-from typing import
+from typing import Literal, Optional, Union

 from .base import BaseInferenceType, dataclass_with_extra

@@ -97,7 +97,7 @@ class AutomaticSpeechRecognitionInput(BaseInferenceType):
 class AutomaticSpeechRecognitionOutputChunk(BaseInferenceType):
     text: str
     """A chunk of text identified by the model"""
-    timestamp:
+    timestamp: list[float]
     """The start and end timestamps corresponding with the text"""


@@ -107,7 +107,7 @@ class AutomaticSpeechRecognitionOutput(BaseInferenceType):

     text: str
     """The recognized text."""
-    chunks: Optional[
+    chunks: Optional[list[AutomaticSpeechRecognitionOutputChunk]] = None
     """When returnTimestamps is enabled, chunks contains a list of audio chunks identified by
     the model.
     """
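The change here is representative of the whole `_generated/types` package: PEP 585 builtin generics such as `list[float]` replace the old `typing.List` spellings. As a quick sanity check (not from the diff), the two forms expose the same type arguments at runtime:

from typing import List, get_args

assert get_args(list[float]) == (float,)   # new PEP 585 spelling
assert get_args(List[float]) == (float,)   # old typing.List spelling
assert list[float].__origin__ is list      # the property the new parse_obj check relies on
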
huggingface_hub/inference/_generated/types/base.py

@@ -15,8 +15,9 @@

 import inspect
 import json
+import types
 from dataclasses import asdict, dataclass
-from typing import Any,
+from typing import Any, TypeVar, Union, get_args


 T = TypeVar("T", bound="BaseInferenceType")

@@ -28,7 +29,7 @@ def _repr_with_extra(self):
     return f"{self.__class__.__name__}({', '.join(f'{k}={self.__dict__[k]!r}' for k in fields + other_fields)})"


-def dataclass_with_extra(cls:
+def dataclass_with_extra(cls: type[T]) -> type[T]:
     """Decorator to add a custom __repr__ method to a dataclass, showing all fields, including extra ones.

     This decorator only works with dataclasses that inherit from `BaseInferenceType`.

@@ -49,7 +50,7 @@ class BaseInferenceType(dict):
     """

     @classmethod
-    def parse_obj_as_list(cls:
+    def parse_obj_as_list(cls: type[T], data: Union[bytes, str, list, dict]) -> list[T]:
         """Alias to parse server response and return a single instance.

         See `parse_obj` for more details.

@@ -60,7 +61,7 @@ class BaseInferenceType(dict):
         return output

     @classmethod
-    def parse_obj_as_instance(cls:
+    def parse_obj_as_instance(cls: type[T], data: Union[bytes, str, list, dict]) -> T:
         """Alias to parse server response and return a single instance.

         See `parse_obj` for more details.

@@ -71,7 +72,7 @@ class BaseInferenceType(dict):
         return output

     @classmethod
-    def parse_obj(cls:
+    def parse_obj(cls: type[T], data: Union[bytes, str, list, dict]) -> Union[list[T], T]:
         """Parse server response as a dataclass or list of dataclasses.

         To enable future-compatibility, we want to handle cases where the server return more fields than expected.

@@ -85,7 +86,7 @@ class BaseInferenceType(dict):
             data = json.loads(data)

         # If a list, parse each item individually
-        if isinstance(data,
+        if isinstance(data, list):
             return [cls.parse_obj(d) for d in data]  # type: ignore [misc]

         # At this point, we expect a dict

@@ -109,7 +110,9 @@ class BaseInferenceType(dict):
             else:
                 expected_types = get_args(field_type)
             for expected_type in expected_types:
-                if
+                if (
+                    isinstance(expected_type, types.GenericAlias) and expected_type.__origin__ is list
+                ) or getattr(expected_type, "_name", None) == "List":
                     expected_type = get_args(expected_type)[
                         0
                     ]  # assume same type for all items in the list
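The new branch in `parse_obj` is needed because `list[X]` annotations are `types.GenericAlias` instances, while legacy `typing.List[X]` is still recognized via its `_name` attribute. A minimal standalone sketch of that detection logic; `is_list_annotation` is a hypothetical helper name, not something defined in huggingface_hub:

import types
from typing import List, get_args

def is_list_annotation(tp) -> bool:
    # Mirrors the check added above: PEP 585 generics are types.GenericAlias,
    # while typing.List[...] still reports _name == "List".
    return (
        isinstance(tp, types.GenericAlias) and tp.__origin__ is list
    ) or getattr(tp, "_name", None) == "List"

assert is_list_annotation(list[int])   # new code path
assert is_list_annotation(List[int])   # legacy code path
assert get_args(list[int])[0] is int   # item type, as parse_obj extracts it
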
huggingface_hub/inference/_generated/types/chat_completion.py

@@ -3,7 +3,7 @@
 # See:
 # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
 # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
-from typing import Any,
+from typing import Any, Literal, Optional, Union

 from .base import BaseInferenceType, dataclass_with_extra

@@ -40,9 +40,9 @@ class ChatCompletionInputToolCall(BaseInferenceType):
 @dataclass_with_extra
 class ChatCompletionInputMessage(BaseInferenceType):
     role: str
-    content: Optional[Union[
+    content: Optional[Union[list[ChatCompletionInputMessageChunk], str]] = None
     name: Optional[str] = None
-    tool_calls: Optional[
+    tool_calls: Optional[list[ChatCompletionInputToolCall]] = None


 @dataclass_with_extra

@@ -56,7 +56,7 @@ class ChatCompletionInputJSONSchema(BaseInferenceType):
     A description of what the response format is for, used by the model to determine
     how to respond in the format.
     """
-    schema: Optional[
+    schema: Optional[dict[str, object]] = None
     """
     The schema for the response format, described as a JSON Schema object. Learn how
     to build JSON schemas [here](https://json-schema.org/).

@@ -129,14 +129,14 @@ class ChatCompletionInput(BaseInferenceType):
     https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts.
     """

-    messages:
+    messages: list[ChatCompletionInputMessage]
     """A list of messages comprising the conversation so far."""
     frequency_penalty: Optional[float] = None
     """Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing
     frequency in the text so far,
     decreasing the model's likelihood to repeat the same line verbatim.
     """
-    logit_bias: Optional[
+    logit_bias: Optional[list[float]] = None
     """UNUSED
     Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON
     object that maps tokens

@@ -172,7 +172,7 @@ class ChatCompletionInput(BaseInferenceType):
     """
     response_format: Optional[ChatCompletionInputGrammarType] = None
     seed: Optional[int] = None
-    stop: Optional[
+    stop: Optional[list[str]] = None
     """Up to 4 sequences where the API will stop generating further tokens."""
     stream: Optional[bool] = None
     stream_options: Optional[ChatCompletionInputStreamOptions] = None

@@ -185,7 +185,7 @@ class ChatCompletionInput(BaseInferenceType):
     tool_choice: Optional[Union[ChatCompletionInputToolChoiceClass, "ChatCompletionInputToolChoiceEnum"]] = None
     tool_prompt: Optional[str] = None
     """A prompt to be appended before the tools"""
-    tools: Optional[
+    tools: Optional[list[ChatCompletionInputTool]] = None
     """A list of tools the model may call. Currently, only functions are supported as a tool.
     Use this to provide a list of
     functions the model may generate JSON inputs for.

@@ -213,12 +213,12 @@ class ChatCompletionOutputTopLogprob(BaseInferenceType):
 class ChatCompletionOutputLogprob(BaseInferenceType):
     logprob: float
     token: str
-    top_logprobs:
+    top_logprobs: list[ChatCompletionOutputTopLogprob]


 @dataclass_with_extra
 class ChatCompletionOutputLogprobs(BaseInferenceType):
-    content:
+    content: list[ChatCompletionOutputLogprob]


 @dataclass_with_extra

@@ -239,8 +239,9 @@ class ChatCompletionOutputToolCall(BaseInferenceType):
 class ChatCompletionOutputMessage(BaseInferenceType):
     role: str
     content: Optional[str] = None
+    reasoning: Optional[str] = None
     tool_call_id: Optional[str] = None
-    tool_calls: Optional[
+    tool_calls: Optional[list[ChatCompletionOutputToolCall]] = None


 @dataclass_with_extra

@@ -266,7 +267,7 @@ class ChatCompletionOutput(BaseInferenceType):
     https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts.
     """

-    choices:
+    choices: list[ChatCompletionOutputComplete]
     created: int
     id: str
     model: str

@@ -292,8 +293,9 @@ class ChatCompletionStreamOutputDeltaToolCall(BaseInferenceType):
 class ChatCompletionStreamOutputDelta(BaseInferenceType):
     role: str
     content: Optional[str] = None
+    reasoning: Optional[str] = None
     tool_call_id: Optional[str] = None
-    tool_calls: Optional[
+    tool_calls: Optional[list[ChatCompletionStreamOutputDeltaToolCall]] = None


 @dataclass_with_extra

@@ -306,12 +308,12 @@ class ChatCompletionStreamOutputTopLogprob(BaseInferenceType):
 class ChatCompletionStreamOutputLogprob(BaseInferenceType):
     logprob: float
     token: str
-    top_logprobs:
+    top_logprobs: list[ChatCompletionStreamOutputTopLogprob]


 @dataclass_with_extra
 class ChatCompletionStreamOutputLogprobs(BaseInferenceType):
-    content:
+    content: list[ChatCompletionStreamOutputLogprob]


 @dataclass_with_extra

@@ -337,7 +339,7 @@ class ChatCompletionStreamOutput(BaseInferenceType):
     https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts.
     """

-    choices:
+    choices: list[ChatCompletionStreamOutputChoice]
     created: int
     id: str
     model: str
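Besides the generics migration, this file adds an optional `reasoning` field to both `ChatCompletionOutputMessage` and `ChatCompletionStreamOutputDelta`. A hedged sketch of reading it, assuming the usual top-level re-export of the generated types and using a hypothetical server payload:

from huggingface_hub import ChatCompletionOutput

payload = {  # hypothetical server response
    "id": "chatcmpl-0",
    "created": 1700000000,
    "model": "some-model",
    "system_fingerprint": "fp",
    "choices": [
        {
            "finish_reason": "stop",
            "index": 0,
            "message": {"role": "assistant", "content": "4", "reasoning": "2 + 2 = 4"},
        }
    ],
    "usage": {"completion_tokens": 1, "prompt_tokens": 3, "total_tokens": 4},
}
output = ChatCompletionOutput.parse_obj_as_instance(payload)
print(output.choices[0].message.reasoning)  # -> "2 + 2 = 4"
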
huggingface_hub/inference/_generated/types/depth_estimation.py

@@ -3,7 +3,7 @@
 # See:
 # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
 # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
-from typing import Any,
+from typing import Any, Optional

 from .base import BaseInferenceType, dataclass_with_extra

@@ -14,7 +14,7 @@ class DepthEstimationInput(BaseInferenceType):

     inputs: Any
     """The input image data"""
-    parameters: Optional[
+    parameters: Optional[dict[str, Any]] = None
     """Additional inference parameters for Depth Estimation"""


huggingface_hub/inference/_generated/types/document_question_answering.py

@@ -3,7 +3,7 @@
 # See:
 # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
 # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
-from typing import Any,
+from typing import Any, Optional, Union

 from .base import BaseInferenceType, dataclass_with_extra

@@ -46,7 +46,7 @@ class DocumentQuestionAnsweringParameters(BaseInferenceType):
     """The number of answers to return (will be chosen by order of likelihood). Can return less
     than top_k answers if there are not enough options available within the context.
     """
-    word_boxes: Optional[
+    word_boxes: Optional[list[Union[list[float], str]]] = None
     """A list of words and bounding boxes (normalized 0->1000). If provided, the inference will
     skip the OCR step and use the provided bounding boxes instead.
     """
huggingface_hub/inference/_generated/types/feature_extraction.py

@@ -3,7 +3,7 @@
 # See:
 # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
 # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
-from typing import
+from typing import Literal, Optional, Union

 from .base import BaseInferenceType, dataclass_with_extra

@@ -19,7 +19,7 @@ class FeatureExtractionInput(BaseInferenceType):
     https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tei-import.ts.
     """

-    inputs: Union[
+    inputs: Union[list[str], str]
     """The text or list of texts to embed."""
     normalize: Optional[bool] = None
     prompt_name: Optional[str] = None
huggingface_hub/inference/_generated/types/fill_mask.py

@@ -3,7 +3,7 @@
 # See:
 # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
 # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
-from typing import Any,
+from typing import Any, Optional

 from .base import BaseInferenceType, dataclass_with_extra

@@ -12,7 +12,7 @@ from .base import BaseInferenceType, dataclass_with_extra
 class FillMaskParameters(BaseInferenceType):
     """Additional inference parameters for Fill Mask"""

-    targets: Optional[
+    targets: Optional[list[str]] = None
     """When passed, the model will limit the scores to the passed targets instead of looking up
     in the whole vocabulary. If the provided targets are not in the model vocab, they will be
     tokenized and the first resulting token will be used (with a warning, and that might be
huggingface_hub/inference/_generated/types/sentence_similarity.py

@@ -3,14 +3,14 @@
 # See:
 # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
 # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
-from typing import Any,
+from typing import Any, Optional

 from .base import BaseInferenceType, dataclass_with_extra


 @dataclass_with_extra
 class SentenceSimilarityInputData(BaseInferenceType):
-    sentences:
+    sentences: list[str]
     """A list of strings which will be compared against the source_sentence."""
     source_sentence: str
     """The string that you wish to compare the other strings with. This can be a phrase,

@@ -23,5 +23,5 @@ class SentenceSimilarityInput(BaseInferenceType):
     """Inputs for Sentence similarity inference"""

     inputs: SentenceSimilarityInputData
-    parameters: Optional[
+    parameters: Optional[dict[str, Any]] = None
     """Additional inference parameters for Sentence Similarity"""
huggingface_hub/inference/_generated/types/summarization.py

@@ -3,7 +3,7 @@
 # See:
 # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
 # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
-from typing import Any,
+from typing import Any, Literal, Optional

 from .base import BaseInferenceType, dataclass_with_extra

@@ -17,7 +17,7 @@ class SummarizationParameters(BaseInferenceType):

     clean_up_tokenization_spaces: Optional[bool] = None
     """Whether to clean up the potential extra spaces in the text output."""
-    generate_parameters: Optional[
+    generate_parameters: Optional[dict[str, Any]] = None
     """Additional parametrization of the text generation algorithm."""
     truncation: Optional["SummarizationTruncationStrategy"] = None
     """The truncation strategy to use."""
huggingface_hub/inference/_generated/types/table_question_answering.py

@@ -3,7 +3,7 @@
 # See:
 # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
 # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
-from typing import
+from typing import Literal, Optional

 from .base import BaseInferenceType, dataclass_with_extra

@@ -14,7 +14,7 @@ class TableQuestionAnsweringInputData(BaseInferenceType):

     question: str
     """The question to be answered about the table"""
-    table:
+    table: dict[str, list[str]]
     """The table to serve as context for the questions"""


@@ -54,9 +54,9 @@ class TableQuestionAnsweringOutputElement(BaseInferenceType):
     """The answer of the question given the table. If there is an aggregator, the answer will be
     preceded by `AGGREGATOR >`.
     """
-    cells:
+    cells: list[str]
     """List of strings made up of the answer cell values."""
-    coordinates:
+    coordinates: list[list[int]]
     """Coordinates of the cells of the answers."""
     aggregator: Optional[str] = None
     """If the model has an aggregator, this returns the aggregator."""
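The `cells` and `coordinates` annotations now spell out their shapes. A hypothetical response element matching `TableQuestionAnsweringOutputElement` (values invented for illustration):

element = {
    "answer": "AVERAGE > 36",  # aggregator-prefixed answer, as documented above
    "cells": ["36"],           # list[str]
    "coordinates": [[0, 1]],   # list[list[int]], one coordinate pair per answer cell
    "aggregator": "AVERAGE",
}
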
huggingface_hub/inference/_generated/types/text2text_generation.py

@@ -3,7 +3,7 @@
 # See:
 # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
 # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
-from typing import Any,
+from typing import Any, Literal, Optional

 from .base import BaseInferenceType, dataclass_with_extra

@@ -17,7 +17,7 @@ class Text2TextGenerationParameters(BaseInferenceType):

     clean_up_tokenization_spaces: Optional[bool] = None
     """Whether to clean up the potential extra spaces in the text output."""
-    generate_parameters: Optional[
+    generate_parameters: Optional[dict[str, Any]] = None
     """Additional parametrization of the text generation algorithm"""
     truncation: Optional["Text2TextGenerationTruncationStrategy"] = None
     """The truncation strategy to use"""
huggingface_hub/inference/_generated/types/text_generation.py

@@ -3,7 +3,7 @@
 # See:
 # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
 # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
-from typing import Any,
+from typing import Any, Literal, Optional

 from .base import BaseInferenceType, dataclass_with_extra

@@ -49,7 +49,7 @@ class TextGenerationInputGenerateParameters(BaseInferenceType):
     """Whether to prepend the prompt to the generated text"""
     seed: Optional[int] = None
     """Random sampling seed."""
-    stop: Optional[
+    stop: Optional[list[str]] = None
     """Stop generating tokens if a member of `stop` is generated."""
     temperature: Optional[float] = None
     """The value used to module the logits distribution."""

@@ -108,21 +108,21 @@ class TextGenerationOutputBestOfSequence(BaseInferenceType):
     finish_reason: "TextGenerationOutputFinishReason"
     generated_text: str
     generated_tokens: int
-    prefill:
-    tokens:
+    prefill: list[TextGenerationOutputPrefillToken]
+    tokens: list[TextGenerationOutputToken]
     seed: Optional[int] = None
-    top_tokens: Optional[
+    top_tokens: Optional[list[list[TextGenerationOutputToken]]] = None


 @dataclass_with_extra
 class TextGenerationOutputDetails(BaseInferenceType):
     finish_reason: "TextGenerationOutputFinishReason"
     generated_tokens: int
-    prefill:
-    tokens:
-    best_of_sequences: Optional[
+    prefill: list[TextGenerationOutputPrefillToken]
+    tokens: list[TextGenerationOutputToken]
+    best_of_sequences: Optional[list[TextGenerationOutputBestOfSequence]] = None
     seed: Optional[int] = None
-    top_tokens: Optional[
+    top_tokens: Optional[list[list[TextGenerationOutputToken]]] = None


 @dataclass_with_extra

@@ -165,4 +165,4 @@ class TextGenerationStreamOutput(BaseInferenceType):
     token: TextGenerationStreamOutputToken
     details: Optional[TextGenerationStreamOutputStreamDetails] = None
     generated_text: Optional[str] = None
-    top_tokens: Optional[
+    top_tokens: Optional[list[TextGenerationStreamOutputToken]] = None
huggingface_hub/inference/_generated/types/text_to_video.py

@@ -3,7 +3,7 @@
 # See:
 # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
 # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
-from typing import Any,
+from typing import Any, Optional

 from .base import BaseInferenceType, dataclass_with_extra

@@ -16,7 +16,7 @@ class TextToVideoParameters(BaseInferenceType):
     """A higher guidance scale value encourages the model to generate videos closely linked to
     the text prompt, but values too high may cause saturation and other artifacts.
     """
-    negative_prompt: Optional[
+    negative_prompt: Optional[list[str]] = None
     """One or several prompt to guide what NOT to include in video generation."""
     num_frames: Optional[float] = None
     """The num_frames parameter determines how many video frames are generated."""
huggingface_hub/inference/_generated/types/token_classification.py

@@ -3,7 +3,7 @@
 # See:
 # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
 # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
-from typing import
+from typing import Literal, Optional

 from .base import BaseInferenceType, dataclass_with_extra

@@ -17,7 +17,7 @@ class TokenClassificationParameters(BaseInferenceType):

     aggregation_strategy: Optional["TokenClassificationAggregationStrategy"] = None
     """The strategy used to fuse tokens based on model predictions"""
-    ignore_labels: Optional[
+    ignore_labels: Optional[list[str]] = None
     """A list of labels to ignore"""
     stride: Optional[int] = None
     """The number of overlapping tokens between chunks when splitting the input text."""
huggingface_hub/inference/_generated/types/translation.py

@@ -3,7 +3,7 @@
 # See:
 # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
 # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
-from typing import Any,
+from typing import Any, Literal, Optional

 from .base import BaseInferenceType, dataclass_with_extra

@@ -17,7 +17,7 @@ class TranslationParameters(BaseInferenceType):

     clean_up_tokenization_spaces: Optional[bool] = None
     """Whether to clean up the potential extra spaces in the text output."""
-    generate_parameters: Optional[
+    generate_parameters: Optional[dict[str, Any]] = None
     """Additional parametrization of the text generation algorithm."""
     src_lang: Optional[str] = None
     """The source language of the text. Required for models that can translate from multiple
huggingface_hub/inference/_generated/types/zero_shot_classification.py

@@ -3,7 +3,7 @@
 # See:
 # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
 # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
-from typing import
+from typing import Optional

 from .base import BaseInferenceType, dataclass_with_extra

@@ -12,7 +12,7 @@ from .base import BaseInferenceType, dataclass_with_extra
 class ZeroShotClassificationParameters(BaseInferenceType):
     """Additional inference parameters for Zero Shot Classification"""

-    candidate_labels:
+    candidate_labels: list[str]
     """The set of possible class labels to classify the text into."""
     hypothesis_template: Optional[str] = None
     """The sentence used in conjunction with `candidate_labels` to attempt the text
huggingface_hub/inference/_generated/types/zero_shot_image_classification.py

@@ -3,7 +3,7 @@
 # See:
 # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
 # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
-from typing import
+from typing import Optional

 from .base import BaseInferenceType, dataclass_with_extra

@@ -12,7 +12,7 @@ from .base import BaseInferenceType, dataclass_with_extra
 class ZeroShotImageClassificationParameters(BaseInferenceType):
     """Additional inference parameters for Zero Shot Image Classification"""

-    candidate_labels:
+    candidate_labels: list[str]
     """The candidate labels for this image"""
     hypothesis_template: Optional[str] = None
     """The sentence used in conjunction with `candidate_labels` to attempt the image
huggingface_hub/inference/_generated/types/zero_shot_object_detection.py

@@ -3,8 +3,6 @@
 # See:
 # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
 # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
-from typing import List
-
 from .base import BaseInferenceType, dataclass_with_extra


@@ -12,7 +10,7 @@ from .base import BaseInferenceType, dataclass_with_extra
 class ZeroShotObjectDetectionParameters(BaseInferenceType):
     """Additional inference parameters for Zero Shot Object Detection"""

-    candidate_labels:
+    candidate_labels: list[str]
     """The candidate labels for this image"""


huggingface_hub/inference/_mcp/_cli_hacks.py

@@ -17,7 +17,7 @@ def _patch_anyio_open_process():

     if getattr(anyio, "_tiny_agents_patched", False):
         return
-    anyio._tiny_agents_patched = True
+    anyio._tiny_agents_patched = True  # ty: ignore[invalid-assignment]

     original_open_process = anyio.open_process

@@ -32,7 +32,7 @@ def _patch_anyio_open_process():
             kwargs.setdefault("creationflags", subprocess.CREATE_NEW_PROCESS_GROUP)
             return await original_open_process(*args, **kwargs)

-        anyio.open_process = open_process_in_new_group
+        anyio.open_process = open_process_in_new_group  # ty: ignore[invalid-assignment]
     else:
         # For Unix-like systems, we can use setsid to create a new session
         async def open_process_in_new_group(*args, **kwargs):

@@ -42,7 +42,7 @@ def _patch_anyio_open_process():
             kwargs.setdefault("start_new_session", True)
             return await original_open_process(*args, **kwargs)

-        anyio.open_process = open_process_in_new_group
+        anyio.open_process = open_process_in_new_group  # ty: ignore[invalid-assignment]


 async def _async_prompt(exit_event: asyncio.Event, prompt: str = "» ") -> str:
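The patched `anyio.open_process` starts MCP stdio servers in their own process group/session so a Ctrl+C aimed at the CLI does not also kill them; the new comments only silence the `ty` type checker. A standalone sketch of the underlying idea, using plain `subprocess` rather than the library's code:

import subprocess
import sys

if sys.platform == "win32":
    # Windows: a new process group does not receive the console's Ctrl+C event.
    proc = subprocess.Popen(
        ["ping", "localhost"], creationflags=subprocess.CREATE_NEW_PROCESS_GROUP
    )
else:
    # Unix: a new session detaches the child from the terminal's foreground group,
    # so the SIGINT generated by Ctrl+C is not delivered to it.
    proc = subprocess.Popen(["sleep", "60"], start_new_session=True)

proc.terminate()
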
huggingface_hub/inference/_mcp/agent.py

@@ -1,7 +1,7 @@
 from __future__ import annotations

 import asyncio
-from typing import AsyncGenerator,
+from typing import AsyncGenerator, Iterable, Optional, Union

 from huggingface_hub import ChatCompletionInputMessage, ChatCompletionStreamOutput, MCPClient

@@ -24,7 +24,7 @@ class Agent(MCPClient):
         model (`str`, *optional*):
             The model to run inference with. Can be a model id hosted on the Hugging Face Hub, e.g. `meta-llama/Meta-Llama-3-8B-Instruct`
             or a URL to a deployed Inference Endpoint or other local or remote endpoint.
-        servers (`Iterable[
+        servers (`Iterable[dict]`):
             MCP servers to connect to. Each server is a dictionary containing a `type` key and a `config` key. The `type` key can be `"stdio"` or `"sse"`, and the `config` key is a dictionary of arguments for the server.
         provider (`str`, *optional*):
             Name of the provider to use for inference. Defaults to "auto" i.e. the first of the providers available for the model, sorted by the user's order in https://hf.co/settings/inference-providers.

@@ -49,7 +49,7 @@ class Agent(MCPClient):
     ):
         super().__init__(model=model, provider=provider, base_url=base_url, api_key=api_key)
         self._servers_cfg = list(servers)
-        self.messages:
+        self.messages: list[Union[dict, ChatCompletionInputMessage]] = [
             {"role": "system", "content": prompt or DEFAULT_SYSTEM_PROMPT}
         ]

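Given the updated `servers` documentation, constructing an `Agent` might look like the following. This assumes the top-level `Agent` re-export; the server command and arguments are invented for illustration:

from huggingface_hub import Agent

agent = Agent(
    model="Qwen/Qwen2.5-72B-Instruct",
    servers=[
        {
            "type": "stdio",  # or "sse", per the docstring above
            "config": {"command": "npx", "args": ["@playwright/mcp@latest"]},
        }
    ],
)
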
huggingface_hub/inference/_mcp/cli.py

@@ -33,7 +33,7 @@ async def run_agent(

     Args:
         agent_path (`str`, *optional*):
-            Path to a local folder containing an `agent.json` and optionally a custom `PROMPT.md` file or a built-in agent stored in a Hugging Face dataset.
+            Path to a local folder containing an `agent.json` and optionally a custom `PROMPT.md` or `AGENTS.md` file or a built-in agent stored in a Hugging Face dataset.

     """
     _patch_anyio_open_process()  # Hacky way to prevent stdio connections to be stopped by Ctrl+C
huggingface_hub/inference/_mcp/constants.py

@@ -2,13 +2,12 @@ from __future__ import annotations

 import sys
 from pathlib import Path
-from typing import List

 from huggingface_hub import ChatCompletionInputTool


 FILENAME_CONFIG = "agent.json"
-
+PROMPT_FILENAMES = ("PROMPT.md", "AGENTS.md")

 DEFAULT_AGENT = {
     "model": "Qwen/Qwen2.5-72B-Instruct",

@@ -76,7 +75,7 @@ ASK_QUESTION_TOOL: ChatCompletionInputTool = ChatCompletionInputTool.parse_obj(
     }
 )

-EXIT_LOOP_TOOLS:
+EXIT_LOOP_TOOLS: list[ChatCompletionInputTool] = [TASK_COMPLETE_TOOL, ASK_QUESTION_TOOL]


 DEFAULT_REPO_ID = "tiny-agents/tiny-agents"
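
`PROMPT_FILENAMES` now lists two accepted prompt-override files instead of a single `PROMPT.md` constant. A sketch of how a lookup over that tuple could work; `find_prompt` is a hypothetical helper, not part of huggingface_hub:

from pathlib import Path
from typing import Optional

PROMPT_FILENAMES = ("PROMPT.md", "AGENTS.md")

def find_prompt(agent_dir: Path) -> Optional[str]:
    # First matching filename wins, so PROMPT.md would take precedence over AGENTS.md.
    for name in PROMPT_FILENAMES:
        candidate = agent_dir / name
        if candidate.is_file():
            return candidate.read_text()
    return None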
|