huggingface-hub 0.29.0rc2__py3-none-any.whl → 1.1.3__py3-none-any.whl
This diff covers publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
- huggingface_hub/__init__.py +160 -46
- huggingface_hub/_commit_api.py +277 -71
- huggingface_hub/_commit_scheduler.py +15 -15
- huggingface_hub/_inference_endpoints.py +33 -22
- huggingface_hub/_jobs_api.py +301 -0
- huggingface_hub/_local_folder.py +18 -3
- huggingface_hub/_login.py +31 -63
- huggingface_hub/_oauth.py +460 -0
- huggingface_hub/_snapshot_download.py +241 -81
- huggingface_hub/_space_api.py +18 -10
- huggingface_hub/_tensorboard_logger.py +15 -19
- huggingface_hub/_upload_large_folder.py +196 -76
- huggingface_hub/_webhooks_payload.py +3 -3
- huggingface_hub/_webhooks_server.py +15 -25
- huggingface_hub/{commands → cli}/__init__.py +1 -15
- huggingface_hub/cli/_cli_utils.py +173 -0
- huggingface_hub/cli/auth.py +147 -0
- huggingface_hub/cli/cache.py +841 -0
- huggingface_hub/cli/download.py +189 -0
- huggingface_hub/cli/hf.py +60 -0
- huggingface_hub/cli/inference_endpoints.py +377 -0
- huggingface_hub/cli/jobs.py +772 -0
- huggingface_hub/cli/lfs.py +175 -0
- huggingface_hub/cli/repo.py +315 -0
- huggingface_hub/cli/repo_files.py +94 -0
- huggingface_hub/{commands/env.py → cli/system.py} +10 -13
- huggingface_hub/cli/upload.py +294 -0
- huggingface_hub/cli/upload_large_folder.py +117 -0
- huggingface_hub/community.py +20 -12
- huggingface_hub/constants.py +83 -59
- huggingface_hub/dataclasses.py +609 -0
- huggingface_hub/errors.py +99 -30
- huggingface_hub/fastai_utils.py +30 -41
- huggingface_hub/file_download.py +606 -346
- huggingface_hub/hf_api.py +2445 -1132
- huggingface_hub/hf_file_system.py +269 -152
- huggingface_hub/hub_mixin.py +61 -66
- huggingface_hub/inference/_client.py +501 -630
- huggingface_hub/inference/_common.py +133 -121
- huggingface_hub/inference/_generated/_async_client.py +536 -722
- huggingface_hub/inference/_generated/types/__init__.py +6 -1
- huggingface_hub/inference/_generated/types/automatic_speech_recognition.py +5 -6
- huggingface_hub/inference/_generated/types/base.py +10 -7
- huggingface_hub/inference/_generated/types/chat_completion.py +77 -31
- huggingface_hub/inference/_generated/types/depth_estimation.py +2 -2
- huggingface_hub/inference/_generated/types/document_question_answering.py +2 -2
- huggingface_hub/inference/_generated/types/feature_extraction.py +2 -2
- huggingface_hub/inference/_generated/types/fill_mask.py +2 -2
- huggingface_hub/inference/_generated/types/image_to_image.py +8 -2
- huggingface_hub/inference/_generated/types/image_to_text.py +2 -3
- huggingface_hub/inference/_generated/types/image_to_video.py +60 -0
- huggingface_hub/inference/_generated/types/sentence_similarity.py +3 -3
- huggingface_hub/inference/_generated/types/summarization.py +2 -2
- huggingface_hub/inference/_generated/types/table_question_answering.py +5 -5
- huggingface_hub/inference/_generated/types/text2text_generation.py +2 -2
- huggingface_hub/inference/_generated/types/text_generation.py +11 -11
- huggingface_hub/inference/_generated/types/text_to_audio.py +1 -2
- huggingface_hub/inference/_generated/types/text_to_speech.py +1 -2
- huggingface_hub/inference/_generated/types/text_to_video.py +2 -2
- huggingface_hub/inference/_generated/types/token_classification.py +2 -2
- huggingface_hub/inference/_generated/types/translation.py +2 -2
- huggingface_hub/inference/_generated/types/zero_shot_classification.py +2 -2
- huggingface_hub/inference/_generated/types/zero_shot_image_classification.py +2 -2
- huggingface_hub/inference/_generated/types/zero_shot_object_detection.py +1 -3
- huggingface_hub/inference/_mcp/__init__.py +0 -0
- huggingface_hub/inference/_mcp/_cli_hacks.py +88 -0
- huggingface_hub/inference/_mcp/agent.py +100 -0
- huggingface_hub/inference/_mcp/cli.py +247 -0
- huggingface_hub/inference/_mcp/constants.py +81 -0
- huggingface_hub/inference/_mcp/mcp_client.py +395 -0
- huggingface_hub/inference/_mcp/types.py +45 -0
- huggingface_hub/inference/_mcp/utils.py +128 -0
- huggingface_hub/inference/_providers/__init__.py +149 -20
- huggingface_hub/inference/_providers/_common.py +160 -37
- huggingface_hub/inference/_providers/black_forest_labs.py +12 -9
- huggingface_hub/inference/_providers/cerebras.py +6 -0
- huggingface_hub/inference/_providers/clarifai.py +13 -0
- huggingface_hub/inference/_providers/cohere.py +32 -0
- huggingface_hub/inference/_providers/fal_ai.py +231 -22
- huggingface_hub/inference/_providers/featherless_ai.py +38 -0
- huggingface_hub/inference/_providers/fireworks_ai.py +22 -1
- huggingface_hub/inference/_providers/groq.py +9 -0
- huggingface_hub/inference/_providers/hf_inference.py +143 -33
- huggingface_hub/inference/_providers/hyperbolic.py +9 -5
- huggingface_hub/inference/_providers/nebius.py +47 -5
- huggingface_hub/inference/_providers/novita.py +48 -5
- huggingface_hub/inference/_providers/nscale.py +44 -0
- huggingface_hub/inference/_providers/openai.py +25 -0
- huggingface_hub/inference/_providers/publicai.py +6 -0
- huggingface_hub/inference/_providers/replicate.py +46 -9
- huggingface_hub/inference/_providers/sambanova.py +37 -1
- huggingface_hub/inference/_providers/scaleway.py +28 -0
- huggingface_hub/inference/_providers/together.py +34 -5
- huggingface_hub/inference/_providers/wavespeed.py +138 -0
- huggingface_hub/inference/_providers/zai_org.py +17 -0
- huggingface_hub/lfs.py +33 -100
- huggingface_hub/repocard.py +34 -38
- huggingface_hub/repocard_data.py +79 -59
- huggingface_hub/serialization/__init__.py +0 -1
- huggingface_hub/serialization/_base.py +12 -15
- huggingface_hub/serialization/_dduf.py +8 -8
- huggingface_hub/serialization/_torch.py +69 -69
- huggingface_hub/utils/__init__.py +27 -8
- huggingface_hub/utils/_auth.py +7 -7
- huggingface_hub/utils/_cache_manager.py +92 -147
- huggingface_hub/utils/_chunk_utils.py +2 -3
- huggingface_hub/utils/_deprecation.py +1 -1
- huggingface_hub/utils/_dotenv.py +55 -0
- huggingface_hub/utils/_experimental.py +7 -5
- huggingface_hub/utils/_fixes.py +0 -10
- huggingface_hub/utils/_git_credential.py +5 -5
- huggingface_hub/utils/_headers.py +8 -30
- huggingface_hub/utils/_http.py +399 -237
- huggingface_hub/utils/_pagination.py +6 -6
- huggingface_hub/utils/_parsing.py +98 -0
- huggingface_hub/utils/_paths.py +5 -5
- huggingface_hub/utils/_runtime.py +74 -22
- huggingface_hub/utils/_safetensors.py +21 -21
- huggingface_hub/utils/_subprocess.py +13 -11
- huggingface_hub/utils/_telemetry.py +4 -4
- huggingface_hub/{commands/_cli_utils.py → utils/_terminal.py} +4 -4
- huggingface_hub/utils/_typing.py +25 -5
- huggingface_hub/utils/_validators.py +55 -74
- huggingface_hub/utils/_verification.py +167 -0
- huggingface_hub/utils/_xet.py +235 -0
- huggingface_hub/utils/_xet_progress_reporting.py +162 -0
- huggingface_hub/utils/insecure_hashlib.py +3 -5
- huggingface_hub/utils/logging.py +8 -11
- huggingface_hub/utils/tqdm.py +33 -4
- {huggingface_hub-0.29.0rc2.dist-info → huggingface_hub-1.1.3.dist-info}/METADATA +94 -82
- huggingface_hub-1.1.3.dist-info/RECORD +155 -0
- {huggingface_hub-0.29.0rc2.dist-info → huggingface_hub-1.1.3.dist-info}/WHEEL +1 -1
- huggingface_hub-1.1.3.dist-info/entry_points.txt +6 -0
- huggingface_hub/commands/delete_cache.py +0 -428
- huggingface_hub/commands/download.py +0 -200
- huggingface_hub/commands/huggingface_cli.py +0 -61
- huggingface_hub/commands/lfs.py +0 -200
- huggingface_hub/commands/repo_files.py +0 -128
- huggingface_hub/commands/scan_cache.py +0 -181
- huggingface_hub/commands/tag.py +0 -159
- huggingface_hub/commands/upload.py +0 -299
- huggingface_hub/commands/upload_large_folder.py +0 -129
- huggingface_hub/commands/user.py +0 -304
- huggingface_hub/commands/version.py +0 -37
- huggingface_hub/inference_api.py +0 -217
- huggingface_hub/keras_mixin.py +0 -500
- huggingface_hub/repository.py +0 -1477
- huggingface_hub/serialization/_tensorflow.py +0 -95
- huggingface_hub/utils/_hf_folder.py +0 -68
- huggingface_hub-0.29.0rc2.dist-info/RECORD +0 -131
- huggingface_hub-0.29.0rc2.dist-info/entry_points.txt +0 -6
- {huggingface_hub-0.29.0rc2.dist-info → huggingface_hub-1.1.3.dist-info/licenses}/LICENSE +0 -0
- {huggingface_hub-0.29.0rc2.dist-info → huggingface_hub-1.1.3.dist-info}/top_level.txt +0 -0
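The deletions at the bottom of the list are the headline breaking changes: `repository.py` (the git-based `Repository` workflow), `keras_mixin.py`, `inference_api.py`, and `serialization/_tensorflow.py` are gone from the 1.x wheel, and the `commands/` CLI package is superseded by `cli/`. As a rough sketch, code that must import across both wheels can branch on the installed version; the `packaging` dependency and the fallback choice here are assumptions, not part of this diff:

```python
# Hypothetical compatibility shim: Repository exists only in 0.x wheels
# (repository.py is deleted in 1.x, per the file list above).
from packaging.version import Version  # assumed to be installed

import huggingface_hub

if Version(huggingface_hub.__version__) < Version("1.0"):
    from huggingface_hub import Repository  # 0.x git-based workflow
else:
    Repository = None  # 1.x: use the HfApi / file-download code paths instead
```

The hunks below reproduce the changes to the generated inference types in full.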
huggingface_hub/inference/_generated/types/__init__.py

@@ -24,12 +24,16 @@ from .chat_completion import (
     ChatCompletionInputFunctionDefinition,
     ChatCompletionInputFunctionName,
     ChatCompletionInputGrammarType,
-    ChatCompletionInputGrammarTypeType,
+    ChatCompletionInputJSONSchema,
     ChatCompletionInputMessage,
     ChatCompletionInputMessageChunk,
     ChatCompletionInputMessageChunkType,
+    ChatCompletionInputResponseFormatJSONObject,
+    ChatCompletionInputResponseFormatJSONSchema,
+    ChatCompletionInputResponseFormatText,
     ChatCompletionInputStreamOptions,
     ChatCompletionInputTool,
+    ChatCompletionInputToolCall,
     ChatCompletionInputToolChoiceClass,
     ChatCompletionInputToolChoiceEnum,
     ChatCompletionInputURL,
@@ -81,6 +85,7 @@ from .image_to_text import (
     ImageToTextOutput,
     ImageToTextParameters,
 )
+from .image_to_video import ImageToVideoInput, ImageToVideoOutput, ImageToVideoParameters, ImageToVideoTargetSize
 from .object_detection import (
     ObjectDetectionBoundingBox,
     ObjectDetectionInput,
huggingface_hub/inference/_generated/types/automatic_speech_recognition.py

@@ -3,7 +3,7 @@
 # See:
 # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
 # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
-from typing import List, Literal, Optional, Union
+from typing import Literal, Optional, Union

 from .base import BaseInferenceType, dataclass_with_extra

@@ -75,11 +75,10 @@ class AutomaticSpeechRecognitionGenerationParameters(BaseInferenceType):
 class AutomaticSpeechRecognitionParameters(BaseInferenceType):
     """Additional inference parameters for Automatic Speech Recognition"""

+    generation_parameters: Optional[AutomaticSpeechRecognitionGenerationParameters] = None
+    """Parametrization of the text generation process"""
     return_timestamps: Optional[bool] = None
     """Whether to output corresponding timestamps with the generated text"""
-    # Will be deprecated in the future when the renaming to `generation_parameters` is implemented in transformers
-    generate_kwargs: Optional[AutomaticSpeechRecognitionGenerationParameters] = None
-    """Parametrization of the text generation process"""


 @dataclass_with_extra
@@ -98,7 +97,7 @@ class AutomaticSpeechRecognitionInput(BaseInferenceType):
 class AutomaticSpeechRecognitionOutputChunk(BaseInferenceType):
     text: str
     """A chunk of text identified by the model"""
-    timestamp: List[float]
+    timestamp: list[float]
     """The start and end timestamps corresponding with the text"""


@@ -108,7 +107,7 @@ class AutomaticSpeechRecognitionOutput(BaseInferenceType):

     text: str
     """The recognized text."""
-    chunks: Optional[List[AutomaticSpeechRecognitionOutputChunk]] = None
+    chunks: Optional[list[AutomaticSpeechRecognitionOutputChunk]] = None
     """When returnTimestamps is enabled, chunks contains a list of audio chunks identified by
     the model.
     """
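For callers that build these dataclasses directly, the hunk above amounts to a rename: `generate_kwargs` (already flagged for deprecation in 0.29) becomes `generation_parameters`. A minimal before/after sketch; `temperature` is assumed to be one of the generation fields, it is not shown in the hunk:

```python
from huggingface_hub.inference._generated.types import (
    AutomaticSpeechRecognitionGenerationParameters,
    AutomaticSpeechRecognitionParameters,
)

generation = AutomaticSpeechRecognitionGenerationParameters(temperature=0.2)

# 0.29.0rc2: AutomaticSpeechRecognitionParameters(generate_kwargs=generation, ...)
# 1.1.3: the deprecation is resolved by the rename below.
params = AutomaticSpeechRecognitionParameters(
    generation_parameters=generation,
    return_timestamps=True,
)
```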
huggingface_hub/inference/_generated/types/base.py

@@ -15,8 +15,9 @@

 import inspect
 import json
+import types
 from dataclasses import asdict, dataclass
-from typing import Any, Dict, List, Type, TypeVar, Union, get_args
+from typing import Any, TypeVar, Union, get_args


 T = TypeVar("T", bound="BaseInferenceType")
@@ -28,7 +29,7 @@ def _repr_with_extra(self):
     return f"{self.__class__.__name__}({', '.join(f'{k}={self.__dict__[k]!r}' for k in fields + other_fields)})"


-def dataclass_with_extra(cls: Type[T]) -> Type[T]:
+def dataclass_with_extra(cls: type[T]) -> type[T]:
     """Decorator to add a custom __repr__ method to a dataclass, showing all fields, including extra ones.

     This decorator only works with dataclasses that inherit from `BaseInferenceType`.
@@ -49,7 +50,7 @@ class BaseInferenceType(dict):
     """

     @classmethod
-    def parse_obj_as_list(cls: Type[T], data: Union[bytes, str, List, Dict]) -> List[T]:
+    def parse_obj_as_list(cls: type[T], data: Union[bytes, str, list, dict]) -> list[T]:
         """Alias to parse server response and return a single instance.

         See `parse_obj` for more details.
@@ -60,7 +61,7 @@ class BaseInferenceType(dict):
         return output

     @classmethod
-    def parse_obj_as_instance(cls: Type[T], data: Union[bytes, str, List, Dict]) -> T:
+    def parse_obj_as_instance(cls: type[T], data: Union[bytes, str, list, dict]) -> T:
         """Alias to parse server response and return a single instance.

         See `parse_obj` for more details.
@@ -71,7 +72,7 @@ class BaseInferenceType(dict):
         return output

     @classmethod
-    def parse_obj(cls: Type[T], data: Union[bytes, str, List, Dict]) -> Union[List[T], T]:
+    def parse_obj(cls: type[T], data: Union[bytes, str, list, dict]) -> Union[list[T], T]:
         """Parse server response as a dataclass or list of dataclasses.

         To enable future-compatibility, we want to handle cases where the server return more fields than expected.
@@ -85,7 +86,7 @@ class BaseInferenceType(dict):
             data = json.loads(data)

         # If a list, parse each item individually
-        if isinstance(data, List):
+        if isinstance(data, list):
             return [cls.parse_obj(d) for d in data]  # type: ignore [misc]

         # At this point, we expect a dict
@@ -109,7 +110,9 @@ class BaseInferenceType(dict):
             else:
                 expected_types = get_args(field_type)
                 for expected_type in expected_types:
-                    if getattr(expected_type, "_name", None) == "List":
+                    if (
+                        isinstance(expected_type, types.GenericAlias) and expected_type.__origin__ is list
+                    ) or getattr(expected_type, "_name", None) == "List":
                         expected_type = get_args(expected_type)[
                             0
                         ]  # assume same type for all items in the list
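The `base.py` hunks migrate the parser to builtin generics (`type[T]`, `list`, `dict`) and teach the list-detection branch to recognize `types.GenericAlias` (i.e. `list[X]` annotations) alongside the legacy `typing.List` spelling. The forward-compatibility behavior the docstrings describe can be sketched like this; the payload and the extra field name are illustrative:

```python
from huggingface_hub.inference._generated.types import ChatCompletionInputMessage

# Servers may return more fields than the dataclass declares; parse_obj keeps
# them, and the dataclass_with_extra __repr__ shown above lists them alongside
# the declared fields.
msg = ChatCompletionInputMessage.parse_obj_as_instance(
    b'{"role": "assistant", "content": "hi", "server_only_field": 42}'
)
print(msg.role, msg.content)  # declared fields -> regular attribute access
print(msg)                    # repr also surfaces server_only_field
```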
huggingface_hub/inference/_generated/types/chat_completion.py

@@ -3,7 +3,7 @@
 # See:
 # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
 # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
-from typing import Any, List, Literal, Optional, Union
+from typing import Any, Literal, Optional, Union

 from .base import BaseInferenceType, dataclass_with_extra

@@ -23,29 +23,78 @@ class ChatCompletionInputMessageChunk(BaseInferenceType):
     text: Optional[str] = None


+@dataclass_with_extra
+class ChatCompletionInputFunctionDefinition(BaseInferenceType):
+    name: str
+    parameters: Any
+    description: Optional[str] = None
+
+
+@dataclass_with_extra
+class ChatCompletionInputToolCall(BaseInferenceType):
+    function: ChatCompletionInputFunctionDefinition
+    id: str
+    type: str
+
+
 @dataclass_with_extra
 class ChatCompletionInputMessage(BaseInferenceType):
-    content: Union[List[ChatCompletionInputMessageChunk], str]
     role: str
+    content: Optional[Union[list[ChatCompletionInputMessageChunk], str]] = None
     name: Optional[str] = None
+    tool_calls: Optional[list[ChatCompletionInputToolCall]] = None


-ChatCompletionInputGrammarTypeType = Literal["json", "regex"]
+@dataclass_with_extra
+class ChatCompletionInputJSONSchema(BaseInferenceType):
+    name: str
+    """
+    The name of the response format.
+    """
+    description: Optional[str] = None
+    """
+    A description of what the response format is for, used by the model to determine
+    how to respond in the format.
+    """
+    schema: Optional[dict[str, object]] = None
+    """
+    The schema for the response format, described as a JSON Schema object. Learn how
+    to build JSON schemas [here](https://json-schema.org/).
+    """
+    strict: Optional[bool] = None
+    """
+    Whether to enable strict schema adherence when generating the output. If set to
+    true, the model will always follow the exact schema defined in the `schema`
+    field.
+    """


 @dataclass_with_extra
-class ChatCompletionInputGrammarType(BaseInferenceType):
-    type: "ChatCompletionInputGrammarTypeType"
-    value: Any
-    """A string that represents a [JSON Schema](https://json-schema.org/).
-    JSON Schema is a declarative language that allows to annotate JSON documents
-    with types and descriptions.
-    """
+class ChatCompletionInputResponseFormatText(BaseInferenceType):
+    type: Literal["text"]
+
+
+@dataclass_with_extra
+class ChatCompletionInputResponseFormatJSONSchema(BaseInferenceType):
+    type: Literal["json_schema"]
+    json_schema: ChatCompletionInputJSONSchema
+
+
+@dataclass_with_extra
+class ChatCompletionInputResponseFormatJSONObject(BaseInferenceType):
+    type: Literal["json_object"]
+
+
+ChatCompletionInputGrammarType = Union[
+    ChatCompletionInputResponseFormatText,
+    ChatCompletionInputResponseFormatJSONSchema,
+    ChatCompletionInputResponseFormatJSONObject,
+]


 @dataclass_with_extra
 class ChatCompletionInputStreamOptions(BaseInferenceType):
-    include_usage: bool
+    include_usage: Optional[bool] = None
     """If set, an additional chunk will be streamed before the data: [DONE] message. The usage
     field on this chunk shows the token usage statistics for the entire request, and the
     choices field will always be an empty array. All other chunks will also include a usage
@@ -66,13 +115,6 @@ class ChatCompletionInputToolChoiceClass(BaseInferenceType):
 ChatCompletionInputToolChoiceEnum = Literal["auto", "none", "required"]


-@dataclass_with_extra
-class ChatCompletionInputFunctionDefinition(BaseInferenceType):
-    arguments: Any
-    name: str
-    description: Optional[str] = None
-
-
 @dataclass_with_extra
 class ChatCompletionInputTool(BaseInferenceType):
     function: ChatCompletionInputFunctionDefinition
@@ -87,14 +129,14 @@ class ChatCompletionInput(BaseInferenceType):
     https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts.
     """

-    messages: List[ChatCompletionInputMessage]
+    messages: list[ChatCompletionInputMessage]
     """A list of messages comprising the conversation so far."""
     frequency_penalty: Optional[float] = None
     """Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing
     frequency in the text so far,
     decreasing the model's likelihood to repeat the same line verbatim.
     """
-    logit_bias: Optional[List[float]] = None
+    logit_bias: Optional[list[float]] = None
     """UNUSED
     Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON
     object that maps tokens
@@ -130,7 +172,7 @@ class ChatCompletionInput(BaseInferenceType):
     """
     response_format: Optional[ChatCompletionInputGrammarType] = None
     seed: Optional[int] = None
-    stop: Optional[List[str]] = None
+    stop: Optional[list[str]] = None
     """Up to 4 sequences where the API will stop generating further tokens."""
     stream: Optional[bool] = None
     stream_options: Optional[ChatCompletionInputStreamOptions] = None
@@ -143,7 +185,7 @@ class ChatCompletionInput(BaseInferenceType):
     tool_choice: Optional[Union[ChatCompletionInputToolChoiceClass, "ChatCompletionInputToolChoiceEnum"]] = None
     tool_prompt: Optional[str] = None
     """A prompt to be appended before the tools"""
-    tools: Optional[List[ChatCompletionInputTool]] = None
+    tools: Optional[list[ChatCompletionInputTool]] = None
     """A list of tools the model may call. Currently, only functions are supported as a tool.
     Use this to provide a list of
     functions the model may generate JSON inputs for.
@@ -171,17 +213,17 @@ class ChatCompletionOutputTopLogprob(BaseInferenceType):
 class ChatCompletionOutputLogprob(BaseInferenceType):
     logprob: float
     token: str
-    top_logprobs: List[ChatCompletionOutputTopLogprob]
+    top_logprobs: list[ChatCompletionOutputTopLogprob]


 @dataclass_with_extra
 class ChatCompletionOutputLogprobs(BaseInferenceType):
-    content: List[ChatCompletionOutputLogprob]
+    content: list[ChatCompletionOutputLogprob]


 @dataclass_with_extra
 class ChatCompletionOutputFunctionDefinition(BaseInferenceType):
-    arguments: Any
+    arguments: str
     name: str
     description: Optional[str] = None

@@ -197,7 +239,9 @@ class ChatCompletionOutputToolCall(BaseInferenceType):
 class ChatCompletionOutputMessage(BaseInferenceType):
     role: str
     content: Optional[str] = None
-    tool_calls: Optional[List[ChatCompletionOutputToolCall]] = None
+    reasoning: Optional[str] = None
+    tool_call_id: Optional[str] = None
+    tool_calls: Optional[list[ChatCompletionOutputToolCall]] = None


 @dataclass_with_extra
@@ -223,7 +267,7 @@ class ChatCompletionOutput(BaseInferenceType):
     https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts.
     """

-    choices: List[ChatCompletionOutputComplete]
+    choices: list[ChatCompletionOutputComplete]
     created: int
     id: str
     model: str
@@ -249,7 +293,9 @@ class ChatCompletionStreamOutputDeltaToolCall(BaseInferenceType):
 class ChatCompletionStreamOutputDelta(BaseInferenceType):
     role: str
     content: Optional[str] = None
-    tool_calls: Optional[List[ChatCompletionStreamOutputDeltaToolCall]] = None
+    reasoning: Optional[str] = None
+    tool_call_id: Optional[str] = None
+    tool_calls: Optional[list[ChatCompletionStreamOutputDeltaToolCall]] = None


 @dataclass_with_extra
@@ -262,12 +308,12 @@ class ChatCompletionStreamOutputTopLogprob(BaseInferenceType):
 class ChatCompletionStreamOutputLogprob(BaseInferenceType):
     logprob: float
     token: str
-    top_logprobs: List[ChatCompletionStreamOutputTopLogprob]
+    top_logprobs: list[ChatCompletionStreamOutputTopLogprob]


 @dataclass_with_extra
 class ChatCompletionStreamOutputLogprobs(BaseInferenceType):
-    content: List[ChatCompletionStreamOutputLogprob]
+    content: list[ChatCompletionStreamOutputLogprob]


 @dataclass_with_extra
@@ -293,7 +339,7 @@ class ChatCompletionStreamOutput(BaseInferenceType):
     https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts.
     """

-    choices: List[ChatCompletionStreamOutputChoice]
+    choices: list[ChatCompletionStreamOutputChoice]
     created: int
     id: str
     model: str
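The net effect of the chat-completion hunks: the old TGI-style grammar object (a `type` of `"json"` or `"regex"` plus a `value`) is replaced by the OpenAI-style `response_format` union, tool calls gain first-class input types, and output messages grow `reasoning` and `tool_call_id` fields. A sketch of building a strict JSON-schema response format with the new types; the schema contents are illustrative:

```python
from huggingface_hub.inference._generated.types import (
    ChatCompletionInputJSONSchema,
    ChatCompletionInputResponseFormatJSONSchema,
)

# OpenAI-style replacement for 0.29's {"type": "json", "value": <schema>} grammar.
response_format = ChatCompletionInputResponseFormatJSONSchema(
    type="json_schema",
    json_schema=ChatCompletionInputJSONSchema(
        name="weather_report",
        schema={
            "type": "object",
            "properties": {"city": {"type": "string"}, "temp_c": {"type": "number"}},
            "required": ["city", "temp_c"],
        },
        strict=True,  # force exact adherence to the schema
    ),
)
```

`ChatCompletionInputResponseFormatText` and `ChatCompletionInputResponseFormatJSONObject` are the other two members of the union, each carrying only their `type` discriminator.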
huggingface_hub/inference/_generated/types/depth_estimation.py

@@ -3,7 +3,7 @@
 # See:
 # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
 # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
-from typing import Any, Dict, Optional
+from typing import Any, Optional

 from .base import BaseInferenceType, dataclass_with_extra

@@ -14,7 +14,7 @@ class DepthEstimationInput(BaseInferenceType):

     inputs: Any
     """The input image data"""
-    parameters: Optional[Dict[str, Any]] = None
+    parameters: Optional[dict[str, Any]] = None
     """Additional inference parameters for Depth Estimation"""

huggingface_hub/inference/_generated/types/document_question_answering.py

@@ -3,7 +3,7 @@
 # See:
 # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
 # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
-from typing import Any, List, Optional, Union
+from typing import Any, Optional, Union

 from .base import BaseInferenceType, dataclass_with_extra

@@ -46,7 +46,7 @@ class DocumentQuestionAnsweringParameters(BaseInferenceType):
     """The number of answers to return (will be chosen by order of likelihood). Can return less
     than top_k answers if there are not enough options available within the context.
     """
-    word_boxes: Optional[List[Union[List[float], str]]] = None
+    word_boxes: Optional[list[Union[list[float], str]]] = None
     """A list of words and bounding boxes (normalized 0->1000). If provided, the inference will
     skip the OCR step and use the provided bounding boxes instead.
     """
huggingface_hub/inference/_generated/types/feature_extraction.py

@@ -3,7 +3,7 @@
 # See:
 # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
 # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
-from typing import List, Literal, Optional, Union
+from typing import Literal, Optional, Union

 from .base import BaseInferenceType, dataclass_with_extra

@@ -19,7 +19,7 @@ class FeatureExtractionInput(BaseInferenceType):
     https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tei-import.ts.
     """

-    inputs: Union[List[str], str]
+    inputs: Union[list[str], str]
     """The text or list of texts to embed."""
     normalize: Optional[bool] = None
     prompt_name: Optional[str] = None
huggingface_hub/inference/_generated/types/fill_mask.py

@@ -3,7 +3,7 @@
 # See:
 # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
 # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
-from typing import Any, List, Optional
+from typing import Any, Optional

 from .base import BaseInferenceType, dataclass_with_extra

@@ -12,7 +12,7 @@ from .base import BaseInferenceType, dataclass_with_extra
 class FillMaskParameters(BaseInferenceType):
     """Additional inference parameters for Fill Mask"""

-    targets: Optional[List[str]] = None
+    targets: Optional[list[str]] = None
     """When passed, the model will limit the scores to the passed targets instead of looking up
     in the whole vocabulary. If the provided targets are not in the model vocab, they will be
     tokenized and the first resulting token will be used (with a warning, and that might be
huggingface_hub/inference/_generated/types/image_to_image.py

@@ -10,7 +10,9 @@ from .base import BaseInferenceType, dataclass_with_extra

 @dataclass_with_extra
 class ImageToImageTargetSize(BaseInferenceType):
-    """The size in pixel of the output image."""
+    """The size in pixels of the output image. This parameter is only supported by some
+    providers and for specific models. It will be ignored when unsupported.
+    """

     height: int
     width: int
@@ -30,8 +32,12 @@ class ImageToImageParameters(BaseInferenceType):
     """For diffusion models. The number of denoising steps. More denoising steps usually lead to
     a higher quality image at the expense of slower inference.
     """
+    prompt: Optional[str] = None
+    """The text prompt to guide the image generation."""
     target_size: Optional[ImageToImageTargetSize] = None
-    """The size in pixel of the output image."""
+    """The size in pixels of the output image. This parameter is only supported by some
+    providers and for specific models. It will be ignored when unsupported.
+    """


 @dataclass_with_extra
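`prompt` is new on `ImageToImageParameters`, and the target-size docstring now warns that the field is provider-dependent. A small construction sketch; the values are illustrative:

```python
from huggingface_hub.inference._generated.types import (
    ImageToImageParameters,
    ImageToImageTargetSize,
)

params = ImageToImageParameters(
    prompt="turn the sketch into a watercolor painting",  # new in 1.x
    target_size=ImageToImageTargetSize(height=768, width=768),  # ignored by providers without support
)
```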
huggingface_hub/inference/_generated/types/image_to_text.py

@@ -75,11 +75,10 @@ class ImageToTextGenerationParameters(BaseInferenceType):
 class ImageToTextParameters(BaseInferenceType):
     """Additional inference parameters for Image To Text"""

+    generation_parameters: Optional[ImageToTextGenerationParameters] = None
+    """Parametrization of the text generation process"""
     max_new_tokens: Optional[int] = None
     """The amount of maximum tokens to generate."""
-    # Will be deprecated in the future when the renaming to `generation_parameters` is implemented in transformers
-    generate_kwargs: Optional[ImageToTextGenerationParameters] = None
-    """Parametrization of the text generation process"""


 @dataclass_with_extra
huggingface_hub/inference/_generated/types/image_to_video.py

@@ -0,0 +1,60 @@
+# Inference code generated from the JSON schema spec in @huggingface/tasks.
+#
+# See:
+# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
+# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
+from typing import Any, Optional
+
+from .base import BaseInferenceType, dataclass_with_extra
+
+
+@dataclass_with_extra
+class ImageToVideoTargetSize(BaseInferenceType):
+    """The size in pixel of the output video frames."""
+
+    height: int
+    width: int
+
+
+@dataclass_with_extra
+class ImageToVideoParameters(BaseInferenceType):
+    """Additional inference parameters for Image To Video"""
+
+    guidance_scale: Optional[float] = None
+    """For diffusion models. A higher guidance scale value encourages the model to generate
+    videos closely linked to the text prompt at the expense of lower image quality.
+    """
+    negative_prompt: Optional[str] = None
+    """One prompt to guide what NOT to include in video generation."""
+    num_frames: Optional[float] = None
+    """The num_frames parameter determines how many video frames are generated."""
+    num_inference_steps: Optional[int] = None
+    """The number of denoising steps. More denoising steps usually lead to a higher quality
+    video at the expense of slower inference.
+    """
+    prompt: Optional[str] = None
+    """The text prompt to guide the video generation."""
+    seed: Optional[int] = None
+    """Seed for the random number generator."""
+    target_size: Optional[ImageToVideoTargetSize] = None
+    """The size in pixel of the output video frames."""
+
+
+@dataclass_with_extra
+class ImageToVideoInput(BaseInferenceType):
+    """Inputs for Image To Video inference"""
+
+    inputs: str
+    """The input image data as a base64-encoded string. If no `parameters` are provided, you can
+    also provide the image data as a raw bytes payload.
+    """
+    parameters: Optional[ImageToVideoParameters] = None
+    """Additional inference parameters for Image To Video"""
+
+
+@dataclass_with_extra
+class ImageToVideoOutput(BaseInferenceType):
+    """Outputs of inference for the Image To Video task"""
+
+    video: Any
+    """The generated video returned as raw bytes in the payload."""
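`image_to_video.py` is an entirely new task module, and its four types are re-exported from the package `__init__` (see the first hunk). Constructing a request payload from the generated types, as a sketch with illustrative file name and parameter values:

```python
import base64

from huggingface_hub.inference._generated.types import (
    ImageToVideoInput,
    ImageToVideoParameters,
)

# The inputs field expects the image as a base64-encoded string.
with open("frame.png", "rb") as f:
    image_b64 = base64.b64encode(f.read()).decode()

payload = ImageToVideoInput(
    inputs=image_b64,
    parameters=ImageToVideoParameters(
        num_frames=48,
        prompt="slow pan across the skyline",
        seed=3,
    ),
)
```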
huggingface_hub/inference/_generated/types/sentence_similarity.py

@@ -3,14 +3,14 @@
 # See:
 # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
 # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
-from typing import Any, Dict, List, Optional
+from typing import Any, Optional

 from .base import BaseInferenceType, dataclass_with_extra


 @dataclass_with_extra
 class SentenceSimilarityInputData(BaseInferenceType):
-    sentences: List[str]
+    sentences: list[str]
     """A list of strings which will be compared against the source_sentence."""
     source_sentence: str
     """The string that you wish to compare the other strings with. This can be a phrase,
@@ -23,5 +23,5 @@ class SentenceSimilarityInput(BaseInferenceType):
     """Inputs for Sentence similarity inference"""

     inputs: SentenceSimilarityInputData
-    parameters: Optional[Dict[str, Any]] = None
+    parameters: Optional[dict[str, Any]] = None
     """Additional inference parameters for Sentence Similarity"""
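The sentence-similarity payload nests the texts inside an `inputs` object rather than passing them flat, which the hunk's context lines make visible. A construction sketch with the generated types; the sentences are illustrative:

```python
from huggingface_hub.inference._generated.types import (
    SentenceSimilarityInput,
    SentenceSimilarityInputData,
)

data = SentenceSimilarityInputData(
    sentences=["That is a happy dog", "That is a very happy person", "Today is a sunny day"],
    source_sentence="That is a happy person",
)
payload = SentenceSimilarityInput(inputs=data)  # parameters stays a plain dict[str, Any]
```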
huggingface_hub/inference/_generated/types/summarization.py

@@ -3,7 +3,7 @@
 # See:
 # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
 # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
-from typing import Any, Dict, Literal, Optional
+from typing import Any, Literal, Optional

 from .base import BaseInferenceType, dataclass_with_extra

@@ -17,7 +17,7 @@ class SummarizationParameters(BaseInferenceType):

     clean_up_tokenization_spaces: Optional[bool] = None
     """Whether to clean up the potential extra spaces in the text output."""
-    generate_parameters: Optional[Dict[str, Any]] = None
+    generate_parameters: Optional[dict[str, Any]] = None
     """Additional parametrization of the text generation algorithm."""
     truncation: Optional["SummarizationTruncationStrategy"] = None
     """The truncation strategy to use."""
huggingface_hub/inference/_generated/types/table_question_answering.py

@@ -3,7 +3,7 @@
 # See:
 # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
 # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
-from typing import Dict, List, Literal, Optional
+from typing import Literal, Optional

 from .base import BaseInferenceType, dataclass_with_extra

@@ -14,7 +14,7 @@ class TableQuestionAnsweringInputData(BaseInferenceType):

     question: str
     """The question to be answered about the table"""
-    table: Dict[str, List[str]]
+    table: dict[str, list[str]]
     """The table to serve as context for the questions"""


@@ -54,9 +54,9 @@ class TableQuestionAnsweringOutputElement(BaseInferenceType):
     """The answer of the question given the table. If there is an aggregator, the answer will be
     preceded by `AGGREGATOR >`.
     """
-    cells: List[str]
-    """List of strings made up of the answer cell values."""
-    coordinates: List[List[int]]
+    cells: list[str]
+    """list of strings made up of the answer cell values."""
+    coordinates: list[list[int]]
     """Coordinates of the cells of the answers."""
    aggregator: Optional[str] = None
     """If the model has an aggregator, this returns the aggregator."""
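The table payload is a column-name to cell-values mapping, now spelled with builtin `dict`/`list`, and every cell is a string. A construction sketch; the wrapping `TableQuestionAnsweringInput` is not shown in the hunk but follows the same Input/Parameters pattern as the other task modules, and the numbers are illustrative:

```python
from huggingface_hub.inference._generated.types import (
    TableQuestionAnsweringInput,
    TableQuestionAnsweringInputData,
)

data = TableQuestionAnsweringInputData(
    question="How many stars does the transformers repository have?",
    table={  # column name -> column cells; note the cells are strings
        "Repository": ["transformers", "datasets"],
        "Stars": ["36542", "4512"],
    },
)
payload = TableQuestionAnsweringInput(inputs=data)
```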
huggingface_hub/inference/_generated/types/text2text_generation.py

@@ -3,7 +3,7 @@
 # See:
 # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
 # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
-from typing import Any, Dict, Literal, Optional
+from typing import Any, Literal, Optional

 from .base import BaseInferenceType, dataclass_with_extra

@@ -17,7 +17,7 @@ class Text2TextGenerationParameters(BaseInferenceType):

     clean_up_tokenization_spaces: Optional[bool] = None
     """Whether to clean up the potential extra spaces in the text output."""
-    generate_parameters: Optional[Dict[str, Any]] = None
+    generate_parameters: Optional[dict[str, Any]] = None
     """Additional parametrization of the text generation algorithm"""
     truncation: Optional["Text2TextGenerationTruncationStrategy"] = None
     """The truncation strategy to use"""