huggingface-hub 0.31.0rc0__py3-none-any.whl → 1.1.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (150)
  1. huggingface_hub/__init__.py +145 -46
  2. huggingface_hub/_commit_api.py +168 -119
  3. huggingface_hub/_commit_scheduler.py +15 -15
  4. huggingface_hub/_inference_endpoints.py +15 -12
  5. huggingface_hub/_jobs_api.py +301 -0
  6. huggingface_hub/_local_folder.py +18 -3
  7. huggingface_hub/_login.py +31 -63
  8. huggingface_hub/_oauth.py +460 -0
  9. huggingface_hub/_snapshot_download.py +239 -80
  10. huggingface_hub/_space_api.py +5 -5
  11. huggingface_hub/_tensorboard_logger.py +15 -19
  12. huggingface_hub/_upload_large_folder.py +172 -76
  13. huggingface_hub/_webhooks_payload.py +3 -3
  14. huggingface_hub/_webhooks_server.py +13 -25
  15. huggingface_hub/{commands → cli}/__init__.py +1 -15
  16. huggingface_hub/cli/_cli_utils.py +173 -0
  17. huggingface_hub/cli/auth.py +147 -0
  18. huggingface_hub/cli/cache.py +841 -0
  19. huggingface_hub/cli/download.py +189 -0
  20. huggingface_hub/cli/hf.py +60 -0
  21. huggingface_hub/cli/inference_endpoints.py +377 -0
  22. huggingface_hub/cli/jobs.py +772 -0
  23. huggingface_hub/cli/lfs.py +175 -0
  24. huggingface_hub/cli/repo.py +315 -0
  25. huggingface_hub/cli/repo_files.py +94 -0
  26. huggingface_hub/{commands/env.py → cli/system.py} +10 -13
  27. huggingface_hub/cli/upload.py +294 -0
  28. huggingface_hub/cli/upload_large_folder.py +117 -0
  29. huggingface_hub/community.py +20 -12
  30. huggingface_hub/constants.py +38 -53
  31. huggingface_hub/dataclasses.py +609 -0
  32. huggingface_hub/errors.py +80 -30
  33. huggingface_hub/fastai_utils.py +30 -41
  34. huggingface_hub/file_download.py +435 -351
  35. huggingface_hub/hf_api.py +2050 -1124
  36. huggingface_hub/hf_file_system.py +269 -152
  37. huggingface_hub/hub_mixin.py +43 -63
  38. huggingface_hub/inference/_client.py +347 -434
  39. huggingface_hub/inference/_common.py +133 -121
  40. huggingface_hub/inference/_generated/_async_client.py +397 -541
  41. huggingface_hub/inference/_generated/types/__init__.py +5 -1
  42. huggingface_hub/inference/_generated/types/automatic_speech_recognition.py +3 -3
  43. huggingface_hub/inference/_generated/types/base.py +10 -7
  44. huggingface_hub/inference/_generated/types/chat_completion.py +59 -23
  45. huggingface_hub/inference/_generated/types/depth_estimation.py +2 -2
  46. huggingface_hub/inference/_generated/types/document_question_answering.py +2 -2
  47. huggingface_hub/inference/_generated/types/feature_extraction.py +2 -2
  48. huggingface_hub/inference/_generated/types/fill_mask.py +2 -2
  49. huggingface_hub/inference/_generated/types/image_to_image.py +6 -2
  50. huggingface_hub/inference/_generated/types/image_to_video.py +60 -0
  51. huggingface_hub/inference/_generated/types/sentence_similarity.py +3 -3
  52. huggingface_hub/inference/_generated/types/summarization.py +2 -2
  53. huggingface_hub/inference/_generated/types/table_question_answering.py +5 -5
  54. huggingface_hub/inference/_generated/types/text2text_generation.py +2 -2
  55. huggingface_hub/inference/_generated/types/text_generation.py +10 -10
  56. huggingface_hub/inference/_generated/types/text_to_video.py +2 -2
  57. huggingface_hub/inference/_generated/types/token_classification.py +2 -2
  58. huggingface_hub/inference/_generated/types/translation.py +2 -2
  59. huggingface_hub/inference/_generated/types/zero_shot_classification.py +2 -2
  60. huggingface_hub/inference/_generated/types/zero_shot_image_classification.py +2 -2
  61. huggingface_hub/inference/_generated/types/zero_shot_object_detection.py +1 -3
  62. huggingface_hub/inference/_mcp/__init__.py +0 -0
  63. huggingface_hub/inference/_mcp/_cli_hacks.py +88 -0
  64. huggingface_hub/inference/_mcp/agent.py +100 -0
  65. huggingface_hub/inference/_mcp/cli.py +247 -0
  66. huggingface_hub/inference/_mcp/constants.py +81 -0
  67. huggingface_hub/inference/_mcp/mcp_client.py +395 -0
  68. huggingface_hub/inference/_mcp/types.py +45 -0
  69. huggingface_hub/inference/_mcp/utils.py +128 -0
  70. huggingface_hub/inference/_providers/__init__.py +82 -7
  71. huggingface_hub/inference/_providers/_common.py +129 -27
  72. huggingface_hub/inference/_providers/black_forest_labs.py +6 -6
  73. huggingface_hub/inference/_providers/cerebras.py +1 -1
  74. huggingface_hub/inference/_providers/clarifai.py +13 -0
  75. huggingface_hub/inference/_providers/cohere.py +20 -3
  76. huggingface_hub/inference/_providers/fal_ai.py +183 -56
  77. huggingface_hub/inference/_providers/featherless_ai.py +38 -0
  78. huggingface_hub/inference/_providers/fireworks_ai.py +18 -0
  79. huggingface_hub/inference/_providers/groq.py +9 -0
  80. huggingface_hub/inference/_providers/hf_inference.py +69 -30
  81. huggingface_hub/inference/_providers/hyperbolic.py +4 -4
  82. huggingface_hub/inference/_providers/nebius.py +33 -5
  83. huggingface_hub/inference/_providers/novita.py +5 -5
  84. huggingface_hub/inference/_providers/nscale.py +44 -0
  85. huggingface_hub/inference/_providers/openai.py +3 -1
  86. huggingface_hub/inference/_providers/publicai.py +6 -0
  87. huggingface_hub/inference/_providers/replicate.py +31 -13
  88. huggingface_hub/inference/_providers/sambanova.py +18 -4
  89. huggingface_hub/inference/_providers/scaleway.py +28 -0
  90. huggingface_hub/inference/_providers/together.py +20 -5
  91. huggingface_hub/inference/_providers/wavespeed.py +138 -0
  92. huggingface_hub/inference/_providers/zai_org.py +17 -0
  93. huggingface_hub/lfs.py +33 -100
  94. huggingface_hub/repocard.py +34 -38
  95. huggingface_hub/repocard_data.py +57 -57
  96. huggingface_hub/serialization/__init__.py +0 -1
  97. huggingface_hub/serialization/_base.py +12 -15
  98. huggingface_hub/serialization/_dduf.py +8 -8
  99. huggingface_hub/serialization/_torch.py +69 -69
  100. huggingface_hub/utils/__init__.py +19 -8
  101. huggingface_hub/utils/_auth.py +7 -7
  102. huggingface_hub/utils/_cache_manager.py +92 -147
  103. huggingface_hub/utils/_chunk_utils.py +2 -3
  104. huggingface_hub/utils/_deprecation.py +1 -1
  105. huggingface_hub/utils/_dotenv.py +55 -0
  106. huggingface_hub/utils/_experimental.py +7 -5
  107. huggingface_hub/utils/_fixes.py +0 -10
  108. huggingface_hub/utils/_git_credential.py +5 -5
  109. huggingface_hub/utils/_headers.py +8 -30
  110. huggingface_hub/utils/_http.py +398 -239
  111. huggingface_hub/utils/_pagination.py +4 -4
  112. huggingface_hub/utils/_parsing.py +98 -0
  113. huggingface_hub/utils/_paths.py +5 -5
  114. huggingface_hub/utils/_runtime.py +61 -24
  115. huggingface_hub/utils/_safetensors.py +21 -21
  116. huggingface_hub/utils/_subprocess.py +9 -9
  117. huggingface_hub/utils/_telemetry.py +4 -4
  118. huggingface_hub/{commands/_cli_utils.py → utils/_terminal.py} +4 -4
  119. huggingface_hub/utils/_typing.py +25 -5
  120. huggingface_hub/utils/_validators.py +55 -74
  121. huggingface_hub/utils/_verification.py +167 -0
  122. huggingface_hub/utils/_xet.py +64 -17
  123. huggingface_hub/utils/_xet_progress_reporting.py +162 -0
  124. huggingface_hub/utils/insecure_hashlib.py +3 -5
  125. huggingface_hub/utils/logging.py +8 -11
  126. huggingface_hub/utils/tqdm.py +5 -4
  127. {huggingface_hub-0.31.0rc0.dist-info → huggingface_hub-1.1.3.dist-info}/METADATA +94 -85
  128. huggingface_hub-1.1.3.dist-info/RECORD +155 -0
  129. {huggingface_hub-0.31.0rc0.dist-info → huggingface_hub-1.1.3.dist-info}/WHEEL +1 -1
  130. huggingface_hub-1.1.3.dist-info/entry_points.txt +6 -0
  131. huggingface_hub/commands/delete_cache.py +0 -474
  132. huggingface_hub/commands/download.py +0 -200
  133. huggingface_hub/commands/huggingface_cli.py +0 -61
  134. huggingface_hub/commands/lfs.py +0 -200
  135. huggingface_hub/commands/repo_files.py +0 -128
  136. huggingface_hub/commands/scan_cache.py +0 -181
  137. huggingface_hub/commands/tag.py +0 -159
  138. huggingface_hub/commands/upload.py +0 -314
  139. huggingface_hub/commands/upload_large_folder.py +0 -129
  140. huggingface_hub/commands/user.py +0 -304
  141. huggingface_hub/commands/version.py +0 -37
  142. huggingface_hub/inference_api.py +0 -217
  143. huggingface_hub/keras_mixin.py +0 -500
  144. huggingface_hub/repository.py +0 -1477
  145. huggingface_hub/serialization/_tensorflow.py +0 -95
  146. huggingface_hub/utils/_hf_folder.py +0 -68
  147. huggingface_hub-0.31.0rc0.dist-info/RECORD +0 -135
  148. huggingface_hub-0.31.0rc0.dist-info/entry_points.txt +0 -6
  149. {huggingface_hub-0.31.0rc0.dist-info → huggingface_hub-1.1.3.dist-info/licenses}/LICENSE +0 -0
  150. {huggingface_hub-0.31.0rc0.dist-info → huggingface_hub-1.1.3.dist-info}/top_level.txt +0 -0
huggingface_hub/inference/_generated/types/__init__.py
@@ -24,10 +24,13 @@ from .chat_completion import (
     ChatCompletionInputFunctionDefinition,
     ChatCompletionInputFunctionName,
     ChatCompletionInputGrammarType,
-    ChatCompletionInputGrammarTypeType,
+    ChatCompletionInputJSONSchema,
     ChatCompletionInputMessage,
     ChatCompletionInputMessageChunk,
     ChatCompletionInputMessageChunkType,
+    ChatCompletionInputResponseFormatJSONObject,
+    ChatCompletionInputResponseFormatJSONSchema,
+    ChatCompletionInputResponseFormatText,
     ChatCompletionInputStreamOptions,
     ChatCompletionInputTool,
     ChatCompletionInputToolCall,
@@ -82,6 +85,7 @@ from .image_to_text import (
     ImageToTextOutput,
     ImageToTextParameters,
 )
+from .image_to_video import ImageToVideoInput, ImageToVideoOutput, ImageToVideoParameters, ImageToVideoTargetSize
 from .object_detection import (
     ObjectDetectionBoundingBox,
     ObjectDetectionInput,
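
The net effect of these `__init__.py` changes is a new public import surface for the reworked chat-completion response formats and the new image-to-video task. A minimal sketch of the imports this enables (path as shown in the diff; most generated types are also re-exported from the top-level `huggingface_hub` package):

    from huggingface_hub.inference._generated.types import (
        ChatCompletionInputJSONSchema,
        ChatCompletionInputResponseFormatJSONObject,
        ChatCompletionInputResponseFormatJSONSchema,
        ChatCompletionInputResponseFormatText,
        ImageToVideoInput,
        ImageToVideoOutput,
    )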
huggingface_hub/inference/_generated/types/automatic_speech_recognition.py
@@ -3,7 +3,7 @@
 # See:
 # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
 # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
-from typing import List, Literal, Optional, Union
+from typing import Literal, Optional, Union
 
 from .base import BaseInferenceType, dataclass_with_extra
 
@@ -97,7 +97,7 @@ class AutomaticSpeechRecognitionInput(BaseInferenceType):
 class AutomaticSpeechRecognitionOutputChunk(BaseInferenceType):
     text: str
     """A chunk of text identified by the model"""
-    timestamp: List[float]
+    timestamp: list[float]
     """The start and end timestamps corresponding with the text"""
 
 
@@ -107,7 +107,7 @@ class AutomaticSpeechRecognitionOutput(BaseInferenceType):
 
     text: str
     """The recognized text."""
-    chunks: Optional[List[AutomaticSpeechRecognitionOutputChunk]] = None
+    chunks: Optional[list[AutomaticSpeechRecognitionOutputChunk]] = None
     """When returnTimestamps is enabled, chunks contains a list of audio chunks identified by
     the model.
     """
huggingface_hub/inference/_generated/types/base.py
@@ -15,8 +15,9 @@
 
 import inspect
 import json
+import types
 from dataclasses import asdict, dataclass
-from typing import Any, Dict, List, Type, TypeVar, Union, get_args
+from typing import Any, TypeVar, Union, get_args
 
 
 T = TypeVar("T", bound="BaseInferenceType")
@@ -28,7 +29,7 @@ def _repr_with_extra(self):
     return f"{self.__class__.__name__}({', '.join(f'{k}={self.__dict__[k]!r}' for k in fields + other_fields)})"
 
 
-def dataclass_with_extra(cls: Type[T]) -> Type[T]:
+def dataclass_with_extra(cls: type[T]) -> type[T]:
     """Decorator to add a custom __repr__ method to a dataclass, showing all fields, including extra ones.
 
     This decorator only works with dataclasses that inherit from `BaseInferenceType`.
@@ -49,7 +50,7 @@ class BaseInferenceType(dict):
     """
 
     @classmethod
-    def parse_obj_as_list(cls: Type[T], data: Union[bytes, str, List, Dict]) -> List[T]:
+    def parse_obj_as_list(cls: type[T], data: Union[bytes, str, list, dict]) -> list[T]:
        """Alias to parse server response and return a single instance.
 
        See `parse_obj` for more details.
@@ -60,7 +61,7 @@ class BaseInferenceType(dict):
        return output
 
    @classmethod
-    def parse_obj_as_instance(cls: Type[T], data: Union[bytes, str, List, Dict]) -> T:
+    def parse_obj_as_instance(cls: type[T], data: Union[bytes, str, list, dict]) -> T:
        """Alias to parse server response and return a single instance.
 
        See `parse_obj` for more details.
@@ -71,7 +72,7 @@ class BaseInferenceType(dict):
        return output
 
    @classmethod
-    def parse_obj(cls: Type[T], data: Union[bytes, str, List, Dict]) -> Union[List[T], T]:
+    def parse_obj(cls: type[T], data: Union[bytes, str, list, dict]) -> Union[list[T], T]:
        """Parse server response as a dataclass or list of dataclasses.
 
        To enable future-compatibility, we want to handle cases where the server return more fields than expected.
@@ -85,7 +86,7 @@ class BaseInferenceType(dict):
            data = json.loads(data)
 
        # If a list, parse each item individually
-        if isinstance(data, List):
+        if isinstance(data, list):
            return [cls.parse_obj(d) for d in data]  # type: ignore [misc]
 
        # At this point, we expect a dict
@@ -109,7 +110,9 @@ class BaseInferenceType(dict):
            else:
                expected_types = get_args(field_type)
            for expected_type in expected_types:
-                if getattr(expected_type, "_name", None) == "List":
+                if (
+                    isinstance(expected_type, types.GenericAlias) and expected_type.__origin__ is list
+                ) or getattr(expected_type, "_name", None) == "List":
                    expected_type = get_args(expected_type)[
                        0
                    ]  # assume same type for all items in the list
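
The widened check in the last hunk lets `parse_obj` unwrap both legacy `typing.List[...]` annotations and the builtin `list[...]` generics the generated files now use. A minimal sketch of the parsing behaviour these classmethods provide, assuming `dataclass_with_extra` applies the `dataclass` decorator itself (as the generated modules suggest); `Point` is an illustrative type, not part of the library:

    from huggingface_hub.inference._generated.types.base import (
        BaseInferenceType,
        dataclass_with_extra,
    )


    @dataclass_with_extra
    class Point(BaseInferenceType):  # illustrative type, not shipped by huggingface_hub
        x: float
        y: float


    # bytes/str payloads are JSON-decoded; a JSON array is parsed element-wise,
    # and unknown fields ("z") are kept for forward compatibility instead of raising.
    points = Point.parse_obj_as_list(b'[{"x": 1.0, "y": 2.0, "z": 3.0}]')
    print(points[0].x, points[0].y)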
huggingface_hub/inference/_generated/types/chat_completion.py
@@ -3,7 +3,7 @@
 # See:
 # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
 # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
-from typing import Any, List, Literal, Optional, Union
+from typing import Any, Literal, Optional, Union
 
 from .base import BaseInferenceType, dataclass_with_extra
 
@@ -40,22 +40,56 @@ class ChatCompletionInputToolCall(BaseInferenceType):
 @dataclass_with_extra
 class ChatCompletionInputMessage(BaseInferenceType):
     role: str
-    content: Optional[Union[List[ChatCompletionInputMessageChunk], str]] = None
+    content: Optional[Union[list[ChatCompletionInputMessageChunk], str]] = None
     name: Optional[str] = None
-    tool_calls: Optional[List[ChatCompletionInputToolCall]] = None
+    tool_calls: Optional[list[ChatCompletionInputToolCall]] = None
 
 
-ChatCompletionInputGrammarTypeType = Literal["json", "regex", "json_schema"]
+@dataclass_with_extra
+class ChatCompletionInputJSONSchema(BaseInferenceType):
+    name: str
+    """
+    The name of the response format.
+    """
+    description: Optional[str] = None
+    """
+    A description of what the response format is for, used by the model to determine
+    how to respond in the format.
+    """
+    schema: Optional[dict[str, object]] = None
+    """
+    The schema for the response format, described as a JSON Schema object. Learn how
+    to build JSON schemas [here](https://json-schema.org/).
+    """
+    strict: Optional[bool] = None
+    """
+    Whether to enable strict schema adherence when generating the output. If set to
+    true, the model will always follow the exact schema defined in the `schema`
+    field.
+    """
 
 
 @dataclass_with_extra
-class ChatCompletionInputGrammarType(BaseInferenceType):
-    type: "ChatCompletionInputGrammarTypeType"
-    value: Any
-    """A string that represents a [JSON Schema](https://json-schema.org/).
-    JSON Schema is a declarative language that allows to annotate JSON documents
-    with types and descriptions.
-    """
+class ChatCompletionInputResponseFormatText(BaseInferenceType):
+    type: Literal["text"]
+
+
+@dataclass_with_extra
+class ChatCompletionInputResponseFormatJSONSchema(BaseInferenceType):
+    type: Literal["json_schema"]
+    json_schema: ChatCompletionInputJSONSchema
+
+
+@dataclass_with_extra
+class ChatCompletionInputResponseFormatJSONObject(BaseInferenceType):
+    type: Literal["json_object"]
+
+
+ChatCompletionInputGrammarType = Union[
+    ChatCompletionInputResponseFormatText,
+    ChatCompletionInputResponseFormatJSONSchema,
+    ChatCompletionInputResponseFormatJSONObject,
+]
 
 
 @dataclass_with_extra
@@ -95,14 +129,14 @@ class ChatCompletionInput(BaseInferenceType):
     https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts.
     """
 
-    messages: List[ChatCompletionInputMessage]
+    messages: list[ChatCompletionInputMessage]
     """A list of messages comprising the conversation so far."""
     frequency_penalty: Optional[float] = None
     """Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing
     frequency in the text so far,
     decreasing the model's likelihood to repeat the same line verbatim.
     """
-    logit_bias: Optional[List[float]] = None
+    logit_bias: Optional[list[float]] = None
    """UNUSED
    Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON
    object that maps tokens
@@ -138,7 +172,7 @@ class ChatCompletionInput(BaseInferenceType):
     """
     response_format: Optional[ChatCompletionInputGrammarType] = None
     seed: Optional[int] = None
-    stop: Optional[List[str]] = None
+    stop: Optional[list[str]] = None
     """Up to 4 sequences where the API will stop generating further tokens."""
     stream: Optional[bool] = None
     stream_options: Optional[ChatCompletionInputStreamOptions] = None
@@ -151,7 +185,7 @@ class ChatCompletionInput(BaseInferenceType):
     tool_choice: Optional[Union[ChatCompletionInputToolChoiceClass, "ChatCompletionInputToolChoiceEnum"]] = None
     tool_prompt: Optional[str] = None
     """A prompt to be appended before the tools"""
-    tools: Optional[List[ChatCompletionInputTool]] = None
+    tools: Optional[list[ChatCompletionInputTool]] = None
     """A list of tools the model may call. Currently, only functions are supported as a tool.
     Use this to provide a list of
     functions the model may generate JSON inputs for.
@@ -179,12 +213,12 @@ class ChatCompletionOutputTopLogprob(BaseInferenceType):
 class ChatCompletionOutputLogprob(BaseInferenceType):
     logprob: float
     token: str
-    top_logprobs: List[ChatCompletionOutputTopLogprob]
+    top_logprobs: list[ChatCompletionOutputTopLogprob]
 
 
 @dataclass_with_extra
 class ChatCompletionOutputLogprobs(BaseInferenceType):
-    content: List[ChatCompletionOutputLogprob]
+    content: list[ChatCompletionOutputLogprob]
 
 
 @dataclass_with_extra
@@ -205,8 +239,9 @@ class ChatCompletionOutputToolCall(BaseInferenceType):
 class ChatCompletionOutputMessage(BaseInferenceType):
     role: str
     content: Optional[str] = None
+    reasoning: Optional[str] = None
     tool_call_id: Optional[str] = None
-    tool_calls: Optional[List[ChatCompletionOutputToolCall]] = None
+    tool_calls: Optional[list[ChatCompletionOutputToolCall]] = None
 
 
 @dataclass_with_extra
@@ -232,7 +267,7 @@ class ChatCompletionOutput(BaseInferenceType):
     https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts.
     """
 
-    choices: List[ChatCompletionOutputComplete]
+    choices: list[ChatCompletionOutputComplete]
     created: int
     id: str
     model: str
@@ -258,8 +293,9 @@ class ChatCompletionStreamOutputDeltaToolCall(BaseInferenceType):
 class ChatCompletionStreamOutputDelta(BaseInferenceType):
     role: str
     content: Optional[str] = None
+    reasoning: Optional[str] = None
     tool_call_id: Optional[str] = None
-    tool_calls: Optional[List[ChatCompletionStreamOutputDeltaToolCall]] = None
+    tool_calls: Optional[list[ChatCompletionStreamOutputDeltaToolCall]] = None
 
 
 @dataclass_with_extra
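
Note the new `reasoning` field here and on `ChatCompletionOutputMessage` above: it carries the intermediate thinking text that reasoning models emit alongside `content`. A hedged sketch of reading it from a stream (model id is illustrative, and whether the field is populated depends on the model and provider); the remaining hunks of this file continue below:

    from huggingface_hub import InferenceClient

    client = InferenceClient()  # assumes a configured HF token
    for chunk in client.chat_completion(
        messages=[{"role": "user", "content": "Why is the sky blue?"}],
        model="deepseek-ai/DeepSeek-R1",  # illustrative reasoning model
        stream=True,
    ):
        delta = chunk.choices[0].delta
        if delta.reasoning:  # new in this release: intermediate reasoning tokens
            print(delta.reasoning, end="")
        elif delta.content:
            print(delta.content, end="")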
@@ -272,12 +308,12 @@ class ChatCompletionStreamOutputTopLogprob(BaseInferenceType):
 class ChatCompletionStreamOutputLogprob(BaseInferenceType):
     logprob: float
     token: str
-    top_logprobs: List[ChatCompletionStreamOutputTopLogprob]
+    top_logprobs: list[ChatCompletionStreamOutputTopLogprob]
 
 
 @dataclass_with_extra
 class ChatCompletionStreamOutputLogprobs(BaseInferenceType):
-    content: List[ChatCompletionStreamOutputLogprob]
+    content: list[ChatCompletionStreamOutputLogprob]
 
 
 @dataclass_with_extra
@@ -303,7 +339,7 @@ class ChatCompletionStreamOutput(BaseInferenceType):
     https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts.
     """
 
-    choices: List[ChatCompletionStreamOutputChoice]
+    choices: list[ChatCompletionStreamOutputChoice]
     created: int
     id: str
     model: str
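
The practical effect of the grammar-type rework above: `response_format` is now an OpenAI-style tagged union (`text` / `json_schema` / `json_object`) rather than the old `type`/`value` pair, and `ChatCompletionInputGrammarType` survives only as an alias for that union. A hedged sketch of building the new shape (schema and field values are illustrative; the classes are assumed to be re-exported at the top level like other generated types):

    from huggingface_hub import (
        ChatCompletionInputJSONSchema,
        ChatCompletionInputResponseFormatJSONSchema,
    )

    # Before 1.0 this was roughly ChatCompletionInputGrammarType(type="json", value={...}).
    response_format = ChatCompletionInputResponseFormatJSONSchema(
        type="json_schema",
        json_schema=ChatCompletionInputJSONSchema(
            name="person",  # illustrative schema
            schema={"type": "object", "properties": {"name": {"type": "string"}}},
            strict=True,
        ),
    )

Passed as `response_format=response_format` to `InferenceClient.chat_completion()`, this constrains the output to the given schema where the selected provider supports it.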
huggingface_hub/inference/_generated/types/depth_estimation.py
@@ -3,7 +3,7 @@
 # See:
 # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
 # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
-from typing import Any, Dict, Optional
+from typing import Any, Optional
 
 from .base import BaseInferenceType, dataclass_with_extra
 
@@ -14,7 +14,7 @@ class DepthEstimationInput(BaseInferenceType):
 
     inputs: Any
     """The input image data"""
-    parameters: Optional[Dict[str, Any]] = None
+    parameters: Optional[dict[str, Any]] = None
     """Additional inference parameters for Depth Estimation"""
 
 
huggingface_hub/inference/_generated/types/document_question_answering.py
@@ -3,7 +3,7 @@
 # See:
 # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
 # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
-from typing import Any, List, Optional, Union
+from typing import Any, Optional, Union
 
 from .base import BaseInferenceType, dataclass_with_extra
 
@@ -46,7 +46,7 @@ class DocumentQuestionAnsweringParameters(BaseInferenceType):
     """The number of answers to return (will be chosen by order of likelihood). Can return less
     than top_k answers if there are not enough options available within the context.
     """
-    word_boxes: Optional[List[Union[List[float], str]]] = None
+    word_boxes: Optional[list[Union[list[float], str]]] = None
     """A list of words and bounding boxes (normalized 0->1000). If provided, the inference will
     skip the OCR step and use the provided bounding boxes instead.
     """
huggingface_hub/inference/_generated/types/feature_extraction.py
@@ -3,7 +3,7 @@
 # See:
 # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
 # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
-from typing import List, Literal, Optional, Union
+from typing import Literal, Optional, Union
 
 from .base import BaseInferenceType, dataclass_with_extra
 
@@ -19,7 +19,7 @@ class FeatureExtractionInput(BaseInferenceType):
     https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tei-import.ts.
     """
 
-    inputs: Union[List[str], str]
+    inputs: Union[list[str], str]
     """The text or list of texts to embed."""
     normalize: Optional[bool] = None
     prompt_name: Optional[str] = None
huggingface_hub/inference/_generated/types/fill_mask.py
@@ -3,7 +3,7 @@
 # See:
 # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
 # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
-from typing import Any, List, Optional
+from typing import Any, Optional
 
 from .base import BaseInferenceType, dataclass_with_extra
 
@@ -12,7 +12,7 @@ from .base import BaseInferenceType, dataclass_with_extra
 class FillMaskParameters(BaseInferenceType):
     """Additional inference parameters for Fill Mask"""
 
-    targets: Optional[List[str]] = None
+    targets: Optional[list[str]] = None
     """When passed, the model will limit the scores to the passed targets instead of looking up
     in the whole vocabulary. If the provided targets are not in the model vocab, they will be
     tokenized and the first resulting token will be used (with a warning, and that might be
huggingface_hub/inference/_generated/types/image_to_image.py
@@ -10,7 +10,9 @@ from .base import BaseInferenceType, dataclass_with_extra
 
 @dataclass_with_extra
 class ImageToImageTargetSize(BaseInferenceType):
-    """The size in pixel of the output image."""
+    """The size in pixels of the output image. This parameter is only supported by some
+    providers and for specific models. It will be ignored when unsupported.
+    """
 
     height: int
     width: int
@@ -33,7 +35,9 @@ class ImageToImageParameters(BaseInferenceType):
     prompt: Optional[str] = None
     """The text prompt to guide the image generation."""
     target_size: Optional[ImageToImageTargetSize] = None
-    """The size in pixel of the output image."""
+    """The size in pixels of the output image. This parameter is only supported by some
+    providers and for specific models. It will be ignored when unsupported.
+    """
 
 
 @dataclass_with_extra
huggingface_hub/inference/_generated/types/image_to_video.py
@@ -0,0 +1,60 @@
+# Inference code generated from the JSON schema spec in @huggingface/tasks.
+#
+# See:
+# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
+# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
+from typing import Any, Optional
+
+from .base import BaseInferenceType, dataclass_with_extra
+
+
+@dataclass_with_extra
+class ImageToVideoTargetSize(BaseInferenceType):
+    """The size in pixel of the output video frames."""
+
+    height: int
+    width: int
+
+
+@dataclass_with_extra
+class ImageToVideoParameters(BaseInferenceType):
+    """Additional inference parameters for Image To Video"""
+
+    guidance_scale: Optional[float] = None
+    """For diffusion models. A higher guidance scale value encourages the model to generate
+    videos closely linked to the text prompt at the expense of lower image quality.
+    """
+    negative_prompt: Optional[str] = None
+    """One prompt to guide what NOT to include in video generation."""
+    num_frames: Optional[float] = None
+    """The num_frames parameter determines how many video frames are generated."""
+    num_inference_steps: Optional[int] = None
+    """The number of denoising steps. More denoising steps usually lead to a higher quality
+    video at the expense of slower inference.
+    """
+    prompt: Optional[str] = None
+    """The text prompt to guide the video generation."""
+    seed: Optional[int] = None
+    """Seed for the random number generator."""
+    target_size: Optional[ImageToVideoTargetSize] = None
+    """The size in pixel of the output video frames."""
+
+
+@dataclass_with_extra
+class ImageToVideoInput(BaseInferenceType):
+    """Inputs for Image To Video inference"""
+
+    inputs: str
+    """The input image data as a base64-encoded string. If no `parameters` are provided, you can
+    also provide the image data as a raw bytes payload.
+    """
+    parameters: Optional[ImageToVideoParameters] = None
+    """Additional inference parameters for Image To Video"""
+
+
+@dataclass_with_extra
+class ImageToVideoOutput(BaseInferenceType):
+    """Outputs of inference for the Image To Video task"""
+
+    video: Any
+    """The generated video returned as raw bytes in the payload."""
huggingface_hub/inference/_generated/types/sentence_similarity.py
@@ -3,14 +3,14 @@
 # See:
 # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
 # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
-from typing import Any, Dict, List, Optional
+from typing import Any, Optional
 
 from .base import BaseInferenceType, dataclass_with_extra
 
 
 @dataclass_with_extra
 class SentenceSimilarityInputData(BaseInferenceType):
-    sentences: List[str]
+    sentences: list[str]
     """A list of strings which will be compared against the source_sentence."""
     source_sentence: str
     """The string that you wish to compare the other strings with. This can be a phrase,
@@ -23,5 +23,5 @@ class SentenceSimilarityInput(BaseInferenceType):
     """Inputs for Sentence similarity inference"""
 
     inputs: SentenceSimilarityInputData
-    parameters: Optional[Dict[str, Any]] = None
+    parameters: Optional[dict[str, Any]] = None
     """Additional inference parameters for Sentence Similarity"""
huggingface_hub/inference/_generated/types/summarization.py
@@ -3,7 +3,7 @@
 # See:
 # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
 # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
-from typing import Any, Dict, Literal, Optional
+from typing import Any, Literal, Optional
 
 from .base import BaseInferenceType, dataclass_with_extra
 
@@ -17,7 +17,7 @@ class SummarizationParameters(BaseInferenceType):
 
     clean_up_tokenization_spaces: Optional[bool] = None
     """Whether to clean up the potential extra spaces in the text output."""
-    generate_parameters: Optional[Dict[str, Any]] = None
+    generate_parameters: Optional[dict[str, Any]] = None
     """Additional parametrization of the text generation algorithm."""
     truncation: Optional["SummarizationTruncationStrategy"] = None
     """The truncation strategy to use."""
huggingface_hub/inference/_generated/types/table_question_answering.py
@@ -3,7 +3,7 @@
 # See:
 # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
 # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
-from typing import Dict, List, Literal, Optional
+from typing import Literal, Optional
 
 from .base import BaseInferenceType, dataclass_with_extra
 
@@ -14,7 +14,7 @@ class TableQuestionAnsweringInputData(BaseInferenceType):
 
     question: str
     """The question to be answered about the table"""
-    table: Dict[str, List[str]]
+    table: dict[str, list[str]]
     """The table to serve as context for the questions"""
 
 
@@ -54,9 +54,9 @@ class TableQuestionAnsweringOutputElement(BaseInferenceType):
     """The answer of the question given the table. If there is an aggregator, the answer will be
     preceded by `AGGREGATOR >`.
     """
-    cells: List[str]
-    """List of strings made up of the answer cell values."""
-    coordinates: List[List[int]]
+    cells: list[str]
+    """list of strings made up of the answer cell values."""
+    coordinates: list[list[int]]
     """Coordinates of the cells of the answers."""
     aggregator: Optional[str] = None
     """If the model has an aggregator, this returns the aggregator."""
huggingface_hub/inference/_generated/types/text2text_generation.py
@@ -3,7 +3,7 @@
 # See:
 # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
 # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
-from typing import Any, Dict, Literal, Optional
+from typing import Any, Literal, Optional
 
 from .base import BaseInferenceType, dataclass_with_extra
 
@@ -17,7 +17,7 @@ class Text2TextGenerationParameters(BaseInferenceType):
 
     clean_up_tokenization_spaces: Optional[bool] = None
     """Whether to clean up the potential extra spaces in the text output."""
-    generate_parameters: Optional[Dict[str, Any]] = None
+    generate_parameters: Optional[dict[str, Any]] = None
     """Additional parametrization of the text generation algorithm"""
     truncation: Optional["Text2TextGenerationTruncationStrategy"] = None
     """The truncation strategy to use"""
huggingface_hub/inference/_generated/types/text_generation.py
@@ -3,7 +3,7 @@
 # See:
 # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
 # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
-from typing import Any, List, Literal, Optional
+from typing import Any, Literal, Optional
 
 from .base import BaseInferenceType, dataclass_with_extra
 
@@ -49,7 +49,7 @@ class TextGenerationInputGenerateParameters(BaseInferenceType):
     """Whether to prepend the prompt to the generated text"""
     seed: Optional[int] = None
     """Random sampling seed."""
-    stop: Optional[List[str]] = None
+    stop: Optional[list[str]] = None
     """Stop generating tokens if a member of `stop` is generated."""
     temperature: Optional[float] = None
     """The value used to module the logits distribution."""
@@ -108,21 +108,21 @@ class TextGenerationOutputBestOfSequence(BaseInferenceType):
     finish_reason: "TextGenerationOutputFinishReason"
     generated_text: str
     generated_tokens: int
-    prefill: List[TextGenerationOutputPrefillToken]
-    tokens: List[TextGenerationOutputToken]
+    prefill: list[TextGenerationOutputPrefillToken]
+    tokens: list[TextGenerationOutputToken]
     seed: Optional[int] = None
-    top_tokens: Optional[List[List[TextGenerationOutputToken]]] = None
+    top_tokens: Optional[list[list[TextGenerationOutputToken]]] = None
 
 
 @dataclass_with_extra
 class TextGenerationOutputDetails(BaseInferenceType):
     finish_reason: "TextGenerationOutputFinishReason"
     generated_tokens: int
-    prefill: List[TextGenerationOutputPrefillToken]
-    tokens: List[TextGenerationOutputToken]
-    best_of_sequences: Optional[List[TextGenerationOutputBestOfSequence]] = None
+    prefill: list[TextGenerationOutputPrefillToken]
+    tokens: list[TextGenerationOutputToken]
+    best_of_sequences: Optional[list[TextGenerationOutputBestOfSequence]] = None
     seed: Optional[int] = None
-    top_tokens: Optional[List[List[TextGenerationOutputToken]]] = None
+    top_tokens: Optional[list[list[TextGenerationOutputToken]]] = None
 
 
 @dataclass_with_extra
@@ -165,4 +165,4 @@ class TextGenerationStreamOutput(BaseInferenceType):
     token: TextGenerationStreamOutputToken
     details: Optional[TextGenerationStreamOutputStreamDetails] = None
     generated_text: Optional[str] = None
-    top_tokens: Optional[List[TextGenerationStreamOutputToken]] = None
+    top_tokens: Optional[list[TextGenerationStreamOutputToken]] = None
huggingface_hub/inference/_generated/types/text_to_video.py
@@ -3,7 +3,7 @@
 # See:
 # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
 # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
-from typing import Any, List, Optional
+from typing import Any, Optional
 
 from .base import BaseInferenceType, dataclass_with_extra
 
@@ -16,7 +16,7 @@ class TextToVideoParameters(BaseInferenceType):
     """A higher guidance scale value encourages the model to generate videos closely linked to
     the text prompt, but values too high may cause saturation and other artifacts.
     """
-    negative_prompt: Optional[List[str]] = None
+    negative_prompt: Optional[list[str]] = None
     """One or several prompt to guide what NOT to include in video generation."""
     num_frames: Optional[float] = None
     """The num_frames parameter determines how many video frames are generated."""
huggingface_hub/inference/_generated/types/token_classification.py
@@ -3,7 +3,7 @@
 # See:
 # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
 # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
-from typing import List, Literal, Optional
+from typing import Literal, Optional
 
 from .base import BaseInferenceType, dataclass_with_extra
 
@@ -17,7 +17,7 @@ class TokenClassificationParameters(BaseInferenceType):
 
     aggregation_strategy: Optional["TokenClassificationAggregationStrategy"] = None
     """The strategy used to fuse tokens based on model predictions"""
-    ignore_labels: Optional[List[str]] = None
+    ignore_labels: Optional[list[str]] = None
     """A list of labels to ignore"""
     stride: Optional[int] = None
     """The number of overlapping tokens between chunks when splitting the input text."""
huggingface_hub/inference/_generated/types/translation.py
@@ -3,7 +3,7 @@
 # See:
 # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
 # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
-from typing import Any, Dict, Literal, Optional
+from typing import Any, Literal, Optional
 
 from .base import BaseInferenceType, dataclass_with_extra
 
@@ -17,7 +17,7 @@ class TranslationParameters(BaseInferenceType):
 
     clean_up_tokenization_spaces: Optional[bool] = None
     """Whether to clean up the potential extra spaces in the text output."""
-    generate_parameters: Optional[Dict[str, Any]] = None
+    generate_parameters: Optional[dict[str, Any]] = None
     """Additional parametrization of the text generation algorithm."""
     src_lang: Optional[str] = None
     """The source language of the text. Required for models that can translate from multiple