huggingface-hub 0.36.0rc0__py3-none-any.whl → 1.0.0__py3-none-any.whl
This diff shows the changes between publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.
Potentially problematic release.
- huggingface_hub/__init__.py +33 -45
- huggingface_hub/_commit_api.py +39 -43
- huggingface_hub/_commit_scheduler.py +11 -8
- huggingface_hub/_inference_endpoints.py +8 -8
- huggingface_hub/_jobs_api.py +20 -20
- huggingface_hub/_login.py +17 -43
- huggingface_hub/_oauth.py +8 -8
- huggingface_hub/_snapshot_download.py +135 -50
- huggingface_hub/_space_api.py +4 -4
- huggingface_hub/_tensorboard_logger.py +5 -5
- huggingface_hub/_upload_large_folder.py +18 -32
- huggingface_hub/_webhooks_payload.py +3 -3
- huggingface_hub/_webhooks_server.py +2 -2
- huggingface_hub/cli/__init__.py +0 -14
- huggingface_hub/cli/_cli_utils.py +143 -39
- huggingface_hub/cli/auth.py +105 -171
- huggingface_hub/cli/cache.py +594 -361
- huggingface_hub/cli/download.py +120 -112
- huggingface_hub/cli/hf.py +38 -41
- huggingface_hub/cli/jobs.py +689 -1017
- huggingface_hub/cli/lfs.py +120 -143
- huggingface_hub/cli/repo.py +282 -216
- huggingface_hub/cli/repo_files.py +50 -84
- huggingface_hub/cli/system.py +6 -25
- huggingface_hub/cli/upload.py +198 -220
- huggingface_hub/cli/upload_large_folder.py +91 -106
- huggingface_hub/community.py +5 -5
- huggingface_hub/constants.py +17 -52
- huggingface_hub/dataclasses.py +135 -21
- huggingface_hub/errors.py +47 -30
- huggingface_hub/fastai_utils.py +8 -9
- huggingface_hub/file_download.py +351 -303
- huggingface_hub/hf_api.py +398 -570
- huggingface_hub/hf_file_system.py +101 -66
- huggingface_hub/hub_mixin.py +32 -54
- huggingface_hub/inference/_client.py +177 -162
- huggingface_hub/inference/_common.py +38 -54
- huggingface_hub/inference/_generated/_async_client.py +218 -258
- huggingface_hub/inference/_generated/types/automatic_speech_recognition.py +3 -3
- huggingface_hub/inference/_generated/types/base.py +10 -7
- huggingface_hub/inference/_generated/types/chat_completion.py +16 -16
- huggingface_hub/inference/_generated/types/depth_estimation.py +2 -2
- huggingface_hub/inference/_generated/types/document_question_answering.py +2 -2
- huggingface_hub/inference/_generated/types/feature_extraction.py +2 -2
- huggingface_hub/inference/_generated/types/fill_mask.py +2 -2
- huggingface_hub/inference/_generated/types/sentence_similarity.py +3 -3
- huggingface_hub/inference/_generated/types/summarization.py +2 -2
- huggingface_hub/inference/_generated/types/table_question_answering.py +4 -4
- huggingface_hub/inference/_generated/types/text2text_generation.py +2 -2
- huggingface_hub/inference/_generated/types/text_generation.py +10 -10
- huggingface_hub/inference/_generated/types/text_to_video.py +2 -2
- huggingface_hub/inference/_generated/types/token_classification.py +2 -2
- huggingface_hub/inference/_generated/types/translation.py +2 -2
- huggingface_hub/inference/_generated/types/zero_shot_classification.py +2 -2
- huggingface_hub/inference/_generated/types/zero_shot_image_classification.py +2 -2
- huggingface_hub/inference/_generated/types/zero_shot_object_detection.py +1 -3
- huggingface_hub/inference/_mcp/agent.py +3 -3
- huggingface_hub/inference/_mcp/constants.py +1 -2
- huggingface_hub/inference/_mcp/mcp_client.py +33 -22
- huggingface_hub/inference/_mcp/types.py +10 -10
- huggingface_hub/inference/_mcp/utils.py +4 -4
- huggingface_hub/inference/_providers/__init__.py +12 -4
- huggingface_hub/inference/_providers/_common.py +62 -24
- huggingface_hub/inference/_providers/black_forest_labs.py +6 -6
- huggingface_hub/inference/_providers/cohere.py +3 -3
- huggingface_hub/inference/_providers/fal_ai.py +25 -25
- huggingface_hub/inference/_providers/featherless_ai.py +4 -4
- huggingface_hub/inference/_providers/fireworks_ai.py +3 -3
- huggingface_hub/inference/_providers/hf_inference.py +13 -13
- huggingface_hub/inference/_providers/hyperbolic.py +4 -4
- huggingface_hub/inference/_providers/nebius.py +10 -10
- huggingface_hub/inference/_providers/novita.py +5 -5
- huggingface_hub/inference/_providers/nscale.py +4 -4
- huggingface_hub/inference/_providers/replicate.py +15 -15
- huggingface_hub/inference/_providers/sambanova.py +6 -6
- huggingface_hub/inference/_providers/together.py +7 -7
- huggingface_hub/lfs.py +21 -94
- huggingface_hub/repocard.py +15 -16
- huggingface_hub/repocard_data.py +57 -57
- huggingface_hub/serialization/__init__.py +0 -1
- huggingface_hub/serialization/_base.py +9 -9
- huggingface_hub/serialization/_dduf.py +7 -7
- huggingface_hub/serialization/_torch.py +28 -28
- huggingface_hub/utils/__init__.py +11 -6
- huggingface_hub/utils/_auth.py +5 -5
- huggingface_hub/utils/_cache_manager.py +49 -74
- huggingface_hub/utils/_deprecation.py +1 -1
- huggingface_hub/utils/_dotenv.py +3 -3
- huggingface_hub/utils/_fixes.py +0 -10
- huggingface_hub/utils/_git_credential.py +3 -3
- huggingface_hub/utils/_headers.py +7 -29
- huggingface_hub/utils/_http.py +371 -208
- huggingface_hub/utils/_pagination.py +4 -4
- huggingface_hub/utils/_parsing.py +98 -0
- huggingface_hub/utils/_paths.py +5 -5
- huggingface_hub/utils/_runtime.py +59 -23
- huggingface_hub/utils/_safetensors.py +21 -21
- huggingface_hub/utils/_subprocess.py +9 -9
- huggingface_hub/utils/_telemetry.py +3 -3
- huggingface_hub/{commands/_cli_utils.py → utils/_terminal.py} +4 -9
- huggingface_hub/utils/_typing.py +3 -3
- huggingface_hub/utils/_validators.py +53 -72
- huggingface_hub/utils/_xet.py +16 -16
- huggingface_hub/utils/_xet_progress_reporting.py +1 -1
- huggingface_hub/utils/insecure_hashlib.py +3 -9
- huggingface_hub/utils/tqdm.py +3 -3
- {huggingface_hub-0.36.0rc0.dist-info → huggingface_hub-1.0.0.dist-info}/METADATA +16 -35
- huggingface_hub-1.0.0.dist-info/RECORD +152 -0
- {huggingface_hub-0.36.0rc0.dist-info → huggingface_hub-1.0.0.dist-info}/entry_points.txt +0 -1
- huggingface_hub/commands/__init__.py +0 -27
- huggingface_hub/commands/delete_cache.py +0 -476
- huggingface_hub/commands/download.py +0 -204
- huggingface_hub/commands/env.py +0 -39
- huggingface_hub/commands/huggingface_cli.py +0 -65
- huggingface_hub/commands/lfs.py +0 -200
- huggingface_hub/commands/repo.py +0 -151
- huggingface_hub/commands/repo_files.py +0 -132
- huggingface_hub/commands/scan_cache.py +0 -183
- huggingface_hub/commands/tag.py +0 -161
- huggingface_hub/commands/upload.py +0 -318
- huggingface_hub/commands/upload_large_folder.py +0 -131
- huggingface_hub/commands/user.py +0 -208
- huggingface_hub/commands/version.py +0 -40
- huggingface_hub/inference_api.py +0 -217
- huggingface_hub/keras_mixin.py +0 -497
- huggingface_hub/repository.py +0 -1471
- huggingface_hub/serialization/_tensorflow.py +0 -92
- huggingface_hub/utils/_hf_folder.py +0 -68
- huggingface_hub-0.36.0rc0.dist-info/RECORD +0 -170
- {huggingface_hub-0.36.0rc0.dist-info → huggingface_hub-1.0.0.dist-info}/LICENSE +0 -0
- {huggingface_hub-0.36.0rc0.dist-info → huggingface_hub-1.0.0.dist-info}/WHEEL +0 -0
- {huggingface_hub-0.36.0rc0.dist-info → huggingface_hub-1.0.0.dist-info}/top_level.txt +0 -0
huggingface_hub/inference/_providers/featherless_ai.py
CHANGED
@@ -1,4 +1,4 @@
-from typing import Any, Dict, Optional, Union
+from typing import Any, Optional, Union
 
 from huggingface_hub.hf_api import InferenceProviderMapping
 from huggingface_hub.inference._common import RequestParameters, _as_dict
@@ -15,14 +15,14 @@ class FeatherlessTextGenerationTask(BaseTextGenerationTask):
         super().__init__(provider=_PROVIDER, base_url=_BASE_URL)
 
     def _prepare_payload_as_dict(
-        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
-    ) -> Optional[Dict]:
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
         params = filter_none(parameters.copy())
         params["max_tokens"] = params.pop("max_new_tokens", None)
 
         return {"prompt": inputs, **params, "model": provider_mapping_info.provider_id}
 
-    def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any:
+    def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any:
         output = _as_dict(response)["choices"][0]
         return {
             "generated_text": output["text"],
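The change above sets the pattern for every provider module in this release: the `typing.Dict`/`typing.List`/`typing.Tuple` aliases are replaced by the builtin generics that PEP 585 made subscriptable on Python 3.9+, both in imports and in annotations. A minimal before/after sketch (illustrative code, not taken from the package):

```python
from typing import Dict, Optional

# Pre-1.0 style: typing aliases, needed to support Python < 3.9
def prepare_payload_old(parameters: Dict) -> Optional[Dict]:
    return dict(parameters) or None

# 1.0 style: builtin generics (PEP 585), equivalent at runtime on Python 3.9+
def prepare_payload_new(parameters: dict) -> Optional[dict]:
    return dict(parameters) or None
```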
huggingface_hub/inference/_providers/fireworks_ai.py
CHANGED
@@ -1,4 +1,4 @@
-from typing import Any, Dict, Optional
+from typing import Any, Optional
 
 from huggingface_hub.hf_api import InferenceProviderMapping
 
@@ -13,8 +13,8 @@ class FireworksAIConversationalTask(BaseConversationalTask):
         return "/inference/v1/chat/completions"
 
     def _prepare_payload_as_dict(
-        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
-    ) -> Optional[Dict]:
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
         payload = super()._prepare_payload_as_dict(inputs, parameters, provider_mapping_info)
         response_format = parameters.get("response_format")
         if isinstance(response_format, dict) and response_format.get("type") == "json_schema":
huggingface_hub/inference/_providers/hf_inference.py
CHANGED
@@ -1,7 +1,7 @@
 import json
 from functools import lru_cache
 from pathlib import Path
-from typing import Any, Dict, Optional, Union
+from typing import Any, Optional, Union
 from urllib.parse import urlparse, urlunparse
 
 from huggingface_hub import constants
@@ -60,8 +60,8 @@ class HFInferenceTask(TaskProviderHelper):
         )
 
     def _prepare_payload_as_dict(
-        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
-    ) -> Optional[Dict]:
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
         if isinstance(inputs, bytes):
             raise ValueError(f"Unexpected binary input for task {self.task}.")
         if isinstance(inputs, Path):
@@ -71,16 +71,16 @@ class HFInferenceTask(TaskProviderHelper):
 
 class HFInferenceBinaryInputTask(HFInferenceTask):
     def _prepare_payload_as_dict(
-        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
-    ) -> Optional[Dict]:
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
         return None
 
     def _prepare_payload_as_bytes(
         self,
         inputs: Any,
-        parameters: Dict,
+        parameters: dict,
         provider_mapping_info: InferenceProviderMapping,
-        extra_payload: Optional[Dict],
+        extra_payload: Optional[dict],
     ) -> Optional[MimeBytes]:
         parameters = filter_none(parameters)
         extra_payload = extra_payload or {}
@@ -106,8 +106,8 @@ class HFInferenceConversational(HFInferenceTask):
         super().__init__("conversational")
 
     def _prepare_payload_as_dict(
-        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
-    ) -> Optional[Dict]:
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
         payload = filter_none(parameters)
         mapped_model = provider_mapping_info.provider_id
         payload_model = parameters.get("model") or mapped_model
@@ -156,7 +156,7 @@ def _build_chat_completion_url(model_url: str) -> str:
 
 
 @lru_cache(maxsize=1)
-def _fetch_recommended_models() -> Dict[str, Optional[str]]:
+def _fetch_recommended_models() -> dict[str, Optional[str]]:
     response = get_session().get(f"{constants.ENDPOINT}/api/tasks", headers=build_hf_headers())
     hf_raise_for_status(response)
     return {task: next(iter(details["widgetModels"]), None) for task, details in response.json().items()}
@@ -211,8 +211,8 @@ class HFInferenceFeatureExtractionTask(HFInferenceTask):
         super().__init__("feature-extraction")
 
     def _prepare_payload_as_dict(
-        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
-    ) -> Optional[Dict]:
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
         if isinstance(inputs, bytes):
             raise ValueError(f"Unexpected binary input for task {self.task}.")
         if isinstance(inputs, Path):
@@ -222,7 +222,7 @@ class HFInferenceFeatureExtractionTask(HFInferenceTask):
         # See specs: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/src/tasks/feature-extraction/spec/input.json
         return {"inputs": inputs, **filter_none(parameters)}
 
-    def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any:
+    def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any:
         if isinstance(response, bytes):
             return _bytes_to_dict(response)
         return response
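Worth noting in the hunk above: `_fetch_recommended_models` keeps its `@lru_cache(maxsize=1)` decorator, so the `/api/tasks` endpoint is queried at most once per process and every later lookup is served from the cache. A self-contained sketch of that caching behavior (the task name and model id below are made-up examples):

```python
from functools import lru_cache
from typing import Optional

@lru_cache(maxsize=1)
def fetch_once() -> dict[str, Optional[str]]:
    # The real helper does an HTTP GET to {ENDPOINT}/api/tasks;
    # this stub just proves the body runs a single time.
    print("expensive call")
    return {"example-task": "example/model-id"}

fetch_once()  # prints "expensive call" and caches the result
fetch_once()  # cache hit: no print, same dict returned
```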
huggingface_hub/inference/_providers/hyperbolic.py
CHANGED
@@ -1,5 +1,5 @@
 import base64
-from typing import Any, Dict, Optional, Union
+from typing import Any, Optional, Union
 
 from huggingface_hub.hf_api import InferenceProviderMapping
 from huggingface_hub.inference._common import RequestParameters, _as_dict
@@ -14,8 +14,8 @@ class HyperbolicTextToImageTask(TaskProviderHelper):
         return "/v1/images/generations"
 
     def _prepare_payload_as_dict(
-        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
-    ) -> Optional[Dict]:
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
         mapped_model = provider_mapping_info.provider_id
         parameters = filter_none(parameters)
         if "num_inference_steps" in parameters:
@@ -29,7 +29,7 @@ class HyperbolicTextToImageTask(TaskProviderHelper):
             parameters["height"] = 512
         return {"prompt": inputs, "model_name": mapped_model, **parameters}
 
-    def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any:
+    def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any:
         response_dict = _as_dict(response)
         return base64.b64decode(response_dict["images"][0]["image"])
 
huggingface_hub/inference/_providers/nebius.py
CHANGED
@@ -1,5 +1,5 @@
 import base64
-from typing import Any, Dict, Optional, Union
+from typing import Any, Optional, Union
 
 from huggingface_hub.hf_api import InferenceProviderMapping
 from huggingface_hub.inference._common import RequestParameters, _as_dict
@@ -15,7 +15,7 @@ class NebiusTextGenerationTask(BaseTextGenerationTask):
     def __init__(self):
         super().__init__(provider="nebius", base_url="https://api.studio.nebius.ai")
 
-    def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any:
+    def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any:
         output = _as_dict(response)["choices"][0]
         return {
             "generated_text": output["text"],
@@ -31,8 +31,8 @@ class NebiusConversationalTask(BaseConversationalTask):
         super().__init__(provider="nebius", base_url="https://api.studio.nebius.ai")
 
     def _prepare_payload_as_dict(
-        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
-    ) -> Optional[Dict]:
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
         payload = super()._prepare_payload_as_dict(inputs, parameters, provider_mapping_info)
         response_format = parameters.get("response_format")
         if isinstance(response_format, dict) and response_format.get("type") == "json_schema":
@@ -50,8 +50,8 @@ class NebiusTextToImageTask(TaskProviderHelper):
         return "/v1/images/generations"
 
     def _prepare_payload_as_dict(
-        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
-    ) -> Optional[Dict]:
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
         mapped_model = provider_mapping_info.provider_id
         parameters = filter_none(parameters)
         if "guidance_scale" in parameters:
@@ -61,7 +61,7 @@ class NebiusTextToImageTask(TaskProviderHelper):
 
         return {"prompt": inputs, **parameters, "model": mapped_model}
 
-    def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any:
+    def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any:
         response_dict = _as_dict(response)
         return base64.b64decode(response_dict["data"][0]["b64_json"])
 
@@ -74,10 +74,10 @@ class NebiusFeatureExtractionTask(TaskProviderHelper):
         return "/v1/embeddings"
 
     def _prepare_payload_as_dict(
-        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
-    ) -> Optional[Dict]:
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
         return {"input": inputs, "model": provider_mapping_info.provider_id}
 
-    def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any:
+    def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any:
         embeddings = _as_dict(response)["data"]
         return [embedding["embedding"] for embedding in embeddings]
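`NebiusFeatureExtractionTask.get_response` (like the Sambanova variant further down) unpacks an OpenAI-style `/v1/embeddings` body into a plain list of vectors. A toy round-trip with made-up values:

```python
# Shape assumed by get_response: {"data": [{"embedding": [...]}, ...]}
response = {
    "data": [
        {"index": 0, "embedding": [0.1, 0.2, 0.3]},
        {"index": 1, "embedding": [0.4, 0.5, 0.6]},
    ]
}
embeddings = [item["embedding"] for item in response["data"]]
assert embeddings == [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]
```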
huggingface_hub/inference/_providers/novita.py
CHANGED
@@ -1,4 +1,4 @@
-from typing import Any, Dict, Optional, Union
+from typing import Any, Optional, Union
 
 from huggingface_hub.hf_api import InferenceProviderMapping
 from huggingface_hub.inference._common import RequestParameters, _as_dict
@@ -23,7 +23,7 @@ class NovitaTextGenerationTask(BaseTextGenerationTask):
         # there is no v1/ route for novita
         return "/v3/openai/completions"
 
-    def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any:
+    def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any:
         output = _as_dict(response)["choices"][0]
         return {
             "generated_text": output["text"],
@@ -51,11 +51,11 @@ class NovitaTextToVideoTask(TaskProviderHelper):
         return f"/v3/hf/{mapped_model}"
 
     def _prepare_payload_as_dict(
-        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
-    ) -> Optional[Dict]:
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
         return {"prompt": inputs, **filter_none(parameters)}
 
-    def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any:
+    def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any:
         response_dict = _as_dict(response)
         if not (
             isinstance(response_dict, dict)
huggingface_hub/inference/_providers/nscale.py
CHANGED
@@ -1,5 +1,5 @@
 import base64
-from typing import Any, Dict, Optional, Union
+from typing import Any, Optional, Union
 
 from huggingface_hub.hf_api import InferenceProviderMapping
 from huggingface_hub.inference._common import RequestParameters, _as_dict
@@ -20,8 +20,8 @@ class NscaleTextToImageTask(TaskProviderHelper):
         return "/v1/images/generations"
 
     def _prepare_payload_as_dict(
-        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
-    ) -> Optional[Dict]:
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
         mapped_model = provider_mapping_info.provider_id
         # Combine all parameters except inputs and parameters
         parameters = filter_none(parameters)
@@ -39,6 +39,6 @@ class NscaleTextToImageTask(TaskProviderHelper):
         }
         return payload
 
-    def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any:
+    def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any:
         response_dict = _as_dict(response)
         return base64.b64decode(response_dict["data"][0]["b64_json"])
huggingface_hub/inference/_providers/replicate.py
CHANGED
@@ -1,4 +1,4 @@
-from typing import Any, Dict, Optional, Union
+from typing import Any, Optional, Union
 
 from huggingface_hub.hf_api import InferenceProviderMapping
 from huggingface_hub.inference._common import RequestParameters, _as_dict, _as_url
@@ -14,7 +14,7 @@ class ReplicateTask(TaskProviderHelper):
     def __init__(self, task: str):
         super().__init__(provider=_PROVIDER, base_url=_BASE_URL, task=task)
 
-    def _prepare_headers(self, headers: Dict, api_key: str) -> Dict[str, Any]:
+    def _prepare_headers(self, headers: dict, api_key: str) -> dict[str, Any]:
         headers = super()._prepare_headers(headers, api_key)
         headers["Prefer"] = "wait"
         return headers
@@ -25,16 +25,16 @@ class ReplicateTask(TaskProviderHelper):
         return f"/v1/models/{mapped_model}/predictions"
 
     def _prepare_payload_as_dict(
-        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
-    ) -> Optional[Dict]:
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
         mapped_model = provider_mapping_info.provider_id
-        payload: Dict[str, Any] = {"input": {"prompt": inputs, **filter_none(parameters)}}
+        payload: dict[str, Any] = {"input": {"prompt": inputs, **filter_none(parameters)}}
         if ":" in mapped_model:
             version = mapped_model.split(":", 1)[1]
             payload["version"] = version
         return payload
 
-    def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any:
+    def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any:
         response_dict = _as_dict(response)
         if response_dict.get("output") is None:
             raise TimeoutError(
@@ -52,9 +52,9 @@ class ReplicateTextToImageTask(ReplicateTask):
         super().__init__("text-to-image")
 
     def _prepare_payload_as_dict(
-        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
-    ) -> Optional[Dict]:
-        payload: Dict = super()._prepare_payload_as_dict(inputs, parameters, provider_mapping_info)  # type: ignore[assignment]
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
+        payload: dict = super()._prepare_payload_as_dict(inputs, parameters, provider_mapping_info)  # type: ignore[assignment]
         if provider_mapping_info.adapter_weights_path is not None:
             payload["input"]["lora_weights"] = f"https://huggingface.co/{provider_mapping_info.hf_model_id}"
         return payload
@@ -65,9 +65,9 @@ class ReplicateTextToSpeechTask(ReplicateTask):
         super().__init__("text-to-speech")
 
     def _prepare_payload_as_dict(
-        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
-    ) -> Optional[Dict]:
-        payload: Dict = super()._prepare_payload_as_dict(inputs, parameters, provider_mapping_info)  # type: ignore[assignment]
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
+        payload: dict = super()._prepare_payload_as_dict(inputs, parameters, provider_mapping_info)  # type: ignore[assignment]
         payload["input"]["text"] = payload["input"].pop("prompt")  # rename "prompt" to "text" for TTS
         return payload
 
@@ -77,11 +77,11 @@ class ReplicateImageToImageTask(ReplicateTask):
         super().__init__("image-to-image")
 
     def _prepare_payload_as_dict(
-        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
-    ) -> Optional[Dict]:
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
         image_url = _as_url(inputs, default_mime_type="image/jpeg")
 
-        payload: Dict[str, Any] = {"input": {"input_image": image_url, **filter_none(parameters)}}
+        payload: dict[str, Any] = {"input": {"input_image": image_url, **filter_none(parameters)}}
 
         mapped_model = provider_mapping_info.provider_id
         if ":" in mapped_model:
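A detail that is easy to miss in `ReplicateTask._prepare_payload_as_dict`: when the provider id pins a model version with a `model:version` suffix, everything after the first `:` is sent as `payload["version"]`. A small sketch with a fabricated id:

```python
mapped_model = "owner/some-model:0123456789abcdef"  # fabricated provider id

payload: dict = {"input": {"prompt": "a photo of a cat"}}
if ":" in mapped_model:
    payload["version"] = mapped_model.split(":", 1)[1]  # keep only the pinned version

assert payload["version"] == "0123456789abcdef"
```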
huggingface_hub/inference/_providers/sambanova.py
CHANGED
@@ -1,4 +1,4 @@
-from typing import Any, Dict, Optional, Union
+from typing import Any, Optional, Union
 
 from huggingface_hub.hf_api import InferenceProviderMapping
 from huggingface_hub.inference._common import RequestParameters, _as_dict
@@ -10,8 +10,8 @@ class SambanovaConversationalTask(BaseConversationalTask):
         super().__init__(provider="sambanova", base_url="https://api.sambanova.ai")
 
    def _prepare_payload_as_dict(
-        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
-    ) -> Optional[Dict]:
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
         response_format_config = parameters.get("response_format")
         if isinstance(response_format_config, dict):
             if response_format_config.get("type") == "json_schema":
@@ -32,11 +32,11 @@ class SambanovaFeatureExtractionTask(TaskProviderHelper):
         return "/v1/embeddings"
 
     def _prepare_payload_as_dict(
-        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
-    ) -> Optional[Dict]:
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
         parameters = filter_none(parameters)
         return {"input": inputs, "model": provider_mapping_info.provider_id, **parameters}
 
-    def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any:
+    def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any:
         embeddings = _as_dict(response)["data"]
         return [embedding["embedding"] for embedding in embeddings]
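Fireworks, Nebius, Together, and Sambanova all special-case an OpenAI-style structured-output request in the same way: when `parameters["response_format"]` is a dict of type `json_schema`, the payload is rewritten into the provider's native shape (the rewrite itself falls outside the hunk context shown here). A toy example of the guard, with invented field values:

```python
response_format = {
    "type": "json_schema",
    "json_schema": {"name": "answer", "schema": {"type": "object"}},
}
if isinstance(response_format, dict) and response_format.get("type") == "json_schema":
    print("provider-specific rewrite happens here")
```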
huggingface_hub/inference/_providers/together.py
CHANGED
@@ -1,6 +1,6 @@
 import base64
 from abc import ABC
-from typing import Any, Dict, Optional, Union
+from typing import Any, Optional, Union
 
 from huggingface_hub.hf_api import InferenceProviderMapping
 from huggingface_hub.inference._common import RequestParameters, _as_dict
@@ -36,7 +36,7 @@ class TogetherTextGenerationTask(BaseTextGenerationTask):
     def __init__(self):
         super().__init__(provider=_PROVIDER, base_url=_BASE_URL)
 
-    def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any:
+    def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any:
         output = _as_dict(response)["choices"][0]
         return {
             "generated_text": output["text"],
@@ -52,8 +52,8 @@ class TogetherConversationalTask(BaseConversationalTask):
         super().__init__(provider=_PROVIDER, base_url=_BASE_URL)
 
     def _prepare_payload_as_dict(
-        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
-    ) -> Optional[Dict]:
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
         payload = super()._prepare_payload_as_dict(inputs, parameters, provider_mapping_info)
         response_format = parameters.get("response_format")
         if isinstance(response_format, dict) and response_format.get("type") == "json_schema":
@@ -72,8 +72,8 @@ class TogetherTextToImageTask(TogetherTask):
         super().__init__("text-to-image")
 
     def _prepare_payload_as_dict(
-        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
-    ) -> Optional[Dict]:
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
         mapped_model = provider_mapping_info.provider_id
         parameters = filter_none(parameters)
         if "num_inference_steps" in parameters:
@@ -83,6 +83,6 @@ class TogetherTextToImageTask(TogetherTask):
 
         return {"prompt": inputs, "response_format": "base64", **parameters, "model": mapped_model}
 
-    def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any:
+    def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any:
         response_dict = _as_dict(response)
         return base64.b64decode(response_dict["data"][0]["b64_json"])
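The text-to-image helpers for Together, Nebius, and Nscale all finish the same way: the provider returns the image base64-encoded under `data[0].b64_json`, and `get_response` decodes it back to raw bytes. A self-contained round-trip with dummy bytes:

```python
import base64

fake_image = b"\x89PNG\r\n..."  # stand-in for real image bytes
response_dict = {"data": [{"b64_json": base64.b64encode(fake_image).decode()}]}

assert base64.b64decode(response_dict["data"][0]["b64_json"]) == fake_image
```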
huggingface_hub/lfs.py
CHANGED
@@ -14,15 +14,12 @@
 # limitations under the License.
 """Git LFS related type definitions and utilities"""
 
-import inspect
 import io
 import re
-import warnings
 from dataclasses import dataclass
 from math import ceil
 from os.path import getsize
-from pathlib import Path
-from typing import TYPE_CHECKING, BinaryIO, Dict, Iterable, List, Optional, Tuple, TypedDict
+from typing import TYPE_CHECKING, BinaryIO, Iterable, Optional, TypedDict
 from urllib.parse import unquote
 
 from huggingface_hub import constants
@@ -34,12 +31,10 @@ from .utils import (
     hf_raise_for_status,
     http_backoff,
     logging,
-    tqdm,
     validate_hf_hub_args,
 )
 from .utils._lfs import SliceFileObj
 from .utils.sha import sha256, sha_fileobj
-from .utils.tqdm import is_tqdm_disabled
 
 
 if TYPE_CHECKING:
@@ -107,9 +102,9 @@ def post_lfs_batch_info(
     repo_id: str,
     revision: Optional[str] = None,
     endpoint: Optional[str] = None,
-    headers: Optional[Dict[str, str]] = None,
-    transfers: Optional[List[str]] = None,
-) -> Tuple[List[Dict], List[Dict], Optional[str]]:
+    headers: Optional[dict[str, str]] = None,
+    transfers: Optional[list[str]] = None,
+) -> tuple[list[dict], list[dict], Optional[str]]:
     """
     Requests the LFS batch endpoint to retrieve upload instructions
 
@@ -140,7 +135,7 @@ def post_lfs_batch_info(
     Raises:
         [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
             If an argument is invalid or the server response is malformed.
-        [`HTTPError`](https://requests.readthedocs.io/en/latest/api/#requests.HTTPError)
+        [`HfHubHTTPError`]
             If the server returned an error.
     """
     endpoint = endpoint if endpoint is not None else constants.ENDPOINT
@@ -148,7 +143,7 @@ def post_lfs_batch_info(
     if repo_type in constants.REPO_TYPES_URL_PREFIXES:
         url_prefix = constants.REPO_TYPES_URL_PREFIXES[repo_type]
     batch_url = f"{endpoint}/{url_prefix}{repo_id}.git/info/lfs/objects/batch"
-    payload: Dict = {
+    payload: dict = {
         "operation": "upload",
         "transfers": transfers if transfers is not None else ["basic", "multipart"],
         "objects": [
@@ -195,14 +190,14 @@ class CompletionPayloadT(TypedDict):
     """Payload that will be sent to the Hub when uploading multi-part."""
 
     oid: str
-    parts: List[PayloadPartT]
+    parts: list[PayloadPartT]
 
 
 def lfs_upload(
     operation: "CommitOperationAdd",
-    lfs_batch_action: Dict,
+    lfs_batch_action: dict,
     token: Optional[str] = None,
-    headers: Optional[Dict[str, str]] = None,
+    headers: Optional[dict[str, str]] = None,
     endpoint: Optional[str] = None,
 ) -> None:
     """
@@ -222,7 +217,7 @@ def lfs_upload(
     Raises:
         [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
             If `lfs_batch_action` is improperly formatted
-        [`HTTPError`](https://requests.readthedocs.io/en/latest/api/#requests.HTTPError)
+        [`HfHubHTTPError`]
             If the upload resulted in an error
     """
     # 0. If LFS file is already present, skip upload
@@ -316,11 +311,9 @@ def _upload_single_part(operation: "CommitOperationAdd", upload_url: str) -> None:
         fileobj:
             The file-like object holding the data to upload.
 
-    Returns: `requests.Response`
-
     Raises:
-        [`HTTPError`](https://requests.readthedocs.io/en/latest/api/#requests.HTTPError)
-            If the upload resulted in an error
+        [`HfHubHTTPError`]
+            If the upload resulted in an error.
     """
     with operation.as_file(with_tqdm=True) as fileobj:
         # S3 might raise a transient 500 error -> let's retry if that happens
@@ -328,30 +321,16 @@ def _upload_single_part(operation: "CommitOperationAdd", upload_url: str) -> None:
         hf_raise_for_status(response)
 
 
-def _upload_multi_part(operation: "CommitOperationAdd", header: Dict, chunk_size: int, upload_url: str) -> None:
+def _upload_multi_part(operation: "CommitOperationAdd", header: dict, chunk_size: int, upload_url: str) -> None:
     """
     Uploads file using HF multipart LFS transfer protocol.
     """
     # 1. Get upload URLs for each part
    sorted_parts_urls = _get_sorted_parts_urls(header=header, upload_info=operation.upload_info, chunk_size=chunk_size)
 
-    # 2. Upload parts (either with hf_transfer or in pure Python)
-    use_hf_transfer = constants.HF_HUB_ENABLE_HF_TRANSFER
-    if (
-        constants.HF_HUB_ENABLE_HF_TRANSFER
-        and not isinstance(operation.path_or_fileobj, str)
-        and not isinstance(operation.path_or_fileobj, Path)
-    ):
-        warnings.warn(
-            "hf_transfer is enabled but does not support uploading from bytes or BinaryIO, falling back to regular"
-            " upload"
-        )
-        use_hf_transfer = False
-
-    response_headers = (
-        _upload_parts_hf_transfer(operation=operation, sorted_parts_urls=sorted_parts_urls, chunk_size=chunk_size)
-        if use_hf_transfer
-        else _upload_parts_iteratively(operation=operation, sorted_parts_urls=sorted_parts_urls, chunk_size=chunk_size)
+    # 2. Upload parts (pure Python)
+    response_headers = _upload_parts_iteratively(
+        operation=operation, sorted_parts_urls=sorted_parts_urls, chunk_size=chunk_size
     )
 
     # 3. Send completion request
@@ -363,7 +342,7 @@ def _upload_multi_part(operation: "CommitOperationAdd", header: Dict, chunk_size: int, upload_url: str) -> None:
     hf_raise_for_status(completion_res)
 
 
-def _get_sorted_parts_urls(header: Dict, upload_info: UploadInfo, chunk_size: int) -> List[str]:
+def _get_sorted_parts_urls(header: dict, upload_info: UploadInfo, chunk_size: int) -> list[str]:
     sorted_part_upload_urls = [
         upload_url
         for _, upload_url in sorted(
@@ -381,8 +360,8 @@ def _get_sorted_parts_urls(header: Dict, upload_info: UploadInfo, chunk_size: int) -> List[str]:
     return sorted_part_upload_urls
 
 
-def _get_completion_payload(response_headers: List[Dict], oid: str) -> CompletionPayloadT:
-    parts: List[PayloadPartT] = []
+def _get_completion_payload(response_headers: list[dict], oid: str) -> CompletionPayloadT:
+    parts: list[PayloadPartT] = []
     for part_number, header in enumerate(response_headers):
         etag = header.get("etag")
         if etag is None or etag == "":
@@ -397,8 +376,8 @@ def _get_completion_payload(response_headers: List[Dict], oid: str) -> CompletionPayloadT:
 
 
 def _upload_parts_iteratively(
-    operation: "CommitOperationAdd", sorted_parts_urls: List[str], chunk_size: int
-) -> List[Dict]:
+    operation: "CommitOperationAdd", sorted_parts_urls: list[str], chunk_size: int
+) -> list[dict]:
     headers = []
     with operation.as_file(with_tqdm=True) as fileobj:
         for part_idx, part_upload_url in enumerate(sorted_parts_urls):
@@ -412,55 +391,3 @@ def _upload_parts_iteratively(
             hf_raise_for_status(part_upload_res)
             headers.append(part_upload_res.headers)
     return headers  # type: ignore
-
-
-def _upload_parts_hf_transfer(
-    operation: "CommitOperationAdd", sorted_parts_urls: List[str], chunk_size: int
-) -> List[Dict]:
-    # Upload file using an external Rust-based package. Upload is faster but support less features (no progress bars).
-    try:
-        from hf_transfer import multipart_upload
-    except ImportError:
-        raise ValueError(
-            "Fast uploading using 'hf_transfer' is enabled (HF_HUB_ENABLE_HF_TRANSFER=1) but 'hf_transfer' package is"
-            " not available in your environment. Try `pip install hf_transfer`."
-        )
-
-    supports_callback = "callback" in inspect.signature(multipart_upload).parameters
-    if not supports_callback:
-        warnings.warn(
-            "You are using an outdated version of `hf_transfer`. Consider upgrading to latest version to enable progress bars using `pip install -U hf_transfer`."
-        )
-
-    total = operation.upload_info.size
-    desc = operation.path_in_repo
-    if len(desc) > 40:
-        desc = f"(…){desc[-40:]}"
-
-    with tqdm(
-        unit="B",
-        unit_scale=True,
-        total=total,
-        initial=0,
-        desc=desc,
-        disable=is_tqdm_disabled(logger.getEffectiveLevel()),
-        name="huggingface_hub.lfs_upload",
-    ) as progress:
-        try:
-            output = multipart_upload(
-                file_path=operation.path_or_fileobj,
-                parts_urls=sorted_parts_urls,
-                chunk_size=chunk_size,
-                max_files=128,
-                parallel_failures=127,  # could be removed
-                max_retries=5,
-                **({"callback": progress.update} if supports_callback else {}),
-            )
-        except Exception as e:
-            raise RuntimeError(
-                "An error occurred while uploading using `hf_transfer`. Consider disabling HF_HUB_ENABLE_HF_TRANSFER for"
-                " better error handling."
-            ) from e
-        if not supports_callback:
-            progress.update(total)
-        return output
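With `_upload_parts_hf_transfer` deleted and the `HF_HUB_ENABLE_HF_TRANSFER` branch gone, `_upload_multi_part` always takes the iterative pure-Python path. A heavily simplified sketch of that loop (not the library's actual code; `put` is an injected stand-in for the retrying `http_backoff` PUT, and the fake ETags are arbitrary):

```python
from io import BytesIO
from typing import Callable

def upload_parts(
    fileobj: BytesIO,
    part_urls: list[str],
    chunk_size: int,
    put: Callable[[str, bytes], dict],  # performs the HTTP PUT, returns response headers
) -> list[dict]:
    headers: list[dict] = []
    for url in part_urls:
        chunk = fileobj.read(chunk_size)  # one slice of the file per pre-signed URL
        headers.append(put(url, chunk))   # keep headers: ETags feed the completion payload
    return headers

# Toy run with a fake PUT returning a deterministic "etag" per chunk:
data = BytesIO(b"0123456789abcdef")
collected = upload_parts(data, ["url-1", "url-2"], chunk_size=8,
                         put=lambda url, chunk: {"etag": chunk[:2].hex()})
assert [h["etag"] for h in collected] == ["3031", "3839"]
```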