magic_hour 0.35.0__py3-none-any.whl → 0.36.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of magic_hour might be problematic. Click here for more details.
- magic_hour/README.md +35 -0
- magic_hour/core/base_client.py +6 -5
- magic_hour/core/query.py +12 -6
- magic_hour/core/request.py +3 -3
- magic_hour/core/response.py +18 -14
- magic_hour/core/utils.py +3 -3
- magic_hour/environment.py +1 -1
- magic_hour/helpers/__init__.py +3 -0
- magic_hour/helpers/download.py +75 -0
- magic_hour/resources/v1/README.md +33 -0
- magic_hour/resources/v1/ai_clothes_changer/README.md +73 -0
- magic_hour/resources/v1/ai_clothes_changer/client.py +146 -0
- magic_hour/resources/v1/ai_face_editor/README.md +110 -0
- magic_hour/resources/v1/ai_face_editor/client.py +168 -0
- magic_hour/resources/v1/ai_gif_generator/README.md +59 -0
- magic_hour/resources/v1/ai_gif_generator/client.py +119 -0
- magic_hour/resources/v1/ai_headshot_generator/README.md +60 -0
- magic_hour/resources/v1/ai_headshot_generator/client.py +140 -0
- magic_hour/resources/v1/ai_image_editor/README.md +64 -0
- magic_hour/resources/v1/ai_image_editor/client.py +136 -0
- magic_hour/resources/v1/ai_image_generator/README.md +66 -0
- magic_hour/resources/v1/ai_image_generator/client.py +139 -0
- magic_hour/resources/v1/ai_image_upscaler/README.md +67 -0
- magic_hour/resources/v1/ai_image_upscaler/client.py +150 -0
- magic_hour/resources/v1/ai_meme_generator/README.md +71 -0
- magic_hour/resources/v1/ai_meme_generator/client.py +127 -0
- magic_hour/resources/v1/ai_photo_editor/README.md +98 -7
- magic_hour/resources/v1/ai_photo_editor/client.py +174 -0
- magic_hour/resources/v1/ai_qr_code_generator/README.md +63 -0
- magic_hour/resources/v1/ai_qr_code_generator/client.py +123 -0
- magic_hour/resources/v1/ai_talking_photo/README.md +74 -0
- magic_hour/resources/v1/ai_talking_photo/client.py +170 -0
- magic_hour/resources/v1/animation/README.md +100 -0
- magic_hour/resources/v1/animation/client.py +218 -0
- magic_hour/resources/v1/auto_subtitle_generator/README.md +69 -0
- magic_hour/resources/v1/auto_subtitle_generator/client.py +178 -0
- magic_hour/resources/v1/face_detection/README.md +59 -0
- magic_hour/resources/v1/face_detection/__init__.py +10 -2
- magic_hour/resources/v1/face_detection/client.py +179 -0
- magic_hour/resources/v1/face_swap/README.md +105 -8
- magic_hour/resources/v1/face_swap/client.py +242 -0
- magic_hour/resources/v1/face_swap_photo/README.md +84 -0
- magic_hour/resources/v1/face_swap_photo/client.py +172 -0
- magic_hour/resources/v1/files/README.md +40 -0
- magic_hour/resources/v1/files/client.py +350 -0
- magic_hour/resources/v1/files/client_test.py +414 -0
- magic_hour/resources/v1/files/upload_urls/README.md +8 -0
- magic_hour/resources/v1/image_background_remover/README.md +68 -0
- magic_hour/resources/v1/image_background_remover/client.py +130 -0
- magic_hour/resources/v1/image_projects/README.md +52 -0
- magic_hour/resources/v1/image_projects/__init__.py +10 -2
- magic_hour/resources/v1/image_projects/client.py +138 -0
- magic_hour/resources/v1/image_projects/client_test.py +527 -0
- magic_hour/resources/v1/image_to_video/README.md +77 -9
- magic_hour/resources/v1/image_to_video/client.py +186 -0
- magic_hour/resources/v1/lip_sync/README.md +87 -9
- magic_hour/resources/v1/lip_sync/client.py +210 -0
- magic_hour/resources/v1/photo_colorizer/README.md +59 -0
- magic_hour/resources/v1/photo_colorizer/client.py +130 -0
- magic_hour/resources/v1/text_to_video/README.md +68 -0
- magic_hour/resources/v1/text_to_video/client.py +151 -0
- magic_hour/resources/v1/video_projects/README.md +52 -0
- magic_hour/resources/v1/video_projects/__init__.py +10 -2
- magic_hour/resources/v1/video_projects/client.py +137 -0
- magic_hour/resources/v1/video_projects/client_test.py +527 -0
- magic_hour/resources/v1/video_to_video/README.md +98 -10
- magic_hour/resources/v1/video_to_video/client.py +222 -0
- magic_hour/types/params/__init__.py +58 -0
- magic_hour/types/params/v1_ai_clothes_changer_generate_body_assets.py +33 -0
- magic_hour/types/params/v1_ai_face_editor_generate_body_assets.py +17 -0
- magic_hour/types/params/v1_ai_headshot_generator_generate_body_assets.py +17 -0
- magic_hour/types/params/v1_ai_image_editor_generate_body_assets.py +17 -0
- magic_hour/types/params/v1_ai_image_upscaler_generate_body_assets.py +17 -0
- magic_hour/types/params/v1_ai_photo_editor_generate_body_assets.py +17 -0
- magic_hour/types/params/v1_ai_talking_photo_generate_body_assets.py +26 -0
- magic_hour/types/params/v1_animation_generate_body_assets.py +39 -0
- magic_hour/types/params/v1_auto_subtitle_generator_generate_body_assets.py +17 -0
- magic_hour/types/params/v1_face_detection_generate_body_assets.py +17 -0
- magic_hour/types/params/v1_face_swap_create_body.py +12 -0
- magic_hour/types/params/v1_face_swap_create_body_style.py +33 -0
- magic_hour/types/params/v1_face_swap_generate_body_assets.py +56 -0
- magic_hour/types/params/v1_face_swap_generate_body_assets_face_mappings_item.py +25 -0
- magic_hour/types/params/v1_face_swap_photo_generate_body_assets.py +47 -0
- magic_hour/types/params/v1_face_swap_photo_generate_body_assets_face_mappings_item.py +25 -0
- magic_hour/types/params/v1_image_background_remover_generate_body_assets.py +27 -0
- magic_hour/types/params/v1_image_to_video_generate_body_assets.py +17 -0
- magic_hour/types/params/v1_lip_sync_generate_body_assets.py +36 -0
- magic_hour/types/params/v1_photo_colorizer_generate_body_assets.py +17 -0
- magic_hour/types/params/v1_video_to_video_generate_body_assets.py +27 -0
- magic_hour-0.36.1.dist-info/METADATA +306 -0
- {magic_hour-0.35.0.dist-info → magic_hour-0.36.1.dist-info}/RECORD +93 -65
- magic_hour-0.35.0.dist-info/METADATA +0 -166
- {magic_hour-0.35.0.dist-info → magic_hour-0.36.1.dist-info}/LICENSE +0 -0
- {magic_hour-0.35.0.dist-info → magic_hour-0.36.1.dist-info}/WHEEL +0 -0
|
@@ -1,3 +1,4 @@
|
|
|
1
|
+
import logging
|
|
1
2
|
import typing
|
|
2
3
|
|
|
3
4
|
from magic_hour.core import (
|
|
@@ -8,13 +9,106 @@ from magic_hour.core import (
|
|
|
8
9
|
to_encodable,
|
|
9
10
|
type_utils,
|
|
10
11
|
)
|
|
12
|
+
from magic_hour.resources.v1.files.client import AsyncFilesClient, FilesClient
|
|
13
|
+
from magic_hour.resources.v1.video_projects.client import (
|
|
14
|
+
AsyncVideoProjectsClient,
|
|
15
|
+
VideoProjectsClient,
|
|
16
|
+
)
|
|
11
17
|
from magic_hour.types import models, params
|
|
12
18
|
|
|
13
19
|
|
|
20
|
+
logging.basicConfig(level=logging.INFO)
|
|
21
|
+
logger = logging.getLogger(__name__)
|
|
22
|
+
|
|
23
|
+
|
|
14
24
|
class AutoSubtitleGeneratorClient:
|
|
15
25
|
def __init__(self, *, base_client: SyncBaseClient):
|
|
16
26
|
self._base_client = base_client
|
|
17
27
|
|
|
28
|
+
def generate(
|
|
29
|
+
self,
|
|
30
|
+
*,
|
|
31
|
+
assets: params.V1AutoSubtitleGeneratorCreateBodyAssets,
|
|
32
|
+
end_seconds: float,
|
|
33
|
+
start_seconds: float,
|
|
34
|
+
style: params.V1AutoSubtitleGeneratorCreateBodyStyle,
|
|
35
|
+
name: typing.Union[
|
|
36
|
+
typing.Optional[str], type_utils.NotGiven
|
|
37
|
+
] = type_utils.NOT_GIVEN,
|
|
38
|
+
wait_for_completion: bool = True,
|
|
39
|
+
download_outputs: bool = True,
|
|
40
|
+
download_directory: typing.Optional[str] = None,
|
|
41
|
+
request_options: typing.Optional[RequestOptions] = None,
|
|
42
|
+
):
|
|
43
|
+
"""
|
|
44
|
+
Generate subtitled video (alias for create with additional functionality).
|
|
45
|
+
|
|
46
|
+
Automatically generate subtitles for your video in multiple languages.
|
|
47
|
+
|
|
48
|
+
Args:
|
|
49
|
+
name: The name of video. This value is mainly used for your own identification of the video.
|
|
50
|
+
assets: Provide the assets for auto subtitle generator
|
|
51
|
+
end_seconds: The end time of the input video in seconds. This value is used to trim the input video. The value must be greater than 0.1, and more than the start_seconds.
|
|
52
|
+
start_seconds: The start time of the input video in seconds. This value is used to trim the input video. The value must be greater than 0.
|
|
53
|
+
style: Style of the subtitle. At least one of `.style.template` or `.style.custom_config` must be provided.
|
|
54
|
+
* If only `.style.template` is provided, default values for the template will be used.
|
|
55
|
+
* If both are provided, the fields in `.style.custom_config` will be used to overwrite the fields in `.style.template`.
|
|
56
|
+
* If only `.style.custom_config` is provided, then all fields in `.style.custom_config` will be used.
|
|
57
|
+
|
|
58
|
+
To use custom config only, the following `custom_config` params are required:
|
|
59
|
+
* `.style.custom_config.font`
|
|
60
|
+
* `.style.custom_config.text_color`
|
|
61
|
+
* `.style.custom_config.vertical_position`
|
|
62
|
+
* `.style.custom_config.horizontal_position`
|
|
63
|
+
|
|
64
|
+
wait_for_completion: Whether to wait for the video project to complete
|
|
65
|
+
download_outputs: Whether to download the outputs
|
|
66
|
+
download_directory: The directory to download the outputs to. If not provided, the outputs will be downloaded to the current working directory
|
|
67
|
+
request_options: Additional options to customize the HTTP request
|
|
68
|
+
|
|
69
|
+
Returns:
|
|
70
|
+
V1VideoProjectsGetResponseWithDownloads: The response from the Auto Subtitle Generator API with the downloaded paths if `download_outputs` is True.
|
|
71
|
+
|
|
72
|
+
Examples:
|
|
73
|
+
```py
|
|
74
|
+
response = client.v1.auto_subtitle_generator.generate(
|
|
75
|
+
assets={"video_file_path": "path/to/video.mp4"},
|
|
76
|
+
end_seconds=15.0,
|
|
77
|
+
start_seconds=0.0,
|
|
78
|
+
style={},
|
|
79
|
+
name="Subtitled Video",
|
|
80
|
+
wait_for_completion=True,
|
|
81
|
+
download_outputs=True,
|
|
82
|
+
download_directory="outputs/",
|
|
83
|
+
)
|
|
84
|
+
```
|
|
85
|
+
"""
|
|
86
|
+
|
|
87
|
+
file_client = FilesClient(base_client=self._base_client)
|
|
88
|
+
|
|
89
|
+
video_file_path = assets["video_file_path"]
|
|
90
|
+
assets["video_file_path"] = file_client.upload_file(file=video_file_path)
|
|
91
|
+
|
|
92
|
+
create_response = self.create(
|
|
93
|
+
assets=assets,
|
|
94
|
+
end_seconds=end_seconds,
|
|
95
|
+
start_seconds=start_seconds,
|
|
96
|
+
style=style,
|
|
97
|
+
name=name,
|
|
98
|
+
request_options=request_options,
|
|
99
|
+
)
|
|
100
|
+
logger.info(f"Auto Subtitle Generator response: {create_response}")
|
|
101
|
+
|
|
102
|
+
video_projects_client = VideoProjectsClient(base_client=self._base_client)
|
|
103
|
+
response = video_projects_client.check_result(
|
|
104
|
+
id=create_response.id,
|
|
105
|
+
wait_for_completion=wait_for_completion,
|
|
106
|
+
download_outputs=download_outputs,
|
|
107
|
+
download_directory=download_directory,
|
|
108
|
+
)
|
|
109
|
+
|
|
110
|
+
return response
|
|
111
|
+
|
|
18
112
|
def create(
|
|
19
113
|
self,
|
|
20
114
|
*,
|
|
@@ -94,6 +188,90 @@ class AsyncAutoSubtitleGeneratorClient:
|
|
|
94
188
|
def __init__(self, *, base_client: AsyncBaseClient):
|
|
95
189
|
self._base_client = base_client
|
|
96
190
|
|
|
191
|
+
async def generate(
|
|
192
|
+
self,
|
|
193
|
+
*,
|
|
194
|
+
assets: params.V1AutoSubtitleGeneratorCreateBodyAssets,
|
|
195
|
+
end_seconds: float,
|
|
196
|
+
start_seconds: float,
|
|
197
|
+
style: params.V1AutoSubtitleGeneratorCreateBodyStyle,
|
|
198
|
+
name: typing.Union[
|
|
199
|
+
typing.Optional[str], type_utils.NotGiven
|
|
200
|
+
] = type_utils.NOT_GIVEN,
|
|
201
|
+
wait_for_completion: bool = True,
|
|
202
|
+
download_outputs: bool = True,
|
|
203
|
+
download_directory: typing.Optional[str] = None,
|
|
204
|
+
request_options: typing.Optional[RequestOptions] = None,
|
|
205
|
+
):
|
|
206
|
+
"""
|
|
207
|
+
Generate subtitled video (alias for create with additional functionality).
|
|
208
|
+
|
|
209
|
+
Automatically generate subtitles for your video in multiple languages.
|
|
210
|
+
|
|
211
|
+
Args:
|
|
212
|
+
name: The name of video. This value is mainly used for your own identification of the video.
|
|
213
|
+
assets: Provide the assets for auto subtitle generator
|
|
214
|
+
end_seconds: The end time of the input video in seconds. This value is used to trim the input video. The value must be greater than 0.1, and more than the start_seconds.
|
|
215
|
+
start_seconds: The start time of the input video in seconds. This value is used to trim the input video. The value must be greater than 0.
|
|
216
|
+
style: Style of the subtitle. At least one of `.style.template` or `.style.custom_config` must be provided.
|
|
217
|
+
* If only `.style.template` is provided, default values for the template will be used.
|
|
218
|
+
* If both are provided, the fields in `.style.custom_config` will be used to overwrite the fields in `.style.template`.
|
|
219
|
+
* If only `.style.custom_config` is provided, then all fields in `.style.custom_config` will be used.
|
|
220
|
+
|
|
221
|
+
To use custom config only, the following `custom_config` params are required:
|
|
222
|
+
* `.style.custom_config.font`
|
|
223
|
+
* `.style.custom_config.text_color`
|
|
224
|
+
* `.style.custom_config.vertical_position`
|
|
225
|
+
* `.style.custom_config.horizontal_position`
|
|
226
|
+
|
|
227
|
+
wait_for_completion: Whether to wait for the video project to complete
|
|
228
|
+
download_outputs: Whether to download the outputs
|
|
229
|
+
download_directory: The directory to download the outputs to. If not provided, the outputs will be downloaded to the current working directory
|
|
230
|
+
request_options: Additional options to customize the HTTP request
|
|
231
|
+
|
|
232
|
+
Returns:
|
|
233
|
+
V1VideoProjectsGetResponseWithDownloads: The response from the Auto Subtitle Generator API with the downloaded paths if `download_outputs` is True.
|
|
234
|
+
|
|
235
|
+
Examples:
|
|
236
|
+
```py
|
|
237
|
+
response = await client.v1.auto_subtitle_generator.generate(
|
|
238
|
+
assets={"video_file_path": "path/to/video.mp4"},
|
|
239
|
+
end_seconds=15.0,
|
|
240
|
+
start_seconds=0.0,
|
|
241
|
+
style={},
|
|
242
|
+
name="Subtitled Video",
|
|
243
|
+
wait_for_completion=True,
|
|
244
|
+
download_outputs=True,
|
|
245
|
+
download_directory="outputs/",
|
|
246
|
+
)
|
|
247
|
+
```
|
|
248
|
+
"""
|
|
249
|
+
|
|
250
|
+
file_client = AsyncFilesClient(base_client=self._base_client)
|
|
251
|
+
|
|
252
|
+
video_file_path = assets["video_file_path"]
|
|
253
|
+
assets["video_file_path"] = await file_client.upload_file(file=video_file_path)
|
|
254
|
+
|
|
255
|
+
create_response = await self.create(
|
|
256
|
+
assets=assets,
|
|
257
|
+
end_seconds=end_seconds,
|
|
258
|
+
start_seconds=start_seconds,
|
|
259
|
+
style=style,
|
|
260
|
+
name=name,
|
|
261
|
+
request_options=request_options,
|
|
262
|
+
)
|
|
263
|
+
logger.info(f"Auto Subtitle Generator response: {create_response}")
|
|
264
|
+
|
|
265
|
+
video_projects_client = AsyncVideoProjectsClient(base_client=self._base_client)
|
|
266
|
+
response = await video_projects_client.check_result(
|
|
267
|
+
id=create_response.id,
|
|
268
|
+
wait_for_completion=wait_for_completion,
|
|
269
|
+
download_outputs=download_outputs,
|
|
270
|
+
download_directory=download_directory,
|
|
271
|
+
)
|
|
272
|
+
|
|
273
|
+
return response
|
|
274
|
+
|
|
97
275
|
async def create(
|
|
98
276
|
self,
|
|
99
277
|
*,
|
|
@@ -1,3 +1,60 @@
|
|
|
1
|
+
# v1_face_detection
|
|
2
|
+
|
|
3
|
+
## Module Functions
|
|
4
|
+
|
|
5
|
+
<!-- CUSTOM DOCS START -->
|
|
6
|
+
|
|
7
|
+
### Face Detection Generate Workflow <a name="generate"></a>
|
|
8
|
+
|
|
9
|
+
The workflow performs the following actions
|
|
10
|
+
|
|
11
|
+
1. upload local assets to Magic Hour storage. So you can pass in a local path instead of having to upload files yourself
|
|
12
|
+
2. trigger a generation
|
|
13
|
+
3. poll for a completion status. This is configurable
|
|
14
|
+
4. if success, download the output to local directory
|
|
15
|
+
|
|
16
|
+
> [!TIP]
|
|
17
|
+
> This is the recommended way to use the SDK unless you have specific needs where it is necessary to split up the actions.
|
|
18
|
+
|
|
19
|
+
#### Parameters
|
|
20
|
+
|
|
21
|
+
In addition to the parameters listed in the `.create` section below, `.generate` introduces 3 new parameters:
|
|
22
|
+
|
|
23
|
+
- `wait_for_completion` (bool, default True): Whether to wait for the project to complete.
|
|
24
|
+
- `download_outputs` (bool, default True): Whether to download the generated files
|
|
25
|
+
- `download_directory` (str, optional): Directory to save downloaded files (defaults to current directory)
|
|
26
|
+
|
|
27
|
+
#### Synchronous Client
|
|
28
|
+
|
|
29
|
+
```python
|
|
30
|
+
from magic_hour import Client
|
|
31
|
+
from os import getenv
|
|
32
|
+
|
|
33
|
+
client = Client(token=getenv("API_TOKEN"))
|
|
34
|
+
res = client.v1.face_detection.generate(
|
|
35
|
+
    assets={"target_file_path": "/path/to/1234.png"}, confidence_score=0.5,
|
|
36
|
+
wait_for_completion=True,
|
|
37
|
+
download_outputs=True,
|
|
38
|
+
download_directory="outputs"
|
|
39
|
+
)
|
|
40
|
+
```
|
|
41
|
+
|
|
42
|
+
#### Asynchronous Client
|
|
43
|
+
|
|
44
|
+
```python
|
|
45
|
+
from magic_hour import AsyncClient
|
|
46
|
+
from os import getenv
|
|
47
|
+
|
|
48
|
+
client = AsyncClient(token=getenv("API_TOKEN"))
|
|
49
|
+
res = await client.v1.face_detection.generate(
|
|
50
|
+
    assets={"target_file_path": "/path/to/1234.png"}, confidence_score=0.5,
|
|
51
|
+
wait_for_completion=True,
|
|
52
|
+
download_outputs=True,
|
|
53
|
+
download_directory="outputs"
|
|
54
|
+
)
|
|
55
|
+
```
|
|
56
|
+
|
|
57
|
+
<!-- CUSTOM DOCS END -->
|
|
1
58
|
|
|
2
59
|
### Get face detection details <a name="get"></a>
|
|
3
60
|
|
|
@@ -58,6 +115,7 @@ Note: Face detection is free to use for the near future. Pricing may change in t
|
|
|
58
115
|
| Parameter | Required | Description | Example |
|
|
59
116
|
|-----------|:--------:|-------------|--------|
|
|
60
117
|
| `assets` | ✓ | Provide the assets for face detection | `{"target_file_path": "api-assets/id/1234.png"}` |
|
|
118
|
+
| `└─ target_file_path` | ✓ | This is the image or video where the face will be detected. This value is either - a direct URL to the video file - `file_path` field from the response of the [upload urls API](https://docs.magichour.ai/api-reference/files/generate-asset-upload-urls). Please refer to the [Input File documentation](https://docs.magichour.ai/api-reference/files/generate-asset-upload-urls#input-file) to learn more. | `"api-assets/id/1234.png"` |
|
|
61
119
|
| `confidence_score` | ✗ | Confidence threshold for filtering detected faces. * Higher values (e.g., 0.9) include only faces detected with high certainty, reducing false positives. * Lower values (e.g., 0.3) include more faces, but may increase the chance of incorrect detections. | `0.5` |
|
|
62
120
|
|
|
63
121
|
#### Synchronous Client
|
|
@@ -93,3 +151,4 @@ res = await client.v1.face_detection.create(
|
|
|
93
151
|
|
|
94
152
|
##### Example
|
|
95
153
|
`{"credits_charged": 123, "id": "uuid-example"}`
|
|
154
|
+
|
|
@@ -1,4 +1,12 @@
|
|
|
1
|
-
from .client import
|
|
1
|
+
from .client import (
|
|
2
|
+
AsyncFaceDetectionClient,
|
|
3
|
+
FaceDetectionClient,
|
|
4
|
+
V1FaceDetectionGetResponseWithDownloads,
|
|
5
|
+
)
|
|
2
6
|
|
|
3
7
|
|
|
4
|
-
__all__ = [
|
|
8
|
+
__all__ = [
|
|
9
|
+
"AsyncFaceDetectionClient",
|
|
10
|
+
"FaceDetectionClient",
|
|
11
|
+
"V1FaceDetectionGetResponseWithDownloads",
|
|
12
|
+
]
|
|
@@ -1,3 +1,8 @@
|
|
|
1
|
+
import asyncio
|
|
2
|
+
import logging
|
|
3
|
+
import os
|
|
4
|
+
import pydantic
|
|
5
|
+
import time
|
|
1
6
|
import typing
|
|
2
7
|
|
|
3
8
|
from magic_hour.core import (
|
|
@@ -8,13 +13,107 @@ from magic_hour.core import (
|
|
|
8
13
|
to_encodable,
|
|
9
14
|
type_utils,
|
|
10
15
|
)
|
|
16
|
+
from magic_hour.helpers.download import download_files_async, download_files_sync
|
|
17
|
+
from magic_hour.resources.v1.files import AsyncFilesClient, FilesClient
|
|
11
18
|
from magic_hour.types import models, params
|
|
12
19
|
|
|
13
20
|
|
|
21
|
+
logger = logging.getLogger(__name__)
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
class V1FaceDetectionGetResponseWithDownloads(models.V1FaceDetectionGetResponse):
|
|
25
|
+
downloaded_paths: typing.Optional[typing.List[str]] = pydantic.Field(
|
|
26
|
+
default=None, alias="downloaded_paths"
|
|
27
|
+
)
|
|
28
|
+
"""
|
|
29
|
+
The paths to the downloaded face images.
|
|
30
|
+
|
|
31
|
+
This field is only populated if `download_outputs` is True and the face detection is complete.
|
|
32
|
+
"""
|
|
33
|
+
|
|
34
|
+
|
|
14
35
|
class FaceDetectionClient:
|
|
15
36
|
def __init__(self, *, base_client: SyncBaseClient):
|
|
16
37
|
self._base_client = base_client
|
|
17
38
|
|
|
39
|
+
def generate(
|
|
40
|
+
self,
|
|
41
|
+
*,
|
|
42
|
+
assets: params.V1FaceDetectionCreateBodyAssets,
|
|
43
|
+
confidence_score: typing.Union[
|
|
44
|
+
typing.Optional[float], type_utils.NotGiven
|
|
45
|
+
] = type_utils.NOT_GIVEN,
|
|
46
|
+
wait_for_completion: bool = True,
|
|
47
|
+
download_outputs: bool = True,
|
|
48
|
+
download_directory: typing.Optional[str] = None,
|
|
49
|
+
request_options: typing.Optional[RequestOptions] = None,
|
|
50
|
+
) -> V1FaceDetectionGetResponseWithDownloads:
|
|
51
|
+
"""
|
|
52
|
+
Generate face detection results with optional waiting and downloading.
|
|
53
|
+
|
|
54
|
+
This method creates a face detection task and optionally waits for completion
|
|
55
|
+
and downloads the detected face images.
|
|
56
|
+
|
|
57
|
+
Args:
|
|
58
|
+
assets: Provide the assets for face detection
|
|
59
|
+
confidence_score: Confidence threshold for filtering detected faces
|
|
60
|
+
wait_for_completion: Whether to wait for the face detection task to complete
|
|
61
|
+
download_outputs: Whether to download the detected face images
|
|
62
|
+
download_directory: The directory to download the face images to. If not provided,
|
|
63
|
+
the images will be downloaded to the current working directory
|
|
64
|
+
request_options: Additional options to customize the HTTP request
|
|
65
|
+
|
|
66
|
+
Returns:
|
|
67
|
+
V1FaceDetectionGetResponseWithDownloads: The face detection response with optional
|
|
68
|
+
downloaded face image paths included
|
|
69
|
+
"""
|
|
70
|
+
# Handle file upload if needed
|
|
71
|
+
file_client = FilesClient(base_client=self._base_client)
|
|
72
|
+
target_file_path = assets["target_file_path"]
|
|
73
|
+
assets["target_file_path"] = file_client.upload_file(file=target_file_path)
|
|
74
|
+
|
|
75
|
+
create_response = self.create(
|
|
76
|
+
assets=assets,
|
|
77
|
+
confidence_score=confidence_score,
|
|
78
|
+
request_options=request_options,
|
|
79
|
+
)
|
|
80
|
+
|
|
81
|
+
task_id = create_response.id
|
|
82
|
+
|
|
83
|
+
api_response = self.get(id=task_id)
|
|
84
|
+
if not wait_for_completion:
|
|
85
|
+
return V1FaceDetectionGetResponseWithDownloads(**api_response.model_dump())
|
|
86
|
+
|
|
87
|
+
poll_interval = float(os.getenv("MAGIC_HOUR_POLL_INTERVAL", "0.5"))
|
|
88
|
+
|
|
89
|
+
while api_response.status not in ["complete", "error"]:
|
|
90
|
+
api_response = self.get(id=task_id)
|
|
91
|
+
time.sleep(poll_interval)
|
|
92
|
+
|
|
93
|
+
if api_response.status != "complete":
|
|
94
|
+
log = logger.error if api_response.status == "error" else logger.info
|
|
95
|
+
log(f"Face detection {task_id} has status {api_response.status}")
|
|
96
|
+
return V1FaceDetectionGetResponseWithDownloads(**api_response.model_dump())
|
|
97
|
+
|
|
98
|
+
if not download_outputs or not api_response.faces:
|
|
99
|
+
return V1FaceDetectionGetResponseWithDownloads(**api_response.model_dump())
|
|
100
|
+
|
|
101
|
+
face_downloads = [
|
|
102
|
+
models.V1ImageProjectsGetResponseDownloadsItem(
|
|
103
|
+
url=face.url,
|
|
104
|
+
expires_at="ignore",
|
|
105
|
+
)
|
|
106
|
+
for face in api_response.faces
|
|
107
|
+
]
|
|
108
|
+
downloaded_paths = download_files_sync(
|
|
109
|
+
downloads=face_downloads,
|
|
110
|
+
download_directory=download_directory,
|
|
111
|
+
)
|
|
112
|
+
|
|
113
|
+
return V1FaceDetectionGetResponseWithDownloads(
|
|
114
|
+
**api_response.model_dump(), downloaded_paths=downloaded_paths
|
|
115
|
+
)
|
|
116
|
+
|
|
18
117
|
def get(
|
|
19
118
|
self, *, id: str, request_options: typing.Optional[RequestOptions] = None
|
|
20
119
|
) -> models.V1FaceDetectionGetResponse:
|
|
@@ -110,6 +209,86 @@ class AsyncFaceDetectionClient:
|
|
|
110
209
|
def __init__(self, *, base_client: AsyncBaseClient):
|
|
111
210
|
self._base_client = base_client
|
|
112
211
|
|
|
212
|
+
async def generate(
|
|
213
|
+
self,
|
|
214
|
+
*,
|
|
215
|
+
assets: params.V1FaceDetectionCreateBodyAssets,
|
|
216
|
+
confidence_score: typing.Union[
|
|
217
|
+
typing.Optional[float], type_utils.NotGiven
|
|
218
|
+
] = type_utils.NOT_GIVEN,
|
|
219
|
+
wait_for_completion: bool = True,
|
|
220
|
+
download_outputs: bool = True,
|
|
221
|
+
download_directory: typing.Optional[str] = None,
|
|
222
|
+
request_options: typing.Optional[RequestOptions] = None,
|
|
223
|
+
) -> V1FaceDetectionGetResponseWithDownloads:
|
|
224
|
+
"""
|
|
225
|
+
Generate face detection results with optional waiting and downloading.
|
|
226
|
+
|
|
227
|
+
This method creates a face detection task and optionally waits for completion
|
|
228
|
+
and downloads the detected face images.
|
|
229
|
+
|
|
230
|
+
Args:
|
|
231
|
+
assets: Provide the assets for face detection
|
|
232
|
+
confidence_score: Confidence threshold for filtering detected faces
|
|
233
|
+
wait_for_completion: Whether to wait for the face detection task to complete
|
|
234
|
+
download_outputs: Whether to download the detected face images
|
|
235
|
+
download_directory: The directory to download the face images to. If not provided,
|
|
236
|
+
the images will be downloaded to the current working directory
|
|
237
|
+
request_options: Additional options to customize the HTTP request
|
|
238
|
+
|
|
239
|
+
Returns:
|
|
240
|
+
V1FaceDetectionGetResponseWithDownloads: The face detection response with optional
|
|
241
|
+
downloaded face image paths included
|
|
242
|
+
"""
|
|
243
|
+
# Handle file upload if needed
|
|
244
|
+
file_client = AsyncFilesClient(base_client=self._base_client)
|
|
245
|
+
target_file_path = assets["target_file_path"]
|
|
246
|
+
assets["target_file_path"] = await file_client.upload_file(
|
|
247
|
+
file=target_file_path
|
|
248
|
+
)
|
|
249
|
+
|
|
250
|
+
create_response = await self.create(
|
|
251
|
+
assets=assets,
|
|
252
|
+
confidence_score=confidence_score,
|
|
253
|
+
request_options=request_options,
|
|
254
|
+
)
|
|
255
|
+
|
|
256
|
+
task_id = create_response.id
|
|
257
|
+
|
|
258
|
+
api_response = await self.get(id=task_id)
|
|
259
|
+
if not wait_for_completion:
|
|
260
|
+
return V1FaceDetectionGetResponseWithDownloads(**api_response.model_dump())
|
|
261
|
+
|
|
262
|
+
poll_interval = float(os.getenv("MAGIC_HOUR_POLL_INTERVAL", "0.5"))
|
|
263
|
+
|
|
264
|
+
while api_response.status not in ["complete", "error"]:
|
|
265
|
+
api_response = await self.get(id=task_id)
|
|
266
|
+
await asyncio.sleep(poll_interval)
|
|
267
|
+
|
|
268
|
+
if api_response.status != "complete":
|
|
269
|
+
log = logger.error if api_response.status == "error" else logger.info
|
|
270
|
+
log(f"Face detection {task_id} has status {api_response.status}")
|
|
271
|
+
return V1FaceDetectionGetResponseWithDownloads(**api_response.model_dump())
|
|
272
|
+
|
|
273
|
+
if not download_outputs or not api_response.faces:
|
|
274
|
+
return V1FaceDetectionGetResponseWithDownloads(**api_response.model_dump())
|
|
275
|
+
|
|
276
|
+
face_downloads = [
|
|
277
|
+
models.V1ImageProjectsGetResponseDownloadsItem(
|
|
278
|
+
url=face.url,
|
|
279
|
+
expires_at="ignore",
|
|
280
|
+
)
|
|
281
|
+
for face in api_response.faces
|
|
282
|
+
]
|
|
283
|
+
downloaded_paths = await download_files_async(
|
|
284
|
+
downloads=face_downloads,
|
|
285
|
+
download_directory=download_directory,
|
|
286
|
+
)
|
|
287
|
+
|
|
288
|
+
return V1FaceDetectionGetResponseWithDownloads(
|
|
289
|
+
**api_response.model_dump(), downloaded_paths=downloaded_paths
|
|
290
|
+
)
|
|
291
|
+
|
|
113
292
|
async def get(
|
|
114
293
|
self, *, id: str, request_options: typing.Optional[RequestOptions] = None
|
|
115
294
|
) -> models.V1FaceDetectionGetResponse:
|
|
@@ -1,3 +1,90 @@
|
|
|
1
|
+
# v1_face_swap
|
|
2
|
+
|
|
3
|
+
## Module Functions
|
|
4
|
+
|
|
5
|
+
<!-- CUSTOM DOCS START -->
|
|
6
|
+
|
|
7
|
+
### Face Swap Generate Workflow <a name="generate"></a>
|
|
8
|
+
|
|
9
|
+
The workflow performs the following actions
|
|
10
|
+
|
|
11
|
+
1. upload local assets to Magic Hour storage. So you can pass in a local path instead of having to upload files yourself
|
|
12
|
+
2. trigger a generation
|
|
13
|
+
3. poll for a completion status. This is configurable
|
|
14
|
+
4. if success, download the output to local directory
|
|
15
|
+
|
|
16
|
+
> [!TIP]
|
|
17
|
+
> This is the recommended way to use the SDK unless you have specific needs where it is necessary to split up the actions.
|
|
18
|
+
|
|
19
|
+
#### Parameters
|
|
20
|
+
|
|
21
|
+
In addition to the parameters listed in the `.create` section below, `.generate` introduces 3 new parameters:
|
|
22
|
+
|
|
23
|
+
- `wait_for_completion` (bool, default True): Whether to wait for the project to complete.
|
|
24
|
+
- `download_outputs` (bool, default True): Whether to download the generated files
|
|
25
|
+
- `download_directory` (str, optional): Directory to save downloaded files (defaults to current directory)
|
|
26
|
+
|
|
27
|
+
#### Synchronous Client
|
|
28
|
+
|
|
29
|
+
```python
|
|
30
|
+
from magic_hour import Client
|
|
31
|
+
from os import getenv
|
|
32
|
+
|
|
33
|
+
client = Client(token=getenv("API_TOKEN"))
|
|
34
|
+
res = client.v1.face_swap.generate(
|
|
35
|
+
assets={
|
|
36
|
+
"face_mappings": [
|
|
37
|
+
{
|
|
38
|
+
"new_face": "/path/to/1234.png",
|
|
39
|
+
"original_face": "api-assets/id/0-0.png",
|
|
40
|
+
}
|
|
41
|
+
],
|
|
42
|
+
"face_swap_mode": "all-faces",
|
|
43
|
+
"image_file_path": "image/id/1234.png",
|
|
44
|
+
"video_file_path": "/path/to/1234.mp4",
|
|
45
|
+
"video_source": "file",
|
|
46
|
+
},
|
|
47
|
+
end_seconds=15.0,
|
|
48
|
+
start_seconds=0.0,
|
|
49
|
+
name="Face Swap video",
|
|
50
|
+
style={"version": "default"},
|
|
51
|
+
wait_for_completion=True,
|
|
52
|
+
download_outputs=True,
|
|
53
|
+
download_directory="outputs"
|
|
54
|
+
)
|
|
55
|
+
```
|
|
56
|
+
|
|
57
|
+
#### Asynchronous Client
|
|
58
|
+
|
|
59
|
+
```python
|
|
60
|
+
from magic_hour import AsyncClient
|
|
61
|
+
from os import getenv
|
|
62
|
+
|
|
63
|
+
client = AsyncClient(token=getenv("API_TOKEN"))
|
|
64
|
+
res = await client.v1.face_swap.generate(
|
|
65
|
+
assets={
|
|
66
|
+
"face_mappings": [
|
|
67
|
+
{
|
|
68
|
+
"new_face": "/path/to/1234.png",
|
|
69
|
+
"original_face": "api-assets/id/0-0.png",
|
|
70
|
+
}
|
|
71
|
+
],
|
|
72
|
+
"face_swap_mode": "all-faces",
|
|
73
|
+
"image_file_path": "image/id/1234.png",
|
|
74
|
+
"video_file_path": "/path/to/1234.mp4",
|
|
75
|
+
"video_source": "file",
|
|
76
|
+
},
|
|
77
|
+
end_seconds=15.0,
|
|
78
|
+
start_seconds=0.0,
|
|
79
|
+
name="Face Swap video",
|
|
80
|
+
style={"version": "default"},
|
|
81
|
+
wait_for_completion=True,
|
|
82
|
+
download_outputs=True,
|
|
83
|
+
download_directory="outputs"
|
|
84
|
+
)
|
|
85
|
+
```
|
|
86
|
+
|
|
87
|
+
<!-- CUSTOM DOCS END -->
|
|
1
88
|
|
|
2
89
|
### Face Swap video <a name="create"></a>
|
|
3
90
|
|
|
@@ -10,14 +97,22 @@ Get more information about this mode at our [product page](https://magichour.ai/
|
|
|
10
97
|
|
|
11
98
|
#### Parameters
|
|
12
99
|
|
|
13
|
-
| Parameter | Required | Description | Example |
|
|
14
|
-
|
|
15
|
-
| `assets` | ✓ | Provide the assets for face swap. For video, The `video_source` field determines whether `video_file_path` or `youtube_url` field is used | `{"face_mappings": [{"new_face": "api-assets/id/1234.png", "original_face": "api-assets/id/0-0.png"}], "face_swap_mode": "all-faces", "image_file_path": "image/id/1234.png", "video_file_path": "api-assets/id/1234.mp4", "video_source": "file"}` |
|
|
16
|
-
| `
|
|
17
|
-
| `
|
|
18
|
-
| `
|
|
19
|
-
| `
|
|
20
|
-
| `
|
|
100
|
+
| Parameter | Required | Deprecated | Description | Example |
|
|
101
|
+
|-----------|:--------:|:----------:|-------------|--------|
|
|
102
|
+
| `assets` | ✓ | ✗ | Provide the assets for face swap. For video, The `video_source` field determines whether `video_file_path` or `youtube_url` field is used | `{"face_mappings": [{"new_face": "api-assets/id/1234.png", "original_face": "api-assets/id/0-0.png"}], "face_swap_mode": "all-faces", "image_file_path": "image/id/1234.png", "video_file_path": "api-assets/id/1234.mp4", "video_source": "file"}` |
|
|
103
|
+
| `└─ face_mappings` | ✗ | — | This is the array of face mappings used for multiple face swap. The value is required if `face_swap_mode` is `individual-faces`. | `[{"new_face": "api-assets/id/1234.png", "original_face": "api-assets/id/0-0.png"}]` |
|
|
104
|
+
| `└─ face_swap_mode` | ✗ | — | The mode of face swap. * `all-faces` - Swap all faces in the target image or video. `image_file_path` is required. * `individual-faces` - Swap individual faces in the target image or video. `face_mappings` is required. | `"all-faces"` |
|
|
105
|
+
| `└─ image_file_path` | ✗ | — | The path of the input image with the face to be swapped. The value is required if `face_swap_mode` is `all-faces`. This value is either - a direct URL to the image file - `file_path` field from the response of the [upload urls API](https://docs.magichour.ai/api-reference/files/generate-asset-upload-urls). Please refer to the [Input File documentation](https://docs.magichour.ai/api-reference/files/generate-asset-upload-urls#input-file) to learn more. | `"image/id/1234.png"` |
|
|
106
|
+
| `└─ video_file_path` | ✗ | — | Required if `video_source` is `file`. This value is either - a direct URL to the video file - `file_path` field from the response of the [upload urls API](https://docs.magichour.ai/api-reference/files/generate-asset-upload-urls). Please refer to the [Input File documentation](https://docs.magichour.ai/api-reference/files/generate-asset-upload-urls#input-file) to learn more. | `"api-assets/id/1234.mp4"` |
|
|
107
|
+
| `└─ video_source` | ✓ | — | Determines whether `video_file_path` or `youtube_url` is used as the video input. | `"file"` |
|
|
108
|
+
| `└─ youtube_url` | ✗ | — | Use a YouTube video as the input source. This field is required if `video_source` is `youtube`. | `"http://www.example.com"` |
|
|
109
|
+
| `end_seconds` | ✓ | ✗ | The end time of the input video in seconds. This value is used to trim the input video. The value must be greater than 0.1 and greater than `start_seconds`. | `15.0` |
|
|
110
|
+
| `start_seconds` | ✓ | ✗ | The start time of the input video in seconds. This value is used to trim the input video. The value must be greater than 0. | `0.0` |
|
|
111
|
+
| `height` | ✗ | ✓ | `height` is deprecated and no longer influences the output video's resolution. Output resolution is determined by the **minimum** of: - The resolution of the input video - The maximum resolution allowed by your subscription tier. See our [pricing page](https://magichour.ai/pricing) for more details. This field is retained only for backward compatibility and will be removed in a future release. | `123` |
|
|
112
|
+
| `name` | ✗ | ✗ | The name of video. This value is mainly used for your own identification of the video. | `"Face Swap video"` |
|
|
113
|
+
| `style` | ✗ | ✗ | Style of the face swap video. | `{"version": "default"}` |
|
|
114
|
+
| `└─ version` | ✗ | — | * `v1` - May preserve skin detail and texture better, but weaker identity preservation. * `v2` - Faster, sharper, better handling of hair and glasses, and stronger identity preservation. (Recommended) * `default` - Use the version we recommend, which will change over time. This is recommended unless you need a specific earlier version. This is the default behavior. | `"default"` |
|
|
115
|
+
| `width` | ✗ | ✓ | `width` is deprecated and no longer influences the output video's resolution. Output resolution is determined by the **minimum** of: - The resolution of the input video - The maximum resolution allowed by your subscription tier. See our [pricing page](https://magichour.ai/pricing) for more details. This field is retained only for backward compatibility and will be removed in a future release. | `123` |
|
|
21
116
|
|
|
22
117
|
#### Synchronous Client
|
|
23
118
|
|
|
@@ -42,6 +137,7 @@ res = client.v1.face_swap.create(
|
|
|
42
137
|
end_seconds=15.0,
|
|
43
138
|
start_seconds=0.0,
|
|
44
139
|
name="Face Swap video",
|
|
140
|
+
style={"version": "default"},
|
|
45
141
|
)
|
|
46
142
|
|
|
47
143
|
```
|
|
@@ -69,6 +165,7 @@ res = await client.v1.face_swap.create(
|
|
|
69
165
|
end_seconds=15.0,
|
|
70
166
|
start_seconds=0.0,
|
|
71
167
|
name="Face Swap video",
|
|
168
|
+
style={"version": "default"},
|
|
72
169
|
)
|
|
73
170
|
|
|
74
171
|
```
|