magic_hour 0.35.0__py3-none-any.whl → 0.36.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of magic_hour might be problematic.
- magic_hour/README.md +35 -0
- magic_hour/core/base_client.py +6 -5
- magic_hour/core/query.py +12 -6
- magic_hour/core/request.py +3 -3
- magic_hour/core/response.py +18 -14
- magic_hour/core/utils.py +3 -3
- magic_hour/environment.py +1 -1
- magic_hour/helpers/__init__.py +3 -0
- magic_hour/helpers/download.py +75 -0
- magic_hour/resources/v1/README.md +33 -0
- magic_hour/resources/v1/ai_clothes_changer/README.md +73 -0
- magic_hour/resources/v1/ai_clothes_changer/client.py +146 -0
- magic_hour/resources/v1/ai_face_editor/README.md +110 -0
- magic_hour/resources/v1/ai_face_editor/client.py +168 -0
- magic_hour/resources/v1/ai_gif_generator/README.md +59 -0
- magic_hour/resources/v1/ai_gif_generator/client.py +119 -0
- magic_hour/resources/v1/ai_headshot_generator/README.md +60 -0
- magic_hour/resources/v1/ai_headshot_generator/client.py +140 -0
- magic_hour/resources/v1/ai_image_editor/README.md +64 -0
- magic_hour/resources/v1/ai_image_editor/client.py +136 -0
- magic_hour/resources/v1/ai_image_generator/README.md +66 -0
- magic_hour/resources/v1/ai_image_generator/client.py +139 -0
- magic_hour/resources/v1/ai_image_upscaler/README.md +67 -0
- magic_hour/resources/v1/ai_image_upscaler/client.py +150 -0
- magic_hour/resources/v1/ai_meme_generator/README.md +71 -0
- magic_hour/resources/v1/ai_meme_generator/client.py +127 -0
- magic_hour/resources/v1/ai_photo_editor/README.md +98 -7
- magic_hour/resources/v1/ai_photo_editor/client.py +174 -0
- magic_hour/resources/v1/ai_qr_code_generator/README.md +63 -0
- magic_hour/resources/v1/ai_qr_code_generator/client.py +123 -0
- magic_hour/resources/v1/ai_talking_photo/README.md +74 -0
- magic_hour/resources/v1/ai_talking_photo/client.py +170 -0
- magic_hour/resources/v1/animation/README.md +100 -0
- magic_hour/resources/v1/animation/client.py +218 -0
- magic_hour/resources/v1/auto_subtitle_generator/README.md +69 -0
- magic_hour/resources/v1/auto_subtitle_generator/client.py +178 -0
- magic_hour/resources/v1/face_detection/README.md +59 -0
- magic_hour/resources/v1/face_detection/__init__.py +10 -2
- magic_hour/resources/v1/face_detection/client.py +179 -0
- magic_hour/resources/v1/face_swap/README.md +105 -8
- magic_hour/resources/v1/face_swap/client.py +242 -0
- magic_hour/resources/v1/face_swap_photo/README.md +84 -0
- magic_hour/resources/v1/face_swap_photo/client.py +172 -0
- magic_hour/resources/v1/files/README.md +6 -0
- magic_hour/resources/v1/files/client.py +350 -0
- magic_hour/resources/v1/files/client_test.py +414 -0
- magic_hour/resources/v1/files/upload_urls/README.md +8 -0
- magic_hour/resources/v1/image_background_remover/README.md +68 -0
- magic_hour/resources/v1/image_background_remover/client.py +130 -0
- magic_hour/resources/v1/image_projects/README.md +8 -0
- magic_hour/resources/v1/image_projects/__init__.py +10 -2
- magic_hour/resources/v1/image_projects/client.py +138 -0
- magic_hour/resources/v1/image_projects/client_test.py +527 -0
- magic_hour/resources/v1/image_to_video/README.md +77 -9
- magic_hour/resources/v1/image_to_video/client.py +186 -0
- magic_hour/resources/v1/lip_sync/README.md +87 -9
- magic_hour/resources/v1/lip_sync/client.py +210 -0
- magic_hour/resources/v1/photo_colorizer/README.md +59 -0
- magic_hour/resources/v1/photo_colorizer/client.py +130 -0
- magic_hour/resources/v1/text_to_video/README.md +68 -0
- magic_hour/resources/v1/text_to_video/client.py +151 -0
- magic_hour/resources/v1/video_projects/README.md +8 -0
- magic_hour/resources/v1/video_projects/__init__.py +10 -2
- magic_hour/resources/v1/video_projects/client.py +137 -0
- magic_hour/resources/v1/video_projects/client_test.py +527 -0
- magic_hour/resources/v1/video_to_video/README.md +98 -10
- magic_hour/resources/v1/video_to_video/client.py +222 -0
- magic_hour/types/params/__init__.py +58 -0
- magic_hour/types/params/v1_ai_clothes_changer_generate_body_assets.py +33 -0
- magic_hour/types/params/v1_ai_face_editor_generate_body_assets.py +17 -0
- magic_hour/types/params/v1_ai_headshot_generator_generate_body_assets.py +17 -0
- magic_hour/types/params/v1_ai_image_editor_generate_body_assets.py +17 -0
- magic_hour/types/params/v1_ai_image_upscaler_generate_body_assets.py +17 -0
- magic_hour/types/params/v1_ai_photo_editor_generate_body_assets.py +17 -0
- magic_hour/types/params/v1_ai_talking_photo_generate_body_assets.py +26 -0
- magic_hour/types/params/v1_animation_generate_body_assets.py +39 -0
- magic_hour/types/params/v1_auto_subtitle_generator_generate_body_assets.py +17 -0
- magic_hour/types/params/v1_face_detection_generate_body_assets.py +17 -0
- magic_hour/types/params/v1_face_swap_create_body.py +12 -0
- magic_hour/types/params/v1_face_swap_create_body_style.py +33 -0
- magic_hour/types/params/v1_face_swap_generate_body_assets.py +56 -0
- magic_hour/types/params/v1_face_swap_generate_body_assets_face_mappings_item.py +25 -0
- magic_hour/types/params/v1_face_swap_photo_generate_body_assets.py +47 -0
- magic_hour/types/params/v1_face_swap_photo_generate_body_assets_face_mappings_item.py +25 -0
- magic_hour/types/params/v1_image_background_remover_generate_body_assets.py +27 -0
- magic_hour/types/params/v1_image_to_video_generate_body_assets.py +17 -0
- magic_hour/types/params/v1_lip_sync_generate_body_assets.py +36 -0
- magic_hour/types/params/v1_photo_colorizer_generate_body_assets.py +17 -0
- magic_hour/types/params/v1_video_to_video_generate_body_assets.py +27 -0
- magic_hour-0.36.0.dist-info/METADATA +303 -0
- {magic_hour-0.35.0.dist-info → magic_hour-0.36.0.dist-info}/RECORD +93 -65
- magic_hour-0.35.0.dist-info/METADATA +0 -166
- {magic_hour-0.35.0.dist-info → magic_hour-0.36.0.dist-info}/LICENSE +0 -0
- {magic_hour-0.35.0.dist-info → magic_hour-0.36.0.dist-info}/WHEEL +0 -0
magic_hour/resources/v1/video_to_video/client.py

@@ -1,3 +1,4 @@
+import logging
 import typing
 import typing_extensions
 
@@ -9,13 +10,127 @@ from magic_hour.core import (
     to_encodable,
     type_utils,
 )
+from magic_hour.resources.v1.files.client import AsyncFilesClient, FilesClient
+from magic_hour.resources.v1.video_projects.client import (
+    AsyncVideoProjectsClient,
+    VideoProjectsClient,
+)
 from magic_hour.types import models, params
 
 
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+
 class VideoToVideoClient:
     def __init__(self, *, base_client: SyncBaseClient):
         self._base_client = base_client
 
+    def generate(
+        self,
+        *,
+        assets: params.V1VideoToVideoGenerateBodyAssets,
+        end_seconds: float,
+        start_seconds: float,
+        style: params.V1VideoToVideoCreateBodyStyle,
+        fps_resolution: typing.Union[
+            typing.Optional[typing_extensions.Literal["FULL", "HALF"]],
+            type_utils.NotGiven,
+        ] = type_utils.NOT_GIVEN,
+        height: typing.Union[
+            typing.Optional[int], type_utils.NotGiven
+        ] = type_utils.NOT_GIVEN,
+        name: typing.Union[
+            typing.Optional[str], type_utils.NotGiven
+        ] = type_utils.NOT_GIVEN,
+        width: typing.Union[
+            typing.Optional[int], type_utils.NotGiven
+        ] = type_utils.NOT_GIVEN,
+        wait_for_completion: bool = True,
+        download_outputs: bool = True,
+        download_directory: typing.Optional[str] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ):
+        """
+        Generate video-to-video (alias for create with additional functionality).
+
+        Create a Video To Video video. The estimated frame cost is calculated using 30 FPS. This amount is deducted from your account balance when a video is queued. Once the video is complete, the cost will be updated based on the actual number of frames rendered.
+
+        Args:
+            fps_resolution: Determines whether the resulting video will have the same frame per second as the original video, or half.
+            height: `height` is deprecated and no longer influences the output video's resolution.
+            name: The name of video. This value is mainly used for your own identification of the video.
+            width: `width` is deprecated and no longer influences the output video's resolution.
+            assets: Provide the assets for video-to-video. For video, The `video_source` field determines whether `video_file_path` or `youtube_url` field is used
+            end_seconds: The end time of the input video in seconds. This value is used to trim the input video. The value must be greater than 0.1, and more than the start_seconds.
+            start_seconds: The start time of the input video in seconds. This value is used to trim the input video. The value must be greater than 0.
+            style: V1VideoToVideoCreateBodyStyle
+            wait_for_completion: Whether to wait for the video project to complete
+            download_outputs: Whether to download the outputs
+            download_directory: The directory to download the outputs to. If not provided, the outputs will be downloaded to the current working directory
+            request_options: Additional options to customize the HTTP request
+
+        Returns:
+            V1VideoProjectsGetResponseWithDownloads: The response from the Video-to-Video API with the downloaded paths if `download_outputs` is True.
+
+        Examples:
+        ```py
+        response = client.v1.video_to_video.generate(
+            assets={
+                "video_file_path": "path/to/video.mp4",
+                "video_source": "file",
+            },
+            end_seconds=15.0,
+            start_seconds=0.0,
+            style={
+                "art_style": "3D Render",
+                "model": "default",
+                "prompt": "cyberpunk city",
+                "prompt_type": "default",
+                "version": "default",
+            },
+            fps_resolution="HALF",
+            wait_for_completion=True,
+            download_outputs=True,
+            download_directory="outputs/",
+        )
+        ```
+        """
+
+        file_client = FilesClient(base_client=self._base_client)
+
+        # Upload video file if video_source is "file" and video_file_path is provided
+        if (
+            assets.get("video_source") == "file"
+            and "video_file_path" in assets
+            and assets["video_file_path"]
+        ):
+            video_file_path = assets["video_file_path"]
+            assets["video_file_path"] = file_client.upload_file(file=video_file_path)
+
+        create_response = self.create(
+            assets=assets,
+            end_seconds=end_seconds,
+            start_seconds=start_seconds,
+            style=style,
+            fps_resolution=fps_resolution,
+            height=height,
+            name=name,
+            width=width,
+            request_options=request_options,
+        )
+        logger.info(f"Video-to-Video response: {create_response}")
+
+        video_projects_client = VideoProjectsClient(base_client=self._base_client)
+        response = video_projects_client.check_result(
+            id=create_response.id,
+            wait_for_completion=wait_for_completion,
+            download_outputs=download_outputs,
+            download_directory=download_directory,
+        )
+
+        return response
+
     def create(
         self,
         *,
@@ -128,6 +243,113 @@ class AsyncVideoToVideoClient:
     def __init__(self, *, base_client: AsyncBaseClient):
         self._base_client = base_client
 
+    async def generate(
+        self,
+        *,
+        assets: params.V1VideoToVideoGenerateBodyAssets,
+        end_seconds: float,
+        start_seconds: float,
+        style: params.V1VideoToVideoCreateBodyStyle,
+        fps_resolution: typing.Union[
+            typing.Optional[typing_extensions.Literal["FULL", "HALF"]],
+            type_utils.NotGiven,
+        ] = type_utils.NOT_GIVEN,
+        height: typing.Union[
+            typing.Optional[int], type_utils.NotGiven
+        ] = type_utils.NOT_GIVEN,
+        name: typing.Union[
+            typing.Optional[str], type_utils.NotGiven
+        ] = type_utils.NOT_GIVEN,
+        width: typing.Union[
+            typing.Optional[int], type_utils.NotGiven
+        ] = type_utils.NOT_GIVEN,
+        wait_for_completion: bool = True,
+        download_outputs: bool = True,
+        download_directory: typing.Optional[str] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ):
+        """
+        Generate video-to-video (alias for create with additional functionality).
+
+        Create a Video To Video video. The estimated frame cost is calculated using 30 FPS. This amount is deducted from your account balance when a video is queued. Once the video is complete, the cost will be updated based on the actual number of frames rendered.
+
+        Args:
+            fps_resolution: Determines whether the resulting video will have the same frame per second as the original video, or half.
+            height: `height` is deprecated and no longer influences the output video's resolution.
+            name: The name of video. This value is mainly used for your own identification of the video.
+            width: `width` is deprecated and no longer influences the output video's resolution.
+            assets: Provide the assets for video-to-video. For video, The `video_source` field determines whether `video_file_path` or `youtube_url` field is used
+            end_seconds: The end time of the input video in seconds. This value is used to trim the input video. The value must be greater than 0.1, and more than the start_seconds.
+            start_seconds: The start time of the input video in seconds. This value is used to trim the input video. The value must be greater than 0.
+            style: V1VideoToVideoCreateBodyStyle
+            wait_for_completion: Whether to wait for the video project to complete
+            download_outputs: Whether to download the outputs
+            download_directory: The directory to download the outputs to. If not provided, the outputs will be downloaded to the current working directory
+            request_options: Additional options to customize the HTTP request
+
+        Returns:
+            V1VideoProjectsGetResponseWithDownloads: The response from the Video-to-Video API with the downloaded paths if `download_outputs` is True.
+
+        Examples:
+        ```py
+        response = await client.v1.video_to_video.generate(
+            assets={
+                "video_file_path": "path/to/video.mp4",
+                "video_source": "file",
+            },
+            end_seconds=15.0,
+            start_seconds=0.0,
+            style={
+                "art_style": "3D Render",
+                "model": "default",
+                "prompt": "cyberpunk city",
+                "prompt_type": "default",
+                "version": "default",
+            },
+            fps_resolution="HALF",
+            wait_for_completion=True,
+            download_outputs=True,
+            download_directory="outputs/",
+        )
+        ```
+        """
+
+        file_client = AsyncFilesClient(base_client=self._base_client)
+
+        # Upload video file if video_source is "file" and video_file_path is provided
+        if (
+            assets.get("video_source") == "file"
+            and "video_file_path" in assets
+            and assets["video_file_path"]
+        ):
+            video_file_path = assets["video_file_path"]
+            assets["video_file_path"] = await file_client.upload_file(
+                file=video_file_path
+            )
+
+        create_response = await self.create(
+            assets=assets,
+            end_seconds=end_seconds,
+            start_seconds=start_seconds,
+            style=style,
+            fps_resolution=fps_resolution,
+            height=height,
+            name=name,
+            width=width,
+            request_options=request_options,
+        )
+        logger.info(f"Video-to-Video response: {create_response}")
+
+        video_projects_client = AsyncVideoProjectsClient(base_client=self._base_client)
+        response = await video_projects_client.check_result(
+            id=create_response.id,
+            wait_for_completion=wait_for_completion,
+            download_outputs=download_outputs,
+            download_directory=download_directory,
+        )
+
+        return response
+
     async def create(
         self,
         *,
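The docstring examples above already show the call shape; for orientation, here is a minimal end-to-end sketch of the new `generate()` helper. It assumes the package's top-level `Client(token=...)` constructor and an API key in an environment variable, neither of which appears in this diff.

```py
import os

from magic_hour import Client  # assumption: top-level client documented in the package README

client = Client(token=os.environ["MAGIC_HOUR_API_KEY"])  # hypothetical env var name

# generate() uploads the local file, queues the render via create(), waits for the
# project to finish, and downloads the outputs in one call.
result = client.v1.video_to_video.generate(
    assets={"video_file_path": "path/to/video.mp4", "video_source": "file"},
    start_seconds=0.0,
    end_seconds=15.0,
    style={
        "art_style": "3D Render",
        "model": "default",
        "prompt": "cyberpunk city",
        "prompt_type": "default",
        "version": "default",
    },
    download_directory="outputs/",
)
print(result)
```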
magic_hour/types/params/__init__.py

@@ -6,6 +6,9 @@ from .v1_ai_clothes_changer_create_body_assets import (
     V1AiClothesChangerCreateBodyAssets,
     _SerializerV1AiClothesChangerCreateBodyAssets,
 )
+from .v1_ai_clothes_changer_generate_body_assets import (
+    V1AiClothesChangerGenerateBodyAssets,
+)
 from .v1_ai_face_editor_create_body import (
     V1AiFaceEditorCreateBody,
     _SerializerV1AiFaceEditorCreateBody,
@@ -18,6 +21,7 @@ from .v1_ai_face_editor_create_body_style import (
     V1AiFaceEditorCreateBodyStyle,
     _SerializerV1AiFaceEditorCreateBodyStyle,
 )
+from .v1_ai_face_editor_generate_body_assets import V1AiFaceEditorGenerateBodyAssets
 from .v1_ai_gif_generator_create_body import (
     V1AiGifGeneratorCreateBody,
     _SerializerV1AiGifGeneratorCreateBody,
@@ -38,6 +42,9 @@ from .v1_ai_headshot_generator_create_body_style import (
     V1AiHeadshotGeneratorCreateBodyStyle,
     _SerializerV1AiHeadshotGeneratorCreateBodyStyle,
 )
+from .v1_ai_headshot_generator_generate_body_assets import (
+    V1AiHeadshotGeneratorGenerateBodyAssets,
+)
 from .v1_ai_image_editor_create_body import (
     V1AiImageEditorCreateBody,
     _SerializerV1AiImageEditorCreateBody,
@@ -50,6 +57,7 @@ from .v1_ai_image_editor_create_body_style import (
     V1AiImageEditorCreateBodyStyle,
     _SerializerV1AiImageEditorCreateBodyStyle,
 )
+from .v1_ai_image_editor_generate_body_assets import V1AiImageEditorGenerateBodyAssets
 from .v1_ai_image_generator_create_body import (
     V1AiImageGeneratorCreateBody,
     _SerializerV1AiImageGeneratorCreateBody,
@@ -70,6 +78,9 @@ from .v1_ai_image_upscaler_create_body_style import (
     V1AiImageUpscalerCreateBodyStyle,
     _SerializerV1AiImageUpscalerCreateBodyStyle,
 )
+from .v1_ai_image_upscaler_generate_body_assets import (
+    V1AiImageUpscalerGenerateBodyAssets,
+)
 from .v1_ai_meme_generator_create_body import (
     V1AiMemeGeneratorCreateBody,
     _SerializerV1AiMemeGeneratorCreateBody,
@@ -90,6 +101,7 @@ from .v1_ai_photo_editor_create_body_style import (
     V1AiPhotoEditorCreateBodyStyle,
     _SerializerV1AiPhotoEditorCreateBodyStyle,
 )
+from .v1_ai_photo_editor_generate_body_assets import V1AiPhotoEditorGenerateBodyAssets
 from .v1_ai_qr_code_generator_create_body import (
     V1AiQrCodeGeneratorCreateBody,
     _SerializerV1AiQrCodeGeneratorCreateBody,
@@ -110,6 +122,7 @@ from .v1_ai_talking_photo_create_body_style import (
     V1AiTalkingPhotoCreateBodyStyle,
     _SerializerV1AiTalkingPhotoCreateBodyStyle,
 )
+from .v1_ai_talking_photo_generate_body_assets import V1AiTalkingPhotoGenerateBodyAssets
 from .v1_animation_create_body import (
     V1AnimationCreateBody,
     _SerializerV1AnimationCreateBody,
@@ -122,6 +135,7 @@ from .v1_animation_create_body_style import (
     V1AnimationCreateBodyStyle,
     _SerializerV1AnimationCreateBodyStyle,
 )
+from .v1_animation_generate_body_assets import V1AnimationGenerateBodyAssets
 from .v1_auto_subtitle_generator_create_body import (
     V1AutoSubtitleGeneratorCreateBody,
     _SerializerV1AutoSubtitleGeneratorCreateBody,
@@ -138,6 +152,9 @@ from .v1_auto_subtitle_generator_create_body_style_custom_config import (
     V1AutoSubtitleGeneratorCreateBodyStyleCustomConfig,
     _SerializerV1AutoSubtitleGeneratorCreateBodyStyleCustomConfig,
 )
+from .v1_auto_subtitle_generator_generate_body_assets import (
+    V1AutoSubtitleGeneratorGenerateBodyAssets,
+)
 from .v1_face_detection_create_body import (
     V1FaceDetectionCreateBody,
     _SerializerV1FaceDetectionCreateBody,
@@ -146,6 +163,7 @@ from .v1_face_detection_create_body_assets import (
     V1FaceDetectionCreateBodyAssets,
     _SerializerV1FaceDetectionCreateBodyAssets,
 )
+from .v1_face_detection_generate_body_assets import V1FaceDetectionGenerateBodyAssets
 from .v1_face_swap_create_body import (
     V1FaceSwapCreateBody,
     _SerializerV1FaceSwapCreateBody,
@@ -158,6 +176,14 @@ from .v1_face_swap_create_body_assets_face_mappings_item import (
     V1FaceSwapCreateBodyAssetsFaceMappingsItem,
     _SerializerV1FaceSwapCreateBodyAssetsFaceMappingsItem,
 )
+from .v1_face_swap_create_body_style import (
+    V1FaceSwapCreateBodyStyle,
+    _SerializerV1FaceSwapCreateBodyStyle,
+)
+from .v1_face_swap_generate_body_assets import V1FaceSwapGenerateBodyAssets
+from .v1_face_swap_generate_body_assets_face_mappings_item import (
+    V1FaceSwapGenerateBodyAssetsFaceMappingsItem,
+)
 from .v1_face_swap_photo_create_body import (
     V1FaceSwapPhotoCreateBody,
     _SerializerV1FaceSwapPhotoCreateBody,
@@ -170,6 +196,10 @@ from .v1_face_swap_photo_create_body_assets_face_mappings_item import (
     V1FaceSwapPhotoCreateBodyAssetsFaceMappingsItem,
     _SerializerV1FaceSwapPhotoCreateBodyAssetsFaceMappingsItem,
 )
+from .v1_face_swap_photo_generate_body_assets import V1FaceSwapPhotoGenerateBodyAssets
+from .v1_face_swap_photo_generate_body_assets_face_mappings_item import (
+    V1FaceSwapPhotoGenerateBodyAssetsFaceMappingsItem,
+)
 from .v1_files_upload_urls_create_body import (
     V1FilesUploadUrlsCreateBody,
     _SerializerV1FilesUploadUrlsCreateBody,
@@ -186,6 +216,9 @@ from .v1_image_background_remover_create_body_assets import (
     V1ImageBackgroundRemoverCreateBodyAssets,
     _SerializerV1ImageBackgroundRemoverCreateBodyAssets,
 )
+from .v1_image_background_remover_generate_body_assets import (
+    V1ImageBackgroundRemoverGenerateBodyAssets,
+)
 from .v1_image_to_video_create_body import (
     V1ImageToVideoCreateBody,
     _SerializerV1ImageToVideoCreateBody,
@@ -198,11 +231,13 @@ from .v1_image_to_video_create_body_style import (
     V1ImageToVideoCreateBodyStyle,
     _SerializerV1ImageToVideoCreateBodyStyle,
 )
+from .v1_image_to_video_generate_body_assets import V1ImageToVideoGenerateBodyAssets
 from .v1_lip_sync_create_body import V1LipSyncCreateBody, _SerializerV1LipSyncCreateBody
 from .v1_lip_sync_create_body_assets import (
     V1LipSyncCreateBodyAssets,
     _SerializerV1LipSyncCreateBodyAssets,
 )
+from .v1_lip_sync_generate_body_assets import V1LipSyncGenerateBodyAssets
 from .v1_photo_colorizer_create_body import (
     V1PhotoColorizerCreateBody,
     _SerializerV1PhotoColorizerCreateBody,
@@ -211,6 +246,7 @@ from .v1_photo_colorizer_create_body_assets import (
     V1PhotoColorizerCreateBodyAssets,
     _SerializerV1PhotoColorizerCreateBodyAssets,
 )
+from .v1_photo_colorizer_generate_body_assets import V1PhotoColorizerGenerateBodyAssets
 from .v1_text_to_video_create_body import (
     V1TextToVideoCreateBody,
     _SerializerV1TextToVideoCreateBody,
@@ -231,68 +267,89 @@ from .v1_video_to_video_create_body_style import (
     V1VideoToVideoCreateBodyStyle,
     _SerializerV1VideoToVideoCreateBodyStyle,
 )
+from .v1_video_to_video_generate_body_assets import V1VideoToVideoGenerateBodyAssets
 
 
 __all__ = [
     "V1AiClothesChangerCreateBody",
     "V1AiClothesChangerCreateBodyAssets",
+    "V1AiClothesChangerGenerateBodyAssets",
     "V1AiFaceEditorCreateBody",
     "V1AiFaceEditorCreateBodyAssets",
     "V1AiFaceEditorCreateBodyStyle",
+    "V1AiFaceEditorGenerateBodyAssets",
     "V1AiGifGeneratorCreateBody",
     "V1AiGifGeneratorCreateBodyStyle",
     "V1AiHeadshotGeneratorCreateBody",
     "V1AiHeadshotGeneratorCreateBodyAssets",
     "V1AiHeadshotGeneratorCreateBodyStyle",
+    "V1AiHeadshotGeneratorGenerateBodyAssets",
    "V1AiImageEditorCreateBody",
     "V1AiImageEditorCreateBodyAssets",
     "V1AiImageEditorCreateBodyStyle",
+    "V1AiImageEditorGenerateBodyAssets",
     "V1AiImageGeneratorCreateBody",
     "V1AiImageGeneratorCreateBodyStyle",
     "V1AiImageUpscalerCreateBody",
     "V1AiImageUpscalerCreateBodyAssets",
     "V1AiImageUpscalerCreateBodyStyle",
+    "V1AiImageUpscalerGenerateBodyAssets",
     "V1AiMemeGeneratorCreateBody",
     "V1AiMemeGeneratorCreateBodyStyle",
     "V1AiPhotoEditorCreateBody",
     "V1AiPhotoEditorCreateBodyAssets",
     "V1AiPhotoEditorCreateBodyStyle",
+    "V1AiPhotoEditorGenerateBodyAssets",
     "V1AiQrCodeGeneratorCreateBody",
     "V1AiQrCodeGeneratorCreateBodyStyle",
     "V1AiTalkingPhotoCreateBody",
     "V1AiTalkingPhotoCreateBodyAssets",
     "V1AiTalkingPhotoCreateBodyStyle",
+    "V1AiTalkingPhotoGenerateBodyAssets",
     "V1AnimationCreateBody",
     "V1AnimationCreateBodyAssets",
     "V1AnimationCreateBodyStyle",
+    "V1AnimationGenerateBodyAssets",
     "V1AutoSubtitleGeneratorCreateBody",
     "V1AutoSubtitleGeneratorCreateBodyAssets",
     "V1AutoSubtitleGeneratorCreateBodyStyle",
     "V1AutoSubtitleGeneratorCreateBodyStyleCustomConfig",
+    "V1AutoSubtitleGeneratorGenerateBodyAssets",
     "V1FaceDetectionCreateBody",
     "V1FaceDetectionCreateBodyAssets",
+    "V1FaceDetectionGenerateBodyAssets",
     "V1FaceSwapCreateBody",
     "V1FaceSwapCreateBodyAssets",
     "V1FaceSwapCreateBodyAssetsFaceMappingsItem",
+    "V1FaceSwapCreateBodyStyle",
+    "V1FaceSwapGenerateBodyAssets",
+    "V1FaceSwapGenerateBodyAssetsFaceMappingsItem",
     "V1FaceSwapPhotoCreateBody",
     "V1FaceSwapPhotoCreateBodyAssets",
     "V1FaceSwapPhotoCreateBodyAssetsFaceMappingsItem",
+    "V1FaceSwapPhotoGenerateBodyAssets",
+    "V1FaceSwapPhotoGenerateBodyAssetsFaceMappingsItem",
     "V1FilesUploadUrlsCreateBody",
     "V1FilesUploadUrlsCreateBodyItemsItem",
     "V1ImageBackgroundRemoverCreateBody",
     "V1ImageBackgroundRemoverCreateBodyAssets",
+    "V1ImageBackgroundRemoverGenerateBodyAssets",
     "V1ImageToVideoCreateBody",
     "V1ImageToVideoCreateBodyAssets",
     "V1ImageToVideoCreateBodyStyle",
+    "V1ImageToVideoGenerateBodyAssets",
     "V1LipSyncCreateBody",
     "V1LipSyncCreateBodyAssets",
+    "V1LipSyncGenerateBodyAssets",
     "V1PhotoColorizerCreateBody",
     "V1PhotoColorizerCreateBodyAssets",
+    "V1PhotoColorizerGenerateBodyAssets",
     "V1TextToVideoCreateBody",
     "V1TextToVideoCreateBodyStyle",
     "V1VideoToVideoCreateBody",
     "V1VideoToVideoCreateBodyAssets",
     "V1VideoToVideoCreateBodyStyle",
+    "V1VideoToVideoGenerateBodyAssets",
     "_SerializerV1AiClothesChangerCreateBody",
     "_SerializerV1AiClothesChangerCreateBodyAssets",
     "_SerializerV1AiFaceEditorCreateBody",
@@ -333,6 +390,7 @@ __all__ = [
     "_SerializerV1FaceSwapCreateBody",
     "_SerializerV1FaceSwapCreateBodyAssets",
     "_SerializerV1FaceSwapCreateBodyAssetsFaceMappingsItem",
+    "_SerializerV1FaceSwapCreateBodyStyle",
     "_SerializerV1FaceSwapPhotoCreateBody",
     "_SerializerV1FaceSwapPhotoCreateBodyAssets",
     "_SerializerV1FaceSwapPhotoCreateBodyAssetsFaceMappingsItem",
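The net effect of these `__init__.py` changes is that each new `*GenerateBodyAssets` TypedDict is re-exported from `magic_hour.types.params` next to its `*CreateBody*` counterpart. A small sketch (the import path and type name come from the diff; the dict values are placeholders):

```py
from magic_hour.types import params

# Generate-style assets accept local file paths as well as direct URLs or existing
# api-assets paths, matching the docstrings of the new modules below.
assets: params.V1VideoToVideoGenerateBodyAssets = {
    "video_file_path": "path/to/video.mp4",
    "video_source": "file",
}
```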
magic_hour/types/params/v1_ai_clothes_changer_generate_body_assets.py (new file)

@@ -0,0 +1,33 @@
+import pydantic
+import typing_extensions
+
+
+class V1AiClothesChangerGenerateBodyAssets(typing_extensions.TypedDict):
+    """
+    Provide the assets for clothes changer
+    """
+
+    garment_file_path: typing_extensions.Required[str]
+    """
+    The image of the outfit. This value is either
+    - a direct URL to the image file
+    - a path to a local file
+
+    Note: if the path begins with `api-assets`, it will be assumed to already be uploaded to Magic Hour's storage, and will not be uploaded again.
+    """
+
+    garment_type: typing_extensions.Required[
+        typing_extensions.Literal["dresses", "lower_body", "upper_body"]
+    ]
+    """
+    The type of the outfit.
+    """
+
+    person_file_path: typing_extensions.Required[str]
+    """
+    The image with the person. This value is either
+    - a direct URL to the image file
+    - a path to a local file
+
+    Note: if the path begins with `api-assets`, it will be assumed to already be uploaded to Magic Hour's storage, and will not be uploaded again.
+    """
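For reference, a hypothetical `V1AiClothesChangerGenerateBodyAssets` value built from the fields defined above; the file paths are placeholders.

```py
from magic_hour.types import params

assets: params.V1AiClothesChangerGenerateBodyAssets = {
    "person_file_path": "path/to/person.jpg",    # local file, direct URL, or api-assets path
    "garment_file_path": "path/to/garment.png",  # local file, direct URL, or api-assets path
    "garment_type": "upper_body",                # "dresses", "lower_body", or "upper_body"
}
```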
magic_hour/types/params/v1_ai_face_editor_generate_body_assets.py (new file)

@@ -0,0 +1,17 @@
+import pydantic
+import typing_extensions
+
+
+class V1AiFaceEditorGenerateBodyAssets(typing_extensions.TypedDict):
+    """
+    Provide the assets for face editor
+    """
+
+    image_file_path: typing_extensions.Required[str]
+    """
+    This is the image whose face will be edited. This value is either
+    - a direct URL to the image file
+    - a path to a local file
+
+    Note: if the path begins with `api-assets`, it will be assumed to already be uploaded to Magic Hour's storage, and will not be uploaded again.
+    """
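The remaining single-image asset types added in this release (headshot generator, image editor, image upscaler, and photo editor, below) share this one-field shape; only the class name and docstring differ. A placeholder value for the face editor variant:

```py
from magic_hour.types import params

assets: params.V1AiFaceEditorGenerateBodyAssets = {
    "image_file_path": "path/to/portrait.jpg",  # local file, direct URL, or api-assets path
}
```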
magic_hour/types/params/v1_ai_headshot_generator_generate_body_assets.py (new file)

@@ -0,0 +1,17 @@
+import pydantic
+import typing_extensions
+
+
+class V1AiHeadshotGeneratorGenerateBodyAssets(typing_extensions.TypedDict):
+    """
+    Provide the assets for headshot photo
+    """
+
+    image_file_path: typing_extensions.Required[str]
+    """
+    The image used to generate the headshot. This image must contain one detectable face. This value is either
+    - a direct URL to the image file
+    - a path to a local file
+
+    Note: if the path begins with `api-assets`, it will be assumed to already be uploaded to Magic Hour's storage, and will not be uploaded again.
+    """
magic_hour/types/params/v1_ai_image_editor_generate_body_assets.py (new file)

@@ -0,0 +1,17 @@
+import pydantic
+import typing_extensions
+
+
+class V1AiImageEditorGenerateBodyAssets(typing_extensions.TypedDict):
+    """
+    Provide the assets for image edit
+    """
+
+    image_file_path: typing_extensions.Required[str]
+    """
+    The image used in the edit. This value is either
+    - a direct URL to the image file
+    - a path to a local file
+
+    Note: if the path begins with `api-assets`, it will be assumed to already be uploaded to Magic Hour's storage, and will not be uploaded again.
+    """
magic_hour/types/params/v1_ai_image_upscaler_generate_body_assets.py (new file)

@@ -0,0 +1,17 @@
+import pydantic
+import typing_extensions
+
+
+class V1AiImageUpscalerGenerateBodyAssets(typing_extensions.TypedDict):
+    """
+    Provide the assets for upscaling
+    """
+
+    image_file_path: typing_extensions.Required[str]
+    """
+    The image to upscale. This value is either
+    - a direct URL to the image file
+    - a path to a local file
+
+    Note: if the path begins with `api-assets`, it will be assumed to already be uploaded to Magic Hour's storage, and will not be uploaded again.
+    """
magic_hour/types/params/v1_ai_photo_editor_generate_body_assets.py (new file)

@@ -0,0 +1,17 @@
+import pydantic
+import typing_extensions
+
+
+class V1AiPhotoEditorGenerateBodyAssets(typing_extensions.TypedDict):
+    """
+    Provide the assets for photo editor
+    """
+
+    image_file_path: typing_extensions.Required[str]
+    """
+    The image used to generate the output. This value is either
+    - a direct URL to the image file
+    - a path to a local file
+
+    Note: if the path begins with `api-assets`, it will be assumed to already be uploaded to Magic Hour's storage, and will not be uploaded again.
+    """
magic_hour/types/params/v1_ai_talking_photo_generate_body_assets.py (new file)

@@ -0,0 +1,26 @@
+import pydantic
+import typing_extensions
+
+
+class V1AiTalkingPhotoGenerateBodyAssets(typing_extensions.TypedDict):
+    """
+    Provide the assets for creating a talking photo
+    """
+
+    audio_file_path: typing_extensions.Required[str]
+    """
+    The audio file to sync with the image. This value is either
+    - a direct URL to the video file
+    - a path to a local file
+
+    Note: if the path begins with `api-assets`, it will be assumed to already be uploaded to Magic Hour's storage, and will not be uploaded again.
+    """
+
+    image_file_path: typing_extensions.Required[str]
+    """
+    The source image to animate. This value is either
+    - a direct URL to the video file
+    - a path to a local file
+
+    Note: if the path begins with `api-assets`, it will be assumed to already be uploaded to Magic Hour's storage, and will not be uploaded again.
+    """
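A hypothetical `V1AiTalkingPhotoGenerateBodyAssets` value; both fields are required, and, as with the other asset types, each may be a local path, a direct URL, or an existing `api-assets` path.

```py
from magic_hour.types import params

assets: params.V1AiTalkingPhotoGenerateBodyAssets = {
    "image_file_path": "path/to/portrait.jpg",  # the still image to animate
    "audio_file_path": "path/to/speech.mp3",    # the audio to sync with the image
}
```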
magic_hour/types/params/v1_animation_generate_body_assets.py (new file)

@@ -0,0 +1,39 @@
+import pydantic
+import typing
+import typing_extensions
+
+
+class V1AnimationGenerateBodyAssets(typing_extensions.TypedDict):
+    """
+    Provide the assets for animation.
+    """
+
+    audio_file_path: typing_extensions.NotRequired[str]
+    """
+    The path of the input audio. This field is required if `audio_source` is `file`. This value is either
+    - a direct URL to the video file
+    - a path to a local file
+
+    Note: if the path begins with `api-assets`, it will be assumed to already be uploaded to Magic Hour's storage, and will not be uploaded again.
+    """
+
+    audio_source: typing_extensions.Required[
+        typing_extensions.Literal["file", "none", "youtube"]
+    ]
+    """
+    Optionally add an audio source if you'd like to incorporate audio into your video
+    """
+
+    image_file_path: typing_extensions.NotRequired[str]
+    """
+    An initial image to use a the first frame of the video. This value is either
+    - a direct URL to the image file
+    - a path to a local file
+
+    Note: if the path begins with `api-assets`, it will be assumed to already be uploaded to Magic Hour's storage, and will not be uploaded again.
+    """
+
+    youtube_url: typing_extensions.NotRequired[str]
+    """
+    Using a youtube video as the input source. This field is required if `audio_source` is `youtube`
+    """
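A hypothetical `V1AnimationGenerateBodyAssets` value: `audio_source` is the only required key, and it determines whether `audio_file_path` or `youtube_url` must also be supplied, as described in the docstrings above.

```py
from magic_hour.types import params

assets: params.V1AnimationGenerateBodyAssets = {
    "audio_source": "file",                        # "file", "none", or "youtube"
    "audio_file_path": "path/to/track.mp3",        # required because audio_source == "file"
    "image_file_path": "path/to/first_frame.png",  # optional initial frame for the video
}
```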