magic_hour 0.35.0__py3-none-any.whl → 0.36.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of magic_hour might be problematic.
- magic_hour/README.md +35 -0
- magic_hour/core/base_client.py +6 -5
- magic_hour/core/query.py +12 -6
- magic_hour/core/request.py +3 -3
- magic_hour/core/response.py +18 -14
- magic_hour/core/utils.py +3 -3
- magic_hour/environment.py +1 -1
- magic_hour/helpers/__init__.py +3 -0
- magic_hour/helpers/download.py +75 -0
- magic_hour/resources/v1/README.md +33 -0
- magic_hour/resources/v1/ai_clothes_changer/README.md +73 -0
- magic_hour/resources/v1/ai_clothes_changer/client.py +146 -0
- magic_hour/resources/v1/ai_face_editor/README.md +110 -0
- magic_hour/resources/v1/ai_face_editor/client.py +168 -0
- magic_hour/resources/v1/ai_gif_generator/README.md +59 -0
- magic_hour/resources/v1/ai_gif_generator/client.py +119 -0
- magic_hour/resources/v1/ai_headshot_generator/README.md +60 -0
- magic_hour/resources/v1/ai_headshot_generator/client.py +140 -0
- magic_hour/resources/v1/ai_image_editor/README.md +64 -0
- magic_hour/resources/v1/ai_image_editor/client.py +136 -0
- magic_hour/resources/v1/ai_image_generator/README.md +66 -0
- magic_hour/resources/v1/ai_image_generator/client.py +139 -0
- magic_hour/resources/v1/ai_image_upscaler/README.md +67 -0
- magic_hour/resources/v1/ai_image_upscaler/client.py +150 -0
- magic_hour/resources/v1/ai_meme_generator/README.md +71 -0
- magic_hour/resources/v1/ai_meme_generator/client.py +127 -0
- magic_hour/resources/v1/ai_photo_editor/README.md +98 -7
- magic_hour/resources/v1/ai_photo_editor/client.py +174 -0
- magic_hour/resources/v1/ai_qr_code_generator/README.md +63 -0
- magic_hour/resources/v1/ai_qr_code_generator/client.py +123 -0
- magic_hour/resources/v1/ai_talking_photo/README.md +74 -0
- magic_hour/resources/v1/ai_talking_photo/client.py +170 -0
- magic_hour/resources/v1/animation/README.md +100 -0
- magic_hour/resources/v1/animation/client.py +218 -0
- magic_hour/resources/v1/auto_subtitle_generator/README.md +69 -0
- magic_hour/resources/v1/auto_subtitle_generator/client.py +178 -0
- magic_hour/resources/v1/face_detection/README.md +59 -0
- magic_hour/resources/v1/face_detection/__init__.py +10 -2
- magic_hour/resources/v1/face_detection/client.py +179 -0
- magic_hour/resources/v1/face_swap/README.md +105 -8
- magic_hour/resources/v1/face_swap/client.py +242 -0
- magic_hour/resources/v1/face_swap_photo/README.md +84 -0
- magic_hour/resources/v1/face_swap_photo/client.py +172 -0
- magic_hour/resources/v1/files/README.md +40 -0
- magic_hour/resources/v1/files/client.py +350 -0
- magic_hour/resources/v1/files/client_test.py +414 -0
- magic_hour/resources/v1/files/upload_urls/README.md +8 -0
- magic_hour/resources/v1/image_background_remover/README.md +68 -0
- magic_hour/resources/v1/image_background_remover/client.py +130 -0
- magic_hour/resources/v1/image_projects/README.md +52 -0
- magic_hour/resources/v1/image_projects/__init__.py +10 -2
- magic_hour/resources/v1/image_projects/client.py +138 -0
- magic_hour/resources/v1/image_projects/client_test.py +527 -0
- magic_hour/resources/v1/image_to_video/README.md +77 -9
- magic_hour/resources/v1/image_to_video/client.py +186 -0
- magic_hour/resources/v1/lip_sync/README.md +87 -9
- magic_hour/resources/v1/lip_sync/client.py +210 -0
- magic_hour/resources/v1/photo_colorizer/README.md +59 -0
- magic_hour/resources/v1/photo_colorizer/client.py +130 -0
- magic_hour/resources/v1/text_to_video/README.md +68 -0
- magic_hour/resources/v1/text_to_video/client.py +151 -0
- magic_hour/resources/v1/video_projects/README.md +52 -0
- magic_hour/resources/v1/video_projects/__init__.py +10 -2
- magic_hour/resources/v1/video_projects/client.py +137 -0
- magic_hour/resources/v1/video_projects/client_test.py +527 -0
- magic_hour/resources/v1/video_to_video/README.md +98 -10
- magic_hour/resources/v1/video_to_video/client.py +222 -0
- magic_hour/types/params/__init__.py +58 -0
- magic_hour/types/params/v1_ai_clothes_changer_generate_body_assets.py +33 -0
- magic_hour/types/params/v1_ai_face_editor_generate_body_assets.py +17 -0
- magic_hour/types/params/v1_ai_headshot_generator_generate_body_assets.py +17 -0
- magic_hour/types/params/v1_ai_image_editor_generate_body_assets.py +17 -0
- magic_hour/types/params/v1_ai_image_upscaler_generate_body_assets.py +17 -0
- magic_hour/types/params/v1_ai_photo_editor_generate_body_assets.py +17 -0
- magic_hour/types/params/v1_ai_talking_photo_generate_body_assets.py +26 -0
- magic_hour/types/params/v1_animation_generate_body_assets.py +39 -0
- magic_hour/types/params/v1_auto_subtitle_generator_generate_body_assets.py +17 -0
- magic_hour/types/params/v1_face_detection_generate_body_assets.py +17 -0
- magic_hour/types/params/v1_face_swap_create_body.py +12 -0
- magic_hour/types/params/v1_face_swap_create_body_style.py +33 -0
- magic_hour/types/params/v1_face_swap_generate_body_assets.py +56 -0
- magic_hour/types/params/v1_face_swap_generate_body_assets_face_mappings_item.py +25 -0
- magic_hour/types/params/v1_face_swap_photo_generate_body_assets.py +47 -0
- magic_hour/types/params/v1_face_swap_photo_generate_body_assets_face_mappings_item.py +25 -0
- magic_hour/types/params/v1_image_background_remover_generate_body_assets.py +27 -0
- magic_hour/types/params/v1_image_to_video_generate_body_assets.py +17 -0
- magic_hour/types/params/v1_lip_sync_generate_body_assets.py +36 -0
- magic_hour/types/params/v1_photo_colorizer_generate_body_assets.py +17 -0
- magic_hour/types/params/v1_video_to_video_generate_body_assets.py +27 -0
- magic_hour-0.36.1.dist-info/METADATA +306 -0
- {magic_hour-0.35.0.dist-info → magic_hour-0.36.1.dist-info}/RECORD +93 -65
- magic_hour-0.35.0.dist-info/METADATA +0 -166
- {magic_hour-0.35.0.dist-info → magic_hour-0.36.1.dist-info}/LICENSE +0 -0
- {magic_hour-0.35.0.dist-info → magic_hour-0.36.1.dist-info}/WHEEL +0 -0
magic_hour/resources/v1/ai_talking_photo/client.py

@@ -1,3 +1,4 @@
+import logging
 import typing
 
 from magic_hour.core import (
@@ -8,13 +9,102 @@ from magic_hour.core import (
     to_encodable,
     type_utils,
 )
+from magic_hour.resources.v1.files.client import AsyncFilesClient, FilesClient
+from magic_hour.resources.v1.video_projects.client import (
+    AsyncVideoProjectsClient,
+    VideoProjectsClient,
+)
 from magic_hour.types import models, params
 
 
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+
 class AiTalkingPhotoClient:
     def __init__(self, *, base_client: SyncBaseClient):
         self._base_client = base_client
 
+    def generate(
+        self,
+        *,
+        assets: params.V1AiTalkingPhotoGenerateBodyAssets,
+        end_seconds: float,
+        start_seconds: float,
+        name: typing.Union[
+            typing.Optional[str], type_utils.NotGiven
+        ] = type_utils.NOT_GIVEN,
+        style: typing.Union[
+            typing.Optional[params.V1AiTalkingPhotoCreateBodyStyle], type_utils.NotGiven
+        ] = type_utils.NOT_GIVEN,
+        wait_for_completion: bool = True,
+        download_outputs: bool = True,
+        download_directory: typing.Optional[str] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ):
+        """
+        Generate talking photo (alias for create with additional functionality).
+
+        Create a talking photo from an image and audio or text input. Each generation costs credits.
+
+        Args:
+            name: The name of image. This value is mainly used for your own identification of the image.
+            style: Attributes used to dictate the style of the output
+            assets: Provide the assets for creating a talking photo
+            end_seconds: The end time of the input audio in seconds. The maximum duration allowed is 60 seconds.
+            start_seconds: The start time of the input audio in seconds. The maximum duration allowed is 60 seconds.
+            wait_for_completion: Whether to wait for the video project to complete
+            download_outputs: Whether to download the outputs
+            download_directory: The directory to download the outputs to. If not provided, the outputs will be downloaded to the current working directory
+            request_options: Additional options to customize the HTTP request
+
+        Returns:
+            V1VideoProjectsGetResponseWithDownloads: The response from the AI Talking Photo API with the downloaded paths if `download_outputs` is True.
+
+        Examples:
+        ```py
+        response = client.v1.ai_talking_photo.generate(
+            assets={
+                "audio_file_path": "path/to/audio.mp3",
+                "image_file_path": "path/to/image.png",
+            },
+            end_seconds=30.0,
+            start_seconds=5.0,
+            style={"enhancement": "high"},
+            wait_for_completion=True,
+            download_outputs=True,
+            download_directory="outputs/",
+        )
+        ```
+        """
+
+        file_client = FilesClient(base_client=self._base_client)
+
+        audio_file_path = assets["audio_file_path"]
+        image_file_path = assets["image_file_path"]
+        assets["audio_file_path"] = file_client.upload_file(file=audio_file_path)
+        assets["image_file_path"] = file_client.upload_file(file=image_file_path)
+
+        create_response = self.create(
+            assets=assets,
+            end_seconds=end_seconds,
+            start_seconds=start_seconds,
+            name=name,
+            style=style,
+            request_options=request_options,
+        )
+        logger.info(f"AI Talking Photo response: {create_response}")
+
+        video_projects_client = VideoProjectsClient(base_client=self._base_client)
+        response = video_projects_client.check_result(
+            id=create_response.id,
+            wait_for_completion=wait_for_completion,
+            download_outputs=download_outputs,
+            download_directory=download_directory,
+        )
+
+        return response
+
     def create(
         self,
         *,
@@ -88,6 +178,86 @@ class AsyncAiTalkingPhotoClient:
     def __init__(self, *, base_client: AsyncBaseClient):
         self._base_client = base_client
 
+    async def generate(
+        self,
+        *,
+        assets: params.V1AiTalkingPhotoGenerateBodyAssets,
+        end_seconds: float,
+        start_seconds: float,
+        name: typing.Union[
+            typing.Optional[str], type_utils.NotGiven
+        ] = type_utils.NOT_GIVEN,
+        style: typing.Union[
+            typing.Optional[params.V1AiTalkingPhotoCreateBodyStyle], type_utils.NotGiven
+        ] = type_utils.NOT_GIVEN,
+        wait_for_completion: bool = True,
+        download_outputs: bool = True,
+        download_directory: typing.Optional[str] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ):
+        """
+        Generate talking photo (alias for create with additional functionality).
+
+        Create a talking photo from an image and audio or text input. Each generation costs credits.
+
+        Args:
+            name: The name of image. This value is mainly used for your own identification of the image.
+            style: Attributes used to dictate the style of the output
+            assets: Provide the assets for creating a talking photo
+            end_seconds: The end time of the input audio in seconds. The maximum duration allowed is 60 seconds.
+            start_seconds: The start time of the input audio in seconds. The maximum duration allowed is 60 seconds.
+            wait_for_completion: Whether to wait for the video project to complete
+            download_outputs: Whether to download the outputs
+            download_directory: The directory to download the outputs to. If not provided, the outputs will be downloaded to the current working directory
+            request_options: Additional options to customize the HTTP request
+
+        Returns:
+            V1VideoProjectsGetResponseWithDownloads: The response from the AI Talking Photo API with the downloaded paths if `download_outputs` is True.
+
+        Examples:
+        ```py
+        response = await client.v1.ai_talking_photo.generate(
+            assets={
+                "audio_file_path": "path/to/audio.mp3",
+                "image_file_path": "path/to/image.png",
+            },
+            end_seconds=30.0,
+            start_seconds=5.0,
+            style={"enhancement": "high"},
+            wait_for_completion=True,
+            download_outputs=True,
+            download_directory="outputs/",
+        )
+        ```
+        """
+
+        file_client = AsyncFilesClient(base_client=self._base_client)
+
+        audio_file_path = assets["audio_file_path"]
+        image_file_path = assets["image_file_path"]
+        assets["audio_file_path"] = await file_client.upload_file(file=audio_file_path)
+        assets["image_file_path"] = await file_client.upload_file(file=image_file_path)
+
+        create_response = await self.create(
+            assets=assets,
+            end_seconds=end_seconds,
+            start_seconds=start_seconds,
+            name=name,
+            style=style,
+            request_options=request_options,
+        )
+        logger.info(f"AI Talking Photo response: {create_response}")
+
+        video_projects_client = AsyncVideoProjectsClient(base_client=self._base_client)
+        response = await video_projects_client.check_result(
+            id=create_response.id,
+            wait_for_completion=wait_for_completion,
+            download_outputs=download_outputs,
+            download_directory=download_directory,
+        )
+
+        return response
+
     async def create(
         self,
         *,
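This diff adds the `generate` workflow to the talking-photo clients but does not include a README example for it. A minimal sketch of calling the new sync method, pieced together from the docstring above; the token variable, file paths, style value, and output directory are placeholders rather than values from the package:

```python
from os import getenv

from magic_hour import Client

# Hedged sketch based on the docstring added in this diff; the local asset
# paths and the style value are illustrative placeholders.
client = Client(token=getenv("API_TOKEN"))

response = client.v1.ai_talking_photo.generate(
    assets={
        "audio_file_path": "path/to/audio.mp3",  # uploaded for you via FilesClient
        "image_file_path": "path/to/image.png",  # uploaded for you via FilesClient
    },
    end_seconds=30.0,
    start_seconds=5.0,
    style={"enhancement": "high"},
    wait_for_completion=True,   # poll VideoProjectsClient.check_result until done
    download_outputs=True,      # save outputs locally when the project succeeds
    download_directory="outputs/",
)
```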
magic_hour/resources/v1/animation/README.md

@@ -1,3 +1,92 @@
+# v1_animation
+
+## Module Functions
+
+<!-- CUSTOM DOCS START -->
+
+### Animation Generate Workflow <a name="generate"></a>
+
+The workflow performs the following action
+
+1. upload local assets to Magic Hour storage. So you can pass in a local path instead of having to upload files yourself
+2. trigger a generation
+3. poll for a completion status. This is configurable
+4. if success, download the output to local directory
+
+> [!TIP]
+> This is the recommended way to use the SDK unless you have specific needs where it is necessary to split up the actions.
+
+#### Parameters
+
+In Additional to the parameters listed in the `.create` section below, `.generate` introduces 3 new parameters:
+
+- `wait_for_completion` (bool, default True): Whether to wait for the project to complete.
+- `download_outputs` (bool, default True): Whether to download the generated files
+- `download_directory` (str, optional): Directory to save downloaded files (defaults to current directory)
+
+#### Synchronous Client
+
+```python
+from magic_hour import Client
+from os import getenv
+
+client = Client(token=getenv("API_TOKEN"))
+res = client.v1.animation.generate(
+    assets={
+        "audio_file_path": "/path/to/1234.mp3",
+        "audio_source": "file",
+        "image_file_path": "/path/to/1234.png",
+    },
+    end_seconds=15.0,
+    fps=12.0,
+    height=960,
+    style={
+        "art_style": "Painterly Illustration",
+        "camera_effect": "Simple Zoom In",
+        "prompt": "Cyberpunk city",
+        "prompt_type": "custom",
+        "transition_speed": 5,
+    },
+    width=512,
+    name="Animation video",
+    wait_for_completion=True,
+    download_outputs=True,
+    download_directory="outputs"
+)
+```
+
+#### Asynchronous Client
+
+```python
+from magic_hour import AsyncClient
+from os import getenv
+
+client = AsyncClient(token=getenv("API_TOKEN"))
+res = await client.v1.animation.generate(
+    assets={
+        "audio_file_path": "/path/to/1234.mp3",
+        "audio_source": "file",
+        "image_file_path": "/path/to/1234.png",
+    },
+    end_seconds=15.0,
+    fps=12.0,
+    height=960,
+    style={
+        "art_style": "Painterly Illustration",
+        "camera_effect": "Simple Zoom In",
+        "prompt": "Cyberpunk city",
+        "prompt_type": "custom",
+        "transition_speed": 5,
+    },
+    width=512,
+    name="Animation video",
+    wait_for_completion=True,
+    download_outputs=True,
+    download_directory="outputs"
+)
+```
+
+<!-- CUSTOM DOCS END -->
 
 ### Animation <a name="create"></a>
 
@@ -10,10 +99,20 @@ Create a Animation video. The estimated frame cost is calculated based on the `fps` and `end_seconds` input.
 | Parameter | Required | Description | Example |
 |-----------|:--------:|-------------|--------|
 | `assets` | ✓ | Provide the assets for animation. | `{"audio_file_path": "api-assets/id/1234.mp3", "audio_source": "file", "image_file_path": "api-assets/id/1234.png"}` |
+| `└─ audio_file_path` | ✗ | The path of the input audio. This field is required if `audio_source` is `file`. This value is either - a direct URL to the video file - `file_path` field from the response of the [upload urls API](https://docs.magichour.ai/api-reference/files/generate-asset-upload-urls). Please refer to the [Input File documentation](https://docs.magichour.ai/api-reference/files/generate-asset-upload-urls#input-file) to learn more. | `"api-assets/id/1234.mp3"` |
+| `└─ audio_source` | ✓ | Optionally add an audio source if you'd like to incorporate audio into your video | `"file"` |
+| `└─ image_file_path` | ✗ | An initial image to use a the first frame of the video. This value is either - a direct URL to the video file - `file_path` field from the response of the [upload urls API](https://docs.magichour.ai/api-reference/files/generate-asset-upload-urls). Please refer to the [Input File documentation](https://docs.magichour.ai/api-reference/files/generate-asset-upload-urls#input-file) to learn more. | `"api-assets/id/1234.png"` |
+| `└─ youtube_url` | ✗ | Using a youtube video as the input source. This field is required if `audio_source` is `youtube` | `"http://www.example.com"` |
 | `end_seconds` | ✓ | This value determines the duration of the output video. | `15.0` |
 | `fps` | ✓ | The desire output video frame rate | `12.0` |
 | `height` | ✓ | The height of the final output video. The maximum height depends on your subscription. Please refer to our [pricing page](https://magichour.ai/pricing) for more details | `960` |
 | `style` | ✓ | Defines the style of the output video | `{"art_style": "Painterly Illustration", "camera_effect": "Simple Zoom In", "prompt": "Cyberpunk city", "prompt_type": "custom", "transition_speed": 5}` |
+| `└─ art_style` | ✓ | The art style used to create the output video | `"Painterly Illustration"` |
+| `└─ art_style_custom` | ✗ | Describe custom art style. This field is required if `art_style` is `Custom` | `"string"` |
+| `└─ camera_effect` | ✓ | The camera effect used to create the output video | `"Simple Zoom In"` |
+| `└─ prompt` | ✗ | The prompt used for the video. Prompt is required if `prompt_type` is `custom`. Otherwise this value is ignored | `"Cyberpunk city"` |
+| `└─ prompt_type` | ✓ | * `custom` - Use your own prompt for the video. * `use_lyrics` - Use the lyrics of the audio to create the prompt. If this option is selected, then `assets.audio_source` must be `file` or `youtube`. * `ai_choose` - Let AI write the prompt. If this option is selected, then `assets.audio_source` must be `file` or `youtube`. | `"custom"` |
+| `└─ transition_speed` | ✓ | Change determines how quickly the video's content changes across frames. * Higher = more rapid transitions. * Lower = more stable visual experience. | `5` |
 | `width` | ✓ | The width of the final output video. The maximum width depends on your subscription. Please refer to our [pricing page](https://magichour.ai/pricing) for more details | `512` |
 | `name` | ✗ | The name of video. This value is mainly used for your own identification of the video. | `"Animation video"` |
 
@@ -82,3 +181,4 @@ res = await client.v1.animation.create(
 
 ##### Example
 `{"credits_charged": 450, "estimated_frame_cost": 450, "id": "cuid-example"}`
+
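The parameter table above also documents a YouTube audio source (`audio_source: "youtube"` plus `youtube_url`) and lyric-driven prompts that the fenced README examples do not exercise. A hedged sketch of that variant; the URL and the specific style values are illustrative, not taken from the package:

```python
from os import getenv

from magic_hour import Client

client = Client(token=getenv("API_TOKEN"))

# Illustrative only: per the table, `youtube_url` is required when
# `audio_source` is "youtube", and `prompt_type: "use_lyrics"` needs an
# audio source of "file" or "youtube". No local upload happens on this path,
# so generate() skips the FilesClient step.
res = client.v1.animation.generate(
    assets={
        "audio_source": "youtube",
        "youtube_url": "http://www.example.com",
    },
    end_seconds=15.0,
    fps=12.0,
    height=960,
    style={
        "art_style": "Painterly Illustration",
        "camera_effect": "Simple Zoom In",
        "prompt_type": "use_lyrics",
        "transition_speed": 5,
    },
    width=512,
)
```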
magic_hour/resources/v1/animation/client.py

@@ -1,3 +1,4 @@
+import logging
 import typing
 
 from magic_hour.core import (
@@ -8,13 +9,124 @@ from magic_hour.core import (
     to_encodable,
     type_utils,
 )
+from magic_hour.resources.v1.files.client import AsyncFilesClient, FilesClient
+from magic_hour.resources.v1.video_projects.client import (
+    AsyncVideoProjectsClient,
+    VideoProjectsClient,
+)
 from magic_hour.types import models, params
 
 
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+
 class AnimationClient:
     def __init__(self, *, base_client: SyncBaseClient):
         self._base_client = base_client
 
+    def generate(
+        self,
+        *,
+        assets: params.V1AnimationGenerateBodyAssets,
+        end_seconds: float,
+        fps: float,
+        height: int,
+        style: params.V1AnimationCreateBodyStyle,
+        width: int,
+        name: typing.Union[
+            typing.Optional[str], type_utils.NotGiven
+        ] = type_utils.NOT_GIVEN,
+        wait_for_completion: bool = True,
+        download_outputs: bool = True,
+        download_directory: typing.Optional[str] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ):
+        """
+        Generate animation (alias for create with additional functionality).
+
+        Create a Animation video. The estimated frame cost is calculated based on the `fps` and `end_seconds` input.
+
+        Args:
+            name: The name of video. This value is mainly used for your own identification of the video.
+            assets: Provide the assets for animation.
+            end_seconds: This value determines the duration of the output video.
+            fps: The desire output video frame rate
+            height: The height of the final output video. The maximum height depends on your subscription. Please refer to our [pricing page](https://magichour.ai/pricing) for more details
+            style: Defines the style of the output video
+            width: The width of the final output video. The maximum width depends on your subscription. Please refer to our [pricing page](https://magichour.ai/pricing) for more details
+            wait_for_completion: Whether to wait for the video project to complete
+            download_outputs: Whether to download the outputs
+            download_directory: The directory to download the outputs to. If not provided, the outputs will be downloaded to the current working directory
+            request_options: Additional options to customize the HTTP request
+
+        Returns:
+            V1VideoProjectsGetResponseWithDownloads: The response from the Animation API with the downloaded paths if `download_outputs` is True.
+
+        Examples:
+        ```py
+        response = client.v1.animation.generate(
+            assets={
+                "audio_file_path": "path/to/audio.mp3",
+                "audio_source": "file",
+                "image_file_path": "path/to/image.png",
+            },
+            end_seconds=15.0,
+            fps=12.0,
+            height=960,
+            style={
+                "art_style": "Painterly Illustration",
+                "camera_effect": "Simple Zoom In",
+                "prompt": "Cyberpunk city",
+                "prompt_type": "custom",
+                "transition_speed": 5,
+            },
+            width=512,
+            wait_for_completion=True,
+            download_outputs=True,
+            download_directory="outputs/",
+        )
+        ```
+        """
+
+        file_client = FilesClient(base_client=self._base_client)
+
+        # Upload image file if provided
+        if "image_file_path" in assets and assets["image_file_path"]:
+            image_file_path = assets["image_file_path"]
+            assets["image_file_path"] = file_client.upload_file(file=image_file_path)
+
+        # Upload audio file if audio_source is "file" and audio_file_path is provided
+        if (
+            assets.get("audio_source") == "file"
+            and "audio_file_path" in assets
+            and assets["audio_file_path"]
+        ):
+            audio_file_path = assets["audio_file_path"]
+            assets["audio_file_path"] = file_client.upload_file(file=audio_file_path)
+
+        create_response = self.create(
+            assets=assets,
+            end_seconds=end_seconds,
+            fps=fps,
+            height=height,
+            style=style,
+            width=width,
+            name=name,
+            request_options=request_options,
+        )
+        logger.info(f"Animation response: {create_response}")
+
+        video_projects_client = VideoProjectsClient(base_client=self._base_client)
+        response = video_projects_client.check_result(
+            id=create_response.id,
+            wait_for_completion=wait_for_completion,
+            download_outputs=download_outputs,
+            download_directory=download_directory,
+        )
+
+        return response
+
     def create(
         self,
         *,
@@ -102,6 +214,112 @@ class AsyncAnimationClient:
     def __init__(self, *, base_client: AsyncBaseClient):
         self._base_client = base_client
 
+    async def generate(
+        self,
+        *,
+        assets: params.V1AnimationGenerateBodyAssets,
+        end_seconds: float,
+        fps: float,
+        height: int,
+        style: params.V1AnimationCreateBodyStyle,
+        width: int,
+        name: typing.Union[
+            typing.Optional[str], type_utils.NotGiven
+        ] = type_utils.NOT_GIVEN,
+        wait_for_completion: bool = True,
+        download_outputs: bool = True,
+        download_directory: typing.Optional[str] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ):
+        """
+        Generate animation (alias for create with additional functionality).
+
+        Create a Animation video. The estimated frame cost is calculated based on the `fps` and `end_seconds` input.
+
+        Args:
+            name: The name of video. This value is mainly used for your own identification of the video.
+            assets: Provide the assets for animation.
+            end_seconds: This value determines the duration of the output video.
+            fps: The desire output video frame rate
+            height: The height of the final output video. The maximum height depends on your subscription. Please refer to our [pricing page](https://magichour.ai/pricing) for more details
+            style: Defines the style of the output video
+            width: The width of the final output video. The maximum width depends on your subscription. Please refer to our [pricing page](https://magichour.ai/pricing) for more details
+            wait_for_completion: Whether to wait for the video project to complete
+            download_outputs: Whether to download the outputs
+            download_directory: The directory to download the outputs to. If not provided, the outputs will be downloaded to the current working directory
+            request_options: Additional options to customize the HTTP request
+
+        Returns:
+            V1VideoProjectsGetResponseWithDownloads: The response from the Animation API with the downloaded paths if `download_outputs` is True.
+
+        Examples:
+        ```py
+        response = await client.v1.animation.generate(
+            assets={
+                "audio_file_path": "path/to/audio.mp3",
+                "audio_source": "file",
+                "image_file_path": "path/to/image.png",
+            },
+            end_seconds=15.0,
+            fps=12.0,
+            height=960,
+            style={
+                "art_style": "Painterly Illustration",
+                "camera_effect": "Simple Zoom In",
+                "prompt": "Cyberpunk city",
+                "prompt_type": "custom",
+                "transition_speed": 5,
+            },
+            width=512,
+            wait_for_completion=True,
+            download_outputs=True,
+            download_directory="outputs/",
+        )
+        ```
+        """
+
+        file_client = AsyncFilesClient(base_client=self._base_client)
+
+        # Upload image file if provided
+        if "image_file_path" in assets and assets["image_file_path"]:
+            image_file_path = assets["image_file_path"]
+            assets["image_file_path"] = await file_client.upload_file(
+                file=image_file_path
+            )
+
+        # Upload audio file if audio_source is "file" and audio_file_path is provided
+        if (
+            assets.get("audio_source") == "file"
+            and "audio_file_path" in assets
+            and assets["audio_file_path"]
+        ):
+            audio_file_path = assets["audio_file_path"]
+            assets["audio_file_path"] = await file_client.upload_file(
+                file=audio_file_path
+            )
+
+        create_response = await self.create(
+            assets=assets,
+            end_seconds=end_seconds,
+            fps=fps,
+            height=height,
+            style=style,
+            width=width,
+            name=name,
+            request_options=request_options,
+        )
+        logger.info(f"Animation response: {create_response}")
+
+        video_projects_client = AsyncVideoProjectsClient(base_client=self._base_client)
+        response = await video_projects_client.check_result(
+            id=create_response.id,
+            wait_for_completion=wait_for_completion,
+            download_outputs=download_outputs,
+            download_directory=download_directory,
+        )
+
+        return response
+
     async def create(
         self,
         *,
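Both `generate` methods above wrap the same three steps: upload local assets, call `create`, then hand polling and downloading to `VideoProjectsClient.check_result`. A hedged sketch of splitting that flow up; it assumes `check_result` is also reachable as `client.v1.video_projects.check_result` (this diff only shows it being called internally) and that the immediate, non-blocking response exposes an `id` field:

```python
from os import getenv

from magic_hour import Client

client = Client(token=getenv("API_TOKEN"))

# Kick off the job without blocking; generate() still uploads local assets first.
queued = client.v1.animation.generate(
    assets={
        "audio_file_path": "/path/to/1234.mp3",
        "audio_source": "file",
        "image_file_path": "/path/to/1234.png",
    },
    end_seconds=15.0,
    fps=12.0,
    height=960,
    style={
        "art_style": "Painterly Illustration",
        "camera_effect": "Simple Zoom In",
        "prompt": "Cyberpunk city",
        "prompt_type": "custom",
        "transition_speed": 5,
    },
    width=512,
    wait_for_completion=False,
    download_outputs=False,
)

# Later, poll and download through the same helper generate() delegates to.
# Assumption: the video_projects resource exposes check_result publicly.
finished = client.v1.video_projects.check_result(
    id=queued.id,
    wait_for_completion=True,
    download_outputs=True,
    download_directory="outputs",
)
```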
magic_hour/resources/v1/auto_subtitle_generator/README.md

@@ -1,3 +1,68 @@
+# v1_auto_subtitle_generator
+
+## Module Functions
+
+<!-- CUSTOM DOCS START -->
+
+### Auto Subtitle Generator Generate Workflow <a name="generate"></a>
+
+The workflow performs the following action
+
+1. upload local assets to Magic Hour storage. So you can pass in a local path instead of having to upload files yourself
+2. trigger a generation
+3. poll for a completion status. This is configurable
+4. if success, download the output to local directory
+
+> [!TIP]
+> This is the recommended way to use the SDK unless you have specific needs where it is necessary to split up the actions.
+
+#### Parameters
+
+In Additional to the parameters listed in the `.create` section below, `.generate` introduces 3 new parameters:
+
+- `wait_for_completion` (bool, default True): Whether to wait for the project to complete.
+- `download_outputs` (bool, default True): Whether to download the generated files
+- `download_directory` (str, optional): Directory to save downloaded files (defaults to current directory)
+
+#### Synchronous Client
+
+```python
+from magic_hour import Client
+from os import getenv
+
+client = Client(token=getenv("API_TOKEN"))
+res = client.v1.auto_subtitle_generator.generate(
+    assets={"video_file_path": "/path/to/1234.mp4"},
+    end_seconds=15.0,
+    start_seconds=0.0,
+    style={},
+    name="Auto Subtitle video",
+    wait_for_completion=True,
+    download_outputs=True,
+    download_directory="outputs"
+)
+```
+
+#### Asynchronous Client
+
+```python
+from magic_hour import AsyncClient
+from os import getenv
+
+client = AsyncClient(token=getenv("API_TOKEN"))
+res = await client.v1.auto_subtitle_generator.generate(
+    assets={"video_file_path": "/path/to/1234.mp4"},
+    end_seconds=15.0,
+    start_seconds=0.0,
+    style={},
+    name="Auto Subtitle video",
+    wait_for_completion=True,
+    download_outputs=True,
+    download_directory="outputs"
+)
+```
+
+<!-- CUSTOM DOCS END -->
 
 ### Auto Subtitle Generator <a name="create"></a>
 
@@ -10,9 +75,12 @@ Automatically generate subtitles for your video in multiple languages.
 | Parameter | Required | Description | Example |
 |-----------|:--------:|-------------|--------|
 | `assets` | ✓ | Provide the assets for auto subtitle generator | `{"video_file_path": "api-assets/id/1234.mp4"}` |
+| `└─ video_file_path` | ✓ | This is the video used to add subtitles. This value is either - a direct URL to the video file - `file_path` field from the response of the [upload urls API](https://docs.magichour.ai/api-reference/files/generate-asset-upload-urls). Please refer to the [Input File documentation](https://docs.magichour.ai/api-reference/files/generate-asset-upload-urls#input-file) to learn more. | `"api-assets/id/1234.mp4"` |
 | `end_seconds` | ✓ | The end time of the input video in seconds. This value is used to trim the input video. The value must be greater than 0.1, and more than the start_seconds. | `15.0` |
 | `start_seconds` | ✓ | The start time of the input video in seconds. This value is used to trim the input video. The value must be greater than 0. | `0.0` |
 | `style` | ✓ | Style of the subtitle. At least one of `.style.template` or `.style.custom_config` must be provided. * If only `.style.template` is provided, default values for the template will be used. * If both are provided, the fields in `.style.custom_config` will be used to overwrite the fields in `.style.template`. * If only `.style.custom_config` is provided, then all fields in `.style.custom_config` will be used. To use custom config only, the following `custom_config` params are required: * `.style.custom_config.font` * `.style.custom_config.text_color` * `.style.custom_config.vertical_position` * `.style.custom_config.horizontal_position` | `{}` |
+| `└─ custom_config` | ✗ | Custom subtitle configuration. | `{"font": "Noto Sans", "font_size": 24.0, "font_style": "normal", "highlighted_text_color": "#FFD700", "horizontal_position": "center", "stroke_color": "#000000", "stroke_width": 1.0, "text_color": "#FFFFFF", "vertical_position": "bottom"}` |
+| `└─ template` | ✗ | Preset subtitle templates. Please visit https://magichour.ai/create/auto-subtitle-generator to see the style of the existing templates. | `"cinematic"` |
 | `name` | ✗ | The name of video. This value is mainly used for your own identification of the video. | `"Auto Subtitle video"` |
 
 #### Synchronous Client
@@ -56,3 +124,4 @@ res = await client.v1.auto_subtitle_generator.create(
 
 ##### Example
 `{"credits_charged": 450, "estimated_frame_cost": 450, "id": "cuid-example"}`
+
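The `style` row above spells out the custom-config-only rule: `font`, `text_color`, `vertical_position`, and `horizontal_position` are required when no template is set. A hedged sketch of passing such a config through the new `generate` call, with values borrowed from the table's example column:

```python
from os import getenv

from magic_hour import Client

client = Client(token=getenv("API_TOKEN"))

# Per the parameter table, when only custom_config is given these four fields
# are required: font, text_color, vertical_position, horizontal_position.
res = client.v1.auto_subtitle_generator.generate(
    assets={"video_file_path": "/path/to/1234.mp4"},
    end_seconds=15.0,
    start_seconds=0.0,
    style={
        "custom_config": {
            "font": "Noto Sans",
            "text_color": "#FFFFFF",
            "vertical_position": "bottom",
            "horizontal_position": "center",
            "font_size": 24.0,          # optional, from the example column
            "stroke_color": "#000000",  # optional, from the example column
        }
    },
    name="Auto Subtitle video",
    download_directory="outputs",
)
```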