magic_hour-0.9.5-py3-none-any.whl → magic_hour-0.44.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- magic_hour/README.md +34 -0
- magic_hour/__init__.py +1 -1
- magic_hour/client.py +8 -17
- magic_hour/environment.py +13 -1
- magic_hour/helpers/__init__.py +4 -0
- magic_hour/helpers/download.py +77 -0
- magic_hour/helpers/logger.py +8 -0
- magic_hour/resources/v1/README.md +32 -0
- magic_hour/resources/v1/ai_clothes_changer/README.md +94 -5
- magic_hour/resources/v1/ai_clothes_changer/client.py +161 -16
- magic_hour/resources/v1/ai_face_editor/README.md +195 -0
- magic_hour/resources/v1/ai_face_editor/__init__.py +4 -0
- magic_hour/resources/v1/ai_face_editor/client.py +324 -0
- magic_hour/resources/v1/ai_gif_generator/README.md +116 -0
- magic_hour/resources/v1/ai_gif_generator/__init__.py +4 -0
- magic_hour/resources/v1/ai_gif_generator/client.py +257 -0
- magic_hour/resources/v1/ai_headshot_generator/README.md +81 -3
- magic_hour/resources/v1/ai_headshot_generator/client.py +167 -18
- magic_hour/resources/v1/ai_image_editor/README.md +125 -0
- magic_hour/resources/v1/ai_image_editor/__init__.py +4 -0
- magic_hour/resources/v1/ai_image_editor/client.py +290 -0
- magic_hour/resources/v1/ai_image_generator/README.md +99 -5
- magic_hour/resources/v1/ai_image_generator/client.py +170 -24
- magic_hour/resources/v1/ai_image_upscaler/README.md +89 -3
- magic_hour/resources/v1/ai_image_upscaler/client.py +173 -20
- magic_hour/resources/v1/ai_meme_generator/README.md +129 -0
- magic_hour/resources/v1/ai_meme_generator/__init__.py +4 -0
- magic_hour/resources/v1/ai_meme_generator/client.py +253 -0
- magic_hour/resources/v1/ai_photo_editor/README.md +119 -4
- magic_hour/resources/v1/ai_photo_editor/client.py +199 -18
- magic_hour/resources/v1/ai_qr_code_generator/README.md +84 -3
- magic_hour/resources/v1/ai_qr_code_generator/client.py +140 -18
- magic_hour/resources/v1/ai_talking_photo/README.md +137 -0
- magic_hour/resources/v1/ai_talking_photo/__init__.py +4 -0
- magic_hour/resources/v1/ai_talking_photo/client.py +326 -0
- magic_hour/resources/v1/ai_voice_cloner/README.md +62 -0
- magic_hour/resources/v1/ai_voice_cloner/__init__.py +4 -0
- magic_hour/resources/v1/ai_voice_cloner/client.py +272 -0
- magic_hour/resources/v1/ai_voice_generator/README.md +112 -0
- magic_hour/resources/v1/ai_voice_generator/__init__.py +4 -0
- magic_hour/resources/v1/ai_voice_generator/client.py +241 -0
- magic_hour/resources/v1/animation/README.md +128 -6
- magic_hour/resources/v1/animation/client.py +247 -22
- magic_hour/resources/v1/audio_projects/README.md +135 -0
- magic_hour/resources/v1/audio_projects/__init__.py +12 -0
- magic_hour/resources/v1/audio_projects/client.py +310 -0
- magic_hour/resources/v1/audio_projects/client_test.py +520 -0
- magic_hour/resources/v1/auto_subtitle_generator/README.md +128 -0
- magic_hour/resources/v1/auto_subtitle_generator/__init__.py +4 -0
- magic_hour/resources/v1/auto_subtitle_generator/client.py +346 -0
- magic_hour/resources/v1/client.py +75 -1
- magic_hour/resources/v1/face_detection/README.md +157 -0
- magic_hour/resources/v1/face_detection/__init__.py +12 -0
- magic_hour/resources/v1/face_detection/client.py +380 -0
- magic_hour/resources/v1/face_swap/README.md +137 -9
- magic_hour/resources/v1/face_swap/client.py +329 -38
- magic_hour/resources/v1/face_swap_photo/README.md +118 -3
- magic_hour/resources/v1/face_swap_photo/client.py +199 -14
- magic_hour/resources/v1/files/README.md +39 -0
- magic_hour/resources/v1/files/client.py +351 -1
- magic_hour/resources/v1/files/client_test.py +414 -0
- magic_hour/resources/v1/files/upload_urls/README.md +38 -17
- magic_hour/resources/v1/files/upload_urls/client.py +38 -34
- magic_hour/resources/v1/image_background_remover/README.md +96 -5
- magic_hour/resources/v1/image_background_remover/client.py +151 -16
- magic_hour/resources/v1/image_projects/README.md +82 -10
- magic_hour/resources/v1/image_projects/__init__.py +10 -2
- magic_hour/resources/v1/image_projects/client.py +154 -16
- magic_hour/resources/v1/image_projects/client_test.py +527 -0
- magic_hour/resources/v1/image_to_video/README.md +96 -11
- magic_hour/resources/v1/image_to_video/client.py +282 -38
- magic_hour/resources/v1/lip_sync/README.md +112 -9
- magic_hour/resources/v1/lip_sync/client.py +288 -34
- magic_hour/resources/v1/photo_colorizer/README.md +107 -0
- magic_hour/resources/v1/photo_colorizer/__init__.py +4 -0
- magic_hour/resources/v1/photo_colorizer/client.py +248 -0
- magic_hour/resources/v1/text_to_video/README.md +96 -7
- magic_hour/resources/v1/text_to_video/client.py +204 -18
- magic_hour/resources/v1/video_projects/README.md +81 -9
- magic_hour/resources/v1/video_projects/__init__.py +10 -2
- magic_hour/resources/v1/video_projects/client.py +151 -14
- magic_hour/resources/v1/video_projects/client_test.py +527 -0
- magic_hour/resources/v1/video_to_video/README.md +119 -15
- magic_hour/resources/v1/video_to_video/client.py +299 -46
- magic_hour/types/models/__init__.py +92 -56
- magic_hour/types/models/v1_ai_clothes_changer_create_response.py +33 -0
- magic_hour/types/models/v1_ai_face_editor_create_response.py +33 -0
- magic_hour/types/models/v1_ai_gif_generator_create_response.py +33 -0
- magic_hour/types/models/v1_ai_headshot_generator_create_response.py +33 -0
- magic_hour/types/models/v1_ai_image_editor_create_response.py +33 -0
- magic_hour/types/models/v1_ai_image_generator_create_response.py +33 -0
- magic_hour/types/models/v1_ai_image_upscaler_create_response.py +33 -0
- magic_hour/types/models/v1_ai_meme_generator_create_response.py +33 -0
- magic_hour/types/models/v1_ai_photo_editor_create_response.py +33 -0
- magic_hour/types/models/v1_ai_qr_code_generator_create_response.py +33 -0
- magic_hour/types/models/v1_ai_talking_photo_create_response.py +35 -0
- magic_hour/types/models/v1_ai_voice_cloner_create_response.py +27 -0
- magic_hour/types/models/v1_ai_voice_generator_create_response.py +27 -0
- magic_hour/types/models/v1_animation_create_response.py +35 -0
- magic_hour/types/models/v1_audio_projects_get_response.py +72 -0
- magic_hour/types/models/v1_audio_projects_get_response_downloads_item.py +19 -0
- magic_hour/types/models/{get_v1_image_projects_id_response_error.py → v1_audio_projects_get_response_error.py} +2 -2
- magic_hour/types/models/v1_auto_subtitle_generator_create_response.py +35 -0
- magic_hour/types/models/v1_face_detection_create_response.py +25 -0
- magic_hour/types/models/v1_face_detection_get_response.py +45 -0
- magic_hour/types/models/v1_face_detection_get_response_faces_item.py +25 -0
- magic_hour/types/models/v1_face_swap_create_response.py +35 -0
- magic_hour/types/models/v1_face_swap_photo_create_response.py +33 -0
- magic_hour/types/models/v1_files_upload_urls_create_response.py +24 -0
- magic_hour/types/models/{post_v1_files_upload_urls_response_items_item.py → v1_files_upload_urls_create_response_items_item.py} +2 -2
- magic_hour/types/models/v1_image_background_remover_create_response.py +33 -0
- magic_hour/types/models/{get_v1_image_projects_id_response.py → v1_image_projects_get_response.py} +20 -18
- magic_hour/types/models/{get_v1_video_projects_id_response_downloads_item.py → v1_image_projects_get_response_downloads_item.py} +1 -1
- magic_hour/types/models/{get_v1_video_projects_id_response_error.py → v1_image_projects_get_response_error.py} +2 -2
- magic_hour/types/models/v1_image_to_video_create_response.py +35 -0
- magic_hour/types/models/v1_lip_sync_create_response.py +35 -0
- magic_hour/types/models/v1_photo_colorizer_create_response.py +33 -0
- magic_hour/types/models/v1_text_to_video_create_response.py +35 -0
- magic_hour/types/models/{get_v1_video_projects_id_response.py → v1_video_projects_get_response.py} +26 -23
- magic_hour/types/models/{get_v1_video_projects_id_response_download.py → v1_video_projects_get_response_download.py} +1 -1
- magic_hour/types/models/{get_v1_image_projects_id_response_downloads_item.py → v1_video_projects_get_response_downloads_item.py} +1 -1
- magic_hour/types/models/v1_video_projects_get_response_error.py +25 -0
- magic_hour/types/models/v1_video_to_video_create_response.py +35 -0
- magic_hour/types/params/__init__.py +422 -176
- magic_hour/types/params/v1_ai_clothes_changer_create_body.py +40 -0
- magic_hour/types/params/v1_ai_clothes_changer_create_body_assets.py +58 -0
- magic_hour/types/params/v1_ai_clothes_changer_generate_body_assets.py +33 -0
- magic_hour/types/params/v1_ai_face_editor_create_body.py +52 -0
- magic_hour/types/params/v1_ai_face_editor_create_body_assets.py +33 -0
- magic_hour/types/params/v1_ai_face_editor_create_body_style.py +137 -0
- magic_hour/types/params/v1_ai_face_editor_generate_body_assets.py +17 -0
- magic_hour/types/params/v1_ai_gif_generator_create_body.py +47 -0
- magic_hour/types/params/{post_v1_ai_image_generator_body_style.py → v1_ai_gif_generator_create_body_style.py} +5 -5
- magic_hour/types/params/v1_ai_headshot_generator_create_body.py +49 -0
- magic_hour/types/params/v1_ai_headshot_generator_create_body_assets.py +33 -0
- magic_hour/types/params/v1_ai_headshot_generator_create_body_style.py +27 -0
- magic_hour/types/params/v1_ai_headshot_generator_generate_body_assets.py +17 -0
- magic_hour/types/params/v1_ai_image_editor_create_body.py +49 -0
- magic_hour/types/params/v1_ai_image_editor_create_body_assets.py +47 -0
- magic_hour/types/params/v1_ai_image_editor_create_body_style.py +41 -0
- magic_hour/types/params/v1_ai_image_editor_generate_body_assets.py +28 -0
- magic_hour/types/params/{post_v1_ai_image_generator_body.py → v1_ai_image_generator_create_body.py} +17 -11
- magic_hour/types/params/v1_ai_image_generator_create_body_style.py +127 -0
- magic_hour/types/params/v1_ai_image_upscaler_create_body.py +59 -0
- magic_hour/types/params/v1_ai_image_upscaler_create_body_assets.py +33 -0
- magic_hour/types/params/{post_v1_ai_image_upscaler_body_style.py → v1_ai_image_upscaler_create_body_style.py} +4 -4
- magic_hour/types/params/v1_ai_image_upscaler_generate_body_assets.py +17 -0
- magic_hour/types/params/v1_ai_meme_generator_create_body.py +37 -0
- magic_hour/types/params/v1_ai_meme_generator_create_body_style.py +73 -0
- magic_hour/types/params/{post_v1_ai_photo_editor_body.py → v1_ai_photo_editor_create_body.py} +15 -15
- magic_hour/types/params/v1_ai_photo_editor_create_body_assets.py +33 -0
- magic_hour/types/params/{post_v1_ai_photo_editor_body_style.py → v1_ai_photo_editor_create_body_style.py} +20 -4
- magic_hour/types/params/v1_ai_photo_editor_generate_body_assets.py +17 -0
- magic_hour/types/params/v1_ai_qr_code_generator_create_body.py +45 -0
- magic_hour/types/params/{post_v1_ai_qr_code_generator_body_style.py → v1_ai_qr_code_generator_create_body_style.py} +4 -4
- magic_hour/types/params/v1_ai_talking_photo_create_body.py +68 -0
- magic_hour/types/params/v1_ai_talking_photo_create_body_assets.py +46 -0
- magic_hour/types/params/v1_ai_talking_photo_create_body_style.py +44 -0
- magic_hour/types/params/v1_ai_talking_photo_generate_body_assets.py +26 -0
- magic_hour/types/params/v1_ai_voice_cloner_create_body.py +49 -0
- magic_hour/types/params/v1_ai_voice_cloner_create_body_assets.py +33 -0
- magic_hour/types/params/v1_ai_voice_cloner_create_body_style.py +28 -0
- magic_hour/types/params/v1_ai_voice_cloner_generate_body_assets.py +28 -0
- magic_hour/types/params/v1_ai_voice_generator_create_body.py +40 -0
- magic_hour/types/params/v1_ai_voice_generator_create_body_style.py +440 -0
- magic_hour/types/params/{post_v1_animation_body.py → v1_animation_create_body.py} +16 -16
- magic_hour/types/params/{post_v1_animation_body_assets.py → v1_animation_create_body_assets.py} +15 -5
- magic_hour/types/params/{post_v1_animation_body_style.py → v1_animation_create_body_style.py} +13 -10
- magic_hour/types/params/v1_animation_generate_body_assets.py +39 -0
- magic_hour/types/params/v1_auto_subtitle_generator_create_body.py +78 -0
- magic_hour/types/params/v1_auto_subtitle_generator_create_body_assets.py +33 -0
- magic_hour/types/params/v1_auto_subtitle_generator_create_body_style.py +56 -0
- magic_hour/types/params/v1_auto_subtitle_generator_create_body_style_custom_config.py +86 -0
- magic_hour/types/params/v1_auto_subtitle_generator_generate_body_assets.py +17 -0
- magic_hour/types/params/v1_face_detection_create_body.py +44 -0
- magic_hour/types/params/v1_face_detection_create_body_assets.py +33 -0
- magic_hour/types/params/v1_face_detection_generate_body_assets.py +17 -0
- magic_hour/types/params/v1_face_swap_create_body.py +92 -0
- magic_hour/types/params/v1_face_swap_create_body_assets.py +91 -0
- magic_hour/types/params/v1_face_swap_create_body_assets_face_mappings_item.py +44 -0
- magic_hour/types/params/v1_face_swap_create_body_style.py +33 -0
- magic_hour/types/params/v1_face_swap_generate_body_assets.py +56 -0
- magic_hour/types/params/v1_face_swap_generate_body_assets_face_mappings_item.py +25 -0
- magic_hour/types/params/v1_face_swap_photo_create_body.py +40 -0
- magic_hour/types/params/v1_face_swap_photo_create_body_assets.py +76 -0
- magic_hour/types/params/v1_face_swap_photo_create_body_assets_face_mappings_item.py +44 -0
- magic_hour/types/params/v1_face_swap_photo_generate_body_assets.py +47 -0
- magic_hour/types/params/v1_face_swap_photo_generate_body_assets_face_mappings_item.py +25 -0
- magic_hour/types/params/v1_files_upload_urls_create_body.py +36 -0
- magic_hour/types/params/v1_files_upload_urls_create_body_items_item.py +38 -0
- magic_hour/types/params/v1_image_background_remover_create_body.py +40 -0
- magic_hour/types/params/v1_image_background_remover_create_body_assets.py +49 -0
- magic_hour/types/params/v1_image_background_remover_generate_body_assets.py +27 -0
- magic_hour/types/params/v1_image_to_video_create_body.py +101 -0
- magic_hour/types/params/v1_image_to_video_create_body_assets.py +33 -0
- magic_hour/types/params/v1_image_to_video_create_body_style.py +53 -0
- magic_hour/types/params/v1_image_to_video_generate_body_assets.py +17 -0
- magic_hour/types/params/v1_lip_sync_create_body.py +100 -0
- magic_hour/types/params/{post_v1_lip_sync_body_assets.py → v1_lip_sync_create_body_assets.py} +15 -5
- magic_hour/types/params/v1_lip_sync_create_body_style.py +37 -0
- magic_hour/types/params/v1_lip_sync_generate_body_assets.py +36 -0
- magic_hour/types/params/v1_photo_colorizer_create_body.py +40 -0
- magic_hour/types/params/v1_photo_colorizer_create_body_assets.py +33 -0
- magic_hour/types/params/v1_photo_colorizer_generate_body_assets.py +17 -0
- magic_hour/types/params/v1_text_to_video_create_body.py +78 -0
- magic_hour/types/params/v1_text_to_video_create_body_style.py +43 -0
- magic_hour/types/params/v1_video_to_video_create_body.py +101 -0
- magic_hour/types/params/{post_v1_video_to_video_body_assets.py → v1_video_to_video_create_body_assets.py} +9 -4
- magic_hour/types/params/{post_v1_video_to_video_body_style.py → v1_video_to_video_create_body_style.py} +68 -26
- magic_hour/types/params/v1_video_to_video_generate_body_assets.py +27 -0
- magic_hour-0.44.0.dist-info/METADATA +328 -0
- magic_hour-0.44.0.dist-info/RECORD +231 -0
- magic_hour/core/__init__.py +0 -52
- magic_hour/core/api_error.py +0 -56
- magic_hour/core/auth.py +0 -314
- magic_hour/core/base_client.py +0 -618
- magic_hour/core/binary_response.py +0 -23
- magic_hour/core/query.py +0 -106
- magic_hour/core/request.py +0 -156
- magic_hour/core/response.py +0 -293
- magic_hour/core/type_utils.py +0 -28
- magic_hour/core/utils.py +0 -55
- magic_hour/types/models/post_v1_ai_clothes_changer_response.py +0 -25
- magic_hour/types/models/post_v1_ai_headshot_generator_response.py +0 -25
- magic_hour/types/models/post_v1_ai_image_generator_response.py +0 -25
- magic_hour/types/models/post_v1_ai_image_upscaler_response.py +0 -25
- magic_hour/types/models/post_v1_ai_photo_editor_response.py +0 -25
- magic_hour/types/models/post_v1_ai_qr_code_generator_response.py +0 -25
- magic_hour/types/models/post_v1_animation_response.py +0 -25
- magic_hour/types/models/post_v1_face_swap_photo_response.py +0 -25
- magic_hour/types/models/post_v1_face_swap_response.py +0 -25
- magic_hour/types/models/post_v1_files_upload_urls_response.py +0 -21
- magic_hour/types/models/post_v1_image_background_remover_response.py +0 -25
- magic_hour/types/models/post_v1_image_to_video_response.py +0 -25
- magic_hour/types/models/post_v1_lip_sync_response.py +0 -25
- magic_hour/types/models/post_v1_text_to_video_response.py +0 -25
- magic_hour/types/models/post_v1_video_to_video_response.py +0 -25
- magic_hour/types/params/post_v1_ai_clothes_changer_body.py +0 -40
- magic_hour/types/params/post_v1_ai_clothes_changer_body_assets.py +0 -45
- magic_hour/types/params/post_v1_ai_headshot_generator_body.py +0 -40
- magic_hour/types/params/post_v1_ai_headshot_generator_body_assets.py +0 -28
- magic_hour/types/params/post_v1_ai_image_upscaler_body.py +0 -57
- magic_hour/types/params/post_v1_ai_image_upscaler_body_assets.py +0 -28
- magic_hour/types/params/post_v1_ai_photo_editor_body_assets.py +0 -28
- magic_hour/types/params/post_v1_ai_qr_code_generator_body.py +0 -45
- magic_hour/types/params/post_v1_face_swap_body.py +0 -72
- magic_hour/types/params/post_v1_face_swap_body_assets.py +0 -52
- magic_hour/types/params/post_v1_face_swap_photo_body.py +0 -40
- magic_hour/types/params/post_v1_face_swap_photo_body_assets.py +0 -36
- magic_hour/types/params/post_v1_files_upload_urls_body.py +0 -31
- magic_hour/types/params/post_v1_files_upload_urls_body_items_item.py +0 -38
- magic_hour/types/params/post_v1_image_background_remover_body.py +0 -40
- magic_hour/types/params/post_v1_image_background_remover_body_assets.py +0 -28
- magic_hour/types/params/post_v1_image_to_video_body.py +0 -73
- magic_hour/types/params/post_v1_image_to_video_body_assets.py +0 -28
- magic_hour/types/params/post_v1_image_to_video_body_style.py +0 -37
- magic_hour/types/params/post_v1_lip_sync_body.py +0 -80
- magic_hour/types/params/post_v1_text_to_video_body.py +0 -57
- magic_hour/types/params/post_v1_text_to_video_body_style.py +0 -28
- magic_hour/types/params/post_v1_video_to_video_body.py +0 -93
- magic_hour-0.9.5.dist-info/METADATA +0 -133
- magic_hour-0.9.5.dist-info/RECORD +0 -132
- {magic_hour-0.9.5.dist-info → magic_hour-0.44.0.dist-info}/LICENSE +0 -0
- {magic_hour-0.9.5.dist-info → magic_hour-0.44.0.dist-info}/WHEEL +0 -0
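
The renames in the list above follow one pattern: request and response types that were named after the HTTP call in 0.9.5 (`post_v1_*_body`, `post_v1_*_response`) are named after the resource action in 0.44.0 (`v1_*_create_body`, `v1_*_create_response`), and each resource gains a `generate` workflow alongside `create`. A minimal migration sketch, assuming the class names follow the same pattern as the file names shown in this diff; the exact import paths and attribute names below are assumptions, not taken from the diff:

```python
# Hypothetical sketch: old and new type names are inferred from the renamed files
# (e.g. post_v1_face_swap_response.py -> v1_face_swap_create_response.py).
# Verify against the installed 0.44.0 package before relying on these names.

# 0.9.5 (old naming, assumed):
# from magic_hour.types.models import PostV1FaceSwapResponse

# 0.44.0 (new naming, assumed):
from magic_hour.types import models


def project_id(res: "models.V1FaceSwapCreateResponse") -> str:
    # Create responses expose the project id used for polling and downloads.
    return res.id
```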

New file: magic_hour/resources/v1/ai_talking_photo/README.md (@@ -0,0 +1,137 @@)

# v1.ai_talking_photo

## Module Functions

<!-- CUSTOM DOCS START -->

### Ai Talking Photo Generate Workflow <a name="generate"></a>

The workflow performs the following actions:

1. Upload local assets to Magic Hour storage, so you can pass in a local path instead of having to upload files yourself.
2. Trigger a generation.
3. Poll for the completion status. This is configurable.
4. On success, download the output to a local directory.

> [!TIP]
> This is the recommended way to use the SDK unless you have specific needs where it is necessary to split up the actions.

#### Parameters

In addition to the parameters listed in the `.create` section below, `.generate` introduces 3 new parameters:

- `wait_for_completion` (bool, default True): Whether to wait for the project to complete.
- `download_outputs` (bool, default True): Whether to download the generated files.
- `download_directory` (str, optional): Directory to save downloaded files (defaults to the current directory).

#### Synchronous Client

```python
from magic_hour import Client
from os import getenv

client = Client(token=getenv("API_TOKEN"))
res = client.v1.ai_talking_photo.generate(
    assets={
        "audio_file_path": "/path/to/1234.mp3",
        "image_file_path": "/path/to/1234.png",
    },
    end_seconds=15.0,
    start_seconds=0.0,
    name="Talking Photo image",
    wait_for_completion=True,
    download_outputs=True,
    download_directory="outputs",
)
```

#### Asynchronous Client

```python
from magic_hour import AsyncClient
from os import getenv

client = AsyncClient(token=getenv("API_TOKEN"))
res = await client.v1.ai_talking_photo.generate(
    assets={
        "audio_file_path": "/path/to/1234.mp3",
        "image_file_path": "/path/to/1234.png",
    },
    end_seconds=15.0,
    start_seconds=0.0,
    name="Talking Photo image",
    wait_for_completion=True,
    download_outputs=True,
    download_directory="outputs",
)
```

<!-- CUSTOM DOCS END -->

### AI Talking Photo <a name="create"></a>

Create a talking photo from an image and audio or text input.

**API Endpoint**: `POST /v1/ai-talking-photo`

#### Parameters

| Parameter | Required | Description | Example |
| --- | :---: | --- | --- |
| `assets` | ✓ | Provide the assets for creating a talking photo | `{"audio_file_path": "api-assets/id/1234.mp3", "image_file_path": "api-assets/id/1234.png"}` |
| `└─ audio_file_path` | ✓ | The audio file to sync with the image. This value is either a direct URL to the audio file or the `file_path` field from the response of the [upload urls API](https://docs.magichour.ai/api-reference/files/generate-asset-upload-urls). Please refer to the [Input File documentation](https://docs.magichour.ai/api-reference/files/generate-asset-upload-urls#input-file) to learn more. | `"api-assets/id/1234.mp3"` |
| `└─ image_file_path` | ✓ | The source image to animate. This value is either a direct URL to the image file or the `file_path` field from the response of the [upload urls API](https://docs.magichour.ai/api-reference/files/generate-asset-upload-urls). Please refer to the [Input File documentation](https://docs.magichour.ai/api-reference/files/generate-asset-upload-urls#input-file) to learn more. | `"api-assets/id/1234.png"` |
| `end_seconds` | ✓ | The end time of the input audio in seconds. The maximum duration allowed is 60 seconds. | `15.0` |
| `start_seconds` | ✓ | The start time of the input audio in seconds. The maximum duration allowed is 60 seconds. | `0.0` |
| `name` | ✗ | The name of the image. This value is mainly used for your own identification of the image. | `"Talking Photo image"` |
| `style` | ✗ | Attributes used to dictate the style of the output | `{"generation_mode": "pro", "intensity": 1.5}` |
| `└─ generation_mode` | ✗ | Controls overall motion style. * `pro` - Higher fidelity, realistic detail, accurate lip sync, and faster generation. * `standard` - More expressive motion, but lower visual fidelity. * `expressive` - More motion and facial expressiveness; may introduce visual artifacts. (Deprecated: passing this value will be treated as `standard`) * `stable` - Reduced motion for cleaner output; may result in minimal animation. (Deprecated: passing this value will be treated as `pro`) | `"pro"` |
| `└─ intensity` | ✗ | Note: this value is only applicable when generation_mode is `expressive`. The value can include up to 2 decimal places. * Lower values yield more stability but can suppress mouth movement. * Higher values increase motion and expressiveness, with a higher risk of distortion. | `1.5` |

#### Synchronous Client

```python
from magic_hour import Client
from os import getenv

client = Client(token=getenv("API_TOKEN"))
res = client.v1.ai_talking_photo.create(
    assets={
        "audio_file_path": "api-assets/id/1234.mp3",
        "image_file_path": "api-assets/id/1234.png",
    },
    end_seconds=15.0,
    start_seconds=0.0,
    name="Talking Photo image",
)
```

#### Asynchronous Client

```python
from magic_hour import AsyncClient
from os import getenv

client = AsyncClient(token=getenv("API_TOKEN"))
res = await client.v1.ai_talking_photo.create(
    assets={
        "audio_file_path": "api-assets/id/1234.mp3",
        "image_file_path": "api-assets/id/1234.png",
    },
    end_seconds=15.0,
    start_seconds=0.0,
    name="Talking Photo image",
)
```

#### Response

##### Type

[V1AiTalkingPhotoCreateResponse](/magic_hour/types/models/v1_ai_talking_photo_create_response.py)

##### Example

```python
{"credits_charged": 450, "estimated_frame_cost": 450, "id": "cuid-example"}
```
New file: magic_hour/resources/v1/ai_talking_photo/client.py (@@ -0,0 +1,326 @@)

````python
import typing

from magic_hour.helpers.logger import get_sdk_logger
from magic_hour.resources.v1.files.client import AsyncFilesClient, FilesClient
from magic_hour.resources.v1.video_projects.client import (
    AsyncVideoProjectsClient,
    VideoProjectsClient,
)
from magic_hour.types import models, params
from make_api_request import (
    AsyncBaseClient,
    RequestOptions,
    SyncBaseClient,
    default_request_options,
    to_encodable,
    type_utils,
)


logger = get_sdk_logger(__name__)


class AiTalkingPhotoClient:
    def __init__(self, *, base_client: SyncBaseClient):
        self._base_client = base_client

    def generate(
        self,
        *,
        assets: params.V1AiTalkingPhotoGenerateBodyAssets,
        end_seconds: float,
        start_seconds: float,
        name: typing.Union[
            typing.Optional[str], type_utils.NotGiven
        ] = type_utils.NOT_GIVEN,
        style: typing.Union[
            typing.Optional[params.V1AiTalkingPhotoCreateBodyStyle], type_utils.NotGiven
        ] = type_utils.NOT_GIVEN,
        wait_for_completion: bool = True,
        download_outputs: bool = True,
        download_directory: typing.Optional[str] = None,
        request_options: typing.Optional[RequestOptions] = None,
    ):
        """
        Generate talking photo (alias for create with additional functionality).

        Create a talking photo from an image and audio or text input. Each generation costs credits.

        Args:
            name: The name of image. This value is mainly used for your own identification of the image.
            style: Attributes used to dictate the style of the output
            assets: Provide the assets for creating a talking photo
            end_seconds: The end time of the input audio in seconds. The maximum duration allowed is 60 seconds.
            start_seconds: The start time of the input audio in seconds. The maximum duration allowed is 60 seconds.
            wait_for_completion: Whether to wait for the video project to complete
            download_outputs: Whether to download the outputs
            download_directory: The directory to download the outputs to. If not provided, the outputs will be downloaded to the current working directory
            request_options: Additional options to customize the HTTP request

        Returns:
            V1VideoProjectsGetResponseWithDownloads: The response from the AI Talking Photo API with the downloaded paths if `download_outputs` is True.

        Examples:
        ```py
        response = client.v1.ai_talking_photo.generate(
            assets={
                "audio_file_path": "path/to/audio.mp3",
                "image_file_path": "path/to/image.png",
            },
            end_seconds=30.0,
            start_seconds=5.0,
            style={"enhancement": "high"},
            wait_for_completion=True,
            download_outputs=True,
            download_directory="outputs/",
        )
        ```
        """

        file_client = FilesClient(base_client=self._base_client)

        audio_file_path = assets["audio_file_path"]
        image_file_path = assets["image_file_path"]
        assets["audio_file_path"] = file_client.upload_file(file=audio_file_path)
        assets["image_file_path"] = file_client.upload_file(file=image_file_path)

        create_response = self.create(
            assets=assets,
            end_seconds=end_seconds,
            start_seconds=start_seconds,
            name=name,
            style=style,
            request_options=request_options,
        )
        logger.info(f"AI Talking Photo response: {create_response}")

        video_projects_client = VideoProjectsClient(base_client=self._base_client)
        response = video_projects_client.check_result(
            id=create_response.id,
            wait_for_completion=wait_for_completion,
            download_outputs=download_outputs,
            download_directory=download_directory,
        )

        return response

    def create(
        self,
        *,
        assets: params.V1AiTalkingPhotoCreateBodyAssets,
        end_seconds: float,
        start_seconds: float,
        name: typing.Union[
            typing.Optional[str], type_utils.NotGiven
        ] = type_utils.NOT_GIVEN,
        style: typing.Union[
            typing.Optional[params.V1AiTalkingPhotoCreateBodyStyle], type_utils.NotGiven
        ] = type_utils.NOT_GIVEN,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> models.V1AiTalkingPhotoCreateResponse:
        """
        AI Talking Photo

        Create a talking photo from an image and audio or text input.

        POST /v1/ai-talking-photo

        Args:
            name: The name of image. This value is mainly used for your own identification of the image.
            style: Attributes used to dictate the style of the output
            assets: Provide the assets for creating a talking photo
            end_seconds: The end time of the input audio in seconds. The maximum duration allowed is 60 seconds.
            start_seconds: The start time of the input audio in seconds. The maximum duration allowed is 60 seconds.
            request_options: Additional options to customize the HTTP request

        Returns:
            Success

        Raises:
            ApiError: A custom exception class that provides additional context
                for API errors, including the HTTP status code and response body.

        Examples:
        ```py
        client.v1.ai_talking_photo.create(
            assets={
                "audio_file_path": "api-assets/id/1234.mp3",
                "image_file_path": "api-assets/id/1234.png",
            },
            end_seconds=15.0,
            start_seconds=0.0,
            name="Talking Photo image",
        )
        ```
        """
        _json = to_encodable(
            item={
                "name": name,
                "style": style,
                "assets": assets,
                "end_seconds": end_seconds,
                "start_seconds": start_seconds,
            },
            dump_with=params._SerializerV1AiTalkingPhotoCreateBody,
        )
        return self._base_client.request(
            method="POST",
            path="/v1/ai-talking-photo",
            auth_names=["bearerAuth"],
            json=_json,
            cast_to=models.V1AiTalkingPhotoCreateResponse,
            request_options=request_options or default_request_options(),
        )


class AsyncAiTalkingPhotoClient:
    def __init__(self, *, base_client: AsyncBaseClient):
        self._base_client = base_client

    async def generate(
        self,
        *,
        assets: params.V1AiTalkingPhotoGenerateBodyAssets,
        end_seconds: float,
        start_seconds: float,
        name: typing.Union[
            typing.Optional[str], type_utils.NotGiven
        ] = type_utils.NOT_GIVEN,
        style: typing.Union[
            typing.Optional[params.V1AiTalkingPhotoCreateBodyStyle], type_utils.NotGiven
        ] = type_utils.NOT_GIVEN,
        wait_for_completion: bool = True,
        download_outputs: bool = True,
        download_directory: typing.Optional[str] = None,
        request_options: typing.Optional[RequestOptions] = None,
    ):
        """
        Generate talking photo (alias for create with additional functionality).

        Create a talking photo from an image and audio or text input. Each generation costs credits.

        Args:
            name: The name of image. This value is mainly used for your own identification of the image.
            style: Attributes used to dictate the style of the output
            assets: Provide the assets for creating a talking photo
            end_seconds: The end time of the input audio in seconds. The maximum duration allowed is 60 seconds.
            start_seconds: The start time of the input audio in seconds. The maximum duration allowed is 60 seconds.
            wait_for_completion: Whether to wait for the video project to complete
            download_outputs: Whether to download the outputs
            download_directory: The directory to download the outputs to. If not provided, the outputs will be downloaded to the current working directory
            request_options: Additional options to customize the HTTP request

        Returns:
            V1VideoProjectsGetResponseWithDownloads: The response from the AI Talking Photo API with the downloaded paths if `download_outputs` is True.

        Examples:
        ```py
        response = await client.v1.ai_talking_photo.generate(
            assets={
                "audio_file_path": "path/to/audio.mp3",
                "image_file_path": "path/to/image.png",
            },
            end_seconds=30.0,
            start_seconds=5.0,
            style={"enhancement": "high"},
            wait_for_completion=True,
            download_outputs=True,
            download_directory="outputs/",
        )
        ```
        """

        file_client = AsyncFilesClient(base_client=self._base_client)

        audio_file_path = assets["audio_file_path"]
        image_file_path = assets["image_file_path"]
        assets["audio_file_path"] = await file_client.upload_file(file=audio_file_path)
        assets["image_file_path"] = await file_client.upload_file(file=image_file_path)

        create_response = await self.create(
            assets=assets,
            end_seconds=end_seconds,
            start_seconds=start_seconds,
            name=name,
            style=style,
            request_options=request_options,
        )
        logger.info(f"AI Talking Photo response: {create_response}")

        video_projects_client = AsyncVideoProjectsClient(base_client=self._base_client)
        response = await video_projects_client.check_result(
            id=create_response.id,
            wait_for_completion=wait_for_completion,
            download_outputs=download_outputs,
            download_directory=download_directory,
        )

        return response

    async def create(
        self,
        *,
        assets: params.V1AiTalkingPhotoCreateBodyAssets,
        end_seconds: float,
        start_seconds: float,
        name: typing.Union[
            typing.Optional[str], type_utils.NotGiven
        ] = type_utils.NOT_GIVEN,
        style: typing.Union[
            typing.Optional[params.V1AiTalkingPhotoCreateBodyStyle], type_utils.NotGiven
        ] = type_utils.NOT_GIVEN,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> models.V1AiTalkingPhotoCreateResponse:
        """
        AI Talking Photo

        Create a talking photo from an image and audio or text input.

        POST /v1/ai-talking-photo

        Args:
            name: The name of image. This value is mainly used for your own identification of the image.
            style: Attributes used to dictate the style of the output
            assets: Provide the assets for creating a talking photo
            end_seconds: The end time of the input audio in seconds. The maximum duration allowed is 60 seconds.
            start_seconds: The start time of the input audio in seconds. The maximum duration allowed is 60 seconds.
            request_options: Additional options to customize the HTTP request

        Returns:
            Success

        Raises:
            ApiError: A custom exception class that provides additional context
                for API errors, including the HTTP status code and response body.

        Examples:
        ```py
        await client.v1.ai_talking_photo.create(
            assets={
                "audio_file_path": "api-assets/id/1234.mp3",
                "image_file_path": "api-assets/id/1234.png",
            },
            end_seconds=15.0,
            start_seconds=0.0,
            name="Talking Photo image",
        )
        ```
        """
        _json = to_encodable(
            item={
                "name": name,
                "style": style,
                "assets": assets,
                "end_seconds": end_seconds,
                "start_seconds": start_seconds,
            },
            dump_with=params._SerializerV1AiTalkingPhotoCreateBody,
        )
        return await self._base_client.request(
            method="POST",
            path="/v1/ai-talking-photo",
            auth_names=["bearerAuth"],
            json=_json,
            cast_to=models.V1AiTalkingPhotoCreateResponse,
            request_options=request_options or default_request_options(),
        )
````
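
The `generate` helpers in this client wrap three calls: an upload via `FilesClient.upload_file`, the resource `create`, and `VideoProjectsClient.check_result` for polling and downloads. If you need to split those steps (the situation the README's tip alludes to), here is a rough sketch; it assumes the video projects resource is exposed on the client as `client.v1.video_projects`, which is not shown in this diff:

```python
# Hypothetical sketch of running the steps of generate() separately.
from os import getenv

from magic_hour import Client

client = Client(token=getenv("API_TOKEN"))

# 1) Trigger the generation without waiting (mirrors the create() call inside generate()).
create_res = client.v1.ai_talking_photo.create(
    assets={
        "audio_file_path": "api-assets/id/1234.mp3",
        "image_file_path": "api-assets/id/1234.png",
    },
    end_seconds=15.0,
    start_seconds=0.0,
)

# 2) Poll and download later (mirrors the check_result() call inside generate();
#    the client.v1.video_projects attribute is an assumption).
result = client.v1.video_projects.check_result(
    id=create_res.id,
    wait_for_completion=True,
    download_outputs=True,
    download_directory="outputs",
)
```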
New file: magic_hour/resources/v1/ai_voice_cloner/README.md (@@ -0,0 +1,62 @@)

# v1.ai_voice_cloner

## Module Functions

### AI Voice Cloner <a name="create"></a>

Clone a voice from an audio sample and generate speech.

- Each character costs 0.05 credits.
- The cost is rounded up to the nearest whole number.

**API Endpoint**: `POST /v1/ai-voice-cloner`

#### Parameters

| Parameter | Required | Description | Example |
| --- | :---: | --- | --- |
| `assets` | ✓ | Provide the assets for voice cloning. | `{"audio_file_path": "api-assets/id/1234.mp3"}` |
| `└─ audio_file_path` | ✓ | The audio used to clone the voice. This value is either a direct URL to the audio file or the `file_path` field from the response of the [upload urls API](https://docs.magichour.ai/api-reference/files/generate-asset-upload-urls). Please refer to the [Input File documentation](https://docs.magichour.ai/api-reference/files/generate-asset-upload-urls#input-file) to learn more. | `"api-assets/id/1234.mp3"` |
| `style` | ✓ | | `{"prompt": "Hello, this is my cloned voice."}` |
| `└─ prompt` | ✓ | Text used to generate speech from the cloned voice. The character limit is 1000 characters. | `"Hello, this is my cloned voice."` |
| `name` | ✗ | The name of the audio. This value is mainly used for your own identification of the audio. | `"Voice Cloner audio"` |

#### Synchronous Client

```python
from magic_hour import Client
from os import getenv

client = Client(token=getenv("API_TOKEN"))
res = client.v1.ai_voice_cloner.create(
    assets={"audio_file_path": "api-assets/id/1234.mp3"},
    style={"prompt": "Hello, this is my cloned voice."},
    name="Voice Cloner audio",
)
```

#### Asynchronous Client

```python
from magic_hour import AsyncClient
from os import getenv

client = AsyncClient(token=getenv("API_TOKEN"))
res = await client.v1.ai_voice_cloner.create(
    assets={"audio_file_path": "api-assets/id/1234.mp3"},
    style={"prompt": "Hello, this is my cloned voice."},
    name="Voice Cloner audio",
)
```

#### Response

##### Type

[V1AiVoiceClonerCreateResponse](/magic_hour/types/models/v1_ai_voice_cloner_create_response.py)

##### Example

```python
{"credits_charged": 1, "id": "cuid-example"}
```
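
Given the pricing stated above (0.05 credits per character, rounded up to a whole number), the charge for a prompt can be estimated locally before calling the endpoint. A minimal sketch; the helper name is ours, not part of the SDK:

```python
import math


def estimate_voice_cloner_credits(prompt: str) -> int:
    """Estimate credits for /v1/ai-voice-cloner: 0.05 credits per character, rounded up."""
    return math.ceil(len(prompt) * 0.05)


# For example, a 130-character prompt is charged ceil(130 * 0.05) = ceil(6.5) = 7 credits.
print(estimate_voice_cloner_credits("x" * 130))  # 7
```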