magic_hour 0.40.0__py3-none-any.whl → 0.44.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- magic_hour/README.md +2 -3
- magic_hour/environment.py +1 -1
- magic_hour/helpers/download.py +2 -0
- magic_hour/resources/v1/README.md +2 -3
- magic_hour/resources/v1/ai_clothes_changer/README.md +13 -14
- magic_hour/resources/v1/ai_face_editor/README.md +26 -27
- magic_hour/resources/v1/ai_gif_generator/README.md +12 -13
- magic_hour/resources/v1/ai_gif_generator/client.py +2 -2
- magic_hour/resources/v1/ai_headshot_generator/README.md +13 -14
- magic_hour/resources/v1/ai_headshot_generator/client.py +2 -2
- magic_hour/resources/v1/ai_image_editor/README.md +24 -17
- magic_hour/resources/v1/ai_image_editor/client.py +40 -10
- magic_hour/resources/v1/ai_image_generator/README.md +26 -18
- magic_hour/resources/v1/ai_image_generator/client.py +14 -6
- magic_hour/resources/v1/ai_image_upscaler/README.md +14 -15
- magic_hour/resources/v1/ai_meme_generator/README.md +12 -13
- magic_hour/resources/v1/ai_photo_editor/README.md +22 -23
- magic_hour/resources/v1/ai_qr_code_generator/README.md +13 -14
- magic_hour/resources/v1/ai_qr_code_generator/client.py +4 -4
- magic_hour/resources/v1/ai_talking_photo/README.md +16 -17
- magic_hour/resources/v1/ai_voice_cloner/README.md +62 -0
- magic_hour/resources/v1/ai_voice_cloner/__init__.py +4 -0
- magic_hour/resources/v1/ai_voice_cloner/client.py +272 -0
- magic_hour/resources/v1/ai_voice_generator/README.md +66 -10
- magic_hour/resources/v1/ai_voice_generator/client.py +122 -0
- magic_hour/resources/v1/animation/README.md +24 -25
- magic_hour/resources/v1/audio_projects/README.md +58 -13
- magic_hour/resources/v1/audio_projects/__init__.py +10 -2
- magic_hour/resources/v1/audio_projects/client.py +137 -0
- magic_hour/resources/v1/audio_projects/client_test.py +520 -0
- magic_hour/resources/v1/auto_subtitle_generator/README.md +15 -16
- magic_hour/resources/v1/client.py +6 -0
- magic_hour/resources/v1/face_detection/README.md +21 -20
- magic_hour/resources/v1/face_swap/README.md +23 -25
- magic_hour/resources/v1/face_swap/client.py +2 -2
- magic_hour/resources/v1/face_swap_photo/README.md +13 -14
- magic_hour/resources/v1/files/README.md +1 -5
- magic_hour/resources/v1/files/upload_urls/README.md +11 -10
- magic_hour/resources/v1/files/upload_urls/client.py +6 -4
- magic_hour/resources/v1/image_background_remover/README.md +11 -12
- magic_hour/resources/v1/image_projects/README.md +12 -16
- magic_hour/resources/v1/image_to_video/README.md +19 -21
- magic_hour/resources/v1/lip_sync/README.md +27 -21
- magic_hour/resources/v1/lip_sync/client.py +15 -0
- magic_hour/resources/v1/photo_colorizer/README.md +10 -11
- magic_hour/resources/v1/text_to_video/README.md +15 -17
- magic_hour/resources/v1/video_projects/README.md +12 -16
- magic_hour/resources/v1/video_to_video/README.md +24 -26
- magic_hour/types/models/__init__.py +2 -0
- magic_hour/types/models/v1_ai_voice_cloner_create_response.py +27 -0
- magic_hour/types/models/v1_audio_projects_get_response.py +1 -1
- magic_hour/types/models/v1_video_projects_get_response.py +1 -1
- magic_hour/types/params/__init__.py +26 -0
- magic_hour/types/params/v1_ai_image_editor_create_body_assets.py +18 -4
- magic_hour/types/params/v1_ai_image_editor_create_body_style.py +13 -0
- magic_hour/types/params/v1_ai_image_editor_generate_body_assets.py +12 -1
- magic_hour/types/params/v1_ai_image_generator_create_body_style.py +16 -0
- magic_hour/types/params/v1_ai_talking_photo_create_body_style.py +6 -4
- magic_hour/types/params/v1_ai_voice_cloner_create_body.py +49 -0
- magic_hour/types/params/v1_ai_voice_cloner_create_body_assets.py +33 -0
- magic_hour/types/params/v1_ai_voice_cloner_create_body_style.py +28 -0
- magic_hour/types/params/v1_ai_voice_cloner_generate_body_assets.py +28 -0
- magic_hour/types/params/v1_ai_voice_generator_create_body_style.py +382 -2
- magic_hour/types/params/v1_face_swap_create_body_style.py +1 -1
- magic_hour/types/params/v1_files_upload_urls_create_body_items_item.py +1 -1
- magic_hour/types/params/v1_lip_sync_create_body.py +12 -0
- magic_hour/types/params/v1_lip_sync_create_body_style.py +37 -0
- magic_hour/types/params/v1_video_to_video_create_body.py +1 -1
- magic_hour/types/params/v1_video_to_video_create_body_style.py +32 -4
- {magic_hour-0.40.0.dist-info → magic_hour-0.44.0.dist-info}/METADATA +77 -62
- {magic_hour-0.40.0.dist-info → magic_hour-0.44.0.dist-info}/RECORD +73 -63
- {magic_hour-0.40.0.dist-info → magic_hour-0.44.0.dist-info}/LICENSE +0 -0
- {magic_hour-0.40.0.dist-info → magic_hour-0.44.0.dist-info}/WHEEL +0 -0
magic_hour/resources/v1/video_to_video/README.md

@@ -2,8 +2,6 @@
 
 ## Module Functions
 
-
-
 <!-- CUSTOM DOCS START -->
 
 ### Video To Video Generate Workflow <a name="generate"></a>
@@ -79,35 +77,35 @@ res = await client.v1.video_to_video.generate(
 ```
 
 <!-- CUSTOM DOCS END -->
+
 ### Video-to-Video <a name="create"></a>
 
 Create a Video To Video video. The estimated frame cost is calculated using 30 FPS. This amount is deducted from your account balance when a video is queued. Once the video is complete, the cost will be updated based on the actual number of frames rendered.
-
+
 Get more information about this mode at our [product page](https://magichour.ai/products/video-to-video).
-
 
 **API Endpoint**: `POST /v1/video-to-video`
 
 #### Parameters
 
-| Parameter
-
-| `assets`
-| `└─ video_file_path` |
-| `└─ video_source`
-| `└─ youtube_url`
-| `end_seconds`
-| `start_seconds`
-| `style`
-| `└─ art_style`
-| `└─ model`
-| `└─ prompt`
-| `└─ prompt_type`
-| `└─ version`
-| `fps_resolution`
-| `height`
-| `name`
-| `width`
+| Parameter | Required | Deprecated | Description | Example |
+| --------- | :------: | :--------: | ----------- | ------- |
+| `assets` | ✓ | ✗ | Provide the assets for video-to-video. For video, The `video_source` field determines whether `video_file_path` or `youtube_url` field is used | `{"video_file_path": "api-assets/id/1234.mp4", "video_source": "file"}` |
+| `└─ video_file_path` | ✗ | — | Required if `video_source` is `file`. This value is either - a direct URL to the video file - `file_path` field from the response of the [upload urls API](https://docs.magichour.ai/api-reference/files/generate-asset-upload-urls). Please refer to the [Input File documentation](https://docs.magichour.ai/api-reference/files/generate-asset-upload-urls#input-file) to learn more. | `"api-assets/id/1234.mp4"` |
+| `└─ video_source` | ✓ | — | | `"file"` |
+| `└─ youtube_url` | ✗ | — | Using a youtube video as the input source. This field is required if `video_source` is `youtube` | `"http://www.example.com"` |
+| `end_seconds` | ✓ | ✗ | The end time of the input video in seconds. This value is used to trim the input video. The value must be greater than 0.1, and more than the start_seconds. | `15.0` |
+| `start_seconds` | ✓ | ✗ | The start time of the input video in seconds. This value is used to trim the input video. The value must be greater than 0. | `0.0` |
+| `style` | ✓ | ✗ | | `{"art_style": "3D Render", "model": "default", "prompt_type": "default", "version": "default"}` |
+| `└─ art_style` | ✓ | — | | `"3D Render"` |
+| `└─ model` | ✗ | — | * `Dreamshaper` - a good all-around model that works for both animations as well as realism. * `Absolute Reality` - better at realism, but you'll often get similar results with Dreamshaper as well. * `Flat 2D Anime` - best for a flat illustration style that's common in most anime. * `default` - use the default recommended model for the selected art style. | `"default"` |
+| `└─ prompt` | ✗ | — | The prompt used for the video. Prompt is required if `prompt_type` is `custom` or `append_default`. If `prompt_type` is `default`, then the `prompt` value passed will be ignored. | `"string"` |
+| `└─ prompt_type` | ✗ | — | * `default` - Use the default recommended prompt for the art style. * `custom` - Only use the prompt passed in the API. Note: for v1, lora prompt will still be auto added to apply the art style properly. * `append_default` - Add the default recommended prompt to the end of the prompt passed in the API. | `"default"` |
+| `└─ version` | ✗ | — | * `v1` - more detail, closer prompt adherence, and frame-by-frame previews. * `v2` - faster, more consistent, and less noisy. * `default` - use the default version for the selected art style. | `"default"` |
+| `fps_resolution` | ✗ | ✗ | Determines whether the resulting video will have the same frame per second as the original video, or half. * `FULL` - the result video will have the same FPS as the input video * `HALF` - the result video will have half the FPS as the input video | `"HALF"` |
+| `height` | ✗ | ✓ | `height` is deprecated and no longer influences the output video's resolution. Output resolution is determined by the **minimum** of: - The resolution of the input video - The maximum resolution allowed by your subscription tier. See our [pricing page](https://magichour.ai/pricing) for more details. This field is retained only for backward compatibility and will be removed in a future release. | `123` |
+| `name` | ✗ | ✗ | The name of video. This value is mainly used for your own identification of the video. | `"Video To Video video"` |
+| `width` | ✗ | ✓ | `width` is deprecated and no longer influences the output video's resolution. Output resolution is determined by the **minimum** of: - The resolution of the input video - The maximum resolution allowed by your subscription tier. See our [pricing page](https://magichour.ai/pricing) for more details. This field is retained only for backward compatibility and will be removed in a future release. | `123` |
 
 #### Synchronous Client
 
@@ -129,7 +127,6 @@ res = client.v1.video_to_video.create(
     fps_resolution="HALF",
     name="Video To Video video",
 )
-
 ```
 
 #### Asynchronous Client
@@ -152,15 +149,16 @@ res = await client.v1.video_to_video.create(
     fps_resolution="HALF",
     name="Video To Video video",
 )
-
 ```
 
 #### Response
 
 ##### Type
+
 [V1VideoToVideoCreateResponse](/magic_hour/types/models/v1_video_to_video_create_response.py)
 
 ##### Example
-`{"credits_charged": 450, "estimated_frame_cost": 450, "id": "cuid-example"}`
-
 
+```python
+{"credits_charged": 450, "estimated_frame_cost": 450, "id": "cuid-example"}
+```
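For orientation, the new parameter table maps directly onto the README's `create` snippets. A minimal sketch of the documented call, using only values from the table above; the `Client` import and token handling follow the SDK's top-level README conventions and are assumptions here, not shown in this diff:

```python
from magic_hour import Client  # assumed top-level export, per the SDK README

client = Client(token="YOUR_API_KEY")

res = client.v1.video_to_video.create(
    assets={
        # `video_source` selects between `video_file_path` and `youtube_url`
        "video_file_path": "api-assets/id/1234.mp4",
        "video_source": "file",
    },
    start_seconds=0.0,
    end_seconds=15.0,  # must be greater than start_seconds
    style={
        "art_style": "3D Render",
        "model": "default",
        "prompt_type": "default",
        "version": "default",
    },
    fps_resolution="HALF",
    name="Video To Video video",
)
# Response fields per the example above:
print(res.id, res.credits_charged, res.estimated_frame_cost)
```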
magic_hour/types/models/__init__.py

@@ -11,6 +11,7 @@ from .v1_ai_meme_generator_create_response import V1AiMemeGeneratorCreateResponse
 from .v1_ai_photo_editor_create_response import V1AiPhotoEditorCreateResponse
 from .v1_ai_qr_code_generator_create_response import V1AiQrCodeGeneratorCreateResponse
 from .v1_ai_talking_photo_create_response import V1AiTalkingPhotoCreateResponse
+from .v1_ai_voice_cloner_create_response import V1AiVoiceClonerCreateResponse
 from .v1_ai_voice_generator_create_response import V1AiVoiceGeneratorCreateResponse
 from .v1_animation_create_response import V1AnimationCreateResponse
 from .v1_audio_projects_get_response import V1AudioProjectsGetResponse
@@ -65,6 +66,7 @@ __all__ = [
     "V1AiPhotoEditorCreateResponse",
     "V1AiQrCodeGeneratorCreateResponse",
     "V1AiTalkingPhotoCreateResponse",
+    "V1AiVoiceClonerCreateResponse",
     "V1AiVoiceGeneratorCreateResponse",
     "V1AnimationCreateResponse",
     "V1AudioProjectsGetResponse",
magic_hour/types/models/v1_ai_voice_cloner_create_response.py (new file)

@@ -0,0 +1,27 @@
+import pydantic
+
+
+class V1AiVoiceClonerCreateResponse(pydantic.BaseModel):
+    """
+    Success
+    """
+
+    model_config = pydantic.ConfigDict(
+        arbitrary_types_allowed=True,
+        populate_by_name=True,
+    )
+
+    credits_charged: int = pydantic.Field(
+        alias="credits_charged",
+    )
+    """
+    The amount of credits deducted from your account to generate the audio. We charge credits right when the request is made.
+
+    If an error occurred while generating the audio, credits will be refunded and this field will be updated to include the refund.
+    """
+    id: str = pydantic.Field(
+        alias="id",
+    )
+    """
+    Unique ID of the audio. This value can be used in the [get audio project API](https://docs.magichour.ai/api-reference/audio-projects/get-audio-details) to fetch additional details such as status
+    """
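Since the models in this package use pydantic v2 (`pydantic.ConfigDict`), the new response type validates a raw payload directly. A minimal sketch; the field values here are illustrative, not from this diff:

```python
from magic_hour.types.models import V1AiVoiceClonerCreateResponse

# Illustrative payload; the API returns these same two fields.
res = V1AiVoiceClonerCreateResponse.model_validate(
    {"credits_charged": 100, "id": "cuid-example"}
)
print(res.credits_charged, res.id)
```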
magic_hour/types/models/v1_video_projects_get_response.py

@@ -107,7 +107,7 @@ class V1VideoProjectsGetResponse(pydantic.BaseModel):
         alias="type",
     )
     """
-    The type of the video project. Possible values are ANIMATION, IMAGE_TO_VIDEO, VIDEO_TO_VIDEO, TEXT_TO_VIDEO, FACE_SWAP, LIP_SYNC, AUTO_SUBTITLE, TALKING_PHOTO
+    The type of the video project. Possible values are ANIMATION, IMAGE_TO_VIDEO, VIDEO_TO_VIDEO, TEXT_TO_VIDEO, FACE_SWAP, LIP_SYNC, AUTO_SUBTITLE, TALKING_PHOTO, UGC_AD
     """
     width: int = pydantic.Field(
         alias="width",
magic_hour/types/params/__init__.py

@@ -123,6 +123,19 @@ from .v1_ai_talking_photo_create_body_style import (
     _SerializerV1AiTalkingPhotoCreateBodyStyle,
 )
 from .v1_ai_talking_photo_generate_body_assets import V1AiTalkingPhotoGenerateBodyAssets
+from .v1_ai_voice_cloner_create_body import (
+    V1AiVoiceClonerCreateBody,
+    _SerializerV1AiVoiceClonerCreateBody,
+)
+from .v1_ai_voice_cloner_create_body_assets import (
+    V1AiVoiceClonerCreateBodyAssets,
+    _SerializerV1AiVoiceClonerCreateBodyAssets,
+)
+from .v1_ai_voice_cloner_create_body_style import (
+    V1AiVoiceClonerCreateBodyStyle,
+    _SerializerV1AiVoiceClonerCreateBodyStyle,
+)
+from .v1_ai_voice_cloner_generate_body_assets import V1AiVoiceClonerGenerateBodyAssets
 from .v1_ai_voice_generator_create_body import (
     V1AiVoiceGeneratorCreateBody,
     _SerializerV1AiVoiceGeneratorCreateBody,
@@ -245,6 +258,10 @@ from .v1_lip_sync_create_body_assets import (
     V1LipSyncCreateBodyAssets,
     _SerializerV1LipSyncCreateBodyAssets,
 )
+from .v1_lip_sync_create_body_style import (
+    V1LipSyncCreateBodyStyle,
+    _SerializerV1LipSyncCreateBodyStyle,
+)
 from .v1_lip_sync_generate_body_assets import V1LipSyncGenerateBodyAssets
 from .v1_photo_colorizer_create_body import (
     V1PhotoColorizerCreateBody,
@@ -314,6 +331,10 @@ __all__ = [
     "V1AiTalkingPhotoCreateBodyAssets",
     "V1AiTalkingPhotoCreateBodyStyle",
     "V1AiTalkingPhotoGenerateBodyAssets",
+    "V1AiVoiceClonerCreateBody",
+    "V1AiVoiceClonerCreateBodyAssets",
+    "V1AiVoiceClonerCreateBodyStyle",
+    "V1AiVoiceClonerGenerateBodyAssets",
     "V1AiVoiceGeneratorCreateBody",
     "V1AiVoiceGeneratorCreateBodyStyle",
     "V1AnimationCreateBody",
@@ -350,6 +371,7 @@ __all__ = [
     "V1ImageToVideoGenerateBodyAssets",
     "V1LipSyncCreateBody",
     "V1LipSyncCreateBodyAssets",
+    "V1LipSyncCreateBodyStyle",
     "V1LipSyncGenerateBodyAssets",
     "V1PhotoColorizerCreateBody",
     "V1PhotoColorizerCreateBodyAssets",
@@ -388,6 +410,9 @@ __all__ = [
     "_SerializerV1AiTalkingPhotoCreateBody",
     "_SerializerV1AiTalkingPhotoCreateBodyAssets",
     "_SerializerV1AiTalkingPhotoCreateBodyStyle",
+    "_SerializerV1AiVoiceClonerCreateBody",
+    "_SerializerV1AiVoiceClonerCreateBodyAssets",
+    "_SerializerV1AiVoiceClonerCreateBodyStyle",
     "_SerializerV1AiVoiceGeneratorCreateBody",
     "_SerializerV1AiVoiceGeneratorCreateBodyStyle",
     "_SerializerV1AnimationCreateBody",
@@ -415,6 +440,7 @@ __all__ = [
     "_SerializerV1ImageToVideoCreateBodyStyle",
     "_SerializerV1LipSyncCreateBody",
     "_SerializerV1LipSyncCreateBodyAssets",
+    "_SerializerV1LipSyncCreateBodyStyle",
     "_SerializerV1PhotoColorizerCreateBody",
     "_SerializerV1PhotoColorizerCreateBodyAssets",
     "_SerializerV1TextToVideoCreateBody",
magic_hour/types/params/v1_ai_image_editor_create_body_assets.py

@@ -1,4 +1,5 @@
 import pydantic
+import typing
 import typing_extensions
 
 
@@ -7,9 +8,19 @@ class V1AiImageEditorCreateBodyAssets(typing_extensions.TypedDict):
     Provide the assets for image edit
     """
 
-    image_file_path: typing_extensions.Required[str]
+    image_file_path: typing_extensions.NotRequired[str]
     """
-    The image used in the edit. This value is either
+    Deprecated: Please use `image_file_paths` instead as edits with multiple images are now supported. The image used in the edit. This value is either
+    - a direct URL to the video file
+    - `file_path` field from the response of the [upload urls API](https://docs.magichour.ai/api-reference/files/generate-asset-upload-urls).
+
+    Please refer to the [Input File documentation](https://docs.magichour.ai/api-reference/files/generate-asset-upload-urls#input-file) to learn more.
+
+    """
+
+    image_file_paths: typing_extensions.NotRequired[typing.List[str]]
+    """
+    The image(s) used in the edit, maximum of 10 images. This value is either
     - a direct URL to the video file
     - `file_path` field from the response of the [upload urls API](https://docs.magichour.ai/api-reference/files/generate-asset-upload-urls).
 
@@ -28,6 +39,9 @@ class _SerializerV1AiImageEditorCreateBodyAssets(pydantic.BaseModel):
         populate_by_name=True,
     )
 
-    image_file_path: str = pydantic.Field(
-        alias="image_file_path",
+    image_file_path: typing.Optional[str] = pydantic.Field(
+        alias="image_file_path", default=None
+    )
+    image_file_paths: typing.Optional[typing.List[str]] = pydantic.Field(
+        alias="image_file_paths", default=None
     )
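The practical effect: `image_file_path` becomes optional and deprecated in favor of a list field. A sketch of both forms, assuming these TypedDicts are re-exported from `magic_hour.types.params` as the package's `__init__` pattern suggests:

```python
from magic_hour.types.params import V1AiImageEditorCreateBodyAssets

# Deprecated single-image form (still accepted, now NotRequired):
legacy: V1AiImageEditorCreateBodyAssets = {
    "image_file_path": "api-assets/id/1234.png"
}

# Preferred multi-image form, up to 10 images:
assets: V1AiImageEditorCreateBodyAssets = {
    "image_file_paths": ["api-assets/id/1234.png", "api-assets/id/5678.png"]
}
```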
magic_hour/types/params/v1_ai_image_editor_create_body_style.py

@@ -1,4 +1,5 @@
 import pydantic
+import typing
 import typing_extensions
 
 
@@ -7,6 +8,15 @@ class V1AiImageEditorCreateBodyStyle(typing_extensions.TypedDict):
     V1AiImageEditorCreateBodyStyle
     """
 
+    model: typing_extensions.NotRequired[
+        typing_extensions.Literal["Nano Banana", "Seedream", "default"]
+    ]
+    """
+    The AI model to use for image editing. * `Nano Banana` - Precise, realistic edits with consistent results
+    * `Seedream` - Creative, imaginative images with artistic freedom
+    * `default` - Use the model we recommend, which will change over time. This is recommended unless you need a specific model. This is the default behavior.
+    """
+
     prompt: typing_extensions.Required[str]
     """
     The prompt used to edit the image.
@@ -23,6 +33,9 @@ class _SerializerV1AiImageEditorCreateBodyStyle(pydantic.BaseModel):
         populate_by_name=True,
     )
 
+    model: typing.Optional[
+        typing_extensions.Literal["Nano Banana", "Seedream", "default"]
+    ] = pydantic.Field(alias="model", default=None)
     prompt: str = pydantic.Field(
         alias="prompt",
     )
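A sketch of pinning the new `model` option (same `magic_hour.types.params` re-export assumption as above):

```python
from magic_hour.types.params import V1AiImageEditorCreateBodyStyle

style: V1AiImageEditorCreateBodyStyle = {
    "prompt": "replace the sky with a pink sunset",
    "model": "Seedream",  # or "Nano Banana"; omit or pass "default" to let the API choose
}
```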
magic_hour/types/params/v1_ai_image_editor_generate_body_assets.py

@@ -7,11 +7,22 @@ class V1AiImageEditorGenerateBodyAssets(typing_extensions.TypedDict):
     Provide the assets for image edit
     """
 
-    image_file_path: typing_extensions.Required[str]
+    image_file_path: typing_extensions.NotRequired[str]
     """
+    Deprecated: Please use `image_file_paths` instead as edits with multiple images are now supported.
+
     The image used in the edit. This value is either
     - a direct URL to the image file
     - a path to a local file
 
     Note: if the path begins with `api-assets`, it will be assumed to already be uploaded to Magic Hour's storage, and will not be uploaded again.
     """
+
+    image_file_paths: typing_extensions.NotRequired[typing_extensions.List[str]]
+    """
+    The image(s) used in the edit, maximum of 10 images. This value is either
+    - a direct URL to the video file
+    - a path to a local file
+
+    Note: if the path begins with `api-assets`, it will be assumed to already be uploaded to Magic Hour's storage, and will not be uploaded again.
+    """
magic_hour/types/params/v1_ai_image_generator_create_body_style.py

@@ -13,6 +13,19 @@ class V1AiImageGeneratorCreateBodyStyle(typing_extensions.TypedDict):
     The prompt used for the image(s).
     """
 
+    quality_mode: typing_extensions.NotRequired[
+        typing_extensions.Literal["pro", "standard"]
+    ]
+    """
+    Controls the quality of the generated image. Defaults to 'standard' if not specified.
+
+    **Options:**
+    - `standard` - Standard quality generation. Cost: 5 credits per image.
+    - `pro` - Pro quality generation with enhanced details and quality. Cost: 30 credits per image.
+
+    Note: Pro mode is available for users on Creator, Pro, or Business tier.
+    """
+
     tool: typing_extensions.NotRequired[
         typing_extensions.Literal[
             "ai-anime-generator",
@@ -70,6 +83,9 @@ class _SerializerV1AiImageGeneratorCreateBodyStyle(pydantic.BaseModel):
     prompt: str = pydantic.Field(
         alias="prompt",
     )
+    quality_mode: typing.Optional[typing_extensions.Literal["pro", "standard"]] = (
+        pydantic.Field(alias="quality_mode", default=None)
+    )
     tool: typing.Optional[
         typing_extensions.Literal[
             "ai-anime-generator",
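A sketch of opting into the new `quality_mode` (same re-export assumption):

```python
from magic_hour.types.params import V1AiImageGeneratorCreateBodyStyle

style: V1AiImageGeneratorCreateBodyStyle = {
    "prompt": "a watercolor fox in a misty forest",
    "quality_mode": "pro",  # 30 credits/image; Creator, Pro, or Business tier only
}
```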
magic_hour/types/params/v1_ai_talking_photo_create_body_style.py

@@ -9,12 +9,14 @@ class V1AiTalkingPhotoCreateBodyStyle(typing_extensions.TypedDict):
     """
 
     generation_mode: typing_extensions.NotRequired[
-        typing_extensions.Literal["expressive", "pro", "stable"]
+        typing_extensions.Literal["expressive", "pro", "stable", "standard"]
     ]
     """
     Controls overall motion style.
-    * `pro` -
-    * `
+    * `pro` - Higher fidelity, realistic detail, accurate lip sync, and faster generation.
+    * `standard` - More expressive motion, but lower visual fidelity.
+
+    * `expressive` - More motion and facial expressiveness; may introduce visual artifacts. (Deprecated: passing this value will be treated as `standard`)
     * `stable` - Reduced motion for cleaner output; may result in minimal animation. (Deprecated: passing this value will be treated as `pro`)
     """
 
@@ -37,6 +39,6 @@ class _SerializerV1AiTalkingPhotoCreateBodyStyle(pydantic.BaseModel):
     )
 
     generation_mode: typing.Optional[
-        typing_extensions.Literal["expressive", "pro", "stable"]
+        typing_extensions.Literal["expressive", "pro", "stable", "standard"]
     ] = pydantic.Field(alias="generation_mode", default=None)
     intensity: typing.Optional[float] = pydantic.Field(alias="intensity", default=None)
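A sketch of the updated `generation_mode` values (same re-export assumption; `intensity` appears alongside it in the serializer above):

```python
from magic_hour.types.params import V1AiTalkingPhotoCreateBodyStyle

style: V1AiTalkingPhotoCreateBodyStyle = {
    "generation_mode": "standard",  # "expressive" is now treated as "standard", "stable" as "pro"
    "intensity": 1.5,
}
```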
magic_hour/types/params/v1_ai_voice_cloner_create_body.py (new file)

@@ -0,0 +1,49 @@
+import pydantic
+import typing
+import typing_extensions
+
+from .v1_ai_voice_cloner_create_body_assets import (
+    V1AiVoiceClonerCreateBodyAssets,
+    _SerializerV1AiVoiceClonerCreateBodyAssets,
+)
+from .v1_ai_voice_cloner_create_body_style import (
+    V1AiVoiceClonerCreateBodyStyle,
+    _SerializerV1AiVoiceClonerCreateBodyStyle,
+)
+
+
+class V1AiVoiceClonerCreateBody(typing_extensions.TypedDict):
+    """
+    V1AiVoiceClonerCreateBody
+    """
+
+    assets: typing_extensions.Required[V1AiVoiceClonerCreateBodyAssets]
+    """
+    Provide the assets for voice cloning.
+    """
+
+    name: typing_extensions.NotRequired[str]
+    """
+    The name of audio. This value is mainly used for your own identification of the audio.
+    """
+
+    style: typing_extensions.Required[V1AiVoiceClonerCreateBodyStyle]
+
+
+class _SerializerV1AiVoiceClonerCreateBody(pydantic.BaseModel):
+    """
+    Serializer for V1AiVoiceClonerCreateBody handling case conversions
+    and file omissions as dictated by the API
+    """
+
+    model_config = pydantic.ConfigDict(
+        populate_by_name=True,
+    )
+
+    assets: _SerializerV1AiVoiceClonerCreateBodyAssets = pydantic.Field(
+        alias="assets",
+    )
+    name: typing.Optional[str] = pydantic.Field(alias="name", default=None)
+    style: _SerializerV1AiVoiceClonerCreateBodyStyle = pydantic.Field(
+        alias="style",
+    )
magic_hour/types/params/v1_ai_voice_cloner_create_body_assets.py (new file)

@@ -0,0 +1,33 @@
+import pydantic
+import typing_extensions
+
+
+class V1AiVoiceClonerCreateBodyAssets(typing_extensions.TypedDict):
+    """
+    Provide the assets for voice cloning.
+    """
+
+    audio_file_path: typing_extensions.Required[str]
+    """
+    The audio used to clone the voice. This value is either
+    - a direct URL to the video file
+    - `file_path` field from the response of the [upload urls API](https://docs.magichour.ai/api-reference/files/generate-asset-upload-urls).
+
+    Please refer to the [Input File documentation](https://docs.magichour.ai/api-reference/files/generate-asset-upload-urls#input-file) to learn more.
+
+    """
+
+
+class _SerializerV1AiVoiceClonerCreateBodyAssets(pydantic.BaseModel):
+    """
+    Serializer for V1AiVoiceClonerCreateBodyAssets handling case conversions
+    and file omissions as dictated by the API
+    """
+
+    model_config = pydantic.ConfigDict(
+        populate_by_name=True,
+    )
+
+    audio_file_path: str = pydantic.Field(
+        alias="audio_file_path",
+    )
magic_hour/types/params/v1_ai_voice_cloner_create_body_style.py (new file)

@@ -0,0 +1,28 @@
+import pydantic
+import typing_extensions
+
+
+class V1AiVoiceClonerCreateBodyStyle(typing_extensions.TypedDict):
+    """
+    V1AiVoiceClonerCreateBodyStyle
+    """
+
+    prompt: typing_extensions.Required[str]
+    """
+    Text used to generate speech from the cloned voice. The character limit is 1000 characters.
+    """
+
+
+class _SerializerV1AiVoiceClonerCreateBodyStyle(pydantic.BaseModel):
+    """
+    Serializer for V1AiVoiceClonerCreateBodyStyle handling case conversions
+    and file omissions as dictated by the API
+    """
+
+    model_config = pydantic.ConfigDict(
+        populate_by_name=True,
+    )
+
+    prompt: str = pydantic.Field(
+        alias="prompt",
+    )
magic_hour/types/params/v1_ai_voice_cloner_generate_body_assets.py (new file)

@@ -0,0 +1,28 @@
+import pydantic
+import typing_extensions
+
+
+class V1AiVoiceClonerGenerateBodyAssets(typing_extensions.TypedDict):
+    """
+    Provide the assets for voice cloning.
+    """
+
+    audio_file_path: typing_extensions.Required[str]
+    """
+    The audio used to clone the voice. This can be a local file path or URL.
+    """
+
+
+class _SerializerV1AiVoiceClonerGenerateBodyAssets(pydantic.BaseModel):
+    """
+    Serializer for V1AiVoiceClonerGenerateBodyAssets handling case conversions
+    and file omissions as dictated by the API
+    """
+
+    model_config = pydantic.ConfigDict(
+        populate_by_name=True,
+    )
+
+    audio_file_path: str = pydantic.Field(
+        alias="audio_file_path",
+    )
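Putting the new voice-cloner types together: a minimal sketch that assumes the new `ai_voice_cloner` resource exposes the same `create()` pattern as the SDK's other v1 resources (its `client.py` is added in this release, but its method signatures are not shown in this diff):

```python
from magic_hour import Client  # assumed top-level export, per the SDK README

client = Client(token="YOUR_API_KEY")

# Body fields come from the new params types above.
res = client.v1.ai_voice_cloner.create(
    assets={"audio_file_path": "api-assets/id/voice-sample.mp3"},
    style={"prompt": "Hello from my cloned voice!"},  # max 1000 characters
    name="Voice Cloner audio",
)
print(res.id, res.credits_charged)  # fields from V1AiVoiceClonerCreateResponse
```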