magic_hour 0.33.1-py3-none-any.whl → 0.35.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- magic_hour/environment.py +1 -1
- magic_hour/resources/v1/ai_clothes_changer/README.md +5 -5
- magic_hour/resources/v1/ai_clothes_changer/client.py +4 -4
- magic_hour/resources/v1/ai_face_editor/README.md +2 -2
- magic_hour/resources/v1/ai_face_editor/client.py +2 -2
- magic_hour/resources/v1/ai_gif_generator/README.md +2 -2
- magic_hour/resources/v1/ai_gif_generator/client.py +2 -2
- magic_hour/resources/v1/ai_headshot_generator/README.md +3 -3
- magic_hour/resources/v1/ai_headshot_generator/client.py +2 -2
- magic_hour/resources/v1/ai_image_editor/README.md +2 -2
- magic_hour/resources/v1/ai_image_editor/client.py +2 -2
- magic_hour/resources/v1/ai_image_generator/README.md +5 -5
- magic_hour/resources/v1/ai_image_generator/client.py +8 -8
- magic_hour/resources/v1/ai_image_upscaler/README.md +3 -3
- magic_hour/resources/v1/ai_image_upscaler/client.py +8 -4
- magic_hour/resources/v1/ai_meme_generator/README.md +1 -1
- magic_hour/resources/v1/ai_photo_editor/README.md +2 -2
- magic_hour/resources/v1/ai_photo_editor/client.py +2 -2
- magic_hour/resources/v1/ai_qr_code_generator/README.md +2 -2
- magic_hour/resources/v1/ai_qr_code_generator/client.py +2 -2
- magic_hour/resources/v1/ai_talking_photo/README.md +2 -2
- magic_hour/resources/v1/ai_talking_photo/client.py +2 -2
- magic_hour/resources/v1/animation/README.md +8 -8
- magic_hour/resources/v1/animation/client.py +8 -8
- magic_hour/resources/v1/auto_subtitle_generator/README.md +4 -4
- magic_hour/resources/v1/auto_subtitle_generator/client.py +6 -6
- magic_hour/resources/v1/face_detection/README.md +10 -6
- magic_hour/resources/v1/face_detection/client.py +12 -4
- magic_hour/resources/v1/face_swap/README.md +6 -10
- magic_hour/resources/v1/face_swap/client.py +26 -34
- magic_hour/resources/v1/face_swap_photo/README.md +2 -2
- magic_hour/resources/v1/face_swap_photo/client.py +2 -2
- magic_hour/resources/v1/files/upload_urls/README.md +7 -6
- magic_hour/resources/v1/files/upload_urls/client.py +14 -12
- magic_hour/resources/v1/image_background_remover/README.md +2 -2
- magic_hour/resources/v1/image_background_remover/client.py +2 -2
- magic_hour/resources/v1/image_projects/README.md +8 -8
- magic_hour/resources/v1/image_projects/client.py +10 -10
- magic_hour/resources/v1/image_to_video/README.md +7 -5
- magic_hour/resources/v1/image_to_video/client.py +34 -12
- magic_hour/resources/v1/lip_sync/README.md +6 -10
- magic_hour/resources/v1/lip_sync/client.py +26 -34
- magic_hour/resources/v1/photo_colorizer/README.md +2 -2
- magic_hour/resources/v1/photo_colorizer/client.py +2 -2
- magic_hour/resources/v1/text_to_video/README.md +6 -4
- magic_hour/resources/v1/text_to_video/client.py +18 -4
- magic_hour/resources/v1/video_projects/README.md +7 -7
- magic_hour/resources/v1/video_projects/client.py +8 -8
- magic_hour/resources/v1/video_to_video/README.md +11 -15
- magic_hour/resources/v1/video_to_video/client.py +30 -38
- magic_hour/types/models/v1_face_detection_create_response.py +1 -1
- magic_hour/types/models/v1_face_detection_get_response.py +1 -1
- magic_hour/types/models/v1_files_upload_urls_create_response.py +3 -0
- magic_hour/types/models/v1_video_projects_get_response.py +2 -2
- magic_hour/types/params/v1_ai_clothes_changer_create_body.py +1 -1
- magic_hour/types/params/v1_ai_clothes_changer_create_body_assets.py +15 -2
- magic_hour/types/params/v1_ai_face_editor_create_body.py +1 -1
- magic_hour/types/params/v1_ai_face_editor_create_body_assets.py +6 -1
- magic_hour/types/params/v1_ai_face_editor_create_body_style.py +44 -47
- magic_hour/types/params/v1_ai_gif_generator_create_body.py +1 -1
- magic_hour/types/params/v1_ai_headshot_generator_create_body.py +1 -1
- magic_hour/types/params/v1_ai_headshot_generator_create_body_assets.py +6 -1
- magic_hour/types/params/v1_ai_headshot_generator_create_body_style.py +1 -1
- magic_hour/types/params/v1_ai_image_editor_create_body.py +1 -1
- magic_hour/types/params/v1_ai_image_editor_create_body_assets.py +6 -1
- magic_hour/types/params/v1_ai_image_generator_create_body.py +8 -2
- magic_hour/types/params/v1_ai_image_generator_create_body_style.py +2 -2
- magic_hour/types/params/v1_ai_image_upscaler_create_body.py +4 -2
- magic_hour/types/params/v1_ai_image_upscaler_create_body_assets.py +6 -1
- magic_hour/types/params/v1_ai_photo_editor_create_body.py +1 -1
- magic_hour/types/params/v1_ai_photo_editor_create_body_assets.py +6 -1
- magic_hour/types/params/v1_ai_qr_code_generator_create_body.py +1 -1
- magic_hour/types/params/v1_ai_talking_photo_create_body.py +1 -1
- magic_hour/types/params/v1_ai_talking_photo_create_body_assets.py +12 -2
- magic_hour/types/params/v1_animation_create_body.py +2 -2
- magic_hour/types/params/v1_animation_create_body_assets.py +12 -2
- magic_hour/types/params/v1_animation_create_body_style.py +10 -7
- magic_hour/types/params/v1_auto_subtitle_generator_create_body.py +3 -3
- magic_hour/types/params/v1_auto_subtitle_generator_create_body_assets.py +6 -1
- magic_hour/types/params/v1_face_detection_create_body_assets.py +6 -1
- magic_hour/types/params/v1_face_swap_create_body.py +15 -17
- magic_hour/types/params/v1_face_swap_create_body_assets.py +12 -2
- magic_hour/types/params/v1_face_swap_create_body_assets_face_mappings_item.py +6 -1
- magic_hour/types/params/v1_face_swap_photo_create_body.py +1 -1
- magic_hour/types/params/v1_face_swap_photo_create_body_assets.py +12 -2
- magic_hour/types/params/v1_face_swap_photo_create_body_assets_face_mappings_item.py +6 -1
- magic_hour/types/params/v1_files_upload_urls_create_body.py +3 -0
- magic_hour/types/params/v1_files_upload_urls_create_body_items_item.py +2 -2
- magic_hour/types/params/v1_image_background_remover_create_body.py +1 -1
- magic_hour/types/params/v1_image_background_remover_create_body_assets.py +12 -2
- magic_hour/types/params/v1_image_to_video_create_body.py +18 -8
- magic_hour/types/params/v1_image_to_video_create_body_assets.py +6 -1
- magic_hour/types/params/v1_lip_sync_create_body.py +15 -17
- magic_hour/types/params/v1_lip_sync_create_body_assets.py +12 -2
- magic_hour/types/params/v1_photo_colorizer_create_body.py +1 -1
- magic_hour/types/params/v1_photo_colorizer_create_body_assets.py +6 -1
- magic_hour/types/params/v1_text_to_video_create_body.py +8 -2
- magic_hour/types/params/v1_video_to_video_create_body.py +15 -17
- magic_hour/types/params/v1_video_to_video_create_body_assets.py +6 -1
- {magic_hour-0.33.1.dist-info → magic_hour-0.35.0.dist-info}/METADATA +1 -1
- {magic_hour-0.33.1.dist-info → magic_hour-0.35.0.dist-info}/RECORD +103 -103
- {magic_hour-0.33.1.dist-info → magic_hour-0.35.0.dist-info}/LICENSE +0 -0
- {magic_hour-0.33.1.dist-info → magic_hour-0.35.0.dist-info}/WHEEL +0 -0
magic_hour/resources/v1/video_to_video/client.py

@@ -52,26 +52,24 @@ class VideoToVideoClient:
     fps_resolution: Determines whether the resulting video will have the same frame per second as the original video, or half.
         * `FULL` - the result video will have the same FPS as the input video
         * `HALF` - the result video will have half the FPS as the input video
-    height:
-        * If both height and width are omitted, the video will be resized according to your subscription's maximum resolution, while preserving aspect ratio.
-        Note: if the video's original resolution is less than the maximum, the video will not be resized.
-        See our [pricing page](https://magichour.ai/pricing) for more details.
+    height: `height` is deprecated and no longer influences the output video's resolution.
+        Output resolution is determined by the **minimum** of:
+        - The resolution of the input video
+        - The maximum resolution allowed by your subscription tier. See our [pricing page](https://magichour.ai/pricing) for more details.
+        This field is retained only for backward compatibility and will be removed in a future release.
+    name: The name of video. This value is mainly used for your own identification of the video.
+    width: `width` is deprecated and no longer influences the output video's resolution.
+        Output resolution is determined by the **minimum** of:
+        - The resolution of the input video
+        - The maximum resolution allowed by your subscription tier. See our [pricing page](https://magichour.ai/pricing) for more details.
+        This field is retained only for backward compatibility and will be removed in a future release.
     assets: Provide the assets for video-to-video. For video, The `video_source` field determines whether `video_file_path` or `youtube_url` field is used
-    end_seconds: The end time of the input video in seconds
-    start_seconds: The start time of the input video in seconds
+    end_seconds: The end time of the input video in seconds. This value is used to trim the input video. The value must be greater than 0.1, and more than the start_seconds.
+    start_seconds: The start time of the input video in seconds. This value is used to trim the input video. The value must be greater than 0.
     style: V1VideoToVideoCreateBodyStyle
     request_options: Additional options to customize the HTTP request

@@ -93,15 +91,13 @@ class VideoToVideoClient:
     start_seconds=0.0,
     style={
         "art_style": "3D Render",
-        "model": "
+        "model": "default",
         "prompt": "string",
-        "prompt_type": "
+        "prompt_type": "default",
         "version": "default",
     },
     fps_resolution="HALF",
-    height=960,
     name="Video To Video video",
-    width=512,
 )

@@ -168,26 +164,24 @@ class AsyncVideoToVideoClient:
     fps_resolution: Determines whether the resulting video will have the same frame per second as the original video, or half.
         * `FULL` - the result video will have the same FPS as the input video
         * `HALF` - the result video will have half the FPS as the input video
-    height:
-        * If height is provided, width will also be required. The larger value between width and height will be used to determine the maximum output resolution while maintaining the original aspect ratio.
-        * If both height and width are omitted, the video will be resized according to your subscription's maximum resolution, while preserving aspect ratio.
-        Note: if the video's original resolution is less than the maximum, the video will not be resized.
+    height: `height` is deprecated and no longer influences the output video's resolution.
+        Output resolution is determined by the **minimum** of:
+        - The resolution of the input video
+        - The maximum resolution allowed by your subscription tier. See our [pricing page](https://magichour.ai/pricing) for more details.
+        This field is retained only for backward compatibility and will be removed in a future release.
+    name: The name of video. This value is mainly used for your own identification of the video.
+    width: `width` is deprecated and no longer influences the output video's resolution.
+        Output resolution is determined by the **minimum** of:
+        - The resolution of the input video
+        - The maximum resolution allowed by your subscription tier. See our [pricing page](https://magichour.ai/pricing) for more details.
+        This field is retained only for backward compatibility and will be removed in a future release.
     assets: Provide the assets for video-to-video. For video, The `video_source` field determines whether `video_file_path` or `youtube_url` field is used
-    end_seconds: The end time of the input video in seconds
-    start_seconds: The start time of the input video in seconds
+    end_seconds: The end time of the input video in seconds. This value is used to trim the input video. The value must be greater than 0.1, and more than the start_seconds.
+    start_seconds: The start time of the input video in seconds. This value is used to trim the input video. The value must be greater than 0.
     style: V1VideoToVideoCreateBodyStyle
     request_options: Additional options to customize the HTTP request

@@ -209,15 +203,13 @@ class AsyncVideoToVideoClient:
     start_seconds=0.0,
     style={
         "art_style": "3D Render",
-        "model": "
+        "model": "default",
         "prompt": "string",
-        "prompt_type": "
+        "prompt_type": "default",
         "version": "default",
     },
     fps_resolution="HALF",
-    height=960,
     name="Video To Video video",
-    width=512,
 )
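The practical effect of the deprecation notes above: `height` and `width` no longer influence the output, so a 0.35.0 caller can drop them and let the output resolution follow the input video and the subscription tier's cap. A minimal sketch, assuming the `Client(token=...)` constructor and the `client.v1.video_to_video.create(...)` path implied by the resource layout; the asset values and the `video_source` literal are placeholders, not confirmed by this diff:

```python
# Hedged sketch of a video-to-video call against 0.35.0.
# Assumptions: Client(token=...) constructor, client.v1.video_to_video.create(...)
# resource path, and the "file" value for video_source; asset paths are placeholders.
from magic_hour import Client

client = Client(token="YOUR_API_TOKEN")  # assumed constructor

res = client.v1.video_to_video.create(
    assets={
        "video_source": "file",  # assumed literal; selects video_file_path over youtube_url
        "video_file_path": "api-assets/id/1234.mp4",  # placeholder path
    },
    start_seconds=0.0,
    end_seconds=15.0,  # must be greater than 0.1 and greater than start_seconds
    style={
        "art_style": "3D Render",
        "model": "default",
        "prompt": "string",
        "prompt_type": "default",
        "version": "default",
    },
    fps_resolution="HALF",
    name="Video To Video video",
    # height/width omitted: they are deprecated and no longer affect output resolution.
)
```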
magic_hour/types/models/v1_face_detection_create_response.py

@@ -21,5 +21,5 @@ class V1FaceDetectionCreateResponse(pydantic.BaseModel):
         alias="id",
     )
     """
-    The id of the task
+    The id of the task. Use this value in the [get face detection details API](/api-reference/files/get-face-detection-details) to get the details of the face detection task.
     """
magic_hour/types/models/v1_face_detection_get_response.py

@@ -33,7 +33,7 @@ class V1FaceDetectionGetResponse(pydantic.BaseModel):
         alias="id",
     )
     """
-    The id of the task
+    The id of the task. This value is returned by the [face detection API](/api-reference/files/face-detection#response-id).
     """
     status: typing_extensions.Literal["complete", "error", "queued", "rendering"] = (
         pydantic.Field(
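The two `id` docstrings above describe a create-then-get flow: the id returned by the create call is the task id to pass to the get-details endpoint. A minimal sketch, assuming `client.v1.face_detection.create(...)` and `client.v1.face_detection.get(id=...)` from the resource layout; the asset field name and path are hypothetical:

```python
# Hedged sketch of the face-detection create -> get flow.
# Assumptions: Client(token=...), client.v1.face_detection.create/.get resource paths,
# and the target_file_path asset field; the image path is a placeholder.
from magic_hour import Client

client = Client(token="YOUR_API_TOKEN")  # assumed constructor

create_res = client.v1.face_detection.create(
    assets={"target_file_path": "api-assets/id/1234.png"}  # hypothetical field and path
)

# The create response id is the task id for the get-face-detection-details endpoint.
details = client.v1.face_detection.get(id=create_res.id)
print(details.status)  # "complete", "error", "queued", or "rendering"
```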
magic_hour/types/models/v1_files_upload_urls_create_response.py

@@ -19,3 +19,6 @@ class V1FilesUploadUrlsCreateResponse(pydantic.BaseModel):
     items: typing.List[V1FilesUploadUrlsCreateResponseItemsItem] = pydantic.Field(
         alias="items",
     )
+    """
+    The list of upload URLs and file paths for the assets. The response array will match the order of items in the request body. Refer to the [Input Files Guide](/integration/input-files) for more details.
+    """
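The new `items` docstring notes that the response preserves the order of the request items. A hedged sketch of the flow it points at, assuming the `client.v1.files.upload_urls.create(...)` path and `upload_url`/`file_path` attributes on each item; the item shape and the raw PUT upload follow the Input Files Guide and are not confirmed by this diff:

```python
# Hedged sketch of the upload-URL flow.
# Assumptions: Client(token=...), client.v1.files.upload_urls.create(...), the
# {"type", "extension"} item shape, and upload_url/file_path attributes on each item.
import httpx

from magic_hour import Client

client = Client(token="YOUR_API_TOKEN")  # assumed constructor

res = client.v1.files.upload_urls.create(
    items=[{"type": "image", "extension": "png"}]  # assumed item shape
)

# Response items come back in the same order as the request items.
item = res.items[0]
with open("person.png", "rb") as f:
    httpx.put(item.upload_url, content=f.read())  # assumed: raw PUT to the signed URL

# item.file_path can now be passed as an asset path in any create call,
# e.g. assets={"image_file_path": item.file_path}.
```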
magic_hour/types/models/v1_video_projects_get_response.py

@@ -49,7 +49,7 @@ class V1VideoProjectsGetResponse(pydantic.BaseModel):
         alias="end_seconds",
     )
     """
-    The end time of the input video in seconds
+    The end time of the input video in seconds. This value is used to trim the input video. The value must be greater than 0.1, and more than the start_seconds.
     """
     error: typing.Optional[V1VideoProjectsGetResponseError] = pydantic.Field(
         alias="error",

@@ -85,7 +85,7 @@ class V1VideoProjectsGetResponse(pydantic.BaseModel):
         alias="start_seconds",
     )
     """
-    The start time of the input video in seconds
+    The start time of the input video in seconds. This value is used to trim the input video. The value must be greater than 0.
     """
     status: typing_extensions.Literal[
         "canceled", "complete", "draft", "error", "queued", "rendering"
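Because `status` is a literal over "canceled", "complete", "draft", "error", "queued", and "rendering", a caller can poll a project until it reaches a terminal state. A sketch, assuming `client.v1.video_projects.get(id=...)` from the resource layout:

```python
# Hedged polling sketch for a video project.
# Assumption: client is a magic_hour Client and the project is fetched via
# client.v1.video_projects.get(id=...).
import time


def wait_for_video_project(client, project_id: str):
    """Poll until the project leaves the draft/queued/rendering states."""
    while True:
        project = client.v1.video_projects.get(id=project_id)
        if project.status in ("complete", "error", "canceled"):
            return project
        time.sleep(5)  # still "draft", "queued", or "rendering"
```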
magic_hour/types/params/v1_ai_clothes_changer_create_body_assets.py

@@ -9,16 +9,29 @@ class V1AiClothesChangerCreateBodyAssets(typing_extensions.TypedDict):

     garment_file_path: typing_extensions.Required[str]
     """
-    The image of the outfit. This value
+    The image of the outfit. This value is either
+    - a direct URL to the video file
+    - `file_path` field from the response of the [upload urls API](https://docs.magichour.ai/api-reference/files/generate-asset-upload-urls).
+
+    Please refer to the [Input File documentation](https://docs.magichour.ai/api-reference/files/generate-asset-upload-urls#input-file) to learn more.
     """

     garment_type: typing_extensions.Required[
         typing_extensions.Literal["dresses", "lower_body", "upper_body"]
     ]
+    """
+    The type of the outfit.
+    """

     person_file_path: typing_extensions.Required[str]
     """
-    The image with the person. This value
+    The image with the person. This value is either
+    - a direct URL to the video file
+    - `file_path` field from the response of the [upload urls API](https://docs.magichour.ai/api-reference/files/generate-asset-upload-urls).
+
+    Please refer to the [Input File documentation](https://docs.magichour.ai/api-reference/files/generate-asset-upload-urls#input-file) to learn more.
     """
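Both asset fields above now accept either a direct URL or a `file_path` returned by the upload urls API. A sketch mixing the two forms, assuming `client.v1.ai_clothes_changer.create(...)` from the resource layout; the URL and path values are placeholders:

```python
# Hedged sketch of a clothes-changer request mixing the two accepted asset forms.
# Assumptions: Client(token=...) and client.v1.ai_clothes_changer.create(...).
from magic_hour import Client

client = Client(token="YOUR_API_TOKEN")  # assumed constructor

res = client.v1.ai_clothes_changer.create(
    assets={
        "garment_file_path": "https://example.com/outfit.png",  # direct URL
        "garment_type": "upper_body",  # "dresses", "lower_body", or "upper_body"
        "person_file_path": "api-assets/id/1234.png",  # file_path from the upload urls API
    },
    name="Clothes Changer image",
)
```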
magic_hour/types/params/v1_ai_face_editor_create_body.py

@@ -24,7 +24,7 @@ class V1AiFaceEditorCreateBody(typing_extensions.TypedDict):

     name: typing_extensions.NotRequired[str]
     """
-    The name of image
+    The name of image. This value is mainly used for your own identification of the image.
     """

     style: typing_extensions.Required[V1AiFaceEditorCreateBodyStyle]
magic_hour/types/params/v1_ai_face_editor_create_body_assets.py

@@ -9,7 +9,12 @@ class V1AiFaceEditorCreateBodyAssets(typing_extensions.TypedDict):

     image_file_path: typing_extensions.Required[str]
     """
-    This is the image whose face will be edited. This value
+    This is the image whose face will be edited. This value is either
+    - a direct URL to the video file
+    - `file_path` field from the response of the [upload urls API](https://docs.magichour.ai/api-reference/files/generate-asset-upload-urls).
+
+    Please refer to the [Input File documentation](https://docs.magichour.ai/api-reference/files/generate-asset-upload-urls#input-file) to learn more.
     """
magic_hour/types/params/v1_ai_face_editor_create_body_style.py

@@ -1,4 +1,5 @@
 import pydantic
+import typing
 import typing_extensions

@@ -7,77 +8,77 @@ class V1AiFaceEditorCreateBodyStyle(typing_extensions.TypedDict):
     Face editing parameters
     """

-    enhance_face: typing_extensions.
+    enhance_face: typing_extensions.NotRequired[bool]
     """
     Enhance face features
     """

-    eye_gaze_horizontal: typing_extensions.
+    eye_gaze_horizontal: typing_extensions.NotRequired[float]
     """
     Horizontal eye gaze (-100 to 100), in increments of 5
     """

-    eye_gaze_vertical: typing_extensions.
+    eye_gaze_vertical: typing_extensions.NotRequired[float]
     """
     Vertical eye gaze (-100 to 100), in increments of 5
     """

-    eye_open_ratio: typing_extensions.
+    eye_open_ratio: typing_extensions.NotRequired[float]
     """
     Eye open ratio (-100 to 100), in increments of 5
     """

-    eyebrow_direction: typing_extensions.
+    eyebrow_direction: typing_extensions.NotRequired[float]
     """
     Eyebrow direction (-100 to 100), in increments of 5
     """

-    head_pitch: typing_extensions.
+    head_pitch: typing_extensions.NotRequired[float]
     """
     Head pitch (-100 to 100), in increments of 5
     """

-    head_roll: typing_extensions.
+    head_roll: typing_extensions.NotRequired[float]
     """
     Head roll (-100 to 100), in increments of 5
     """

-    head_yaw: typing_extensions.
+    head_yaw: typing_extensions.NotRequired[float]
     """
     Head yaw (-100 to 100), in increments of 5
     """

-    lip_open_ratio: typing_extensions.
+    lip_open_ratio: typing_extensions.NotRequired[float]
     """
     Lip open ratio (-100 to 100), in increments of 5
     """

-    mouth_grim: typing_extensions.
+    mouth_grim: typing_extensions.NotRequired[float]
     """
     Mouth grim (-100 to 100), in increments of 5
     """

-    mouth_position_horizontal: typing_extensions.
+    mouth_position_horizontal: typing_extensions.NotRequired[float]
     """
     Horizontal mouth position (-100 to 100), in increments of 5
     """

-    mouth_position_vertical: typing_extensions.
+    mouth_position_vertical: typing_extensions.NotRequired[float]
     """
     Vertical mouth position (-100 to 100), in increments of 5
     """

-    mouth_pout: typing_extensions.
+    mouth_pout: typing_extensions.NotRequired[float]
     """
     Mouth pout (-100 to 100), in increments of 5
     """

-    mouth_purse: typing_extensions.
+    mouth_purse: typing_extensions.NotRequired[float]
     """
     Mouth purse (-100 to 100), in increments of 5
     """

-    mouth_smile: typing_extensions.
+    mouth_smile: typing_extensions.NotRequired[float]
     """
     Mouth smile (-100 to 100), in increments of 5
     """

@@ -93,48 +94,44 @@ class _SerializerV1AiFaceEditorCreateBodyStyle(pydantic.BaseModel):
         populate_by_name=True,
     )

-    enhance_face: bool = pydantic.Field(
-        alias="enhance_face",
+    enhance_face: typing.Optional[bool] = pydantic.Field(
+        alias="enhance_face", default=None
     )
-    eye_gaze_horizontal: float = pydantic.Field(
-        alias="eye_gaze_horizontal",
+    eye_gaze_horizontal: typing.Optional[float] = pydantic.Field(
+        alias="eye_gaze_horizontal", default=None
     )
-    eye_gaze_vertical: float = pydantic.Field(
-        alias="eye_gaze_vertical",
+    eye_gaze_vertical: typing.Optional[float] = pydantic.Field(
+        alias="eye_gaze_vertical", default=None
     )
-    eye_open_ratio: float = pydantic.Field(
-        alias="eye_open_ratio",
+    eye_open_ratio: typing.Optional[float] = pydantic.Field(
+        alias="eye_open_ratio", default=None
    )
-    eyebrow_direction: float = pydantic.Field(
-        alias="eyebrow_direction",
+    eyebrow_direction: typing.Optional[float] = pydantic.Field(
+        alias="eyebrow_direction", default=None
     )
-    head_pitch: float = pydantic.Field(
-        alias="head_pitch",
+    head_pitch: typing.Optional[float] = pydantic.Field(
+        alias="head_pitch", default=None
     )
-    head_roll: float = pydantic.Field(
+    head_roll: typing.Optional[float] = pydantic.Field(alias="head_roll", default=None)
+    head_yaw: typing.Optional[float] = pydantic.Field(alias="head_yaw", default=None)
+    lip_open_ratio: typing.Optional[float] = pydantic.Field(
+        alias="lip_open_ratio", default=None
     )
+    mouth_grim: typing.Optional[float] = pydantic.Field(
+        alias="mouth_grim", default=None
     )
+    mouth_position_horizontal: typing.Optional[float] = pydantic.Field(
+        alias="mouth_position_horizontal", default=None
     )
+    mouth_position_vertical: typing.Optional[float] = pydantic.Field(
+        alias="mouth_position_vertical", default=None
     )
+    mouth_pout: typing.Optional[float] = pydantic.Field(
+        alias="mouth_pout", default=None
     )
-    mouth_purse: float = pydantic.Field(
-        alias="mouth_purse",
-    )
+    mouth_purse: typing.Optional[float] = pydantic.Field(
+        alias="mouth_purse", default=None
+    )
-    mouth_smile: float = pydantic.Field(
-        alias="mouth_smile",
+    mouth_smile: typing.Optional[float] = pydantic.Field(
+        alias="mouth_smile", default=None
     )
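With every style field now `NotRequired` (and `Optional` with `default=None` in the serializer), a request can carry only the adjustments it needs. A sketch, assuming `client.v1.ai_face_editor.create(...)` from the resource layout; the asset path is a placeholder:

```python
# Hedged sketch of a partial face-editor style under the 0.35.0 typing.
# Assumptions: Client(token=...) and client.v1.ai_face_editor.create(...).
from magic_hour import Client

client = Client(token="YOUR_API_TOKEN")  # assumed constructor

res = client.v1.ai_face_editor.create(
    assets={"image_file_path": "api-assets/id/1234.png"},  # placeholder path
    style={
        # Only the adjustments you care about; every other field may now be omitted.
        "enhance_face": True,
        "mouth_smile": 30,      # -100 to 100, in increments of 5
        "eye_open_ratio": -10,  # -100 to 100, in increments of 5
    },
    name="Face Editor image",
)
```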
magic_hour/types/params/v1_ai_gif_generator_create_body.py

@@ -15,7 +15,7 @@ class V1AiGifGeneratorCreateBody(typing_extensions.TypedDict):

     name: typing_extensions.NotRequired[str]
     """
-    The name of gif
+    The name of gif. This value is mainly used for your own identification of the gif.
     """

     style: typing_extensions.Required[V1AiGifGeneratorCreateBodyStyle]
magic_hour/types/params/v1_ai_headshot_generator_create_body.py

@@ -24,7 +24,7 @@ class V1AiHeadshotGeneratorCreateBody(typing_extensions.TypedDict):

     name: typing_extensions.NotRequired[str]
     """
-    The name of image
+    The name of image. This value is mainly used for your own identification of the image.
     """

     style: typing_extensions.NotRequired[V1AiHeadshotGeneratorCreateBodyStyle]
magic_hour/types/params/v1_ai_headshot_generator_create_body_assets.py

@@ -9,7 +9,12 @@ class V1AiHeadshotGeneratorCreateBodyAssets(typing_extensions.TypedDict):

     image_file_path: typing_extensions.Required[str]
     """
-    The image used to generate the headshot. This image must contain one detectable face. This value
+    The image used to generate the headshot. This image must contain one detectable face. This value is either
+    - a direct URL to the video file
+    - `file_path` field from the response of the [upload urls API](https://docs.magichour.ai/api-reference/files/generate-asset-upload-urls).
+
+    Please refer to the [Input File documentation](https://docs.magichour.ai/api-reference/files/generate-asset-upload-urls#input-file) to learn more.
     """
magic_hour/types/params/v1_ai_headshot_generator_create_body_style.py

@@ -10,7 +10,7 @@ class V1AiHeadshotGeneratorCreateBodyStyle(typing_extensions.TypedDict):

     prompt: typing_extensions.NotRequired[str]
     """
-
+    Prompt used to guide the style of your headshot. We recommend omitting the prompt unless you want to customize your headshot. You can visit [AI headshot generator](https://magichour.ai/create/ai-headshot-generator) to view an example of a good prompt used for our 'Professional' style.
     """
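The new prompt docstring recommends leaving the prompt out unless you want to customize the headshot. A sketch, assuming `client.v1.ai_headshot_generator.create(...)` from the resource layout; the asset path is a placeholder:

```python
# Hedged sketch of a headshot request that relies on the default style.
# Assumptions: Client(token=...) and client.v1.ai_headshot_generator.create(...).
from magic_hour import Client

client = Client(token="YOUR_API_TOKEN")  # assumed constructor

res = client.v1.ai_headshot_generator.create(
    assets={"image_file_path": "api-assets/id/1234.png"},  # must contain one detectable face
    name="Headshot image",
    # style={"prompt": "..."} only if you want to customize the headshot.
)
```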
magic_hour/types/params/v1_ai_image_editor_create_body.py

@@ -24,7 +24,7 @@ class V1AiImageEditorCreateBody(typing_extensions.TypedDict):

     name: typing_extensions.NotRequired[str]
     """
-    The name of image
+    The name of image. This value is mainly used for your own identification of the image.
     """

     style: typing_extensions.Required[V1AiImageEditorCreateBodyStyle]
magic_hour/types/params/v1_ai_image_editor_create_body_assets.py

@@ -9,7 +9,12 @@ class V1AiImageEditorCreateBodyAssets(typing_extensions.TypedDict):

     image_file_path: typing_extensions.Required[str]
     """
-    The image used in the edit. This value
+    The image used in the edit. This value is either
+    - a direct URL to the video file
+    - `file_path` field from the response of the [upload urls API](https://docs.magichour.ai/api-reference/files/generate-asset-upload-urls).
+
+    Please refer to the [Input File documentation](https://docs.magichour.ai/api-reference/files/generate-asset-upload-urls#input-file) to learn more.
     """
magic_hour/types/params/v1_ai_image_generator_create_body.py

@@ -15,19 +15,25 @@ class V1AiImageGeneratorCreateBody(typing_extensions.TypedDict):

     image_count: typing_extensions.Required[int]
     """
-
+    Number of images to generate.
     """

     name: typing_extensions.NotRequired[str]
     """
-    The name of image
+    The name of image. This value is mainly used for your own identification of the image.
     """

     orientation: typing_extensions.Required[
         typing_extensions.Literal["landscape", "portrait", "square"]
     ]
+    """
+    The orientation of the output image(s).
+    """

     style: typing_extensions.Required[V1AiImageGeneratorCreateBodyStyle]
+    """
+    The art style to use for image generation.
+    """


 class _SerializerV1AiImageGeneratorCreateBody(pydantic.BaseModel):
magic_hour/types/params/v1_ai_image_generator_create_body_style.py

@@ -5,12 +5,12 @@ import typing_extensions

 class V1AiImageGeneratorCreateBodyStyle(typing_extensions.TypedDict):
     """
-
+    The art style to use for image generation.
     """

     prompt: typing_extensions.Required[str]
     """
-    The prompt used for the image.
+    The prompt used for the image(s).
     """

     tool: typing_extensions.NotRequired[
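Putting the documented fields together, a generation request specifies `image_count`, `orientation`, and a `style` carrying the prompt. A sketch, assuming `client.v1.ai_image_generator.create(...)` from the resource layout:

```python
# Hedged sketch of an image-generation request built from the documented fields.
# Assumptions: Client(token=...) and client.v1.ai_image_generator.create(...).
from magic_hour import Client

client = Client(token="YOUR_API_TOKEN")  # assumed constructor

res = client.v1.ai_image_generator.create(
    image_count=2,                   # number of images to generate
    orientation="landscape",         # "landscape", "portrait", or "square"
    style={"prompt": "Cool image"},  # the prompt used for the image(s)
    name="Image Generator image",
)
```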
magic_hour/types/params/v1_ai_image_upscaler_create_body.py

@@ -24,12 +24,14 @@ class V1AiImageUpscalerCreateBody(typing_extensions.TypedDict):

     name: typing_extensions.NotRequired[str]
     """
-    The name of image
+    The name of image. This value is mainly used for your own identification of the image.
     """

     scale_factor: typing_extensions.Required[float]
     """
-    How much to scale the image. Must be either 2 or 4
+    How much to scale the image. Must be either 2 or 4.
+
+    Note: 4x upscale is only available on Creator, Pro, or Business tier.
     """

     style: typing_extensions.Required[V1AiImageUpscalerCreateBodyStyle]
magic_hour/types/params/v1_ai_image_upscaler_create_body_assets.py

@@ -9,7 +9,12 @@ class V1AiImageUpscalerCreateBodyAssets(typing_extensions.TypedDict):

     image_file_path: typing_extensions.Required[str]
     """
-    The image to upscale. This value
+    The image to upscale. This value is either
+    - a direct URL to the video file
+    - `file_path` field from the response of the [upload urls API](https://docs.magichour.ai/api-reference/files/generate-asset-upload-urls).
+
+    Please refer to the [Input File documentation](https://docs.magichour.ai/api-reference/files/generate-asset-upload-urls#input-file) to learn more.
     """
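The updated `scale_factor` docstring constrains the value to 2 or 4, with 4x gated to the Creator, Pro, or Business tiers. A sketch, assuming `client.v1.ai_image_upscaler.create(...)` from the resource layout; the `style` value is hypothetical since its shape is not part of this diff:

```python
# Hedged sketch of an upscale request.
# Assumptions: Client(token=...), client.v1.ai_image_upscaler.create(...), and the
# style shape, which is not shown in this diff; the asset path is a placeholder.
from magic_hour import Client

client = Client(token="YOUR_API_TOKEN")  # assumed constructor

res = client.v1.ai_image_upscaler.create(
    assets={"image_file_path": "api-assets/id/1234.png"},  # URL or upload-urls file_path
    scale_factor=2,  # must be 2 or 4; 4x requires Creator, Pro, or Business tier
    style={"enhancement": "Balanced"},  # hypothetical V1AiImageUpscalerCreateBodyStyle value
    name="Image Upscaler image",
)
```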
magic_hour/types/params/v1_ai_photo_editor_create_body.py

@@ -24,7 +24,7 @@ class V1AiPhotoEditorCreateBody(typing_extensions.TypedDict):

     name: typing_extensions.NotRequired[str]
     """
-    The name of image
+    The name of image. This value is mainly used for your own identification of the image.
     """

     resolution: typing_extensions.Required[int]
magic_hour/types/params/v1_ai_photo_editor_create_body_assets.py

@@ -9,7 +9,12 @@ class V1AiPhotoEditorCreateBodyAssets(typing_extensions.TypedDict):

     image_file_path: typing_extensions.Required[str]
     """
-    The image used to generate the output. This value
+    The image used to generate the output. This value is either
+    - a direct URL to the video file
+    - `file_path` field from the response of the [upload urls API](https://docs.magichour.ai/api-reference/files/generate-asset-upload-urls).
+
+    Please refer to the [Input File documentation](https://docs.magichour.ai/api-reference/files/generate-asset-upload-urls#input-file) to learn more.
     """
magic_hour/types/params/v1_ai_qr_code_generator_create_body.py

@@ -20,7 +20,7 @@ class V1AiQrCodeGeneratorCreateBody(typing_extensions.TypedDict):

     name: typing_extensions.NotRequired[str]
     """
-    The name of image
+    The name of image. This value is mainly used for your own identification of the image.
     """

     style: typing_extensions.Required[V1AiQrCodeGeneratorCreateBodyStyle]
magic_hour/types/params/v1_ai_talking_photo_create_body.py

@@ -29,7 +29,7 @@ class V1AiTalkingPhotoCreateBody(typing_extensions.TypedDict):

     name: typing_extensions.NotRequired[str]
     """
-    The name of image
+    The name of image. This value is mainly used for your own identification of the image.
     """

     start_seconds: typing_extensions.Required[float]
magic_hour/types/params/v1_ai_talking_photo_create_body_assets.py

@@ -9,12 +9,22 @@ class V1AiTalkingPhotoCreateBodyAssets(typing_extensions.TypedDict):

     audio_file_path: typing_extensions.Required[str]
     """
-    The audio file to sync with the image. This value
+    The audio file to sync with the image. This value is either
+    - a direct URL to the video file
+    - `file_path` field from the response of the [upload urls API](https://docs.magichour.ai/api-reference/files/generate-asset-upload-urls).
+
+    Please refer to the [Input File documentation](https://docs.magichour.ai/api-reference/files/generate-asset-upload-urls#input-file) to learn more.
     """

     image_file_path: typing_extensions.Required[str]
     """
-    The source image to animate. This value
+    The source image to animate. This value is either
+    - a direct URL to the video file
+    - `file_path` field from the response of the [upload urls API](https://docs.magichour.ai/api-reference/files/generate-asset-upload-urls).
+
+    Please refer to the [Input File documentation](https://docs.magichour.ai/api-reference/files/generate-asset-upload-urls#input-file) to learn more.
     """
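As with the other asset bodies, `audio_file_path` and `image_file_path` accept either a direct URL or an upload-urls `file_path`. A closing sketch, assuming `client.v1.ai_talking_photo.create(...)` from the resource layout; the `end_seconds` argument is assumed as the counterpart of the required `start_seconds` and is not confirmed by this diff:

```python
# Hedged sketch of a talking-photo request.
# Assumptions: Client(token=...), client.v1.ai_talking_photo.create(...), and the
# end_seconds parameter; asset values are placeholders.
from magic_hour import Client

client = Client(token="YOUR_API_TOKEN")  # assumed constructor

res = client.v1.ai_talking_photo.create(
    assets={
        "audio_file_path": "api-assets/id/1234.mp3",         # URL or upload-urls file_path
        "image_file_path": "https://example.com/face.png",   # the source image to animate
    },
    start_seconds=0.0,
    end_seconds=15.0,  # assumed; not shown in this diff
    name="Talking Photo image",
)
```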