magic_hour 0.10.0-py3-none-any.whl → 0.12.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of magic_hour has been flagged as possibly problematic.
- magic_hour/environment.py +1 -1
- magic_hour/resources/v1/ai_clothes_changer/client.py +8 -8
- magic_hour/resources/v1/ai_headshot_generator/client.py +12 -12
- magic_hour/resources/v1/ai_image_generator/client.py +10 -10
- magic_hour/resources/v1/ai_image_upscaler/client.py +12 -12
- magic_hour/resources/v1/ai_photo_editor/client.py +12 -12
- magic_hour/resources/v1/ai_qr_code_generator/client.py +10 -10
- magic_hour/resources/v1/ai_talking_photo/README.md +43 -0
- magic_hour/resources/v1/ai_talking_photo/__init__.py +4 -0
- magic_hour/resources/v1/ai_talking_photo/client.py +147 -0
- magic_hour/resources/v1/animation/client.py +10 -10
- magic_hour/resources/v1/client.py +6 -0
- magic_hour/resources/v1/face_swap/client.py +8 -8
- magic_hour/resources/v1/face_swap_photo/client.py +8 -8
- magic_hour/resources/v1/files/upload_urls/README.md +4 -4
- magic_hour/resources/v1/files/upload_urls/client.py +14 -12
- magic_hour/resources/v1/image_background_remover/client.py +8 -8
- magic_hour/resources/v1/image_projects/client.py +4 -4
- magic_hour/resources/v1/image_to_video/client.py +12 -12
- magic_hour/resources/v1/lip_sync/client.py +8 -8
- magic_hour/resources/v1/text_to_video/client.py +10 -10
- magic_hour/resources/v1/video_projects/client.py +4 -4
- magic_hour/resources/v1/video_to_video/client.py +12 -12
- magic_hour/types/models/__init__.py +53 -51
- magic_hour/types/models/{post_v1_ai_clothes_changer_response.py → v1_ai_clothes_changer_create_response.py} +1 -1
- magic_hour/types/models/v1_ai_headshot_generator_create_response.py +25 -0
- magic_hour/types/models/{post_v1_ai_photo_editor_response.py → v1_ai_image_generator_create_response.py} +1 -1
- magic_hour/types/models/{post_v1_ai_image_generator_response.py → v1_ai_image_upscaler_create_response.py} +1 -1
- magic_hour/types/models/{post_v1_face_swap_photo_response.py → v1_ai_photo_editor_create_response.py} +1 -1
- magic_hour/types/models/{post_v1_ai_headshot_generator_response.py → v1_ai_qr_code_generator_create_response.py} +1 -1
- magic_hour/types/models/v1_ai_talking_photo_create_response.py +25 -0
- magic_hour/types/models/{post_v1_text_to_video_response.py → v1_animation_create_response.py} +1 -1
- magic_hour/types/models/{post_v1_face_swap_response.py → v1_face_swap_create_response.py} +1 -1
- magic_hour/types/models/{post_v1_ai_image_upscaler_response.py → v1_face_swap_photo_create_response.py} +1 -1
- magic_hour/types/models/v1_files_upload_urls_create_response.py +21 -0
- magic_hour/types/models/{post_v1_files_upload_urls_response_items_item.py → v1_files_upload_urls_create_response_items_item.py} +2 -2
- magic_hour/types/models/v1_image_background_remover_create_response.py +25 -0
- magic_hour/types/models/{get_v1_image_projects_id_response.py → v1_image_projects_get_response.py} +7 -7
- magic_hour/types/models/{get_v1_image_projects_id_response_downloads_item.py → v1_image_projects_get_response_downloads_item.py} +1 -1
- magic_hour/types/models/{get_v1_video_projects_id_response_error.py → v1_image_projects_get_response_error.py} +2 -2
- magic_hour/types/models/{post_v1_lip_sync_response.py → v1_image_to_video_create_response.py} +1 -1
- magic_hour/types/models/{post_v1_animation_response.py → v1_lip_sync_create_response.py} +1 -1
- magic_hour/types/models/{post_v1_image_to_video_response.py → v1_text_to_video_create_response.py} +1 -1
- magic_hour/types/models/{get_v1_video_projects_id_response.py → v1_video_projects_get_response.py} +10 -11
- magic_hour/types/models/{get_v1_video_projects_id_response_download.py → v1_video_projects_get_response_download.py} +1 -1
- magic_hour/types/models/{get_v1_video_projects_id_response_downloads_item.py → v1_video_projects_get_response_downloads_item.py} +1 -1
- magic_hour/types/models/{get_v1_image_projects_id_response_error.py → v1_video_projects_get_response_error.py} +2 -2
- magic_hour/types/models/v1_video_to_video_create_response.py +25 -0
- magic_hour/types/params/__init__.py +200 -182
- magic_hour/types/params/v1_ai_clothes_changer_create_body.py +40 -0
- magic_hour/types/params/{post_v1_ai_clothes_changer_body_assets.py → v1_ai_clothes_changer_create_body_assets.py} +3 -3
- magic_hour/types/params/v1_ai_headshot_generator_create_body.py +49 -0
- magic_hour/types/params/{post_v1_ai_headshot_generator_body_assets.py → v1_ai_headshot_generator_create_body_assets.py} +3 -3
- magic_hour/types/params/{post_v1_ai_headshot_generator_body_style.py → v1_ai_headshot_generator_create_body_style.py} +4 -4
- magic_hour/types/params/{post_v1_ai_image_generator_body.py → v1_ai_image_generator_create_body.py} +9 -9
- magic_hour/types/params/{post_v1_ai_image_generator_body_style.py → v1_ai_image_generator_create_body_style.py} +4 -4
- magic_hour/types/params/v1_ai_image_upscaler_create_body.py +57 -0
- magic_hour/types/params/{post_v1_ai_image_upscaler_body_assets.py → v1_ai_image_upscaler_create_body_assets.py} +3 -3
- magic_hour/types/params/{post_v1_ai_image_upscaler_body_style.py → v1_ai_image_upscaler_create_body_style.py} +4 -4
- magic_hour/types/params/{post_v1_ai_photo_editor_body.py → v1_ai_photo_editor_create_body.py} +14 -14
- magic_hour/types/params/{post_v1_ai_photo_editor_body_assets.py → v1_ai_photo_editor_create_body_assets.py} +3 -3
- magic_hour/types/params/{post_v1_ai_photo_editor_body_style.py → v1_ai_photo_editor_create_body_style.py} +4 -4
- magic_hour/types/params/{post_v1_ai_qr_code_generator_body.py → v1_ai_qr_code_generator_create_body.py} +9 -9
- magic_hour/types/params/{post_v1_ai_qr_code_generator_body_style.py → v1_ai_qr_code_generator_create_body_style.py} +4 -4
- magic_hour/types/params/v1_ai_talking_photo_create_body.py +56 -0
- magic_hour/types/params/v1_ai_talking_photo_create_body_assets.py +36 -0
- magic_hour/types/params/{post_v1_animation_body.py → v1_animation_create_body.py} +14 -14
- magic_hour/types/params/{post_v1_animation_body_assets.py → v1_animation_create_body_assets.py} +3 -3
- magic_hour/types/params/{post_v1_animation_body_style.py → v1_animation_create_body_style.py} +3 -3
- magic_hour/types/params/{post_v1_face_swap_body.py → v1_face_swap_create_body.py} +9 -9
- magic_hour/types/params/{post_v1_face_swap_body_assets.py → v1_face_swap_create_body_assets.py} +3 -3
- magic_hour/types/params/v1_face_swap_photo_create_body.py +40 -0
- magic_hour/types/params/{post_v1_face_swap_photo_body_assets.py → v1_face_swap_photo_create_body_assets.py} +3 -3
- magic_hour/types/params/v1_files_upload_urls_create_body.py +33 -0
- magic_hour/types/params/{post_v1_files_upload_urls_body_items_item.py → v1_files_upload_urls_create_body_items_item.py} +6 -6
- magic_hour/types/params/v1_image_background_remover_create_body.py +40 -0
- magic_hour/types/params/{post_v1_image_background_remover_body_assets.py → v1_image_background_remover_create_body_assets.py} +3 -3
- magic_hour/types/params/{post_v1_image_to_video_body.py → v1_image_to_video_create_body.py} +14 -14
- magic_hour/types/params/{post_v1_image_to_video_body_assets.py → v1_image_to_video_create_body_assets.py} +3 -3
- magic_hour/types/params/{post_v1_image_to_video_body_style.py → v1_image_to_video_create_body_style.py} +4 -4
- magic_hour/types/params/{post_v1_lip_sync_body.py → v1_lip_sync_create_body.py} +9 -9
- magic_hour/types/params/{post_v1_lip_sync_body_assets.py → v1_lip_sync_create_body_assets.py} +3 -3
- magic_hour/types/params/{post_v1_text_to_video_body.py → v1_text_to_video_create_body.py} +9 -9
- magic_hour/types/params/{post_v1_text_to_video_body_style.py → v1_text_to_video_create_body_style.py} +4 -4
- magic_hour/types/params/{post_v1_video_to_video_body.py → v1_video_to_video_create_body.py} +14 -14
- magic_hour/types/params/{post_v1_video_to_video_body_assets.py → v1_video_to_video_create_body_assets.py} +3 -3
- magic_hour/types/params/{post_v1_video_to_video_body_style.py → v1_video_to_video_create_body_style.py} +14 -4
- {magic_hour-0.10.0.dist-info → magic_hour-0.12.0.dist-info}/METADATA +5 -1
- magic_hour-0.12.0.dist-info/RECORD +139 -0
- magic_hour/types/models/post_v1_ai_qr_code_generator_response.py +0 -25
- magic_hour/types/models/post_v1_files_upload_urls_response.py +0 -21
- magic_hour/types/models/post_v1_image_background_remover_response.py +0 -25
- magic_hour/types/models/post_v1_video_to_video_response.py +0 -25
- magic_hour/types/params/post_v1_ai_clothes_changer_body.py +0 -40
- magic_hour/types/params/post_v1_ai_headshot_generator_body.py +0 -49
- magic_hour/types/params/post_v1_ai_image_upscaler_body.py +0 -57
- magic_hour/types/params/post_v1_face_swap_photo_body.py +0 -40
- magic_hour/types/params/post_v1_files_upload_urls_body.py +0 -31
- magic_hour/types/params/post_v1_image_background_remover_body.py +0 -40
- magic_hour-0.10.0.dist-info/RECORD +0 -133
- {magic_hour-0.10.0.dist-info → magic_hour-0.12.0.dist-info}/LICENSE +0 -0
- {magic_hour-0.10.0.dist-info → magic_hour-0.12.0.dist-info}/WHEEL +0 -0
magic_hour/resources/v1/lip_sync/client.py

@@ -18,7 +18,7 @@ class LipSyncClient:
     def create(
         self,
         *,
-        assets: params.
+        assets: params.V1LipSyncCreateBodyAssets,
         end_seconds: float,
         height: int,
         start_seconds: float,
@@ -30,7 +30,7 @@ class LipSyncClient:
             typing.Optional[str], type_utils.NotGiven
         ] = type_utils.NOT_GIVEN,
         request_options: typing.Optional[RequestOptions] = None,
-    ) -> models.
+    ) -> models.V1LipSyncCreateResponse:
         """
         Lip Sync

@@ -84,14 +84,14 @@ class LipSyncClient:
                 "start_seconds": start_seconds,
                 "width": width,
             },
-            dump_with=params.
+            dump_with=params._SerializerV1LipSyncCreateBody,
         )
         return self._base_client.request(
             method="POST",
             path="/v1/lip-sync",
             auth_names=["bearerAuth"],
             json=_json,
-            cast_to=models.
+            cast_to=models.V1LipSyncCreateResponse,
             request_options=request_options or default_request_options(),
         )

@@ -103,7 +103,7 @@ class AsyncLipSyncClient:
     async def create(
         self,
         *,
-        assets: params.
+        assets: params.V1LipSyncCreateBodyAssets,
         end_seconds: float,
         height: int,
         start_seconds: float,
@@ -115,7 +115,7 @@ class AsyncLipSyncClient:
             typing.Optional[str], type_utils.NotGiven
         ] = type_utils.NOT_GIVEN,
         request_options: typing.Optional[RequestOptions] = None,
-    ) -> models.
+    ) -> models.V1LipSyncCreateResponse:
         """
         Lip Sync

@@ -169,13 +169,13 @@ class AsyncLipSyncClient:
                 "start_seconds": start_seconds,
                 "width": width,
             },
-            dump_with=params.
+            dump_with=params._SerializerV1LipSyncCreateBody,
         )
         return await self._base_client.request(
             method="POST",
             path="/v1/lip-sync",
             auth_names=["bearerAuth"],
             json=_json,
-            cast_to=models.
+            cast_to=models.V1LipSyncCreateResponse,
             request_options=request_options or default_request_options(),
         )
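
For callers, the rename above is the visible change in 0.12.0: the request body type becomes params.V1LipSyncCreateBodyAssets and the return type becomes models.V1LipSyncCreateResponse. A minimal sketch of a 0.12.0 call follows; the Client entry point, the client.v1.lip_sync accessor, the asset keys, and the id field on the response are assumptions that are not shown in this diff.

# Hedged sketch only: constructor, accessor path, and asset keys are assumptions;
# the diff above confirms only the renamed types and the POST /v1/lip-sync call.
from magic_hour import Client  # assumed top-level entry point

client = Client(token="YOUR_API_TOKEN")  # assumed constructor signature

res = client.v1.lip_sync.create(
    assets={  # shaped like params.V1LipSyncCreateBodyAssets; keys are illustrative
        "video_source": "file",
        "video_file_path": "api-assets/example/video.mp4",
        "audio_file_path": "api-assets/example/audio.mp3",
    },
    start_seconds=0.0,
    end_seconds=15.0,
    width=512,
    height=960,
    name="lip sync example",
)
# res is typed as models.V1LipSyncCreateResponse in 0.12.0
print(res.id)  # `id` assumed, mirroring the create-response models added later in this diff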
magic_hour/resources/v1/text_to_video/client.py

@@ -21,12 +21,12 @@ class TextToVideoClient:
         *,
         end_seconds: float,
         orientation: typing_extensions.Literal["landscape", "portrait", "square"],
-        style: params.
+        style: params.V1TextToVideoCreateBodyStyle,
         name: typing.Union[
             typing.Optional[str], type_utils.NotGiven
         ] = type_utils.NOT_GIVEN,
         request_options: typing.Optional[RequestOptions] = None,
-    ) -> models.
+    ) -> models.V1TextToVideoCreateResponse:
         """
         Text-to-Video

@@ -41,7 +41,7 @@ class TextToVideoClient:
         name: The name of video
         end_seconds: The total duration of the output video in seconds.
         orientation: Determines the orientation of the output video
-        style:
+        style: V1TextToVideoCreateBodyStyle
         request_options: Additional options to customize the HTTP request

         Returns:
@@ -68,14 +68,14 @@ class TextToVideoClient:
                 "orientation": orientation,
                 "style": style,
             },
-            dump_with=params.
+            dump_with=params._SerializerV1TextToVideoCreateBody,
         )
         return self._base_client.request(
             method="POST",
             path="/v1/text-to-video",
             auth_names=["bearerAuth"],
             json=_json,
-            cast_to=models.
+            cast_to=models.V1TextToVideoCreateResponse,
             request_options=request_options or default_request_options(),
         )

@@ -89,12 +89,12 @@ class AsyncTextToVideoClient:
         *,
         end_seconds: float,
         orientation: typing_extensions.Literal["landscape", "portrait", "square"],
-        style: params.
+        style: params.V1TextToVideoCreateBodyStyle,
         name: typing.Union[
             typing.Optional[str], type_utils.NotGiven
         ] = type_utils.NOT_GIVEN,
         request_options: typing.Optional[RequestOptions] = None,
-    ) -> models.
+    ) -> models.V1TextToVideoCreateResponse:
         """
         Text-to-Video

@@ -109,7 +109,7 @@ class AsyncTextToVideoClient:
         name: The name of video
         end_seconds: The total duration of the output video in seconds.
         orientation: Determines the orientation of the output video
-        style:
+        style: V1TextToVideoCreateBodyStyle
         request_options: Additional options to customize the HTTP request

         Returns:
@@ -136,13 +136,13 @@ class AsyncTextToVideoClient:
                 "orientation": orientation,
                 "style": style,
             },
-            dump_with=params.
+            dump_with=params._SerializerV1TextToVideoCreateBody,
         )
         return await self._base_client.request(
             method="POST",
             path="/v1/text-to-video",
             auth_names=["bearerAuth"],
             json=_json,
-            cast_to=models.
+            cast_to=models.V1TextToVideoCreateResponse,
             request_options=request_options or default_request_options(),
         )
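
The async client changes in lockstep, so an asyncio caller sees the same renames. A sketch under the same caveats as above: the AsyncClient entry point and the style key are assumptions, only the parameter names, the orientation literal, and the renamed types come from this diff.

import asyncio

from magic_hour import AsyncClient  # assumed async entry point


async def main() -> None:
    client = AsyncClient(token="YOUR_API_TOKEN")  # assumed constructor
    res = await client.v1.text_to_video.create(
        orientation="landscape",  # Literal["landscape", "portrait", "square"] per the diff
        end_seconds=5.0,
        style={"prompt": "a calm ocean at sunset"},  # shaped like params.V1TextToVideoCreateBodyStyle; key is illustrative
        name="text to video example",
    )
    # typed as models.V1TextToVideoCreateResponse in 0.12.0
    print(res.id)  # `id` assumed, mirroring the create responses added below


asyncio.run(main())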
magic_hour/resources/v1/video_projects/client.py

@@ -49,7 +49,7 @@ class VideoProjectsClient:

     def get(
         self, *, id: str, request_options: typing.Optional[RequestOptions] = None
-    ) -> models.
+    ) -> models.V1VideoProjectsGetResponse:
         """
         Get video details

@@ -86,7 +86,7 @@ class VideoProjectsClient:
             method="GET",
             path=f"/v1/video-projects/{id}",
             auth_names=["bearerAuth"],
-            cast_to=models.
+            cast_to=models.V1VideoProjectsGetResponse,
             request_options=request_options or default_request_options(),
         )

@@ -131,7 +131,7 @@ class AsyncVideoProjectsClient:

     async def get(
         self, *, id: str, request_options: typing.Optional[RequestOptions] = None
-    ) -> models.
+    ) -> models.V1VideoProjectsGetResponse:
         """
         Get video details

@@ -168,6 +168,6 @@ class AsyncVideoProjectsClient:
             method="GET",
             path=f"/v1/video-projects/{id}",
             auth_names=["bearerAuth"],
-            cast_to=models.
+            cast_to=models.V1VideoProjectsGetResponse,
             request_options=request_options or default_request_options(),
         )
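
Since the create endpoints only return an id, the renamed models.V1VideoProjectsGetResponse is what a caller polls for completion. A sketch, assuming the client.v1.video_projects accessor and a status field (the docstrings in this package mention fetching status, but the field itself is not shown in this diff):

import time


def wait_for_video(client, video_id: str, poll_seconds: float = 5.0):
    """Poll GET /v1/video-projects/{id} until the render finishes (hedged sketch)."""
    while True:
        project = client.v1.video_projects.get(id=video_id)  # -> models.V1VideoProjectsGetResponse
        status = getattr(project, "status", None)  # `status` is assumed, not shown in this diff
        if status in ("complete", "error"):  # status values are illustrative
            return project
        time.sleep(poll_seconds)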
magic_hour/resources/v1/video_to_video/client.py

@@ -19,11 +19,11 @@ class VideoToVideoClient:
     def create(
         self,
         *,
-        assets: params.
+        assets: params.V1VideoToVideoCreateBodyAssets,
         end_seconds: float,
         height: int,
         start_seconds: float,
-        style: params.
+        style: params.V1VideoToVideoCreateBodyStyle,
         width: int,
         fps_resolution: typing.Union[
             typing.Optional[typing_extensions.Literal["FULL", "HALF"]],
@@ -33,7 +33,7 @@ class VideoToVideoClient:
             typing.Optional[str], type_utils.NotGiven
         ] = type_utils.NOT_GIVEN,
         request_options: typing.Optional[RequestOptions] = None,
-    ) -> models.
+    ) -> models.V1VideoToVideoCreateResponse:
         """
         Video-to-Video

@@ -53,7 +53,7 @@ class VideoToVideoClient:
         end_seconds: The end time of the input video in seconds
         height: The height of the final output video. Must be divisible by 64. The maximum height depends on your subscription. Please refer to our [pricing page](https://magichour.ai/pricing) for more details
         start_seconds: The start time of the input video in seconds
-        style:
+        style: V1VideoToVideoCreateBodyStyle
         width: The width of the final output video. Must be divisible by 64. The maximum width depends on your subscription. Please refer to our [pricing page](https://magichour.ai/pricing) for more details
         request_options: Additional options to customize the HTTP request

@@ -95,14 +95,14 @@ class VideoToVideoClient:
                 "style": style,
                 "width": width,
             },
-            dump_with=params.
+            dump_with=params._SerializerV1VideoToVideoCreateBody,
         )
         return self._base_client.request(
             method="POST",
             path="/v1/video-to-video",
             auth_names=["bearerAuth"],
             json=_json,
-            cast_to=models.
+            cast_to=models.V1VideoToVideoCreateResponse,
             request_options=request_options or default_request_options(),
         )

@@ -114,11 +114,11 @@ class AsyncVideoToVideoClient:
     async def create(
         self,
         *,
-        assets: params.
+        assets: params.V1VideoToVideoCreateBodyAssets,
         end_seconds: float,
         height: int,
         start_seconds: float,
-        style: params.
+        style: params.V1VideoToVideoCreateBodyStyle,
         width: int,
         fps_resolution: typing.Union[
             typing.Optional[typing_extensions.Literal["FULL", "HALF"]],
@@ -128,7 +128,7 @@ class AsyncVideoToVideoClient:
             typing.Optional[str], type_utils.NotGiven
         ] = type_utils.NOT_GIVEN,
         request_options: typing.Optional[RequestOptions] = None,
-    ) -> models.
+    ) -> models.V1VideoToVideoCreateResponse:
         """
         Video-to-Video

@@ -148,7 +148,7 @@ class AsyncVideoToVideoClient:
         end_seconds: The end time of the input video in seconds
         height: The height of the final output video. Must be divisible by 64. The maximum height depends on your subscription. Please refer to our [pricing page](https://magichour.ai/pricing) for more details
         start_seconds: The start time of the input video in seconds
-        style:
+        style: V1VideoToVideoCreateBodyStyle
         width: The width of the final output video. Must be divisible by 64. The maximum width depends on your subscription. Please refer to our [pricing page](https://magichour.ai/pricing) for more details
         request_options: Additional options to customize the HTTP request

@@ -190,13 +190,13 @@ class AsyncVideoToVideoClient:
                 "style": style,
                 "width": width,
             },
-            dump_with=params.
+            dump_with=params._SerializerV1VideoToVideoCreateBody,
         )
         return await self._base_client.request(
             method="POST",
             path="/v1/video-to-video",
             auth_names=["bearerAuth"],
             json=_json,
-            cast_to=models.
+            cast_to=models.V1VideoToVideoCreateResponse,
             request_options=request_options or default_request_options(),
         )
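
Downstream type hints have to follow the renames as well. A sketch of a thin wrapper annotated with the 0.12.0 names; the magic_hour.types import path is inferred from the file layout listed above, and the wrapper itself is purely illustrative.

from magic_hour.types import models, params  # import path inferred from the package layout


def restyle_clip(
    client,  # a magic_hour client instance (construction not shown in this diff)
    assets: params.V1VideoToVideoCreateBodyAssets,
    style: params.V1VideoToVideoCreateBodyStyle,
) -> models.V1VideoToVideoCreateResponse:
    # fps_resolution accepts the Literal["FULL", "HALF"] shown in the signature above
    return client.v1.video_to_video.create(
        assets=assets,
        style=style,
        start_seconds=0.0,
        end_seconds=10.0,
        width=512,
        height=512,
        fps_resolution="HALF",
    )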
magic_hour/types/models/__init__.py

@@ -1,60 +1,62 @@
-from .
-from .
-
+from .v1_ai_clothes_changer_create_response import V1AiClothesChangerCreateResponse
+from .v1_ai_headshot_generator_create_response import (
+    V1AiHeadshotGeneratorCreateResponse,
 )
-from .
-from .
-from .
-
+from .v1_ai_image_generator_create_response import V1AiImageGeneratorCreateResponse
+from .v1_ai_image_upscaler_create_response import V1AiImageUpscalerCreateResponse
+from .v1_ai_photo_editor_create_response import V1AiPhotoEditorCreateResponse
+from .v1_ai_qr_code_generator_create_response import V1AiQrCodeGeneratorCreateResponse
+from .v1_ai_talking_photo_create_response import V1AiTalkingPhotoCreateResponse
+from .v1_animation_create_response import V1AnimationCreateResponse
+from .v1_face_swap_create_response import V1FaceSwapCreateResponse
+from .v1_face_swap_photo_create_response import V1FaceSwapPhotoCreateResponse
+from .v1_files_upload_urls_create_response import V1FilesUploadUrlsCreateResponse
+from .v1_files_upload_urls_create_response_items_item import (
+    V1FilesUploadUrlsCreateResponseItemsItem,
 )
-from .
-
+from .v1_image_background_remover_create_response import (
+    V1ImageBackgroundRemoverCreateResponse,
 )
-from .
-from .
-
-from .post_v1_ai_image_generator_response import PostV1AiImageGeneratorResponse
-from .post_v1_ai_image_upscaler_response import PostV1AiImageUpscalerResponse
-from .post_v1_ai_photo_editor_response import PostV1AiPhotoEditorResponse
-from .post_v1_ai_qr_code_generator_response import PostV1AiQrCodeGeneratorResponse
-from .post_v1_animation_response import PostV1AnimationResponse
-from .post_v1_face_swap_photo_response import PostV1FaceSwapPhotoResponse
-from .post_v1_face_swap_response import PostV1FaceSwapResponse
-from .post_v1_files_upload_urls_response import PostV1FilesUploadUrlsResponse
-from .post_v1_files_upload_urls_response_items_item import (
-    PostV1FilesUploadUrlsResponseItemsItem,
+from .v1_image_projects_get_response import V1ImageProjectsGetResponse
+from .v1_image_projects_get_response_downloads_item import (
+    V1ImageProjectsGetResponseDownloadsItem,
 )
-from .
-
+from .v1_image_projects_get_response_error import V1ImageProjectsGetResponseError
+from .v1_image_to_video_create_response import V1ImageToVideoCreateResponse
+from .v1_lip_sync_create_response import V1LipSyncCreateResponse
+from .v1_text_to_video_create_response import V1TextToVideoCreateResponse
+from .v1_video_projects_get_response import V1VideoProjectsGetResponse
+from .v1_video_projects_get_response_download import V1VideoProjectsGetResponseDownload
+from .v1_video_projects_get_response_downloads_item import (
+    V1VideoProjectsGetResponseDownloadsItem,
 )
-from .
-from .
-from .post_v1_text_to_video_response import PostV1TextToVideoResponse
-from .post_v1_video_to_video_response import PostV1VideoToVideoResponse
+from .v1_video_projects_get_response_error import V1VideoProjectsGetResponseError
+from .v1_video_to_video_create_response import V1VideoToVideoCreateResponse


 __all__ = [
-    "
-    "
-    "
-    "
-    "
-    "
-    "
-    "
-    "
-    "
-    "
-    "
-    "
-    "
-    "
-    "
-    "
-    "
-    "
-    "
-    "
-    "
-    "
+    "V1AiClothesChangerCreateResponse",
+    "V1AiHeadshotGeneratorCreateResponse",
+    "V1AiImageGeneratorCreateResponse",
+    "V1AiImageUpscalerCreateResponse",
+    "V1AiPhotoEditorCreateResponse",
+    "V1AiQrCodeGeneratorCreateResponse",
+    "V1AiTalkingPhotoCreateResponse",
+    "V1AnimationCreateResponse",
+    "V1FaceSwapCreateResponse",
+    "V1FaceSwapPhotoCreateResponse",
+    "V1FilesUploadUrlsCreateResponse",
+    "V1FilesUploadUrlsCreateResponseItemsItem",
+    "V1ImageBackgroundRemoverCreateResponse",
+    "V1ImageProjectsGetResponse",
+    "V1ImageProjectsGetResponseDownloadsItem",
+    "V1ImageProjectsGetResponseError",
+    "V1ImageToVideoCreateResponse",
+    "V1LipSyncCreateResponse",
+    "V1TextToVideoCreateResponse",
+    "V1VideoProjectsGetResponse",
+    "V1VideoProjectsGetResponseDownload",
+    "V1VideoProjectsGetResponseDownloadsItem",
+    "V1VideoProjectsGetResponseError",
+    "V1VideoToVideoCreateResponse",
 ]
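
The net effect of this __init__.py rewrite is that every Post*/Get* model name is gone from magic_hour.types.models and a V1*CreateResponse / V1*GetResponse name takes its place, so imports written against 0.10.0 need a one-line rename. For example (the old name is taken from the removed imports above):

# 0.10.0
# from magic_hour.types.models import PostV1TextToVideoResponse

# 0.12.0
from magic_hour.types.models import V1TextToVideoCreateResponse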
magic_hour/types/models/v1_ai_headshot_generator_create_response.py

@@ -0,0 +1,25 @@
+import pydantic
+
+
+class V1AiHeadshotGeneratorCreateResponse(pydantic.BaseModel):
+    """
+    Success
+    """
+
+    model_config = pydantic.ConfigDict(
+        arbitrary_types_allowed=True,
+        populate_by_name=True,
+    )
+
+    frame_cost: int = pydantic.Field(
+        alias="frame_cost",
+    )
+    """
+    The frame cost of the image generation
+    """
+    id: str = pydantic.Field(
+        alias="id",
+    )
+    """
+    Unique ID of the image. This value can be used in the [get image project API](https://docs.magichour.ai/api-reference/image-projects/get-image-details) to fetch additional details such as status
+    """
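
Because the new response classes are plain pydantic v2 models, they can be validated directly from a JSON payload, which is a quick way to sanity-check code migrated to the new names. A small sketch with illustrative values (the import path is inferred from the package layout above):

from magic_hour.types.models import V1AiHeadshotGeneratorCreateResponse

payload = {"frame_cost": 50, "id": "clx0example"}  # illustrative values only
resp = V1AiHeadshotGeneratorCreateResponse.model_validate(payload)
assert resp.frame_cost == 50 and resp.id == "clx0example"
print(resp.model_dump(by_alias=True))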
magic_hour/types/models/v1_ai_talking_photo_create_response.py

@@ -0,0 +1,25 @@
+import pydantic
+
+
+class V1AiTalkingPhotoCreateResponse(pydantic.BaseModel):
+    """
+    Success
+    """
+
+    model_config = pydantic.ConfigDict(
+        arbitrary_types_allowed=True,
+        populate_by_name=True,
+    )
+
+    estimated_frame_cost: int = pydantic.Field(
+        alias="estimated_frame_cost",
+    )
+    """
+    Estimated cost of the video in terms of number of frames needed to render the video. Frames will be adjusted when the video completes
+    """
+    id: str = pydantic.Field(
+        alias="id",
+    )
+    """
+    Unique ID of the video. This value can be used in the [get video project API](https://docs.magichour.ai/api-reference/video-projects/get-video-details) to fetch additional details such as status
+    """
magic_hour/types/models/v1_files_upload_urls_create_response.py

@@ -0,0 +1,21 @@
+import pydantic
+import typing
+
+from .v1_files_upload_urls_create_response_items_item import (
+    V1FilesUploadUrlsCreateResponseItemsItem,
+)
+
+
+class V1FilesUploadUrlsCreateResponse(pydantic.BaseModel):
+    """
+    Success
+    """
+
+    model_config = pydantic.ConfigDict(
+        arbitrary_types_allowed=True,
+        populate_by_name=True,
+    )
+
+    items: typing.List[V1FilesUploadUrlsCreateResponseItemsItem] = pydantic.Field(
+        alias="items",
+    )
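
The container/item split survives the rename; only the class names change. A sketch that exercises just the names and aliases shown here (the item fields themselves are not part of this diff, so the list is left empty):

from magic_hour.types.models import V1FilesUploadUrlsCreateResponse

resp = V1FilesUploadUrlsCreateResponse(items=[])
print(resp.model_dump(by_alias=True))  # {'items': []}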
magic_hour/types/models/{post_v1_files_upload_urls_response_items_item.py → v1_files_upload_urls_create_response_items_item.py}
RENAMED

@@ -1,9 +1,9 @@
 import pydantic


-class
+class V1FilesUploadUrlsCreateResponseItemsItem(pydantic.BaseModel):
     """
-
+    V1FilesUploadUrlsCreateResponseItemsItem
     """

     model_config = pydantic.ConfigDict(
magic_hour/types/models/v1_image_background_remover_create_response.py

@@ -0,0 +1,25 @@
+import pydantic
+
+
+class V1ImageBackgroundRemoverCreateResponse(pydantic.BaseModel):
+    """
+    Success
+    """
+
+    model_config = pydantic.ConfigDict(
+        arbitrary_types_allowed=True,
+        populate_by_name=True,
+    )
+
+    frame_cost: int = pydantic.Field(
+        alias="frame_cost",
+    )
+    """
+    The frame cost of the image generation
+    """
+    id: str = pydantic.Field(
+        alias="id",
+    )
+    """
+    Unique ID of the image. This value can be used in the [get image project API](https://docs.magichour.ai/api-reference/image-projects/get-image-details) to fetch additional details such as status
+    """
magic_hour/types/models/{get_v1_image_projects_id_response.py → v1_image_projects_get_response.py}
RENAMED

@@ -2,13 +2,13 @@ import pydantic
 import typing
 import typing_extensions

-from .
-
+from .v1_image_projects_get_response_downloads_item import (
+    V1ImageProjectsGetResponseDownloadsItem,
 )
-from .
+from .v1_image_projects_get_response_error import V1ImageProjectsGetResponseError


-class
+class V1ImageProjectsGetResponse(pydantic.BaseModel):
     """
     Success
     """
@@ -21,7 +21,7 @@ class GetV1ImageProjectsIdResponse(pydantic.BaseModel):
     created_at: str = pydantic.Field(
         alias="created_at",
     )
-    downloads: typing.List[
+    downloads: typing.List[V1ImageProjectsGetResponseDownloadsItem] = pydantic.Field(
         alias="downloads",
     )
     enabled: bool = pydantic.Field(
@@ -30,7 +30,7 @@ class GetV1ImageProjectsIdResponse(pydantic.BaseModel):
     """
     Indicates whether the resource is deleted
     """
-    error: typing.Optional[
+    error: typing.Optional[V1ImageProjectsGetResponseError] = pydantic.Field(
         alias="error",
     )
     """
@@ -68,7 +68,7 @@ class GetV1ImageProjectsIdResponse(pydantic.BaseModel):
     """
     The amount of frames used to generate the image.
     """
-
+    type_: typing_extensions.Literal[
         "AI_HEADSHOT",
         "AI_IMAGE",
         "BACKGROUND_REMOVER",

magic_hour/types/models/{get_v1_video_projects_id_response_error.py → v1_image_projects_get_response_error.py}
RENAMED

@@ -1,7 +1,7 @@
 import pydantic


-class
+class V1ImageProjectsGetResponseError(pydantic.BaseModel):
     """
     In the case of an error, this object will contain the error encountered during video render
     """
@@ -11,7 +11,7 @@ class GetV1VideoProjectsIdResponseError(pydantic.BaseModel):
         populate_by_name=True,
     )

-
+    code: str = pydantic.Field(
         alias="code",
     )
     """
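
Tying the renamed get-response and error models together, a caller checking for a failed render only needs the fields visible in this diff: error is Optional and carries a code. A minimal sketch, with the import path inferred from the package layout above:

from magic_hour.types.models import V1ImageProjectsGetResponse


def describe_failure(project: V1ImageProjectsGetResponse) -> str:
    # `error` is Optional[V1ImageProjectsGetResponseError]; `code` is the only
    # error field shown in this diff, so nothing else is assumed here.
    if project.error is None:
        return "no error recorded"
    return f"render failed with code {project.error.code}"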