magic_hour 0.29.0__py3-none-any.whl → 0.30.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of magic_hour might be problematic. Click here for more details.

magic_hour/environment.py CHANGED
@@ -6,7 +6,7 @@ class Environment(enum.Enum):
6
6
  """Pre-defined base URLs for the API"""
7
7
 
8
8
  ENVIRONMENT = "https://api.magichour.ai"
9
- MOCK_SERVER = "https://api.sideko.dev/v1/mock/magichour/magic-hour/0.29.0"
9
+ MOCK_SERVER = "https://api.sideko.dev/v1/mock/magichour/magic-hour/0.30.0"
10
10
 
11
11
 
12
12
  def _get_base_url(
@@ -48,6 +48,10 @@ from magic_hour.resources.v1.auto_subtitle_generator import (
48
48
  AsyncAutoSubtitleGeneratorClient,
49
49
  AutoSubtitleGeneratorClient,
50
50
  )
51
+ from magic_hour.resources.v1.face_detection import (
52
+ AsyncFaceDetectionClient,
53
+ FaceDetectionClient,
54
+ )
51
55
  from magic_hour.resources.v1.face_swap import AsyncFaceSwapClient, FaceSwapClient
52
56
  from magic_hour.resources.v1.face_swap_photo import (
53
57
  AsyncFaceSwapPhotoClient,
@@ -90,6 +94,7 @@ class V1Client:
90
94
  self._base_client = base_client
91
95
  self.image_projects = ImageProjectsClient(base_client=self._base_client)
92
96
  self.video_projects = VideoProjectsClient(base_client=self._base_client)
97
+ self.face_detection = FaceDetectionClient(base_client=self._base_client)
93
98
  self.ai_clothes_changer = AiClothesChangerClient(base_client=self._base_client)
94
99
  self.ai_face_editor = AiFaceEditorClient(base_client=self._base_client)
95
100
  self.ai_gif_generator = AiGifGeneratorClient(base_client=self._base_client)
@@ -127,6 +132,7 @@ class AsyncV1Client:
127
132
  self._base_client = base_client
128
133
  self.image_projects = AsyncImageProjectsClient(base_client=self._base_client)
129
134
  self.video_projects = AsyncVideoProjectsClient(base_client=self._base_client)
135
+ self.face_detection = AsyncFaceDetectionClient(base_client=self._base_client)
130
136
  self.ai_clothes_changer = AsyncAiClothesChangerClient(
131
137
  base_client=self._base_client
132
138
  )
@@ -0,0 +1,91 @@
1
+
2
+ ### Get face detection details <a name="get"></a>
3
+
4
+ Get the details of a face detection task.
5
+
6
+ **API Endpoint**: `GET /v1/face-detection/{id}`
7
+
8
+ #### Parameters
9
+
10
+ | Parameter | Required | Description | Example |
11
+ |-----------|:--------:|-------------|--------|
12
+ | `id` | ✓ | The id of the task | `"string"` |
13
+
14
+ #### Synchronous Client
15
+
16
+ ```python
17
+ from magic_hour import Client
18
+ from os import getenv
19
+
20
+ client = Client(token=getenv("API_TOKEN"))
21
+ res = client.v1.face_detection.get(id="string")
22
+
23
+ ```
24
+
25
+ #### Asynchronous Client
26
+
27
+ ```python
28
+ from magic_hour import AsyncClient
29
+ from os import getenv
30
+
31
+ client = AsyncClient(token=getenv("API_TOKEN"))
32
+ res = await client.v1.face_detection.get(id="string")
33
+
34
+ ```
35
+
36
+ #### Response
37
+
38
+ ##### Type
39
+ [V1FaceDetectionGetResponse](/magic_hour/types/models/v1_face_detection_get_response.py)
40
+
41
+ ##### Example
42
+ `{"credits_charged": 123, "faces": [{"path": "api-assets/id/0-0.png", "url": "https://videos.magichour.ai/api-assets/id/0-0.png"}], "id": "string", "status": "complete"}`
43
+
44
+ ### Face Detection <a name="create"></a>
45
+
46
+ Detect faces in an image or video.
47
+
48
+ Note: Face detection is free to use for the near future. Pricing may change in the future.
49
+
50
+ **API Endpoint**: `POST /v1/face-detection`
51
+
52
+ #### Parameters
53
+
54
+ | Parameter | Required | Description | Example |
55
+ |-----------|:--------:|-------------|--------|
56
+ | `assets` | ✓ | Provide the assets for face detection | `{"target_file_path": "api-assets/id/1234.png"}` |
57
+ | `confidence_score` | ✗ | Confidence threshold for filtering detected faces. * Higher values (e.g., 0.9) include only faces detected with high certainty, reducing false positives. * Lower values (e.g., 0.3) include more faces, but may increase the chance of incorrect detections. | `0.5` |
58
+
59
+ #### Synchronous Client
60
+
61
+ ```python
62
+ from magic_hour import Client
63
+ from os import getenv
64
+
65
+ client = Client(token=getenv("API_TOKEN"))
66
+ res = client.v1.face_detection.create(
67
+ assets={"target_file_path": "api-assets/id/1234.png"}, confidence_score=0.5
68
+ )
69
+
70
+ ```
71
+
72
+ #### Asynchronous Client
73
+
74
+ ```python
75
+ from magic_hour import AsyncClient
76
+ from os import getenv
77
+
78
+ client = AsyncClient(token=getenv("API_TOKEN"))
79
+ res = await client.v1.face_detection.create(
80
+ assets={"target_file_path": "api-assets/id/1234.png"}, confidence_score=0.5
81
+ )
82
+
83
+ ```
84
+
85
+ #### Response
86
+
87
+ ##### Type
88
+ [V1FaceDetectionCreateResponse](/magic_hour/types/models/v1_face_detection_create_response.py)
89
+
90
+ ##### Example
91
+ `{"credits_charged": 123, "id": "string"}`
@@ -0,0 +1,4 @@
1
+ from .client import AsyncFaceDetectionClient, FaceDetectionClient
2
+
3
+
4
+ __all__ = ["AsyncFaceDetectionClient", "FaceDetectionClient"]
@@ -0,0 +1,191 @@
1
+ import typing
2
+
3
+ from magic_hour.core import (
4
+ AsyncBaseClient,
5
+ RequestOptions,
6
+ SyncBaseClient,
7
+ default_request_options,
8
+ to_encodable,
9
+ type_utils,
10
+ )
11
+ from magic_hour.types import models, params
12
+
13
+
14
class FaceDetectionClient:
    """Synchronous client for the Magic Hour face-detection endpoints."""

    def __init__(self, *, base_client: SyncBaseClient):
        # Shared HTTP client that performs the actual requests.
        self._base_client = base_client

    def get(
        self, *, id: str, request_options: typing.Optional[RequestOptions] = None
    ) -> models.V1FaceDetectionGetResponse:
        """
        Get face detection details

        Get the details of a face detection task.

        GET /v1/face-detection/{id}

        Args:
            id: The id of the task
            request_options: Additional options to customize the HTTP request

        Returns:
            200

        Raises:
            ApiError: A custom exception class that provides additional context
            for API errors, including the HTTP status code and response body.

        Examples:
        ```py
        client.v1.face_detection.get(id="string")
        ```
        """
        return self._base_client.request(
            method="GET",
            path=f"/v1/face-detection/{id}",
            # Fix: `create` passes auth_names but `get` did not, so the bearer
            # token was never attached to this authenticated endpoint. Keep
            # both endpoints consistent.
            auth_names=["bearerAuth"],
            cast_to=models.V1FaceDetectionGetResponse,
            request_options=request_options or default_request_options(),
        )

    def create(
        self,
        *,
        assets: params.V1FaceDetectionCreateBodyAssets,
        confidence_score: typing.Union[
            typing.Optional[float], type_utils.NotGiven
        ] = type_utils.NOT_GIVEN,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> models.V1FaceDetectionCreateResponse:
        """
        Face Detection

        Detect faces in an image or video.

        Note: Face detection is free to use for the near future. Pricing may change in the future.

        POST /v1/face-detection

        Args:
            confidence_score: Confidence threshold for filtering detected faces.
            * Higher values (e.g., 0.9) include only faces detected with high certainty, reducing false positives.
            * Lower values (e.g., 0.3) include more faces, but may increase the chance of incorrect detections.
            assets: Provide the assets for face detection
            request_options: Additional options to customize the HTTP request

        Returns:
            200

        Raises:
            ApiError: A custom exception class that provides additional context
            for API errors, including the HTTP status code and response body.

        Examples:
        ```py
        client.v1.face_detection.create(
            assets={"target_file_path": "api-assets/id/1234.png"}, confidence_score=0.5
        )
        ```
        """
        # Serialize the request body; presumably NOT_GIVEN values are omitted
        # by the serializer rather than sent as null (defined in magic_hour.core).
        _json = to_encodable(
            item={"confidence_score": confidence_score, "assets": assets},
            dump_with=params._SerializerV1FaceDetectionCreateBody,
        )
        return self._base_client.request(
            method="POST",
            path="/v1/face-detection",
            auth_names=["bearerAuth"],
            json=_json,
            cast_to=models.V1FaceDetectionCreateResponse,
            request_options=request_options or default_request_options(),
        )
102
+
103
+
104
class AsyncFaceDetectionClient:
    """Asynchronous client for the Magic Hour face-detection endpoints."""

    def __init__(self, *, base_client: AsyncBaseClient):
        # Shared async HTTP client that performs the actual requests.
        self._base_client = base_client

    async def get(
        self, *, id: str, request_options: typing.Optional[RequestOptions] = None
    ) -> models.V1FaceDetectionGetResponse:
        """
        Get face detection details

        Get the details of a face detection task.

        GET /v1/face-detection/{id}

        Args:
            id: The id of the task
            request_options: Additional options to customize the HTTP request

        Returns:
            200

        Raises:
            ApiError: A custom exception class that provides additional context
            for API errors, including the HTTP status code and response body.

        Examples:
        ```py
        await client.v1.face_detection.get(id="string")
        ```
        """
        return await self._base_client.request(
            method="GET",
            path=f"/v1/face-detection/{id}",
            # Fix: `create` passes auth_names but `get` did not, so the bearer
            # token was never attached to this authenticated endpoint. Keep
            # both endpoints consistent.
            auth_names=["bearerAuth"],
            cast_to=models.V1FaceDetectionGetResponse,
            request_options=request_options or default_request_options(),
        )

    async def create(
        self,
        *,
        assets: params.V1FaceDetectionCreateBodyAssets,
        confidence_score: typing.Union[
            typing.Optional[float], type_utils.NotGiven
        ] = type_utils.NOT_GIVEN,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> models.V1FaceDetectionCreateResponse:
        """
        Face Detection

        Detect faces in an image or video.

        Note: Face detection is free to use for the near future. Pricing may change in the future.

        POST /v1/face-detection

        Args:
            confidence_score: Confidence threshold for filtering detected faces.
            * Higher values (e.g., 0.9) include only faces detected with high certainty, reducing false positives.
            * Lower values (e.g., 0.3) include more faces, but may increase the chance of incorrect detections.
            assets: Provide the assets for face detection
            request_options: Additional options to customize the HTTP request

        Returns:
            200

        Raises:
            ApiError: A custom exception class that provides additional context
            for API errors, including the HTTP status code and response body.

        Examples:
        ```py
        await client.v1.face_detection.create(
            assets={"target_file_path": "api-assets/id/1234.png"}, confidence_score=0.5
        )
        ```
        """
        # Serialize the request body; presumably NOT_GIVEN values are omitted
        # by the serializer rather than sent as null (defined in magic_hour.core).
        _json = to_encodable(
            item={"confidence_score": confidence_score, "assets": assets},
            dump_with=params._SerializerV1FaceDetectionCreateBody,
        )
        return await self._base_client.request(
            method="POST",
            path="/v1/face-detection",
            auth_names=["bearerAuth"],
            json=_json,
            cast_to=models.V1FaceDetectionCreateResponse,
            request_options=request_options or default_request_options(),
        )
@@ -15,6 +15,11 @@ from .v1_animation_create_response import V1AnimationCreateResponse
15
15
  from .v1_auto_subtitle_generator_create_response import (
16
16
  V1AutoSubtitleGeneratorCreateResponse,
17
17
  )
18
+ from .v1_face_detection_create_response import V1FaceDetectionCreateResponse
19
+ from .v1_face_detection_get_response import V1FaceDetectionGetResponse
20
+ from .v1_face_detection_get_response_faces_item import (
21
+ V1FaceDetectionGetResponseFacesItem,
22
+ )
18
23
  from .v1_face_swap_create_response import V1FaceSwapCreateResponse
19
24
  from .v1_face_swap_photo_create_response import V1FaceSwapPhotoCreateResponse
20
25
  from .v1_files_upload_urls_create_response import V1FilesUploadUrlsCreateResponse
@@ -56,6 +61,9 @@ __all__ = [
56
61
  "V1AiTalkingPhotoCreateResponse",
57
62
  "V1AnimationCreateResponse",
58
63
  "V1AutoSubtitleGeneratorCreateResponse",
64
+ "V1FaceDetectionCreateResponse",
65
+ "V1FaceDetectionGetResponse",
66
+ "V1FaceDetectionGetResponseFacesItem",
59
67
  "V1FaceSwapCreateResponse",
60
68
  "V1FaceSwapPhotoCreateResponse",
61
69
  "V1FilesUploadUrlsCreateResponse",
@@ -0,0 +1,25 @@
1
+ import pydantic
2
+
3
+
4
class V1FaceDetectionCreateResponse(pydantic.BaseModel):
    """
    Response body for the face-detection create endpoint
    (POST /v1/face-detection).
    """

    # Accept construction by field name as well as by JSON alias.
    model_config = pydantic.ConfigDict(
        arbitrary_types_allowed=True,
        populate_by_name=True,
    )

    credits_charged: int = pydantic.Field(
        alias="credits_charged",
    )
    """
    The credits charged for the task.
    """
    id: str = pydantic.Field(
        alias="id",
    )
    """
    The id of the task
    """
@@ -0,0 +1,45 @@
1
+ import pydantic
2
+ import typing
3
+ import typing_extensions
4
+
5
+ from .v1_face_detection_get_response_faces_item import (
6
+ V1FaceDetectionGetResponseFacesItem,
7
+ )
8
+
9
+
10
class V1FaceDetectionGetResponse(pydantic.BaseModel):
    """
    Response body for GET /v1/face-detection/{id}: the status and results
    of a face detection task.
    """

    # Accept construction by field name as well as by JSON alias.
    model_config = pydantic.ConfigDict(
        arbitrary_types_allowed=True,
        populate_by_name=True,
    )

    credits_charged: int = pydantic.Field(
        alias="credits_charged",
    )
    """
    The credits charged for the task.
    """
    faces: typing.List[V1FaceDetectionGetResponseFacesItem] = pydantic.Field(
        alias="faces",
    )
    """
    The faces detected in the image or video. The list is populated as faces are detected.
    """
    id: str = pydantic.Field(
        alias="id",
    )
    """
    The id of the task
    """
    status: typing_extensions.Literal["complete", "error", "queued", "rendering"] = (
        pydantic.Field(
            alias="status",
        )
    )
    """
    The status of the detection.
    """
@@ -0,0 +1,25 @@
1
+ import pydantic
2
+
3
+
4
class V1FaceDetectionGetResponseFacesItem(pydantic.BaseModel):
    """
    A single detected face: its stored asset path and a renderable URL.
    """

    # Accept construction by field name as well as by JSON alias.
    model_config = pydantic.ConfigDict(
        arbitrary_types_allowed=True,
        populate_by_name=True,
    )

    path: str = pydantic.Field(
        alias="path",
    )
    """
    The path to the face image. This should be used in face swap photo/video API calls as `.assets.face_mappings.original_face`
    """
    url: str = pydantic.Field(
        alias="url",
    )
    """
    The url to the face image. This is used to render the image in your applications.
    """
@@ -138,6 +138,14 @@ from .v1_auto_subtitle_generator_create_body_style_custom_config import (
138
138
  V1AutoSubtitleGeneratorCreateBodyStyleCustomConfig,
139
139
  _SerializerV1AutoSubtitleGeneratorCreateBodyStyleCustomConfig,
140
140
  )
141
+ from .v1_face_detection_create_body import (
142
+ V1FaceDetectionCreateBody,
143
+ _SerializerV1FaceDetectionCreateBody,
144
+ )
145
+ from .v1_face_detection_create_body_assets import (
146
+ V1FaceDetectionCreateBodyAssets,
147
+ _SerializerV1FaceDetectionCreateBodyAssets,
148
+ )
141
149
  from .v1_face_swap_create_body import (
142
150
  V1FaceSwapCreateBody,
143
151
  _SerializerV1FaceSwapCreateBody,
@@ -257,6 +265,8 @@ __all__ = [
257
265
  "V1AutoSubtitleGeneratorCreateBodyAssets",
258
266
  "V1AutoSubtitleGeneratorCreateBodyStyle",
259
267
  "V1AutoSubtitleGeneratorCreateBodyStyleCustomConfig",
268
+ "V1FaceDetectionCreateBody",
269
+ "V1FaceDetectionCreateBodyAssets",
260
270
  "V1FaceSwapCreateBody",
261
271
  "V1FaceSwapCreateBodyAssets",
262
272
  "V1FaceSwapPhotoCreateBody",
@@ -313,6 +323,8 @@ __all__ = [
313
323
  "_SerializerV1AutoSubtitleGeneratorCreateBodyAssets",
314
324
  "_SerializerV1AutoSubtitleGeneratorCreateBodyStyle",
315
325
  "_SerializerV1AutoSubtitleGeneratorCreateBodyStyleCustomConfig",
326
+ "_SerializerV1FaceDetectionCreateBody",
327
+ "_SerializerV1FaceDetectionCreateBodyAssets",
316
328
  "_SerializerV1FaceSwapCreateBody",
317
329
  "_SerializerV1FaceSwapCreateBodyAssets",
318
330
  "_SerializerV1FaceSwapPhotoCreateBody",
@@ -0,0 +1,44 @@
1
+ import pydantic
2
+ import typing
3
+ import typing_extensions
4
+
5
+ from .v1_face_detection_create_body_assets import (
6
+ V1FaceDetectionCreateBodyAssets,
7
+ _SerializerV1FaceDetectionCreateBodyAssets,
8
+ )
9
+
10
+
11
class V1FaceDetectionCreateBody(typing_extensions.TypedDict):
    """
    Request body for POST /v1/face-detection (caller-facing typed dict).
    """

    assets: typing_extensions.Required[V1FaceDetectionCreateBodyAssets]
    """
    Provide the assets for face detection
    """

    confidence_score: typing_extensions.NotRequired[float]
    """
    Confidence threshold for filtering detected faces.
    * Higher values (e.g., 0.9) include only faces detected with high certainty, reducing false positives.
    * Lower values (e.g., 0.3) include more faces, but may increase the chance of incorrect detections.
    """
27
+
28
+
29
class _SerializerV1FaceDetectionCreateBody(pydantic.BaseModel):
    """
    Serializer for V1FaceDetectionCreateBody handling case conversions
    and file omissions as dictated by the API
    """

    # Accept construction by field name as well as by JSON alias.
    model_config = pydantic.ConfigDict(
        populate_by_name=True,
    )

    assets: _SerializerV1FaceDetectionCreateBodyAssets = pydantic.Field(
        alias="assets",
    )
    # Optional on the wire; defaults to None when the caller omits it.
    confidence_score: typing.Optional[float] = pydantic.Field(
        alias="confidence_score", default=None
    )
@@ -0,0 +1,28 @@
1
+ import pydantic
2
+ import typing_extensions
3
+
4
+
5
class V1FaceDetectionCreateBodyAssets(typing_extensions.TypedDict):
    """
    Provide the assets for face detection
    """

    target_file_path: typing_extensions.Required[str]
    """
    This is the image or video where the face will be detected. This value can be either the `file_path` field from the response of the [upload urls API](https://docs.magichour.ai/api-reference/files/generate-asset-upload-urls), or the url of the file.
    """
14
+
15
+
16
class _SerializerV1FaceDetectionCreateBodyAssets(pydantic.BaseModel):
    """
    Serializer for V1FaceDetectionCreateBodyAssets handling case conversions
    and file omissions as dictated by the API
    """

    # Accept construction by field name as well as by JSON alias.
    model_config = pydantic.ConfigDict(
        populate_by_name=True,
    )

    target_file_path: str = pydantic.Field(
        alias="target_file_path",
    )
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: magic_hour
3
- Version: 0.29.0
3
+ Version: 0.30.0
4
4
  Summary: Python SDK for Magic Hour API
5
5
  Requires-Python: >=3.8,<4.0
6
6
  Classifier: Programming Language :: Python :: 3
@@ -111,6 +111,11 @@ client = AsyncClient(token="my api key")
111
111
 
112
112
  * [create](magic_hour/resources/v1/auto_subtitle_generator/README.md#create) - Auto Subtitle Generator
113
113
 
114
+ ### [v1.face_detection](magic_hour/resources/v1/face_detection/README.md)
115
+
116
+ * [create](magic_hour/resources/v1/face_detection/README.md#create) - Face Detection
117
+ * [get](magic_hour/resources/v1/face_detection/README.md#get) - Get face detection details
118
+
114
119
  ### [v1.face_swap](magic_hour/resources/v1/face_swap/README.md)
115
120
 
116
121
  * [create](magic_hour/resources/v1/face_swap/README.md#create) - Face Swap video
@@ -10,7 +10,7 @@ magic_hour/core/request.py,sha256=_ikn8iZ2fU9Ubqnt7M9hdEnXGV6AAFHJYmDKBtxEY4I,52
10
10
  magic_hour/core/response.py,sha256=Sl7nPL2axmz7em_6d9TkFSnQQKUpalWaVWbPPWoXJgM,10180
11
11
  magic_hour/core/type_utils.py,sha256=4bU9WXnMXJ6YTtuqOMiB8t6Xw0RlfVWJ-IDBONlqEtQ,461
12
12
  magic_hour/core/utils.py,sha256=34SiC1vw2A0TkYHONgMA_d09soIIYiiBWRXCZGdwGIk,1669
13
- magic_hour/environment.py,sha256=mxZ7QLQF8sbaorhkKKGLAx1spIr-FZckYu_7axRgU9s,535
13
+ magic_hour/environment.py,sha256=UHZwPSDACt4Te7qmhCyVpckUCu4kcddbgnUIBp7Uztw,535
14
14
  magic_hour/resources/v1/__init__.py,sha256=Aj0sjVcoijjQyieNBxv2_uewPYC2vO2UG-ehoBgCz5E,86
15
15
  magic_hour/resources/v1/ai_clothes_changer/README.md,sha256=x9cVTx9nHsyIutYjoUk1DeJg55cti6DAN_C-kBI_47Q,1564
16
16
  magic_hour/resources/v1/ai_clothes_changer/__init__.py,sha256=6W_Y2HxG2sDOBiJyzngK3Q2S3xfQgpK-j8xFRmBAhbQ,142
@@ -51,7 +51,10 @@ magic_hour/resources/v1/animation/client.py,sha256=YYjggl_hszTW-Sn9SFs3m7bz7PvtR
51
51
  magic_hour/resources/v1/auto_subtitle_generator/README.md,sha256=sg2GpGO_4dl_FAVzNf-DdisQMB5GyO971T1IFBZflHw,2238
52
52
  magic_hour/resources/v1/auto_subtitle_generator/__init__.py,sha256=dnWFEiSdIl3AwFVprqWHSMzqpeHgZz9wPEMxm7c3Xnc,162
53
53
  magic_hour/resources/v1/auto_subtitle_generator/client.py,sha256=6HvbAVsB9cJol6ul_3WNy-NebMSmZ28FJykBRytYRvE,6267
54
- magic_hour/resources/v1/client.py,sha256=FCY0SI1tYEK8hs3JbSdQuMODC5n0uLoteFh4oLaZvqg,7286
54
+ magic_hour/resources/v1/client.py,sha256=ERCl_sMEwUgTEoo7hmknz4c4QoyPkSc5Hvlci976Xt8,7563
55
+ magic_hour/resources/v1/face_detection/README.md,sha256=tbnVcaHIWrRcaGc8Ea3Cg5XlyrVTtxaXXHnCWdzDSIw,2445
56
+ magic_hour/resources/v1/face_detection/__init__.py,sha256=nLsNn9D1wGryYyD41H7QqAeeHuSsfFSJtYQTMVCQQUk,130
57
+ magic_hour/resources/v1/face_detection/client.py,sha256=UOhnkvkUR772QGXVFWraJJ3LWcq-Hnp2-Guwtl0F7jI,6271
55
58
  magic_hour/resources/v1/face_swap/README.md,sha256=-bENF57lpZQu55C1psxKc45Ev1kMcD8GYWXGxa9hbzU,3334
56
59
  magic_hour/resources/v1/face_swap/__init__.py,sha256=lyg5uAHyYHEUVAiAZtP3zwjGCEGqq8IWbQKexVdhr00,110
57
60
  magic_hour/resources/v1/face_swap/client.py,sha256=-BpJae7J4PZPUG45BMA3HBB2XhrbHpgWqwwyaDFH88A,8519
@@ -87,7 +90,7 @@ magic_hour/resources/v1/video_projects/client.py,sha256=JvhYhf3phYkdVj8VpWxvxF8q
87
90
  magic_hour/resources/v1/video_to_video/README.md,sha256=-b1nUKbUxXgtffS0yNPmvkYGL8ZVSAM4K3cVfT4-ghs,4081
88
91
  magic_hour/resources/v1/video_to_video/__init__.py,sha256=1SHaRLlsrlBkdxxKBYgdbHrGATlRvqlXc22RpjjHaOA,126
89
92
  magic_hour/resources/v1/video_to_video/client.py,sha256=WFmYL3ZBLyKLDBOOOc9tJigtwviI6JLjbH7yJSsiIyM,10404
90
- magic_hour/types/models/__init__.py,sha256=Kd0_aVtbVhB36cD7P9VnqvELSySOKqjHjLyXTsuEnLs,3686
93
+ magic_hour/types/models/__init__.py,sha256=VLH_NXpUAVSZd16zlv4DobqAQdQFPWkWORqttLv5iDw,4048
91
94
  magic_hour/types/models/v1_ai_clothes_changer_create_response.py,sha256=rQJqlDf7Ql46hR4eAepU6SnZS3fH-gewmSJ-OvEY5K0,1102
92
95
  magic_hour/types/models/v1_ai_face_editor_create_response.py,sha256=pGpfZMCRhhDYCV-tj3hfmuXvQPhb44csnyrcwh9tfQM,1098
93
96
  magic_hour/types/models/v1_ai_gif_generator_create_response.py,sha256=3T7PE17mdU9msZTfl2Gw-u1mTbjZiJm7gAxgIICloN4,1100
@@ -101,6 +104,9 @@ magic_hour/types/models/v1_ai_qr_code_generator_create_response.py,sha256=dwTaT_
101
104
  magic_hour/types/models/v1_ai_talking_photo_create_response.py,sha256=4dnMUHVcVAVxywGPj_2wcH_BOCjS6qh_loyMszJVzBY,1348
102
105
  magic_hour/types/models/v1_animation_create_response.py,sha256=EXgZ-7dGPSKgGDyG72r_273vxXYsOkKHbvmujmmCE-c,1343
103
106
  magic_hour/types/models/v1_auto_subtitle_generator_create_response.py,sha256=HGwpgKkYBToPRhbGX7SfjP1HwaAQF-mMszxGNsP_Yhg,1355
107
+ magic_hour/types/models/v1_face_detection_create_response.py,sha256=fEvCFMgcVF5LwVQq1Egp4OjOSTEM_oxzForZnmmh3WY,473
108
+ magic_hour/types/models/v1_face_detection_get_response.py,sha256=CNDz1mWoGqAFjk8n4DnagefV7eQfJ3eyfJpJwmFe-9s,1024
109
+ magic_hour/types/models/v1_face_detection_get_response_faces_item.py,sha256=gg5YSTar6mZteC2cJV2vkGiol3caomAnm3W9f2KbKzM,618
104
110
  magic_hour/types/models/v1_face_swap_create_response.py,sha256=hbVncqJZ4_57DX6k6ufG9ipIEqbMaT9jiCn59LOgPiw,1342
105
111
  magic_hour/types/models/v1_face_swap_photo_create_response.py,sha256=6SQ4lbfHGsZRoDS1PVatqpZN6pOuFL0rqimObYrq7X4,1099
106
112
  magic_hour/types/models/v1_files_upload_urls_create_response.py,sha256=ecdnxoo-ZBTa2kAusHq4nyz6RdugzyN7w4oazJt5ri0,460
@@ -118,7 +124,7 @@ magic_hour/types/models/v1_video_projects_get_response_download.py,sha256=nudDCN
118
124
  magic_hour/types/models/v1_video_projects_get_response_downloads_item.py,sha256=DlUuLBSGa7jWoozxferkaOsGc4jASItcjjWbBXGu620,410
119
125
  magic_hour/types/models/v1_video_projects_get_response_error.py,sha256=49QxnXAmYHcvSWuuhbQZeGlUfqVcO4YwZ414GczQnvA,568
120
126
  magic_hour/types/models/v1_video_to_video_create_response.py,sha256=HCquU2Dciu6jCvhlpce8sGg1CypZngvtrvkwyCWOkSY,1346
121
- magic_hour/types/params/__init__.py,sha256=TJKYYVhnhDCTzxv1G6bPtLeH0HTpIniCpIRBxATf1CM,12526
127
+ magic_hour/types/params/__init__.py,sha256=5p8SY-XeAXQFQPu0aoVa9jkQ4_LSxGI3rYbp-eF8PMY,12951
122
128
  magic_hour/types/params/v1_ai_clothes_changer_create_body.py,sha256=X5koqrTxYLiKcRMqPF7r-VwQzy4r_7k81o1289zHJvo,1006
123
129
  magic_hour/types/params/v1_ai_clothes_changer_create_body_assets.py,sha256=GGnXOExxXtnHT9wQpDCEkLHQlQB5MbAbYuU47iHGf70,1509
124
130
  magic_hour/types/params/v1_ai_face_editor_create_body.py,sha256=sF7mJbqratllYwQ3slqUTctOndAYnH9BDMJu-49Db-4,1313
@@ -154,6 +160,8 @@ magic_hour/types/params/v1_auto_subtitle_generator_create_body.py,sha256=QdCJtdS
154
160
  magic_hour/types/params/v1_auto_subtitle_generator_create_body_assets.py,sha256=-VQ9lC0jQRSwjIkWKvpxhcfrLivtlHKB251ueq0yXDA,878
155
161
  magic_hour/types/params/v1_auto_subtitle_generator_create_body_style.py,sha256=IRF1I56TacS5DSWsCeccYfgy2nq5C9q0bivkih8YIsk,2129
156
162
  magic_hour/types/params/v1_auto_subtitle_generator_create_body_style_custom_config.py,sha256=D5R-Ek62hDy0lUjBkLtyVlqwxA9pU7EO92AT4EuI8yI,2866
163
+ magic_hour/types/params/v1_face_detection_create_body.py,sha256=GSk6zzi5jqb-oluAf5IpPfVuwXkKyKceWblieLZ572s,1277
164
+ magic_hour/types/params/v1_face_detection_create_body_assets.py,sha256=tprBC360DJze2fyyQaWFH28CgW-kwg5sRvoBmYuHYyM,867
157
165
  magic_hour/types/params/v1_face_swap_create_body.py,sha256=ZLxWHSs5NHHeBxBAE-8AEgUwEpEhpYQC4fW9dkmDXxQ,2923
158
166
  magic_hour/types/params/v1_face_swap_create_body_assets.py,sha256=CYcleQ4o_YLxRjRiVRwB-L_Cr0WTjsb6417uwdT0fas,1888
159
167
  magic_hour/types/params/v1_face_swap_photo_create_body.py,sha256=OYsrz7d7i7eg28bR_YS5ucl6k_bMhmNrOt2dF7MYdM4,979
@@ -175,7 +183,7 @@ magic_hour/types/params/v1_text_to_video_create_body_style.py,sha256=cEZO917hipE
175
183
  magic_hour/types/params/v1_video_to_video_create_body.py,sha256=Pgok6GUVHrpW6H3rwdVFA3O5YJvjgviCZkmmHddOSWo,3802
176
184
  magic_hour/types/params/v1_video_to_video_create_body_assets.py,sha256=_-6iA5d8ndka6iJWyWvlJwzRkQcmurJE6hkg-fDwBmQ,1531
177
185
  magic_hour/types/params/v1_video_to_video_create_body_style.py,sha256=RrDBhN2KQnCf9hGsnl3sAYvuFRsxth2JXfe5la0IYJg,5749
178
- magic_hour-0.29.0.dist-info/LICENSE,sha256=F3fxj7JXPgB2K0uj8YXRsVss4u-Dgt_-U3V4VXsivNI,1070
179
- magic_hour-0.29.0.dist-info/METADATA,sha256=jE9pI1si6PdWWx_0e-pbgf6uOVCnCLo6Gzsgk6ehuL8,5840
180
- magic_hour-0.29.0.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
181
- magic_hour-0.29.0.dist-info/RECORD,,
186
+ magic_hour-0.30.0.dist-info/LICENSE,sha256=F3fxj7JXPgB2K0uj8YXRsVss4u-Dgt_-U3V4VXsivNI,1070
187
+ magic_hour-0.30.0.dist-info/METADATA,sha256=GYEMkJXeN9w8Ip84R1eupCy9bIdGwDauEfRLu53hkQ8,6092
188
+ magic_hour-0.30.0.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
189
+ magic_hour-0.30.0.dist-info/RECORD,,