sticker-convert 2.13.3.0__py3-none-any.whl → 2.17.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (93)
  1. sticker_convert/__main__.py +24 -27
  2. sticker_convert/auth/__init__.py +0 -0
  3. sticker_convert/auth/auth_base.py +19 -0
  4. sticker_convert/{utils/auth/get_discord_auth.py → auth/auth_discord.py} +149 -118
  5. sticker_convert/{utils/auth/get_kakao_auth.py → auth/auth_kakao_android_login.py} +331 -330
  6. sticker_convert/auth/auth_kakao_desktop_login.py +327 -0
  7. sticker_convert/{utils/auth/get_kakao_desktop_auth.py → auth/auth_kakao_desktop_memdump.py} +281 -263
  8. sticker_convert/{utils/auth/get_line_auth.py → auth/auth_line.py} +98 -80
  9. sticker_convert/{utils/auth/get_signal_auth.py → auth/auth_signal.py} +139 -135
  10. sticker_convert/auth/auth_telethon.py +161 -0
  11. sticker_convert/{utils/auth/get_viber_auth.py → auth/auth_viber.py} +250 -235
  12. sticker_convert/{utils/auth → auth}/telegram_api.py +736 -675
  13. sticker_convert/cli.py +623 -608
  14. sticker_convert/converter.py +1093 -1084
  15. sticker_convert/definitions.py +4 -0
  16. sticker_convert/downloaders/download_band.py +111 -110
  17. sticker_convert/downloaders/download_base.py +171 -166
  18. sticker_convert/downloaders/download_discord.py +92 -91
  19. sticker_convert/downloaders/download_kakao.py +417 -404
  20. sticker_convert/downloaders/download_line.py +484 -475
  21. sticker_convert/downloaders/download_ogq.py +80 -79
  22. sticker_convert/downloaders/download_signal.py +108 -105
  23. sticker_convert/downloaders/download_telegram.py +56 -55
  24. sticker_convert/downloaders/download_viber.py +121 -120
  25. sticker_convert/gui.py +788 -873
  26. sticker_convert/gui_components/frames/comp_frame.py +180 -166
  27. sticker_convert/gui_components/frames/config_frame.py +156 -113
  28. sticker_convert/gui_components/frames/control_frame.py +32 -30
  29. sticker_convert/gui_components/frames/cred_frame.py +232 -233
  30. sticker_convert/gui_components/frames/input_frame.py +139 -137
  31. sticker_convert/gui_components/frames/output_frame.py +112 -110
  32. sticker_convert/gui_components/frames/right_clicker.py +25 -23
  33. sticker_convert/gui_components/windows/advanced_compression_window.py +757 -757
  34. sticker_convert/gui_components/windows/base_window.py +7 -2
  35. sticker_convert/gui_components/windows/discord_get_auth_window.py +79 -82
  36. sticker_convert/gui_components/windows/kakao_get_auth_window.py +511 -321
  37. sticker_convert/gui_components/windows/line_get_auth_window.py +94 -102
  38. sticker_convert/gui_components/windows/signal_get_auth_window.py +84 -89
  39. sticker_convert/gui_components/windows/viber_get_auth_window.py +168 -168
  40. sticker_convert/ios-message-stickers-template/.github/FUNDING.yml +3 -3
  41. sticker_convert/ios-message-stickers-template/README.md +10 -10
  42. sticker_convert/ios-message-stickers-template/stickers/Info.plist +43 -43
  43. sticker_convert/ios-message-stickers-template/stickers StickerPackExtension/Info.plist +31 -31
  44. sticker_convert/ios-message-stickers-template/stickers StickerPackExtension/Stickers.xcstickers/Contents.json +6 -6
  45. sticker_convert/ios-message-stickers-template/stickers StickerPackExtension/Stickers.xcstickers/Sticker Pack.stickerpack/Contents.json +20 -20
  46. sticker_convert/ios-message-stickers-template/stickers StickerPackExtension/Stickers.xcstickers/Sticker Pack.stickerpack/Sticker 1.sticker/Contents.json +9 -9
  47. sticker_convert/ios-message-stickers-template/stickers StickerPackExtension/Stickers.xcstickers/Sticker Pack.stickerpack/Sticker 2.sticker/Contents.json +9 -9
  48. sticker_convert/ios-message-stickers-template/stickers StickerPackExtension/Stickers.xcstickers/Sticker Pack.stickerpack/Sticker 3.sticker/Contents.json +9 -9
  49. sticker_convert/ios-message-stickers-template/stickers StickerPackExtension/Stickers.xcstickers/iMessage App Icon.stickersiconset/Contents.json +91 -91
  50. sticker_convert/ios-message-stickers-template/stickers.xcodeproj/project.pbxproj +364 -364
  51. sticker_convert/ios-message-stickers-template/stickers.xcodeproj/project.xcworkspace/contents.xcworkspacedata +7 -7
  52. sticker_convert/ios-message-stickers-template/stickers.xcodeproj/project.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist +8 -8
  53. sticker_convert/ios-message-stickers-template/stickers.xcodeproj/xcuserdata/niklaspeterson.xcuserdatad/xcschemes/xcschememanagement.plist +14 -14
  54. sticker_convert/job.py +166 -130
  55. sticker_convert/job_option.py +1 -0
  56. sticker_convert/locales/en_US/LC_MESSAGES/base.mo +0 -0
  57. sticker_convert/locales/ja_JP/LC_MESSAGES/base.mo +0 -0
  58. sticker_convert/locales/zh_CN/LC_MESSAGES/base.mo +0 -0
  59. sticker_convert/locales/zh_TW/LC_MESSAGES/base.mo +0 -0
  60. sticker_convert/py.typed +0 -0
  61. sticker_convert/resources/NotoColorEmoji.ttf +0 -0
  62. sticker_convert/resources/help.ja_JP.json +88 -0
  63. sticker_convert/resources/help.json +10 -7
  64. sticker_convert/resources/help.zh_CN.json +88 -0
  65. sticker_convert/resources/help.zh_TW.json +88 -0
  66. sticker_convert/resources/input.ja_JP.json +74 -0
  67. sticker_convert/resources/input.json +121 -121
  68. sticker_convert/resources/input.zh_CN.json +74 -0
  69. sticker_convert/resources/input.zh_TW.json +74 -0
  70. sticker_convert/resources/output.ja_JP.json +38 -0
  71. sticker_convert/resources/output.zh_CN.json +38 -0
  72. sticker_convert/resources/output.zh_TW.json +38 -0
  73. sticker_convert/uploaders/compress_wastickers.py +186 -177
  74. sticker_convert/uploaders/upload_base.py +44 -35
  75. sticker_convert/uploaders/upload_signal.py +218 -203
  76. sticker_convert/uploaders/upload_telegram.py +353 -338
  77. sticker_convert/uploaders/upload_viber.py +178 -169
  78. sticker_convert/uploaders/xcode_imessage.py +295 -286
  79. sticker_convert/utils/callback.py +238 -6
  80. sticker_convert/utils/emoji.py +16 -4
  81. sticker_convert/utils/files/json_resources_loader.py +24 -19
  82. sticker_convert/utils/files/metadata_handler.py +3 -3
  83. sticker_convert/utils/translate.py +108 -0
  84. sticker_convert/utils/url_detect.py +40 -37
  85. sticker_convert/version.py +1 -1
  86. {sticker_convert-2.13.3.0.dist-info → sticker_convert-2.17.0.0.dist-info}/METADATA +89 -74
  87. {sticker_convert-2.13.3.0.dist-info → sticker_convert-2.17.0.0.dist-info}/RECORD +91 -74
  88. sticker_convert/utils/auth/telethon_setup.py +0 -97
  89. sticker_convert/utils/singletons.py +0 -18
  90. {sticker_convert-2.13.3.0.dist-info → sticker_convert-2.17.0.0.dist-info}/WHEEL +0 -0
  91. {sticker_convert-2.13.3.0.dist-info → sticker_convert-2.17.0.0.dist-info}/entry_points.txt +0 -0
  92. {sticker_convert-2.13.3.0.dist-info → sticker_convert-2.17.0.0.dist-info}/licenses/LICENSE +0 -0
  93. {sticker_convert-2.13.3.0.dist-info → sticker_convert-2.17.0.0.dist-info}/top_level.txt +0 -0
@@ -1,1084 +1,1093 @@
1
- #!/usr/bin/env python3
2
- import json
3
- import os
4
- from fractions import Fraction
5
- from io import BytesIO
6
- from math import ceil, floor, log2
7
- from pathlib import Path
8
- from typing import TYPE_CHECKING, Any, Dict, List, Literal, Optional, Tuple, Union, cast
9
-
10
- import numpy as np
11
- from bs4 import BeautifulSoup
12
- from PIL import Image
13
- from PIL import __version__ as PillowVersion
14
- from PIL import features
15
-
16
- from sticker_convert.job_option import CompOption
17
- from sticker_convert.utils.callback import CallbackProtocol, CallbackReturn
18
- from sticker_convert.utils.chrome_remotedebug import CRD
19
- from sticker_convert.utils.files.cache_store import CacheStore
20
- from sticker_convert.utils.media.codec_info import CodecInfo, rounding
21
- from sticker_convert.utils.media.format_verify import FormatVerify
22
- from sticker_convert.utils.singletons import singletons
23
-
24
- if TYPE_CHECKING:
25
- from av.video.frame import VideoFrame
26
- from av.video.plane import VideoPlane
27
-
28
# Progress/log message templates, filled via str.format and sent through the
# callback (cb.put). Prefix letters: [I]nfo, [S]uccess/skip, [C]ompressing,
# [F]ailure; MSG_REDO_COMP's first placeholder is the '<'/'>' comparison sign.
MSG_START_COMP = "[I] Start compressing {} -> {}"
MSG_SKIP_COMP = "[S] Compatible file found, skip compress and just copy {} -> {}"
MSG_COMP = (
    "[C] Compressing {} -> {} res={}x{}, quality={}, fps={}, color={} (step {}-{}-{})"
)
MSG_REDO_COMP = "[{}] Compressed {} -> {} but size {} {} limit {}, recompressing"
MSG_DONE_COMP = "[S] Successful compression {} -> {} size {} (step {})"
MSG_FAIL_COMP = (
    "[F] Failed Compression {} -> {}, "
    "cannot get below limit {} with lowest quality under current settings (Best size: {})"
)

# YUV -> RGB conversion matrix used by yuva_to_rgba(); the coefficients look
# like limited-range BT.709 (1.164 luma scale) — confirm against source spec.
YUV_RGB_MATRIX = np.array(
    [
        [1.164, 0.000, 1.793],
        [1.164, -0.213, -0.533],
        [1.164, 2.112, 0.000],
    ]
)

# Whether animated WebP is supported
# See https://pillow.readthedocs.io/en/stable/handbook/image-file-formats.html#saving-sequences
PIL_WEBP_ANIM = cast(bool, features.check("webp_anim"))  # type: ignore
51
-
52
-
53
def get_step_value(
    max_step: Optional[int],
    min_step: Optional[int],
    step: int,
    steps: int,
    power: float = 1.0,
    even: bool = False,
    snap_pow2: bool = False,
) -> Optional[int]:
    """Interpolate a compression parameter for the given step.

    Power should be between -1 and positive infinity.
    Smaller power = More 'importance' of the parameter.
    Power of 1 is linear relationship
    e.g. fps has lower power -> Try not to reduce it early on

    Returns None when either bound is None (parameter unused).
    """
    if max_step is None or min_step is None:
        return None

    factor = pow(step / steps, power) if step > 0 else 0

    # NOTE(review): the powered factor is multiplied on top of the linear
    # step/steps ratio, so power=1 behaves quadratically rather than
    # linearly as the docstring suggests — confirm this is intended.
    v = round((max_step - min_step) * step / steps * factor + min_step)

    if snap_pow2 and floor(log2(max_step)) >= ceil(log2(min_step)):
        # Snap v to the nearest power of two inside [min_step, max_step]
        lower_pow2 = 2 ** max(floor(log2(v)), ceil(log2(min_step)))
        upper_pow2 = 2 ** min(ceil(log2(v)), floor(log2(max_step)))
        if abs(v - lower_pow2) <= abs(v - upper_pow2):
            return lower_pow2
        return upper_pow2

    # Some containers need even dimensions; round odd values up
    if even and v % 2 == 1:
        return v + 1
    return v
87
-
88
-
89
def useful_array(
    plane: "VideoPlane", bytes_per_pixel: int = 1, dtype: str = "uint8"
) -> "np.ndarray[Any, Any]":
    """Return a video plane's pixel bytes with per-line padding stripped.

    av planes may pad each line (``line_size`` larger than the visible
    ``width * bytes_per_pixel``); drop the padding so the flat buffer can be
    reshaped into an image, then view it as the requested dtype.
    """
    stride = abs(plane.line_size)
    payload = plane.width * bytes_per_pixel
    buf: "np.ndarray[Any, Any]" = np.frombuffer(cast(bytes, plane), np.uint8)
    if stride != payload:
        # Keep only the leading payload bytes of every line
        buf = buf.reshape(-1, stride)[:, :payload].reshape(-1)
    return buf.view(np.dtype(dtype))
98
-
99
-
100
def yuva_to_rgba(frame: "VideoFrame") -> "np.ndarray[Any, Any]":
    # https://stackoverflow.com/questions/72308308/converting-yuv-to-rgb-in-python-coefficients-work-with-array-dont-work-with-n
    # Convert a yuva420p av VideoFrame into an RGBA uint8 array of shape
    # (height, width, 4). The alpha plane is passed through untouched.

    width = frame.width
    height = frame.height

    # Planes: 0=Y (full res), 1=U, 2=V (quarter res), 3=A (full res)
    y = useful_array(frame.planes[0]).reshape(height, width)
    u = useful_array(frame.planes[1]).reshape(height // 2, width // 2)
    v = useful_array(frame.planes[2]).reshape(height // 2, width // 2)
    a = useful_array(frame.planes[3]).reshape(height, width)

    # Nearest-neighbour upsample of the subsampled chroma to full resolution
    u = u.repeat(2, axis=0).repeat(2, axis=1)
    v = v.repeat(2, axis=0).repeat(2, axis=1)

    y = y.reshape((y.shape[0], y.shape[1], 1))
    u = u.reshape((u.shape[0], u.shape[1], 1))
    v = v.reshape((v.shape[0], v.shape[1], 1))
    a = a.reshape((a.shape[0], a.shape[1], 1))

    yuv_array = np.concatenate((y, u, v), axis=2)

    # Limited-range normalisation: Y clipped to [16, 235] and offset by 16,
    # U/V clipped to [16, 240] and centred on 128, before the matrix multiply
    yuv_array = yuv_array.astype(np.float32)
    yuv_array[:, :, 0] = (
        yuv_array[:, :, 0].clip(16, 235).astype(yuv_array.dtype) - 16  # type: ignore
    )
    yuv_array[:, :, 1:] = (
        yuv_array[:, :, 1:].clip(16, 240).astype(yuv_array.dtype) - 128  # type: ignore
    )

    rgb_array = np.matmul(yuv_array, YUV_RGB_MATRIX.T).clip(0, 255).astype("uint8")

    # Append the original alpha plane as the 4th channel
    return np.concatenate((rgb_array, a), axis=2)
132
-
133
-
134
class StickerConvert:
    """Compress a single sticker file to the configured format and limits."""

    def __init__(
        self,
        in_f: Union[Path, Tuple[Path, bytes]],
        out_f: Path,
        opt_comp: CompOption,
        cb: CallbackProtocol,
        # cb_return: CallbackReturn
    ) -> None:
        # Input is either a path on disk, or a (path, bytes) tuple where the
        # bytes hold the content and the path only contributes name/suffix
        self.in_f: Union[bytes, Path]
        if isinstance(in_f, Path):
            self.in_f = in_f
            self.in_f_name = self.in_f.name
            self.in_f_path = in_f
            self.codec_info_orig = CodecInfo(self.in_f)
        else:
            self.in_f = in_f[1]
            self.in_f_name = Path(in_f[0]).name
            self.in_f_path = in_f[0]
            self.codec_info_orig = CodecInfo(in_f[1], Path(in_f[0]).suffix)

        # Flatten the (image formats, video formats) pair into one list
        valid_formats: List[str] = []
        for i in opt_comp.get_format():
            valid_formats.extend(i)

        # Keep the requested output suffix when allowed; otherwise replace it
        # with the preferred video/image format below
        valid_ext = False
        self.out_f = Path()
        if len(valid_formats) == 0 or Path(out_f).suffix in valid_formats:
            self.out_f = Path(out_f)
            valid_ext = True

        if not valid_ext:
            if self.codec_info_orig.is_animated or opt_comp.fake_vid:
                ext = opt_comp.format_vid[0]
            else:
                ext = opt_comp.format_img[0]
            self.out_f = out_f.with_suffix(ext)

        self.out_f_name: str = self.out_f.name

        self.cb = cb
        self.frames_raw: "List[np.ndarray[Any, Any]]" = []
        self.frames_processed: "List[np.ndarray[Any, Any]]" = []
        self.opt_comp: CompOption = opt_comp
        # At least one step is required for the compression loop
        if not self.opt_comp.steps:
            self.opt_comp.steps = 1

        # Parameters of the compression attempt currently in flight
        self.size: int = 0
        self.size_max: Optional[int] = None
        self.res_w: Optional[int] = None
        self.res_h: Optional[int] = None
        self.quality: Optional[int] = None
        self.fps: Optional[Fraction] = None
        self.color: Optional[int] = None

        # Optional fixed background color, decoded from an RRGGBBAA hex string
        self.bg_color: Optional[Tuple[int, int, int, int]] = None
        if self.opt_comp.bg_color:
            r, g, b, a = bytes.fromhex(self.opt_comp.bg_color)
            self.bg_color = (r, g, b, a)

        # Best result found so far across all attempted steps
        self.tmp_f: BytesIO = BytesIO()
        self.result: Optional[bytes] = None
        self.result_size: int = 0
        self.result_step: Optional[int] = None

        self.apngasm = None
200
-
201
- @staticmethod
202
- def convert(
203
- in_f: Union[Path, Tuple[Path, bytes]],
204
- out_f: Path,
205
- opt_comp: CompOption,
206
- cb: CallbackProtocol,
207
- _cb_return: CallbackReturn,
208
- ) -> Tuple[bool, Path, Union[None, bytes, Path], int]:
209
- sticker = StickerConvert(in_f, out_f, opt_comp, cb)
210
- result = sticker._convert()
211
- cb.put("update_bar")
212
- return result
213
-
214
    def _convert(self) -> Tuple[bool, Path, Union[None, bytes, Path], int]:
        # Fast path: input already satisfies every constraint — just copy it
        result = self.check_if_compatible()
        if result:
            return self.compress_done(result)

        self.cb.put((MSG_START_COMP.format(self.in_f_name, self.out_f_name)))

        steps_list = self.generate_steps_list()

        # Binary search over steps: lower index = higher quality settings
        step_lower = 0
        step_upper = self.opt_comp.steps

        if self.codec_info_orig.is_animated is True:
            self.size_max = self.opt_comp.size_max_vid
        else:
            self.size_max = self.opt_comp.size_max_img

        if self.size_max in (None, 0):
            # No limit to size, create the best quality result
            step_current = 0
        else:
            step_current = int(rounding((step_lower + step_upper) / 2))

        self.frames_import()
        while True:
            # Load this step's (res_w, res_h, quality, fps, color) parameters
            param = steps_list[step_current]
            self.res_w = param[0]
            self.res_h = param[1]
            self.quality = param[2]
            if param[3] and self.codec_info_orig.fps:
                # Never raise fps above the source's own frame rate
                fps_tmp = min(param[3], self.codec_info_orig.fps)
                self.fps = self.fix_fps(fps_tmp)
            else:
                self.fps = Fraction(0)
            self.color = param[4]

            self.tmp_f = BytesIO()
            msg = MSG_COMP.format(
                self.in_f_name,
                self.out_f_name,
                self.res_w,
                self.res_h,
                self.quality,
                int(self.fps),
                self.color,
                step_lower,
                step_current,
                step_upper,
            )
            self.cb.put(msg)

            self.frames_processed = self.frames_drop(self.frames_raw)
            self.frames_processed = self.frames_resize(self.frames_processed)
            self.frames_export()

            self.tmp_f.seek(0)
            self.size = self.tmp_f.getbuffer().nbytes

            # Remember this attempt if it fits the limit and is the largest
            # (i.e. highest quality) fitting result so far
            if not self.size_max or (
                self.size <= self.size_max and self.size >= self.result_size
            ):
                self.result = self.tmp_f.read()
                self.result_size = self.size
                self.result_step = step_current

            if (
                step_upper - step_lower > 0
                and step_current != step_lower
                and self.size_max
            ):
                # Narrow the search window: under the limit -> try higher
                # quality (smaller step), over -> lower quality (larger step)
                if self.size <= self.size_max:
                    sign = "<"
                    step_upper = step_current
                else:
                    sign = ">"
                    step_lower = step_current
                if step_current == step_lower + 1:
                    step_current = step_lower
                else:
                    step_current = int(rounding((step_lower + step_upper) / 2))
                self.recompress(sign)
            elif self.result:
                return self.compress_done(self.result, self.result_step)
            else:
                return self.compress_fail()
299
-
300
    def check_if_compatible(self) -> Optional[bytes]:
        """Return the raw input bytes when the input already meets every
        requirement (format, resolution, fps, size, duration); else None.
        """
        f_fmt = self.opt_comp.get_format()
        if (
            # Issue #260: Some webp file not accepted by Whatsapp
            ".webp" not in f_fmt[0]
            and ".webp" not in f_fmt[1]
            and FormatVerify.check_format(
                self.in_f,
                fmt=f_fmt,
                file_info=self.codec_info_orig,
            )
            and FormatVerify.check_file_res(
                self.in_f, res=self.opt_comp.get_res(), file_info=self.codec_info_orig
            )
            and FormatVerify.check_file_fps(
                self.in_f, fps=self.opt_comp.get_fps(), file_info=self.codec_info_orig
            )
            and FormatVerify.check_file_size(
                self.in_f,
                size=self.opt_comp.get_size_max(),
                file_info=self.codec_info_orig,
            )
            and FormatVerify.check_file_duration(
                self.in_f,
                duration=self.opt_comp.get_duration(),
                file_info=self.codec_info_orig,
            )
        ):
            self.cb.put((MSG_SKIP_COMP.format(self.in_f_name, self.out_f_name)))

            # Normalise both input forms (path or in-memory bytes) to bytes
            if isinstance(self.in_f, Path):
                with open(self.in_f, "rb") as f:
                    result = f.read()
                self.result_size = os.path.getsize(self.in_f)
            else:
                result = self.in_f
                self.result_size = len(self.in_f)

            return result

        return None
341
-
342
- def generate_steps_list(self) -> List[Tuple[Optional[int], ...]]:
343
- steps_list: List[Tuple[Optional[int], ...]] = []
344
- need_even = self.out_f.suffix in (".webm", ".mp4", ".mkv", ".webp")
345
- for step in range(self.opt_comp.steps, -1, -1):
346
- steps_list.append(
347
- (
348
- get_step_value(
349
- self.opt_comp.res_w_max,
350
- self.opt_comp.res_w_min,
351
- step,
352
- self.opt_comp.steps,
353
- self.opt_comp.res_power,
354
- need_even,
355
- self.opt_comp.res_snap_pow2,
356
- ),
357
- get_step_value(
358
- self.opt_comp.res_h_max,
359
- self.opt_comp.res_h_min,
360
- step,
361
- self.opt_comp.steps,
362
- self.opt_comp.res_power,
363
- need_even,
364
- self.opt_comp.res_snap_pow2,
365
- ),
366
- get_step_value(
367
- self.opt_comp.quality_max,
368
- self.opt_comp.quality_min,
369
- step,
370
- self.opt_comp.steps,
371
- self.opt_comp.quality_power,
372
- ),
373
- get_step_value(
374
- self.opt_comp.fps_max,
375
- self.opt_comp.fps_min,
376
- step,
377
- self.opt_comp.steps,
378
- self.opt_comp.fps_power,
379
- ),
380
- get_step_value(
381
- self.opt_comp.color_max,
382
- self.opt_comp.color_min,
383
- step,
384
- self.opt_comp.steps,
385
- self.opt_comp.color_power,
386
- ),
387
- )
388
- )
389
-
390
- return steps_list
391
-
392
- def recompress(self, sign: str) -> None:
393
- msg = MSG_REDO_COMP.format(
394
- sign, self.in_f_name, self.out_f_name, self.size, sign, self.size_max
395
- )
396
- self.cb.put(msg)
397
-
398
- def compress_fail(
399
- self,
400
- ) -> Tuple[bool, Path, Union[None, bytes, Path], int]:
401
- msg = MSG_FAIL_COMP.format(
402
- self.in_f_name, self.out_f_name, self.size_max, self.size
403
- )
404
- self.cb.put(msg)
405
-
406
- return False, self.in_f_path, self.out_f, self.size
407
-
408
- def compress_done(
409
- self, data: bytes, result_step: Optional[int] = None
410
- ) -> Tuple[bool, Path, Union[None, bytes, Path], int]:
411
- out_f: Union[None, bytes, Path]
412
-
413
- if self.out_f.stem == "none":
414
- out_f = None
415
- elif self.out_f.stem == "bytes":
416
- out_f = data
417
- else:
418
- out_f = self.out_f
419
- with open(self.out_f, "wb+") as f:
420
- f.write(data)
421
-
422
- if result_step is not None:
423
- msg = MSG_DONE_COMP.format(
424
- self.in_f_name, self.out_f_name, self.result_size, result_step
425
- )
426
- self.cb.put(msg)
427
-
428
- return True, self.in_f_path, out_f, self.result_size
429
-
430
- def frames_import(self) -> None:
431
- if isinstance(self.in_f, Path):
432
- suffix = self.in_f.suffix
433
- else:
434
- suffix = Path(self.in_f_name).suffix
435
-
436
- if suffix in (".tgs", ".lottie", ".json"):
437
- self._frames_import_lottie()
438
- elif suffix in (".webp", ".apng", ".png", ".gif"):
439
- # ffmpeg do not support webp decoding (yet)
440
- # ffmpeg could fail to decode apng if file is buggy
441
- self._frames_import_pillow()
442
- elif suffix == ".svg":
443
- self._frames_import_svg()
444
- else:
445
- self._frames_import_pyav()
446
-
447
    def _frames_import_svg(self) -> None:
        """Rasterize an SVG (possibly SMIL-animated) by screenshotting it in
        a headless Chrome/Chromium session over remote debugging.
        """
        width = self.codec_info_orig.res[0]
        height = self.codec_info_orig.res[1]

        # The browser session is cached process-wide in singletons and
        # reused across conversions (startup is expensive)
        if singletons.objs.get("crd") is None:
            chrome_path: Optional[str]
            if self.opt_comp.chromium_path:
                chrome_path = self.opt_comp.chromium_path
            else:
                chrome_path = CRD.get_chromium_path()
            args = [
                "--headless",
                "--kiosk",
                "--disable-extensions",
                "--disable-infobars",
                "--disable-gpu",
                "--disable-gpu-rasterization",
                "--hide-scrollbars",
                "--force-device-scale-factor=1",
                "about:blank",
            ]
            if chrome_path is None:
                raise RuntimeError("[F] Chrome/Chromium required for importing svg")
            self.cb.put("[W] Importing SVG takes long time")
            singletons.objs["crd"] = CRD(chrome_path, args=args)
            singletons.objs["crd"].connect(-1)  # type: ignore

        crd = cast(CRD, singletons.objs["crd"])
        if isinstance(self.in_f, bytes):
            svg = self.in_f.decode()
        else:
            with open(self.in_f) as f:
                svg = f.read()
        soup = BeautifulSoup(svg, "html.parser")
        svg_tag = soup.find_all("svg")[0]

        # Give the svg explicit dimensions so the screenshot clip matches
        if svg_tag.get("width") is None:
            svg_tag["width"] = width
        if svg_tag.get("height") is None:
            svg_tag["height"] = height
        svg = str(soup)

        crd.open_html_str(svg)
        crd.set_transparent_bg()
        # Pause the animation so frames can be stepped deterministically,
        # and fetch the svg's bounding box for the screenshot clip
        init_js = 'svg = document.getElementsByTagName("svg")[0];'
        if self.codec_info_orig.fps > 0:
            init_js += "svg.pauseAnimations();"
        init_js += "JSON.stringify(svg.getBoundingClientRect());"
        bound = json.loads(
            json.loads(crd.exec_js(init_js))["result"]["result"]["value"]
        )
        clip = {
            "x": bound["x"],
            "y": bound["y"],
            "width": width,
            "height": height,
            "scale": 1,
        }

        if self.codec_info_orig.fps > 0:
            # Animated: seek the svg clock to each frame's time and shoot it
            for i in range(self.codec_info_orig.frames):
                curr_time = (
                    i
                    / self.codec_info_orig.frames
                    * self.codec_info_orig.duration
                    / 1000
                )
                crd.exec_js(f"svg.setCurrentTime({curr_time})")
                self.frames_raw.append(np.asarray(crd.screenshot(clip)))
        else:
            self.frames_raw.append(np.asarray(crd.screenshot(clip)))
518
-
519
    def _frames_import_pillow(self) -> None:
        """Decode webp/apng/png/gif with Pillow, resampling variable frame
        durations onto a constant-fps timeline.
        """
        with Image.open(self.in_f) as im:
            # Note: im.convert("RGBA") would return rgba image of current frame only
            if (
                "n_frames" in dir(im)
                and im.n_frames != 0
                and self.codec_info_orig.fps != 0.0
            ):
                # Pillow is not reliable for getting webp frame durations
                durations: Optional[List[int]]
                if im.format == "WEBP":
                    _, _, _, durations = CodecInfo._get_file_fps_frames_duration_webp(  # type: ignore
                        self.in_f
                    )
                else:
                    durations = None

                # Advance a constant-fps clock (duration_ptr, ms); emit the
                # current source frame once per tick, moving to the next
                # source frame whenever the clock passes its end time
                duration_ptr = 0.0
                duration_inc = 1 / self.codec_info_orig.fps * 1000
                frame = 0
                if durations is None:
                    next_frame_start_duration = cast(int, im.info.get("duration", 1000))
                else:
                    next_frame_start_duration = durations[0]
                while True:
                    self.frames_raw.append(np.asarray(im.convert("RGBA")))
                    duration_ptr += duration_inc
                    if duration_ptr >= next_frame_start_duration:
                        frame += 1
                        if frame == im.n_frames:
                            break
                        im.seek(frame)

                        if durations is None:
                            next_frame_start_duration += cast(
                                int, im.info.get("duration", 1000)
                            )
                        else:
                            next_frame_start_duration += durations[frame]
            else:
                # Static image: a single RGBA frame
                self.frames_raw.append(np.asarray(im.convert("RGBA")))
560
-
561
    def _frames_import_pyav(self) -> None:
        """Decode video input into RGBA frames using PyAV/ffmpeg."""
        import av
        from av.codec.context import CodecContext
        from av.container.input import InputContainer
        from av.video.codeccontext import VideoCodecContext
        from av.video.frame import VideoFrame

        # Crashes when handling some webm in yuv420p and convert to rgba
        # https://github.com/PyAV-Org/PyAV/issues/1166
        file: Union[BytesIO, str]
        if isinstance(self.in_f, Path):
            file = self.in_f.as_posix()
        else:
            file = BytesIO(self.in_f)
        with av.open(file) as container:
            container = cast(InputContainer, container)
            context = container.streams.video[0].codec_context
            # vp8/vp9 streams are decoded with the libvpx decoders instead of
            # the built-in ones (presumably to preserve the alpha channel —
            # TODO confirm)
            if context.name == "vp8":
                context = CodecContext.create("libvpx", "r")
            elif context.name == "vp9":
                context = cast(
                    VideoCodecContext, CodecContext.create("libvpx-vp9", "r")
                )

            for packet in container.demux(container.streams.video):
                for frame in context.decode(packet):
                    width_orig = frame.width
                    height_orig = frame.height

                    # Need to pad frame to even dimension first
                    if width_orig % 2 == 1 or height_orig % 2 == 1:
                        from av.filter import Graph

                        width_new = width_orig + width_orig % 2
                        height_new = height_orig + height_orig % 2

                        # Build a one-off pad filter graph with a transparent
                        # fill color, then run the frame through it
                        graph = Graph()
                        in_src = graph.add_buffer(template=container.streams.video[0])
                        pad = graph.add(
                            "pad", f"{width_new}:{height_new}:0:0:color=#00000000"
                        )
                        in_src.link_to(pad)
                        sink = graph.add("buffersink")
                        pad.link_to(sink)
                        graph.configure()

                        graph.push(frame)
                        frame_resized = cast(VideoFrame, graph.pull())
                    else:
                        frame_resized = frame

                    # yuva420p may cause crash
                    # Not safe to directly call frame.to_ndarray(format="rgba")
                    # https://github.com/PyAV-Org/PyAV/discussions/1510
                    # if int(av.__version__.split(".")[0]) >= 14:
                    #     rgba_array = frame_resized.to_ndarray(format="rgba")
                    if frame_resized.format.name == "yuv420p":
                        # No alpha plane: convert to rgb and add opaque alpha
                        rgb_array = frame_resized.to_ndarray(format="rgb24")
                        rgba_array = np.dstack(
                            (
                                rgb_array,
                                cast(
                                    np.ndarray[Any, np.dtype[np.uint8]],
                                    np.zeros(rgb_array.shape[:2], dtype=np.uint8) + 255,
                                ),
                            )
                        )
                    else:
                        # Normalise to yuva420p then convert manually
                        frame_resized = frame_resized.reformat(
                            format="yuva420p",
                            dst_colorspace=1,
                        )
                        rgba_array = yuva_to_rgba(frame_resized)

                    # Remove pixels that was added to make dimensions even
                    rgba_array = rgba_array[0:height_orig, 0:width_orig]
                    self.frames_raw.append(rgba_array)
638
-
639
- def _frames_import_lottie(self) -> None:
640
- from rlottie_python.rlottie_wrapper import LottieAnimation
641
-
642
- if isinstance(self.in_f, Path):
643
- suffix = self.in_f.suffix
644
- else:
645
- suffix = Path(self.in_f_name).suffix
646
-
647
- if suffix == ".tgs":
648
- if isinstance(self.in_f, Path):
649
- anim = LottieAnimation.from_tgs(self.in_f.as_posix())
650
- else:
651
- import gzip
652
-
653
- with gzip.open(BytesIO(self.in_f)) as f:
654
- data = f.read().decode(encoding="utf-8")
655
- anim = LottieAnimation.from_data(data)
656
- else:
657
- if isinstance(self.in_f, Path):
658
- anim = LottieAnimation.from_file(self.in_f.as_posix())
659
- else:
660
- anim = LottieAnimation.from_data(self.in_f.decode("utf-8"))
661
-
662
- for i in range(anim.lottie_animation_get_totalframe()):
663
- frame = np.asarray(anim.render_pillow_frame(frame_num=i))
664
- self.frames_raw.append(frame)
665
-
666
- anim.lottie_animation_destroy()
667
-
668
- def determine_bg_color(self) -> Tuple[int, int, int, int]:
669
- mean_total = 0.0
670
- # Calculate average color of all frames for selecting background color
671
- for frame in self.frames_raw:
672
- s = frame.shape
673
- colors = frame.reshape((-1, s[2])) # type: ignore
674
- # Do not count in alpha=0
675
- # If alpha > 0, use alpha as weight
676
- colors = colors[colors[:, 3] != 0]
677
- if colors.shape[0] != 0:
678
- alphas = colors[:, 3] / 255
679
- r_mean = cast(float, np.mean(colors[:, 0] * alphas))
680
- g_mean = cast(float, np.mean(colors[:, 1] * alphas))
681
- b_mean = cast(float, np.mean(colors[:, 2] * alphas))
682
- mean_total += (r_mean + g_mean + b_mean) / 3
683
-
684
- if mean_total / len(self.frames_raw) < 128:
685
- return (255, 255, 255, 0)
686
- else:
687
- return (0, 0, 0, 0)
688
-
689
    def frames_resize(
        self, frames_in: "List[np.ndarray[Any, Any]]"
    ) -> "List[np.ndarray[Any, Any]]":
        """Fit each frame into (res_w, res_h) preserving aspect ratio,
        centred on a bg_color canvas, shrunk by padding_percent.
        """
        frames_out: "List[np.ndarray[Any, Any]]" = []

        # Map the scale_filter option onto a Pillow resampling constant
        resample: Literal[0, 1, 2, 3, 4, 5]
        if self.opt_comp.scale_filter == "nearest":
            resample = Image.NEAREST
        elif self.opt_comp.scale_filter == "box":
            resample = Image.BOX
        elif self.opt_comp.scale_filter == "bilinear":
            resample = Image.BILINEAR
        elif self.opt_comp.scale_filter == "hamming":
            resample = Image.HAMMING
        elif self.opt_comp.scale_filter == "bicubic":
            resample = Image.BICUBIC
        elif self.opt_comp.scale_filter == "lanczos":
            resample = Image.LANCZOS
        else:
            resample = Image.BICUBIC

        if self.bg_color is None:
            self.bg_color = self.determine_bg_color()

        for frame in frames_in:
            with Image.fromarray(frame, "RGBA") as im:  # type: ignore
                width, height = im.size

                # Default the target resolution to the frame's own size
                if self.res_w is None:
                    self.res_w = width
                if self.res_h is None:
                    self.res_h = height

                # Shrink by padding_percent and fit the longer side to the
                # target, keeping the original aspect ratio
                scaling = 1 - (self.opt_comp.padding_percent / 100)
                if width / self.res_w > height / self.res_h:
                    width_new = int(self.res_w * scaling)
                    height_new = int(height * self.res_w / width * scaling)
                else:
                    height_new = int(self.res_h * scaling)
                    width_new = int(width * self.res_h / height * scaling)

                with im.resize((width_new, height_new), resample=resample) as im_resized:
                    with Image.new(
                        "RGBA", (self.res_w, self.res_h), self.bg_color
                    ) as im_new:
                        # Composite the resized frame centred on the canvas
                        im_new.alpha_composite(
                            im_resized,
                            ((self.res_w - width_new) // 2, (self.res_h - height_new) // 2),
                        )
                        frames_out.append(np.asarray(im_new))

        return frames_out
741
-
742
    def frames_drop(
        self, frames_in: "List[np.ndarray[Any, Any]]"
    ) -> "List[np.ndarray[Any, Any]]":
        """Resample frames_in to the target fps and duration limits,
        dropping/duplicating frames and speeding up/slowing down as needed.
        """
        if (
            not self.codec_info_orig.is_animated
            or not self.fps
            # NOTE(review): this inspects self.frames_processed (the previous
            # iteration's output), not frames_in — looks unintended but is
            # largely masked by the is_animated guard; confirm before changing.
            or len(self.frames_processed) == 1
        ):
            return [frames_in[0]]

        frames_out: "List[np.ndarray[Any, Any]]" = []

        # fps_ratio: 1 frame in new anim equal to how many frame in old anim
        # speed_ratio: How much to speed up / slow down
        fps_ratio = self.codec_info_orig.fps / self.fps
        if (
            self.opt_comp.duration_min
            and self.codec_info_orig.duration < self.opt_comp.duration_min
        ):
            speed_ratio = self.codec_info_orig.duration / self.opt_comp.duration_min
        elif (
            self.opt_comp.duration_max
            and self.codec_info_orig.duration > self.opt_comp.duration_max
        ):
            speed_ratio = self.codec_info_orig.duration / self.opt_comp.duration_max
        else:
            speed_ratio = 1

        # How many frames to advance in original video for each frame of output video
        frame_increment = fps_ratio * speed_ratio

        # Output frame-count bounds implied by the duration limits (ms)
        frames_out_min = None
        frames_out_max = None
        if self.opt_comp.duration_min:
            frames_out_min = ceil(self.fps * self.opt_comp.duration_min / 1000)
        if self.opt_comp.duration_max:
            frames_out_max = floor(self.fps * self.opt_comp.duration_max / 1000)

        frame_current = 0
        frame_current_float = 0.0
        while True:
            if frame_current <= len(frames_in) - 1 and not (
                frames_out_max and len(frames_out) == frames_out_max
            ):
                frames_out.append(frames_in[frame_current])
            else:
                # Source exhausted (or max count reached): pad with the last
                # frame until the minimum frame count is satisfied
                while len(frames_out) == 0 or (
                    frames_out_min and len(frames_out) < frames_out_min
                ):
                    frames_out.append(frames_in[-1])
                return frames_out
            frame_current_float += frame_increment
            frame_current = int(rounding(frame_current_float))
795
-
796
- def frames_export(self) -> None:
797
- is_animated = len(self.frames_processed) > 1 and self.fps
798
- if self.out_f.suffix in (".apng", ".png"):
799
- if is_animated:
800
- self._frames_export_apng()
801
- else:
802
- self._frames_export_png()
803
- elif self.out_f.suffix in (".gif", ".webp"):
804
- self._frames_export_pil_anim()
805
- elif self.out_f.suffix in (".webm", ".mp4", ".mkv") or is_animated:
806
- self._frames_export_pyav()
807
- else:
808
- self._frames_export_pil()
809
-
810
- def _check_dup(self) -> bool:
811
- if len(self.frames_processed) == 1:
812
- return False
813
-
814
- prev_frame = self.frames_processed[0]
815
- for frame in self.frames_processed[1:]:
816
- if np.array_equal(frame, prev_frame):
817
- return True
818
- prev_frame = frame
819
-
820
- return False
821
-
822
- def _frames_export_pil(self) -> None:
823
- with Image.fromarray(self.frames_processed[0]) as im: # type: ignore
824
- im.save(
825
- self.tmp_f,
826
- format=self.out_f.suffix.replace(".", ""),
827
- quality=self.quality,
828
- )
829
-
830
- def _frames_export_pyav(self) -> None:
831
- import av
832
- from av.video.stream import VideoStream
833
-
834
- options_container: Dict[str, str] = {}
835
- options_stream: Dict[str, str] = {}
836
-
837
- if isinstance(self.quality, int):
838
- # Seems not actually working
839
- options_stream["quality"] = str(self.quality)
840
- options_stream["lossless"] = "0"
841
-
842
- if self.out_f.suffix in (".apng", ".png"):
843
- codec = "apng"
844
- pixel_format = "rgba"
845
- options_stream["plays"] = "0"
846
- elif self.out_f.suffix in (".webm", ".mkv"):
847
- codec = "libvpx-vp9"
848
- pixel_format = "yuva420p"
849
- options_stream["loop"] = "0"
850
- elif self.out_f.suffix == ".webp":
851
- codec = "webp"
852
- pixel_format = "yuva420p"
853
- options_container["loop"] = "0"
854
- else:
855
- codec = "libvpx-vp9"
856
- pixel_format = "yuv420p"
857
- options_stream["loop"] = "0"
858
-
859
- with av.open(
860
- self.tmp_f,
861
- "w",
862
- format=self.out_f.suffix.replace(".", ""),
863
- options=options_container,
864
- ) as output:
865
- out_stream = output.add_stream(codec, rate=self.fps, options=options_stream) # type: ignore
866
- out_stream = cast(VideoStream, out_stream)
867
- assert isinstance(self.res_w, int) and isinstance(self.res_h, int)
868
- out_stream.width = self.res_w
869
- out_stream.height = self.res_h
870
- out_stream.pix_fmt = pixel_format
871
-
872
- for frame in self.frames_processed:
873
- av_frame = av.VideoFrame.from_ndarray(frame, format="rgba")
874
- output.mux(out_stream.encode(av_frame))
875
- output.mux(out_stream.encode())
876
-
877
- def _frames_export_pil_anim(self) -> None:
878
- extra_kwargs: Dict[str, Any] = {}
879
-
880
- # disposal=2 on gif cause flicker in image with transparency
881
- # Occurs in Pillow == 10.2.0
882
- # https://github.com/python-pillow/Pillow/issues/7787
883
- if PillowVersion == "10.2.0":
884
- extra_kwargs["optimize"] = False
885
- else:
886
- extra_kwargs["optimize"] = True
887
-
888
- if self.out_f.suffix == ".gif":
889
- # GIF can only have one alpha color
890
- # Change lowest alpha to alpha=0
891
- # Only keep alpha=0 and alpha=255, nothing in between
892
- extra_kwargs["format"] = "GIF"
893
- frames_processed = np.array(self.frames_processed)
894
- alpha = frames_processed[:, :, :, 3]
895
- alpha_min = np.min(alpha) # type: ignore
896
- if alpha_min < 255:
897
- alpha[alpha > alpha_min] = 255
898
- alpha[alpha == alpha_min] = 0
899
-
900
- if 0 in alpha:
901
- extra_kwargs["transparency"] = 0
902
- extra_kwargs["disposal"] = 2
903
- im_out = [self.quantize(Image.fromarray(i)) for i in frames_processed] # type: ignore
904
- else:
905
- im_out = [
906
- self.quantize(Image.fromarray(i).convert("RGB")).convert("RGB") # type: ignore
907
- for i in frames_processed
908
- ]
909
- elif self.out_f.suffix == ".webp":
910
- im_out = [Image.fromarray(i) for i in self.frames_processed] # type: ignore
911
- extra_kwargs["format"] = "WebP"
912
- extra_kwargs["allow_mixed"] = True
913
- extra_kwargs["kmax"] = (
914
- 1 # Keyframe every frame, otherwise black lines artifact can appear
915
- )
916
- if self.quality:
917
- if self.quality < 20:
918
- extra_kwargs["minimize_size"] = True
919
- extra_kwargs["method"] = 4 + int(2 * (100 - self.quality) / 100)
920
- extra_kwargs["alpha_quality"] = self.quality
921
- else:
922
- raise RuntimeError(f"Invalid format {self.out_f.suffix}")
923
-
924
- if self.fps:
925
- extra_kwargs["save_all"] = True
926
- extra_kwargs["append_images"] = im_out[1:]
927
- extra_kwargs["duration"] = int(1000 / self.fps)
928
- extra_kwargs["loop"] = 0
929
-
930
- im_out[0].save(
931
- self.tmp_f,
932
- quality=self.quality,
933
- **extra_kwargs,
934
- )
935
-
936
- def _frames_export_png(self) -> None:
937
- with Image.fromarray(self.frames_processed[0], "RGBA") as image: # type: ignore
938
- image_quant = self.quantize(image)
939
-
940
- with BytesIO() as f:
941
- image_quant.save(f, format="png")
942
- f.seek(0)
943
- frame_optimized = self.optimize_png(f.read())
944
- self.tmp_f.write(frame_optimized)
945
-
946
- def _frames_export_apng(self) -> None:
947
- from apngasm_python._apngasm_python import APNGAsm, create_frame_from_rgb, create_frame_from_rgba # type: ignore
948
-
949
- assert self.fps
950
- assert self.res_h
951
-
952
- frames_concat = np.concatenate(self.frames_processed)
953
- with Image.fromarray(frames_concat, "RGBA") as image_concat: # type: ignore
954
- if image_concat.getextrema()[3][0] < 255: # type: ignore
955
- mode = "RGBA"
956
- create_frame_method = create_frame_from_rgba
957
- else:
958
- mode = "RGB"
959
- create_frame_method = create_frame_from_rgb
960
- image_quant = self.quantize(image_concat)
961
-
962
- if self.apngasm is None:
963
- self.apngasm = APNGAsm() # type: ignore
964
- assert isinstance(self.apngasm, APNGAsm)
965
-
966
- delay_num = int(1000 / self.fps)
967
- for i in range(0, image_quant.height, self.res_h):
968
- crop_dimension = (0, i, image_quant.width, i + self.res_h)
969
- image_cropped = image_quant.crop(crop_dimension)
970
- image_final = image_cropped.convert(mode)
971
- frame_final = create_frame_method(
972
- np.array(image_final),
973
- width=image_final.width,
974
- height=image_final.height,
975
- delay_num=delay_num,
976
- delay_den=1000,
977
- )
978
- self.apngasm.add_frame(frame_final)
979
-
980
- with CacheStore.get_cache_store(path=self.opt_comp.cache_dir) as tempdir:
981
- tmp_apng = Path(tempdir, f"out{self.out_f.suffix}")
982
- self.apngasm.assemble(tmp_apng.as_posix())
983
-
984
- with open(tmp_apng, "rb") as f:
985
- apng_optimized = self.optimize_png(f.read())
986
- self.tmp_f.write(apng_optimized)
987
-
988
- self.apngasm.reset()
989
-
990
- def optimize_png(self, image_bytes: bytes) -> bytes:
991
- import oxipng
992
-
993
- return oxipng.optimize_from_memory(
994
- image_bytes,
995
- level=6,
996
- fix_errors=True,
997
- filter=[oxipng.RowFilter.Brute],
998
- optimize_alpha=True,
999
- strip=oxipng.StripChunks.safe(),
1000
- )
1001
-
1002
- def quantize(self, image: Image.Image) -> Image.Image:
1003
- if not (self.color and self.color <= 256):
1004
- return image.copy()
1005
- if self.opt_comp.quantize_method == "imagequant":
1006
- return self._quantize_by_imagequant(image)
1007
- if self.opt_comp.quantize_method in ("mediancut", "maxcoverage", "fastoctree"):
1008
- return self._quantize_by_pillow(image)
1009
-
1010
- return image
1011
-
1012
- def _quantize_by_imagequant(self, image: Image.Image) -> Image.Image:
1013
- import imagequant # type: ignore
1014
-
1015
- assert isinstance(self.quality, int)
1016
- assert isinstance(self.opt_comp.quality_min, int)
1017
- assert isinstance(self.opt_comp.quality_max, int)
1018
- assert isinstance(self.color, int)
1019
-
1020
- dither = 1 - (self.quality - self.opt_comp.quality_min) / (
1021
- self.opt_comp.quality_max - self.opt_comp.quality_min
1022
- )
1023
- image_quant = None
1024
- for i in range(self.quality, 101, 5):
1025
- try:
1026
- image_quant = imagequant.quantize_pil_image( # type: ignore
1027
- image,
1028
- dithering_level=dither,
1029
- max_colors=self.color,
1030
- min_quality=self.opt_comp.quality_min,
1031
- max_quality=i,
1032
- )
1033
- return image_quant
1034
- except RuntimeError:
1035
- pass
1036
-
1037
- return image
1038
-
1039
- def _quantize_by_pillow(self, image: Image.Image) -> Image.Image:
1040
- assert self.color
1041
-
1042
- if image.mode == "RGBA" and self.opt_comp.quantize_method in (
1043
- "mediancut",
1044
- "maxcoverage",
1045
- ):
1046
- self.cb.put(
1047
- f"[W] {self.opt_comp.quantize_method} does not support RGBA, defaulted to fastoctree quantization"
1048
- )
1049
- method = Image.Quantize.FASTOCTREE
1050
- elif self.opt_comp.quantize_method == "mediancut":
1051
- method = Image.Quantize.MEDIANCUT
1052
- elif self.opt_comp.quantize_method == "maxcoverage":
1053
- method = Image.Quantize.MAXCOVERAGE
1054
- else:
1055
- method = Image.Quantize.FASTOCTREE
1056
- return image.quantize(colors=self.color, method=method)
1057
-
1058
- def fix_fps(self, fps: float) -> Fraction:
1059
- # After rounding fps/duration during export,
1060
- # Video duration may exceed limit.
1061
- # Hence we need to 'fix' the fps
1062
- if self.out_f.suffix == ".gif":
1063
- # Quote from https://www.w3.org/Graphics/GIF/spec-gif89a.txt
1064
- # vii) Delay Time - If not 0, this field specifies
1065
- # the number of hundredths (1/100) of a second
1066
- #
1067
- # For GIF, we need to adjust fps such that delay is matching to hundreths of second
1068
- return self._fix_fps_duration(fps, 100)
1069
- if self.out_f.suffix in (".webp", ".apng", ".png"):
1070
- return self._fix_fps_duration(fps, 1000)
1071
-
1072
- return self._fix_fps_pyav(fps)
1073
-
1074
- def _fix_fps_duration(self, fps: float, denominator: int) -> Fraction:
1075
- delay = int(rounding(denominator / fps))
1076
- fps_fraction = Fraction(denominator, delay)
1077
- if self.opt_comp.fps_max and fps_fraction > self.opt_comp.fps_max:
1078
- return Fraction(denominator, (delay + 1))
1079
- if self.opt_comp.fps_min and fps_fraction < self.opt_comp.fps_min:
1080
- return Fraction(denominator, (delay - 1))
1081
- return fps_fraction
1082
-
1083
- def _fix_fps_pyav(self, fps: float) -> Fraction:
1084
- return Fraction(rounding(fps))
1
+ #!/usr/bin/env python3
2
+ import json
3
+ import os
4
+ from fractions import Fraction
5
+ from io import BytesIO
6
+ from math import ceil, floor, log2
7
+ from pathlib import Path
8
+ from typing import TYPE_CHECKING, Any, Dict, List, Literal, Optional, Tuple, Union, cast
9
+
10
+ import numpy as np
11
+ from bs4 import BeautifulSoup
12
+ from PIL import Image
13
+ from PIL import __version__ as PillowVersion
14
+ from PIL import features
15
+
16
+ from sticker_convert.definitions import RUNTIME_STATE
17
+ from sticker_convert.job_option import CompOption
18
+ from sticker_convert.utils.callback import CallbackProtocol, CallbackReturn
19
+ from sticker_convert.utils.chrome_remotedebug import CRD
20
+ from sticker_convert.utils.files.cache_store import CacheStore
21
+ from sticker_convert.utils.media.codec_info import CodecInfo, rounding
22
+ from sticker_convert.utils.media.format_verify import FormatVerify
23
+ from sticker_convert.utils.translate import get_translator
24
+
25
+ I = get_translator() # noqa: E741
26
+
27
+ if TYPE_CHECKING:
28
+ from av.video.frame import VideoFrame
29
+ from av.video.plane import VideoPlane
30
+
31
+ YUV_RGB_MATRIX = np.array(
32
+ [
33
+ [1.164, 0.000, 1.793],
34
+ [1.164, -0.213, -0.533],
35
+ [1.164, 2.112, 0.000],
36
+ ]
37
+ )
38
+
39
+ # Whether animated WebP is supported
40
+ # See https://pillow.readthedocs.io/en/stable/handbook/image-file-formats.html#saving-sequences
41
+ PIL_WEBP_ANIM = cast(bool, features.check("webp_anim")) # type: ignore
42
+
43
+
44
+ def get_step_value(
45
+ max_step: Optional[int],
46
+ min_step: Optional[int],
47
+ step: int,
48
+ steps: int,
49
+ power: float = 1.0,
50
+ even: bool = False,
51
+ snap_pow2: bool = False,
52
+ ) -> Optional[int]:
53
+ # Power should be between -1 and positive infinity
54
+ # Smaller power = More 'importance' of the parameter
55
+ # Power of 1 is linear relationship
56
+ # e.g. fps has lower power -> Try not to reduce it early on
57
+
58
+ if step > 0:
59
+ factor = pow(step / steps, power)
60
+ else:
61
+ factor = 0
62
+
63
+ if max_step is not None and min_step is not None:
64
+ v = round((max_step - min_step) * step / steps * factor + min_step)
65
+ if snap_pow2 is True and floor(log2(max_step)) >= ceil(log2(min_step)):
66
+ lower_exp = max(floor(log2(v)), ceil(log2(min_step)))
67
+ lower_pow2 = 2**lower_exp
68
+ upper_exp = min(ceil(log2(v)), floor(log2(max_step)))
69
+ upper_pow2 = 2**upper_exp
70
+ if abs(v - lower_pow2) <= abs(v - upper_pow2):
71
+ return lower_pow2
72
+ else:
73
+ return upper_pow2
74
+ if even is True and v % 2 == 1:
75
+ return v + 1
76
+ return v
77
+ return None
78
+
79
+
80
+ def useful_array(
81
+ plane: "VideoPlane", bytes_per_pixel: int = 1, dtype: str = "uint8"
82
+ ) -> "np.ndarray[Any, Any]":
83
+ total_line_size = abs(plane.line_size)
84
+ useful_line_size = plane.width * bytes_per_pixel
85
+ arr: "np.ndarray[Any, Any]" = np.frombuffer(cast(bytes, plane), np.uint8)
86
+ if total_line_size != useful_line_size:
87
+ arr = arr.reshape(-1, total_line_size)[:, 0:useful_line_size].reshape(-1)
88
+ return arr.view(np.dtype(dtype))
89
+
90
+
91
+ def yuva_to_rgba(frame: "VideoFrame") -> "np.ndarray[Any, Any]":
92
+ # https://stackoverflow.com/questions/72308308/converting-yuv-to-rgb-in-python-coefficients-work-with-array-dont-work-with-n
93
+
94
+ width = frame.width
95
+ height = frame.height
96
+
97
+ y = useful_array(frame.planes[0]).reshape(height, width)
98
+ u = useful_array(frame.planes[1]).reshape(height // 2, width // 2)
99
+ v = useful_array(frame.planes[2]).reshape(height // 2, width // 2)
100
+ a = useful_array(frame.planes[3]).reshape(height, width)
101
+
102
+ u = u.repeat(2, axis=0).repeat(2, axis=1) # type: ignore
103
+ v = v.repeat(2, axis=0).repeat(2, axis=1) # type: ignore
104
+
105
+ y = y.reshape((y.shape[0], y.shape[1], 1)) # type: ignore
106
+ u = u.reshape((u.shape[0], u.shape[1], 1)) # type: ignore
107
+ v = v.reshape((v.shape[0], v.shape[1], 1)) # type: ignore
108
+ a = a.reshape((a.shape[0], a.shape[1], 1)) # type: ignore
109
+
110
+ yuv_array = np.concatenate((y, u, v), axis=2)
111
+
112
+ yuv_array = yuv_array.astype(np.float32)
113
+ yuv_array[:, :, 0] = (
114
+ yuv_array[:, :, 0].clip(16, 235).astype(yuv_array.dtype) - 16 # type: ignore
115
+ )
116
+ yuv_array[:, :, 1:] = (
117
+ yuv_array[:, :, 1:].clip(16, 240).astype(yuv_array.dtype) - 128 # type: ignore
118
+ )
119
+
120
+ rgb_array = np.matmul(yuv_array, YUV_RGB_MATRIX.T).clip(0, 255).astype("uint8")
121
+
122
+ return np.concatenate((rgb_array, a), axis=2)
123
+
124
+
125
+ class StickerConvert:
126
+ def __init__(
127
+ self,
128
+ in_f: Union[Path, Tuple[Path, bytes]],
129
+ out_f: Path,
130
+ opt_comp: CompOption,
131
+ cb: CallbackProtocol,
132
+ # cb_return: CallbackReturn
133
+ ) -> None:
134
+ self.MSG_START_COMP = I("[I] Start compressing {} -> {}")
135
+ self.MSG_SKIP_COMP = I(
136
+ "[S] Compatible file found, skip compress and just copy {} -> {}"
137
+ )
138
+ self.MSG_COMP = I(
139
+ "[C] Compressing {} -> {} res={}x{}, quality={}, fps={}, color={} (step {}-{}-{})"
140
+ )
141
+ self.MSG_REDO_COMP = I(
142
+ "[{}] Compressed {} -> {} but size {} {} limit {}, recompressing"
143
+ )
144
+ self.MSG_DONE_COMP = I("[S] Successful compression {} -> {} size {} (step {})")
145
+ self.MSG_FAIL_COMP = I(
146
+ "[F] Failed Compression {} -> {}, "
147
+ "cannot get below limit {} with lowest quality under current settings (Best size: {})"
148
+ )
149
+ self.MSG_QUANT_NO_ALPHA = I(
150
+ "[W] {} does not support RGBA, defaulted to fastoctree quantization"
151
+ )
152
+ self.MSG_SVG_LONG = I("[W] Importing SVG takes long time")
153
+
154
+ self.in_f: Union[bytes, Path]
155
+ if isinstance(in_f, Path):
156
+ self.in_f = in_f
157
+ self.in_f_name = self.in_f.name
158
+ self.in_f_path = in_f
159
+ self.codec_info_orig = CodecInfo(self.in_f)
160
+ else:
161
+ self.in_f = in_f[1]
162
+ self.in_f_name = Path(in_f[0]).name
163
+ self.in_f_path = in_f[0]
164
+ self.codec_info_orig = CodecInfo(in_f[1], Path(in_f[0]).suffix)
165
+
166
+ valid_formats: List[str] = []
167
+ for i in opt_comp.get_format():
168
+ valid_formats.extend(i)
169
+
170
+ valid_ext = False
171
+ self.out_f = Path()
172
+ if len(valid_formats) == 0 or Path(out_f).suffix in valid_formats:
173
+ self.out_f = Path(out_f)
174
+ valid_ext = True
175
+
176
+ if not valid_ext:
177
+ if self.codec_info_orig.is_animated or opt_comp.fake_vid:
178
+ ext = opt_comp.format_vid[0]
179
+ else:
180
+ ext = opt_comp.format_img[0]
181
+ self.out_f = out_f.with_suffix(ext)
182
+
183
+ self.out_f_name: str = self.out_f.name
184
+
185
+ self.cb = cb
186
+ self.frames_raw: "List[np.ndarray[Any, Any]]" = []
187
+ self.frames_processed: "List[np.ndarray[Any, Any]]" = []
188
+ self.opt_comp: CompOption = opt_comp
189
+ if not self.opt_comp.steps:
190
+ self.opt_comp.steps = 1
191
+
192
+ self.size: int = 0
193
+ self.size_max: Optional[int] = None
194
+ self.res_w: Optional[int] = None
195
+ self.res_h: Optional[int] = None
196
+ self.quality: Optional[int] = None
197
+ self.fps: Optional[Fraction] = None
198
+ self.color: Optional[int] = None
199
+
200
+ self.bg_color: Optional[Tuple[int, int, int, int]] = None
201
+ if self.opt_comp.bg_color:
202
+ r, g, b, a = bytes.fromhex(self.opt_comp.bg_color)
203
+ self.bg_color = (r, g, b, a)
204
+
205
+ self.tmp_f: BytesIO = BytesIO()
206
+ self.result: Optional[bytes] = None
207
+ self.result_size: int = 0
208
+ self.result_step: Optional[int] = None
209
+
210
+ self.apngasm = None
211
+
212
+ @staticmethod
213
+ def convert(
214
+ in_f: Union[Path, Tuple[Path, bytes]],
215
+ out_f: Path,
216
+ opt_comp: CompOption,
217
+ cb: CallbackProtocol,
218
+ _cb_return: CallbackReturn,
219
+ ) -> Tuple[bool, Path, Union[None, bytes, Path], int]:
220
+ sticker = StickerConvert(in_f, out_f, opt_comp, cb)
221
+ result = sticker._convert()
222
+ cb.put("update_bar")
223
+ return result
224
+
225
+ def _convert(self) -> Tuple[bool, Path, Union[None, bytes, Path], int]:
226
+ result = self.check_if_compatible()
227
+ if result:
228
+ return self.compress_done(result)
229
+
230
+ self.cb.put((self.MSG_START_COMP.format(self.in_f_name, self.out_f_name)))
231
+
232
+ steps_list = self.generate_steps_list()
233
+
234
+ step_lower = 0
235
+ step_upper = self.opt_comp.steps
236
+
237
+ if self.codec_info_orig.is_animated is True:
238
+ self.size_max = self.opt_comp.size_max_vid
239
+ else:
240
+ self.size_max = self.opt_comp.size_max_img
241
+
242
+ if self.size_max in (None, 0):
243
+ # No limit to size, create the best quality result
244
+ step_current = 0
245
+ else:
246
+ step_current = int(rounding((step_lower + step_upper) / 2))
247
+
248
+ self.frames_import()
249
+ while True:
250
+ param = steps_list[step_current]
251
+ self.res_w = param[0]
252
+ self.res_h = param[1]
253
+ self.quality = param[2]
254
+ if param[3] and self.codec_info_orig.fps:
255
+ fps_tmp = min(param[3], self.codec_info_orig.fps)
256
+ self.fps = self.fix_fps(fps_tmp)
257
+ else:
258
+ self.fps = Fraction(0)
259
+ self.color = param[4]
260
+
261
+ self.tmp_f = BytesIO()
262
+ msg = self.MSG_COMP.format(
263
+ self.in_f_name,
264
+ self.out_f_name,
265
+ self.res_w,
266
+ self.res_h,
267
+ self.quality,
268
+ int(self.fps),
269
+ self.color,
270
+ step_lower,
271
+ step_current,
272
+ step_upper,
273
+ )
274
+ self.cb.put(msg)
275
+
276
+ self.frames_processed = self.frames_drop(self.frames_raw)
277
+ self.frames_processed = self.frames_resize(self.frames_processed)
278
+ self.frames_export()
279
+
280
+ self.tmp_f.seek(0)
281
+ self.size = self.tmp_f.getbuffer().nbytes
282
+
283
+ if not self.size_max or (
284
+ self.size <= self.size_max and self.size >= self.result_size
285
+ ):
286
+ self.result = self.tmp_f.read()
287
+ self.result_size = self.size
288
+ self.result_step = step_current
289
+
290
+ if (
291
+ step_upper - step_lower > 0
292
+ and step_current != step_lower
293
+ and self.size_max
294
+ ):
295
+ if self.size <= self.size_max:
296
+ sign = "<"
297
+ step_upper = step_current
298
+ else:
299
+ sign = ">"
300
+ step_lower = step_current
301
+ if step_current == step_lower + 1:
302
+ step_current = step_lower
303
+ else:
304
+ step_current = int(rounding((step_lower + step_upper) / 2))
305
+ self.recompress(sign)
306
+ elif self.result:
307
+ return self.compress_done(self.result, self.result_step)
308
+ else:
309
+ return self.compress_fail()
310
+
311
+ def check_if_compatible(self) -> Optional[bytes]:
312
+ f_fmt = self.opt_comp.get_format()
313
+ if (
314
+ # Issue #260: Some webp file not accepted by Whatsapp
315
+ ".webp" not in f_fmt[0]
316
+ and ".webp" not in f_fmt[1]
317
+ and FormatVerify.check_format(
318
+ self.in_f,
319
+ fmt=f_fmt,
320
+ file_info=self.codec_info_orig,
321
+ )
322
+ and FormatVerify.check_file_res(
323
+ self.in_f, res=self.opt_comp.get_res(), file_info=self.codec_info_orig
324
+ )
325
+ and FormatVerify.check_file_fps(
326
+ self.in_f, fps=self.opt_comp.get_fps(), file_info=self.codec_info_orig
327
+ )
328
+ and FormatVerify.check_file_size(
329
+ self.in_f,
330
+ size=self.opt_comp.get_size_max(),
331
+ file_info=self.codec_info_orig,
332
+ )
333
+ and FormatVerify.check_file_duration(
334
+ self.in_f,
335
+ duration=self.opt_comp.get_duration(),
336
+ file_info=self.codec_info_orig,
337
+ )
338
+ ):
339
+ self.cb.put((self.MSG_SKIP_COMP.format(self.in_f_name, self.out_f_name)))
340
+
341
+ if isinstance(self.in_f, Path):
342
+ with open(self.in_f, "rb") as f:
343
+ result = f.read()
344
+ self.result_size = os.path.getsize(self.in_f)
345
+ else:
346
+ result = self.in_f
347
+ self.result_size = len(self.in_f)
348
+
349
+ return result
350
+
351
+ return None
352
+
353
+ def generate_steps_list(self) -> List[Tuple[Optional[int], ...]]:
354
+ steps_list: List[Tuple[Optional[int], ...]] = []
355
+ need_even = self.out_f.suffix in (".webm", ".mp4", ".mkv", ".webp")
356
+ for step in range(self.opt_comp.steps, -1, -1):
357
+ steps_list.append(
358
+ (
359
+ get_step_value(
360
+ self.opt_comp.res_w_max,
361
+ self.opt_comp.res_w_min,
362
+ step,
363
+ self.opt_comp.steps,
364
+ self.opt_comp.res_power,
365
+ need_even,
366
+ self.opt_comp.res_snap_pow2,
367
+ ),
368
+ get_step_value(
369
+ self.opt_comp.res_h_max,
370
+ self.opt_comp.res_h_min,
371
+ step,
372
+ self.opt_comp.steps,
373
+ self.opt_comp.res_power,
374
+ need_even,
375
+ self.opt_comp.res_snap_pow2,
376
+ ),
377
+ get_step_value(
378
+ self.opt_comp.quality_max,
379
+ self.opt_comp.quality_min,
380
+ step,
381
+ self.opt_comp.steps,
382
+ self.opt_comp.quality_power,
383
+ ),
384
+ get_step_value(
385
+ self.opt_comp.fps_max,
386
+ self.opt_comp.fps_min,
387
+ step,
388
+ self.opt_comp.steps,
389
+ self.opt_comp.fps_power,
390
+ ),
391
+ get_step_value(
392
+ self.opt_comp.color_max,
393
+ self.opt_comp.color_min,
394
+ step,
395
+ self.opt_comp.steps,
396
+ self.opt_comp.color_power,
397
+ ),
398
+ )
399
+ )
400
+
401
+ return steps_list
402
+
403
+ def recompress(self, sign: str) -> None:
404
+ msg = self.MSG_REDO_COMP.format(
405
+ sign, self.in_f_name, self.out_f_name, self.size, sign, self.size_max
406
+ )
407
+ self.cb.put(msg)
408
+
409
+ def compress_fail(
410
+ self,
411
+ ) -> Tuple[bool, Path, Union[None, bytes, Path], int]:
412
+ msg = self.MSG_FAIL_COMP.format(
413
+ self.in_f_name, self.out_f_name, self.size_max, self.size
414
+ )
415
+ self.cb.put(msg)
416
+
417
+ return False, self.in_f_path, self.out_f, self.size
418
+
419
+ def compress_done(
420
+ self, data: bytes, result_step: Optional[int] = None
421
+ ) -> Tuple[bool, Path, Union[None, bytes, Path], int]:
422
+ out_f: Union[None, bytes, Path]
423
+
424
+ if self.out_f.stem == "none":
425
+ out_f = None
426
+ elif self.out_f.stem == "bytes":
427
+ out_f = data
428
+ else:
429
+ out_f = self.out_f
430
+ with open(self.out_f, "wb+") as f:
431
+ f.write(data)
432
+
433
+ if result_step is not None:
434
+ msg = self.MSG_DONE_COMP.format(
435
+ self.in_f_name, self.out_f_name, self.result_size, result_step
436
+ )
437
+ self.cb.put(msg)
438
+
439
+ return True, self.in_f_path, out_f, self.result_size
440
+
441
+ def frames_import(self) -> None:
442
+ if isinstance(self.in_f, Path):
443
+ suffix = self.in_f.suffix
444
+ else:
445
+ suffix = Path(self.in_f_name).suffix
446
+
447
+ if suffix in (".tgs", ".lottie", ".json"):
448
+ self._frames_import_lottie()
449
+ elif suffix in (".webp", ".apng", ".png", ".gif"):
450
+ # ffmpeg do not support webp decoding (yet)
451
+ # ffmpeg could fail to decode apng if file is buggy
452
+ self._frames_import_pillow()
453
+ elif suffix == ".svg":
454
+ self._frames_import_svg()
455
+ else:
456
+ self._frames_import_pyav()
457
+
458
+ def _frames_import_svg(self) -> None:
459
+ width = self.codec_info_orig.res[0]
460
+ height = self.codec_info_orig.res[1]
461
+
462
+ if RUNTIME_STATE.get("crd") is None:
463
+ chrome_path: Optional[str]
464
+ if self.opt_comp.chromium_path:
465
+ chrome_path = self.opt_comp.chromium_path
466
+ else:
467
+ chrome_path = CRD.get_chromium_path()
468
+ args = [
469
+ "--headless",
470
+ "--kiosk",
471
+ "--disable-extensions",
472
+ "--disable-infobars",
473
+ "--disable-gpu",
474
+ "--disable-gpu-rasterization",
475
+ "--hide-scrollbars",
476
+ "--force-device-scale-factor=1",
477
+ "about:blank",
478
+ ]
479
+ if chrome_path is None:
480
+ raise RuntimeError("[F] Chrome/Chromium required for importing svg")
481
+ self.cb.put(self.MSG_SVG_LONG)
482
+ RUNTIME_STATE["crd"] = CRD(chrome_path, args=args)
483
+ RUNTIME_STATE["crd"].connect(-1) # type: ignore
484
+
485
+ crd = cast(CRD, RUNTIME_STATE["crd"])
486
+ if isinstance(self.in_f, bytes):
487
+ svg = self.in_f.decode()
488
+ else:
489
+ with open(self.in_f) as f:
490
+ svg = f.read()
491
+ soup = BeautifulSoup(svg, "html.parser")
492
+ svg_tag = soup.find_all("svg")[0]
493
+
494
+ if svg_tag.get("width") is None:
495
+ svg_tag["width"] = width
496
+ if svg_tag.get("height") is None:
497
+ svg_tag["height"] = height
498
+ svg = str(soup)
499
+
500
+ crd.open_html_str(svg)
501
+ crd.set_transparent_bg()
502
+ init_js = 'svg = document.getElementsByTagName("svg")[0];'
503
+ if self.codec_info_orig.fps > 0:
504
+ init_js += "svg.pauseAnimations();"
505
+ init_js += "JSON.stringify(svg.getBoundingClientRect());"
506
+ bound = json.loads(
507
+ json.loads(crd.exec_js(init_js))["result"]["result"]["value"]
508
+ )
509
+ clip = {
510
+ "x": bound["x"],
511
+ "y": bound["y"],
512
+ "width": width,
513
+ "height": height,
514
+ "scale": 1,
515
+ }
516
+
517
+ if self.codec_info_orig.fps > 0:
518
+ for i in range(self.codec_info_orig.frames):
519
+ curr_time = (
520
+ i
521
+ / self.codec_info_orig.frames
522
+ * self.codec_info_orig.duration
523
+ / 1000
524
+ )
525
+ crd.exec_js(f"svg.setCurrentTime({curr_time})")
526
+ self.frames_raw.append(np.asarray(crd.screenshot(clip)))
527
+ else:
528
+ self.frames_raw.append(np.asarray(crd.screenshot(clip)))
529
+
530
+ def _frames_import_pillow(self) -> None:
531
+ with Image.open(self.in_f) as im:
532
+ # Note: im.convert("RGBA") would return rgba image of current frame only
533
+ if (
534
+ "n_frames" in dir(im)
535
+ and im.n_frames != 0
536
+ and self.codec_info_orig.fps != 0.0
537
+ ):
538
+ # Pillow is not reliable for getting webp frame durations
539
+ durations: Optional[List[int]]
540
+ if im.format == "WEBP":
541
+ _, _, _, durations = CodecInfo._get_file_fps_frames_duration_webp( # type: ignore
542
+ self.in_f
543
+ )
544
+ else:
545
+ durations = None
546
+
547
+ duration_ptr = 0.0
548
+ duration_inc = 1 / self.codec_info_orig.fps * 1000
549
+ frame = 0
550
+ if durations is None:
551
+ next_frame_start_duration = cast(int, im.info.get("duration", 1000))
552
+ else:
553
+ next_frame_start_duration = durations[0]
554
+ while True:
555
+ self.frames_raw.append(np.asarray(im.convert("RGBA")))
556
+ duration_ptr += duration_inc
557
+ if duration_ptr >= next_frame_start_duration:
558
+ frame += 1
559
+ if frame == im.n_frames:
560
+ break
561
+ im.seek(frame)
562
+
563
+ if durations is None:
564
+ next_frame_start_duration += cast(
565
+ int, im.info.get("duration", 1000)
566
+ )
567
+ else:
568
+ next_frame_start_duration += durations[frame]
569
+ else:
570
+ self.frames_raw.append(np.asarray(im.convert("RGBA")))
571
+
572
+ def _frames_import_pyav(self) -> None:
573
+ import av
574
+ from av.codec.context import CodecContext
575
+ from av.container.input import InputContainer
576
+ from av.video.codeccontext import VideoCodecContext
577
+ from av.video.frame import VideoFrame
578
+
579
+ # Crashes when handling some webm in yuv420p and convert to rgba
580
+ # https://github.com/PyAV-Org/PyAV/issues/1166
581
+ file: Union[BytesIO, str]
582
+ if isinstance(self.in_f, Path):
583
+ file = self.in_f.as_posix()
584
+ else:
585
+ file = BytesIO(self.in_f)
586
+ with av.open(file) as container:
587
+ container = cast(InputContainer, container)
588
+ context = container.streams.video[0].codec_context
589
+ if context.name == "vp8":
590
+ context = CodecContext.create("libvpx", "r")
591
+ elif context.name == "vp9":
592
+ context = cast(
593
+ VideoCodecContext, CodecContext.create("libvpx-vp9", "r")
594
+ )
595
+
596
+ for packet in container.demux(container.streams.video):
597
+ for frame in context.decode(packet):
598
+ width_orig = frame.width
599
+ height_orig = frame.height
600
+
601
+ # Need to pad frame to even dimension first
602
+ if width_orig % 2 == 1 or height_orig % 2 == 1:
603
+ from av.filter import Graph
604
+
605
+ width_new = width_orig + width_orig % 2
606
+ height_new = height_orig + height_orig % 2
607
+
608
+ graph = Graph()
609
+ in_src = graph.add_buffer(template=container.streams.video[0])
610
+ pad = graph.add(
611
+ "pad", f"{width_new}:{height_new}:0:0:color=#00000000"
612
+ )
613
+ in_src.link_to(pad)
614
+ sink = graph.add("buffersink")
615
+ pad.link_to(sink)
616
+ graph.configure()
617
+
618
+ graph.push(frame)
619
+ frame_resized = cast(VideoFrame, graph.pull())
620
+ else:
621
+ frame_resized = frame
622
+
623
+ # yuva420p may cause crash
624
+ # Not safe to directly call frame.to_ndarray(format="rgba")
625
+ # https://github.com/PyAV-Org/PyAV/discussions/1510
626
+ # if int(av.__version__.split(".")[0]) >= 14:
627
+ # rgba_array = frame_resized.to_ndarray(format="rgba")
628
+ if frame_resized.format.name == "yuv420p":
629
+ rgb_array = frame_resized.to_ndarray(format="rgb24")
630
+ rgba_array = np.dstack(
631
+ (
632
+ rgb_array,
633
+ cast(
634
+ np.ndarray[Any, np.dtype[np.uint8]],
635
+ np.zeros(rgb_array.shape[:2], dtype=np.uint8) + 255,
636
+ ),
637
+ )
638
+ )
639
+ else:
640
+ frame_resized = frame_resized.reformat(
641
+ format="yuva420p",
642
+ dst_colorspace=1,
643
+ )
644
+ rgba_array = yuva_to_rgba(frame_resized)
645
+
646
+ # Remove pixels that was added to make dimensions even
647
+ rgba_array = rgba_array[0:height_orig, 0:width_orig]
648
+ self.frames_raw.append(rgba_array)
649
+
650
+ def _frames_import_lottie(self) -> None:
651
+ from rlottie_python.rlottie_wrapper import LottieAnimation
652
+
653
+ if isinstance(self.in_f, Path):
654
+ suffix = self.in_f.suffix
655
+ else:
656
+ suffix = Path(self.in_f_name).suffix
657
+
658
+ if suffix == ".tgs":
659
+ if isinstance(self.in_f, Path):
660
+ anim = LottieAnimation.from_tgs(self.in_f.as_posix())
661
+ else:
662
+ import gzip
663
+
664
+ with gzip.open(BytesIO(self.in_f)) as f:
665
+ data = f.read().decode(encoding="utf-8")
666
+ anim = LottieAnimation.from_data(data)
667
+ else:
668
+ if isinstance(self.in_f, Path):
669
+ anim = LottieAnimation.from_file(self.in_f.as_posix())
670
+ else:
671
+ anim = LottieAnimation.from_data(self.in_f.decode("utf-8"))
672
+
673
+ for i in range(anim.lottie_animation_get_totalframe()):
674
+ frame = np.asarray(anim.render_pillow_frame(frame_num=i))
675
+ self.frames_raw.append(frame)
676
+
677
+ anim.lottie_animation_destroy()
678
+
679
+ def determine_bg_color(self) -> Tuple[int, int, int, int]:
680
+ mean_total = 0.0
681
+ # Calculate average color of all frames for selecting background color
682
+ for frame in self.frames_raw:
683
+ s = frame.shape
684
+ colors = frame.reshape((-1, s[2])) # type: ignore
685
+ # Do not count in alpha=0
686
+ # If alpha > 0, use alpha as weight
687
+ colors = colors[colors[:, 3] != 0]
688
+ if colors.shape[0] != 0:
689
+ alphas = colors[:, 3] / 255
690
+ r_mean = cast(float, np.mean(colors[:, 0] * alphas))
691
+ g_mean = cast(float, np.mean(colors[:, 1] * alphas))
692
+ b_mean = cast(float, np.mean(colors[:, 2] * alphas))
693
+ mean_total += (r_mean + g_mean + b_mean) / 3
694
+
695
+ if mean_total / len(self.frames_raw) < 128:
696
+ return (255, 255, 255, 0)
697
+ else:
698
+ return (0, 0, 0, 0)
699
+
700
+ def frames_resize(
701
+ self, frames_in: "List[np.ndarray[Any, Any]]"
702
+ ) -> "List[np.ndarray[Any, Any]]":
703
+ frames_out: "List[np.ndarray[Any, Any]]" = []
704
+
705
+ resample: Literal[0, 1, 2, 3, 4, 5]
706
+ if self.opt_comp.scale_filter == "nearest":
707
+ resample = Image.NEAREST
708
+ elif self.opt_comp.scale_filter == "box":
709
+ resample = Image.BOX
710
+ elif self.opt_comp.scale_filter == "bilinear":
711
+ resample = Image.BILINEAR
712
+ elif self.opt_comp.scale_filter == "hamming":
713
+ resample = Image.HAMMING
714
+ elif self.opt_comp.scale_filter == "bicubic":
715
+ resample = Image.BICUBIC
716
+ elif self.opt_comp.scale_filter == "lanczos":
717
+ resample = Image.LANCZOS
718
+ else:
719
+ resample = Image.BICUBIC
720
+
721
+ if self.bg_color is None:
722
+ self.bg_color = self.determine_bg_color()
723
+
724
+ for frame in frames_in:
725
+ with Image.fromarray(frame, "RGBA") as im: # type: ignore
726
+ width, height = im.size
727
+
728
+ if self.res_w is None:
729
+ self.res_w = width
730
+ if self.res_h is None:
731
+ self.res_h = height
732
+
733
+ scaling = 1 - (self.opt_comp.padding_percent / 100)
734
+ if width / self.res_w > height / self.res_h:
735
+ width_new = int(self.res_w * scaling)
736
+ height_new = int(height * self.res_w / width * scaling)
737
+ else:
738
+ height_new = int(self.res_h * scaling)
739
+ width_new = int(width * self.res_h / height * scaling)
740
+
741
+ with im.resize((width_new, height_new), resample=resample) as im_resized:
742
+ with Image.new(
743
+ "RGBA", (self.res_w, self.res_h), self.bg_color
744
+ ) as im_new:
745
+ im_new.alpha_composite(
746
+ im_resized,
747
+ ((self.res_w - width_new) // 2, (self.res_h - height_new) // 2),
748
+ )
749
+ frames_out.append(np.asarray(im_new))
750
+
751
+ return frames_out
752
+
753
+ def frames_drop(
754
+ self, frames_in: "List[np.ndarray[Any, Any]]"
755
+ ) -> "List[np.ndarray[Any, Any]]":
756
+ if (
757
+ not self.codec_info_orig.is_animated
758
+ or not self.fps
759
+ or len(self.frames_processed) == 1
760
+ ):
761
+ return [frames_in[0]]
762
+
763
+ frames_out: "List[np.ndarray[Any, Any]]" = []
764
+
765
+ # fps_ratio: 1 frame in new anim equal to how many frame in old anim
766
+ # speed_ratio: How much to speed up / slow down
767
+ fps_ratio = self.codec_info_orig.fps / self.fps
768
+ if (
769
+ self.opt_comp.duration_min
770
+ and self.codec_info_orig.duration < self.opt_comp.duration_min
771
+ ):
772
+ speed_ratio = self.codec_info_orig.duration / self.opt_comp.duration_min
773
+ elif (
774
+ self.opt_comp.duration_max
775
+ and self.codec_info_orig.duration > self.opt_comp.duration_max
776
+ ):
777
+ speed_ratio = self.codec_info_orig.duration / self.opt_comp.duration_max
778
+ else:
779
+ speed_ratio = 1
780
+
781
+ # How many frames to advance in original video for each frame of output video
782
+ frame_increment = fps_ratio * speed_ratio
783
+
784
+ frames_out_min = None
785
+ frames_out_max = None
786
+ if self.opt_comp.duration_min:
787
+ frames_out_min = ceil(self.fps * self.opt_comp.duration_min / 1000)
788
+ if self.opt_comp.duration_max:
789
+ frames_out_max = floor(self.fps * self.opt_comp.duration_max / 1000)
790
+
791
+ frame_current = 0
792
+ frame_current_float = 0.0
793
+ while True:
794
+ if frame_current <= len(frames_in) - 1 and not (
795
+ frames_out_max and len(frames_out) == frames_out_max
796
+ ):
797
+ frames_out.append(frames_in[frame_current])
798
+ else:
799
+ while len(frames_out) == 0 or (
800
+ frames_out_min and len(frames_out) < frames_out_min
801
+ ):
802
+ frames_out.append(frames_in[-1])
803
+ return frames_out
804
+ frame_current_float += frame_increment
805
+ frame_current = int(rounding(frame_current_float))
806
+
807
+ def frames_export(self) -> None:
808
+ is_animated = len(self.frames_processed) > 1 and self.fps
809
+ if self.out_f.suffix in (".apng", ".png"):
810
+ if is_animated:
811
+ self._frames_export_apng()
812
+ else:
813
+ self._frames_export_png()
814
+ elif self.out_f.suffix in (".gif", ".webp"):
815
+ self._frames_export_pil_anim()
816
+ elif self.out_f.suffix in (".webm", ".mp4", ".mkv") or is_animated:
817
+ self._frames_export_pyav()
818
+ else:
819
+ self._frames_export_pil()
820
+
821
+ def _check_dup(self) -> bool:
822
+ if len(self.frames_processed) == 1:
823
+ return False
824
+
825
+ prev_frame = self.frames_processed[0]
826
+ for frame in self.frames_processed[1:]:
827
+ if np.array_equal(frame, prev_frame):
828
+ return True
829
+ prev_frame = frame
830
+
831
+ return False
832
+
833
+ def _frames_export_pil(self) -> None:
834
+ with Image.fromarray(self.frames_processed[0]) as im: # type: ignore
835
+ im.save(
836
+ self.tmp_f,
837
+ format=self.out_f.suffix.replace(".", ""),
838
+ quality=self.quality,
839
+ )
840
+
841
+ def _frames_export_pyav(self) -> None:
842
+ import av
843
+ from av.video.stream import VideoStream
844
+
845
+ options_container: Dict[str, str] = {}
846
+ options_stream: Dict[str, str] = {}
847
+
848
+ if isinstance(self.quality, int):
849
+ # Seems not actually working
850
+ options_stream["quality"] = str(self.quality)
851
+ options_stream["lossless"] = "0"
852
+
853
+ if self.out_f.suffix in (".apng", ".png"):
854
+ codec = "apng"
855
+ pixel_format = "rgba"
856
+ options_stream["plays"] = "0"
857
+ elif self.out_f.suffix in (".webm", ".mkv"):
858
+ codec = "libvpx-vp9"
859
+ pixel_format = "yuva420p"
860
+ options_stream["loop"] = "0"
861
+ elif self.out_f.suffix == ".webp":
862
+ codec = "webp"
863
+ pixel_format = "yuva420p"
864
+ options_container["loop"] = "0"
865
+ else:
866
+ codec = "libvpx-vp9"
867
+ pixel_format = "yuv420p"
868
+ options_stream["loop"] = "0"
869
+
870
+ with av.open(
871
+ self.tmp_f,
872
+ "w",
873
+ format=self.out_f.suffix.replace(".", ""),
874
+ options=options_container,
875
+ ) as output:
876
+ out_stream = output.add_stream(codec, rate=self.fps, options=options_stream) # type: ignore
877
+ out_stream = cast(VideoStream, out_stream)
878
+ assert isinstance(self.res_w, int) and isinstance(self.res_h, int)
879
+ out_stream.width = self.res_w
880
+ out_stream.height = self.res_h
881
+ out_stream.pix_fmt = pixel_format
882
+
883
+ for frame in self.frames_processed:
884
+ av_frame = av.VideoFrame.from_ndarray(frame, format="rgba")
885
+ output.mux(out_stream.encode(av_frame))
886
+ output.mux(out_stream.encode())
887
+
888
    def _frames_export_pil_anim(self) -> None:
        """Export animated GIF or WebP using Pillow.

        Builds per-format keyword arguments for ``Image.save``: GIF frames
        are quantized and reduced to binary transparency (GIF supports a
        single transparent palette entry only); WebP frames get
        quality-dependent encoder settings. All frames are written in a
        single save call via ``append_images``.
        """
        extra_kwargs: Dict[str, Any] = {}

        # disposal=2 on gif cause flicker in image with transparency
        # Occurs in Pillow == 10.2.0
        # https://github.com/python-pillow/Pillow/issues/7787
        if PillowVersion == "10.2.0":
            extra_kwargs["optimize"] = False
        else:
            extra_kwargs["optimize"] = True

        if self.out_f.suffix == ".gif":
            # GIF can only have one alpha color
            # Change lowest alpha to alpha=0
            # Only keep alpha=0 and alpha=255, nothing in between
            extra_kwargs["format"] = "GIF"
            frames_processed = np.array(self.frames_processed)
            # NOTE: alpha is a view into frames_processed, so the
            # assignments below modify frames_processed in place
            alpha = frames_processed[:, :, :, 3]
            alpha_min = np.min(alpha)  # type: ignore
            if alpha_min < 255:
                alpha[alpha > alpha_min] = 255
                alpha[alpha == alpha_min] = 0

            if 0 in alpha:
                # Transparent pixels exist: mark palette entry 0 transparent
                extra_kwargs["transparency"] = 0
                extra_kwargs["disposal"] = 2
                im_out = [self.quantize(Image.fromarray(i)) for i in frames_processed]  # type: ignore
            else:
                # Fully opaque animation: encode as plain RGB
                im_out = [
                    self.quantize(Image.fromarray(i).convert("RGB")).convert("RGB")  # type: ignore
                    for i in frames_processed
                ]
        elif self.out_f.suffix == ".webp":
            im_out = [Image.fromarray(i) for i in self.frames_processed]  # type: ignore
            extra_kwargs["format"] = "WebP"
            extra_kwargs["allow_mixed"] = True
            extra_kwargs["kmax"] = (
                1  # Keyframe every frame, otherwise black lines artifact can appear
            )
            if self.quality:
                if self.quality < 20:
                    extra_kwargs["minimize_size"] = True
                # Lower quality -> slower/smaller encoding method (range 4..6)
                extra_kwargs["method"] = 4 + int(2 * (100 - self.quality) / 100)
                extra_kwargs["alpha_quality"] = self.quality
        else:
            raise RuntimeError(f"Invalid format {self.out_f.suffix}")

        if self.fps:
            # Animated output: first frame saved, the rest appended
            extra_kwargs["save_all"] = True
            extra_kwargs["append_images"] = im_out[1:]
            extra_kwargs["duration"] = int(1000 / self.fps)
            extra_kwargs["loop"] = 0

        im_out[0].save(
            self.tmp_f,
            quality=self.quality,
            **extra_kwargs,
        )
946
+
947
+ def _frames_export_png(self) -> None:
948
+ with Image.fromarray(self.frames_processed[0], "RGBA") as image: # type: ignore
949
+ image_quant = self.quantize(image)
950
+
951
+ with BytesIO() as f:
952
+ image_quant.save(f, format="png")
953
+ f.seek(0)
954
+ frame_optimized = self.optimize_png(f.read())
955
+ self.tmp_f.write(frame_optimized)
956
+
957
    def _frames_export_apng(self) -> None:
        """Assemble processed frames into an APNG via apngasm, then optimize it."""
        from apngasm_python._apngasm_python import APNGAsm, create_frame_from_rgb, create_frame_from_rgba  # type: ignore

        assert self.fps
        assert self.res_h

        # Stack all frames vertically into one tall image so that a single
        # quantize() call is applied across every frame
        frames_concat = np.concatenate(self.frames_processed)
        with Image.fromarray(frames_concat, "RGBA") as image_concat:  # type: ignore
            # Keep the alpha channel only if some pixel is not fully opaque
            if image_concat.getextrema()[3][0] < 255:  # type: ignore
                mode = "RGBA"
                create_frame_method = create_frame_from_rgba
            else:
                mode = "RGB"
                create_frame_method = create_frame_from_rgb
            image_quant = self.quantize(image_concat)

        # Lazily create and reuse the assembler instance across calls
        if self.apngasm is None:
            self.apngasm = APNGAsm()  # type: ignore
        assert isinstance(self.apngasm, APNGAsm)

        # Slice the tall quantized image back into individual frames,
        # each with a delay of delay_num/1000 seconds
        delay_num = int(1000 / self.fps)
        for i in range(0, image_quant.height, self.res_h):
            crop_dimension = (0, i, image_quant.width, i + self.res_h)
            image_cropped = image_quant.crop(crop_dimension)
            image_final = image_cropped.convert(mode)
            frame_final = create_frame_method(
                np.array(image_final),
                width=image_final.width,
                height=image_final.height,
                delay_num=delay_num,
                delay_den=1000,
            )
            self.apngasm.add_frame(frame_final)

        # Assemble into a temporary file, then optimize and copy into tmp_f
        with CacheStore.get_cache_store(path=self.opt_comp.cache_dir) as tempdir:
            tmp_apng = Path(tempdir, f"out{self.out_f.suffix}")
            self.apngasm.assemble(tmp_apng.as_posix())

            with open(tmp_apng, "rb") as f:
                apng_optimized = self.optimize_png(f.read())
                self.tmp_f.write(apng_optimized)

        # Clear assembler state for the next use
        self.apngasm.reset()
1000
+
1001
+ def optimize_png(self, image_bytes: bytes) -> bytes:
1002
+ import oxipng
1003
+
1004
+ return oxipng.optimize_from_memory(
1005
+ image_bytes,
1006
+ level=6,
1007
+ fix_errors=True,
1008
+ filter=[oxipng.RowFilter.Brute],
1009
+ optimize_alpha=True,
1010
+ strip=oxipng.StripChunks.safe(),
1011
+ )
1012
+
1013
+ def quantize(self, image: Image.Image) -> Image.Image:
1014
+ if not (self.color and self.color <= 256):
1015
+ return image.copy()
1016
+ if self.opt_comp.quantize_method == "imagequant":
1017
+ return self._quantize_by_imagequant(image)
1018
+ if self.opt_comp.quantize_method in ("mediancut", "maxcoverage", "fastoctree"):
1019
+ return self._quantize_by_pillow(image)
1020
+
1021
+ return image
1022
+
1023
+ def _quantize_by_imagequant(self, image: Image.Image) -> Image.Image:
1024
+ import imagequant # type: ignore
1025
+
1026
+ assert isinstance(self.quality, int)
1027
+ assert isinstance(self.opt_comp.quality_min, int)
1028
+ assert isinstance(self.opt_comp.quality_max, int)
1029
+ assert isinstance(self.color, int)
1030
+
1031
+ dither = 1 - (self.quality - self.opt_comp.quality_min) / (
1032
+ self.opt_comp.quality_max - self.opt_comp.quality_min
1033
+ )
1034
+ image_quant = None
1035
+ for i in range(self.quality, 101, 5):
1036
+ try:
1037
+ image_quant = imagequant.quantize_pil_image( # type: ignore
1038
+ image,
1039
+ dithering_level=dither,
1040
+ max_colors=self.color,
1041
+ min_quality=self.opt_comp.quality_min,
1042
+ max_quality=i,
1043
+ )
1044
+ return image_quant
1045
+ except RuntimeError:
1046
+ pass
1047
+
1048
+ return image
1049
+
1050
+ def _quantize_by_pillow(self, image: Image.Image) -> Image.Image:
1051
+ assert self.color
1052
+
1053
+ if image.mode == "RGBA" and self.opt_comp.quantize_method in (
1054
+ "mediancut",
1055
+ "maxcoverage",
1056
+ ):
1057
+ self.cb.put(self.MSG_QUANT_NO_ALPHA.format(self.opt_comp.quantize_method))
1058
+ method = Image.Quantize.FASTOCTREE
1059
+ elif self.opt_comp.quantize_method == "mediancut":
1060
+ method = Image.Quantize.MEDIANCUT
1061
+ elif self.opt_comp.quantize_method == "maxcoverage":
1062
+ method = Image.Quantize.MAXCOVERAGE
1063
+ else:
1064
+ method = Image.Quantize.FASTOCTREE
1065
+ return image.quantize(colors=self.color, method=method)
1066
+
1067
+ def fix_fps(self, fps: float) -> Fraction:
1068
+ # After rounding fps/duration during export,
1069
+ # Video duration may exceed limit.
1070
+ # Hence we need to 'fix' the fps
1071
+ if self.out_f.suffix == ".gif":
1072
+ # Quote from https://www.w3.org/Graphics/GIF/spec-gif89a.txt
1073
+ # vii) Delay Time - If not 0, this field specifies
1074
+ # the number of hundredths (1/100) of a second
1075
+ #
1076
+ # For GIF, we need to adjust fps such that delay is matching to hundreths of second
1077
+ return self._fix_fps_duration(fps, 100)
1078
+ if self.out_f.suffix in (".webp", ".apng", ".png"):
1079
+ return self._fix_fps_duration(fps, 1000)
1080
+
1081
+ return self._fix_fps_pyav(fps)
1082
+
1083
+ def _fix_fps_duration(self, fps: float, denominator: int) -> Fraction:
1084
+ delay = int(rounding(denominator / fps))
1085
+ fps_fraction = Fraction(denominator, delay)
1086
+ if self.opt_comp.fps_max and fps_fraction > self.opt_comp.fps_max:
1087
+ return Fraction(denominator, (delay + 1))
1088
+ if self.opt_comp.fps_min and fps_fraction < self.opt_comp.fps_min:
1089
+ return Fraction(denominator, (delay - 1))
1090
+ return fps_fraction
1091
+
1092
+ def _fix_fps_pyav(self, fps: float) -> Fraction:
1093
+ return Fraction(rounding(fps))