sticker-convert 2.8.12__py3-none-any.whl → 2.17.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (124) hide show
  1. sticker_convert/__main__.py +24 -24
  2. sticker_convert/auth/__init__.py +0 -0
  3. sticker_convert/auth/auth_base.py +19 -0
  4. sticker_convert/auth/auth_discord.py +149 -0
  5. sticker_convert/{utils/auth/get_kakao_auth.py → auth/auth_kakao_android_login.py} +331 -300
  6. sticker_convert/auth/auth_kakao_desktop_login.py +327 -0
  7. sticker_convert/auth/auth_kakao_desktop_memdump.py +281 -0
  8. sticker_convert/{utils/auth/get_line_auth.py → auth/auth_line.py} +98 -80
  9. sticker_convert/auth/auth_signal.py +139 -0
  10. sticker_convert/auth/auth_telethon.py +161 -0
  11. sticker_convert/auth/auth_viber.py +250 -0
  12. sticker_convert/auth/telegram_api.py +736 -0
  13. sticker_convert/cli.py +623 -509
  14. sticker_convert/converter.py +1093 -962
  15. sticker_convert/definitions.py +11 -0
  16. sticker_convert/downloaders/download_band.py +111 -0
  17. sticker_convert/downloaders/download_base.py +171 -130
  18. sticker_convert/downloaders/download_discord.py +92 -0
  19. sticker_convert/downloaders/download_kakao.py +417 -255
  20. sticker_convert/downloaders/download_line.py +484 -472
  21. sticker_convert/downloaders/download_ogq.py +80 -0
  22. sticker_convert/downloaders/download_signal.py +108 -92
  23. sticker_convert/downloaders/download_telegram.py +56 -130
  24. sticker_convert/downloaders/download_viber.py +121 -95
  25. sticker_convert/gui.py +788 -795
  26. sticker_convert/gui_components/frames/comp_frame.py +180 -165
  27. sticker_convert/gui_components/frames/config_frame.py +156 -113
  28. sticker_convert/gui_components/frames/control_frame.py +32 -30
  29. sticker_convert/gui_components/frames/cred_frame.py +232 -162
  30. sticker_convert/gui_components/frames/input_frame.py +139 -137
  31. sticker_convert/gui_components/frames/output_frame.py +112 -110
  32. sticker_convert/gui_components/frames/right_clicker.py +25 -23
  33. sticker_convert/gui_components/windows/advanced_compression_window.py +757 -715
  34. sticker_convert/gui_components/windows/base_window.py +7 -2
  35. sticker_convert/gui_components/windows/discord_get_auth_window.py +79 -0
  36. sticker_convert/gui_components/windows/kakao_get_auth_window.py +511 -186
  37. sticker_convert/gui_components/windows/line_get_auth_window.py +94 -102
  38. sticker_convert/gui_components/windows/signal_get_auth_window.py +84 -135
  39. sticker_convert/gui_components/windows/viber_get_auth_window.py +168 -0
  40. sticker_convert/ios-message-stickers-template/.github/FUNDING.yml +3 -3
  41. sticker_convert/ios-message-stickers-template/.gitignore +0 -0
  42. sticker_convert/ios-message-stickers-template/README.md +10 -10
  43. sticker_convert/ios-message-stickers-template/stickers/Info.plist +43 -43
  44. sticker_convert/ios-message-stickers-template/stickers StickerPackExtension/Info.plist +31 -31
  45. sticker_convert/ios-message-stickers-template/stickers StickerPackExtension/Stickers.xcstickers/Contents.json +6 -6
  46. sticker_convert/ios-message-stickers-template/stickers StickerPackExtension/Stickers.xcstickers/Sticker Pack.stickerpack/Contents.json +20 -20
  47. sticker_convert/ios-message-stickers-template/stickers StickerPackExtension/Stickers.xcstickers/Sticker Pack.stickerpack/Sticker 1.sticker/Contents.json +9 -9
  48. sticker_convert/ios-message-stickers-template/stickers StickerPackExtension/Stickers.xcstickers/Sticker Pack.stickerpack/Sticker 1.sticker/Sticker 1.png +0 -0
  49. sticker_convert/ios-message-stickers-template/stickers StickerPackExtension/Stickers.xcstickers/Sticker Pack.stickerpack/Sticker 2.sticker/Contents.json +9 -9
  50. sticker_convert/ios-message-stickers-template/stickers StickerPackExtension/Stickers.xcstickers/Sticker Pack.stickerpack/Sticker 2.sticker/Sticker 2.png +0 -0
  51. sticker_convert/ios-message-stickers-template/stickers StickerPackExtension/Stickers.xcstickers/Sticker Pack.stickerpack/Sticker 3.sticker/Contents.json +9 -9
  52. sticker_convert/ios-message-stickers-template/stickers StickerPackExtension/Stickers.xcstickers/Sticker Pack.stickerpack/Sticker 3.sticker/Sticker 3.png +0 -0
  53. sticker_convert/ios-message-stickers-template/stickers StickerPackExtension/Stickers.xcstickers/iMessage App Icon.stickersiconset/App-Store-1024x1024pt.png +0 -0
  54. sticker_convert/ios-message-stickers-template/stickers StickerPackExtension/Stickers.xcstickers/iMessage App Icon.stickersiconset/Contents.json +91 -91
  55. sticker_convert/ios-message-stickers-template/stickers StickerPackExtension/Stickers.xcstickers/iMessage App Icon.stickersiconset/Messages-App-Store-1024x768pt.png +0 -0
  56. sticker_convert/ios-message-stickers-template/stickers StickerPackExtension/Stickers.xcstickers/iMessage App Icon.stickersiconset/Messages-iPad-67x50pt@2x.png +0 -0
  57. sticker_convert/ios-message-stickers-template/stickers StickerPackExtension/Stickers.xcstickers/iMessage App Icon.stickersiconset/Messages-iPad-Pro-74x55pt@2x.png +0 -0
  58. sticker_convert/ios-message-stickers-template/stickers StickerPackExtension/Stickers.xcstickers/iMessage App Icon.stickersiconset/Messages-iPhone-60x45pt@2x.png +0 -0
  59. sticker_convert/ios-message-stickers-template/stickers StickerPackExtension/Stickers.xcstickers/iMessage App Icon.stickersiconset/Messages-iPhone-60x45pt@3x.png +0 -0
  60. sticker_convert/ios-message-stickers-template/stickers StickerPackExtension/Stickers.xcstickers/iMessage App Icon.stickersiconset/Messages27x20pt@2x.png +0 -0
  61. sticker_convert/ios-message-stickers-template/stickers StickerPackExtension/Stickers.xcstickers/iMessage App Icon.stickersiconset/Messages27x20pt@3x.png +0 -0
  62. sticker_convert/ios-message-stickers-template/stickers StickerPackExtension/Stickers.xcstickers/iMessage App Icon.stickersiconset/Messages32x24pt@2x.png +0 -0
  63. sticker_convert/ios-message-stickers-template/stickers StickerPackExtension/Stickers.xcstickers/iMessage App Icon.stickersiconset/Messages32x24pt@3x.png +0 -0
  64. sticker_convert/ios-message-stickers-template/stickers StickerPackExtension/Stickers.xcstickers/iMessage App Icon.stickersiconset/iPad-Settings-29pt@2x.png +0 -0
  65. sticker_convert/ios-message-stickers-template/stickers StickerPackExtension/Stickers.xcstickers/iMessage App Icon.stickersiconset/iPhone-Settings-29pt@3x.png +0 -0
  66. sticker_convert/ios-message-stickers-template/stickers StickerPackExtension/Stickers.xcstickers/iMessage App Icon.stickersiconset/iPhone-settings-29pt@2x.png +0 -0
  67. sticker_convert/ios-message-stickers-template/stickers.xcodeproj/project.pbxproj +364 -364
  68. sticker_convert/ios-message-stickers-template/stickers.xcodeproj/project.xcworkspace/contents.xcworkspacedata +7 -7
  69. sticker_convert/ios-message-stickers-template/stickers.xcodeproj/project.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist +8 -8
  70. sticker_convert/ios-message-stickers-template/stickers.xcodeproj/project.xcworkspace/xcuserdata/niklaspeterson.xcuserdatad/UserInterfaceState.xcuserstate +0 -0
  71. sticker_convert/ios-message-stickers-template/stickers.xcodeproj/xcuserdata/niklaspeterson.xcuserdatad/xcschemes/xcschememanagement.plist +14 -14
  72. sticker_convert/job.py +279 -179
  73. sticker_convert/job_option.py +15 -2
  74. sticker_convert/locales/en_US/LC_MESSAGES/base.mo +0 -0
  75. sticker_convert/locales/ja_JP/LC_MESSAGES/base.mo +0 -0
  76. sticker_convert/locales/zh_CN/LC_MESSAGES/base.mo +0 -0
  77. sticker_convert/locales/zh_TW/LC_MESSAGES/base.mo +0 -0
  78. sticker_convert/py.typed +0 -0
  79. sticker_convert/resources/NotoColorEmoji.ttf +0 -0
  80. sticker_convert/resources/compression.json +220 -16
  81. sticker_convert/resources/emoji.json +527 -77
  82. sticker_convert/resources/help.ja_JP.json +88 -0
  83. sticker_convert/resources/help.json +24 -10
  84. sticker_convert/resources/help.zh_CN.json +88 -0
  85. sticker_convert/resources/help.zh_TW.json +88 -0
  86. sticker_convert/resources/input.ja_JP.json +74 -0
  87. sticker_convert/resources/input.json +121 -71
  88. sticker_convert/resources/input.zh_CN.json +74 -0
  89. sticker_convert/resources/input.zh_TW.json +74 -0
  90. sticker_convert/resources/memdump_linux.sh +25 -0
  91. sticker_convert/resources/memdump_windows.ps1 +8 -0
  92. sticker_convert/resources/output.ja_JP.json +38 -0
  93. sticker_convert/resources/output.json +24 -0
  94. sticker_convert/resources/output.zh_CN.json +38 -0
  95. sticker_convert/resources/output.zh_TW.json +38 -0
  96. sticker_convert/uploaders/compress_wastickers.py +186 -156
  97. sticker_convert/uploaders/upload_base.py +44 -35
  98. sticker_convert/uploaders/upload_signal.py +218 -173
  99. sticker_convert/uploaders/upload_telegram.py +353 -388
  100. sticker_convert/uploaders/upload_viber.py +178 -0
  101. sticker_convert/uploaders/xcode_imessage.py +295 -285
  102. sticker_convert/utils/callback.py +238 -6
  103. sticker_convert/utils/chrome_remotedebug.py +219 -0
  104. sticker_convert/utils/chromiums/linux.py +52 -0
  105. sticker_convert/utils/chromiums/osx.py +68 -0
  106. sticker_convert/utils/chromiums/windows.py +45 -0
  107. sticker_convert/utils/emoji.py +28 -0
  108. sticker_convert/utils/files/json_resources_loader.py +24 -19
  109. sticker_convert/utils/files/metadata_handler.py +8 -7
  110. sticker_convert/utils/files/run_bin.py +1 -1
  111. sticker_convert/utils/media/codec_info.py +99 -67
  112. sticker_convert/utils/media/format_verify.py +33 -20
  113. sticker_convert/utils/process.py +231 -0
  114. sticker_convert/utils/translate.py +108 -0
  115. sticker_convert/utils/url_detect.py +40 -33
  116. sticker_convert/version.py +1 -1
  117. {sticker_convert-2.8.12.dist-info → sticker_convert-2.17.0.0.dist-info}/METADATA +189 -96
  118. sticker_convert-2.17.0.0.dist-info/RECORD +138 -0
  119. {sticker_convert-2.8.12.dist-info → sticker_convert-2.17.0.0.dist-info}/WHEEL +1 -1
  120. sticker_convert/utils/auth/get_signal_auth.py +0 -129
  121. sticker_convert-2.8.12.dist-info/RECORD +0 -101
  122. {sticker_convert-2.8.12.dist-info → sticker_convert-2.17.0.0.dist-info}/entry_points.txt +0 -0
  123. {sticker_convert-2.8.12.dist-info → sticker_convert-2.17.0.0.dist-info/licenses}/LICENSE +0 -0
  124. {sticker_convert-2.8.12.dist-info → sticker_convert-2.17.0.0.dist-info}/top_level.txt +0 -0
@@ -1,962 +1,1093 @@
1
- #!/usr/bin/env python3
2
- import os
3
- from fractions import Fraction
4
- from io import BytesIO
5
- from math import ceil, floor
6
- from pathlib import Path
7
- from typing import TYPE_CHECKING, Any, Dict, List, Literal, Optional, Tuple, Union, cast
8
-
9
- import numpy as np
10
- from PIL import Image
11
- from PIL import __version__ as PillowVersion
12
- from PIL import features
13
-
14
- from sticker_convert.job_option import CompOption
15
- from sticker_convert.utils.callback import CallbackProtocol, CallbackReturn
16
- from sticker_convert.utils.files.cache_store import CacheStore
17
- from sticker_convert.utils.media.codec_info import CodecInfo, rounding
18
- from sticker_convert.utils.media.format_verify import FormatVerify
19
-
20
- if TYPE_CHECKING:
21
- from av.video.frame import VideoFrame
22
- from av.video.plane import VideoPlane
23
-
24
- MSG_START_COMP = "[I] Start compressing {} -> {}"
25
- MSG_SKIP_COMP = "[S] Compatible file found, skip compress and just copy {} -> {}"
26
- MSG_COMP = (
27
- "[C] Compressing {} -> {} res={}x{}, "
28
- "quality={}, fps={}, color={} (step {}-{}-{})"
29
- )
30
- MSG_REDO_COMP = "[{}] Compressed {} -> {} but size {} {} limit {}, recompressing"
31
- MSG_DONE_COMP = "[S] Successful compression {} -> {} size {} (step {})"
32
- MSG_FAIL_COMP = (
33
- "[F] Failed Compression {} -> {}, "
34
- "cannot get below limit {} with lowest quality under current settings (Best size: {})"
35
- )
36
-
37
- YUV_RGB_MATRIX = np.array(
38
- [
39
- [1.164, 0.000, 1.793],
40
- [1.164, -0.213, -0.533],
41
- [1.164, 2.112, 0.000],
42
- ]
43
- )
44
-
45
- # Whether animated WebP is supported
46
- # See https://pillow.readthedocs.io/en/stable/handbook/image-file-formats.html#saving-sequences
47
- PIL_WEBP_ANIM = cast(bool, features.check("webp_anim")) # type: ignore
48
-
49
-
50
- def get_step_value(
51
- max_step: Optional[int],
52
- min_step: Optional[int],
53
- step: int,
54
- steps: int,
55
- power: float = 1.0,
56
- even: bool = False,
57
- ) -> Optional[int]:
58
- # Power should be between -1 and positive infinity
59
- # Smaller power = More 'importance' of the parameter
60
- # Power of 1 is linear relationship
61
- # e.g. fps has lower power -> Try not to reduce it early on
62
-
63
- if step > 0:
64
- factor = pow(step / steps, power)
65
- else:
66
- factor = 0
67
-
68
- if max_step is not None and min_step is not None:
69
- v = round((max_step - min_step) * step / steps * factor + min_step)
70
- if even is True and v % 2 == 1:
71
- return v + 1
72
- return v
73
- return None
74
-
75
-
76
- def useful_array(
77
- plane: "VideoPlane", bytes_per_pixel: int = 1, dtype: str = "uint8"
78
- ) -> "np.ndarray[Any, Any]":
79
- total_line_size = abs(plane.line_size)
80
- useful_line_size = plane.width * bytes_per_pixel
81
- arr: "np.ndarray[Any, Any]" = np.frombuffer(cast(bytes, plane), np.uint8)
82
- if total_line_size != useful_line_size:
83
- arr = arr.reshape(-1, total_line_size)[:, 0:useful_line_size].reshape(-1)
84
- return arr.view(np.dtype(dtype))
85
-
86
-
87
- def yuva_to_rgba(frame: "VideoFrame") -> "np.ndarray[Any, Any]":
88
- # https://stackoverflow.com/questions/72308308/converting-yuv-to-rgb-in-python-coefficients-work-with-array-dont-work-with-n
89
-
90
- width = frame.width
91
- height = frame.height
92
-
93
- y = useful_array(frame.planes[0]).reshape(height, width)
94
- u = useful_array(frame.planes[1]).reshape(height // 2, width // 2)
95
- v = useful_array(frame.planes[2]).reshape(height // 2, width // 2)
96
- a = useful_array(frame.planes[3]).reshape(height, width)
97
-
98
- u = u.repeat(2, axis=0).repeat(2, axis=1)
99
- v = v.repeat(2, axis=0).repeat(2, axis=1)
100
-
101
- y = y.reshape((y.shape[0], y.shape[1], 1))
102
- u = u.reshape((u.shape[0], u.shape[1], 1))
103
- v = v.reshape((v.shape[0], v.shape[1], 1))
104
- a = a.reshape((a.shape[0], a.shape[1], 1))
105
-
106
- yuv_array = np.concatenate((y, u, v), axis=2)
107
-
108
- yuv_array = yuv_array.astype(np.float32)
109
- yuv_array[:, :, 0] = (
110
- yuv_array[:, :, 0].clip(16, 235).astype(yuv_array.dtype) - 16 # type: ignore
111
- )
112
- yuv_array[:, :, 1:] = (
113
- yuv_array[:, :, 1:].clip(16, 240).astype(yuv_array.dtype) - 128 # type: ignore
114
- )
115
-
116
- rgb_array = np.matmul(yuv_array, YUV_RGB_MATRIX.T).clip(0, 255).astype("uint8")
117
-
118
- return np.concatenate((rgb_array, a), axis=2)
119
-
120
-
121
- class StickerConvert:
122
- def __init__(
123
- self,
124
- in_f: Union[Path, Tuple[Path, bytes]],
125
- out_f: Path,
126
- opt_comp: CompOption,
127
- cb: CallbackProtocol,
128
- # cb_return: CallbackReturn
129
- ) -> None:
130
- self.in_f: Union[bytes, Path]
131
- if isinstance(in_f, Path):
132
- self.in_f = in_f
133
- self.in_f_name = self.in_f.name
134
- self.in_f_path = in_f
135
- self.codec_info_orig = CodecInfo(self.in_f)
136
- else:
137
- self.in_f = in_f[1]
138
- self.in_f_name = Path(in_f[0]).name
139
- self.in_f_path = in_f[0]
140
- self.codec_info_orig = CodecInfo(in_f[1], Path(in_f[0]).suffix)
141
-
142
- valid_formats: List[str] = []
143
- for i in opt_comp.get_format():
144
- valid_formats.extend(i)
145
-
146
- valid_ext = False
147
- self.out_f = Path()
148
- if len(valid_formats) == 0 or Path(out_f).suffix in valid_formats:
149
- self.out_f = Path(out_f)
150
- valid_ext = True
151
-
152
- if not valid_ext:
153
- if self.codec_info_orig.is_animated or opt_comp.fake_vid:
154
- ext = opt_comp.format_vid[0]
155
- else:
156
- ext = opt_comp.format_img[0]
157
- self.out_f = out_f.with_suffix(ext)
158
-
159
- self.out_f_name: str = self.out_f.name
160
-
161
- self.cb = cb
162
- self.frames_raw: "List[np.ndarray[Any, Any]]" = []
163
- self.frames_processed: "List[np.ndarray[Any, Any]]" = []
164
- self.opt_comp: CompOption = opt_comp
165
- if not self.opt_comp.steps:
166
- self.opt_comp.steps = 1
167
-
168
- self.size: int = 0
169
- self.size_max: Optional[int] = None
170
- self.res_w: Optional[int] = None
171
- self.res_h: Optional[int] = None
172
- self.quality: Optional[int] = None
173
- self.fps: Optional[Fraction] = None
174
- self.color: Optional[int] = None
175
-
176
- self.bg_color: Optional[Tuple[int, int, int, int]] = None
177
- if self.opt_comp.bg_color:
178
- r, g, b, a = bytes.fromhex(self.opt_comp.bg_color)
179
- self.bg_color = (r, g, b, a)
180
-
181
- self.tmp_f: BytesIO = BytesIO()
182
- self.result: Optional[bytes] = None
183
- self.result_size: int = 0
184
- self.result_step: Optional[int] = None
185
-
186
- self.apngasm = None
187
-
188
- @staticmethod
189
- def convert(
190
- in_f: Union[Path, Tuple[Path, bytes]],
191
- out_f: Path,
192
- opt_comp: CompOption,
193
- cb: CallbackProtocol,
194
- _cb_return: CallbackReturn,
195
- ) -> Tuple[bool, Path, Union[None, bytes, Path], int]:
196
- sticker = StickerConvert(in_f, out_f, opt_comp, cb)
197
- result = sticker._convert()
198
- cb.put("update_bar")
199
- return result
200
-
201
- def _convert(self) -> Tuple[bool, Path, Union[None, bytes, Path], int]:
202
- result = self.check_if_compatible()
203
- if result:
204
- return self.compress_done(result)
205
-
206
- self.cb.put((MSG_START_COMP.format(self.in_f_name, self.out_f_name)))
207
-
208
- steps_list = self.generate_steps_list()
209
-
210
- step_lower = 0
211
- step_upper = self.opt_comp.steps
212
-
213
- if self.codec_info_orig.is_animated is True:
214
- self.size_max = self.opt_comp.size_max_vid
215
- else:
216
- self.size_max = self.opt_comp.size_max_img
217
-
218
- if self.size_max in (None, 0):
219
- # No limit to size, create the best quality result
220
- step_current = 0
221
- else:
222
- step_current = int(rounding((step_lower + step_upper) / 2))
223
-
224
- self.frames_import()
225
- while True:
226
- param = steps_list[step_current]
227
- self.res_w = param[0]
228
- self.res_h = param[1]
229
- self.quality = param[2]
230
- if param[3] and self.codec_info_orig.fps:
231
- fps_tmp = min(param[3], self.codec_info_orig.fps)
232
- self.fps = self.fix_fps(fps_tmp)
233
- else:
234
- self.fps = Fraction(0)
235
- self.color = param[4]
236
-
237
- self.tmp_f = BytesIO()
238
- msg = MSG_COMP.format(
239
- self.in_f_name,
240
- self.out_f_name,
241
- self.res_w,
242
- self.res_h,
243
- self.quality,
244
- int(self.fps),
245
- self.color,
246
- step_lower,
247
- step_current,
248
- step_upper,
249
- )
250
- self.cb.put(msg)
251
-
252
- self.frames_processed = self.frames_drop(self.frames_raw)
253
- self.frames_processed = self.frames_resize(self.frames_processed)
254
- self.frames_export()
255
-
256
- self.tmp_f.seek(0)
257
- self.size = self.tmp_f.getbuffer().nbytes
258
-
259
- if not self.size_max or (
260
- self.size <= self.size_max and self.size >= self.result_size
261
- ):
262
- self.result = self.tmp_f.read()
263
- self.result_size = self.size
264
- self.result_step = step_current
265
-
266
- if (
267
- step_upper - step_lower > 0
268
- and step_current != step_lower
269
- and self.size_max
270
- ):
271
- if self.size <= self.size_max:
272
- sign = "<"
273
- step_upper = step_current
274
- else:
275
- sign = ">"
276
- step_lower = step_current
277
- if step_current == step_lower + 1:
278
- step_current = step_lower
279
- else:
280
- step_current = int(rounding((step_lower + step_upper) / 2))
281
- self.recompress(sign)
282
- elif self.result:
283
- return self.compress_done(self.result, self.result_step)
284
- else:
285
- return self.compress_fail()
286
-
287
- def check_if_compatible(self) -> Optional[bytes]:
288
- if (
289
- FormatVerify.check_format(
290
- self.in_f,
291
- fmt=self.opt_comp.get_format(),
292
- file_info=self.codec_info_orig,
293
- )
294
- and FormatVerify.check_file_res(
295
- self.in_f, res=self.opt_comp.get_res(), file_info=self.codec_info_orig
296
- )
297
- and FormatVerify.check_file_fps(
298
- self.in_f, fps=self.opt_comp.get_fps(), file_info=self.codec_info_orig
299
- )
300
- and FormatVerify.check_file_size(
301
- self.in_f,
302
- size=self.opt_comp.get_size_max(),
303
- file_info=self.codec_info_orig,
304
- )
305
- and FormatVerify.check_file_duration(
306
- self.in_f,
307
- duration=self.opt_comp.get_duration(),
308
- file_info=self.codec_info_orig,
309
- )
310
- ):
311
- self.cb.put((MSG_SKIP_COMP.format(self.in_f_name, self.out_f_name)))
312
-
313
- if isinstance(self.in_f, Path):
314
- with open(self.in_f, "rb") as f:
315
- result = f.read()
316
- self.result_size = os.path.getsize(self.in_f)
317
- else:
318
- result = self.in_f
319
- self.result_size = len(self.in_f)
320
-
321
- return result
322
-
323
- return None
324
-
325
- def generate_steps_list(self) -> List[Tuple[Optional[int], ...]]:
326
- steps_list: List[Tuple[Optional[int], ...]] = []
327
- for step in range(self.opt_comp.steps, -1, -1):
328
- steps_list.append(
329
- (
330
- get_step_value(
331
- self.opt_comp.res_w_max,
332
- self.opt_comp.res_w_min,
333
- step,
334
- self.opt_comp.steps,
335
- self.opt_comp.res_power,
336
- True,
337
- ),
338
- get_step_value(
339
- self.opt_comp.res_h_max,
340
- self.opt_comp.res_h_min,
341
- step,
342
- self.opt_comp.steps,
343
- self.opt_comp.res_power,
344
- True,
345
- ),
346
- get_step_value(
347
- self.opt_comp.quality_max,
348
- self.opt_comp.quality_min,
349
- step,
350
- self.opt_comp.steps,
351
- self.opt_comp.quality_power,
352
- ),
353
- get_step_value(
354
- self.opt_comp.fps_max,
355
- self.opt_comp.fps_min,
356
- step,
357
- self.opt_comp.steps,
358
- self.opt_comp.fps_power,
359
- ),
360
- get_step_value(
361
- self.opt_comp.color_max,
362
- self.opt_comp.color_min,
363
- step,
364
- self.opt_comp.steps,
365
- self.opt_comp.color_power,
366
- ),
367
- )
368
- )
369
-
370
- return steps_list
371
-
372
- def recompress(self, sign: str) -> None:
373
- msg = MSG_REDO_COMP.format(
374
- sign, self.in_f_name, self.out_f_name, self.size, sign, self.size_max
375
- )
376
- self.cb.put(msg)
377
-
378
- def compress_fail(
379
- self,
380
- ) -> Tuple[bool, Path, Union[None, bytes, Path], int]:
381
- msg = MSG_FAIL_COMP.format(
382
- self.in_f_name, self.out_f_name, self.size_max, self.size
383
- )
384
- self.cb.put(msg)
385
-
386
- return False, self.in_f_path, self.out_f, self.size
387
-
388
- def compress_done(
389
- self, data: bytes, result_step: Optional[int] = None
390
- ) -> Tuple[bool, Path, Union[None, bytes, Path], int]:
391
- out_f: Union[None, bytes, Path]
392
-
393
- if self.out_f.stem == "none":
394
- out_f = None
395
- elif self.out_f.stem == "bytes":
396
- out_f = data
397
- else:
398
- out_f = self.out_f
399
- with open(self.out_f, "wb+") as f:
400
- f.write(data)
401
-
402
- if result_step is not None:
403
- msg = MSG_DONE_COMP.format(
404
- self.in_f_name, self.out_f_name, self.result_size, result_step
405
- )
406
- self.cb.put(msg)
407
-
408
- return True, self.in_f_path, out_f, self.result_size
409
-
410
- def frames_import(self) -> None:
411
- if isinstance(self.in_f, Path):
412
- suffix = self.in_f.suffix
413
- else:
414
- suffix = Path(self.in_f_name).suffix
415
-
416
- if suffix in (".tgs", ".lottie", ".json"):
417
- self._frames_import_lottie()
418
- elif suffix in (".webp", ".apng", ".png", ".gif"):
419
- # ffmpeg do not support webp decoding (yet)
420
- # ffmpeg could fail to decode apng if file is buggy
421
- self._frames_import_pillow()
422
- else:
423
- self._frames_import_pyav()
424
-
425
- def _frames_import_pillow(self) -> None:
426
- with Image.open(self.in_f) as im:
427
- # Note: im.convert("RGBA") would return rgba image of current frame only
428
- if (
429
- "n_frames" in dir(im)
430
- and im.n_frames != 0
431
- and self.codec_info_orig.fps != 0.0
432
- ):
433
- # Pillow is not reliable for getting webp frame durations
434
- durations: Optional[List[int]]
435
- if im.format == "WEBP":
436
- _, _, _, durations = CodecInfo._get_file_fps_frames_duration_webp(
437
- self.in_f
438
- )
439
- else:
440
- durations = None
441
-
442
- duration_ptr = 0.0
443
- duration_inc = 1 / self.codec_info_orig.fps * 1000
444
- frame = 0
445
- if durations is None:
446
- next_frame_start_duration = cast(int, im.info.get("duration", 1000))
447
- else:
448
- next_frame_start_duration = durations[0]
449
- while True:
450
- self.frames_raw.append(np.asarray(im.convert("RGBA")))
451
- duration_ptr += duration_inc
452
- if duration_ptr >= next_frame_start_duration:
453
- frame += 1
454
- if frame == im.n_frames:
455
- break
456
- im.seek(frame)
457
-
458
- if durations is None:
459
- next_frame_start_duration += cast(
460
- int, im.info.get("duration", 1000)
461
- )
462
- else:
463
- next_frame_start_duration += durations[frame]
464
- else:
465
- self.frames_raw.append(np.asarray(im.convert("RGBA")))
466
-
467
- def _frames_import_pyav(self) -> None:
468
- import av
469
- from av.codec.context import CodecContext
470
- from av.container.input import InputContainer
471
- from av.video.codeccontext import VideoCodecContext
472
- from av.video.frame import VideoFrame
473
-
474
- # Crashes when handling some webm in yuv420p and convert to rgba
475
- # https://github.com/PyAV-Org/PyAV/issues/1166
476
- file: Union[BytesIO, str]
477
- if isinstance(self.in_f, Path):
478
- file = self.in_f.as_posix()
479
- else:
480
- file = BytesIO(self.in_f)
481
- with av.open(file) as container:
482
- container = cast(InputContainer, container)
483
- context = container.streams.video[0].codec_context
484
- if context.name == "vp8":
485
- context = cast(VideoCodecContext, CodecContext.create("libvpx", "r"))
486
- elif context.name == "vp9":
487
- context = cast(
488
- VideoCodecContext, CodecContext.create("libvpx-vp9", "r")
489
- )
490
-
491
- for packet in container.demux(container.streams.video):
492
- for frame in context.decode(packet):
493
- width_orig = frame.width
494
- height_orig = frame.height
495
-
496
- # Need to pad frame to even dimension first
497
- if width_orig % 2 == 1 or height_orig % 2 == 1:
498
- from av.filter import Graph
499
-
500
- width_new = width_orig + width_orig % 2
501
- height_new = height_orig + height_orig % 2
502
-
503
- graph = Graph()
504
- in_src = graph.add_buffer(template=container.streams.video[0])
505
- pad = graph.add(
506
- "pad", f"{width_new}:{height_new}:0:0:color=#00000000"
507
- )
508
- in_src.link_to(pad)
509
- sink = graph.add("buffersink")
510
- pad.link_to(sink)
511
- graph.configure()
512
-
513
- graph.push(frame)
514
- frame_resized = cast(VideoFrame, graph.pull())
515
- else:
516
- frame_resized = frame
517
-
518
- if frame_resized.format.name == "yuv420p":
519
- rgb_array = frame_resized.to_ndarray(format="rgb24")
520
- rgba_array = np.dstack(
521
- (
522
- rgb_array,
523
- np.zeros(rgb_array.shape[:2], dtype=np.uint8) + 255,
524
- )
525
- )
526
- else:
527
- # yuva420p may cause crash
528
- # Not safe to directly call frame.to_ndarray(format="rgba")
529
- # https://github.com/laggykiller/sticker-convert/issues/114
530
- frame_resized = frame_resized.reformat(
531
- format="yuva420p",
532
- dst_colorspace=1,
533
- )
534
- rgba_array = yuva_to_rgba(frame_resized)
535
-
536
- # Remove pixels that was added to make dimensions even
537
- rgba_array = rgba_array[0:width_orig, 0:height_orig]
538
- self.frames_raw.append(rgba_array)
539
-
540
- def _frames_import_lottie(self) -> None:
541
- from rlottie_python.rlottie_wrapper import LottieAnimation
542
-
543
- if isinstance(self.in_f, Path):
544
- suffix = self.in_f.suffix
545
- else:
546
- suffix = Path(self.in_f_name).suffix
547
-
548
- if suffix == ".tgs":
549
- if isinstance(self.in_f, Path):
550
- anim = LottieAnimation.from_tgs(self.in_f.as_posix())
551
- else:
552
- import gzip
553
-
554
- with gzip.open(BytesIO(self.in_f)) as f:
555
- data = f.read().decode(encoding="utf-8")
556
- anim = LottieAnimation.from_data(data)
557
- else:
558
- if isinstance(self.in_f, Path):
559
- anim = LottieAnimation.from_file(self.in_f.as_posix())
560
- else:
561
- anim = LottieAnimation.from_data(self.in_f.decode("utf-8"))
562
-
563
- for i in range(anim.lottie_animation_get_totalframe()):
564
- frame = np.asarray(anim.render_pillow_frame(frame_num=i))
565
- self.frames_raw.append(frame)
566
-
567
- anim.lottie_animation_destroy()
568
-
569
- def determine_bg_color(self) -> Tuple[int, int, int, int]:
570
- mean_total = 0.0
571
- # Calculate average color of all frames for selecting background color
572
- for frame in self.frames_raw:
573
- s = frame.shape
574
- colors = frame.reshape((-1, s[2])) # type: ignore
575
- # Do not count in alpha=0
576
- # If alpha > 0, use alpha as weight
577
- colors = colors[colors[:, 3] != 0]
578
- if colors.shape[0] != 0:
579
- alphas = colors[:, 3] / 255
580
- r_mean = np.mean(colors[:, 0] * alphas)
581
- g_mean = np.mean(colors[:, 1] * alphas)
582
- b_mean = np.mean(colors[:, 2] * alphas)
583
- mean_total += (r_mean + g_mean + b_mean) / 3
584
-
585
- if mean_total / len(self.frames_raw) < 128:
586
- return (255, 255, 255, 0)
587
- else:
588
- return (0, 0, 0, 0)
589
-
590
- def frames_resize(
591
- self, frames_in: "List[np.ndarray[Any, Any]]"
592
- ) -> "List[np.ndarray[Any, Any]]":
593
- frames_out: "List[np.ndarray[Any, Any]]" = []
594
-
595
- resample: Literal[0, 1, 2, 3, 4, 5]
596
- if self.opt_comp.scale_filter == "nearest":
597
- resample = Image.NEAREST
598
- elif self.opt_comp.scale_filter == "box":
599
- resample = Image.BOX
600
- elif self.opt_comp.scale_filter == "bilinear":
601
- resample = Image.BILINEAR
602
- elif self.opt_comp.scale_filter == "hamming":
603
- resample = Image.HAMMING
604
- elif self.opt_comp.scale_filter == "bicubic":
605
- resample = Image.BICUBIC
606
- elif self.opt_comp.scale_filter == "lanczos":
607
- resample = Image.LANCZOS
608
- else:
609
- resample = Image.BICUBIC
610
-
611
- if self.bg_color is None:
612
- self.bg_color = self.determine_bg_color()
613
-
614
- for frame in frames_in:
615
- with Image.fromarray(frame, "RGBA") as im: # type: ignore
616
- width, height = im.size
617
-
618
- if self.res_w is None:
619
- self.res_w = width
620
- if self.res_h is None:
621
- self.res_h = height
622
-
623
- scaling = 1 - (self.opt_comp.padding_percent / 100)
624
- if width > height:
625
- width_new = int(self.res_w * scaling)
626
- height_new = int(height * self.res_w // width * scaling)
627
- else:
628
- height_new = int(self.res_h * scaling)
629
- width_new = int(width * self.res_h // height * scaling)
630
-
631
- with im.resize((width_new, height_new), resample=resample) as im_resized:
632
- with Image.new(
633
- "RGBA", (self.res_w, self.res_h), self.bg_color
634
- ) as im_new:
635
- im_new.alpha_composite(
636
- im_resized,
637
- ((self.res_w - width_new) // 2, (self.res_h - height_new) // 2),
638
- )
639
- frames_out.append(np.asarray(im_new))
640
-
641
- return frames_out
642
-
643
- def frames_drop(
644
- self, frames_in: "List[np.ndarray[Any, Any]]"
645
- ) -> "List[np.ndarray[Any, Any]]":
646
- if (
647
- not self.codec_info_orig.is_animated
648
- or not self.fps
649
- or len(self.frames_processed) == 1
650
- ):
651
- return [frames_in[0]]
652
-
653
- frames_out: "List[np.ndarray[Any, Any]]" = []
654
-
655
- # fps_ratio: 1 frame in new anim equal to how many frame in old anim
656
- # speed_ratio: How much to speed up / slow down
657
- fps_ratio = self.codec_info_orig.fps / self.fps
658
- if (
659
- self.opt_comp.duration_min
660
- and self.codec_info_orig.duration < self.opt_comp.duration_min
661
- ):
662
- speed_ratio = self.codec_info_orig.duration / self.opt_comp.duration_min
663
- elif (
664
- self.opt_comp.duration_max
665
- and self.codec_info_orig.duration > self.opt_comp.duration_max
666
- ):
667
- speed_ratio = self.codec_info_orig.duration / self.opt_comp.duration_max
668
- else:
669
- speed_ratio = 1
670
-
671
- # How many frames to advance in original video for each frame of output video
672
- frame_increment = fps_ratio * speed_ratio
673
-
674
- frames_out_min = None
675
- frames_out_max = None
676
- if self.opt_comp.duration_min:
677
- frames_out_min = ceil(self.fps * self.opt_comp.duration_min / 1000)
678
- if self.opt_comp.duration_max:
679
- frames_out_max = floor(self.fps * self.opt_comp.duration_max / 1000)
680
-
681
- frame_current = 0
682
- frame_current_float = 0.0
683
- while True:
684
- if frame_current <= len(frames_in) - 1 and not (
685
- frames_out_max and len(frames_out) == frames_out_max
686
- ):
687
- frames_out.append(frames_in[frame_current])
688
- else:
689
- while len(frames_out) == 0 or (
690
- frames_out_min and len(frames_out) < frames_out_min
691
- ):
692
- frames_out.append(frames_in[-1])
693
- return frames_out
694
- frame_current_float += frame_increment
695
- frame_current = int(rounding(frame_current_float))
696
-
697
- def frames_export(self) -> None:
698
- is_animated = len(self.frames_processed) > 1 and self.fps
699
- if self.out_f.suffix in (".apng", ".png"):
700
- if is_animated:
701
- self._frames_export_apng()
702
- else:
703
- self._frames_export_png()
704
- elif self.out_f.suffix == ".gif":
705
- self._frames_export_pil_anim()
706
- elif self.out_f.suffix in (".webm", ".mp4", ".mkv", ".webp") or is_animated:
707
- self._frames_export_pyav()
708
- else:
709
- self._frames_export_pil()
710
-
711
- def _check_dup(self) -> bool:
712
- if len(self.frames_processed) == 1:
713
- return False
714
-
715
- prev_frame = self.frames_processed[0]
716
- for frame in self.frames_processed[1:]:
717
- if np.array_equal(frame, prev_frame):
718
- return True
719
- prev_frame = frame
720
-
721
- return False
722
-
723
- def _frames_export_pil(self) -> None:
724
- with Image.fromarray(self.frames_processed[0]) as im: # type: ignore
725
- im.save(
726
- self.tmp_f,
727
- format=self.out_f.suffix.replace(".", ""),
728
- quality=self.quality,
729
- )
730
-
731
- def _frames_export_pyav(self) -> None:
732
- import av
733
- from av.video.stream import VideoStream
734
-
735
- options_container: Dict[str, str] = {}
736
- options_stream: Dict[str, str] = {}
737
-
738
- if isinstance(self.quality, int):
739
- # Seems not actually working
740
- options_stream["quality"] = str(self.quality)
741
- options_stream["lossless"] = "0"
742
-
743
- if self.out_f.suffix in (".apng", ".png"):
744
- codec = "apng"
745
- pixel_format = "rgba"
746
- options_stream["plays"] = "0"
747
- elif self.out_f.suffix in (".webm", ".mkv"):
748
- codec = "libvpx-vp9"
749
- pixel_format = "yuva420p"
750
- options_stream["loop"] = "0"
751
- elif self.out_f.suffix == ".webp":
752
- codec = "webp"
753
- pixel_format = "yuva420p"
754
- options_container["loop"] = "0"
755
- else:
756
- codec = "libvpx-vp9"
757
- pixel_format = "yuv420p"
758
- options_stream["loop"] = "0"
759
-
760
- with av.open(
761
- self.tmp_f,
762
- "w",
763
- format=self.out_f.suffix.replace(".", ""),
764
- options=options_container,
765
- ) as output:
766
- out_stream = output.add_stream(codec, rate=self.fps, options=options_stream)
767
- out_stream = cast(VideoStream, out_stream)
768
- assert isinstance(self.res_w, int) and isinstance(self.res_h, int)
769
- out_stream.width = self.res_w
770
- out_stream.height = self.res_h
771
- out_stream.pix_fmt = pixel_format
772
-
773
- for frame in self.frames_processed:
774
- av_frame = av.VideoFrame.from_ndarray(frame, format="rgba")
775
- output.mux(out_stream.encode(av_frame))
776
- output.mux(out_stream.encode())
777
-
778
- def _frames_export_pil_anim(self) -> None:
779
- extra_kwargs: Dict[str, Any] = {}
780
-
781
- # disposal=2 on gif cause flicker in image with transparency
782
- # Occurs in Pillow == 10.2.0
783
- # https://github.com/python-pillow/Pillow/issues/7787
784
- if PillowVersion == "10.2.0":
785
- extra_kwargs["optimize"] = False
786
- else:
787
- extra_kwargs["optimize"] = True
788
-
789
- if self.out_f.suffix == ".gif":
790
- # GIF can only have one alpha color
791
- # Change lowest alpha to alpha=0
792
- # Only keep alpha=0 and alpha=255, nothing in between
793
- extra_kwargs["format"] = "GIF"
794
- frames_processed = np.array(self.frames_processed)
795
- alpha = frames_processed[:, :, :, 3]
796
- alpha_min = np.min(alpha) # type: ignore
797
- if alpha_min < 255:
798
- alpha[alpha > alpha_min] = 255
799
- alpha[alpha == alpha_min] = 0
800
-
801
- if 0 in alpha:
802
- extra_kwargs["transparency"] = 0
803
- extra_kwargs["disposal"] = 2
804
- im_out = [self.quantize(Image.fromarray(i)) for i in frames_processed] # type: ignore
805
- else:
806
- im_out = [
807
- self.quantize(Image.fromarray(i).convert("RGB")).convert("RGB") # type: ignore
808
- for i in frames_processed
809
- ]
810
- elif self.out_f.suffix == ".webp":
811
- im_out = [Image.fromarray(i) for i in self.frames_processed] # type: ignore
812
- extra_kwargs["format"] = "WebP"
813
- extra_kwargs["minimize_size"] = True
814
- extra_kwargs["method"] = 6
815
- else:
816
- raise RuntimeError(f"Invalid format {self.out_f.suffix}")
817
-
818
- if self.fps:
819
- extra_kwargs["save_all"] = True
820
- extra_kwargs["append_images"] = im_out[1:]
821
- extra_kwargs["duration"] = int(1000 / self.fps)
822
- extra_kwargs["loop"] = 0
823
-
824
- im_out[0].save(
825
- self.tmp_f,
826
- quality=self.quality,
827
- **extra_kwargs,
828
- )
829
-
830
- def _frames_export_png(self) -> None:
831
- with Image.fromarray(self.frames_processed[0], "RGBA") as image: # type: ignore
832
- image_quant = self.quantize(image)
833
-
834
- with BytesIO() as f:
835
- image_quant.save(f, format="png")
836
- f.seek(0)
837
- frame_optimized = self.optimize_png(f.read())
838
- self.tmp_f.write(frame_optimized)
839
-
840
- def _frames_export_apng(self) -> None:
841
- from apngasm_python._apngasm_python import APNGAsm, create_frame_from_rgba # type: ignore
842
-
843
- assert self.fps
844
- assert self.res_h
845
-
846
- frames_concat = np.concatenate(self.frames_processed)
847
- with Image.fromarray(frames_concat, "RGBA") as image_concat: # type: ignore
848
- image_quant = self.quantize(image_concat)
849
-
850
- if self.apngasm is None:
851
- self.apngasm = APNGAsm() # type: ignore
852
- assert isinstance(self.apngasm, APNGAsm)
853
-
854
- delay_num = int(1000 / self.fps)
855
- for i in range(0, image_quant.height, self.res_h):
856
- with BytesIO() as f:
857
- crop_dimension = (0, i, image_quant.width, i + self.res_h)
858
- image_cropped = image_quant.crop(crop_dimension)
859
- image_cropped.save(f, format="png")
860
- f.seek(0)
861
- frame_optimized = self.optimize_png(f.read())
862
- with Image.open(BytesIO(frame_optimized)) as im:
863
- image_final = im.convert("RGBA")
864
- frame_final = create_frame_from_rgba(
865
- np.array(image_final),
866
- width=image_final.width,
867
- height=image_final.height,
868
- delay_num=delay_num,
869
- delay_den=1000,
870
- )
871
- self.apngasm.add_frame(frame_final)
872
-
873
- with CacheStore.get_cache_store(path=self.opt_comp.cache_dir) as tempdir:
874
- tmp_apng = Path(tempdir, f"out{self.out_f.suffix}")
875
- self.apngasm.assemble(tmp_apng.as_posix())
876
-
877
- with open(tmp_apng, "rb") as f:
878
- self.tmp_f.write(f.read())
879
-
880
- self.apngasm.reset()
881
-
882
- def optimize_png(self, image_bytes: bytes) -> bytes:
883
- import oxipng
884
-
885
- return oxipng.optimize_from_memory(
886
- image_bytes,
887
- level=4,
888
- fix_errors=True,
889
- filter=[oxipng.RowFilter.Brute],
890
- optimize_alpha=True,
891
- strip=oxipng.StripChunks.safe(),
892
- )
893
-
894
- def quantize(self, image: Image.Image) -> Image.Image:
895
- if not (self.color and self.color <= 256):
896
- return image.copy()
897
- if self.opt_comp.quantize_method == "imagequant":
898
- return self._quantize_by_imagequant(image)
899
- if self.opt_comp.quantize_method == "fastoctree":
900
- return self._quantize_by_fastoctree(image)
901
-
902
- return image
903
-
904
- def _quantize_by_imagequant(self, image: Image.Image) -> Image.Image:
905
- import imagequant # type: ignore
906
-
907
- assert self.quality
908
- assert self.opt_comp.quality_min
909
- assert self.opt_comp.quality_max
910
- assert self.color
911
-
912
- dither = 1 - (self.quality - self.opt_comp.quality_min) / (
913
- self.opt_comp.quality_max - self.opt_comp.quality_min
914
- )
915
- image_quant = None
916
- for i in range(self.quality, 101, 5):
917
- try:
918
- image_quant = imagequant.quantize_pil_image( # type: ignore
919
- image,
920
- dithering_level=dither,
921
- max_colors=self.color,
922
- min_quality=self.opt_comp.quality_min,
923
- max_quality=i,
924
- )
925
- return image_quant
926
- except RuntimeError:
927
- pass
928
-
929
- return image
930
-
931
- def _quantize_by_fastoctree(self, image: Image.Image) -> Image.Image:
932
- assert self.color
933
-
934
- return image.quantize(colors=self.color, method=2)
935
-
936
- def fix_fps(self, fps: float) -> Fraction:
937
- # After rounding fps/duration during export,
938
- # Video duration may exceed limit.
939
- # Hence we need to 'fix' the fps
940
- if self.out_f.suffix == ".gif":
941
- # Quote from https://www.w3.org/Graphics/GIF/spec-gif89a.txt
942
- # vii) Delay Time - If not 0, this field specifies
943
- # the number of hundredths (1/100) of a second
944
- #
945
- # For GIF, we need to adjust fps such that delay is matching to hundreths of second
946
- return self._fix_fps_duration(fps, 100)
947
- if self.out_f.suffix in (".webp", ".apng", ".png"):
948
- return self._fix_fps_duration(fps, 1000)
949
-
950
- return self._fix_fps_pyav(fps)
951
-
952
- def _fix_fps_duration(self, fps: float, denominator: int) -> Fraction:
953
- delay = int(rounding(denominator / fps))
954
- fps_fraction = Fraction(denominator, delay)
955
- if self.opt_comp.fps_max and fps_fraction > self.opt_comp.fps_max:
956
- return Fraction(denominator, (delay + 1))
957
- if self.opt_comp.fps_min and fps_fraction < self.opt_comp.fps_min:
958
- return Fraction(denominator, (delay - 1))
959
- return fps_fraction
960
-
961
- def _fix_fps_pyav(self, fps: float) -> Fraction:
962
- return Fraction(rounding(fps))
1
+ #!/usr/bin/env python3
2
+ import json
3
+ import os
4
+ from fractions import Fraction
5
+ from io import BytesIO
6
+ from math import ceil, floor, log2
7
+ from pathlib import Path
8
+ from typing import TYPE_CHECKING, Any, Dict, List, Literal, Optional, Tuple, Union, cast
9
+
10
+ import numpy as np
11
+ from bs4 import BeautifulSoup
12
+ from PIL import Image
13
+ from PIL import __version__ as PillowVersion
14
+ from PIL import features
15
+
16
+ from sticker_convert.definitions import RUNTIME_STATE
17
+ from sticker_convert.job_option import CompOption
18
+ from sticker_convert.utils.callback import CallbackProtocol, CallbackReturn
19
+ from sticker_convert.utils.chrome_remotedebug import CRD
20
+ from sticker_convert.utils.files.cache_store import CacheStore
21
+ from sticker_convert.utils.media.codec_info import CodecInfo, rounding
22
+ from sticker_convert.utils.media.format_verify import FormatVerify
23
+ from sticker_convert.utils.translate import get_translator
24
+
25
+ I = get_translator() # noqa: E741
26
+
27
+ if TYPE_CHECKING:
28
+ from av.video.frame import VideoFrame
29
+ from av.video.plane import VideoPlane
30
+
31
+ YUV_RGB_MATRIX = np.array(
32
+ [
33
+ [1.164, 0.000, 1.793],
34
+ [1.164, -0.213, -0.533],
35
+ [1.164, 2.112, 0.000],
36
+ ]
37
+ )
38
+
39
+ # Whether animated WebP is supported
40
+ # See https://pillow.readthedocs.io/en/stable/handbook/image-file-formats.html#saving-sequences
41
+ PIL_WEBP_ANIM = cast(bool, features.check("webp_anim")) # type: ignore
42
+
43
+
44
+ def get_step_value(
45
+ max_step: Optional[int],
46
+ min_step: Optional[int],
47
+ step: int,
48
+ steps: int,
49
+ power: float = 1.0,
50
+ even: bool = False,
51
+ snap_pow2: bool = False,
52
+ ) -> Optional[int]:
53
+ # Power should be between -1 and positive infinity
54
+ # Smaller power = More 'importance' of the parameter
55
+ # Power of 1 is linear relationship
56
+ # e.g. fps has lower power -> Try not to reduce it early on
57
+
58
+ if step > 0:
59
+ factor = pow(step / steps, power)
60
+ else:
61
+ factor = 0
62
+
63
+ if max_step is not None and min_step is not None:
64
+ v = round((max_step - min_step) * step / steps * factor + min_step)
65
+ if snap_pow2 is True and floor(log2(max_step)) >= ceil(log2(min_step)):
66
+ lower_exp = max(floor(log2(v)), ceil(log2(min_step)))
67
+ lower_pow2 = 2**lower_exp
68
+ upper_exp = min(ceil(log2(v)), floor(log2(max_step)))
69
+ upper_pow2 = 2**upper_exp
70
+ if abs(v - lower_pow2) <= abs(v - upper_pow2):
71
+ return lower_pow2
72
+ else:
73
+ return upper_pow2
74
+ if even is True and v % 2 == 1:
75
+ return v + 1
76
+ return v
77
+ return None
78
+
79
+
80
+ def useful_array(
81
+ plane: "VideoPlane", bytes_per_pixel: int = 1, dtype: str = "uint8"
82
+ ) -> "np.ndarray[Any, Any]":
83
+ total_line_size = abs(plane.line_size)
84
+ useful_line_size = plane.width * bytes_per_pixel
85
+ arr: "np.ndarray[Any, Any]" = np.frombuffer(cast(bytes, plane), np.uint8)
86
+ if total_line_size != useful_line_size:
87
+ arr = arr.reshape(-1, total_line_size)[:, 0:useful_line_size].reshape(-1)
88
+ return arr.view(np.dtype(dtype))
89
+
90
+
91
+ def yuva_to_rgba(frame: "VideoFrame") -> "np.ndarray[Any, Any]":
92
+ # https://stackoverflow.com/questions/72308308/converting-yuv-to-rgb-in-python-coefficients-work-with-array-dont-work-with-n
93
+
94
+ width = frame.width
95
+ height = frame.height
96
+
97
+ y = useful_array(frame.planes[0]).reshape(height, width)
98
+ u = useful_array(frame.planes[1]).reshape(height // 2, width // 2)
99
+ v = useful_array(frame.planes[2]).reshape(height // 2, width // 2)
100
+ a = useful_array(frame.planes[3]).reshape(height, width)
101
+
102
+ u = u.repeat(2, axis=0).repeat(2, axis=1) # type: ignore
103
+ v = v.repeat(2, axis=0).repeat(2, axis=1) # type: ignore
104
+
105
+ y = y.reshape((y.shape[0], y.shape[1], 1)) # type: ignore
106
+ u = u.reshape((u.shape[0], u.shape[1], 1)) # type: ignore
107
+ v = v.reshape((v.shape[0], v.shape[1], 1)) # type: ignore
108
+ a = a.reshape((a.shape[0], a.shape[1], 1)) # type: ignore
109
+
110
+ yuv_array = np.concatenate((y, u, v), axis=2)
111
+
112
+ yuv_array = yuv_array.astype(np.float32)
113
+ yuv_array[:, :, 0] = (
114
+ yuv_array[:, :, 0].clip(16, 235).astype(yuv_array.dtype) - 16 # type: ignore
115
+ )
116
+ yuv_array[:, :, 1:] = (
117
+ yuv_array[:, :, 1:].clip(16, 240).astype(yuv_array.dtype) - 128 # type: ignore
118
+ )
119
+
120
+ rgb_array = np.matmul(yuv_array, YUV_RGB_MATRIX.T).clip(0, 255).astype("uint8")
121
+
122
+ return np.concatenate((rgb_array, a), axis=2)
123
+
124
+
125
+ class StickerConvert:
126
+ def __init__(
127
+ self,
128
+ in_f: Union[Path, Tuple[Path, bytes]],
129
+ out_f: Path,
130
+ opt_comp: CompOption,
131
+ cb: CallbackProtocol,
132
+ # cb_return: CallbackReturn
133
+ ) -> None:
134
+ self.MSG_START_COMP = I("[I] Start compressing {} -> {}")
135
+ self.MSG_SKIP_COMP = I(
136
+ "[S] Compatible file found, skip compress and just copy {} -> {}"
137
+ )
138
+ self.MSG_COMP = I(
139
+ "[C] Compressing {} -> {} res={}x{}, quality={}, fps={}, color={} (step {}-{}-{})"
140
+ )
141
+ self.MSG_REDO_COMP = I(
142
+ "[{}] Compressed {} -> {} but size {} {} limit {}, recompressing"
143
+ )
144
+ self.MSG_DONE_COMP = I("[S] Successful compression {} -> {} size {} (step {})")
145
+ self.MSG_FAIL_COMP = I(
146
+ "[F] Failed Compression {} -> {}, "
147
+ "cannot get below limit {} with lowest quality under current settings (Best size: {})"
148
+ )
149
+ self.MSG_QUANT_NO_ALPHA = I(
150
+ "[W] {} does not support RGBA, defaulted to fastoctree quantization"
151
+ )
152
+ self.MSG_SVG_LONG = I("[W] Importing SVG takes long time")
153
+
154
+ self.in_f: Union[bytes, Path]
155
+ if isinstance(in_f, Path):
156
+ self.in_f = in_f
157
+ self.in_f_name = self.in_f.name
158
+ self.in_f_path = in_f
159
+ self.codec_info_orig = CodecInfo(self.in_f)
160
+ else:
161
+ self.in_f = in_f[1]
162
+ self.in_f_name = Path(in_f[0]).name
163
+ self.in_f_path = in_f[0]
164
+ self.codec_info_orig = CodecInfo(in_f[1], Path(in_f[0]).suffix)
165
+
166
+ valid_formats: List[str] = []
167
+ for i in opt_comp.get_format():
168
+ valid_formats.extend(i)
169
+
170
+ valid_ext = False
171
+ self.out_f = Path()
172
+ if len(valid_formats) == 0 or Path(out_f).suffix in valid_formats:
173
+ self.out_f = Path(out_f)
174
+ valid_ext = True
175
+
176
+ if not valid_ext:
177
+ if self.codec_info_orig.is_animated or opt_comp.fake_vid:
178
+ ext = opt_comp.format_vid[0]
179
+ else:
180
+ ext = opt_comp.format_img[0]
181
+ self.out_f = out_f.with_suffix(ext)
182
+
183
+ self.out_f_name: str = self.out_f.name
184
+
185
+ self.cb = cb
186
+ self.frames_raw: "List[np.ndarray[Any, Any]]" = []
187
+ self.frames_processed: "List[np.ndarray[Any, Any]]" = []
188
+ self.opt_comp: CompOption = opt_comp
189
+ if not self.opt_comp.steps:
190
+ self.opt_comp.steps = 1
191
+
192
+ self.size: int = 0
193
+ self.size_max: Optional[int] = None
194
+ self.res_w: Optional[int] = None
195
+ self.res_h: Optional[int] = None
196
+ self.quality: Optional[int] = None
197
+ self.fps: Optional[Fraction] = None
198
+ self.color: Optional[int] = None
199
+
200
+ self.bg_color: Optional[Tuple[int, int, int, int]] = None
201
+ if self.opt_comp.bg_color:
202
+ r, g, b, a = bytes.fromhex(self.opt_comp.bg_color)
203
+ self.bg_color = (r, g, b, a)
204
+
205
+ self.tmp_f: BytesIO = BytesIO()
206
+ self.result: Optional[bytes] = None
207
+ self.result_size: int = 0
208
+ self.result_step: Optional[int] = None
209
+
210
+ self.apngasm = None
211
+
212
+ @staticmethod
213
+ def convert(
214
+ in_f: Union[Path, Tuple[Path, bytes]],
215
+ out_f: Path,
216
+ opt_comp: CompOption,
217
+ cb: CallbackProtocol,
218
+ _cb_return: CallbackReturn,
219
+ ) -> Tuple[bool, Path, Union[None, bytes, Path], int]:
220
+ sticker = StickerConvert(in_f, out_f, opt_comp, cb)
221
+ result = sticker._convert()
222
+ cb.put("update_bar")
223
+ return result
224
+
225
+ def _convert(self) -> Tuple[bool, Path, Union[None, bytes, Path], int]:
226
+ result = self.check_if_compatible()
227
+ if result:
228
+ return self.compress_done(result)
229
+
230
+ self.cb.put((self.MSG_START_COMP.format(self.in_f_name, self.out_f_name)))
231
+
232
+ steps_list = self.generate_steps_list()
233
+
234
+ step_lower = 0
235
+ step_upper = self.opt_comp.steps
236
+
237
+ if self.codec_info_orig.is_animated is True:
238
+ self.size_max = self.opt_comp.size_max_vid
239
+ else:
240
+ self.size_max = self.opt_comp.size_max_img
241
+
242
+ if self.size_max in (None, 0):
243
+ # No limit to size, create the best quality result
244
+ step_current = 0
245
+ else:
246
+ step_current = int(rounding((step_lower + step_upper) / 2))
247
+
248
+ self.frames_import()
249
+ while True:
250
+ param = steps_list[step_current]
251
+ self.res_w = param[0]
252
+ self.res_h = param[1]
253
+ self.quality = param[2]
254
+ if param[3] and self.codec_info_orig.fps:
255
+ fps_tmp = min(param[3], self.codec_info_orig.fps)
256
+ self.fps = self.fix_fps(fps_tmp)
257
+ else:
258
+ self.fps = Fraction(0)
259
+ self.color = param[4]
260
+
261
+ self.tmp_f = BytesIO()
262
+ msg = self.MSG_COMP.format(
263
+ self.in_f_name,
264
+ self.out_f_name,
265
+ self.res_w,
266
+ self.res_h,
267
+ self.quality,
268
+ int(self.fps),
269
+ self.color,
270
+ step_lower,
271
+ step_current,
272
+ step_upper,
273
+ )
274
+ self.cb.put(msg)
275
+
276
+ self.frames_processed = self.frames_drop(self.frames_raw)
277
+ self.frames_processed = self.frames_resize(self.frames_processed)
278
+ self.frames_export()
279
+
280
+ self.tmp_f.seek(0)
281
+ self.size = self.tmp_f.getbuffer().nbytes
282
+
283
+ if not self.size_max or (
284
+ self.size <= self.size_max and self.size >= self.result_size
285
+ ):
286
+ self.result = self.tmp_f.read()
287
+ self.result_size = self.size
288
+ self.result_step = step_current
289
+
290
+ if (
291
+ step_upper - step_lower > 0
292
+ and step_current != step_lower
293
+ and self.size_max
294
+ ):
295
+ if self.size <= self.size_max:
296
+ sign = "<"
297
+ step_upper = step_current
298
+ else:
299
+ sign = ">"
300
+ step_lower = step_current
301
+ if step_current == step_lower + 1:
302
+ step_current = step_lower
303
+ else:
304
+ step_current = int(rounding((step_lower + step_upper) / 2))
305
+ self.recompress(sign)
306
+ elif self.result:
307
+ return self.compress_done(self.result, self.result_step)
308
+ else:
309
+ return self.compress_fail()
310
+
311
+ def check_if_compatible(self) -> Optional[bytes]:
312
+ f_fmt = self.opt_comp.get_format()
313
+ if (
314
+ # Issue #260: Some webp file not accepted by Whatsapp
315
+ ".webp" not in f_fmt[0]
316
+ and ".webp" not in f_fmt[1]
317
+ and FormatVerify.check_format(
318
+ self.in_f,
319
+ fmt=f_fmt,
320
+ file_info=self.codec_info_orig,
321
+ )
322
+ and FormatVerify.check_file_res(
323
+ self.in_f, res=self.opt_comp.get_res(), file_info=self.codec_info_orig
324
+ )
325
+ and FormatVerify.check_file_fps(
326
+ self.in_f, fps=self.opt_comp.get_fps(), file_info=self.codec_info_orig
327
+ )
328
+ and FormatVerify.check_file_size(
329
+ self.in_f,
330
+ size=self.opt_comp.get_size_max(),
331
+ file_info=self.codec_info_orig,
332
+ )
333
+ and FormatVerify.check_file_duration(
334
+ self.in_f,
335
+ duration=self.opt_comp.get_duration(),
336
+ file_info=self.codec_info_orig,
337
+ )
338
+ ):
339
+ self.cb.put((self.MSG_SKIP_COMP.format(self.in_f_name, self.out_f_name)))
340
+
341
+ if isinstance(self.in_f, Path):
342
+ with open(self.in_f, "rb") as f:
343
+ result = f.read()
344
+ self.result_size = os.path.getsize(self.in_f)
345
+ else:
346
+ result = self.in_f
347
+ self.result_size = len(self.in_f)
348
+
349
+ return result
350
+
351
+ return None
352
+
353
+ def generate_steps_list(self) -> List[Tuple[Optional[int], ...]]:
354
+ steps_list: List[Tuple[Optional[int], ...]] = []
355
+ need_even = self.out_f.suffix in (".webm", ".mp4", ".mkv", ".webp")
356
+ for step in range(self.opt_comp.steps, -1, -1):
357
+ steps_list.append(
358
+ (
359
+ get_step_value(
360
+ self.opt_comp.res_w_max,
361
+ self.opt_comp.res_w_min,
362
+ step,
363
+ self.opt_comp.steps,
364
+ self.opt_comp.res_power,
365
+ need_even,
366
+ self.opt_comp.res_snap_pow2,
367
+ ),
368
+ get_step_value(
369
+ self.opt_comp.res_h_max,
370
+ self.opt_comp.res_h_min,
371
+ step,
372
+ self.opt_comp.steps,
373
+ self.opt_comp.res_power,
374
+ need_even,
375
+ self.opt_comp.res_snap_pow2,
376
+ ),
377
+ get_step_value(
378
+ self.opt_comp.quality_max,
379
+ self.opt_comp.quality_min,
380
+ step,
381
+ self.opt_comp.steps,
382
+ self.opt_comp.quality_power,
383
+ ),
384
+ get_step_value(
385
+ self.opt_comp.fps_max,
386
+ self.opt_comp.fps_min,
387
+ step,
388
+ self.opt_comp.steps,
389
+ self.opt_comp.fps_power,
390
+ ),
391
+ get_step_value(
392
+ self.opt_comp.color_max,
393
+ self.opt_comp.color_min,
394
+ step,
395
+ self.opt_comp.steps,
396
+ self.opt_comp.color_power,
397
+ ),
398
+ )
399
+ )
400
+
401
+ return steps_list
402
+
403
+ def recompress(self, sign: str) -> None:
404
+ msg = self.MSG_REDO_COMP.format(
405
+ sign, self.in_f_name, self.out_f_name, self.size, sign, self.size_max
406
+ )
407
+ self.cb.put(msg)
408
+
409
+ def compress_fail(
410
+ self,
411
+ ) -> Tuple[bool, Path, Union[None, bytes, Path], int]:
412
+ msg = self.MSG_FAIL_COMP.format(
413
+ self.in_f_name, self.out_f_name, self.size_max, self.size
414
+ )
415
+ self.cb.put(msg)
416
+
417
+ return False, self.in_f_path, self.out_f, self.size
418
+
419
+ def compress_done(
420
+ self, data: bytes, result_step: Optional[int] = None
421
+ ) -> Tuple[bool, Path, Union[None, bytes, Path], int]:
422
+ out_f: Union[None, bytes, Path]
423
+
424
+ if self.out_f.stem == "none":
425
+ out_f = None
426
+ elif self.out_f.stem == "bytes":
427
+ out_f = data
428
+ else:
429
+ out_f = self.out_f
430
+ with open(self.out_f, "wb+") as f:
431
+ f.write(data)
432
+
433
+ if result_step is not None:
434
+ msg = self.MSG_DONE_COMP.format(
435
+ self.in_f_name, self.out_f_name, self.result_size, result_step
436
+ )
437
+ self.cb.put(msg)
438
+
439
+ return True, self.in_f_path, out_f, self.result_size
440
+
441
+    def frames_import(self) -> None:
+        if isinstance(self.in_f, Path):
+            suffix = self.in_f.suffix
+        else:
+            suffix = Path(self.in_f_name).suffix
+
+        if suffix in (".tgs", ".lottie", ".json"):
+            self._frames_import_lottie()
+        elif suffix in (".webp", ".apng", ".png", ".gif"):
+            # ffmpeg does not support webp decoding (yet)
+            # ffmpeg could fail to decode apng if the file is buggy
+            self._frames_import_pillow()
+        elif suffix == ".svg":
+            self._frames_import_svg()
+        else:
+            self._frames_import_pyav()
+
+    def _frames_import_svg(self) -> None:
+        width = self.codec_info_orig.res[0]
+        height = self.codec_info_orig.res[1]
+
+        if RUNTIME_STATE.get("crd") is None:
+            chrome_path: Optional[str]
+            if self.opt_comp.chromium_path:
+                chrome_path = self.opt_comp.chromium_path
+            else:
+                chrome_path = CRD.get_chromium_path()
+            args = [
+                "--headless",
+                "--kiosk",
+                "--disable-extensions",
+                "--disable-infobars",
+                "--disable-gpu",
+                "--disable-gpu-rasterization",
+                "--hide-scrollbars",
+                "--force-device-scale-factor=1",
+                "about:blank",
+            ]
+            if chrome_path is None:
+                raise RuntimeError("[F] Chrome/Chromium required for importing svg")
+            self.cb.put(self.MSG_SVG_LONG)
+            RUNTIME_STATE["crd"] = CRD(chrome_path, args=args)
+            RUNTIME_STATE["crd"].connect(-1)  # type: ignore
+
+        crd = cast(CRD, RUNTIME_STATE["crd"])
+        if isinstance(self.in_f, bytes):
+            svg = self.in_f.decode()
+        else:
+            with open(self.in_f) as f:
+                svg = f.read()
+        soup = BeautifulSoup(svg, "html.parser")
+        svg_tag = soup.find_all("svg")[0]
+
+        if svg_tag.get("width") is None:
+            svg_tag["width"] = width
+        if svg_tag.get("height") is None:
+            svg_tag["height"] = height
+        svg = str(soup)
+
+        crd.open_html_str(svg)
+        crd.set_transparent_bg()
+        init_js = 'svg = document.getElementsByTagName("svg")[0];'
+        if self.codec_info_orig.fps > 0:
+            init_js += "svg.pauseAnimations();"
+        init_js += "JSON.stringify(svg.getBoundingClientRect());"
+        bound = json.loads(
+            json.loads(crd.exec_js(init_js))["result"]["result"]["value"]
+        )
+        clip = {
+            "x": bound["x"],
+            "y": bound["y"],
+            "width": width,
+            "height": height,
+            "scale": 1,
+        }
+
+        if self.codec_info_orig.fps > 0:
+            for i in range(self.codec_info_orig.frames):
+                curr_time = (
+                    i
+                    / self.codec_info_orig.frames
+                    * self.codec_info_orig.duration
+                    / 1000
+                )
+                crd.exec_js(f"svg.setCurrentTime({curr_time})")
+                self.frames_raw.append(np.asarray(crd.screenshot(clip)))
+        else:
+            self.frames_raw.append(np.asarray(crd.screenshot(clip)))
+
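+    # The loop below resamples the animation to a constant frame rate by
+    # walking the per-frame durations: each output frame covers
+    # 1000 / fps ms, so e.g. at fps=20 a source frame lasting 100 ms is
+    # emitted twice before seeking to the next source frame.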
+    def _frames_import_pillow(self) -> None:
+        with Image.open(self.in_f) as im:
+            # Note: im.convert("RGBA") would return an RGBA image of the current frame only
+            if (
+                "n_frames" in dir(im)
+                and im.n_frames != 0
+                and self.codec_info_orig.fps != 0.0
+            ):
+                # Pillow is not reliable for getting webp frame durations
+                durations: Optional[List[int]]
+                if im.format == "WEBP":
+                    _, _, _, durations = CodecInfo._get_file_fps_frames_duration_webp(  # type: ignore
+                        self.in_f
+                    )
+                else:
+                    durations = None
+
+                duration_ptr = 0.0
+                duration_inc = 1 / self.codec_info_orig.fps * 1000
+                frame = 0
+                if durations is None:
+                    next_frame_start_duration = cast(int, im.info.get("duration", 1000))
+                else:
+                    next_frame_start_duration = durations[0]
+                while True:
+                    self.frames_raw.append(np.asarray(im.convert("RGBA")))
+                    duration_ptr += duration_inc
+                    if duration_ptr >= next_frame_start_duration:
+                        frame += 1
+                        if frame == im.n_frames:
+                            break
+                        im.seek(frame)
+
+                        if durations is None:
+                            next_frame_start_duration += cast(
+                                int, im.info.get("duration", 1000)
+                            )
+                        else:
+                            next_frame_start_duration += durations[frame]
+            else:
+                self.frames_raw.append(np.asarray(im.convert("RGBA")))
+
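+    # vp8/vp9 streams are re-decoded with libvpx/libvpx-vp9, presumably
+    # because the default decoders drop the alpha plane.  Odd-sized frames
+    # are padded to even dimensions before the yuva420p -> rgba conversion,
+    # then cropped back to their original size.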
+    def _frames_import_pyav(self) -> None:
+        import av
+        from av.codec.context import CodecContext
+        from av.container.input import InputContainer
+        from av.video.codeccontext import VideoCodecContext
+        from av.video.frame import VideoFrame
+
+        # Crashes when handling some webm in yuv420p and converting to rgba
+        # https://github.com/PyAV-Org/PyAV/issues/1166
+        file: Union[BytesIO, str]
+        if isinstance(self.in_f, Path):
+            file = self.in_f.as_posix()
+        else:
+            file = BytesIO(self.in_f)
+        with av.open(file) as container:
+            container = cast(InputContainer, container)
+            context = container.streams.video[0].codec_context
+            if context.name == "vp8":
+                context = CodecContext.create("libvpx", "r")
+            elif context.name == "vp9":
+                context = cast(
+                    VideoCodecContext, CodecContext.create("libvpx-vp9", "r")
+                )
+
+            for packet in container.demux(container.streams.video):
+                for frame in context.decode(packet):
+                    width_orig = frame.width
+                    height_orig = frame.height
+
+                    # Need to pad frame to even dimensions first
+                    if width_orig % 2 == 1 or height_orig % 2 == 1:
+                        from av.filter import Graph
+
+                        width_new = width_orig + width_orig % 2
+                        height_new = height_orig + height_orig % 2
+
+                        graph = Graph()
+                        in_src = graph.add_buffer(template=container.streams.video[0])
+                        pad = graph.add(
+                            "pad", f"{width_new}:{height_new}:0:0:color=#00000000"
+                        )
+                        in_src.link_to(pad)
+                        sink = graph.add("buffersink")
+                        pad.link_to(sink)
+                        graph.configure()
+
+                        graph.push(frame)
+                        frame_resized = cast(VideoFrame, graph.pull())
+                    else:
+                        frame_resized = frame
+
+                    # yuva420p may cause a crash
+                    # Not safe to directly call frame.to_ndarray(format="rgba")
+                    # https://github.com/PyAV-Org/PyAV/discussions/1510
+                    # if int(av.__version__.split(".")[0]) >= 14:
+                    #     rgba_array = frame_resized.to_ndarray(format="rgba")
+                    if frame_resized.format.name == "yuv420p":
+                        rgb_array = frame_resized.to_ndarray(format="rgb24")
+                        rgba_array = np.dstack(
+                            (
+                                rgb_array,
+                                cast(
+                                    np.ndarray[Any, np.dtype[np.uint8]],
+                                    np.zeros(rgb_array.shape[:2], dtype=np.uint8) + 255,
+                                ),
+                            )
+                        )
+                    else:
+                        frame_resized = frame_resized.reformat(
+                            format="yuva420p",
+                            dst_colorspace=1,
+                        )
+                        rgba_array = yuva_to_rgba(frame_resized)
+
+                    # Remove pixels that were added to make dimensions even
+                    rgba_array = rgba_array[0:height_orig, 0:width_orig]
+                    self.frames_raw.append(rgba_array)
+
+    def _frames_import_lottie(self) -> None:
+        from rlottie_python.rlottie_wrapper import LottieAnimation
+
+        if isinstance(self.in_f, Path):
+            suffix = self.in_f.suffix
+        else:
+            suffix = Path(self.in_f_name).suffix
+
+        if suffix == ".tgs":
+            if isinstance(self.in_f, Path):
+                anim = LottieAnimation.from_tgs(self.in_f.as_posix())
+            else:
+                import gzip
+
+                with gzip.open(BytesIO(self.in_f)) as f:
+                    data = f.read().decode(encoding="utf-8")
+                anim = LottieAnimation.from_data(data)
+        else:
+            if isinstance(self.in_f, Path):
+                anim = LottieAnimation.from_file(self.in_f.as_posix())
+            else:
+                anim = LottieAnimation.from_data(self.in_f.decode("utf-8"))
+
+        for i in range(anim.lottie_animation_get_totalframe()):
+            frame = np.asarray(anim.render_pillow_frame(frame_num=i))
+            self.frames_raw.append(frame)
+
+        anim.lottie_animation_destroy()
+
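+    # Background heuristic: average the alpha-weighted RGB of all visible
+    # pixels across every frame; dark stickers get a white (fully
+    # transparent) background and light stickers a black one, presumably so
+    # semi-transparent edges composite against a contrasting colour.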
+    def determine_bg_color(self) -> Tuple[int, int, int, int]:
+        mean_total = 0.0
+        # Calculate average color of all frames for selecting background color
+        for frame in self.frames_raw:
+            s = frame.shape
+            colors = frame.reshape((-1, s[2]))  # type: ignore
+            # Do not count pixels with alpha=0
+            # If alpha > 0, use alpha as weight
+            colors = colors[colors[:, 3] != 0]
+            if colors.shape[0] != 0:
+                alphas = colors[:, 3] / 255
+                r_mean = cast(float, np.mean(colors[:, 0] * alphas))
+                g_mean = cast(float, np.mean(colors[:, 1] * alphas))
+                b_mean = cast(float, np.mean(colors[:, 2] * alphas))
+                mean_total += (r_mean + g_mean + b_mean) / 3
+
+        if mean_total / len(self.frames_raw) < 128:
+            return (255, 255, 255, 0)
+        else:
+            return (0, 0, 0, 0)
+
+    def frames_resize(
+        self, frames_in: "List[np.ndarray[Any, Any]]"
+    ) -> "List[np.ndarray[Any, Any]]":
+        frames_out: "List[np.ndarray[Any, Any]]" = []
+
+        resample: Literal[0, 1, 2, 3, 4, 5]
+        if self.opt_comp.scale_filter == "nearest":
+            resample = Image.NEAREST
+        elif self.opt_comp.scale_filter == "box":
+            resample = Image.BOX
+        elif self.opt_comp.scale_filter == "bilinear":
+            resample = Image.BILINEAR
+        elif self.opt_comp.scale_filter == "hamming":
+            resample = Image.HAMMING
+        elif self.opt_comp.scale_filter == "bicubic":
+            resample = Image.BICUBIC
+        elif self.opt_comp.scale_filter == "lanczos":
+            resample = Image.LANCZOS
+        else:
+            resample = Image.BICUBIC
+
+        if self.bg_color is None:
+            self.bg_color = self.determine_bg_color()
+
+        for frame in frames_in:
+            with Image.fromarray(frame, "RGBA") as im:  # type: ignore
+                width, height = im.size
+
+                if self.res_w is None:
+                    self.res_w = width
+                if self.res_h is None:
+                    self.res_h = height
+
+                scaling = 1 - (self.opt_comp.padding_percent / 100)
+                if width / self.res_w > height / self.res_h:
+                    width_new = int(self.res_w * scaling)
+                    height_new = int(height * self.res_w / width * scaling)
+                else:
+                    height_new = int(self.res_h * scaling)
+                    width_new = int(width * self.res_h / height * scaling)
+
+                with im.resize((width_new, height_new), resample=resample) as im_resized:
+                    with Image.new(
+                        "RGBA", (self.res_w, self.res_h), self.bg_color
+                    ) as im_new:
+                        im_new.alpha_composite(
+                            im_resized,
+                            ((self.res_w - width_new) // 2, (self.res_h - height_new) // 2),
+                        )
+                        frames_out.append(np.asarray(im_new))
+
+        return frames_out
+
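+    # Frame dropping example: a 30 fps source re-timed to 10 fps gives
+    # fps_ratio = 3, so each output frame advances three source frames.
+    # A clip longer than duration_max additionally gets speed_ratio > 1
+    # (played faster), while duration_min is met by repeating the last frame.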
+    def frames_drop(
+        self, frames_in: "List[np.ndarray[Any, Any]]"
+    ) -> "List[np.ndarray[Any, Any]]":
+        if (
+            not self.codec_info_orig.is_animated
+            or not self.fps
+            or len(self.frames_processed) == 1
+        ):
+            return [frames_in[0]]
+
+        frames_out: "List[np.ndarray[Any, Any]]" = []
+
+        # fps_ratio: how many frames of the old animation one frame of the new animation covers
+        # speed_ratio: how much to speed up / slow down
+        fps_ratio = self.codec_info_orig.fps / self.fps
+        if (
+            self.opt_comp.duration_min
+            and self.codec_info_orig.duration < self.opt_comp.duration_min
+        ):
+            speed_ratio = self.codec_info_orig.duration / self.opt_comp.duration_min
+        elif (
+            self.opt_comp.duration_max
+            and self.codec_info_orig.duration > self.opt_comp.duration_max
+        ):
+            speed_ratio = self.codec_info_orig.duration / self.opt_comp.duration_max
+        else:
+            speed_ratio = 1
+
+        # How many frames to advance in the original video for each frame of the output video
+        frame_increment = fps_ratio * speed_ratio
+
+        frames_out_min = None
+        frames_out_max = None
+        if self.opt_comp.duration_min:
+            frames_out_min = ceil(self.fps * self.opt_comp.duration_min / 1000)
+        if self.opt_comp.duration_max:
+            frames_out_max = floor(self.fps * self.opt_comp.duration_max / 1000)
+
+        frame_current = 0
+        frame_current_float = 0.0
+        while True:
+            if frame_current <= len(frames_in) - 1 and not (
+                frames_out_max and len(frames_out) == frames_out_max
+            ):
+                frames_out.append(frames_in[frame_current])
+            else:
+                while len(frames_out) == 0 or (
+                    frames_out_min and len(frames_out) < frames_out_min
+                ):
+                    frames_out.append(frames_in[-1])
+                return frames_out
+            frame_current_float += frame_increment
+            frame_current = int(rounding(frame_current_float))
+
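+    # Export dispatch: (a)png goes through apngasm + oxipng, gif and webp
+    # are written by Pillow, video containers (or any other animated
+    # target) go through PyAV, and single-frame images fall back to Pillow.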
+    def frames_export(self) -> None:
+        is_animated = len(self.frames_processed) > 1 and self.fps
+        if self.out_f.suffix in (".apng", ".png"):
+            if is_animated:
+                self._frames_export_apng()
+            else:
+                self._frames_export_png()
+        elif self.out_f.suffix in (".gif", ".webp"):
+            self._frames_export_pil_anim()
+        elif self.out_f.suffix in (".webm", ".mp4", ".mkv") or is_animated:
+            self._frames_export_pyav()
+        else:
+            self._frames_export_pil()
+
+    def _check_dup(self) -> bool:
+        if len(self.frames_processed) == 1:
+            return False
+
+        prev_frame = self.frames_processed[0]
+        for frame in self.frames_processed[1:]:
+            if np.array_equal(frame, prev_frame):
+                return True
+            prev_frame = frame
+
+        return False
+
+    def _frames_export_pil(self) -> None:
+        with Image.fromarray(self.frames_processed[0]) as im:  # type: ignore
+            im.save(
+                self.tmp_f,
+                format=self.out_f.suffix.replace(".", ""),
+                quality=self.quality,
+            )
+
+    def _frames_export_pyav(self) -> None:
+        import av
+        from av.video.stream import VideoStream
+
+        options_container: Dict[str, str] = {}
+        options_stream: Dict[str, str] = {}
+
+        if isinstance(self.quality, int):
+            # Does not seem to have any actual effect
+            options_stream["quality"] = str(self.quality)
+            options_stream["lossless"] = "0"
+
+        if self.out_f.suffix in (".apng", ".png"):
+            codec = "apng"
+            pixel_format = "rgba"
+            options_stream["plays"] = "0"
+        elif self.out_f.suffix in (".webm", ".mkv"):
+            codec = "libvpx-vp9"
+            pixel_format = "yuva420p"
+            options_stream["loop"] = "0"
+        elif self.out_f.suffix == ".webp":
+            codec = "webp"
+            pixel_format = "yuva420p"
+            options_container["loop"] = "0"
+        else:
+            codec = "libvpx-vp9"
+            pixel_format = "yuv420p"
+            options_stream["loop"] = "0"
+
+        with av.open(
+            self.tmp_f,
+            "w",
+            format=self.out_f.suffix.replace(".", ""),
+            options=options_container,
+        ) as output:
+            out_stream = output.add_stream(codec, rate=self.fps, options=options_stream)  # type: ignore
+            out_stream = cast(VideoStream, out_stream)
+            assert isinstance(self.res_w, int) and isinstance(self.res_h, int)
+            out_stream.width = self.res_w
+            out_stream.height = self.res_h
+            out_stream.pix_fmt = pixel_format
+
+            for frame in self.frames_processed:
+                av_frame = av.VideoFrame.from_ndarray(frame, format="rgba")
+                output.mux(out_stream.encode(av_frame))
+            output.mux(out_stream.encode())
+
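+    # GIF supports only a single transparent palette index, so for gif
+    # output the lowest alpha value found is forced to 0 and every other
+    # pixel becomes fully opaque before quantization.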
+    def _frames_export_pil_anim(self) -> None:
+        extra_kwargs: Dict[str, Any] = {}
+
+        # disposal=2 on gif causes flicker in images with transparency
+        # Occurs in Pillow == 10.2.0
+        # https://github.com/python-pillow/Pillow/issues/7787
+        if PillowVersion == "10.2.0":
+            extra_kwargs["optimize"] = False
+        else:
+            extra_kwargs["optimize"] = True
+
+        if self.out_f.suffix == ".gif":
+            # GIF can only have one fully transparent color
+            # Change the lowest alpha to alpha=0
+            # Only keep alpha=0 and alpha=255, nothing in between
+            extra_kwargs["format"] = "GIF"
+            frames_processed = np.array(self.frames_processed)
+            alpha = frames_processed[:, :, :, 3]
+            alpha_min = np.min(alpha)  # type: ignore
+            if alpha_min < 255:
+                alpha[alpha > alpha_min] = 255
+                alpha[alpha == alpha_min] = 0
+
+            if 0 in alpha:
+                extra_kwargs["transparency"] = 0
+                extra_kwargs["disposal"] = 2
+                im_out = [self.quantize(Image.fromarray(i)) for i in frames_processed]  # type: ignore
+            else:
+                im_out = [
+                    self.quantize(Image.fromarray(i).convert("RGB")).convert("RGB")  # type: ignore
+                    for i in frames_processed
+                ]
+        elif self.out_f.suffix == ".webp":
+            im_out = [Image.fromarray(i) for i in self.frames_processed]  # type: ignore
+            extra_kwargs["format"] = "WebP"
+            extra_kwargs["allow_mixed"] = True
+            extra_kwargs["kmax"] = (
+                1  # Keyframe every frame, otherwise black line artifacts can appear
+            )
+            if self.quality:
+                if self.quality < 20:
+                    extra_kwargs["minimize_size"] = True
+                extra_kwargs["method"] = 4 + int(2 * (100 - self.quality) / 100)
+                extra_kwargs["alpha_quality"] = self.quality
+        else:
+            raise RuntimeError(f"Invalid format {self.out_f.suffix}")
+
+        if self.fps:
+            extra_kwargs["save_all"] = True
+            extra_kwargs["append_images"] = im_out[1:]
+            extra_kwargs["duration"] = int(1000 / self.fps)
+            extra_kwargs["loop"] = 0
+
+        im_out[0].save(
+            self.tmp_f,
+            quality=self.quality,
+            **extra_kwargs,
+        )
+
+    def _frames_export_png(self) -> None:
+        with Image.fromarray(self.frames_processed[0], "RGBA") as image:  # type: ignore
+            image_quant = self.quantize(image)
+
+        with BytesIO() as f:
+            image_quant.save(f, format="png")
+            f.seek(0)
+            frame_optimized = self.optimize_png(f.read())
+            self.tmp_f.write(frame_optimized)
+
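+    # All frames are stacked into one tall image and quantized together,
+    # presumably so a single palette covers the whole animation, then
+    # sliced back into res_h-tall strips before being handed to apngasm.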
+    def _frames_export_apng(self) -> None:
+        from apngasm_python._apngasm_python import APNGAsm, create_frame_from_rgb, create_frame_from_rgba  # type: ignore
+
+        assert self.fps
+        assert self.res_h
+
+        frames_concat = np.concatenate(self.frames_processed)
+        with Image.fromarray(frames_concat, "RGBA") as image_concat:  # type: ignore
+            if image_concat.getextrema()[3][0] < 255:  # type: ignore
+                mode = "RGBA"
+                create_frame_method = create_frame_from_rgba
+            else:
+                mode = "RGB"
+                create_frame_method = create_frame_from_rgb
+            image_quant = self.quantize(image_concat)
+
+        if self.apngasm is None:
+            self.apngasm = APNGAsm()  # type: ignore
+        assert isinstance(self.apngasm, APNGAsm)
+
+        delay_num = int(1000 / self.fps)
+        for i in range(0, image_quant.height, self.res_h):
+            crop_dimension = (0, i, image_quant.width, i + self.res_h)
+            image_cropped = image_quant.crop(crop_dimension)
+            image_final = image_cropped.convert(mode)
+            frame_final = create_frame_method(
+                np.array(image_final),
+                width=image_final.width,
+                height=image_final.height,
+                delay_num=delay_num,
+                delay_den=1000,
+            )
+            self.apngasm.add_frame(frame_final)
+
+        with CacheStore.get_cache_store(path=self.opt_comp.cache_dir) as tempdir:
+            tmp_apng = Path(tempdir, f"out{self.out_f.suffix}")
+            self.apngasm.assemble(tmp_apng.as_posix())
+
+            with open(tmp_apng, "rb") as f:
+                apng_optimized = self.optimize_png(f.read())
+                self.tmp_f.write(apng_optimized)
+
+        self.apngasm.reset()
+
+    def optimize_png(self, image_bytes: bytes) -> bytes:
+        import oxipng
+
+        return oxipng.optimize_from_memory(
+            image_bytes,
+            level=6,
+            fix_errors=True,
+            filter=[oxipng.RowFilter.Brute],
+            optimize_alpha=True,
+            strip=oxipng.StripChunks.safe(),
+        )
+
+    def quantize(self, image: Image.Image) -> Image.Image:
+        if not (self.color and self.color <= 256):
+            return image.copy()
+        if self.opt_comp.quantize_method == "imagequant":
+            return self._quantize_by_imagequant(image)
+        if self.opt_comp.quantize_method in ("mediancut", "maxcoverage", "fastoctree"):
+            return self._quantize_by_pillow(image)
+
+        return image
+
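+    # Dithering strength follows the quality step (0 at quality_max, 1 at
+    # quality_min).  The retry loop raises max_quality in steps of 5 because
+    # imagequant appears to raise RuntimeError when it cannot reach the
+    # requested quality with the given colour budget.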
+    def _quantize_by_imagequant(self, image: Image.Image) -> Image.Image:
+        import imagequant  # type: ignore
+
+        assert isinstance(self.quality, int)
+        assert isinstance(self.opt_comp.quality_min, int)
+        assert isinstance(self.opt_comp.quality_max, int)
+        assert isinstance(self.color, int)
+
+        dither = 1 - (self.quality - self.opt_comp.quality_min) / (
+            self.opt_comp.quality_max - self.opt_comp.quality_min
+        )
+        image_quant = None
+        for i in range(self.quality, 101, 5):
+            try:
+                image_quant = imagequant.quantize_pil_image(  # type: ignore
+                    image,
+                    dithering_level=dither,
+                    max_colors=self.color,
+                    min_quality=self.opt_comp.quality_min,
+                    max_quality=i,
+                )
+                return image_quant
+            except RuntimeError:
+                pass
+
+        return image
+
+    def _quantize_by_pillow(self, image: Image.Image) -> Image.Image:
+        assert self.color
+
+        if image.mode == "RGBA" and self.opt_comp.quantize_method in (
+            "mediancut",
+            "maxcoverage",
+        ):
+            self.cb.put(self.MSG_QUANT_NO_ALPHA.format(self.opt_comp.quantize_method))
+            method = Image.Quantize.FASTOCTREE
+        elif self.opt_comp.quantize_method == "mediancut":
+            method = Image.Quantize.MEDIANCUT
+        elif self.opt_comp.quantize_method == "maxcoverage":
+            method = Image.Quantize.MAXCOVERAGE
+        else:
+            method = Image.Quantize.FASTOCTREE
+        return image.quantize(colors=self.color, method=method)
+
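+    # Example: a requested 24 fps GIF rounds to a 4/100 s delay, i.e. an
+    # effective 25 fps; if fps_max were 24 the delay would be bumped to
+    # 5/100 s (20 fps) so the configured limit is still respected.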
+    def fix_fps(self, fps: float) -> Fraction:
+        # After rounding fps/duration during export,
+        # the video duration may exceed the limit.
+        # Hence we need to 'fix' the fps
+        if self.out_f.suffix == ".gif":
+            # Quote from https://www.w3.org/Graphics/GIF/spec-gif89a.txt
+            # vii) Delay Time - If not 0, this field specifies
+            # the number of hundredths (1/100) of a second
+            #
+            # For GIF, we need to adjust fps such that the delay matches hundredths of a second
+            return self._fix_fps_duration(fps, 100)
+        if self.out_f.suffix in (".webp", ".apng", ".png"):
+            return self._fix_fps_duration(fps, 1000)
+
+        return self._fix_fps_pyav(fps)
+
+    def _fix_fps_duration(self, fps: float, denominator: int) -> Fraction:
+        delay = int(rounding(denominator / fps))
+        fps_fraction = Fraction(denominator, delay)
+        if self.opt_comp.fps_max and fps_fraction > self.opt_comp.fps_max:
+            return Fraction(denominator, (delay + 1))
+        if self.opt_comp.fps_min and fps_fraction < self.opt_comp.fps_min:
+            return Fraction(denominator, (delay - 1))
+        return fps_fraction
+
+    def _fix_fps_pyav(self, fps: float) -> Fraction:
+        return Fraction(rounding(fps))