sticker-convert 2.13.3.0__py3-none-any.whl → 2.17.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sticker_convert/__main__.py +24 -27
- sticker_convert/auth/__init__.py +0 -0
- sticker_convert/auth/auth_base.py +19 -0
- sticker_convert/{utils/auth/get_discord_auth.py → auth/auth_discord.py} +149 -118
- sticker_convert/{utils/auth/get_kakao_auth.py → auth/auth_kakao_android_login.py} +331 -330
- sticker_convert/auth/auth_kakao_desktop_login.py +327 -0
- sticker_convert/{utils/auth/get_kakao_desktop_auth.py → auth/auth_kakao_desktop_memdump.py} +281 -263
- sticker_convert/{utils/auth/get_line_auth.py → auth/auth_line.py} +98 -80
- sticker_convert/{utils/auth/get_signal_auth.py → auth/auth_signal.py} +139 -135
- sticker_convert/auth/auth_telethon.py +161 -0
- sticker_convert/{utils/auth/get_viber_auth.py → auth/auth_viber.py} +250 -235
- sticker_convert/{utils/auth → auth}/telegram_api.py +736 -675
- sticker_convert/cli.py +623 -608
- sticker_convert/converter.py +1093 -1084
- sticker_convert/definitions.py +4 -0
- sticker_convert/downloaders/download_band.py +111 -110
- sticker_convert/downloaders/download_base.py +171 -166
- sticker_convert/downloaders/download_discord.py +92 -91
- sticker_convert/downloaders/download_kakao.py +417 -404
- sticker_convert/downloaders/download_line.py +484 -475
- sticker_convert/downloaders/download_ogq.py +80 -79
- sticker_convert/downloaders/download_signal.py +108 -105
- sticker_convert/downloaders/download_telegram.py +56 -55
- sticker_convert/downloaders/download_viber.py +121 -120
- sticker_convert/gui.py +788 -873
- sticker_convert/gui_components/frames/comp_frame.py +180 -166
- sticker_convert/gui_components/frames/config_frame.py +156 -113
- sticker_convert/gui_components/frames/control_frame.py +32 -30
- sticker_convert/gui_components/frames/cred_frame.py +232 -233
- sticker_convert/gui_components/frames/input_frame.py +139 -137
- sticker_convert/gui_components/frames/output_frame.py +112 -110
- sticker_convert/gui_components/frames/right_clicker.py +25 -23
- sticker_convert/gui_components/windows/advanced_compression_window.py +757 -757
- sticker_convert/gui_components/windows/base_window.py +7 -2
- sticker_convert/gui_components/windows/discord_get_auth_window.py +79 -82
- sticker_convert/gui_components/windows/kakao_get_auth_window.py +511 -321
- sticker_convert/gui_components/windows/line_get_auth_window.py +94 -102
- sticker_convert/gui_components/windows/signal_get_auth_window.py +84 -89
- sticker_convert/gui_components/windows/viber_get_auth_window.py +168 -168
- sticker_convert/ios-message-stickers-template/.github/FUNDING.yml +3 -3
- sticker_convert/ios-message-stickers-template/README.md +10 -10
- sticker_convert/ios-message-stickers-template/stickers/Info.plist +43 -43
- sticker_convert/ios-message-stickers-template/stickers StickerPackExtension/Info.plist +31 -31
- sticker_convert/ios-message-stickers-template/stickers StickerPackExtension/Stickers.xcstickers/Contents.json +6 -6
- sticker_convert/ios-message-stickers-template/stickers StickerPackExtension/Stickers.xcstickers/Sticker Pack.stickerpack/Contents.json +20 -20
- sticker_convert/ios-message-stickers-template/stickers StickerPackExtension/Stickers.xcstickers/Sticker Pack.stickerpack/Sticker 1.sticker/Contents.json +9 -9
- sticker_convert/ios-message-stickers-template/stickers StickerPackExtension/Stickers.xcstickers/Sticker Pack.stickerpack/Sticker 2.sticker/Contents.json +9 -9
- sticker_convert/ios-message-stickers-template/stickers StickerPackExtension/Stickers.xcstickers/Sticker Pack.stickerpack/Sticker 3.sticker/Contents.json +9 -9
- sticker_convert/ios-message-stickers-template/stickers StickerPackExtension/Stickers.xcstickers/iMessage App Icon.stickersiconset/Contents.json +91 -91
- sticker_convert/ios-message-stickers-template/stickers.xcodeproj/project.pbxproj +364 -364
- sticker_convert/ios-message-stickers-template/stickers.xcodeproj/project.xcworkspace/contents.xcworkspacedata +7 -7
- sticker_convert/ios-message-stickers-template/stickers.xcodeproj/project.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist +8 -8
- sticker_convert/ios-message-stickers-template/stickers.xcodeproj/xcuserdata/niklaspeterson.xcuserdatad/xcschemes/xcschememanagement.plist +14 -14
- sticker_convert/job.py +166 -130
- sticker_convert/job_option.py +1 -0
- sticker_convert/locales/en_US/LC_MESSAGES/base.mo +0 -0
- sticker_convert/locales/ja_JP/LC_MESSAGES/base.mo +0 -0
- sticker_convert/locales/zh_CN/LC_MESSAGES/base.mo +0 -0
- sticker_convert/locales/zh_TW/LC_MESSAGES/base.mo +0 -0
- sticker_convert/py.typed +0 -0
- sticker_convert/resources/NotoColorEmoji.ttf +0 -0
- sticker_convert/resources/help.ja_JP.json +88 -0
- sticker_convert/resources/help.json +10 -7
- sticker_convert/resources/help.zh_CN.json +88 -0
- sticker_convert/resources/help.zh_TW.json +88 -0
- sticker_convert/resources/input.ja_JP.json +74 -0
- sticker_convert/resources/input.json +121 -121
- sticker_convert/resources/input.zh_CN.json +74 -0
- sticker_convert/resources/input.zh_TW.json +74 -0
- sticker_convert/resources/output.ja_JP.json +38 -0
- sticker_convert/resources/output.zh_CN.json +38 -0
- sticker_convert/resources/output.zh_TW.json +38 -0
- sticker_convert/uploaders/compress_wastickers.py +186 -177
- sticker_convert/uploaders/upload_base.py +44 -35
- sticker_convert/uploaders/upload_signal.py +218 -203
- sticker_convert/uploaders/upload_telegram.py +353 -338
- sticker_convert/uploaders/upload_viber.py +178 -169
- sticker_convert/uploaders/xcode_imessage.py +295 -286
- sticker_convert/utils/callback.py +238 -6
- sticker_convert/utils/emoji.py +16 -4
- sticker_convert/utils/files/json_resources_loader.py +24 -19
- sticker_convert/utils/files/metadata_handler.py +3 -3
- sticker_convert/utils/translate.py +108 -0
- sticker_convert/utils/url_detect.py +40 -37
- sticker_convert/version.py +1 -1
- {sticker_convert-2.13.3.0.dist-info → sticker_convert-2.17.0.0.dist-info}/METADATA +89 -74
- {sticker_convert-2.13.3.0.dist-info → sticker_convert-2.17.0.0.dist-info}/RECORD +91 -74
- sticker_convert/utils/auth/telethon_setup.py +0 -97
- sticker_convert/utils/singletons.py +0 -18
- {sticker_convert-2.13.3.0.dist-info → sticker_convert-2.17.0.0.dist-info}/WHEEL +0 -0
- {sticker_convert-2.13.3.0.dist-info → sticker_convert-2.17.0.0.dist-info}/entry_points.txt +0 -0
- {sticker_convert-2.13.3.0.dist-info → sticker_convert-2.17.0.0.dist-info}/licenses/LICENSE +0 -0
- {sticker_convert-2.13.3.0.dist-info → sticker_convert-2.17.0.0.dist-info}/top_level.txt +0 -0
sticker_convert/converter.py
CHANGED
|
@@ -1,1084 +1,1093 @@
|
|
|
1
|
-
#!/usr/bin/env python3
|
|
2
|
-
import json
|
|
3
|
-
import os
|
|
4
|
-
from fractions import Fraction
|
|
5
|
-
from io import BytesIO
|
|
6
|
-
from math import ceil, floor, log2
|
|
7
|
-
from pathlib import Path
|
|
8
|
-
from typing import TYPE_CHECKING, Any, Dict, List, Literal, Optional, Tuple, Union, cast
|
|
9
|
-
|
|
10
|
-
import numpy as np
|
|
11
|
-
from bs4 import BeautifulSoup
|
|
12
|
-
from PIL import Image
|
|
13
|
-
from PIL import __version__ as PillowVersion
|
|
14
|
-
from PIL import features
|
|
15
|
-
|
|
16
|
-
from sticker_convert.
|
|
17
|
-
from sticker_convert.
|
|
18
|
-
from sticker_convert.utils.
|
|
19
|
-
from sticker_convert.utils.
|
|
20
|
-
from sticker_convert.utils.
|
|
21
|
-
from sticker_convert.utils.media.
|
|
22
|
-
from sticker_convert.utils.
|
|
23
|
-
|
|
24
|
-
|
|
25
|
-
|
|
26
|
-
|
|
27
|
-
|
|
28
|
-
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
|
|
45
|
-
]
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
|
|
53
|
-
|
|
54
|
-
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
|
|
60
|
-
|
|
61
|
-
|
|
62
|
-
|
|
63
|
-
|
|
64
|
-
|
|
65
|
-
|
|
66
|
-
|
|
67
|
-
|
|
68
|
-
|
|
69
|
-
|
|
70
|
-
|
|
71
|
-
|
|
72
|
-
|
|
73
|
-
|
|
74
|
-
if
|
|
75
|
-
|
|
76
|
-
|
|
77
|
-
|
|
78
|
-
|
|
79
|
-
|
|
80
|
-
|
|
81
|
-
|
|
82
|
-
|
|
83
|
-
|
|
84
|
-
|
|
85
|
-
|
|
86
|
-
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
|
|
90
|
-
|
|
91
|
-
) -> "np.ndarray[Any, Any]":
|
|
92
|
-
|
|
93
|
-
|
|
94
|
-
|
|
95
|
-
|
|
96
|
-
|
|
97
|
-
|
|
98
|
-
|
|
99
|
-
|
|
100
|
-
|
|
101
|
-
|
|
102
|
-
|
|
103
|
-
|
|
104
|
-
|
|
105
|
-
|
|
106
|
-
|
|
107
|
-
|
|
108
|
-
|
|
109
|
-
|
|
110
|
-
|
|
111
|
-
|
|
112
|
-
|
|
113
|
-
|
|
114
|
-
|
|
115
|
-
|
|
116
|
-
|
|
117
|
-
|
|
118
|
-
|
|
119
|
-
|
|
120
|
-
|
|
121
|
-
|
|
122
|
-
|
|
123
|
-
|
|
124
|
-
|
|
125
|
-
|
|
126
|
-
|
|
127
|
-
|
|
128
|
-
|
|
129
|
-
|
|
130
|
-
|
|
131
|
-
|
|
132
|
-
|
|
133
|
-
|
|
134
|
-
|
|
135
|
-
|
|
136
|
-
|
|
137
|
-
|
|
138
|
-
|
|
139
|
-
|
|
140
|
-
|
|
141
|
-
|
|
142
|
-
|
|
143
|
-
|
|
144
|
-
|
|
145
|
-
|
|
146
|
-
|
|
147
|
-
|
|
148
|
-
|
|
149
|
-
|
|
150
|
-
|
|
151
|
-
|
|
152
|
-
|
|
153
|
-
|
|
154
|
-
|
|
155
|
-
|
|
156
|
-
|
|
157
|
-
|
|
158
|
-
|
|
159
|
-
|
|
160
|
-
|
|
161
|
-
|
|
162
|
-
self.
|
|
163
|
-
|
|
164
|
-
|
|
165
|
-
|
|
166
|
-
|
|
167
|
-
|
|
168
|
-
|
|
169
|
-
|
|
170
|
-
|
|
171
|
-
|
|
172
|
-
|
|
173
|
-
|
|
174
|
-
|
|
175
|
-
|
|
176
|
-
|
|
177
|
-
|
|
178
|
-
|
|
179
|
-
|
|
180
|
-
|
|
181
|
-
|
|
182
|
-
|
|
183
|
-
self.
|
|
184
|
-
|
|
185
|
-
self.
|
|
186
|
-
self.
|
|
187
|
-
self.
|
|
188
|
-
|
|
189
|
-
self.
|
|
190
|
-
|
|
191
|
-
|
|
192
|
-
|
|
193
|
-
|
|
194
|
-
self.
|
|
195
|
-
self.
|
|
196
|
-
self.
|
|
197
|
-
self.
|
|
198
|
-
|
|
199
|
-
|
|
200
|
-
|
|
201
|
-
|
|
202
|
-
|
|
203
|
-
|
|
204
|
-
|
|
205
|
-
|
|
206
|
-
|
|
207
|
-
|
|
208
|
-
|
|
209
|
-
|
|
210
|
-
|
|
211
|
-
|
|
212
|
-
|
|
213
|
-
|
|
214
|
-
|
|
215
|
-
|
|
216
|
-
|
|
217
|
-
|
|
218
|
-
|
|
219
|
-
|
|
220
|
-
|
|
221
|
-
|
|
222
|
-
|
|
223
|
-
|
|
224
|
-
|
|
225
|
-
|
|
226
|
-
|
|
227
|
-
|
|
228
|
-
|
|
229
|
-
|
|
230
|
-
|
|
231
|
-
|
|
232
|
-
|
|
233
|
-
|
|
234
|
-
|
|
235
|
-
|
|
236
|
-
|
|
237
|
-
self.
|
|
238
|
-
|
|
239
|
-
|
|
240
|
-
self.
|
|
241
|
-
|
|
242
|
-
|
|
243
|
-
|
|
244
|
-
|
|
245
|
-
|
|
246
|
-
|
|
247
|
-
|
|
248
|
-
|
|
249
|
-
|
|
250
|
-
|
|
251
|
-
|
|
252
|
-
|
|
253
|
-
|
|
254
|
-
|
|
255
|
-
self.
|
|
256
|
-
self.
|
|
257
|
-
|
|
258
|
-
self.
|
|
259
|
-
|
|
260
|
-
|
|
261
|
-
|
|
262
|
-
|
|
263
|
-
|
|
264
|
-
|
|
265
|
-
|
|
266
|
-
|
|
267
|
-
|
|
268
|
-
|
|
269
|
-
|
|
270
|
-
|
|
271
|
-
|
|
272
|
-
|
|
273
|
-
|
|
274
|
-
)
|
|
275
|
-
|
|
276
|
-
|
|
277
|
-
|
|
278
|
-
|
|
279
|
-
|
|
280
|
-
|
|
281
|
-
|
|
282
|
-
|
|
283
|
-
|
|
284
|
-
|
|
285
|
-
|
|
286
|
-
|
|
287
|
-
|
|
288
|
-
|
|
289
|
-
|
|
290
|
-
|
|
291
|
-
|
|
292
|
-
|
|
293
|
-
|
|
294
|
-
|
|
295
|
-
|
|
296
|
-
|
|
297
|
-
|
|
298
|
-
|
|
299
|
-
|
|
300
|
-
|
|
301
|
-
|
|
302
|
-
|
|
303
|
-
|
|
304
|
-
|
|
305
|
-
|
|
306
|
-
|
|
307
|
-
self.
|
|
308
|
-
|
|
309
|
-
|
|
310
|
-
|
|
311
|
-
|
|
312
|
-
|
|
313
|
-
|
|
314
|
-
|
|
315
|
-
|
|
316
|
-
|
|
317
|
-
and FormatVerify.
|
|
318
|
-
self.in_f,
|
|
319
|
-
|
|
320
|
-
file_info=self.codec_info_orig,
|
|
321
|
-
)
|
|
322
|
-
and FormatVerify.
|
|
323
|
-
self.in_f,
|
|
324
|
-
|
|
325
|
-
|
|
326
|
-
|
|
327
|
-
|
|
328
|
-
|
|
329
|
-
|
|
330
|
-
|
|
331
|
-
|
|
332
|
-
|
|
333
|
-
|
|
334
|
-
|
|
335
|
-
|
|
336
|
-
|
|
337
|
-
|
|
338
|
-
|
|
339
|
-
|
|
340
|
-
|
|
341
|
-
|
|
342
|
-
|
|
343
|
-
|
|
344
|
-
|
|
345
|
-
|
|
346
|
-
|
|
347
|
-
(
|
|
348
|
-
|
|
349
|
-
|
|
350
|
-
|
|
351
|
-
|
|
352
|
-
|
|
353
|
-
|
|
354
|
-
|
|
355
|
-
|
|
356
|
-
|
|
357
|
-
|
|
358
|
-
|
|
359
|
-
|
|
360
|
-
|
|
361
|
-
self.opt_comp.
|
|
362
|
-
|
|
363
|
-
|
|
364
|
-
self.opt_comp.
|
|
365
|
-
|
|
366
|
-
|
|
367
|
-
|
|
368
|
-
|
|
369
|
-
|
|
370
|
-
self.opt_comp.
|
|
371
|
-
|
|
372
|
-
|
|
373
|
-
|
|
374
|
-
|
|
375
|
-
self.opt_comp.
|
|
376
|
-
|
|
377
|
-
|
|
378
|
-
self.opt_comp.
|
|
379
|
-
|
|
380
|
-
|
|
381
|
-
self.opt_comp.
|
|
382
|
-
self.opt_comp.
|
|
383
|
-
|
|
384
|
-
|
|
385
|
-
self.opt_comp.
|
|
386
|
-
|
|
387
|
-
|
|
388
|
-
|
|
389
|
-
|
|
390
|
-
|
|
391
|
-
|
|
392
|
-
|
|
393
|
-
|
|
394
|
-
|
|
395
|
-
|
|
396
|
-
|
|
397
|
-
|
|
398
|
-
|
|
399
|
-
|
|
400
|
-
|
|
401
|
-
|
|
402
|
-
|
|
403
|
-
|
|
404
|
-
self.
|
|
405
|
-
|
|
406
|
-
|
|
407
|
-
|
|
408
|
-
|
|
409
|
-
|
|
410
|
-
|
|
411
|
-
|
|
412
|
-
|
|
413
|
-
|
|
414
|
-
|
|
415
|
-
|
|
416
|
-
|
|
417
|
-
|
|
418
|
-
|
|
419
|
-
|
|
420
|
-
|
|
421
|
-
|
|
422
|
-
|
|
423
|
-
|
|
424
|
-
|
|
425
|
-
|
|
426
|
-
|
|
427
|
-
|
|
428
|
-
|
|
429
|
-
|
|
430
|
-
|
|
431
|
-
|
|
432
|
-
|
|
433
|
-
|
|
434
|
-
|
|
435
|
-
|
|
436
|
-
|
|
437
|
-
self.
|
|
438
|
-
|
|
439
|
-
|
|
440
|
-
|
|
441
|
-
|
|
442
|
-
|
|
443
|
-
self.
|
|
444
|
-
else:
|
|
445
|
-
self.
|
|
446
|
-
|
|
447
|
-
|
|
448
|
-
|
|
449
|
-
|
|
450
|
-
|
|
451
|
-
|
|
452
|
-
|
|
453
|
-
|
|
454
|
-
|
|
455
|
-
|
|
456
|
-
|
|
457
|
-
|
|
458
|
-
|
|
459
|
-
|
|
460
|
-
|
|
461
|
-
|
|
462
|
-
|
|
463
|
-
|
|
464
|
-
|
|
465
|
-
|
|
466
|
-
|
|
467
|
-
|
|
468
|
-
|
|
469
|
-
|
|
470
|
-
|
|
471
|
-
|
|
472
|
-
|
|
473
|
-
|
|
474
|
-
|
|
475
|
-
|
|
476
|
-
|
|
477
|
-
|
|
478
|
-
|
|
479
|
-
|
|
480
|
-
|
|
481
|
-
|
|
482
|
-
|
|
483
|
-
|
|
484
|
-
|
|
485
|
-
|
|
486
|
-
|
|
487
|
-
|
|
488
|
-
|
|
489
|
-
|
|
490
|
-
|
|
491
|
-
|
|
492
|
-
|
|
493
|
-
|
|
494
|
-
|
|
495
|
-
|
|
496
|
-
|
|
497
|
-
|
|
498
|
-
|
|
499
|
-
|
|
500
|
-
|
|
501
|
-
|
|
502
|
-
|
|
503
|
-
|
|
504
|
-
|
|
505
|
-
|
|
506
|
-
|
|
507
|
-
|
|
508
|
-
|
|
509
|
-
|
|
510
|
-
|
|
511
|
-
|
|
512
|
-
|
|
513
|
-
|
|
514
|
-
|
|
515
|
-
|
|
516
|
-
|
|
517
|
-
|
|
518
|
-
|
|
519
|
-
|
|
520
|
-
|
|
521
|
-
|
|
522
|
-
|
|
523
|
-
|
|
524
|
-
|
|
525
|
-
|
|
526
|
-
|
|
527
|
-
|
|
528
|
-
|
|
529
|
-
|
|
530
|
-
|
|
531
|
-
|
|
532
|
-
|
|
533
|
-
|
|
534
|
-
|
|
535
|
-
|
|
536
|
-
|
|
537
|
-
|
|
538
|
-
frame
|
|
539
|
-
|
|
540
|
-
|
|
541
|
-
|
|
542
|
-
|
|
543
|
-
|
|
544
|
-
|
|
545
|
-
|
|
546
|
-
|
|
547
|
-
|
|
548
|
-
|
|
549
|
-
|
|
550
|
-
|
|
551
|
-
|
|
552
|
-
|
|
553
|
-
|
|
554
|
-
|
|
555
|
-
|
|
556
|
-
|
|
557
|
-
|
|
558
|
-
|
|
559
|
-
|
|
560
|
-
|
|
561
|
-
|
|
562
|
-
|
|
563
|
-
|
|
564
|
-
|
|
565
|
-
|
|
566
|
-
|
|
567
|
-
|
|
568
|
-
|
|
569
|
-
|
|
570
|
-
|
|
571
|
-
|
|
572
|
-
|
|
573
|
-
|
|
574
|
-
|
|
575
|
-
|
|
576
|
-
|
|
577
|
-
|
|
578
|
-
|
|
579
|
-
|
|
580
|
-
|
|
581
|
-
|
|
582
|
-
|
|
583
|
-
|
|
584
|
-
|
|
585
|
-
|
|
586
|
-
|
|
587
|
-
|
|
588
|
-
|
|
589
|
-
|
|
590
|
-
|
|
591
|
-
|
|
592
|
-
|
|
593
|
-
|
|
594
|
-
|
|
595
|
-
|
|
596
|
-
|
|
597
|
-
|
|
598
|
-
|
|
599
|
-
|
|
600
|
-
|
|
601
|
-
|
|
602
|
-
|
|
603
|
-
|
|
604
|
-
|
|
605
|
-
|
|
606
|
-
|
|
607
|
-
|
|
608
|
-
|
|
609
|
-
|
|
610
|
-
|
|
611
|
-
|
|
612
|
-
|
|
613
|
-
|
|
614
|
-
|
|
615
|
-
|
|
616
|
-
|
|
617
|
-
|
|
618
|
-
|
|
619
|
-
|
|
620
|
-
|
|
621
|
-
|
|
622
|
-
|
|
623
|
-
|
|
624
|
-
|
|
625
|
-
|
|
626
|
-
|
|
627
|
-
|
|
628
|
-
|
|
629
|
-
|
|
630
|
-
|
|
631
|
-
|
|
632
|
-
|
|
633
|
-
|
|
634
|
-
|
|
635
|
-
|
|
636
|
-
|
|
637
|
-
|
|
638
|
-
|
|
639
|
-
|
|
640
|
-
|
|
641
|
-
|
|
642
|
-
|
|
643
|
-
|
|
644
|
-
|
|
645
|
-
|
|
646
|
-
|
|
647
|
-
|
|
648
|
-
|
|
649
|
-
|
|
650
|
-
|
|
651
|
-
|
|
652
|
-
|
|
653
|
-
|
|
654
|
-
|
|
655
|
-
|
|
656
|
-
|
|
657
|
-
|
|
658
|
-
|
|
659
|
-
|
|
660
|
-
anim = LottieAnimation.
|
|
661
|
-
|
|
662
|
-
|
|
663
|
-
|
|
664
|
-
|
|
665
|
-
|
|
666
|
-
|
|
667
|
-
|
|
668
|
-
|
|
669
|
-
|
|
670
|
-
|
|
671
|
-
|
|
672
|
-
|
|
673
|
-
|
|
674
|
-
|
|
675
|
-
|
|
676
|
-
|
|
677
|
-
|
|
678
|
-
|
|
679
|
-
|
|
680
|
-
|
|
681
|
-
|
|
682
|
-
|
|
683
|
-
|
|
684
|
-
|
|
685
|
-
|
|
686
|
-
|
|
687
|
-
|
|
688
|
-
|
|
689
|
-
|
|
690
|
-
|
|
691
|
-
|
|
692
|
-
|
|
693
|
-
|
|
694
|
-
|
|
695
|
-
if self.
|
|
696
|
-
|
|
697
|
-
|
|
698
|
-
|
|
699
|
-
|
|
700
|
-
|
|
701
|
-
|
|
702
|
-
|
|
703
|
-
|
|
704
|
-
|
|
705
|
-
|
|
706
|
-
|
|
707
|
-
|
|
708
|
-
|
|
709
|
-
|
|
710
|
-
|
|
711
|
-
|
|
712
|
-
|
|
713
|
-
|
|
714
|
-
|
|
715
|
-
|
|
716
|
-
|
|
717
|
-
|
|
718
|
-
|
|
719
|
-
|
|
720
|
-
|
|
721
|
-
|
|
722
|
-
|
|
723
|
-
|
|
724
|
-
|
|
725
|
-
|
|
726
|
-
|
|
727
|
-
|
|
728
|
-
|
|
729
|
-
|
|
730
|
-
|
|
731
|
-
|
|
732
|
-
|
|
733
|
-
|
|
734
|
-
|
|
735
|
-
|
|
736
|
-
|
|
737
|
-
|
|
738
|
-
|
|
739
|
-
|
|
740
|
-
|
|
741
|
-
|
|
742
|
-
|
|
743
|
-
|
|
744
|
-
|
|
745
|
-
|
|
746
|
-
|
|
747
|
-
|
|
748
|
-
|
|
749
|
-
|
|
750
|
-
|
|
751
|
-
|
|
752
|
-
|
|
753
|
-
|
|
754
|
-
|
|
755
|
-
|
|
756
|
-
|
|
757
|
-
|
|
758
|
-
self.
|
|
759
|
-
|
|
760
|
-
):
|
|
761
|
-
|
|
762
|
-
|
|
763
|
-
|
|
764
|
-
|
|
765
|
-
|
|
766
|
-
|
|
767
|
-
|
|
768
|
-
|
|
769
|
-
|
|
770
|
-
|
|
771
|
-
|
|
772
|
-
|
|
773
|
-
|
|
774
|
-
|
|
775
|
-
|
|
776
|
-
|
|
777
|
-
|
|
778
|
-
|
|
779
|
-
|
|
780
|
-
|
|
781
|
-
|
|
782
|
-
|
|
783
|
-
|
|
784
|
-
|
|
785
|
-
|
|
786
|
-
|
|
787
|
-
|
|
788
|
-
|
|
789
|
-
|
|
790
|
-
|
|
791
|
-
|
|
792
|
-
|
|
793
|
-
|
|
794
|
-
frame_current
|
|
795
|
-
|
|
796
|
-
|
|
797
|
-
|
|
798
|
-
|
|
799
|
-
|
|
800
|
-
|
|
801
|
-
|
|
802
|
-
|
|
803
|
-
|
|
804
|
-
|
|
805
|
-
|
|
806
|
-
|
|
807
|
-
|
|
808
|
-
|
|
809
|
-
|
|
810
|
-
|
|
811
|
-
|
|
812
|
-
|
|
813
|
-
|
|
814
|
-
|
|
815
|
-
|
|
816
|
-
|
|
817
|
-
|
|
818
|
-
|
|
819
|
-
|
|
820
|
-
|
|
821
|
-
|
|
822
|
-
|
|
823
|
-
|
|
824
|
-
|
|
825
|
-
|
|
826
|
-
|
|
827
|
-
|
|
828
|
-
|
|
829
|
-
|
|
830
|
-
|
|
831
|
-
|
|
832
|
-
|
|
833
|
-
|
|
834
|
-
|
|
835
|
-
|
|
836
|
-
|
|
837
|
-
|
|
838
|
-
|
|
839
|
-
|
|
840
|
-
|
|
841
|
-
|
|
842
|
-
|
|
843
|
-
|
|
844
|
-
|
|
845
|
-
|
|
846
|
-
|
|
847
|
-
|
|
848
|
-
|
|
849
|
-
|
|
850
|
-
|
|
851
|
-
|
|
852
|
-
|
|
853
|
-
|
|
854
|
-
|
|
855
|
-
|
|
856
|
-
|
|
857
|
-
|
|
858
|
-
|
|
859
|
-
|
|
860
|
-
|
|
861
|
-
|
|
862
|
-
|
|
863
|
-
|
|
864
|
-
|
|
865
|
-
|
|
866
|
-
|
|
867
|
-
|
|
868
|
-
|
|
869
|
-
|
|
870
|
-
|
|
871
|
-
|
|
872
|
-
|
|
873
|
-
|
|
874
|
-
|
|
875
|
-
|
|
876
|
-
|
|
877
|
-
|
|
878
|
-
|
|
879
|
-
|
|
880
|
-
|
|
881
|
-
|
|
882
|
-
|
|
883
|
-
|
|
884
|
-
|
|
885
|
-
|
|
886
|
-
|
|
887
|
-
|
|
888
|
-
|
|
889
|
-
|
|
890
|
-
|
|
891
|
-
|
|
892
|
-
|
|
893
|
-
|
|
894
|
-
|
|
895
|
-
|
|
896
|
-
|
|
897
|
-
|
|
898
|
-
|
|
899
|
-
|
|
900
|
-
|
|
901
|
-
|
|
902
|
-
|
|
903
|
-
|
|
904
|
-
|
|
905
|
-
|
|
906
|
-
|
|
907
|
-
|
|
908
|
-
]
|
|
909
|
-
|
|
910
|
-
|
|
911
|
-
|
|
912
|
-
|
|
913
|
-
|
|
914
|
-
|
|
915
|
-
|
|
916
|
-
|
|
917
|
-
|
|
918
|
-
|
|
919
|
-
|
|
920
|
-
|
|
921
|
-
|
|
922
|
-
|
|
923
|
-
|
|
924
|
-
|
|
925
|
-
|
|
926
|
-
|
|
927
|
-
|
|
928
|
-
|
|
929
|
-
|
|
930
|
-
|
|
931
|
-
|
|
932
|
-
|
|
933
|
-
|
|
934
|
-
|
|
935
|
-
|
|
936
|
-
|
|
937
|
-
|
|
938
|
-
|
|
939
|
-
|
|
940
|
-
|
|
941
|
-
|
|
942
|
-
|
|
943
|
-
|
|
944
|
-
|
|
945
|
-
|
|
946
|
-
|
|
947
|
-
|
|
948
|
-
|
|
949
|
-
|
|
950
|
-
|
|
951
|
-
|
|
952
|
-
|
|
953
|
-
|
|
954
|
-
|
|
955
|
-
|
|
956
|
-
|
|
957
|
-
|
|
958
|
-
|
|
959
|
-
|
|
960
|
-
|
|
961
|
-
|
|
962
|
-
|
|
963
|
-
|
|
964
|
-
|
|
965
|
-
|
|
966
|
-
|
|
967
|
-
|
|
968
|
-
|
|
969
|
-
|
|
970
|
-
|
|
971
|
-
|
|
972
|
-
|
|
973
|
-
|
|
974
|
-
|
|
975
|
-
|
|
976
|
-
|
|
977
|
-
|
|
978
|
-
|
|
979
|
-
|
|
980
|
-
|
|
981
|
-
|
|
982
|
-
|
|
983
|
-
|
|
984
|
-
|
|
985
|
-
|
|
986
|
-
|
|
987
|
-
|
|
988
|
-
|
|
989
|
-
|
|
990
|
-
|
|
991
|
-
|
|
992
|
-
|
|
993
|
-
|
|
994
|
-
|
|
995
|
-
|
|
996
|
-
|
|
997
|
-
|
|
998
|
-
|
|
999
|
-
|
|
1000
|
-
|
|
1001
|
-
|
|
1002
|
-
|
|
1003
|
-
|
|
1004
|
-
|
|
1005
|
-
|
|
1006
|
-
|
|
1007
|
-
|
|
1008
|
-
|
|
1009
|
-
|
|
1010
|
-
|
|
1011
|
-
|
|
1012
|
-
|
|
1013
|
-
|
|
1014
|
-
|
|
1015
|
-
|
|
1016
|
-
|
|
1017
|
-
|
|
1018
|
-
|
|
1019
|
-
|
|
1020
|
-
|
|
1021
|
-
|
|
1022
|
-
|
|
1023
|
-
|
|
1024
|
-
|
|
1025
|
-
|
|
1026
|
-
|
|
1027
|
-
|
|
1028
|
-
|
|
1029
|
-
|
|
1030
|
-
|
|
1031
|
-
|
|
1032
|
-
|
|
1033
|
-
|
|
1034
|
-
|
|
1035
|
-
|
|
1036
|
-
|
|
1037
|
-
|
|
1038
|
-
|
|
1039
|
-
|
|
1040
|
-
|
|
1041
|
-
|
|
1042
|
-
|
|
1043
|
-
|
|
1044
|
-
|
|
1045
|
-
|
|
1046
|
-
|
|
1047
|
-
|
|
1048
|
-
|
|
1049
|
-
|
|
1050
|
-
|
|
1051
|
-
|
|
1052
|
-
|
|
1053
|
-
|
|
1054
|
-
|
|
1055
|
-
|
|
1056
|
-
|
|
1057
|
-
|
|
1058
|
-
|
|
1059
|
-
|
|
1060
|
-
|
|
1061
|
-
|
|
1062
|
-
|
|
1063
|
-
|
|
1064
|
-
|
|
1065
|
-
|
|
1066
|
-
|
|
1067
|
-
|
|
1068
|
-
|
|
1069
|
-
|
|
1070
|
-
|
|
1071
|
-
|
|
1072
|
-
|
|
1073
|
-
|
|
1074
|
-
|
|
1075
|
-
|
|
1076
|
-
|
|
1077
|
-
|
|
1078
|
-
|
|
1079
|
-
|
|
1080
|
-
|
|
1081
|
-
return
|
|
1082
|
-
|
|
1083
|
-
def
|
|
1084
|
-
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
import json
|
|
3
|
+
import os
|
|
4
|
+
from fractions import Fraction
|
|
5
|
+
from io import BytesIO
|
|
6
|
+
from math import ceil, floor, log2
|
|
7
|
+
from pathlib import Path
|
|
8
|
+
from typing import TYPE_CHECKING, Any, Dict, List, Literal, Optional, Tuple, Union, cast
|
|
9
|
+
|
|
10
|
+
import numpy as np
|
|
11
|
+
from bs4 import BeautifulSoup
|
|
12
|
+
from PIL import Image
|
|
13
|
+
from PIL import __version__ as PillowVersion
|
|
14
|
+
from PIL import features
|
|
15
|
+
|
|
16
|
+
from sticker_convert.definitions import RUNTIME_STATE
|
|
17
|
+
from sticker_convert.job_option import CompOption
|
|
18
|
+
from sticker_convert.utils.callback import CallbackProtocol, CallbackReturn
|
|
19
|
+
from sticker_convert.utils.chrome_remotedebug import CRD
|
|
20
|
+
from sticker_convert.utils.files.cache_store import CacheStore
|
|
21
|
+
from sticker_convert.utils.media.codec_info import CodecInfo, rounding
|
|
22
|
+
from sticker_convert.utils.media.format_verify import FormatVerify
|
|
23
|
+
from sticker_convert.utils.translate import get_translator
|
|
24
|
+
|
|
25
|
+
I = get_translator() # noqa: E741
|
|
26
|
+
|
|
27
|
+
if TYPE_CHECKING:
|
|
28
|
+
from av.video.frame import VideoFrame
|
|
29
|
+
from av.video.plane import VideoPlane
|
|
30
|
+
|
|
31
|
+
YUV_RGB_MATRIX = np.array(
|
|
32
|
+
[
|
|
33
|
+
[1.164, 0.000, 1.793],
|
|
34
|
+
[1.164, -0.213, -0.533],
|
|
35
|
+
[1.164, 2.112, 0.000],
|
|
36
|
+
]
|
|
37
|
+
)
|
|
38
|
+
|
|
39
|
+
# Whether animated WebP is supported
|
|
40
|
+
# See https://pillow.readthedocs.io/en/stable/handbook/image-file-formats.html#saving-sequences
|
|
41
|
+
PIL_WEBP_ANIM = cast(bool, features.check("webp_anim")) # type: ignore
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
def get_step_value(
|
|
45
|
+
max_step: Optional[int],
|
|
46
|
+
min_step: Optional[int],
|
|
47
|
+
step: int,
|
|
48
|
+
steps: int,
|
|
49
|
+
power: float = 1.0,
|
|
50
|
+
even: bool = False,
|
|
51
|
+
snap_pow2: bool = False,
|
|
52
|
+
) -> Optional[int]:
|
|
53
|
+
# Power should be between -1 and positive infinity
|
|
54
|
+
# Smaller power = More 'importance' of the parameter
|
|
55
|
+
# Power of 1 is linear relationship
|
|
56
|
+
# e.g. fps has lower power -> Try not to reduce it early on
|
|
57
|
+
|
|
58
|
+
if step > 0:
|
|
59
|
+
factor = pow(step / steps, power)
|
|
60
|
+
else:
|
|
61
|
+
factor = 0
|
|
62
|
+
|
|
63
|
+
if max_step is not None and min_step is not None:
|
|
64
|
+
v = round((max_step - min_step) * step / steps * factor + min_step)
|
|
65
|
+
if snap_pow2 is True and floor(log2(max_step)) >= ceil(log2(min_step)):
|
|
66
|
+
lower_exp = max(floor(log2(v)), ceil(log2(min_step)))
|
|
67
|
+
lower_pow2 = 2**lower_exp
|
|
68
|
+
upper_exp = min(ceil(log2(v)), floor(log2(max_step)))
|
|
69
|
+
upper_pow2 = 2**upper_exp
|
|
70
|
+
if abs(v - lower_pow2) <= abs(v - upper_pow2):
|
|
71
|
+
return lower_pow2
|
|
72
|
+
else:
|
|
73
|
+
return upper_pow2
|
|
74
|
+
if even is True and v % 2 == 1:
|
|
75
|
+
return v + 1
|
|
76
|
+
return v
|
|
77
|
+
return None
|
|
78
|
+
|
|
79
|
+
|
|
80
|
+
def useful_array(
|
|
81
|
+
plane: "VideoPlane", bytes_per_pixel: int = 1, dtype: str = "uint8"
|
|
82
|
+
) -> "np.ndarray[Any, Any]":
|
|
83
|
+
total_line_size = abs(plane.line_size)
|
|
84
|
+
useful_line_size = plane.width * bytes_per_pixel
|
|
85
|
+
arr: "np.ndarray[Any, Any]" = np.frombuffer(cast(bytes, plane), np.uint8)
|
|
86
|
+
if total_line_size != useful_line_size:
|
|
87
|
+
arr = arr.reshape(-1, total_line_size)[:, 0:useful_line_size].reshape(-1)
|
|
88
|
+
return arr.view(np.dtype(dtype))
|
|
89
|
+
|
|
90
|
+
|
|
91
|
+
def yuva_to_rgba(frame: "VideoFrame") -> "np.ndarray[Any, Any]":
|
|
92
|
+
# https://stackoverflow.com/questions/72308308/converting-yuv-to-rgb-in-python-coefficients-work-with-array-dont-work-with-n
|
|
93
|
+
|
|
94
|
+
width = frame.width
|
|
95
|
+
height = frame.height
|
|
96
|
+
|
|
97
|
+
y = useful_array(frame.planes[0]).reshape(height, width)
|
|
98
|
+
u = useful_array(frame.planes[1]).reshape(height // 2, width // 2)
|
|
99
|
+
v = useful_array(frame.planes[2]).reshape(height // 2, width // 2)
|
|
100
|
+
a = useful_array(frame.planes[3]).reshape(height, width)
|
|
101
|
+
|
|
102
|
+
u = u.repeat(2, axis=0).repeat(2, axis=1) # type: ignore
|
|
103
|
+
v = v.repeat(2, axis=0).repeat(2, axis=1) # type: ignore
|
|
104
|
+
|
|
105
|
+
y = y.reshape((y.shape[0], y.shape[1], 1)) # type: ignore
|
|
106
|
+
u = u.reshape((u.shape[0], u.shape[1], 1)) # type: ignore
|
|
107
|
+
v = v.reshape((v.shape[0], v.shape[1], 1)) # type: ignore
|
|
108
|
+
a = a.reshape((a.shape[0], a.shape[1], 1)) # type: ignore
|
|
109
|
+
|
|
110
|
+
yuv_array = np.concatenate((y, u, v), axis=2)
|
|
111
|
+
|
|
112
|
+
yuv_array = yuv_array.astype(np.float32)
|
|
113
|
+
yuv_array[:, :, 0] = (
|
|
114
|
+
yuv_array[:, :, 0].clip(16, 235).astype(yuv_array.dtype) - 16 # type: ignore
|
|
115
|
+
)
|
|
116
|
+
yuv_array[:, :, 1:] = (
|
|
117
|
+
yuv_array[:, :, 1:].clip(16, 240).astype(yuv_array.dtype) - 128 # type: ignore
|
|
118
|
+
)
|
|
119
|
+
|
|
120
|
+
rgb_array = np.matmul(yuv_array, YUV_RGB_MATRIX.T).clip(0, 255).astype("uint8")
|
|
121
|
+
|
|
122
|
+
return np.concatenate((rgb_array, a), axis=2)
|
|
123
|
+
|
|
124
|
+
|
|
125
|
+
class StickerConvert:
|
|
126
|
+
def __init__(
|
|
127
|
+
self,
|
|
128
|
+
in_f: Union[Path, Tuple[Path, bytes]],
|
|
129
|
+
out_f: Path,
|
|
130
|
+
opt_comp: CompOption,
|
|
131
|
+
cb: CallbackProtocol,
|
|
132
|
+
# cb_return: CallbackReturn
|
|
133
|
+
) -> None:
|
|
134
|
+
self.MSG_START_COMP = I("[I] Start compressing {} -> {}")
|
|
135
|
+
self.MSG_SKIP_COMP = I(
|
|
136
|
+
"[S] Compatible file found, skip compress and just copy {} -> {}"
|
|
137
|
+
)
|
|
138
|
+
self.MSG_COMP = I(
|
|
139
|
+
"[C] Compressing {} -> {} res={}x{}, quality={}, fps={}, color={} (step {}-{}-{})"
|
|
140
|
+
)
|
|
141
|
+
self.MSG_REDO_COMP = I(
|
|
142
|
+
"[{}] Compressed {} -> {} but size {} {} limit {}, recompressing"
|
|
143
|
+
)
|
|
144
|
+
self.MSG_DONE_COMP = I("[S] Successful compression {} -> {} size {} (step {})")
|
|
145
|
+
self.MSG_FAIL_COMP = I(
|
|
146
|
+
"[F] Failed Compression {} -> {}, "
|
|
147
|
+
"cannot get below limit {} with lowest quality under current settings (Best size: {})"
|
|
148
|
+
)
|
|
149
|
+
self.MSG_QUANT_NO_ALPHA = I(
|
|
150
|
+
"[W] {} does not support RGBA, defaulted to fastoctree quantization"
|
|
151
|
+
)
|
|
152
|
+
self.MSG_SVG_LONG = I("[W] Importing SVG takes long time")
|
|
153
|
+
|
|
154
|
+
self.in_f: Union[bytes, Path]
|
|
155
|
+
if isinstance(in_f, Path):
|
|
156
|
+
self.in_f = in_f
|
|
157
|
+
self.in_f_name = self.in_f.name
|
|
158
|
+
self.in_f_path = in_f
|
|
159
|
+
self.codec_info_orig = CodecInfo(self.in_f)
|
|
160
|
+
else:
|
|
161
|
+
self.in_f = in_f[1]
|
|
162
|
+
self.in_f_name = Path(in_f[0]).name
|
|
163
|
+
self.in_f_path = in_f[0]
|
|
164
|
+
self.codec_info_orig = CodecInfo(in_f[1], Path(in_f[0]).suffix)
|
|
165
|
+
|
|
166
|
+
valid_formats: List[str] = []
|
|
167
|
+
for i in opt_comp.get_format():
|
|
168
|
+
valid_formats.extend(i)
|
|
169
|
+
|
|
170
|
+
valid_ext = False
|
|
171
|
+
self.out_f = Path()
|
|
172
|
+
if len(valid_formats) == 0 or Path(out_f).suffix in valid_formats:
|
|
173
|
+
self.out_f = Path(out_f)
|
|
174
|
+
valid_ext = True
|
|
175
|
+
|
|
176
|
+
if not valid_ext:
|
|
177
|
+
if self.codec_info_orig.is_animated or opt_comp.fake_vid:
|
|
178
|
+
ext = opt_comp.format_vid[0]
|
|
179
|
+
else:
|
|
180
|
+
ext = opt_comp.format_img[0]
|
|
181
|
+
self.out_f = out_f.with_suffix(ext)
|
|
182
|
+
|
|
183
|
+
self.out_f_name: str = self.out_f.name
|
|
184
|
+
|
|
185
|
+
self.cb = cb
|
|
186
|
+
self.frames_raw: "List[np.ndarray[Any, Any]]" = []
|
|
187
|
+
self.frames_processed: "List[np.ndarray[Any, Any]]" = []
|
|
188
|
+
self.opt_comp: CompOption = opt_comp
|
|
189
|
+
if not self.opt_comp.steps:
|
|
190
|
+
self.opt_comp.steps = 1
|
|
191
|
+
|
|
192
|
+
self.size: int = 0
|
|
193
|
+
self.size_max: Optional[int] = None
|
|
194
|
+
self.res_w: Optional[int] = None
|
|
195
|
+
self.res_h: Optional[int] = None
|
|
196
|
+
self.quality: Optional[int] = None
|
|
197
|
+
self.fps: Optional[Fraction] = None
|
|
198
|
+
self.color: Optional[int] = None
|
|
199
|
+
|
|
200
|
+
self.bg_color: Optional[Tuple[int, int, int, int]] = None
|
|
201
|
+
if self.opt_comp.bg_color:
|
|
202
|
+
r, g, b, a = bytes.fromhex(self.opt_comp.bg_color)
|
|
203
|
+
self.bg_color = (r, g, b, a)
|
|
204
|
+
|
|
205
|
+
self.tmp_f: BytesIO = BytesIO()
|
|
206
|
+
self.result: Optional[bytes] = None
|
|
207
|
+
self.result_size: int = 0
|
|
208
|
+
self.result_step: Optional[int] = None
|
|
209
|
+
|
|
210
|
+
self.apngasm = None
|
|
211
|
+
|
|
212
|
+
@staticmethod
|
|
213
|
+
def convert(
|
|
214
|
+
in_f: Union[Path, Tuple[Path, bytes]],
|
|
215
|
+
out_f: Path,
|
|
216
|
+
opt_comp: CompOption,
|
|
217
|
+
cb: CallbackProtocol,
|
|
218
|
+
_cb_return: CallbackReturn,
|
|
219
|
+
) -> Tuple[bool, Path, Union[None, bytes, Path], int]:
|
|
220
|
+
sticker = StickerConvert(in_f, out_f, opt_comp, cb)
|
|
221
|
+
result = sticker._convert()
|
|
222
|
+
cb.put("update_bar")
|
|
223
|
+
return result
|
|
224
|
+
|
|
225
|
+
def _convert(self) -> Tuple[bool, Path, Union[None, bytes, Path], int]:
|
|
226
|
+
result = self.check_if_compatible()
|
|
227
|
+
if result:
|
|
228
|
+
return self.compress_done(result)
|
|
229
|
+
|
|
230
|
+
self.cb.put((self.MSG_START_COMP.format(self.in_f_name, self.out_f_name)))
|
|
231
|
+
|
|
232
|
+
steps_list = self.generate_steps_list()
|
|
233
|
+
|
|
234
|
+
step_lower = 0
|
|
235
|
+
step_upper = self.opt_comp.steps
|
|
236
|
+
|
|
237
|
+
if self.codec_info_orig.is_animated is True:
|
|
238
|
+
self.size_max = self.opt_comp.size_max_vid
|
|
239
|
+
else:
|
|
240
|
+
self.size_max = self.opt_comp.size_max_img
|
|
241
|
+
|
|
242
|
+
if self.size_max in (None, 0):
|
|
243
|
+
# No limit to size, create the best quality result
|
|
244
|
+
step_current = 0
|
|
245
|
+
else:
|
|
246
|
+
step_current = int(rounding((step_lower + step_upper) / 2))
|
|
247
|
+
|
|
248
|
+
self.frames_import()
|
|
249
|
+
while True:
|
|
250
|
+
param = steps_list[step_current]
|
|
251
|
+
self.res_w = param[0]
|
|
252
|
+
self.res_h = param[1]
|
|
253
|
+
self.quality = param[2]
|
|
254
|
+
if param[3] and self.codec_info_orig.fps:
|
|
255
|
+
fps_tmp = min(param[3], self.codec_info_orig.fps)
|
|
256
|
+
self.fps = self.fix_fps(fps_tmp)
|
|
257
|
+
else:
|
|
258
|
+
self.fps = Fraction(0)
|
|
259
|
+
self.color = param[4]
|
|
260
|
+
|
|
261
|
+
self.tmp_f = BytesIO()
|
|
262
|
+
msg = self.MSG_COMP.format(
|
|
263
|
+
self.in_f_name,
|
|
264
|
+
self.out_f_name,
|
|
265
|
+
self.res_w,
|
|
266
|
+
self.res_h,
|
|
267
|
+
self.quality,
|
|
268
|
+
int(self.fps),
|
|
269
|
+
self.color,
|
|
270
|
+
step_lower,
|
|
271
|
+
step_current,
|
|
272
|
+
step_upper,
|
|
273
|
+
)
|
|
274
|
+
self.cb.put(msg)
|
|
275
|
+
|
|
276
|
+
self.frames_processed = self.frames_drop(self.frames_raw)
|
|
277
|
+
self.frames_processed = self.frames_resize(self.frames_processed)
|
|
278
|
+
self.frames_export()
|
|
279
|
+
|
|
280
|
+
self.tmp_f.seek(0)
|
|
281
|
+
self.size = self.tmp_f.getbuffer().nbytes
|
|
282
|
+
|
|
283
|
+
if not self.size_max or (
|
|
284
|
+
self.size <= self.size_max and self.size >= self.result_size
|
|
285
|
+
):
|
|
286
|
+
self.result = self.tmp_f.read()
|
|
287
|
+
self.result_size = self.size
|
|
288
|
+
self.result_step = step_current
|
|
289
|
+
|
|
290
|
+
if (
|
|
291
|
+
step_upper - step_lower > 0
|
|
292
|
+
and step_current != step_lower
|
|
293
|
+
and self.size_max
|
|
294
|
+
):
|
|
295
|
+
if self.size <= self.size_max:
|
|
296
|
+
sign = "<"
|
|
297
|
+
step_upper = step_current
|
|
298
|
+
else:
|
|
299
|
+
sign = ">"
|
|
300
|
+
step_lower = step_current
|
|
301
|
+
if step_current == step_lower + 1:
|
|
302
|
+
step_current = step_lower
|
|
303
|
+
else:
|
|
304
|
+
step_current = int(rounding((step_lower + step_upper) / 2))
|
|
305
|
+
self.recompress(sign)
|
|
306
|
+
elif self.result:
|
|
307
|
+
return self.compress_done(self.result, self.result_step)
|
|
308
|
+
else:
|
|
309
|
+
return self.compress_fail()
|
|
310
|
+
|
|
311
|
+
def check_if_compatible(self) -> Optional[bytes]:
    """Return the input bytes untouched when the file already satisfies every
    output requirement (format, resolution, fps, size, duration), skipping
    compression entirely; return None when compression is needed.
    """
    f_fmt = self.opt_comp.get_format()

    # Issue #260: Some webp file not accepted by Whatsapp
    if ".webp" in f_fmt[0] or ".webp" in f_fmt[1]:
        return None

    # Each check short-circuits, mirroring the original chained condition
    if not FormatVerify.check_format(
        self.in_f,
        fmt=f_fmt,
        file_info=self.codec_info_orig,
    ):
        return None
    if not FormatVerify.check_file_res(
        self.in_f, res=self.opt_comp.get_res(), file_info=self.codec_info_orig
    ):
        return None
    if not FormatVerify.check_file_fps(
        self.in_f, fps=self.opt_comp.get_fps(), file_info=self.codec_info_orig
    ):
        return None
    if not FormatVerify.check_file_size(
        self.in_f,
        size=self.opt_comp.get_size_max(),
        file_info=self.codec_info_orig,
    ):
        return None
    if not FormatVerify.check_file_duration(
        self.in_f,
        duration=self.opt_comp.get_duration(),
        file_info=self.codec_info_orig,
    ):
        return None

    self.cb.put((self.MSG_SKIP_COMP.format(self.in_f_name, self.out_f_name)))

    if isinstance(self.in_f, Path):
        with open(self.in_f, "rb") as f:
            result = f.read()
        self.result_size = os.path.getsize(self.in_f)
    else:
        result = self.in_f
        self.result_size = len(self.in_f)

    return result
|
|
352
|
+
|
|
353
|
+
def generate_steps_list(self) -> List[Tuple[Optional[int], ...]]:
    """Build the table of compression parameters, one 5-tuple
    (res_w, res_h, quality, fps, color) per step, ordered from the
    highest-quality step (opt_comp.steps) down to step 0.
    """
    oc = self.opt_comp
    # Video-like and webp outputs need even dimensions
    need_even = self.out_f.suffix in (".webm", ".mp4", ".mkv", ".webp")

    steps_list: List[Tuple[Optional[int], ...]] = []
    for step in range(oc.steps, -1, -1):
        res_w = get_step_value(
            oc.res_w_max,
            oc.res_w_min,
            step,
            oc.steps,
            oc.res_power,
            need_even,
            oc.res_snap_pow2,
        )
        res_h = get_step_value(
            oc.res_h_max,
            oc.res_h_min,
            step,
            oc.steps,
            oc.res_power,
            need_even,
            oc.res_snap_pow2,
        )
        quality = get_step_value(
            oc.quality_max, oc.quality_min, step, oc.steps, oc.quality_power
        )
        fps = get_step_value(oc.fps_max, oc.fps_min, step, oc.steps, oc.fps_power)
        color = get_step_value(
            oc.color_max, oc.color_min, step, oc.steps, oc.color_power
        )
        steps_list.append((res_w, res_h, quality, fps, color))

    return steps_list
|
|
402
|
+
|
|
403
|
+
def recompress(self, sign: str) -> None:
    """Report via the callback that another compression pass is needed.

    sign is "<" when the last result was under size_max (search lower
    steps) or ">" when it was over (search higher steps).
    """
    self.cb.put(
        self.MSG_REDO_COMP.format(
            sign, self.in_f_name, self.out_f_name, self.size, sign, self.size_max
        )
    )
|
|
408
|
+
|
|
409
|
+
def compress_fail(
    self,
) -> Tuple[bool, Path, Union[None, bytes, Path], int]:
    """Report that no step produced a file within size_max and return the
    failure tuple (False, input path, output path, last size)."""
    self.cb.put(
        self.MSG_FAIL_COMP.format(
            self.in_f_name, self.out_f_name, self.size_max, self.size
        )
    )
    return False, self.in_f_path, self.out_f, self.size
|
|
418
|
+
|
|
419
|
+
def compress_done(
    self, data: bytes, result_step: Optional[int] = None
) -> Tuple[bool, Path, Union[None, bytes, Path], int]:
    """Finalize a successful compression.

    The stem of out_f selects the destination: "none" discards the data,
    "bytes" returns it in-memory, anything else writes it to disk.
    Emits a done message when result_step is given.
    """
    out_f: Union[None, bytes, Path]

    stem = self.out_f.stem
    if stem == "none":
        out_f = None
    elif stem == "bytes":
        out_f = data
    else:
        out_f = self.out_f
        with open(self.out_f, "wb+") as f:
            f.write(data)

    if result_step is not None:
        self.cb.put(
            self.MSG_DONE_COMP.format(
                self.in_f_name, self.out_f_name, self.result_size, result_step
            )
        )

    return True, self.in_f_path, out_f, self.result_size
|
|
440
|
+
|
|
441
|
+
def frames_import(self) -> None:
    """Decode self.in_f into raw frames, dispatching on the file suffix
    (taken from in_f when it is a Path, otherwise from in_f_name)."""
    if isinstance(self.in_f, Path):
        suffix = self.in_f.suffix
    else:
        suffix = Path(self.in_f_name).suffix

    if suffix in (".tgs", ".lottie", ".json"):
        self._frames_import_lottie()
        return
    if suffix in (".webp", ".apng", ".png", ".gif"):
        # ffmpeg do not support webp decoding (yet)
        # ffmpeg could fail to decode apng if file is buggy
        self._frames_import_pillow()
        return
    if suffix == ".svg":
        self._frames_import_svg()
        return
    self._frames_import_pyav()
|
|
457
|
+
|
|
458
|
+
def _frames_import_svg(self) -> None:
    """Rasterize an SVG (static or SMIL-animated) into RGBA frames by
    driving a headless Chrome/Chromium instance (CRD).

    Raises:
        RuntimeError: If no Chrome/Chromium executable can be located.
    """
    width = self.codec_info_orig.res[0]
    height = self.codec_info_orig.res[1]

    # Lazily launch one shared headless browser, cached in RUNTIME_STATE
    # so subsequent svg imports reuse the same process.
    if RUNTIME_STATE.get("crd") is None:
        chrome_path: Optional[str]
        if self.opt_comp.chromium_path:
            chrome_path = self.opt_comp.chromium_path
        else:
            chrome_path = CRD.get_chromium_path()
        args = [
            "--headless",
            "--kiosk",
            "--disable-extensions",
            "--disable-infobars",
            "--disable-gpu",
            "--disable-gpu-rasterization",
            "--hide-scrollbars",
            "--force-device-scale-factor=1",
            "about:blank",
        ]
        if chrome_path is None:
            raise RuntimeError("[F] Chrome/Chromium required for importing svg")
        self.cb.put(self.MSG_SVG_LONG)
        RUNTIME_STATE["crd"] = CRD(chrome_path, args=args)
        RUNTIME_STATE["crd"].connect(-1)  # type: ignore

    crd = cast(CRD, RUNTIME_STATE["crd"])
    if isinstance(self.in_f, bytes):
        svg = self.in_f.decode()
    else:
        with open(self.in_f) as f:
            svg = f.read()
    soup = BeautifulSoup(svg, "html.parser")
    svg_tag = soup.find_all("svg")[0]

    # Ensure explicit dimensions so the browser renders at the expected size
    if svg_tag.get("width") is None:
        svg_tag["width"] = width
    if svg_tag.get("height") is None:
        svg_tag["height"] = height
    svg = str(soup)

    crd.open_html_str(svg)
    crd.set_transparent_bg()
    init_js = 'svg = document.getElementsByTagName("svg")[0];'
    if self.codec_info_orig.fps > 0:
        # Pause the animation so frames can be stepped deterministically below
        init_js += "svg.pauseAnimations();"
    init_js += "JSON.stringify(svg.getBoundingClientRect());"
    bound = json.loads(
        json.loads(crd.exec_js(init_js))["result"]["result"]["value"]
    )
    # Screenshot clip region anchored at the svg element's on-page position
    clip = {
        "x": bound["x"],
        "y": bound["y"],
        "width": width,
        "height": height,
        "scale": 1,
    }

    if self.codec_info_orig.fps > 0:
        # Step the SVG clock through evenly spaced times (seconds) and
        # screenshot each position
        for i in range(self.codec_info_orig.frames):
            curr_time = (
                i
                / self.codec_info_orig.frames
                * self.codec_info_orig.duration
                / 1000
            )
            crd.exec_js(f"svg.setCurrentTime({curr_time})")
            self.frames_raw.append(np.asarray(crd.screenshot(clip)))
    else:
        # Static svg: single frame
        self.frames_raw.append(np.asarray(crd.screenshot(clip)))
|
|
529
|
+
|
|
530
|
+
def _frames_import_pillow(self) -> None:
    """Decode webp/apng/png/gif into RGBA frames with Pillow.

    Animated inputs are resampled onto a fixed clock of 1/fps ms per output
    frame, replicating source frames whose display duration spans several
    output ticks (frame durations may be uneven).
    """
    with Image.open(self.in_f) as im:
        # Note: im.convert("RGBA") would return rgba image of current frame only
        if (
            "n_frames" in dir(im)
            and im.n_frames != 0
            and self.codec_info_orig.fps != 0.0
        ):
            # Pillow is not reliable for getting webp frame durations
            durations: Optional[List[int]]
            if im.format == "WEBP":
                _, _, _, durations = CodecInfo._get_file_fps_frames_duration_webp(  # type: ignore
                    self.in_f
                )
            else:
                durations = None

            # Walk a virtual clock in 1/fps increments; advance the source
            # frame whenever the clock passes its cumulative start time.
            duration_ptr = 0.0
            duration_inc = 1 / self.codec_info_orig.fps * 1000
            frame = 0
            if durations is None:
                # Fall back to Pillow's per-frame duration (default 1000 ms)
                next_frame_start_duration = cast(int, im.info.get("duration", 1000))
            else:
                next_frame_start_duration = durations[0]
            while True:
                self.frames_raw.append(np.asarray(im.convert("RGBA")))
                duration_ptr += duration_inc
                if duration_ptr >= next_frame_start_duration:
                    frame += 1
                    if frame == im.n_frames:
                        break
                    im.seek(frame)

                    if durations is None:
                        next_frame_start_duration += cast(
                            int, im.info.get("duration", 1000)
                        )
                    else:
                        next_frame_start_duration += durations[frame]
        else:
            # Static image: single RGBA frame
            self.frames_raw.append(np.asarray(im.convert("RGBA")))
|
|
571
|
+
|
|
572
|
+
def _frames_import_pyav(self) -> None:
    """Decode a video into RGBA frames using PyAV/ffmpeg.

    vp8/vp9 streams are decoded with the libvpx decoders (preserves alpha),
    odd dimensions are padded to even before pixel-format conversion, and
    the padding is cropped away again afterwards.
    """
    import av
    from av.codec.context import CodecContext
    from av.container.input import InputContainer
    from av.video.codeccontext import VideoCodecContext
    from av.video.frame import VideoFrame

    # Crashes when handling some webm in yuv420p and convert to rgba
    # https://github.com/PyAV-Org/PyAV/issues/1166
    file: Union[BytesIO, str]
    if isinstance(self.in_f, Path):
        file = self.in_f.as_posix()
    else:
        file = BytesIO(self.in_f)
    with av.open(file) as container:
        container = cast(InputContainer, container)
        context = container.streams.video[0].codec_context
        # Swap in the libvpx decoders so vp8/vp9 alpha is not dropped
        if context.name == "vp8":
            context = CodecContext.create("libvpx", "r")
        elif context.name == "vp9":
            context = cast(
                VideoCodecContext, CodecContext.create("libvpx-vp9", "r")
            )

        for packet in container.demux(container.streams.video):
            for frame in context.decode(packet):
                width_orig = frame.width
                height_orig = frame.height

                # Need to pad frame to even dimension first
                if width_orig % 2 == 1 or height_orig % 2 == 1:
                    from av.filter import Graph

                    width_new = width_orig + width_orig % 2
                    height_new = height_orig + height_orig % 2

                    # buffer -> pad (transparent) -> buffersink filter graph
                    graph = Graph()
                    in_src = graph.add_buffer(template=container.streams.video[0])
                    pad = graph.add(
                        "pad", f"{width_new}:{height_new}:0:0:color=#00000000"
                    )
                    in_src.link_to(pad)
                    sink = graph.add("buffersink")
                    pad.link_to(sink)
                    graph.configure()

                    graph.push(frame)
                    frame_resized = cast(VideoFrame, graph.pull())
                else:
                    frame_resized = frame

                # yuva420p may cause crash
                # Not safe to directly call frame.to_ndarray(format="rgba")
                # https://github.com/PyAV-Org/PyAV/discussions/1510
                # if int(av.__version__.split(".")[0]) >= 14:
                #     rgba_array = frame_resized.to_ndarray(format="rgba")
                if frame_resized.format.name == "yuv420p":
                    # Opaque frame: take rgb24 and stack a fully opaque alpha
                    rgb_array = frame_resized.to_ndarray(format="rgb24")
                    rgba_array = np.dstack(
                        (
                            rgb_array,
                            cast(
                                np.ndarray[Any, np.dtype[np.uint8]],
                                np.zeros(rgb_array.shape[:2], dtype=np.uint8) + 255,
                            ),
                        )
                    )
                else:
                    # Convert to yuva420p, then to rgba with the project helper
                    frame_resized = frame_resized.reformat(
                        format="yuva420p",
                        dst_colorspace=1,
                    )
                    rgba_array = yuva_to_rgba(frame_resized)

                # Remove pixels that was added to make dimensions even
                rgba_array = rgba_array[0:height_orig, 0:width_orig]
                self.frames_raw.append(rgba_array)
|
|
649
|
+
|
|
650
|
+
def _frames_import_lottie(self) -> None:
    """Render a lottie/tgs/json animation into RGBA frames via rlottie."""
    from rlottie_python.rlottie_wrapper import LottieAnimation

    in_f_is_path = isinstance(self.in_f, Path)
    suffix = self.in_f.suffix if in_f_is_path else Path(self.in_f_name).suffix

    # tgs is gzipped lottie json; rlottie loads files directly, in-memory
    # data must be decompressed/decoded first
    if suffix == ".tgs":
        if in_f_is_path:
            anim = LottieAnimation.from_tgs(self.in_f.as_posix())
        else:
            import gzip

            with gzip.open(BytesIO(self.in_f)) as f:
                anim = LottieAnimation.from_data(f.read().decode(encoding="utf-8"))
    else:
        if in_f_is_path:
            anim = LottieAnimation.from_file(self.in_f.as_posix())
        else:
            anim = LottieAnimation.from_data(self.in_f.decode("utf-8"))

    total_frames = anim.lottie_animation_get_totalframe()
    for frame_num in range(total_frames):
        rendered = np.asarray(anim.render_pillow_frame(frame_num=frame_num))
        self.frames_raw.append(rendered)

    anim.lottie_animation_destroy()
|
|
678
|
+
|
|
679
|
+
def determine_bg_color(self) -> Tuple[int, int, int, int]:
    """Choose a fully transparent background color based on frame brightness.

    Averages the alpha-weighted RGB of all non-transparent pixels across
    self.frames_raw; dark content gets a white (255,255,255,0) background,
    bright content a black (0,0,0,0) one, so edges stay visible after
    compositing.

    Returns:
        RGBA tuple with alpha 0.
    """
    # Guard: with no frames the original code divided by len(frames_raw)=0.
    # Fall back to the same color the "dark/empty" path would pick.
    if not self.frames_raw:
        return (255, 255, 255, 0)

    mean_total = 0.0
    # Calculate average color of all frames for selecting background color
    for frame in self.frames_raw:
        s = frame.shape
        colors = frame.reshape((-1, s[2]))  # type: ignore
        # Do not count in alpha=0
        # If alpha > 0, use alpha as weight
        colors = colors[colors[:, 3] != 0]
        if colors.shape[0] != 0:
            alphas = colors[:, 3] / 255
            r_mean = cast(float, np.mean(colors[:, 0] * alphas))
            g_mean = cast(float, np.mean(colors[:, 1] * alphas))
            b_mean = cast(float, np.mean(colors[:, 2] * alphas))
            mean_total += (r_mean + g_mean + b_mean) / 3

    if mean_total / len(self.frames_raw) < 128:
        return (255, 255, 255, 0)
    return (0, 0, 0, 0)
|
|
699
|
+
|
|
700
|
+
def frames_resize(
    self, frames_in: "List[np.ndarray[Any, Any]]"
) -> "List[np.ndarray[Any, Any]]":
    """Resize every frame to (self.res_w, self.res_h), preserving aspect
    ratio, centered on a solid background canvas with optional padding.

    Side effects: fills self.res_w / self.res_h from the first frame when
    unset, and lazily determines self.bg_color.
    """
    frames_out: "List[np.ndarray[Any, Any]]" = []

    # Map the configured scale_filter name to a Pillow resampling mode
    resample: Literal[0, 1, 2, 3, 4, 5]
    if self.opt_comp.scale_filter == "nearest":
        resample = Image.NEAREST
    elif self.opt_comp.scale_filter == "box":
        resample = Image.BOX
    elif self.opt_comp.scale_filter == "bilinear":
        resample = Image.BILINEAR
    elif self.opt_comp.scale_filter == "hamming":
        resample = Image.HAMMING
    elif self.opt_comp.scale_filter == "bicubic":
        resample = Image.BICUBIC
    elif self.opt_comp.scale_filter == "lanczos":
        resample = Image.LANCZOS
    else:
        # Unknown filter name: default to bicubic
        resample = Image.BICUBIC

    if self.bg_color is None:
        self.bg_color = self.determine_bg_color()

    for frame in frames_in:
        with Image.fromarray(frame, "RGBA") as im:  # type: ignore
            width, height = im.size

            # Default target resolution to the source resolution
            if self.res_w is None:
                self.res_w = width
            if self.res_h is None:
                self.res_h = height

            # Shrink content by padding_percent, then fit inside target box
            scaling = 1 - (self.opt_comp.padding_percent / 100)
            if width / self.res_w > height / self.res_h:
                # Width is the limiting dimension
                width_new = int(self.res_w * scaling)
                height_new = int(height * self.res_w / width * scaling)
            else:
                # Height is the limiting dimension
                height_new = int(self.res_h * scaling)
                width_new = int(width * self.res_h / height * scaling)

            with im.resize((width_new, height_new), resample=resample) as im_resized:
                with Image.new(
                    "RGBA", (self.res_w, self.res_h), self.bg_color
                ) as im_new:
                    # Center the resized content on the background canvas
                    im_new.alpha_composite(
                        im_resized,
                        ((self.res_w - width_new) // 2, (self.res_h - height_new) // 2),
                    )
                    frames_out.append(np.asarray(im_new))

    return frames_out
|
|
752
|
+
|
|
753
|
+
def frames_drop(
    self, frames_in: "List[np.ndarray[Any, Any]]"
) -> "List[np.ndarray[Any, Any]]":
    """Resample frames_in from the original fps down to self.fps, speeding
    playback up or down so the duration fits within the configured
    duration_min/duration_max bounds.

    Returns a new list; frames_in is not modified.
    """
    # Guard: empty input would crash on frames_in[0]
    if not frames_in:
        return []

    if (
        not self.codec_info_orig.is_animated
        or not self.fps
        # Fix: this previously tested len(self.frames_processed) — the
        # *previous* pass's output — instead of the input being resampled
        or len(frames_in) == 1
    ):
        return [frames_in[0]]

    frames_out: "List[np.ndarray[Any, Any]]" = []

    # fps_ratio: 1 frame in new anim equal to how many frame in old anim
    # speed_ratio: How much to speed up / slow down
    fps_ratio = self.codec_info_orig.fps / self.fps
    if (
        self.opt_comp.duration_min
        and self.codec_info_orig.duration < self.opt_comp.duration_min
    ):
        speed_ratio = self.codec_info_orig.duration / self.opt_comp.duration_min
    elif (
        self.opt_comp.duration_max
        and self.codec_info_orig.duration > self.opt_comp.duration_max
    ):
        speed_ratio = self.codec_info_orig.duration / self.opt_comp.duration_max
    else:
        speed_ratio = 1

    # How many frames to advance in original video for each frame of output video
    frame_increment = fps_ratio * speed_ratio

    frames_out_min = None
    frames_out_max = None
    if self.opt_comp.duration_min:
        frames_out_min = ceil(self.fps * self.opt_comp.duration_min / 1000)
    if self.opt_comp.duration_max:
        frames_out_max = floor(self.fps * self.opt_comp.duration_max / 1000)

    frame_current = 0
    frame_current_float = 0.0
    while True:
        if frame_current <= len(frames_in) - 1 and not (
            frames_out_max and len(frames_out) == frames_out_max
        ):
            frames_out.append(frames_in[frame_current])
        else:
            # Ran out of input (or hit the maximum): pad with the last
            # frame until the minimum output length is satisfied
            while len(frames_out) == 0 or (
                frames_out_min and len(frames_out) < frames_out_min
            ):
                frames_out.append(frames_in[-1])
            return frames_out
        frame_current_float += frame_increment
        frame_current = int(rounding(frame_current_float))
|
|
806
|
+
|
|
807
|
+
def frames_export(self) -> None:
    """Encode self.frames_processed into self.tmp_f, picking an encoder
    from out_f's suffix and whether the result is animated."""
    suffix = self.out_f.suffix
    is_animated = len(self.frames_processed) > 1 and self.fps

    if suffix in (".apng", ".png"):
        if is_animated:
            self._frames_export_apng()
        else:
            self._frames_export_png()
        return
    if suffix in (".gif", ".webp"):
        self._frames_export_pil_anim()
        return
    if suffix in (".webm", ".mp4", ".mkv") or is_animated:
        self._frames_export_pyav()
        return
    self._frames_export_pil()
|
|
820
|
+
|
|
821
|
+
def _check_dup(self) -> bool:
|
|
822
|
+
if len(self.frames_processed) == 1:
|
|
823
|
+
return False
|
|
824
|
+
|
|
825
|
+
prev_frame = self.frames_processed[0]
|
|
826
|
+
for frame in self.frames_processed[1:]:
|
|
827
|
+
if np.array_equal(frame, prev_frame):
|
|
828
|
+
return True
|
|
829
|
+
prev_frame = frame
|
|
830
|
+
|
|
831
|
+
return False
|
|
832
|
+
|
|
833
|
+
def _frames_export_pil(self) -> None:
    """Write the single processed frame to tmp_f with Pillow, using the
    format implied by out_f's suffix."""
    fmt = self.out_f.suffix.replace(".", "")
    with Image.fromarray(self.frames_processed[0]) as im:  # type: ignore
        im.save(self.tmp_f, format=fmt, quality=self.quality)
|
|
840
|
+
|
|
841
|
+
def _frames_export_pyav(self) -> None:
    """Encode processed frames into tmp_f with PyAV.

    Codec, pixel format and loop/plays options are selected from out_f's
    suffix (apng/webm/mkv/webp, with libvpx-vp9 yuv420p as the fallback).
    """
    import av
    from av.video.stream import VideoStream

    options_container: Dict[str, str] = {}
    options_stream: Dict[str, str] = {}

    if isinstance(self.quality, int):
        # Seems not actually working
        options_stream["quality"] = str(self.quality)
        options_stream["lossless"] = "0"

    if self.out_f.suffix in (".apng", ".png"):
        codec = "apng"
        pixel_format = "rgba"
        # plays=0 -> loop forever
        options_stream["plays"] = "0"
    elif self.out_f.suffix in (".webm", ".mkv"):
        codec = "libvpx-vp9"
        # yuva420p keeps the alpha channel
        pixel_format = "yuva420p"
        options_stream["loop"] = "0"
    elif self.out_f.suffix == ".webp":
        codec = "webp"
        pixel_format = "yuva420p"
        # webp loop count lives on the container, not the stream
        options_container["loop"] = "0"
    else:
        # e.g. mp4: no alpha channel
        codec = "libvpx-vp9"
        pixel_format = "yuv420p"
        options_stream["loop"] = "0"

    with av.open(
        self.tmp_f,
        "w",
        format=self.out_f.suffix.replace(".", ""),
        options=options_container,
    ) as output:
        out_stream = output.add_stream(codec, rate=self.fps, options=options_stream)  # type: ignore
        out_stream = cast(VideoStream, out_stream)
        # res_w/res_h are set by this point in the compression flow
        assert isinstance(self.res_w, int) and isinstance(self.res_h, int)
        out_stream.width = self.res_w
        out_stream.height = self.res_h
        out_stream.pix_fmt = pixel_format

        for frame in self.frames_processed:
            av_frame = av.VideoFrame.from_ndarray(frame, format="rgba")
            output.mux(out_stream.encode(av_frame))
        # Flush any buffered packets out of the encoder
        output.mux(out_stream.encode())
|
|
887
|
+
|
|
888
|
+
def _frames_export_pil_anim(self) -> None:
    """Export an animated gif/webp with Pillow, handling quantization,
    transparency and per-format save options.

    Raises:
        RuntimeError: If out_f's suffix is neither .gif nor .webp.
    """
    extra_kwargs: Dict[str, Any] = {}

    # disposal=2 on gif cause flicker in image with transparency
    # Occurs in Pillow == 10.2.0
    # https://github.com/python-pillow/Pillow/issues/7787
    if PillowVersion == "10.2.0":
        extra_kwargs["optimize"] = False
    else:
        extra_kwargs["optimize"] = True

    if self.out_f.suffix == ".gif":
        # GIF can only have one alpha color
        # Change lowest alpha to alpha=0
        # Only keep alpha=0 and alpha=255, nothing in between
        extra_kwargs["format"] = "GIF"
        frames_processed = np.array(self.frames_processed)
        # alpha is a view into frames_processed; edits below mutate frames too
        alpha = frames_processed[:, :, :, 3]
        alpha_min = np.min(alpha)  # type: ignore
        if alpha_min < 255:
            alpha[alpha > alpha_min] = 255
            alpha[alpha == alpha_min] = 0

        if 0 in alpha:
            # Transparent GIF: palette index 0 serves as the transparent color
            extra_kwargs["transparency"] = 0
            extra_kwargs["disposal"] = 2
            im_out = [self.quantize(Image.fromarray(i)) for i in frames_processed]  # type: ignore
        else:
            # Fully opaque: drop alpha before and after quantization
            im_out = [
                self.quantize(Image.fromarray(i).convert("RGB")).convert("RGB")  # type: ignore
                for i in frames_processed
            ]
    elif self.out_f.suffix == ".webp":
        im_out = [Image.fromarray(i) for i in self.frames_processed]  # type: ignore
        extra_kwargs["format"] = "WebP"
        extra_kwargs["allow_mixed"] = True
        extra_kwargs["kmax"] = (
            1  # Keyframe every frame, otherwise black lines artifact can appear
        )
        if self.quality:
            if self.quality < 20:
                extra_kwargs["minimize_size"] = True
            # Spend more encoder effort (method 4..6) as quality drops
            extra_kwargs["method"] = 4 + int(2 * (100 - self.quality) / 100)
            extra_kwargs["alpha_quality"] = self.quality
    else:
        raise RuntimeError(f"Invalid format {self.out_f.suffix}")

    if self.fps:
        extra_kwargs["save_all"] = True
        extra_kwargs["append_images"] = im_out[1:]
        extra_kwargs["duration"] = int(1000 / self.fps)
        extra_kwargs["loop"] = 0

    im_out[0].save(
        self.tmp_f,
        quality=self.quality,
        **extra_kwargs,
    )
|
|
946
|
+
|
|
947
|
+
def _frames_export_png(self) -> None:
    """Quantize the single processed frame, encode it as PNG, run it
    through oxipng, then write the optimized bytes to tmp_f."""
    with Image.fromarray(self.frames_processed[0], "RGBA") as image:  # type: ignore
        image_quant = self.quantize(image)

    with BytesIO() as buffer:
        image_quant.save(buffer, format="png")
        buffer.seek(0)
        png_bytes = buffer.read()

    self.tmp_f.write(self.optimize_png(png_bytes))
|
|
956
|
+
|
|
957
|
+
def _frames_export_apng(self) -> None:
    """Export an animated PNG via apngasm, quantizing all frames stacked
    as one tall image so they share a palette, then optimizing with oxipng.
    """
    from apngasm_python._apngasm_python import APNGAsm, create_frame_from_rgb, create_frame_from_rgba  # type: ignore

    assert self.fps
    assert self.res_h

    # Stack frames vertically so a single quantize pass yields one shared palette
    frames_concat = np.concatenate(self.frames_processed)
    with Image.fromarray(frames_concat, "RGBA") as image_concat:  # type: ignore
        if image_concat.getextrema()[3][0] < 255:  # type: ignore
            # At least one non-opaque pixel: keep the alpha channel
            mode = "RGBA"
            create_frame_method = create_frame_from_rgba
        else:
            mode = "RGB"
            create_frame_method = create_frame_from_rgb
        image_quant = self.quantize(image_concat)

    # Reuse one APNGAsm instance across compression attempts
    if self.apngasm is None:
        self.apngasm = APNGAsm()  # type: ignore
    assert isinstance(self.apngasm, APNGAsm)

    delay_num = int(1000 / self.fps)
    # Slice the tall quantized image back into individual frames
    for i in range(0, image_quant.height, self.res_h):
        crop_dimension = (0, i, image_quant.width, i + self.res_h)
        image_cropped = image_quant.crop(crop_dimension)
        image_final = image_cropped.convert(mode)
        frame_final = create_frame_method(
            np.array(image_final),
            width=image_final.width,
            height=image_final.height,
            delay_num=delay_num,
            delay_den=1000,
        )
        self.apngasm.add_frame(frame_final)

    # apngasm can only assemble to a file path; use a temp dir and read back
    with CacheStore.get_cache_store(path=self.opt_comp.cache_dir) as tempdir:
        tmp_apng = Path(tempdir, f"out{self.out_f.suffix}")
        self.apngasm.assemble(tmp_apng.as_posix())

        with open(tmp_apng, "rb") as f:
            apng_optimized = self.optimize_png(f.read())
        self.tmp_f.write(apng_optimized)

    # Clear accumulated frames for the next attempt
    self.apngasm.reset()
|
|
1000
|
+
|
|
1001
|
+
def optimize_png(self, image_bytes: bytes) -> bytes:
    """Losslessly shrink a PNG/APNG with oxipng and return the result."""
    import oxipng

    optimize_options = {
        "level": 6,
        "fix_errors": True,
        "filter": [oxipng.RowFilter.Brute],
        "optimize_alpha": True,
        "strip": oxipng.StripChunks.safe(),
    }
    return oxipng.optimize_from_memory(image_bytes, **optimize_options)
|
|
1012
|
+
|
|
1013
|
+
def quantize(self, image: Image.Image) -> Image.Image:
    """Reduce the color count according to opt_comp.quantize_method.

    Returns a plain copy when no color limit (<= 256) is active, and the
    image itself when the method name is unrecognized.
    """
    if not (self.color and self.color <= 256):
        return image.copy()

    method = self.opt_comp.quantize_method
    if method == "imagequant":
        return self._quantize_by_imagequant(image)
    if method in ("mediancut", "maxcoverage", "fastoctree"):
        return self._quantize_by_pillow(image)
    return image
|
|
1022
|
+
|
|
1023
|
+
def _quantize_by_imagequant(self, image: Image.Image) -> Image.Image:
    """Quantize with libimagequant, relaxing max_quality in steps of 5
    until quantization succeeds; fall back to the unmodified image."""
    import imagequant  # type: ignore

    assert isinstance(self.quality, int)
    assert isinstance(self.opt_comp.quality_min, int)
    assert isinstance(self.opt_comp.quality_max, int)
    assert isinstance(self.color, int)

    # Lower target quality -> stronger dithering
    quality_span = self.opt_comp.quality_max - self.opt_comp.quality_min
    dither = 1 - (self.quality - self.opt_comp.quality_min) / quality_span

    for max_quality in range(self.quality, 101, 5):
        try:
            return imagequant.quantize_pil_image(  # type: ignore
                image,
                dithering_level=dither,
                max_colors=self.color,
                min_quality=self.opt_comp.quality_min,
                max_quality=max_quality,
            )
        except RuntimeError:
            # Quality ceiling too strict; retry with a higher one
            continue

    return image
|
|
1049
|
+
|
|
1050
|
+
def _quantize_by_pillow(self, image: Image.Image) -> Image.Image:
    """Quantize with Pillow's built-in palette algorithms, falling back to
    fastoctree (with a warning) when the chosen method cannot handle alpha."""
    assert self.color

    quantize_method = self.opt_comp.quantize_method
    if image.mode == "RGBA" and quantize_method in (
        "mediancut",
        "maxcoverage",
    ):
        # mediancut/maxcoverage do not support RGBA; warn and fall back
        self.cb.put(self.MSG_QUANT_NO_ALPHA.format(quantize_method))
        method = Image.Quantize.FASTOCTREE
    elif quantize_method == "mediancut":
        method = Image.Quantize.MEDIANCUT
    elif quantize_method == "maxcoverage":
        method = Image.Quantize.MAXCOVERAGE
    else:
        method = Image.Quantize.FASTOCTREE

    return image.quantize(colors=self.color, method=method)
|
|
1066
|
+
|
|
1067
|
+
def fix_fps(self, fps: float) -> Fraction:
    """Snap fps to a value whose per-frame delay is exactly representable
    in the output format, so the exported duration does not drift past
    the configured limits after rounding.
    """
    suffix = self.out_f.suffix

    # Quote from https://www.w3.org/Graphics/GIF/spec-gif89a.txt
    # vii) Delay Time - If not 0, this field specifies
    # the number of hundredths (1/100) of a second
    #
    # For GIF, we need to adjust fps such that delay is matching to hundreths of second
    if suffix == ".gif":
        return self._fix_fps_duration(fps, 100)

    # webp/apng store delays in milliseconds
    if suffix in (".webp", ".apng", ".png"):
        return self._fix_fps_duration(fps, 1000)

    return self._fix_fps_pyav(fps)
|
|
1082
|
+
|
|
1083
|
+
def _fix_fps_duration(self, fps: float, denominator: int) -> Fraction:
    """Express fps as denominator/delay with an integer delay (delay units
    are 1/denominator seconds), nudging delay by one tick when the result
    falls outside [fps_min, fps_max].

    Returns:
        The adjusted fps as a Fraction.
    """
    # Guard: fps > denominator rounds delay to 0, which would make
    # Fraction(denominator, 0) raise ZeroDivisionError; clamp to 1 tick.
    delay = max(int(rounding(denominator / fps)), 1)
    fps_fraction = Fraction(denominator, delay)
    if self.opt_comp.fps_max and fps_fraction > self.opt_comp.fps_max:
        return Fraction(denominator, (delay + 1))
    if self.opt_comp.fps_min and fps_fraction < self.opt_comp.fps_min:
        # delay == 1 cannot be reduced further without dividing by zero
        if delay == 1:
            return fps_fraction
        return Fraction(denominator, (delay - 1))
    return fps_fraction
|
|
1091
|
+
|
|
1092
|
+
def _fix_fps_pyav(self, fps: float) -> Fraction:
    """Round fps to the nearest whole number for PyAV-encoded formats."""
    fps_rounded = rounding(fps)
    return Fraction(fps_rounded)
|