kagefunc-1.0.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
kagefunc-1.0.0/.gitignore ADDED
@@ -0,0 +1,3 @@
+ .coverage
+ __pycache__/
+ htmlcov/
kagefunc-1.0.0/LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2017 kageru
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
kagefunc-1.0.0/PKG-INFO ADDED
@@ -0,0 +1,50 @@
+ Metadata-Version: 2.4
+ Name: kagefunc
+ Version: 1.0.0
+ Summary: kageru's collection of VapourSynth functions
+ Project-URL: Homepage, https://github.com/kageru/kagefunc
+ License: MIT License
+
+ Copyright (c) 2017 kageru
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
+ License-File: LICENSE
+ Requires-Python: >=3.8
+ Requires-Dist: vapoursynth
+ Requires-Dist: vapoursynth-adaptivegrain
+ Requires-Dist: vsutil
+ Description-Content-Type: text/markdown
+
+ # kagefunc
+ kageru's (that's me) Vapoursynth functions\
+ Functions outside of `kagefunc.py` are currently in development and will probably not even run.\
+ Documentation/explanation for each function is given in its docstring.\
+ `tests.py` contains the tests; I mostly use it during development, but you can also run it to test the functions locally.
+
+ ## Installation
+
+ A list of dependencies can be found [in the AUR](https://aur.archlinux.org/packages/vapoursynth-plugin-kagefunc-git).
+ If you’re on Arch Linux, [the AUR](https://aur.archlinux.org/packages/vapoursynth-plugin-kagefunc-git) is also the recommended method of installation. If you’re not on Arch, you can also install this package via pip:
+
+ ```
+ pip install kagefunc
+ ```
+
+ However, not all of the required dependencies are on PyPI, so if you’re not using the AUR, you will have to install some of them manually.
kagefunc-1.0.0/README.md ADDED
@@ -0,0 +1,17 @@
+ # kagefunc
+ kageru's (that's me) Vapoursynth functions\
+ Functions outside of `kagefunc.py` are currently in development and will probably not even run.\
+ Documentation/explanation for each function is given in its docstring.\
+ `tests.py` contains the tests; I mostly use it during development, but you can also run it to test the functions locally.
+
+ ## Installation
+
+ A list of dependencies can be found [in the AUR](https://aur.archlinux.org/packages/vapoursynth-plugin-kagefunc-git).
+ If you’re on Arch Linux, [the AUR](https://aur.archlinux.org/packages/vapoursynth-plugin-kagefunc-git) is also the recommended method of installation. If you’re not on Arch, you can also install this package via pip:
+
+ ```
+ pip install kagefunc
+ ```
+
+ However, not all of the required dependencies are on PyPI, so if you’re not using the AUR, you will have to install some of them manually.
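+
+ ## Usage
+
+ A minimal sketch of what a filtering script using this module might look like
+ (editorial example; the clip and parameter values are hypothetical, and the
+ functions assume a working VapourSynth setup with the plugins from the AUR
+ dependency list installed):
+
+ ```python
+ import vapoursynth as vs
+ import kagefunc as kgf
+
+ core = vs.core
+ # stand-in for a real source; normally you'd load one, e.g. with ffms2
+ src = core.std.BlankClip(format=vs.YUV420P8, width=1920, height=1080, length=24)
+ scaled = kgf.inverse_scale(src, height=720)         # reverse a bilinear upscale
+ grained = kgf.adaptive_grain(scaled, strength=0.3)  # brightness-adaptive grain
+ grained.set_output()
+ ```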
kagefunc-1.0.0/kagefunc.py ADDED
@@ -0,0 +1,391 @@
+ """
+ kageru’s collection of vapoursynth functions.
+ """
+ from functools import partial
+ from typing import Optional
+ from vsutil import *
+ import vapoursynth as vs
+ import mvsfunc as mvf
+ import fvsfunc as fvf
+
+ core = vs.core
+
+ __all__ = [
+     'inverse_scale',
+     'mask_descale',
+     'generate_keyframes',
+     'adaptive_grain',
+     'conditional_resize',
+     'squaremask',
+     'retinex_edgemask',
+     'kirsch',
+     'get_descale_filter',
+     'hardsubmask',
+     'hardsubmask_fades',
+     'crossfade',
+     'hybriddenoise',
+ ]
+
+
+ def inverse_scale(
+     source: vs.VideoNode,
+     width: Optional[int] = None,
+     height: int = 0,
+     kernel: str = 'bilinear',
+     taps: int = 4,
+     b: float = 1 / 3,
+     c: float = 1 / 3,
+     mask_detail: bool = False,
+     descale_mask_zones: str = '',
+     denoise: bool = False,
+     bm3d_sigma: float = 1,
+     knl_strength: float = 0.4,
+     use_gpu: bool = True,
+ ) -> vs.VideoNode:
+     """
+     Use descale to reverse the scaling on a given input clip.
+     width, height, kernel, taps, b, c are parameters for resizing.
+     descale_mask_zones can be used to only mask certain zones to improve performance; uses rfs syntax.
+     denoise, bm3d_sigma, knl_strength, use_gpu are parameters for denoising; denoise = False to disable.
+     use_gpu = True -> chroma will be denoised with KNLMeansCL (faster)
+     """
+     if not height:
+         raise ValueError('inverse_scale: you need to specify a value for the output height')
+
+     only_luma = source.format.num_planes == 1
+
+     if get_depth(source) != 32:
+         source = source.resize.Point(format=source.format.replace(bits_per_sample=32, sample_type=vs.FLOAT))
+     width = fallback(width, getw(height, source.width / source.height))
+
+     # if we denoise luma and chroma separately, do the chroma here while it’s still 540p
+     if denoise and use_gpu and not only_luma:
+         source = core.knlm.KNLMeansCL(source, a=2, h=knl_strength, d=3, device_type='gpu', device_id=0, channels='UV')
+
+     planes = split(source)
+     planes[0] = _descale_luma(planes[0], width, height, kernel, taps, b, c)
+     if only_luma:
+         return planes[0]
+     planes = _descale_chroma(planes, width, height)
+
+     if mask_detail:
+         upscaled = fvf.Resize(planes[0], source.width, source.height, kernel=kernel, taps=taps, a1=b, a2=c)
+         planes[0] = mask_descale(get_y(source), planes[0], upscaled, zones=descale_mask_zones)
+     scaled = join(planes)
+     return mvf.BM3D(scaled, radius1=1, sigma=[bm3d_sigma, 0] if use_gpu else bm3d_sigma) if denoise else scaled
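+
+ # Editorial usage sketch (not part of the original release); assumes a 1080p
+ # clip that was upscaled from 720p with the default bilinear kernel:
+ #   descaled = inverse_scale(src, height=720, mask_detail=True)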
+
+
+ def _descale_luma(luma, width, height, kernel, taps, b, c):
+     return get_descale_filter(kernel, b=b, c=c, taps=taps)(luma, width, height)
+
+
+ def _descale_chroma(planes, width, height):
+     planes[1], planes[2] = [core.resize.Bicubic(plane, width, height, src_left=0.25) for plane in planes[1:]]
+     return planes
+
+
+ def mask_descale(
+     original: vs.VideoNode,
+     descaled: vs.VideoNode,
+     upscaled: vs.VideoNode,
+     threshold: float = 0.05,
+     zones: str = '',
+     debug: bool = False,
+ ):
+     downscaled = core.resize.Spline36(original, descaled.width, descaled.height)
+     assert get_depth(original) == get_depth(descaled), "Source and descaled clip need to have the same bitdepth"
+     detail_mask = _generate_descale_mask(original, descaled, upscaled, threshold)
+     if debug:
+         return detail_mask
+     merged = core.std.MaskedMerge(descaled, downscaled, detail_mask)
+     return fvf.ReplaceFrames(descaled, merged, zones) if zones else merged
+
+
+ def _generate_descale_mask(source, downscaled, upscaled, threshold=0.05):
+     mask = (
+         core.std.Expr([source, upscaled], 'x y - abs')
+         .resize.Bicubic(downscaled.width, downscaled.height)
+         .std.Binarize(threshold)
+     )
+     mask = iterate(mask, core.std.Maximum, 2)
+     return iterate(mask, core.std.Inflate, 2)
+
+
+ # Currently, this should fail for non-mod4 subsampled input.
+ # Not really relevant though, as 480p, 576p, 720p, and 1080p are all mod32.
+ def generate_keyframes(clip: vs.VideoNode, out_path: Optional[str] = None, header: bool = True) -> None:
+     """
+     probably only useful for fansubbing
+     generates a qpfile of the keyframes to simplify timing
+     """
+     import os
+
+     # Speed up the analysis by resizing first. Converting to 8 bit also seems to improve the accuracy of wwxd.
+     clip = core.resize.Bilinear(clip, 640, 360, format=vs.YUV420P8)
+     clip = core.wwxd.WWXD(clip)
+     out_txt = "# WWXD log file, using qpfile format\n\n" if header else ""
+     for i in range(1, clip.num_frames):
+         if clip.get_frame(i).props.Scenechange == 1:
+             out_txt += f"{i} I -1\n"
+         print(f"Progress: {i}/{clip.num_frames} frames", end="\r")
+     out_path = fallback(out_path, os.path.expanduser("~") + "/Desktop/keyframes.txt")
+     with open(out_path, "w") as text_file:
+         text_file.write(out_txt)
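+
+ # Editorial usage sketch: write the scene changes of 'src' to a qpfile:
+ #   generate_keyframes(src, out_path='keyframes.txt')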
+
+
+ def adaptive_grain(clip: vs.VideoNode, strength=0.25, static=True, luma_scaling=12, show_mask=False) -> vs.VideoNode:
+     """
+     Generates grain based on frame and pixel brightness. Details can be found here:
+     https://kageru.moe/blog/article/adaptivegrain
+     Strength is the strength of the grain generated by AddGrain, static=True for static grain, luma_scaling
+     manipulates the grain alpha curve. Higher values will generate less grain (especially in brighter scenes),
+     while lower values will generate more grain, even in brighter scenes.
+     """
+     mask = core.adg.Mask(clip.std.PlaneStats(), luma_scaling)
+     grained = core.grain.Add(clip, var=strength, constant=static)
+     if show_mask:
+         return mask
+
+     return core.std.MaskedMerge(clip, grained, mask)
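+
+ # Editorial usage sketch: subtler static grain, mostly confined to dark scenes:
+ #   grained = adaptive_grain(clip, strength=0.2, luma_scaling=16)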
+
+
+ # TODO: implement a blending zone in which both clips are merged to avoid abrupt and visible kernel changes.
+ # also TODO: this should extract the luma so the descale branch works on 420 input
+ def conditional_resize(
+     src: vs.VideoNode, kernel='bilinear', width=1280, height=720, thr=0.00015, debug=False
+ ) -> vs.VideoNode:
+     """
+     Fix oversharpened upscales by comparing a regular downscale with a blurry bicubic kernel downscale.
+     Similar to the avisynth function. thr is lower in vapoursynth because it's normalized (between 0 and 1).
+     """
+
+     def compare(n, down, oversharpened, diff_default, diff_os):
+         error_default = diff_default.get_frame(n).props.PlaneStatsDiff
+         error_os = diff_os.get_frame(n).props.PlaneStatsDiff
+         if debug:
+             debugstring = """
+             Error when scaling with {:s}: {:.5f}
+             Error when scaling with bicubic (b=0, c=1): {:.5f}
+             Using sharp debicubic: {:s}
+             """.format(
+                 kernel, error_default, error_os, str(error_default - thr > error_os)
+             )
+             oversharpened = oversharpened.sub.Subtitle(debugstring)
+             down = down.sub.Subtitle(debugstring)
+         if error_default - thr > error_os:
+             return oversharpened
+         return down
+
+     if hasattr(core, 'descale'):
+         down = get_descale_filter(kernel)(src, width, height)
+         oversharpened = src.descale.Debicubic(width, height, b=0, c=1)
+     else:
+         down = src.fmtc.resample(width, height, kernel=kernel, invks=True)
+         oversharpened = src.fmtc.resample(width, height, kernel='bicubic', a1=0, a2=1, invks=True)
+
+     # we only need luma for the comparison
+     rescaled = get_y(down).fmtc.resample(src.width, src.height, kernel=kernel)
+     oversharpened_up = get_y(oversharpened).fmtc.resample(src.width, src.height, kernel='bicubic', a1=0, a2=1)
+
+     src_luma = get_y(src)
+     diff_default = core.std.PlaneStats(rescaled, src_luma)
+     diff_os = core.std.PlaneStats(oversharpened_up, src_luma)
+
+     return core.std.FrameEval(
+         down, partial(compare, down=down, oversharpened=oversharpened, diff_os=diff_os, diff_default=diff_default)
+     )
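+
+ # Editorial usage sketch: pick the regular descale or the sharp debicubic per frame:
+ #   down = conditional_resize(src, kernel='bilinear', width=1280, height=720)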
+
+
+ def squaremask(clip: vs.VideoNode, width: int, height: int, offset_x: int, offset_y: int) -> vs.VideoNode:
+     """
+     “There must be a better way!”
+     Basically a small script that draws white rectangles on a black background.
+     Python-only replacement for manual paint/photoshop/gimp masks, as long as these don't go beyond a simple rectangle.
+     Can be merged with an edgemask to only mask certain edge areas.
+     TL;DR: Unless you're scenefiltering, this is useless.
+     """
+     bits = get_depth(clip)
+     src_w = clip.width
+     src_h = clip.height
+     mask_format = clip.format.replace(color_family=vs.GRAY, subsampling_w=0, subsampling_h=0)
+     white = 1 if mask_format.sample_type == vs.FLOAT else (1 << bits) - 1
+
+     center = core.std.BlankClip(clip, width=width, height=height, format=mask_format, color=white, length=1)
+
+     if offset_x:
+         left = core.std.BlankClip(center, width=offset_x, height=height, color=0)
+         center = core.std.StackHorizontal([left, center])
+
+     if center.width < src_w:
+         right = core.std.BlankClip(center, width=src_w - center.width, height=height, color=0)
+         center = core.std.StackHorizontal([center, right])
+
+     if offset_y:
+         top = core.std.BlankClip(center, width=src_w, height=offset_y, color=0)
+         center = core.std.StackVertical([top, center])
+
+     if center.height < src_h:
+         bottom = core.std.BlankClip(center, width=src_w, height=src_h - center.height, color=0)
+         center = core.std.StackVertical([center, bottom])
+
+     return center * clip.num_frames
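+
+ # Editorial usage sketch: a 100x40 white box near the bottom-right of a 1080p
+ # clip, e.g. to cover a watermark when used with core.std.MaskedMerge:
+ #   mask = squaremask(clip, width=100, height=40, offset_x=1760, offset_y=1020)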
+
+
+ def retinex_edgemask(src: vs.VideoNode, sigma=1) -> vs.VideoNode:
+     """
+     Use retinex to greatly improve the accuracy of the edge detection in dark scenes.
+     sigma is the sigma of tcanny.
+     """
+     luma = get_y(src)
+     max_value = 1 if src.format.sample_type == vs.FLOAT else (1 << get_depth(src)) - 1
+     ret = core.retinex.MSRCP(luma, sigma=[50, 200, 350], upper_thr=0.005)
+     tcanny = ret.tcanny.TCanny(mode=1, sigma=sigma).std.Minimum(coordinates=[1, 0, 1, 0, 0, 1, 0, 1])
+     return core.std.Expr([kirsch(luma), tcanny], f'x y + {max_value} min')
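+
+ # Editorial usage sketch (needs the retinex and tcanny plugins):
+ #   linemask = retinex_edgemask(src, sigma=1)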
+
+
+ def kirsch(src: vs.VideoNode) -> vs.VideoNode:
+     """
+     Kirsch edge detection. This uses 8 directions, so it's slower but better than Sobel (4 directions).
+     more information: https://ddl.kageru.moe/konOJ.pdf
+     """
+     kirsch1 = src.std.Convolution(matrix=[ 5,  5,  5, -3,  0, -3, -3, -3, -3], saturate=False)
+     kirsch2 = src.std.Convolution(matrix=[-3,  5,  5, -3,  0,  5, -3, -3, -3], saturate=False)
+     kirsch3 = src.std.Convolution(matrix=[-3, -3,  5, -3,  0,  5, -3, -3,  5], saturate=False)
+     kirsch4 = src.std.Convolution(matrix=[-3, -3, -3, -3,  0,  5, -3,  5,  5], saturate=False)
+     return core.std.Expr([kirsch1, kirsch2, kirsch3, kirsch4], 'x y max z max a max')
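+
+ # Editorial usage sketch; typically run on the luma plane only:
+ #   edges = kirsch(get_y(src))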
+
+
+ def get_descale_filter(kernel: str, **kwargs):
+     """
+     Stolen from a declined pull request.
+     Originally written by @stuxcrystal on GitHub.
+     """
+     filters = {
+         'bilinear': (lambda **kwargs: core.descale.Debilinear),
+         'spline16': (lambda **kwargs: core.descale.Despline16),
+         'spline36': (lambda **kwargs: core.descale.Despline36),
+         'bicubic': (lambda b, c, **kwargs: partial(core.descale.Debicubic, b=b, c=c)),
+         'lanczos': (lambda taps, **kwargs: partial(core.descale.Delanczos, taps=taps)),
+     }
+     return filters[kernel](**kwargs)
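+
+ # Editorial usage sketch: the returned callable takes (clip, width, height):
+ #   descaled = get_descale_filter('bicubic', b=0, c=1 / 2)(clip, 1280, 720)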
269
+
270
+
271
+ def hardsubmask(clip: vs.VideoNode, ref: vs.VideoNode, expand_n=None) -> vs.VideoNode:
272
+ """
273
+ Uses multiple techniques to mask the hardsubs in video streams like Anime on Demand or Wakanim.
274
+ Might (should) work for other hardsubs, too, as long as the subs are somewhat close to black/white.
275
+ It's kinda experimental, but I wanted to try something like this.
276
+ It works by finding the edge of the subtitle (where the black border and the white fill color touch),
277
+ and it grows these areas into a regular brightness + difference mask via hysteresis.
278
+ This should (in theory) reliably find all hardsubs in the image with barely any false positives (or none at all).
279
+ Output depth and processing precision are the same as the input
280
+ It is not necessary for 'clip' and 'ref' to have the same bit depth, as 'ref' will be dithered to match 'clip'
281
+ Most of this code was written by Zastin (https://github.com/Z4ST1N)
282
+ Clean code soon(tm)
283
+ """
284
+ clp_f = clip.format
285
+ bits = clp_f.bits_per_sample
286
+ stype = clp_f.sample_type
287
+
288
+ expand_n = fallback(expand_n, clip.width // 200)
289
+
290
+ fmt_args = (clp_f.color_family, vs.INTEGER, 8, clp_f.subsampling_w, clp_f.subsampling_h)
291
+ try:
292
+ yuv_fmt = core.query_video_format(*fmt_args)
293
+ except AttributeError:
294
+ yuv_fmt = core.register_format(*fmt_args)
295
+
296
+ y_range = 219 << (bits - 8) if stype == vs.INTEGER else 1
297
+ uv_range = 224 << (bits - 8) if stype == vs.INTEGER else 1
298
+ offset = 16 << (bits - 8) if stype == vs.INTEGER else 0
299
+
300
+ uv_abs = ' abs ' if stype == vs.FLOAT else ' {} - abs '.format((1 << bits) // 2)
301
+ yexpr = 'x y - abs {thr} > 255 0 ?'.format(thr=y_range * 0.7)
302
+ uvexpr = 'x {uv_abs} {thr} < y {uv_abs} {thr} < and 255 0 ?'.format(uv_abs=uv_abs, thr=uv_range * 0.8)
303
+
304
+ difexpr = 'x {upper} > x {lower} < or x y - abs {mindiff} > and 255 0 ?'.format(
305
+ upper=y_range * 0.8 + offset, lower=y_range * 0.2 + offset, mindiff=y_range * 0.1
306
+ )
307
+
308
+ # right shift by 4 pixels.
309
+ # fmtc uses at least 16 bit internally, so it's slower for 8 bit,
310
+ # but its behaviour when shifting/replicating edge pixels makes it faster otherwise
311
+ if bits < 16:
312
+ right = core.resize.Point(clip, src_left=4)
313
+ else:
314
+ right = core.fmtc.resample(clip, sx=4, flt=False)
315
+ subedge = core.std.Expr([clip, right], [yexpr, uvexpr], yuv_fmt.id)
316
+ c444 = split(subedge.resize.Bicubic(format=vs.YUV444P8, filter_param_a=0, filter_param_b=0.5))
317
+ subedge = core.std.Expr(c444, 'x y z min min')
318
+
319
+ clip, ref = get_y(clip), get_y(ref)
320
+ ref = ref if clip.format == ref.format else depth(ref, bits)
321
+
322
+ clips = [clip.std.Convolution([1] * 9), ref.std.Convolution([1] * 9)]
323
+ diff = core.std.Expr(clips, difexpr, vs.GRAY8).std.Maximum().std.Maximum()
324
+
325
+ mask = core.misc.Hysteresis(subedge, diff)
326
+ mask = iterate(mask, core.std.Maximum, expand_n)
327
+ mask = mask.std.Inflate().std.Inflate().std.Convolution([1] * 9)
328
+ return depth(mask, bits, range=1, range_in=1)
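+
+ # Editorial usage sketch: 'tv' is a hardsubbed stream, 'bd' a clean reference:
+ #   mask = hardsubmask(tv, bd)
+ #   clean = core.std.MaskedMerge(tv, bd, mask, first_plane=True)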
+
+
+ def hardsubmask_fades(clip, ref, expand_n=8, highpass=5000):
+     """
+     Uses Sobel edge detection to find edges that are only present in the main clip.
+     These should (theoretically) be the subtitles.
+     The video is blurred beforehand to prevent compression artifacts from being recognized as subtitles.
+     This may create more false positives than the other hardsubmask,
+     but it is capable of finding subtitles of any color and subtitles during fadein/fadeout.
+     Setting highpass to a lower value may catch very slight changes (e.g. the last frame of a low-contrast fade),
+     but it will make the mask more susceptible to artifacts.
+     """
+     clip = core.fmtc.bitdepth(clip, bits=16).std.Convolution([1] * 9)
+     ref = core.fmtc.bitdepth(ref, bits=16).std.Convolution([1] * 9)
+     clipedge = get_y(clip).std.Sobel()
+     refedge = get_y(ref).std.Sobel()
+     mask = core.std.Expr([clipedge, refedge], 'x y - {} < 0 65535 ?'.format(highpass)).std.Median()
+     mask = iterate(mask, core.std.Maximum, expand_n)
+     return iterate(mask, core.std.Inflate, 4)
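+
+ # Editorial usage sketch: catch colored signs and fading subtitles that
+ # hardsubmask misses, at the cost of more false positives:
+ #   fademask = hardsubmask_fades(tv, bd, highpass=2000)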
+
+
+ def crossfade(clipa, clipb, duration):
+     """
+     Crossfade clipa into clipb. Duration is the length of the blending zone.
+     For example, crossfade(a, b, 100) will fade the last 100 frames of a into b.
+     """
+
+     def fade_image(n, clipa, clipb):
+         return core.std.Merge(clipa, clipb, weight=n / clipa.num_frames)
+
+     if clipa.format.id != clipb.format.id or clipa.height != clipb.height or clipa.width != clipb.width:
+         raise ValueError('Crossfade: Both clips must have the same dimensions and format.')
+     fade = core.std.FrameEval(
+         core.std.BlankClip(clipa, length=duration + 1),
+         partial(fade_image, clipa=clipa[-duration - 1:], clipb=clipb[:duration]),
+     )
+     return clipa[:-duration] + fade[1:] + clipb[duration:]
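+
+ # Editorial usage sketch: blend the last 24 frames of 'op' into 'episode':
+ #   merged = crossfade(op, episode, 24)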
+
+
+ def hybriddenoise(src, knl=0.5, sigma=2, radius1=1):
+     """
+     denoise luma with BM3D (CPU-based) and chroma with KNLMeansCL (GPU-based)
+     sigma = luma denoise strength
+     knl = chroma denoise strength. The algorithm is different, so this value is different from sigma;
+     BM3D's sigma default is 5 and KNL's is 1.2, to give you an idea of the order of magnitude.
+     radius1 = temporal radius of luma denoising, 0 for purely spatial denoising
+     """
+     y = get_y(src)
+     y = mvf.BM3D(y, radius1=radius1, sigma=sigma)
+     denoised = core.knlm.KNLMeansCL(src, a=2, h=knl, d=3, device_type='gpu', device_id=0, channels='UV')
+     return core.std.ShufflePlanes([y, denoised], planes=[0, 1, 2], colorfamily=vs.YUV)
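+
+ # Editorial usage sketch (needs mvsfunc's BM3D and the KNLMeansCL plugin):
+ #   denoised = hybriddenoise(src, knl=0.4, sigma=1.5)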
+
+
+ # helpers
+
+
+ def getw(height, aspect_ratio=16 / 9, only_even=True):
+     """
+     Returns the width for an image of the given height and aspect ratio.
+     """
+     width = height * aspect_ratio
+     width = int(round(width))
+     return width // 2 * 2 if only_even else width
kagefunc-1.0.0/pyproject.toml ADDED
@@ -0,0 +1,22 @@
+ [build-system]
+ requires = ["hatchling"]
+ build-backend = "hatchling.build"
+
+ [project]
+ name = "kagefunc"
+ version = "1.0.0"
+ description = "kageru's collection of VapourSynth functions"
+ readme = "README.md"
+ license = { file = "LICENSE" }
+ requires-python = ">=3.8"
+ dependencies = [
+     "vapoursynth",
+     "vsutil",
+     "vapoursynth-adaptivegrain",
+ ]
+
+ [project.urls]
+ Homepage = "https://github.com/kageru/kagefunc"
+
+ [tool.hatch.build.targets.wheel]
+ py-modules = ["kagefunc"]
kagefunc-1.0.0/tests.py ADDED
@@ -0,0 +1,115 @@
+ """
+ Many of these don’t actually test the logic; they just make some
+ basic assertions and request a frame to check that output is produced.
+ """
+ import unittest
+ import vapoursynth as vs
+ import kagefunc as kgf
+
+
+ class KagefuncTests(unittest.TestCase):
+     BLACK_SAMPLE_CLIP = vs.core.std.BlankClip(
+         format=vs.YUV420P8, width=160, height=120, color=[0, 128, 128], length=100
+     )
+     WHITE_SAMPLE_CLIP = vs.core.std.BlankClip(
+         format=vs.YUV420P8, width=160, height=120, color=[255, 128, 128], length=100
+     )
+     GREYSCALE_SAMPLE_CLIP = vs.core.std.BlankClip(format=vs.GRAY8, width=160, height=120, color=[255])
+
+     def test_retinex_edgemask(self):
+         mask = kgf.retinex_edgemask(self.BLACK_SAMPLE_CLIP)
+         self.assert_same_bitdepth(mask, self.BLACK_SAMPLE_CLIP)
+         self.assert_same_length(mask, self.BLACK_SAMPLE_CLIP)
+         self.assertEqual(mask.format.color_family, vs.GRAY)
+         # request a frame to see if that errors
+         mask.get_frame(0)
+
+     def test_inverse_scale(self):
+         src = self.BLACK_SAMPLE_CLIP
+         resized = kgf.inverse_scale(self.GREYSCALE_SAMPLE_CLIP, height=90)
+         self.assertEqual(resized.format.id, vs.GRAYS)
+         self.assertEqual(resized.height, 90)
+         self.assertEqual(resized.width, 120)
+         resized = kgf.inverse_scale(src, height=90)
+         self.assertEqual(resized.format.id, vs.YUV444PS)
+         self.assertEqual(resized.height, 90)
+         self.assertEqual(resized.width, 120)
+         self.assert_runs(kgf.inverse_scale(src, height=90, denoise=True))
+         self.assert_runs(kgf.inverse_scale(src, height=90, mask_detail=True))
+         self.assert_runs(kgf.inverse_scale(src, height=90, descale_mask_zones='[0, 4]', mask_detail=True))
+         resized.get_frame(0)
+
+     def test_squaremask(self):
+         mask = kgf.squaremask(self.BLACK_SAMPLE_CLIP, 30, 30, 20, 0)
+         self.assert_same_length(mask, self.BLACK_SAMPLE_CLIP)
+         self.assert_same_bitdepth(mask, self.BLACK_SAMPLE_CLIP)
+         self.assert_same_dimensions(mask, self.BLACK_SAMPLE_CLIP)
+         mask.get_frame(0)
+
+     def test_crossfade(self):
+         faded = kgf.crossfade(self.WHITE_SAMPLE_CLIP, self.BLACK_SAMPLE_CLIP, 50)
+         self.assertEqual(len(faded), len(self.WHITE_SAMPLE_CLIP) + len(self.BLACK_SAMPLE_CLIP) - 50)
+
+         faded = kgf.crossfade(self.WHITE_SAMPLE_CLIP, self.BLACK_SAMPLE_CLIP, 1)
+         # this isn’t exactly 0.5 because… reasons?
+         self.assertLess(
+             abs(faded.std.PlaneStats().get_frame(len(self.WHITE_SAMPLE_CLIP) - 1).props.PlaneStatsAverage - 0.5), 0.02
+         )
+         self.assertEqual(len(faded), len(self.WHITE_SAMPLE_CLIP) + len(self.BLACK_SAMPLE_CLIP) - 1)
+
+     def test_adaptive_grain(self):
+         grained = kgf.adaptive_grain(self.BLACK_SAMPLE_CLIP)
+         self.assert_same_metadata(grained, self.BLACK_SAMPLE_CLIP)
+         grained.get_frame(0)
+
+     @staticmethod
+     def assert_runs(clip: vs.VideoNode):
+         clip.get_frame(0)
+
+     def assert_same_dimensions(self, clip_a: vs.VideoNode, clip_b: vs.VideoNode):
+         """
+         Assert that two clips have the same width and height.
+         """
+         self.assertEqual(clip_a.height, clip_b.height, f'Same height expected, was {clip_a.height} and {clip_b.height}')
+         self.assertEqual(clip_a.width, clip_b.width, f'Same width expected, was {clip_a.width} and {clip_b.width}')
+
+     def assert_same_format(self, clip_a: vs.VideoNode, clip_b: vs.VideoNode):
+         """
+         Assert that two clips have the same format (but not necessarily size).
+         """
+         self.assertEqual(clip_a.format.id, clip_b.format.id, 'Same format expected')
+
+     def assert_same_bitdepth(self, clip_a: vs.VideoNode, clip_b: vs.VideoNode):
+         """
+         Assert that two clips have the same number of bits per sample.
+         """
+         self.assertEqual(
+             clip_a.format.bits_per_sample,
+             clip_b.format.bits_per_sample,
+             f'Same depth expected, was {clip_a.format.bits_per_sample} and {clip_b.format.bits_per_sample}',
+         )
+
+     def assert_same_length(self, clip_a: vs.VideoNode, clip_b: vs.VideoNode):
+         self.assertEqual(
+             len(clip_a), len(clip_b), f'Same number of frames expected, was {len(clip_a)} and {len(clip_b)}.'
+         )
+
+     def assert_same_metadata(self, clip_a: vs.VideoNode, clip_b: vs.VideoNode):
+         """
+         Assert that two clips have the same format (and thus depth), dimensions, and length.
+         """
+         self.assert_same_format(clip_a, clip_b)
+         self.assert_same_dimensions(clip_a, clip_b)
+         self.assert_same_length(clip_a, clip_b)
+
+     def assert_same_frame(self, clip_a: vs.VideoNode, clip_b: vs.VideoNode):
+         """
+         Assert that two frames are identical. Only the first frame of the arguments is used.
+         """
+         diff = vs.core.std.PlaneStats(clip_a, clip_b)
+         frame = diff.get_frame(0)
+         self.assertEqual(frame.props.PlaneStatsDiff, 0)
+
+
+ if __name__ == '__main__':
+     unittest.main()