vsdirty 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
vsdirty/__init__.py ADDED
@@ -0,0 +1,8 @@
+ """
+ Tools made by PingWer74 and Man3500 for adaptive denoising, masks and other stuff.
+ """
+
+ from .adfunc import *
+ from .admask import *
+ from .adutils import *
+ from .dirtyfixer import *
vsdirty/adfunc.py ADDED
@@ -0,0 +1,554 @@
+ import vapoursynth as vs
+
+ from typing import Optional
+ from vstools import PlanesT
+
+ core = vs.core
+
+ if not (hasattr(core, 'fmtc') and hasattr(core, 'akarin')):
+     raise ImportError("'fmtc' and 'akarin' are mandatory. Make sure the DLLs are present in the plugins folder.")
+
+ def mini_BM3D(
+     clip: vs.VideoNode,
+     profile: str = "LC",
+     accel: Optional[str] = None,
+     planes: PlanesT = [0, 1, 2],
+     ref: Optional[vs.VideoNode] = None,
+     dither: Optional[str] = "error_diffusion",
+     fast: Optional[bool] = False,
+     **kwargs
+ ) -> vs.VideoNode:
+     """
+     BM3D mini wrapper.
+
+     :param clip: Clip to process (32-bit float; other formats are converted internally).
+     :param profile: Precision. Accepted values: "FAST", "LC", "HIGH".
+     :param accel: Hardware acceleration to use. Accepted values: "cuda_rtc", "cuda", "hip", "cpu", "auto".
+     :param planes: Which planes to process. Defaults to all planes.
+     :param ref: Reference clip for BM3D (32-bit float; other formats are converted internally).
+     :param dither: Dithering method for the output clip. If None, no dithering is applied.
+     :param fast: Use CPU+GPU; adds overhead. Ignored on the CPU path.
+     :param kwargs: Accepts BM3DCUDA arguments, https://github.com/WolframRhodium/VapourSynth-BM3DCUDA.
+     :return: Denoised clip.
+     """
+     from vstools import depth
+     from .adutils import plane
+
+     def _bm3d(
+         clip: vs.VideoNode,
+         accel: Optional[str] = "AUTO",
+         ref: Optional[vs.VideoNode] = None,
+         **kwargs
+     ) -> vs.VideoNode:
+         accel_u = accel.upper() if accel is not None else "AUTO"
+
+         if accel_u not in ("AUTO", "CUDA_RTC", "CUDA", "HIP", "CPU"):
+             raise ValueError(f"Accel unknown: {accel}")
+
+         if accel_u in ("AUTO", "CUDA_RTC"):
+             try:
+                 return core.bm3dcuda_rtc.BM3Dv2(clip, ref, **kwargs)
+             except Exception:
+                 try:
+                     return core.bm3dhip.BM3Dv2(clip, ref, **kwargs)
+                 except Exception:
+                     # The CPU fallback does not take the CUDA-only 'fast' flag.
+                     kwargs.pop("fast", None)
+                     kwargs.pop("ps_range", None)
+                     return core.bm3dcpu.BM3Dv2(clip, ref, **kwargs)
+         elif accel_u == "CUDA":
+             return core.bm3dcuda.BM3Dv2(clip, ref, **kwargs)
+         elif accel_u == "HIP":
+             return core.bm3dhip.BM3Dv2(clip, ref, **kwargs)
+         elif accel_u == "CPU":
+             kwargs.pop("fast", None)
+             kwargs.pop("ps_range", None)
+             return core.bm3dcpu.BM3Dv2(clip, ref, **kwargs)
+
+     clipS = depth(clip, 32, dither_type="none")
+
+     if ref is not None:
+         refS = depth(ref, 32, dither_type="none")
+     else:
+         refS = None
+
+     profiles = {
+         "FAST": {
+             "block_step": [8, 7, 8, 7],
+             "bm_range": [9, 9, 7, 7],
+             "ps_range": [4, 5],
+         },
+         "LC": {
+             "block_step": [6, 5, 6, 5],
+             "bm_range": [9, 9, 9, 9],
+             "ps_range": [4, 5],
+         },
+         "HIGH": {
+             "block_step": [3, 2, 3, 2],
+             "bm_range": [16, 16, 16, 16],
+             "ps_range": [7, 8],
+         },
+     }
+
+     profile_u = str(profile).upper()
+
+     if profile_u not in profiles:
+         raise ValueError(f"mini_BM3D: Profile '{profile}' not recognized.")
+
+     params = profiles[profile_u]
+
+     kwargs = dict(
+         kwargs,
+         fast=fast,
+         **params
+     )
+
+     num_planes = clip.format.num_planes
+     if clip.format.color_family == vs.GRAY:
+         return depth(_bm3d(clipS, accel, refS, **kwargs), clip.format.bits_per_sample, dither_type=dither if dither is not None else "none")
+
+     if isinstance(planes, int):
+         planes = [planes]
+     planes = list(dict.fromkeys(int(p) for p in planes))
+
+     if clip.format.color_family == vs.RGB:
+         filtered_planes = [
+             _bm3d(plane(clipS, p), accel, **kwargs) if p in planes else plane(clipS, p)
+             for p in range(num_planes)
+         ]
+         dclip = core.std.ShufflePlanes(filtered_planes, planes=[0, 0, 0], colorfamily=clip.format.color_family)
+
+     elif clip.format.color_family == vs.YUV:
+         y = plane(clipS, 0)
+         u = plane(clipS, 1)
+         v = plane(clipS, 2)
+
+         y_ref = None
+         if refS is not None:
+             if refS.format.num_planes not in (1, 3):
+                 raise ValueError("mini_BM3D: When providing a reference clip for YUV, it must have 1 or 3 planes.")
+             y_ref = plane(refS, 0)
+
+         y_denoised = _bm3d(y, accel, ref=y_ref, **kwargs) if 0 in planes else y
+
+         if 1 in planes or 2 in planes:
+             y_downscaled = y.resize.Spline36(u.width, u.height)
+
+             ref_444 = None
+             if refS is not None and refS.format.num_planes == 3:
+                 u_ref = plane(refS, 1)
+                 v_ref = plane(refS, 2)
+                 y_ref_downscaled = y_ref.resize.Spline36(u.width, u.height)
+                 ref_444 = core.std.ShufflePlanes([y_ref_downscaled, u_ref, v_ref], planes=[0, 0, 0], colorfamily=clip.format.color_family)
+             elif refS is not None and refS.format.num_planes == 1:
+                 ref_444 = y_ref
+
+             clip_444 = core.std.ShufflePlanes([y_downscaled, u, v], planes=[0, 0, 0], colorfamily=clip.format.color_family)
+             clip_444 = _bm3d(clip_444, accel, ref=ref_444, chroma=True, **kwargs) if ref_444 is not None else _bm3d(clip_444, accel, chroma=True, **kwargs)
+
+             if 1 in planes:
+                 u = plane(clip_444, 1)
+             if 2 in planes:
+                 v = plane(clip_444, 2)
+
+         dclip = core.std.ShufflePlanes([y_denoised, u, v], planes=[0, 0, 0], colorfamily=clip.format.color_family)
+
+     else:
+         raise ValueError("mini_BM3D: Unsupported color family.")
+
+     return depth(dclip, clip.format.bits_per_sample, dither_type=dither if dither is not None else "none")
+
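For orientation, a minimal usage sketch of the wrapper above. It is a sketch under assumptions, not part of the package: it presumes at least one of the BM3DCUDA/HIP/CPU plugins is installed, uses the L-SMASH source plugin only to obtain a clip, and the file name is a placeholder.

import vapoursynth as vs
from vsdirty import mini_BM3D  # exposed via the package's star imports

core = vs.core
src = core.lsmas.LWLibavSource("input.mkv")  # any YUV/GRAY/RGB clip works

# Light luma-only pass, letting the wrapper pick CUDA/HIP/CPU automatically
den_y = mini_BM3D(src, profile="LC", accel="auto", planes=0, sigma=3)

# Heavier pass on all planes; extra kwargs such as sigma/radius go to BM3Dv2
den_all = mini_BM3D(src, profile="HIGH", planes=[0, 1, 2], sigma=[3, 2, 2], radius=1)

den_all.set_output()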
+ class adenoise:
+     """
+     Preset class for _adaptive_denoiser.
+
+     Intensive Adaptive Denoise.
+
+     Three denoisers are applied: mc_degrain (luma), NLMeans/CBM3D (chroma), and BM3D (luma).
+     NLMeans/CBM3D uses mc_degrain as reference to remove dirt spots and scanner noise from the clip,
+     while mc_degrain affects only the luma, which is then passed to BM3D for a second denoising pass.
+     If precision = True, a series of masks is created to enhance the denoise strength on flat areas while avoiding textured areas.
+
+     Luma masks ensure that denoising is applied mostly to the brighter areas of the frame, preserving details in darker regions while cleaning them as much as possible.
+     Note: the final result is more sensitive to variations in the luma masks than to the sigma value.
+
+     :param clip: Clip to process (YUV or GRAY 16-bit; other formats are converted internally).
+     :param thsad: Thsad for mc_degrain (luma denoise strength and chroma ref).
+         Recommended values: 300-800.
+     :param tr: Temporal radius for temporal consistency across all the filters involved.
+         Recommended values: 2-3 (1 means no temporal denoise).
+     :param sigma: Sigma for BM3D (luma denoise strength).
+         Recommended values: 1-5.
+     :param luma_mask_weaken: Controls how much dark spots should be denoised. Lower values mean stronger overall denoise.
+         Recommended values: 0.6-0.9.
+     :param luma_mask_thr: Threshold that determines what is considered bright and what is dark in the luma mask.
+         Recommended values: 0.15-0.25.
+     :param chroma_denoise: Denoiser strength and type for chroma. NLMeans/CBM3D/ArtCNN.
+         Recommended strength values: 0.5-2. If not given, 1.0 is used (the strength is ignored by ArtCNN).
+         Accepted denoiser types: "nlm", "cbm3d", "artcnn". If not given, "nlm" is used.
+     :param precision: If True, a flat mask is created to enhance the denoise strength on flat areas while avoiding textured areas (95% accuracy).
+     :param chroma_masking: If True, enables specific chroma masking for the U/V planes.
+     :param show_mask: 1 = show the first luma mask, 2 = show the textured luma mask, 3 = show the complete luma mask, 4 = show the chroma U plane mask (if chroma_masking = True), 5 = show the chroma V plane mask (if chroma_masking = True). Any other value returns the denoised clip.
+     :param luma_over_texture: Multiplier for the luma mask in precision mode. Lower values give more weight to textured areas, higher values give more weight to luma levels.
+         Accepted values: 0.0-1.0.
+     :param kwargs_flatmask: Additional arguments for flatmask creation.
+         dict values (check hd_flatmask for more info):
+         sigma1: Choose this based on the detail level of the clip and how much grain and noise is present. Usually 1 for a heavily textured clip, 2-3 for a normal clip, 4-5 for a clip with strong noise or grain.
+         texture_strength: Texture strength for the mask (0-inf). Values above 1 decrease the strength of the texture in the mask, lower values increase it. The maximum is theoretically infinite, but there is no gain past a certain point.
+         edges_strength: Edge strength for the mask (0-1). Basic multiplier for edge strength.
+
+     :return: 16-bit denoised clip. If show_mask is 1, 2, 3, 4 or 5, returns a tuple (denoised_clip, mask).
+     """
+
+     @classmethod
+     def _adaptive_denoiser(
+         cls,
+         clip: vs.VideoNode,
+         thsad: int = 500,
+         tr: int = 2,
+         sigma: float = 6,
+         luma_mask_weaken: float = 0.75,
+         luma_mask_thr: float = 0.196,
+         chroma_denoise: float | str | tuple[float, str] = (1.0, "nlm"),
+         precision: bool = True,
+         chroma_masking: bool = False,
+         luma_over_texture: float = 0.4,
+         kwargs_flatmask: Optional[dict] = None,
+         **kwargs
+     ) -> vs.VideoNode:
+
+         from vstools import depth
+         from vsdenoise import Prefilter, mc_degrain, nl_means, MVTools, SearchMode, MotionMode, SADMode
+         from .admask import luma_mask_ping, luma_mask_man, hd_flatmask
+         from .adutils import plane
+
+         core = vs.core
+
+         if clip.format.color_family not in {vs.YUV, vs.GRAY}:
+             raise ValueError('adaptive_denoiser: only YUV and GRAY formats are supported')
+
+         clip = depth(clip, 16, dither_type="none")
+
+         lumamask = luma_mask_ping(clip, thr=luma_mask_thr)
+         darken_luma_mask = core.akarin.Expr([lumamask], f"x {luma_mask_weaken} *")
+
+         # Degrain
+         if "is_digital" not in kwargs:
+             # MVTools initialization
+             mvtools = MVTools(clip)
+             vectors = mvtools.analyze(blksize=16, tr=tr, overlap=8, lsad=300, search=SearchMode.UMH, truemotion=MotionMode.SAD, dct=SADMode.MIXED_SATD_DCT)
+             mfilter = mini_BM3D(clip, sigma=sigma*1.25, radius=tr, profile="LC", planes=0)
+             degrain = mc_degrain(clip, prefilter=Prefilter.DFTTEST, blksize=8, mfilter=mfilter, thsad=thsad, vectors=vectors, tr=tr, limit=1)
+         else:
+             degrain = clip
+
+         if precision:
+             flatmask_defaults = {
+                 "sigma1": 3,
+                 "texture_strength": 2,
+                 "edges_strength": 0.05
+             }
+             flatmask = hd_flatmask(degrain, **(flatmask_defaults | (kwargs_flatmask or {})))
+
+             if not 0.0 <= luma_over_texture <= 1.0:
+                 raise ValueError("luma_over_texture must be between 0 and 1")
+             if luma_over_texture == 1:
+                 raise ValueError("don't use precision mode if luma_over_texture is 1")
+
+             darken_luma_mask = core.akarin.Expr([darken_luma_mask, flatmask], f"x {luma_over_texture} * y {1 - luma_over_texture} * +")
+
+         denoised = mini_BM3D(plane(degrain, 0), sigma=sigma, radius=tr, profile="HIGH")
+         y_denoised = core.std.MaskedMerge(denoised, plane(clip, 0), darken_luma_mask)  # keep the original where the mask is strong
+
+         if clip.format.color_family == vs.GRAY:
+             return y_denoised
+
+         # Chroma denoise: normalize chroma_denoise to a (strength, type) pair first
+         if isinstance(chroma_denoise, str):
+             chroma_denoise = (1.0, chroma_denoise)
+         elif isinstance(chroma_denoise, (int, float)):
+             chroma_denoise = (float(chroma_denoise), "nlm")
+         else:
+             chroma_denoise = (float(chroma_denoise[0]), str(chroma_denoise[1]))
+
+         if chroma_denoise[0] <= 0:
+             chroma_denoised = clip
+         elif chroma_denoise[1] == "nlm":
+             chroma_denoised = nl_means(clip, h=chroma_denoise[0], tr=tr, ref=degrain, planes=[1, 2])
+         elif chroma_denoise[1] == "cbm3d":
+             chroma_denoised = mini_BM3D(clip, sigma=chroma_denoise[0], radius=tr, ref=degrain, planes=[1, 2])
+         elif chroma_denoise[1] == "artcnn":
+             from vsscale import ArtCNN
+             chroma_denoised = ArtCNN.R8F64_JPEG420().scale(clip)
+         else:
+             raise ValueError(f"adaptive_denoiser: unknown chroma denoiser '{chroma_denoise[1]}'")
+
+         if chroma_masking:
+             u = plane(clip, 1)
+             u_mask = luma_mask_man(u, t=1.5, s=2, a=0)
+             u_denoised = core.std.MaskedMerge(u, plane(chroma_denoised, 1), u_mask)
+             v = plane(clip, 2)
+             v_mask = luma_mask_man(v, t=1.5, s=2, a=0)
+             v_denoised = core.std.MaskedMerge(v, plane(chroma_denoised, 2), v_mask)
+             return core.std.ShufflePlanes(clips=[y_denoised, u_denoised, v_denoised], planes=[0, 0, 0], colorfamily=vs.YUV)
+
+         return core.std.ShufflePlanes(clips=[y_denoised, chroma_denoised, chroma_denoised], planes=[0, 1, 2], colorfamily=vs.YUV)
+
+     @classmethod
+     def _adaptive_denoiser_tuple(
+         cls,
+         clip: vs.VideoNode,
+         thsad: int = 500,
+         tr: int = 2,
+         sigma: float = 6,
+         luma_mask_weaken: float = 0.75,
+         luma_mask_thr: float = 0.196,
+         chroma_denoise: float | str | tuple[float, str] = (1.0, "nlm"),
+         precision: bool = True,
+         chroma_masking: bool = False,
+         show_mask: int = 0,
+         luma_over_texture: float = 0.4,
+         kwargs_flatmask: Optional[dict] = None,
+         **kwargs
+     ) -> tuple[vs.VideoNode, vs.VideoNode]:
+
+         from vstools import depth
+         from vsdenoise import Prefilter, mc_degrain, nl_means, MVTools, SearchMode, MotionMode, SADMode
+         from .admask import luma_mask_ping, luma_mask_man, hd_flatmask
+         from .adutils import plane
+
+         core = vs.core
+
+         selected_mask = None
+
+         if clip.format.color_family not in {vs.YUV, vs.GRAY}:
+             raise ValueError('adaptive_denoiser: only YUV and GRAY formats are supported')
+
+         clip = depth(clip, 16, dither_type="none")
+
+         lumamask = luma_mask_ping(clip, thr=luma_mask_thr)
+         darken_luma_mask = core.akarin.Expr([lumamask], f"x {luma_mask_weaken} *")
+
+         if show_mask == 1:
+             selected_mask = darken_luma_mask
+
+         # Degrain
+         if "is_digital" not in kwargs:
+             mvtools = MVTools(clip)
+             vectors = mvtools.analyze(blksize=16, tr=tr, overlap=8, lsad=300, search=SearchMode.UMH, truemotion=MotionMode.SAD, dct=SADMode.MIXED_SATD_DCT)
+             mfilter = mini_BM3D(clip, sigma=sigma*1.25, radius=tr, profile="LC", planes=0)
+             degrain = mc_degrain(clip, prefilter=Prefilter.DFTTEST, blksize=8, mfilter=mfilter, thsad=thsad, vectors=vectors, tr=tr, limit=1)
+         else:
+             degrain = clip
+
+         if precision:
+             flatmask_defaults = {
+                 "sigma1": 3,
+                 "texture_strength": 2,
+                 "edges_strength": 0.05
+             }
+             flatmask = hd_flatmask(degrain, **(flatmask_defaults | (kwargs_flatmask or {})))
+             if show_mask == 2:
+                 selected_mask = flatmask
+
+             if not 0.0 <= luma_over_texture <= 1.0:
+                 raise ValueError("luma_over_texture must be between 0 and 1")
+             if luma_over_texture == 1:
+                 raise ValueError("don't use precision mode if luma_over_texture is 1")
+             darken_luma_mask = core.akarin.Expr([darken_luma_mask, flatmask], f"x {luma_over_texture} * y {1 - luma_over_texture} * +")
+
+         if show_mask == 3:
+             selected_mask = darken_luma_mask
+
+         denoised = mini_BM3D(plane(degrain, 0), sigma=sigma, radius=tr, profile="HIGH")
+         y_denoised = core.std.MaskedMerge(denoised, plane(clip, 0), darken_luma_mask)  # keep the original where the mask is strong
+
+         # Chroma denoise: normalize chroma_denoise to a (strength, type) pair first
+         if isinstance(chroma_denoise, str):
+             chroma_denoise = (1.0, chroma_denoise)
+         elif isinstance(chroma_denoise, (int, float)):
+             chroma_denoise = (float(chroma_denoise), "nlm")
+         else:
+             chroma_denoise = (float(chroma_denoise[0]), str(chroma_denoise[1]))
+
+         if clip.format.color_family == vs.GRAY or chroma_denoise[0] <= 0:
+             chroma_denoised = clip
+         elif chroma_denoise[1] == "nlm":
+             chroma_denoised = nl_means(clip, h=chroma_denoise[0], tr=tr, ref=degrain, planes=[1, 2])
+         elif chroma_denoise[1] == "cbm3d":
+             chroma_denoised = mini_BM3D(clip, sigma=chroma_denoise[0], radius=tr, ref=degrain, planes=[1, 2])
+         elif chroma_denoise[1] == "artcnn":
+             from vsscale import ArtCNN
+             chroma_denoised = ArtCNN.R8F64_JPEG420().scale(clip)
+         else:
+             raise ValueError(f"adaptive_denoiser: unknown chroma denoiser '{chroma_denoise[1]}'")
+
+         if clip.format.color_family == vs.GRAY:
+             final = y_denoised
+         elif chroma_masking and chroma_denoise[0] > 0:
+             u = plane(clip, 1)
+             u_mask = luma_mask_man(u, t=1.5, s=2, a=0)
+             u_masked = core.std.MaskedMerge(u, plane(chroma_denoised, 1), u_mask)
+             v = plane(clip, 2)
+             v_mask = luma_mask_man(v, t=1.5, s=2, a=0)
+             v_masked = core.std.MaskedMerge(v, plane(chroma_denoised, 2), v_mask)
+             final = core.std.ShufflePlanes(clips=[y_denoised, u_masked, v_masked], planes=[0, 0, 0], colorfamily=vs.YUV)
+
+             if show_mask == 4:
+                 selected_mask = u_mask
+             elif show_mask == 5:
+                 selected_mask = v_mask
+         else:
+             final = core.std.ShufflePlanes(clips=[y_denoised, chroma_denoised, chroma_denoised], planes=[0, 1, 2], colorfamily=vs.YUV)
+
+         return final, selected_mask
+
+     # Presets
+     @staticmethod
+     def scan65mm(clip: vs.VideoNode, thsad: int = 200, tr: int = 2, sigma: float = 2, luma_mask_weaken: float = 0.9, luma_mask_thr: float = 0.196, chroma_denoise: float | tuple[float, str] = (0.5, "nlm"), precision: bool = True, chroma_masking: bool = False, show_mask: int = 0, luma_over_texture: float = 0.4, kwargs_flatmask: Optional[dict] = None) -> vs.VideoNode | tuple[vs.VideoNode, vs.VideoNode]:
+         """ changes: thsad=200, sigma=2, luma_mask_weaken=0.9, chroma_strength=0.5 """
+         if show_mask in [1, 2, 3, 4, 5]:
+             return adenoise._adaptive_denoiser_tuple(clip, thsad, tr, sigma, luma_mask_weaken, luma_mask_thr, chroma_denoise, precision, chroma_masking, show_mask, luma_over_texture, kwargs_flatmask)
+         return adenoise._adaptive_denoiser(clip, thsad, tr, sigma, luma_mask_weaken, luma_mask_thr, chroma_denoise, precision, chroma_masking, luma_over_texture, kwargs_flatmask)
+
+     @staticmethod
+     def scan35mm(clip: vs.VideoNode, thsad: int = 400, tr: int = 2, sigma: float = 4, luma_mask_weaken: float = 0.8, luma_mask_thr: float = 0.196, chroma_denoise: float | tuple[float, str] = (0.7, "nlm"), precision: bool = True, chroma_masking: bool = False, show_mask: int = 0, luma_over_texture: float = 0.4, kwargs_flatmask: Optional[dict] = None) -> vs.VideoNode | tuple[vs.VideoNode, vs.VideoNode]:
+         """ changes: thsad=400, sigma=4, luma_mask_weaken=0.8, chroma_strength=0.7 """
+         if show_mask in [1, 2, 3, 4, 5]:
+             return adenoise._adaptive_denoiser_tuple(clip, thsad, tr, sigma, luma_mask_weaken, luma_mask_thr, chroma_denoise, precision, chroma_masking, show_mask, luma_over_texture, kwargs_flatmask)
+         return adenoise._adaptive_denoiser(clip, thsad, tr, sigma, luma_mask_weaken, luma_mask_thr, chroma_denoise, precision, chroma_masking, luma_over_texture, kwargs_flatmask)
+
+     @staticmethod
+     def scan16mm(clip: vs.VideoNode, thsad: int = 600, tr: int = 2, sigma: float = 8, luma_mask_weaken: float = 0.75, luma_mask_thr: float = 0.196, chroma_denoise: float | tuple[float, str] = (1.0, "nlm"), precision: bool = True, chroma_masking: bool = False, show_mask: int = 0, luma_over_texture: float = 0.4, kwargs_flatmask: Optional[dict] = None) -> vs.VideoNode | tuple[vs.VideoNode, vs.VideoNode]:
+         """ changes: thsad=600, sigma=8 """
+         if show_mask in [1, 2, 3, 4, 5]:
+             return adenoise._adaptive_denoiser_tuple(clip, thsad, tr, sigma, luma_mask_weaken, luma_mask_thr, chroma_denoise, precision, chroma_masking, show_mask, luma_over_texture, kwargs_flatmask)
+         return adenoise._adaptive_denoiser(clip, thsad, tr, sigma, luma_mask_weaken, luma_mask_thr, chroma_denoise, precision, chroma_masking, luma_over_texture, kwargs_flatmask)
+
+     @staticmethod
+     def scan8mm(clip: vs.VideoNode, thsad: int = 800, tr: int = 2, sigma: float = 12, luma_mask_weaken: float = 0.75, luma_mask_thr: float = 0.196, chroma_denoise: float | tuple[float, str] = (1.5, "nlm"), precision: bool = True, chroma_masking: bool = False, show_mask: int = 0, luma_over_texture: float = 0.4, kwargs_flatmask: Optional[dict] = None) -> vs.VideoNode | tuple[vs.VideoNode, vs.VideoNode]:
+         """ changes: thsad=800, sigma=12, chroma_strength=1.5 """
+         if show_mask in [1, 2, 3, 4, 5]:
+             return adenoise._adaptive_denoiser_tuple(clip, thsad, tr, sigma, luma_mask_weaken, luma_mask_thr, chroma_denoise, precision, chroma_masking, show_mask, luma_over_texture, kwargs_flatmask)
+         return adenoise._adaptive_denoiser(clip, thsad, tr, sigma, luma_mask_weaken, luma_mask_thr, chroma_denoise, precision, chroma_masking, luma_over_texture, kwargs_flatmask)
+
+     @staticmethod
+     def digital(clip: vs.VideoNode, thsad: int = 300, tr: int = 2, sigma: float = 3, luma_mask_weaken: float = 0.75, luma_mask_thr: float = 0.196, chroma_denoise: float | tuple[float, str] = (1.0, "nlm"), precision: bool = True, chroma_masking: bool = False, show_mask: int = 0, luma_over_texture: float = 0.0, kwargs_flatmask: Optional[dict] = None) -> vs.VideoNode | tuple[vs.VideoNode, vs.VideoNode]:
+         """ changes: thsad=300, sigma=3, luma_over_texture=0 """
+         if show_mask in [1, 2, 3, 4, 5]:
+             return adenoise._adaptive_denoiser_tuple(clip, thsad, tr, sigma, luma_mask_weaken, luma_mask_thr, chroma_denoise, precision, chroma_masking, show_mask, luma_over_texture, kwargs_flatmask, is_digital=True)
+         return adenoise._adaptive_denoiser(clip, thsad, tr, sigma, luma_mask_weaken, luma_mask_thr, chroma_denoise, precision, chroma_masking, luma_over_texture, kwargs_flatmask, is_digital=True)
+
+     @staticmethod
+     def default(clip: vs.VideoNode, thsad: int = 500, tr: int = 2, sigma: float = 6, luma_mask_weaken: float = 0.75, luma_mask_thr: float = 0.196, chroma_denoise: float | tuple[float, str] = (1.0, "nlm"), precision: bool = True, chroma_masking: bool = False, show_mask: int = 0, luma_over_texture: float = 0.4, kwargs_flatmask: Optional[dict] = None) -> vs.VideoNode | tuple[vs.VideoNode, vs.VideoNode]:
+         """ default profile """
+         if show_mask in [1, 2, 3, 4, 5]:
+             return adenoise._adaptive_denoiser_tuple(clip, thsad, tr, sigma, luma_mask_weaken, luma_mask_thr, chroma_denoise, precision, chroma_masking, show_mask, luma_over_texture, kwargs_flatmask)
+         return adenoise._adaptive_denoiser(clip, thsad, tr, sigma, luma_mask_weaken, luma_mask_thr, chroma_denoise, precision, chroma_masking, luma_over_texture, kwargs_flatmask)
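A short, hedged example of how the presets above might be called in a script. The source filter and file name are placeholders, and `vsdirty`'s star imports are assumed to expose the class:

import vapoursynth as vs
from vsdirty import adenoise

core = vs.core
src = core.lsmas.LWLibavSource("scan_16mm.mkv")

# Film-gauge preset: stronger settings for noisier 16 mm scans
den = adenoise.scan16mm(src, tr=3, chroma_masking=True)

# With show_mask set, the presets return (denoised, mask) for inspection
den_prev, luma_tex_mask = adenoise.default(src, show_mask=3)

den.set_output(0)
luma_tex_mask.set_output(1)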
+
+ # Ported from fvsfunc
+ def auto_deblock(
+     clip: vs.VideoNode,
+     sigma: int = 15,
+     tbsize: int = 1,
+     luma_mask_strength: float = 0.9,
+     pre: bool = False,
+     mask_type: int = 0,
+     planes: PlanesT = None
+ ) -> vs.VideoNode:
+     """
+     Deblocker for 8x8 and other block artifacts.
+
+     :param clip: Clip to process (YUV 16-bit; other formats are converted internally).
+     :param sigma: Sigma value for the dfttest deblock.
+     :param tbsize: Length of the temporal dimension (i.e. number of frames).
+     :param luma_mask_strength: Mask strength multiplier. Lower values mean stronger overall deblock.
+     :param pre: If True, applies a preliminary deblocking pass with vsdenoise.deblock_qed.
+     :param mask_type: Mask type to use. 0 = luma_mask, 1 = luma_mask_man, any other value = luma_mask_ping.
+     :param planes: Which planes to process. Defaults to all planes.
+     :return: Deblocked 16-bit clip.
+     """
+
+     from .admask import luma_mask_ping, luma_mask_man, luma_mask
+     from vsdenoise import deblock_qed
+     from vstools import depth
+     from dfttest2 import DFTTest
+
+     if clip.format.color_family not in [vs.YUV]:
+         raise TypeError("AutoDeblock: clip must be YUV color family!")
+
+     clip = depth(clip, 16, dither_type="none")
+
+     if pre:
+         clip = deblock_qed(clip, planes=planes)
+
+     deblock = DFTTest(clip, sigma=sigma, tbsize=tbsize, planes=planes)
+
+     if mask_type == 0:
+         lumamask = luma_mask(clip)
+     elif mask_type == 1:
+         lumamask = luma_mask_man(clip)
+     else:
+         lumamask = luma_mask_ping(clip)
+     darken_luma_mask = core.std.Expr([lumamask], f"x {luma_mask_strength} *")
+     final = core.std.MaskedMerge(deblock, clip, darken_luma_mask, planes=planes)
+
+     return final
+
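A minimal sketch of calling auto_deblock (it assumes the dfttest2 package and the required plugins are installed; the file name is a placeholder):

import vapoursynth as vs
from vsdirty import auto_deblock

core = vs.core
src = core.lsmas.LWLibavSource("blocky_web_source.mp4")

# Spatial-only deblocking (tbsize=1); pre=True adds a deblock_qed pre-pass
deb = auto_deblock(src, sigma=20, tbsize=1, pre=True, mask_type=2, planes=[0, 1, 2])
deb.set_output()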
+ def msaa2x(
+     clip: vs.VideoNode,
+     ref: Optional[vs.VideoNode] = None,
+     mask: bool = False,
+     sigma: float = 2,
+     thr: Optional[float] = None,
+     planes: PlanesT = 0,
+     **kwargs
+ ) -> vs.VideoNode:
+     """
+     Upscales the clip with AI (ArtCNN DN), downscales it back and merges the result onto the edges only.
+
+     :param clip: Clip to process (YUV or GRAY).
+     :param ref: Reference clip used to create the edgemask (should be the original, unfiltered clip). If None, clip is used after being denoised with adenoise.digital, which preserves edge detail while removing grain and noise.
+     :param mask: If True, returns the mask used.
+     :param sigma: Sigma for the adenoise.digital cleanup of the edgemask reference (removes dirt spots and blocking); only used if ref is None.
+     :param thr: Threshold used to binarize the edgemask; only values in the 0-1 range are allowed. If None, no binarization is applied.
+     :param planes: Which planes to process. Defaults to the Y plane.
+     :param kwargs: Accepts advanced_edgemask arguments.
+     :return: Antialiased clip (or the edgemask if mask=True).
+     """
+     from vsscale import ArtCNN
+     from vstools import depth
+     from .admask import advanced_edgemask
+     from .adutils import scale_binary_value, plane
+
+     if isinstance(planes, int):
+         planes = [planes]
+     if clip.format.color_family == vs.GRAY:
+         planes = [0]
+
+     if clip.format.color_family == vs.RGB:
+         raise ValueError("msaa2x: clip must be YUV or GRAY color family!")
+
+     clip = depth(clip, 16, dither_type="none")
+
+     if ref is None:
+         ref = adenoise.digital(clip, sigma=sigma, precision=False, chroma_denoise=((0 if (1 in planes or 2 in planes) else 1), "cbm3d"))
+
+     if len(planes) == 1:
+         edgemask = advanced_edgemask(plane(ref, planes[0]), **kwargs)
+     else:
+         masks = [
+             advanced_edgemask(plane(ref, p), **kwargs) if p in planes else plane(ref, p).std.BlankClip()
+             for p in range(3)
+         ]
+         edgemask = core.std.ShufflePlanes(masks, planes=[0, 0, 0], colorfamily=ref.format.color_family)
+
+     if thr is not None and thr != 0:
+         edgemask = edgemask.std.Binarize(threshold=scale_binary_value(edgemask, thr, return_int=True))
+     if mask:
+         return edgemask
+
+     upscaled = ArtCNN.C4F32_DN().scale(clip, clip.width*2, clip.height*2)
+     downscaled = core.resize.Bicubic(upscaled, clip.width, clip.height)
+     aa = core.std.MaskedMerge(clip, downscaled, edgemask, planes=0)
+
+     if 1 in planes or 2 in planes:
+         lefted = aa.resize.Spline36(src_left=-0.5)
+         aa = core.std.ShufflePlanes([aa, lefted, lefted], planes=[0, 1, 2], colorfamily=clip.format.color_family)
+         aa = ArtCNN.R8F64_Chroma().scale(aa)
+         chroma_downscaled = core.resize.Bicubic(aa, clip.width // 2, clip.height // 2)
+         u = plane(chroma_downscaled, 1)
+         v = plane(chroma_downscaled, 2)
+         if 0 not in planes:
+             downscaled = clip
+         all_downscaled = core.std.ShufflePlanes([downscaled, u, v], planes=[0, 0, 0], colorfamily=clip.format.color_family)
+         aa = core.std.MaskedMerge(clip, all_downscaled, edgemask, planes=planes)
+
+     return aa
+
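A hedged sketch of msaa2x usage (it assumes the ArtCNN models and their inference backend are available through vsscale; the source filter and file name are placeholders):

import vapoursynth as vs
from vsdirty import msaa2x

core = vs.core
src = core.lsmas.LWLibavSource("aliased_source.mkv")

# Inspect the edge mask first, then run the luma-only antialiasing pass
edge_mask = msaa2x(src, mask=True, thr=0.25)
aa = msaa2x(src, thr=0.25, planes=0)

aa.set_output(0)
edge_mask.set_output(1)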