soifunc 0.11.3__tar.gz → 0.14.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {soifunc-0.11.3 → soifunc-0.14.0}/PKG-INFO +5 -3
- {soifunc-0.11.3 → soifunc-0.14.0}/pyproject.toml +4 -4
- soifunc-0.14.0/soifunc/deband.py +64 -0
- {soifunc-0.11.3 → soifunc-0.14.0}/soifunc/denoise.py +20 -12
- soifunc-0.14.0/soifunc/interpolate.py +245 -0
- soifunc-0.14.0/soifunc/resize.py +72 -0
- soifunc-0.11.3/soifunc/deband.py +0 -46
- soifunc-0.11.3/soifunc/interpolate.py +0 -134
- soifunc-0.11.3/soifunc/resize.py +0 -112
- {soifunc-0.11.3 → soifunc-0.14.0}/LICENSE +0 -0
- {soifunc-0.11.3 → soifunc-0.14.0}/README.md +0 -0
- {soifunc-0.11.3 → soifunc-0.14.0}/soifunc/__init__.py +0 -0
- {soifunc-0.11.3 → soifunc-0.14.0}/soifunc/py.typed +0 -0
{soifunc-0.11.3 → soifunc-0.14.0}/PKG-INFO

@@ -1,8 +1,9 @@
-Metadata-Version: 2.
+Metadata-Version: 2.4
 Name: soifunc
-Version: 0.11.3
+Version: 0.14.0
 Summary: Soichiro's VapourSynth Functions Collection
 License: MIT
+License-File: LICENSE
 Author: Josh Holmer
 Author-email: jholmer.in@gmail.com
 Requires-Python: >=3.12,<4.0
@@ -10,8 +11,9 @@ Classifier: License :: OSI Approved :: MIT License
 Classifier: Programming Language :: Python :: 3
 Classifier: Programming Language :: Python :: 3.12
 Classifier: Programming Language :: Python :: 3.13
+Classifier: Programming Language :: Python :: 3.14
 Requires-Dist: vapoursynth (>=68)
-Requires-Dist: vsjetpack (>=0.
+Requires-Dist: vsjetpack (>=1.0.0,<2.0.0)
 Description-Content-Type: text/markdown
 
 ## soifunc
{soifunc-0.11.3 → soifunc-0.14.0}/pyproject.toml

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "soifunc"
-version = "0.11.3"
+version = "0.14.0"
 description = "Soichiro's VapourSynth Functions Collection"
 authors = ["Josh Holmer <jholmer.in@gmail.com>"]
 license = "MIT"
@@ -9,11 +9,11 @@ readme = "README.md"
 [tool.poetry.dependencies]
 python = ">=3.12,<4.0"
 vapoursynth = ">=68"
-vsjetpack = "^0.
+vsjetpack = "^1.0.0"
 
 [tool.poetry.group.dev.dependencies]
-black = "^25.
-isort = "^
+black = "^25.9.0"
+isort = "^7.0.0"
 pre-commit = "^4.2.0"
 
 [build-system]
soifunc-0.14.0/soifunc/deband.py
ADDED

@@ -0,0 +1,64 @@
+from __future__ import annotations
+
+from vsdeband import f3k_deband
+from vsmasktools import dre_edgemask
+from vstools import (
+    UnsupportedVideoFormatError,
+    VariableFormatError,
+    check_variable,
+    core,
+    vs,
+)
+
+__all__ = [
+    "retinex_deband",
+]
+
+
+def retinex_deband(
+    clip: vs.VideoNode,
+    threshold: int,
+    showmask: bool = False,
+) -> vs.VideoNode:
+    """Debanding using contrast-adaptive edge masking.
+
+    Args:
+        clip: Input video (8-16bit YUV required).
+        threshold: Debanding strength (0-255). Default ~16-48 recommended.
+        showmask: If True, return edge mask instead of debanded clip.
+
+    Returns:
+        Debanded video clip or edge mask.
+
+    Note:
+        Does not add grain. Use vsdeband.AddNoise for post-denoising.
+    """
+    if threshold < 0 or threshold > 255:
+        raise ValueError(f"threshold must be between 0-255, got {threshold}")
+
+    if not check_variable(clip, retinex_deband):
+        raise VariableFormatError("clip must have constant format and fps")
+
+    if (
+        clip.format.color_family != vs.YUV
+        or clip.format.sample_type != vs.INTEGER
+        or clip.format.bits_per_sample > 16
+    ):
+        raise UnsupportedVideoFormatError(
+            retinex_deband,
+            clip.format,
+            "The format {format.name} is not supported! It must be an 8-16bit integer YUV bit format!",
+        )
+
+    mask: vs.VideoNode = dre_edgemask.CLAHE(clip)
+
+    if showmask:
+        return mask
+
+    # The threshold value that `retinex_deband` takes is relative
+    # to 8-bit videos, but `f3kdb` changed their threshold
+    # values to be relative to 10-bit videos some time after this
+    # function was created. To keep this function compatible,
+    # we shift our threshold from 8-bit to 10-bit.
+    deband = f3k_deband(clip, thr=(threshold << 2))
+    return core.std.MaskedMerge(deband, clip, mask)
{soifunc-0.11.3 → soifunc-0.14.0}/soifunc/denoise.py

@@ -3,17 +3,18 @@ from __future__ import annotations
 from typing import Callable, Optional
 
 import vsdenoise
-from vsdenoise import DFTTest, bm3d, mc_degrain
-from vstools import core, vs
+from vsdenoise import DFTTest, bm3d, mc_degrain, nl_means
+from vstools import core, depth, get_u, get_y, join, split, vs
 
 __all__ = ["MCDenoise", "magic_denoise", "hqbm3d", "mc_dfttest"]
 
 
 def hqbm3d(
     clip: vs.VideoNode,
-    luma_str: float = 0.
+    luma_str: float = 0.5,
     chroma_str: float = 0.4,
     profile: bm3d.Profile = bm3d.Profile.FAST,
+    tr: int = 1,
 ) -> vs.VideoNode:
     """
     High-quality presets for motion compensated denoising.
@@ -39,12 +40,24 @@ def hqbm3d(
         ),
         planes=None,
     )
-
-
+    [y, u, v] = split(clip)
+    [y_mv, u_mv, v_mv] = split(mv)
+    out_y = bm3d(y, sigma=luma_str, tr=tr, ref=y_mv, profile=profile)
+    if not hasattr(core, "nlm_cuda") and hasattr(core, "knlm"):
+        # the KNLMeansCL would force extra depth conversions
+        # and re-processing of the luma plane, so avoid it.
+        nlm_backend = nl_means.Backend.ISPC
+    else:
+        nlm_backend = nl_means.Backend.AUTO
+    out_u = nl_means(u, h=chroma_str, tr=tr, ref=u_mv, backend=nlm_backend)
+    out_v = nl_means(v, h=chroma_str, tr=tr, ref=v_mv, backend=nlm_backend)
+    return join(out_y, out_u, out_v, prop_src=clip)
 
 
 def mc_dfttest(
-    clip: vs.VideoNode,
+    clip: vs.VideoNode,
+    thSAD: int = 75,
+    tr: int = 2,
 ) -> vs.VideoNode:
     """
     A motion-compensated denoiser using DFTTEST.
@@ -52,19 +65,14 @@ def mc_dfttest(
     Turn it up to 150 or more if you really need to nuke something.
     It does a decent job at preserving details, but not nearly as good
     as bm3d, so this is not recommended on clean, high-quality sources.
-
-    The `noisy` parameter did help preserve more detail on high-quality but grainy sources.
-    Currently it is deprecated, as the presets in `vsdenoise` changed,
-    but it may be un-deprecated in the future.
     """
-    # TODO: Do we need to tweak anything for the `noisy` param?
     blksize = select_block_size(clip)
     return mc_degrain(
         clip,
         prefilter=vsdenoise.Prefilter.DFTTEST,
         preset=vsdenoise.MVToolsPreset.HQ_SAD,
         thsad=thSAD,
-        tr=
+        tr=tr,
         refine=3 if blksize > 16 else 2,
         blksize=blksize,
     )
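A similar hedged sketch of the reworked denoisers (again not from the diff; the source clip is a stand-in, the top-level re-exports are assumed from `__all__`, and the usual MVTools/BM3D/NLMeans plugin dependencies are assumed installed):

```python
import vapoursynth as vs

import soifunc

core = vs.core
src = core.std.BlankClip(format=vs.YUV420P16, width=1280, height=720, length=48)

# hqbm3d now denoises luma with BM3D and chroma with nl_means, both guided by
# the same motion-compensated reference; the new tr parameter sets the
# temporal radius for both.
clean = soifunc.hqbm3d(src, luma_str=0.5, chroma_str=0.4, tr=1)

# mc_dfttest now exposes thSAD and tr directly; raise thSAD to denoise harder.
nuked = soifunc.mc_dfttest(src, thSAD=150, tr=2)
```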
soifunc-0.14.0/soifunc/interpolate.py
ADDED

@@ -0,0 +1,245 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+import vstools
+from vsscale import autoselect_backend
+from vstools import vs
+
+if TYPE_CHECKING:
+    from vsmlrt import backendT
+
+__all__ = ["rate_doubler", "decimation_fixer", "replace_dupes"]
+
+
+def rate_doubler(
+    clip: vs.VideoNode, multi: int = 2, backend: backendT | None = None
+) -> vs.VideoNode:
+    """
+    A utility to scale the framerate of a video via frame interpolation.
+
+    Probably shouldn't just go spraying this everywhere,
+    it's more for fun and science than anything.
+    """
+    import vsmlrt
+
+    width = clip.width
+    height = clip.height
+    format = clip.format
+    matrix = vstools.Matrix.from_video(clip)
+    transfer = vstools.Transfer.from_video(clip)
+    primaries = vstools.Primaries.from_video(clip)
+    clip = clip.misc.SCDetect()
+    clip = clip.resize.Bicubic(
+        format=vs.RGBS,
+        width=next_multiple_of(64, width),
+        height=next_multiple_of(64, height),
+    )
+    clip = vsmlrt.RIFE(
+        clip,
+        multi=multi,
+        model=vsmlrt.RIFEModel.v4_25_heavy,
+        # Why these defaults? Because running ML stuff on AMD on Windows sucks hard.
+        # Trial and error led me to finally find that ORT_DML works.
+        backend=(backend if backend else autoselect_backend()),
+    )
+    # TODO: Handle other chroma samplings
+    clip = clip.resize.Bicubic(
+        format=format,
+        width=width,
+        height=height,
+        matrix=matrix,
+        transfer=transfer,
+        primaries=primaries,
+    )
+    return clip
+
+
+def replace_dupes(
+    clip: vs.VideoNode,
+    max_length: int = 5,
+    backend: backendT | None = None,
+    threshold: float = 0.001,
+) -> vs.VideoNode:
+    """
+    Detects strings of duplicate frames in a video and replaces them
+    with interpolated frames from RIFE.
+
+    Max number of continuous duplicates to detect is determined by the `max_length` parameter.
+    `threshold` is the maximum average pixel difference (0-1 scale) to consider frames as duplicates.
+    Lower values are stricter (frames must be more similar to be considered duplicates).
+    """
+    import vsmlrt
+
+    # Store original properties
+    width = clip.width
+    height = clip.height
+    format = clip.format
+    matrix = vstools.Matrix.from_video(clip)
+    transfer = vstools.Transfer.from_video(clip)
+    primaries = vstools.Primaries.from_video(clip)
+
+    # Compute frame differences using PlaneStats
+    # This compares each frame with the previous one
+    diff_clip = clip.std.PlaneStats(clip[0] + clip)
+
+    # Prepare clip for RIFE (convert to RGBS and resize to multiple of 64)
+    rife_clip = clip.resize.Bicubic(
+        format=vs.RGBS,
+        width=next_multiple_of(64, width),
+        height=next_multiple_of(64, height),
+    )
+
+    # Create interpolated frames using RIFE (double the framerate)
+    interpolated = vsmlrt.RIFE(
+        rife_clip,
+        multi=2,
+        model=vsmlrt.RIFEModel.v4_25_heavy,
+        backend=(backend if backend else autoselect_backend()),
+    )
+
+    # Convert interpolated frames back to original format
+    interpolated = interpolated.resize.Bicubic(
+        format=format,
+        width=width,
+        height=height,
+        matrix=matrix,
+        transfer=transfer,
+        primaries=primaries,
+    )
+
+    # Track sequence state for lazy evaluation
+    state = {"prev_len": 0}
+
+    def select_frame(n):
+        """
+        Select interpolated frame if current frame is a duplicate,
+        otherwise use original. Copies PlaneStatsDiff property to output
+        to help users calibrate the threshold parameter.
+        """
+        if n == 0 or n == clip.num_frames - 1:
+            state["prev_len"] = 0
+            # Frame 0 and final frame are never duplicates
+            # (no previous frame for 0, no next frame for final)
+            output = clip[n : n + 1]
+            diff_val = (
+                0.0
+                if n == 0
+                else diff_clip.get_frame(n).props.get("PlaneStatsDiff", 1.0)
+            )
+            return output.std.SetFrameProp(prop="PlaneStatsDiff", floatval=diff_val)
+
+        # Get difference from PlaneStats (lazy evaluation)
+        f = diff_clip.get_frame(n)
+        diff = f.props.get("PlaneStatsDiff", 1.0)
+
+        # Determine if this is a duplicate
+        if diff < threshold:
+            new_len = state["prev_len"] + 1
+            if new_len <= max_length:
+                state["prev_len"] = new_len
+                is_dupe = True
+            else:
+                state["prev_len"] = 0
+                is_dupe = False
+        else:
+            state["prev_len"] = 0
+            is_dupe = False
+
+        if is_dupe:
+            # Use interpolated frame between previous and current
+            # If the original sequence is 0 1 2 where 0 and 1 are dupes,
+            # the interpolated sequence will have 0 1 2 3 4 5
+            # where 3 is the interpolated frame we want to fetch
+            # to replace frame 1..
+
+            output = interpolated[n * 2 + 1 : n * 2 + 2]
+        else:
+            output = clip[n : n + 1]
+
+        # Attach PlaneStatsDiff property to output frame for threshold calibration
+        return output.std.SetFrameProp(prop="PlaneStatsDiff", floatval=diff)
+
+    # Apply frame selection with lazy evaluation
+    result = clip.std.FrameEval(select_frame)
+
+    return result
+
+
+def decimation_fixer(
+    clip: vs.VideoNode, cycle: int, offset: int = 0, backend: backendT | None = None
+) -> vs.VideoNode:
+    """
+    Attempts to interpolate frames that were removed by bad decimation.
+    Only works with static decimation cycles.
+    `cycle` should be the output cycle, i.e. what did the idiot who decimated this
+    pass into the decimation filter to achieve this monstrosity?
+
+    Yeah, I know, "ThiS is bAd AND yOu shoUldn'T Do IT".
+    Maybe people shouldn't decimate clips that don't need decimation.
+    Sometimes you can't "just get a better source".
+    """
+    import vsmlrt
+
+    if offset >= cycle - 1:
+        raise Exception("offset must be less than cycle - 1")
+    if cycle <= 0:
+        raise Exception("cycle must be greater than zero")
+
+    width = clip.width
+    height = clip.height
+    fps = clip.fps
+    format = clip.format
+    input_cycle = cycle - 1
+    matrix = vstools.Matrix.from_video(clip)
+    transfer = vstools.Transfer.from_video(clip)
+    primaries = vstools.Primaries.from_video(clip)
+    clip = clip.misc.SCDetect()
+    clip = clip.resize.Bicubic(
+        format=vs.RGBS,
+        width=next_multiple_of(64, width),
+        height=next_multiple_of(64, height),
+    )
+    doubled = vsmlrt.RIFE(
+        clip,
+        model=vsmlrt.RIFEModel.v4_25_heavy,
+        backend=(backend if backend else autoselect_backend()),
+    )
+
+    out_clip = None
+    # This is the frame after our insertion point
+    src_frame = offset
+    last_src_frame = 0
+    # This is the frame we want to grab from the doubled clip
+    doub_frame = offset * 2 - 1
+    while src_frame < clip.num_frames:
+        if src_frame > 0:
+            interp = doubled[doub_frame]
+            if out_clip is None:
+                out_clip = clip[last_src_frame:src_frame] + interp
+            else:
+                out_clip = out_clip + clip[last_src_frame:src_frame] + interp
+        last_src_frame = src_frame
+        src_frame += input_cycle
+        doub_frame += input_cycle * 2
+    out_clip += clip[last_src_frame:]
+    out_clip = out_clip.std.AssumeFPS(
+        fpsnum=fps.numerator * cycle // input_cycle, fpsden=fps.denominator
+    )
+
+    out_clip = out_clip.resize.Bicubic(
+        format=format,
+        width=width,
+        height=height,
+        matrix=matrix,
+        transfer=transfer,
+        primaries=primaries,
+    )
+    return out_clip
+
+
+def next_multiple_of(multiple: int, param: int) -> int:
+    rem = param % multiple
+    if rem == 0:
+        return param
+    return param + (multiple - rem)
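The padding and frame-index arithmetic used above can be checked standalone in plain Python (`next_multiple_of` is copied verbatim from the diff; the rest only restates the comments in the code):

```python
from fractions import Fraction


def next_multiple_of(multiple: int, param: int) -> int:
    rem = param % multiple
    if rem == 0:
        return param
    return param + (multiple - rem)


# RIFE input dimensions are padded up to a multiple of 64:
assert next_multiple_of(64, 1920) == 1920  # already aligned
assert next_multiple_of(64, 1080) == 1088  # padded up by 8

# With multi=2, doubled[2*n] corresponds to source frame n, so replace_dupes
# swaps a duplicate at source index n for doubled[2*n + 1], the frame RIFE
# interpolated between source frames n and n+1:
n = 1
assert n * 2 + 1 == 3  # matches the "0 1 2 -> 0 1 2 3 4 5" comment above

# decimation_fixer scales the frame rate by cycle / (cycle - 1), mirroring the
# AssumeFPS call: restoring a cycle=5 decimation of 30000/1001 (29.97) fps
# takes the broken 24000/1001 (23.976) fps clip back up by a factor of 5/4.
assert Fraction(24000, 1001) * Fraction(5, 4) == Fraction(30000, 1001)
```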
soifunc-0.14.0/soifunc/resize.py
ADDED

@@ -0,0 +1,72 @@
+from __future__ import annotations
+
+from vsaa.deinterlacers import NNEDI3
+from vskernels import Hermite, LeftShift, Spline36, TopShift
+from vsscale import ArtCNN
+from vstools import (
+    VariableFormatError,
+    check_variable_format,
+    is_gpu_available,
+    join,
+    vs,
+)
+
+__all__ = [
+    "good_resize",
+]
+
+
+def good_resize(
+    clip: vs.VideoNode,
+    width: int,
+    height: int,
+    shift: tuple[TopShift | list[TopShift], LeftShift | list[LeftShift]] = (0, 0),
+    gpu: bool | None = None,
+    anime: bool = False,
+) -> vs.VideoNode:
+    """High quality resizing filter
+
+    Parameters
+    ----------
+    clip: VideoNode
+        Video clip to apply resizing to.
+    width: int
+        Target width to resize to.
+    height: int
+        Target height to resize to.
+    shift: tuple[float, float], optional
+        Horizontal and vertical amount of shift to apply.
+    gpu: bool, optional
+        Whether to allow usage of GPU for ArtCNN.
+        Defaults to None, which will auto-select based on available mlrt and hardware.
+    anime: bool, optional
+        Enables scalers that are better tuned toward anime.
+        Defaults to False.
+    """
+
+    if gpu is None:
+        gpu = is_gpu_available()
+
+    is_upscale = clip.width < width or clip.height < height
+    chroma_scaler = Spline36()
+
+    # We've ended up where the only special case is anime + upscale + GPU enabled
+    if anime and is_upscale and gpu:
+        luma_scaler = ArtCNN(scaler=Hermite(sigmoid=True))
+    elif is_upscale:
+        luma_scaler = NNEDI3(scaler=Hermite(sigmoid=True))
+    else:
+        luma_scaler = Hermite(sigmoid=True)
+
+    if not check_variable_format(clip, "good_resize"):
+        raise VariableFormatError("Invalid clip format for good_resize")
+
+    luma = luma_scaler.scale(clip, width, height, shift)
+
+    # Grayscale doesn't need chroma processing
+    if clip.format.num_planes == 1:
+        return luma
+
+    chroma = chroma_scaler.scale(clip, width, height, shift)
+
+    return join(luma, chroma)
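And a usage sketch for `good_resize` (same caveats: hypothetical stand-in source, assumed top-level re-export):

```python
import vapoursynth as vs

import soifunc

core = vs.core
src = core.std.BlankClip(format=vs.YUV420P16, width=1280, height=720, length=24)

# Upscale path: luma goes through NNEDI3 (or ArtCNN when anime=True and a GPU
# is available), chroma through Spline36, and the planes are rejoined.
up = soifunc.good_resize(src, 1920, 1080)

# Non-upscale path keeps luma on sigmoid Hermite; gpu=False forces CPU scalers.
down = soifunc.good_resize(src, 960, 540, gpu=False)
```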
soifunc-0.11.3/soifunc/deband.py
DELETED
@@ -1,46 +0,0 @@
-from __future__ import annotations
-
-from vsdeband import f3k_deband
-from vsmasktools import dre_edgemask
-from vstools import InvalidVideoFormatError, check_variable, core, vs
-
-__all__ = [
-    "retinex_deband",
-]
-
-
-def retinex_deband(
-    clip: vs.VideoNode,
-    threshold: int,
-    showmask: bool = False,
-) -> vs.VideoNode:
-    """
-    Debanding using a contrast-adaptive edge mask to preserve details
-    even in dark areas.
-
-    "medium" `threshold` in f3kdb is 48. I think that's a bit strong.
-    16 might be a more sane starting point. Increase as needed.
-
-    This function does not add grain on its own. Use another function like
-    `vsdeband.AddNoise` to do that.
-    """
-    assert check_variable(clip, retinex_deband)
-
-    if (
-        clip.format.color_family != vs.YUV
-        or clip.format.sample_type != vs.INTEGER
-        or clip.format.bits_per_sample > 16
-    ):
-        raise InvalidVideoFormatError(
-            retinex_deband,
-            clip.format,
-            "The format {format.name} is not supported! It must be an 8-16bit integer YUV bit format!",
-        )
-
-    mask = dre_edgemask.CLAHE(clip)
-
-    if showmask:
-        return mask
-
-    deband = f3k_deband(clip, thr=(threshold << 2))
-    return core.std.MaskedMerge(deband, clip, mask)
soifunc-0.11.3/soifunc/interpolate.py
DELETED

@@ -1,134 +0,0 @@
-from __future__ import annotations
-
-import platform
-from typing import TYPE_CHECKING
-
-import vstools
-from vsscale import autoselect_backend
-from vstools import vs
-
-if TYPE_CHECKING:
-    from vsmlrt import backendT
-
-__all__ = ["rate_doubler", "decimation_fixer"]
-
-
-def rate_doubler(
-    clip: vs.VideoNode, multi: int = 2, backend: backendT | None = None
-) -> vs.VideoNode:
-    """
-    A utility to scale the framerate of a video via frame interpolation.
-
-    Probably shouldn't just go spraying this everywhere,
-    it's more for fun and science than anything.
-    """
-    import vsmlrt
-
-    width = clip.width
-    height = clip.height
-    matrix = vstools.Matrix.from_video(clip)
-    transfer = vstools.Transfer.from_video(clip)
-    primaries = vstools.Primaries.from_video(clip)
-    clip = clip.misc.SCDetect()
-    clip = clip.resize.Bicubic(
-        format=vs.RGBS,
-        width=next_multiple_of(64, width),
-        height=next_multiple_of(64, height),
-    )
-    clip = vsmlrt.RIFE(
-        clip,
-        multi=multi,
-        model=vsmlrt.RIFEModel.v4_25_heavy,
-        # Why these defaults? Because running ML stuff on AMD on Windows sucks hard.
-        # Trial and error led me to finally find that ORT_DML works.
-        backend=(backend if backend else autoselect_backend()),
-    )
-    # TODO: Handle other chroma samplings
-    clip = clip.resize.Bicubic(
-        format=vs.YUV420P16,
-        width=width,
-        height=height,
-        matrix=matrix,
-        transfer=transfer,
-        primaries=primaries,
-    )
-    return clip
-
-
-def decimation_fixer(
-    clip: vs.VideoNode, cycle: int, offset: int = 0, backend: backendT | None = None
-) -> vs.VideoNode:
-    """
-    Attempts to interpolate frames that were removed by bad decimation.
-    Only works with static decimation cycles.
-    `cycle` should be the output cycle, i.e. what did the idiot who decimated this
-    pass into the decimation filter to achieve this monstrosity?
-
-    Yeah, I know, "ThiS is bAd AND yOu shoUldn'T Do IT".
-    Maybe people shouldn't decimate clips that don't need decimation.
-    Sometimes you can't "just get a better source".
-    """
-    import vsmlrt
-
-    if offset >= cycle - 1:
-        raise Exception("offset must be less than cycle - 1")
-    if cycle <= 0:
-        raise Exception("cycle must be greater than zero")
-
-    width = clip.width
-    height = clip.height
-    fps = clip.fps
-    input_cycle = cycle - 1
-    matrix = vstools.Matrix.from_video(clip)
-    transfer = vstools.Transfer.from_video(clip)
-    primaries = vstools.Primaries.from_video(clip)
-    clip = clip.misc.SCDetect()
-    clip = clip.resize.Bicubic(
-        format=vs.RGBS,
-        width=next_multiple_of(64, width),
-        height=next_multiple_of(64, height),
-    )
-    doubled = vsmlrt.RIFE(
-        clip,
-        model=vsmlrt.RIFEModel.v4_25_heavy,
-        backend=(backend if backend else autoselect_backend()),
-    )
-
-    out_clip = None
-    # This is the frame after our insertion point
-    src_frame = offset
-    last_src_frame = 0
-    # This is the frame we want to grab from the doubled clip
-    doub_frame = offset * 2 - 1
-    while src_frame < clip.num_frames:
-        if src_frame > 0:
-            interp = doubled[doub_frame]
-            if out_clip is None:
-                out_clip = clip[last_src_frame:src_frame] + interp
-            else:
-                out_clip = out_clip + clip[last_src_frame:src_frame] + interp
-        last_src_frame = src_frame
-        src_frame += input_cycle
-        doub_frame += input_cycle * 2
-    out_clip += clip[last_src_frame:]
-    out_clip = out_clip.std.AssumeFPS(
-        fpsnum=fps.numerator * cycle // input_cycle, fpsden=fps.denominator
-    )
-
-    # TODO: Handle other chroma samplings
-    out_clip = out_clip.resize.Bicubic(
-        format=vs.YUV420P16,
-        width=width,
-        height=height,
-        matrix=matrix,
-        transfer=transfer,
-        primaries=primaries,
-    )
-    return out_clip
-
-
-def next_multiple_of(multiple: int, param: int) -> int:
-    rem = param % multiple
-    if rem == 0:
-        return param
-    return param + (multiple - rem)
soifunc-0.11.3/soifunc/resize.py
DELETED
@@ -1,112 +0,0 @@
-from __future__ import annotations
-
-from dataclasses import dataclass
-from inspect import getfullargspec
-from typing import Any
-
-from vsaa.deinterlacers import NNEDI3
-from vskernels import (
-    Hermite,
-    Scaler,
-    ScalerLike,
-    Spline36,
-)
-from vsscale import ArtCNN, GenericScaler
-from vstools import check_variable_format, inject_self, is_gpu_available, join, vs
-
-__all__ = [
-    "good_resize",
-    "HybridScaler",
-]
-
-
-def good_resize(
-    clip: vs.VideoNode,
-    width: int,
-    height: int,
-    shift: tuple[float, float] = (0, 0),
-    gpu: bool | None = None,
-    anime: bool = False,
-) -> vs.VideoNode:
-    """High quality resizing filter
-
-    Parameters
-    ----------
-    clip: VideoNode
-        Video clip to apply resizing to.
-    width: int
-        Target width to resize to.
-    height: int
-        Target height to resize to.
-    shift: tuple[float, float], optional
-        Horizontal and vertical amount of shift to apply.
-    gpu: bool, optional
-        Whether to allow usage of GPU for ArtCNN.
-        Defaults to None, which will auto-select based on available mlrt and hardware.
-    anime: bool, optional
-        Enables scalers that are better tuned toward anime.
-        Defaults to False.
-    """
-
-    if gpu is None:
-        gpu = is_gpu_available()
-
-    is_upscale = clip.width < width or clip.height < height
-    chroma_scaler = Spline36()
-
-    # We've ended up where the only special case is anime + upscale + GPU enabled
-    if anime and is_upscale and gpu:
-        luma_scaler = ArtCNN(scaler=Hermite(sigmoid=True))
-    elif is_upscale:
-        luma_scaler = NNEDI3(scaler=Hermite(sigmoid=True))
-    else:
-        luma_scaler = Hermite(sigmoid=True)
-
-    return HybridScaler(luma_scaler, chroma_scaler).scale(
-        clip, width, height, shift=shift
-    )
-
-
-@dataclass
-class HybridScaler(GenericScaler):
-    luma_scaler: ScalerLike
-    chroma_scaler: ScalerLike
-
-    def __post_init__(self) -> None:
-        super().__post_init__()
-
-        self._luma = Scaler.ensure_obj(self.luma_scaler)
-        self._chroma = Scaler.ensure_obj(self.chroma_scaler)
-
-    @Scaler.cached_property
-    def kernel_radius(self) -> int:
-        return self._luma.kernel_radius
-
-    def scale(  # type:ignore
-        self,
-        clip: vs.VideoNode,
-        width: int,
-        height: int,
-        shift: tuple[float, float] = (0, 0),
-        **kwargs: Any,
-    ) -> vs.VideoNode:
-        assert check_variable_format(clip, self.__class__)
-
-        luma = self._luma.scale(clip, width, height, shift, **kwargs)
-
-        if clip.format.num_planes == 1:
-            return luma
-
-        chroma = self._chroma.scale(clip, width, height, shift, **kwargs)
-
-        return join(luma, chroma)
-
-
-def _get_scaler(scaler: ScalerLike, **kwargs: Any) -> Scaler:
-    scaler_cls = Scaler.from_param(scaler, _get_scaler)
-
-    args = getfullargspec(scaler_cls).args
-
-    clean_kwargs = {key: value for key, value in kwargs.items() if key in args}
-
-    return scaler_cls(**clean_kwargs)
Files without changes: LICENSE, README.md, soifunc/__init__.py, soifunc/py.typed