soifunc 0.14.0__tar.gz → 0.14.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: soifunc
- Version: 0.14.0
+ Version: 0.14.2
  Summary: Soichiro's VapourSynth Functions Collection
  License: MIT
  License-File: LICENSE
@@ -1,6 +1,6 @@
  [tool.poetry]
  name = "soifunc"
- version = "0.14.0"
+ version = "0.14.2"
  description = "Soichiro's VapourSynth Functions Collection"
  authors = ["Josh Holmer <jholmer.in@gmail.com>"]
  license = "MIT"
@@ -2,3 +2,4 @@ from .deband import * # noqa: F401, F403
  from .denoise import * # noqa: F401, F403
  from .interpolate import * # noqa: F401, F403
  from .resize import * # noqa: F401, F403
+ from .subtitle import * # noqa: F401, F403
@@ -17,14 +17,19 @@ __all__ = [
 
  def retinex_deband(
      clip: vs.VideoNode,
-     threshold: int,
+     threshold: int = 24,
      showmask: bool = False,
  ) -> vs.VideoNode:
      """Debanding using contrast-adaptive edge masking.
 
      Args:
          clip: Input video (8-16bit YUV required).
-         threshold: Debanding strength (0-255). Default ~16-48 recommended.
+         threshold: Debanding strength (0-255). A reasonable range is around 16-64.
+             According to the original f3kdb documentation (https://f3kdb.readthedocs.io/en/stable/presets.html):
+             - Low = 32
+             - Medium = 48
+             - High = 64
+             - Very high = 80
          showmask: If True, return edge mask instead of debanded clip.
 
      Returns:
@@ -55,10 +60,8 @@ def retinex_deband(
      if showmask:
          return mask
 
-     # The threshold value that `retinex_deband` takes is relative
-     # to 8-bit videos, but `f3kdb` changed their threshold
-     # values to be relative to 10-bit videos some time after this
-     # function was created. To keep this function compatible,
-     # we shift our threshold from 8-bit to 10-bit.
+     # The bitshift here is to adjust threshold values from neo_f3kdb's
+     # `scale=False` behavior to match vs-jetpack's `scale=True` behavior.
+     # It is not related to bit depth. The threshold value is independent of bit depth.
      deband = f3k_deband(clip, thr=(threshold << 2))
      return core.std.MaskedMerge(deband, clip, mask)
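
Note: with this release `threshold` gains a default of 24, so `retinex_deband` can now be called without arguments. A minimal usage sketch, assuming soifunc and a VapourSynth source filter (lsmas here) are installed; file names and the source plugin are hypothetical, not part of this package:

import vapoursynth as vs
import soifunc

core = vs.core

# Hypothetical source clip; any 8-16 bit YUV clip works.
clip = core.lsmas.LWLibavSource("input.mkv")

# New default strength (threshold=24); pass e.g. 48 for the f3kdb "medium" preset.
debanded = soifunc.retinex_deband(clip)
stronger = soifunc.retinex_deband(clip, threshold=48)

# showmask=True returns the contrast-adaptive edge mask instead of the debanded clip.
mask = soifunc.retinex_deband(clip, showmask=True)
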
@@ -4,9 +4,9 @@ from typing import Callable, Optional
 
  import vsdenoise
  from vsdenoise import DFTTest, bm3d, mc_degrain, nl_means
- from vstools import core, depth, get_u, get_y, join, split, vs
+ from vstools import check_variable, core, join, split, vs
 
- __all__ = ["MCDenoise", "magic_denoise", "hqbm3d", "mc_dfttest"]
+ __all__ = ["MCDenoise", "magic_denoise", "hqbm3d", "mc_dfttest", "Stab"]
 
 
  def hqbm3d(
@@ -218,3 +218,47 @@ def magic_denoise(clip: vs.VideoNode) -> vs.VideoNode:
          tbsize=3,
          ssystem=1,
      )
+
+
+ ##############################################################################
+ # Original script by g-force converted into a stand alone script by McCauley #
+ # then copied from havsfunc when they deleted it                             #
+ # latest version from December 10, 2008                                      #
+ ##############################################################################
+ def Stab(clp, dxmax=4, dymax=4, mirror=0):
+     if not isinstance(clp, vs.VideoNode):
+         raise vs.Error("Stab: this is not a clip")
+
+     clp = scdetect(clp, 25 / 255)
+     temp = clp.misc.AverageFrames([1] * 15, scenechange=True)
+     inter = core.std.Interleave(
+         [
+             core.rgvs.Repair(
+                 temp, clp.misc.AverageFrames([1] * 3, scenechange=True), mode=[1]
+             ),
+             clp,
+         ]
+     )
+     mdata = inter.mv.DepanEstimate(trust=0, dxmax=dxmax, dymax=dymax)
+     last = inter.mv.DepanCompensate(data=mdata, offset=-1, mirror=mirror)
+     return last[::2]
+
+
+ def scdetect(clip: vs.VideoNode, threshold: float = 0.1) -> vs.VideoNode:
+     def _copy_property(n: int, f: list[vs.VideoFrame]) -> vs.VideoFrame:
+         fout = f[0].copy()
+         fout.props["_SceneChangePrev"] = f[1].props["_SceneChangePrev"]
+         fout.props["_SceneChangeNext"] = f[1].props["_SceneChangeNext"]
+         return fout
+
+     assert check_variable(clip, scdetect)
+
+     sc = clip
+     if clip.format.color_family == vs.RGB:
+         sc = clip.resize.Point(format=vs.GRAY8, matrix_s="709")
+
+     sc = sc.misc.SCDetect(threshold)
+     if clip.format.color_family == vs.RGB:
+         sc = clip.std.ModifyFrame([clip, sc], _copy_property)
+
+     return sc
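
Note: `Stab` is the havsfunc-derived stabilizer now re-exported from soifunc (it is added to `__all__` above). A rough usage sketch; the source file name is hypothetical, and the misc, rgvs, and mvtools (DePan) plugins are assumed to be available:

import vapoursynth as vs
import soifunc

core = vs.core

clip = core.lsmas.LWLibavSource("shaky_capture.mkv")  # hypothetical source

# Bound the per-frame shift search to +/-4 pixels; mirror controls how revealed
# borders are filled (15 = mirror all four edges).
stabilized = soifunc.Stab(clip, dxmax=4, dymax=4, mirror=15)
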
@@ -59,7 +59,7 @@ def replace_dupes(
      clip: vs.VideoNode,
      max_length: int = 5,
      backend: backendT | None = None,
-     threshold: float = 0.001,
+     threshold: float = 0.0005,
  ) -> vs.VideoNode:
      """
      Detects strings of duplicate frames in a video and replaces them
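
Note: the duplicate-detection threshold default is halved from 0.001 to 0.0005, so frames presumably need to be closer to identical before they are replaced. A minimal call sketch under the new default (hypothetical source; backend left to the library default):

import vapoursynth as vs
import soifunc

core = vs.core

clip = core.lsmas.LWLibavSource("anime_episode.mkv")  # hypothetical source
clean = soifunc.replace_dupes(clip, max_length=5)     # uses the new threshold=0.0005 default
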
@@ -0,0 +1,57 @@
+ from vsaa import NNEDI3, based_aa, pre_aa
+ from vstools import ChromaLocation, ColorRange, Matrix, Primaries, Transfer, vs
+
+ from soifunc.resize import good_resize
+
+ __all__ = [
+     "based_imagesub",
+ ]
+
+
+ def based_imagesub(clip: vs.VideoNode, sub_file: str) -> vs.VideoNode:
+     """
+     ImageSub that performs anti-aliasing on only the subtitles.
+
+     :param clip: Video to add subtitles to
+     :type clip: vs.VideoNode
+     :param sub_file: Path to the image-based subtitles file
+     :type sub_file: str
+     :return: Subtitled video
+     :rtype: vs.VideoNode
+     """
+     matrix = Matrix.from_video(clip)
+     primaries = Primaries.from_video(clip)
+     transfer = Transfer.from_video(clip)
+     range = ColorRange.from_video(clip)
+     sub = clip.sub.ImageFile(
+         file=sub_file,
+         blend=False,
+     )
+     sub = sub.resize.Spline36(
+         format=clip.format.id,
+         matrix=matrix,
+         primaries_in=primaries,
+         primaries=primaries,
+         transfer_in=transfer,
+         transfer=transfer,
+         range=range,
+     )
+     sub = good_resize(sub, clip.width, clip.height)
+     # Use NNEDI3 since ArtCNN isn't really designed for text
+     sub = based_aa(sub, prefilter=pre_aa, supersampler=NNEDI3)
+
+     mask = sub.std.PropToClip(prop="_Alpha")
+     mask_format = vs.core.query_video_format(
+         mask.format.color_family,
+         clip.format.sample_type,
+         clip.format.bits_per_sample,
+         mask.format.subsampling_w,
+         mask.format.subsampling_h,
+     )
+     mask = mask.resize.Spline36(
+         clip.width,
+         clip.height,
+         format=mask_format.id,
+     )
+
+     return vs.core.std.MaskedMerge(clipa=clip, clipb=sub, mask=mask)
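
Note: this new module (presumably soifunc/subtitle.py, matching the `from .subtitle import *` added to __init__.py above) renders an image-based subtitle track, anti-aliases only the rendered subtitles, and merges them back using their alpha mask. A minimal usage sketch; file names are hypothetical, and the Subtext (`sub.ImageFile`), vsaa, and NNEDI3 dependencies are assumed to be installed:

import vapoursynth as vs
import soifunc

core = vs.core

clip = core.lsmas.LWLibavSource("episode01.mkv")        # hypothetical source
subbed = soifunc.based_imagesub(clip, "episode01.sup")  # image-based (e.g. PGS) subtitle track

subbed.set_output()
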