soifunc 0.11.2__tar.gz → 0.12.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: soifunc
-Version: 0.11.2
+Version: 0.12.0
 Summary: Soichiro's VapourSynth Functions Collection
 License: MIT
 Author: Josh Holmer
@@ -11,7 +11,7 @@ Classifier: Programming Language :: Python :: 3
 Classifier: Programming Language :: Python :: 3.12
 Classifier: Programming Language :: Python :: 3.13
 Requires-Dist: vapoursynth (>=68)
-Requires-Dist: vsjetpack (>=0.5.1,<0.6.0)
+Requires-Dist: vsjetpack (>=0.7.1,<0.8.0)
 Description-Content-Type: text/markdown
 
 ## soifunc

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "soifunc"
-version = "0.11.2"
+version = "0.12.0"
 description = "Soichiro's VapourSynth Functions Collection"
 authors = ["Josh Holmer <jholmer.in@gmail.com>"]
 license = "MIT"
@@ -9,7 +9,7 @@ readme = "README.md"
 [tool.poetry.dependencies]
 python = ">=3.12,<4.0"
 vapoursynth = ">=68"
-vsjetpack = "^0.5.1"
+vsjetpack = "^0.7.1"
 
 [tool.poetry.group.dev.dependencies]
 black = "^25.1.0"
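
The caret bump in pyproject.toml is what Poetry expands into the `>=0.7.1,<0.8.0` range shown in the METADATA hunk above. As a rough sketch of verifying a compatible vsjetpack at runtime (assuming the third-party `packaging` library is available; only the distribution name `vsjetpack` comes from the diff itself):

```python
# Sketch: check that the installed vsjetpack falls inside the range soifunc 0.12.0 pins.
# Assumes the third-party "packaging" library is installed; "vsjetpack" is the
# distribution name taken from the diff above.
from importlib.metadata import version

from packaging.specifiers import SpecifierSet

installed = version("vsjetpack")          # e.g. "0.7.3"
spec = SpecifierSet(">=0.7.1,<0.8.0")     # Poetry's expansion of "^0.7.1"

print(f"vsjetpack {installed} satisfies soifunc 0.12.0: {installed in spec}")
```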

@@ -4,6 +4,7 @@ import platform
 from typing import TYPE_CHECKING
 
 import vstools
+from vsscale import autoselect_backend
 from vstools import vs
 
 if TYPE_CHECKING:
@@ -40,15 +41,7 @@ def rate_doubler(
         model=vsmlrt.RIFEModel.v4_25_heavy,
         # Why these defaults? Because running ML stuff on AMD on Windows sucks hard.
         # Trial and error led me to finally find that ORT_DML works.
-        backend=(
-            backend
-            if backend
-            else (
-                vsmlrt.Backend.ORT_DML()
-                if platform.system() == "Windows"
-                else vsmlrt.Backend.TRT()
-            )
-        ),
+        backend=(backend if backend else autoselect_backend()),
     )
     # TODO: Handle other chroma samplings
     clip = clip.resize.Bicubic(
@@ -98,15 +91,7 @@ def decimation_fixer(
     doubled = vsmlrt.RIFE(
         clip,
         model=vsmlrt.RIFEModel.v4_25_heavy,
-        backend=(
-            backend
-            if backend
-            else (
-                vsmlrt.Backend.ORT_DML()
-                if platform.system() == "Windows"
-                else vsmlrt.Backend.TRT()
-            )
-        ),
+        backend=(backend if backend else autoselect_backend()),
     )
 
     out_clip = None
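
Both RIFE call sites now defer to `vsscale.autoselect_backend()` when no backend is passed, replacing the hard-coded Windows → ORT_DML / otherwise → TRT split. A minimal sketch of what that means for callers, assuming `rate_doubler` is exported at the package level and keeps an optional `backend` keyword as the hunk suggests (the BlankClip source is purely illustrative):

```python
# Sketch of the new default-backend behavior; the source clip is illustrative and
# rate_doubler's exact signature is assumed from the hunk above.
import vapoursynth as vs
from vsscale import autoselect_backend

import soifunc

core = vs.core
src = core.std.BlankClip(
    format=vs.YUV420P16, width=1920, height=1080, length=240, fpsnum=24000, fpsden=1001
)

# With no backend given, vsscale now picks one from the installed runtimes/hardware.
doubled = soifunc.rate_doubler(src)

# Roughly equivalent to passing the auto-selected backend explicitly:
doubled_explicit = soifunc.rate_doubler(src, backend=autoselect_backend())
```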

@@ -0,0 +1,67 @@
+from __future__ import annotations
+
+from vsaa.deinterlacers import NNEDI3
+from vskernels import (
+    Hermite,
+    Spline36,
+)
+from vsscale import ArtCNN
+from vstools import check_variable_format, is_gpu_available, join, vs
+
+__all__ = [
+    "good_resize",
+]
+
+
+def good_resize(
+    clip: vs.VideoNode,
+    width: int,
+    height: int,
+    shift: tuple[float, float] = (0, 0),
+    gpu: bool | None = None,
+    anime: bool = False,
+) -> vs.VideoNode:
+    """High quality resizing filter
+
+    Parameters
+    ----------
+    clip: VideoNode
+        Video clip to apply resizing to.
+    width: int
+        Target width to resize to.
+    height: int
+        Target height to resize to.
+    shift: tuple[float, float], optional
+        Horizontal and vertical amount of shift to apply.
+    gpu: bool, optional
+        Whether to allow usage of GPU for ArtCNN.
+        Defaults to None, which will auto-select based on available mlrt and hardware.
+    anime: bool, optional
+        Enables scalers that are better tuned toward anime.
+        Defaults to False.
+    """
+
+    if gpu is None:
+        gpu = is_gpu_available()
+
+    is_upscale = clip.width < width or clip.height < height
+    chroma_scaler = Spline36()
+
+    # We've ended up where the only special case is anime + upscale + GPU enabled
+    if anime and is_upscale and gpu:
+        luma_scaler = ArtCNN(scaler=Hermite(sigmoid=True))
+    elif is_upscale:
+        luma_scaler = NNEDI3(scaler=Hermite(sigmoid=True))
+    else:
+        luma_scaler = Hermite(sigmoid=True)
+
+    assert check_variable_format(clip, "good_resize")
+
+    luma = luma_scaler.scale(clip, width, height, shift)
+
+    if clip.format.num_planes == 1:
+        return luma
+
+    chroma = chroma_scaler.scale(clip, width, height, shift)
+
+    return join(luma, chroma)
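
For reference, a usage sketch of the rewritten module, assuming `good_resize` is still re-exported from the `soifunc` package as in 0.11.2 (the source clip is illustrative):

```python
# Usage sketch for the rewritten good_resize; the clip below is illustrative.
import vapoursynth as vs
from soifunc import good_resize

core = vs.core
src = core.std.BlankClip(format=vs.YUV420P16, width=1280, height=720, length=100)

# Downscale path: Hermite(sigmoid=True) luma, Spline36 chroma.
down = good_resize(src, 960, 540)

# Upscale path: NNEDI3-doubled luma, or ArtCNN when anime=True and a GPU is detected.
up = good_resize(src, 1920, 1080, anime=True)
```

The scaler choices are unchanged from 0.11.2; the difference is that the `HybridScaler` indirection which previously combined the luma and chroma paths has been inlined, as the removed file below shows.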

@@ -1,112 +0,0 @@
-from __future__ import annotations
-
-from dataclasses import dataclass
-from inspect import getfullargspec
-from typing import Any
-
-from vsaa.deinterlacers import NNEDI3
-from vskernels import (
-    Hermite,
-    Scaler,
-    ScalerLike,
-    Spline36,
-)
-from vsscale import ArtCNN, GenericScaler
-from vstools import check_variable_format, inject_self, is_gpu_available, join, vs
-
-__all__ = [
-    "good_resize",
-    "HybridScaler",
-]
-
-
-def good_resize(
-    clip: vs.VideoNode,
-    width: int,
-    height: int,
-    shift: tuple[float, float] = (0, 0),
-    gpu: bool | None = None,
-    anime: bool = False,
-) -> vs.VideoNode:
-    """High quality resizing filter
-
-    Parameters
-    ----------
-    clip: VideoNode
-        Video clip to apply resizing to.
-    width: int
-        Target width to resize to.
-    height: int
-        Target height to resize to.
-    shift: tuple[float, float], optional
-        Horizontal and vertical amount of shift to apply.
-    gpu: bool, optional
-        Whether to allow usage of GPU for ArtCNN.
-        Defaults to None, which will auto-select based on available mlrt and hardware.
-    anime: bool, optional
-        Enables scalers that are better tuned toward anime.
-        Defaults to False.
-    """
-
-    if gpu is None:
-        gpu = is_gpu_available()
-
-    is_upscale = clip.width < width or clip.height < height
-    chroma_scaler = Spline36()
-
-    # We've ended up where the only special case is anime + upscale + GPU enabled
-    if anime and is_upscale and gpu:
-        luma_scaler = ArtCNN(scaler=Hermite(sigmoid=True))
-    elif is_upscale:
-        luma_scaler = NNEDI3(scaler=Hermite(sigmoid=True))
-    else:
-        luma_scaler = Hermite(sigmoid=True)
-
-    return HybridScaler(luma_scaler, chroma_scaler).scale(
-        clip, width, height, shift=shift
-    )
-
-
-@dataclass
-class HybridScaler(GenericScaler):
-    luma_scaler: ScalerLike
-    chroma_scaler: ScalerLike
-
-    def __post_init__(self) -> None:
-        super().__post_init__()
-
-        self._luma = Scaler.ensure_obj(self.luma_scaler)
-        self._chroma = Scaler.ensure_obj(self.chroma_scaler)
-
-    @Scaler.cached_property
-    def kernel_radius(self) -> int:
-        return self._luma.kernel_radius
-
-    def scale(  # type:ignore
-        self,
-        clip: vs.VideoNode,
-        width: int,
-        height: int,
-        shift: tuple[float, float] = (0, 0),
-        **kwargs: Any,
-    ) -> vs.VideoNode:
-        assert check_variable_format(clip, self.__class__)
-
-        luma = self._luma.scale(clip, width, height, shift, **kwargs)
-
-        if clip.format.num_planes == 1:
-            return luma
-
-        chroma = self._chroma.scale(clip, width, height, shift, **kwargs)
-
-        return join(luma, chroma)
-
-
-def _get_scaler(scaler: ScalerLike, **kwargs: Any) -> Scaler:
-    scaler_cls = Scaler.from_param(scaler, _get_scaler)
-
-    args = getfullargspec(scaler_cls).args
-
-    clean_kwargs = {key: value for key, value in kwargs.items() if key in args}
-
-    return scaler_cls(**clean_kwargs)