streamdeck-gui-ng 4.1.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- streamdeck_gui_ng-4.1.3.dist-info/METADATA +141 -0
- streamdeck_gui_ng-4.1.3.dist-info/RECORD +62 -0
- streamdeck_gui_ng-4.1.3.dist-info/WHEEL +4 -0
- streamdeck_gui_ng-4.1.3.dist-info/entry_points.txt +4 -0
- streamdeck_gui_ng-4.1.3.dist-info/licenses/LICENSE +21 -0
- streamdeck_ui/__init__.py +6 -0
- streamdeck_ui/api.py +712 -0
- streamdeck_ui/button.ui +1214 -0
- streamdeck_ui/cli/__init__.py +0 -0
- streamdeck_ui/cli/commands.py +191 -0
- streamdeck_ui/cli/server.py +292 -0
- streamdeck_ui/config.py +244 -0
- streamdeck_ui/dimmer.py +93 -0
- streamdeck_ui/display/__init__.py +0 -0
- streamdeck_ui/display/background_color_filter.py +41 -0
- streamdeck_ui/display/display_grid.py +265 -0
- streamdeck_ui/display/empty_filter.py +43 -0
- streamdeck_ui/display/filter.py +65 -0
- streamdeck_ui/display/image_filter.py +144 -0
- streamdeck_ui/display/keypress_filter.py +63 -0
- streamdeck_ui/display/pipeline.py +74 -0
- streamdeck_ui/display/pulse_filter.py +54 -0
- streamdeck_ui/display/text_filter.py +142 -0
- streamdeck_ui/fonts/roboto/LICENSE.txt +202 -0
- streamdeck_ui/fonts/roboto/Roboto-Black.ttf +0 -0
- streamdeck_ui/fonts/roboto/Roboto-BlackItalic.ttf +0 -0
- streamdeck_ui/fonts/roboto/Roboto-Bold.ttf +0 -0
- streamdeck_ui/fonts/roboto/Roboto-BoldItalic.ttf +0 -0
- streamdeck_ui/fonts/roboto/Roboto-Italic.ttf +0 -0
- streamdeck_ui/fonts/roboto/Roboto-Light.ttf +0 -0
- streamdeck_ui/fonts/roboto/Roboto-LightItalic.ttf +0 -0
- streamdeck_ui/fonts/roboto/Roboto-Medium.ttf +0 -0
- streamdeck_ui/fonts/roboto/Roboto-MediumItalic.ttf +0 -0
- streamdeck_ui/fonts/roboto/Roboto-Regular.ttf +0 -0
- streamdeck_ui/fonts/roboto/Roboto-Thin.ttf +0 -0
- streamdeck_ui/fonts/roboto/Roboto-ThinItalic.ttf +0 -0
- streamdeck_ui/gui.py +1423 -0
- streamdeck_ui/icons/add_page.png +0 -0
- streamdeck_ui/icons/cross.png +0 -0
- streamdeck_ui/icons/gear.png +0 -0
- streamdeck_ui/icons/horizontal-align.png +0 -0
- streamdeck_ui/icons/remove_page.png +0 -0
- streamdeck_ui/icons/vertical-align.png +0 -0
- streamdeck_ui/icons/warning_icon_button.png +0 -0
- streamdeck_ui/logger.py +11 -0
- streamdeck_ui/logo.png +0 -0
- streamdeck_ui/main.ui +407 -0
- streamdeck_ui/mock_streamdeck.py +204 -0
- streamdeck_ui/model.py +78 -0
- streamdeck_ui/modules/__init__.py +0 -0
- streamdeck_ui/modules/fonts.py +150 -0
- streamdeck_ui/modules/keyboard.py +447 -0
- streamdeck_ui/modules/utils/__init__.py +0 -0
- streamdeck_ui/modules/utils/timers.py +35 -0
- streamdeck_ui/resources.qrc +10 -0
- streamdeck_ui/resources_rc.py +324 -0
- streamdeck_ui/semaphore.py +38 -0
- streamdeck_ui/settings.ui +155 -0
- streamdeck_ui/stream_deck_monitor.py +157 -0
- streamdeck_ui/ui_button.py +421 -0
- streamdeck_ui/ui_main.py +267 -0
- streamdeck_ui/ui_settings.py +119 -0
|
@@ -0,0 +1,65 @@
|
|
|
1
|
+
from abc import ABC, abstractmethod
|
|
2
|
+
from fractions import Fraction
|
|
3
|
+
from typing import Callable, Optional, Tuple
|
|
4
|
+
|
|
5
|
+
from PIL import Image
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
class Filter(ABC):
    """Base class for image transformations in the display pipeline.

    A filter receives an input image and produces an output image. A filter
    can signal that it is complete, in which case it will be removed from
    the pipeline.
    """

    # The image size (width, height) in pixels that this filter transforms.
    size: Tuple[int, int]

    # True once the filter is complete and should no longer be processed.
    is_complete: bool

    def __init__(self):
        self.is_complete = False

    @abstractmethod
    def initialize(self, size: Tuple[int, int]):
        """Prepare the filter for frames of the given size.

        A filter may be constructed before the display dimensions are known,
        so all size-dependent setup belongs here rather than in the
        constructor.

        :param size: The filter image size.
        :type size: Tuple[int, int]
        """
        pass

    @abstractmethod
    def transform(
        self,
        get_input: Callable[[], Optional[Image.Image]],
        get_output: Callable[[int], Optional[Image.Image]],
        input_changed: bool,
        time: Fraction,
    ) -> Tuple[Optional[Image.Image], int]:
        """Produce this stage's output image from the given input.

        :param Callable[[], PIL.Image] get_input: Returns a copy of the input
            image; the copy is safe to manipulate directly.

        :param Callable[[int], PIL.Image] get_output: Given the hashcode of
            the would-be output frame, returns the cached output frame if one
            already exists, avoiding a redraw.

        :param bool input_changed: True when the input differs from the
            previous run. When True, an Image must be returned.

        :param Fraction time: Seconds elapsed since the start of the
            pipeline, as a fractional number.

        :rtype: PIL.Image
        :return: The transformed output image. Return None when this filter
            did not modify the input — the pipeline manager then passes a
            cached version on to the next stage.
        """
        pass
|
|
@@ -0,0 +1,144 @@
|
|
|
1
|
+
import itertools
|
|
2
|
+
import os
|
|
3
|
+
from fractions import Fraction
|
|
4
|
+
from io import BytesIO
|
|
5
|
+
from typing import Callable, Optional, Tuple
|
|
6
|
+
|
|
7
|
+
import cairosvg
|
|
8
|
+
import filetype
|
|
9
|
+
from PIL import Image, ImageSequence
|
|
10
|
+
|
|
11
|
+
from streamdeck_ui.config import WARNING_ICON
|
|
12
|
+
from streamdeck_ui.display.filter import Filter
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class ImageFilter(Filter):
    """
    Represents a static image or animation loaded from disk. It transforms
    the input image by pasting the loaded image (frame by frame, for
    animations) on top of it.
    """

    def __init__(self, file: str):
        """
        :param file: Path to the image file; a leading ``~`` is expanded.
        """
        super(ImageFilter, self).__init__()
        self.file = os.path.expanduser(file)
        try:
            # Stat the *expanded* path so the stats match the file that
            # Image.open() will actually load (previously the raw argument
            # was stat'ed, which fails for "~"-prefixed paths).
            file_stats = os.stat(self.file)
            file_size = file_stats.st_size
            mod_time = file_stats.st_mtime
        except BaseException:  # noqa: B036
            # Best effort: fall back to dummy stats so a hashcode can still
            # be built; initialize() will surface the real load error.
            file_size = 0
            mod_time = 0
            print(f"Unable to load icon {self.file} to calculate stats.")

        # Create a tuple of the file metadata for creating a hashcode.
        self.metadata = (self.__class__, self.file, file_size, mod_time)

    def initialize(self, size: Tuple[int, int]):
        """Load the image, decode its frames and scale them to *size*.

        :param size: The filter image size (width, height).
        """
        # Each frame needs to have a unique hashcode.
        image_hash = hash(self.metadata)
        frame_duration = []
        frame_hash = []

        try:
            kind = filetype.guess(self.file)
            if kind is None:
                # No known raster type detected - assume SVG and rasterize it.
                # Use a context manager so the file handle is not leaked.
                with open(self.file) as svg_file:
                    svg_code = svg_file.read()
                png = cairosvg.svg2png(svg_code, output_height=size[1], output_width=size[0])
                image = Image.open(BytesIO(png))
                frame_duration.append(-1)
                frame_hash.append(image_hash)
            else:
                image = Image.open(self.file)
                image.seek(0)
                # Frame number is used to create a unique hash per frame
                frame_number = 1
                while True:
                    try:
                        frame_duration.append(image.info["duration"])
                        # Create tuple and hash it, to combine the image and frame hashcodes
                        frame_hash.append(hash((image_hash, frame_number)))
                        image.seek(image.tell() + 1)
                        frame_number += 1
                    except EOFError:
                        # Reached the final frame
                        break
                    except KeyError:
                        # If the key 'duration' can't be found, it's not an
                        # animation; -1 marks a static (never-advancing) frame.
                        frame_duration.append(-1)
                        frame_hash.append(image_hash)
                        break

        except BaseException as icon_error:  # noqa: B036
            # FIXME: caller should handle this?
            print(f"Unable to load icon {self.file} with error {icon_error}")
            image = Image.open(WARNING_ICON)
            frame_duration.append(-1)
            frame_hash.append(image_hash)

        frames = ImageSequence.Iterator(image)

        # Scale all the frames to the target size, preserving aspect ratio.
        self.frames = []
        for frame, milliseconds, hashcode in zip(frames, frame_duration, frame_hash):
            frame = frame.copy()
            if frame.has_transparency_data and frame.mode != "RGBA":
                try:
                    frame = frame.convert("RGBA")
                except BaseException:  # noqa: B036
                    pass
            scale_factor = min(size[0] / frame.size[0], size[1] / frame.size[1])
            # Pass resize an explicit tuple rather than a generator.
            scaled_size = tuple(int(v * scale_factor) for v in frame.size)
            frame = frame.resize(scaled_size, Image.Resampling.LANCZOS)
            self.frames.append((frame, milliseconds, hashcode))

        self.frame_cycle = itertools.cycle(self.frames)
        self.current_frame = next(self.frame_cycle)
        self.frame_time = Fraction()

    def _render_current_frame(
        self,
        get_input: Callable[[], Image.Image],
        get_output: Callable[[int], Image.Image],
        hashcode: int,
    ) -> Tuple[Image.Image, int]:
        """Return the cached output for *hashcode*, or paste the current frame onto a fresh input copy."""
        image = get_output(hashcode)
        if image:
            return (image, hashcode)

        input = get_input()
        frame = self.current_frame[0]
        if frame.mode == "RGBA":
            # Use the transparency mask of the image to paste
            input.paste(frame, frame)
        else:
            input.paste(frame)
        return (input, hashcode)

    def transform(  # type: ignore[override]
        self,
        get_input: Callable[[], Image.Image],
        get_output: Callable[[int], Image.Image],
        input_changed: bool,
        time: Fraction,
    ) -> Tuple[Optional[Image.Image], int]:
        """
        The transformation pastes the loaded image over whatever came before.
        Redraws when the animation advances to a new frame or the input
        changed; otherwise returns None so the cached frame is reused.
        """

        # Unpack tuple to make code a bit easier to understand
        frame, duration, hashcode = self.current_frame

        # Advance to the next frame once this frame's duration (milliseconds)
        # has elapsed. Static images have duration -1 and never advance.
        frame_advanced = duration >= 0 and time - self.frame_time > duration / 1000
        if frame_advanced:
            self.frame_time = time
            self.current_frame = next(self.frame_cycle)

            # Unpack updated value
            frame, duration, hashcode = self.current_frame

        if frame_advanced or input_changed:
            return self._render_current_frame(get_input, get_output, hashcode)
        return (None, hashcode)
|
|
@@ -0,0 +1,63 @@
|
|
|
1
|
+
from fractions import Fraction
|
|
2
|
+
from typing import Callable, Optional, Tuple
|
|
3
|
+
|
|
4
|
+
from PIL import Image, ImageEnhance
|
|
5
|
+
|
|
6
|
+
from streamdeck_ui.display.filter import Filter
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class KeypressFilter(Filter):
    """This filter is applied whenever a key is being pressed.

    While ``active`` is True the input image is shrunk, brightened and
    pasted onto a black background, giving visual press feedback.
    """

    last_time: Fraction
    brightness: float
    dim_brightness: float
    filter_hash: int

    def __init__(self):
        super(KeypressFilter, self).__init__()
        self.last_time = Fraction()
        self.brightness = 1
        self.dim_brightness = 0.5
        self.filter_hash = hash(self.__class__)
        # True while the key is held down; toggled by external code.
        self.active = False
        # Value of `active` seen on the previous transform() call, used to
        # detect press/release transitions.
        self.last_state = False

    def initialize(self, size: Tuple[int, int]):
        """Create the black backdrop used behind the pressed image.

        :param size: The filter image size (width, height).
        """
        self.blank_image = Image.new("RGB", size)
        self.size = size

    def transform(  # type: ignore[override]
        self,
        get_input: Callable[[], Image.Image],
        get_output: Callable[[int], Image.Image],
        input_changed: bool,
        time: Fraction,
    ) -> Tuple[Optional[Image.Image], int]:
        """Return the (possibly highlighted) image for the current key state.

        Redraws only when the input changed or the key state flipped since
        the last call; otherwise returns None so the pipeline reuses its
        cached frame.
        """
        frame_hash = hash((self.filter_hash, self.active))
        if input_changed or self.active != self.last_state:
            self.last_state = self.active
            image = get_output(frame_hash)
            if image:
                return (image, frame_hash)

            # A single copy of the input is sufficient (a redundant second
            # get_input() call has been removed).
            input = get_input()
            if self.active:
                background = self.blank_image.copy()
                # Reduce the image by 10px
                input.thumbnail((self.size[0] - 10, self.size[1] - 10), Image.Resampling.LANCZOS)

                # Light it up a bit
                enhancer = ImageEnhance.Brightness(input)
                input = enhancer.enhance(2)

                # Paste with a 5px offset onto the backdrop
                background.paste(input, (5, 5))

                return (background, frame_hash)
            else:
                return (input, frame_hash)
        return (None, frame_hash)
|
|
@@ -0,0 +1,74 @@
|
|
|
1
|
+
from fractions import Fraction
|
|
2
|
+
from typing import Dict, List, Optional, Tuple
|
|
3
|
+
|
|
4
|
+
from PIL.Image import Image
|
|
5
|
+
|
|
6
|
+
from streamdeck_ui.display.filter import Filter
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class Pipeline:
    """Runs an ordered list of filters, feeding each filter's output into the
    next stage and caching rendered frames so unchanged output is not redrawn.
    """

    def __init__(self) -> None:
        # Each entry pairs a filter with the last image it produced (or None
        # if it has not produced one yet).
        self.filters: List[Tuple[Filter, Optional[Image]]] = []
        self.first_run = True
        # Maps a cumulative pipeline-stage hash to the rendered image for that
        # stage. NOTE(review): this cache is never pruned — presumably the set
        # of distinct frames per button is small; confirm for long animations.
        self.output_cache: Dict[int, Image] = {}

    def add(self, filter: Filter) -> None:
        """Append a filter to the end of the pipeline and force a redraw on
        the next execute() call."""
        self.filters.append((filter, None))
        self.first_run = True

    def execute(self, time: Fraction) -> Optional[Tuple[Image, int]]:
        """
        Executes all the filters in the pipeline and returns the final image
        (with its pipeline hash), or None if the pipeline did not yield any changes.

        :param time: Seconds since the pipeline started, passed to each filter.
        """

        image: Optional[Image] = None
        is_modified = False
        # Cumulative hash of all stages processed so far; uniquely identifies
        # the output of the pipeline up to the current stage.
        pipeline_hash = 0

        # To avoid flake8 B023 (https://docs.python-guide.org/writing/gotchas/#late-binding-closures), we need to
        # capture the variable going into the lambda. However, as a result of that, we have a lambda that
        # technically takes an argument (with a default) that does not match the signature we declared
        # for the transform() method. There are likely other solutions to avoid the warning this produces,
        # like using functools.partial, but this needs to be investigated.

        for i, (current_filter, cached) in enumerate(self.filters):
            (result_image, hashcode) = current_filter.transform(
                lambda input_image=image: input_image.copy() if input_image else None,  # type: ignore [misc]
                lambda output_hash, pipeline_hash=pipeline_hash: self.output_cache.get(hash((output_hash, pipeline_hash)), None),  # type: ignore [misc]
                is_modified | self.first_run,
                time,
            )

            pipeline_hash = hash((hashcode, pipeline_hash))

            if not result_image:
                # Filter indicated that it did NOT change anything, pull up the last
                # cached value for the next step in the pipeline
                image = cached
            else:
                # The filter changed the image, cache it for future use
                image = result_image
                # Update tuple with cached image
                self.filters[i] = (current_filter, image)
                is_modified = True

            # Store this image with pipeline hash if we haven't seen it.
            if pipeline_hash not in self.output_cache and image is not None:
                self.output_cache[pipeline_hash] = image

        if self.first_run:
            # Force an update the first time the pipeline runs
            is_modified = True
            self.first_run = False

        if image is not None:
            return (image, pipeline_hash)
        return None

    def last_result(self) -> Optional[Image]:
        """
        Returns the last known output of the pipeline, i.e. the cached output
        of the final filter, or None when the pipeline is empty.
        """
        if not self.filters:
            return None
        return self.filters[-1][1]
|
|
@@ -0,0 +1,54 @@
|
|
|
1
|
+
from fractions import Fraction
|
|
2
|
+
from typing import Callable, Optional, Tuple
|
|
3
|
+
|
|
4
|
+
from PIL import Image, ImageEnhance
|
|
5
|
+
|
|
6
|
+
from streamdeck_ui.display.filter import Filter
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class PulseFilter(Filter):
    """Alternates the image between full and dimmed brightness on a fixed
    interval, producing a pulsing effect."""

    last_time: Fraction
    pulse_delay: float
    brightness: float
    dim_brightness: float
    filter_hash: int

    def __init__(self):
        super().__init__()
        self.last_time = Fraction()
        self.pulse_delay = 0.5
        self.brightness = 1
        self.dim_brightness = 0.5
        self.filter_hash = hash(self.__class__)

    def initialize(self, size: Tuple[int, int]):
        pass

    def transform(  # type: ignore[override]
        self,
        get_input: Callable[[], Image.Image],
        get_output: Callable[[int], Image.Image],
        input_changed: bool,
        time: Fraction,
    ) -> Tuple[Optional[Image.Image], int]:
        """Return the image at the current brightness level, toggling the
        level whenever pulse_delay seconds have elapsed. Returns None when
        neither the input nor the brightness changed."""
        # Flip between the two brightness levels once per pulse_delay.
        toggled = time - self.last_time > self.pulse_delay
        if toggled:
            self.last_time = time
            dim = self.dim_brightness
            self.brightness = 1 if self.brightness == dim else dim

        frame_hash = hash((self.filter_hash, self.brightness))
        if not (input_changed or toggled):
            # Nothing changed; let the pipeline reuse its cached frame.
            return (None, frame_hash)

        cached = get_output(frame_hash)
        if cached:
            return (cached, frame_hash)

        # Render a fresh frame at the current brightness level.
        dimmed = ImageEnhance.Brightness(get_input()).enhance(self.brightness)
        return (dimmed, frame_hash)
|
|
@@ -0,0 +1,142 @@
|
|
|
1
|
+
from fractions import Fraction
|
|
2
|
+
from typing import Callable, Optional, Tuple
|
|
3
|
+
|
|
4
|
+
from PIL import Image, ImageDraw, ImageFilter, ImageFont
|
|
5
|
+
|
|
6
|
+
from streamdeck_ui.config import DEFAULT_FONT_FALLBACK_PATH
|
|
7
|
+
from streamdeck_ui.display.filter import Filter
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class TextFilter(Filter):
    """Draws a text label (outlined in black for legibility) on top of the
    input image at a configurable vertical/horizontal position."""

    font_blur: Optional[ImageFilter.Kernel] = None
    # Static instance - no need to create one per Filter instance

    # Pre-rendered RGBA text overlay; created in initialize().
    image: Optional[Image.Image]

    def __init__(
        self, text: str, font: str, font_size: int, font_color: str, vertical_align: str, horizontal_align: str
    ):
        """
        :param text: The label text; may contain newlines.
        :param font: TrueType font path/name; falls back to the bundled
            default font if it cannot be loaded.
        :param font_size: Font size in points.
        :param font_color: Fill color for the text.
        :param vertical_align: "top", "middle-top", "middle", "middle-bottom";
            anything else means bottom.
        :param horizontal_align: "left" or "right"; anything else is
            normalized to "center".
        """
        super(TextFilter, self).__init__()
        self.text = text
        self.vertical_align = vertical_align
        self.horizontal_align = horizontal_align
        self.font_color = font_color
        self.fallback_font = ImageFont.truetype(DEFAULT_FONT_FALLBACK_PATH, font_size)
        try:
            self.true_font = ImageFont.truetype(font, font_size)
        except OSError:
            print("Unable to set font: " + font)
            self.true_font = self.fallback_font
        # 5x5 blur kernel, historically used to soften the text outline.
        # fmt: off
        kernel = [
            0, 1, 2, 1, 0,
            1, 2, 4, 2, 1,
            2, 4, 8, 4, 1,
            1, 2, 4, 2, 1,
            0, 1, 2, 1, 0]
        # fmt: on
        TextFilter.font_blur = ImageFilter.Kernel((5, 5), kernel, scale=0.1 * sum(kernel))
        # NOTE(review): offset/offset_direction are not read in this module —
        # presumably scroll state consumed elsewhere; verify against callers.
        self.offset = 0.0
        self.offset_direction = 1
        self.image = None

        # Hashcode should be created for anything that makes this frame unique
        self.hashcode = hash((self.__class__, text, font, font_size, font_color, vertical_align, horizontal_align))

    def initialize(self, size: Tuple[int, int]):
        """Pre-render the text onto a transparent RGBA overlay of *size*.

        :param size: The filter image size (width, height).
        """
        self.image = Image.new("RGBA", size)
        foreground_draw = ImageDraw.Draw(self.image)
        # Split the text by newline to determine label height
        # then grab the longest word to determine label width
        text_split_newline = sorted(self.text.split("\n"), key=len)
        # Calculate the height and width of the text we're drawing, using the font itself
        # Previously we counted the number of characters to determine the width, but if the font wasn't a fixed width
        # the horizontal alignment would be off.
        _, _, label_w, _ = foreground_draw.textbbox((0, 0), self.text, font=self.true_font)
        # Calculate dimensions for text that include ascender (above the line)
        # and below the line (descender) characters. This is used to adjust the
        # font placement and should allow for button text to horizontally align
        # across buttons. Basically we want to figure out what is the tallest
        # text we will need to draw.
        _, _, _, label_h = foreground_draw.textbbox(
            (0, 0), "\n".join(["lLpgyL|"] * len(text_split_newline)), font=self.true_font
        )

        # Vertical spacing between the five alignment slots (top, middle-top,
        # middle, middle-bottom, bottom).
        gap = (size[1] - 5 * label_h) // 4

        if self.vertical_align == "top":
            label_y = 0
        elif self.vertical_align == "middle-top":
            label_y = int(gap + label_h)
        elif self.vertical_align == "middle":
            label_y = size[1] // 2 - (int(label_h) // 2)
        elif self.vertical_align == "middle-bottom":
            label_y = int((gap + label_h) * 3)
        else:
            label_y = int(size[1] - label_h)
            # Default or "bottom"

        if self.horizontal_align == "left":
            label_x = 0
        elif self.horizontal_align == "right":
            label_x = int(size[0] - label_w)
        else:
            # Normalize any other value to "center" (also used below as the
            # multiline align argument).
            self.horizontal_align = "center"
            label_x = (size[0] - int(label_w)) // 2
            # Default or "center"

        label_pos = (label_x, label_y)

        try:
            foreground_draw.multiline_text(
                label_pos,
                text=self.text,
                font=self.true_font,
                fill=self.font_color,
                align=self.horizontal_align,
                spacing=0,
                stroke_fill="black",
                stroke_width=2,
            )
        except OSError:
            # Some fonts load but fail to render; retry with the default font.
            print("Font does not render with pillow, falling back to default font.")
            foreground_draw.multiline_text(
                label_pos,
                text=self.text,
                font=self.fallback_font,
                fill=self.font_color,
                align=self.horizontal_align,
                spacing=0,
                stroke_fill="black",
                stroke_width=2,
            )

    def transform(  # type: ignore[override]
        self,
        get_input: Callable[[], Image.Image],
        get_output: Callable[[int], Image.Image],
        input_changed: bool,
        time: Fraction,
    ) -> Tuple[Optional[Image.Image], int]:
        """
        Pastes the pre-rendered text overlay on top of the input image,
        reusing the cached output frame when one exists. Returns None when
        the input has not changed.
        """

        if input_changed:
            image = get_output(self.hashcode)
            if image:
                return (image, self.hashcode)

            input = get_input()
            if self.image:
                # Use the overlay itself as the alpha mask when pasting.
                input.paste(self.image, self.image)
            return (input, self.hashcode)
        return (None, self.hashcode)
|
|
135
|
+
|
|
136
|
+
|
|
137
|
+
def is_a_valid_text_filter_font(font) -> bool:
    """Return True when *font* can be loaded by TextFilter, False otherwise.

    Probes by constructing a throwaway TextFilter; any failure at all is
    treated as an invalid font.
    """
    try:
        TextFilter("", font, 12, "white", "top", "left")
    except BaseException:  # noqa: B036
        return False
    return True
|