@lightningtv/solid 2.11.7 → 2.12.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/src/primitives/Lazy.d.ts +1 -0
- package/dist/src/primitives/Lazy.jsx +14 -1
- package/dist/src/primitives/Lazy.jsx.map +1 -1
- package/dist/src/primitives/Virtual.jsx +7 -1
- package/dist/src/primitives/Virtual.jsx.map +1 -1
- package/dist/src/primitives/announcer/announcer.d.ts +1 -0
- package/dist/src/primitives/announcer/announcer.js +4 -3
- package/dist/src/primitives/announcer/announcer.js.map +1 -1
- package/dist/src/primitives/announcer/speech.d.ts +1 -1
- package/dist/src/primitives/announcer/speech.js +98 -8
- package/dist/src/primitives/announcer/speech.js.map +1 -1
- package/dist/src/primitives/index.d.ts +1 -0
- package/dist/src/primitives/index.js +1 -0
- package/dist/src/primitives/index.js.map +1 -1
- package/dist/src/primitives/utils/createBlurredImage.d.ts +56 -0
- package/dist/src/primitives/utils/createBlurredImage.js +223 -0
- package/dist/src/primitives/utils/createBlurredImage.js.map +1 -0
- package/dist/src/primitives/utils/handleNavigation.js +7 -13
- package/dist/src/primitives/utils/handleNavigation.js.map +1 -1
- package/dist/tsconfig.tsbuildinfo +1 -1
- package/package.json +1 -1
- package/src/primitives/Lazy.tsx +15 -3
- package/src/primitives/Virtual.tsx +8 -1
- package/src/primitives/announcer/announcer.ts +10 -3
- package/src/primitives/announcer/speech.ts +113 -6
- package/src/primitives/index.ts +1 -0
- package/src/primitives/utils/createBlurredImage.ts +366 -0
- package/src/primitives/utils/handleNavigation.ts +9 -14
package/src/primitives/announcer/speech.ts
CHANGED

@@ -12,6 +12,11 @@ export interface SeriesResult {
   cancel: () => void;
 }
 
+// Aria label
+type AriaLabel = { text: string; lang: string };
+const ARIA_PARENT_ID = 'aria-parent';
+let ariaLabelPhrases: AriaLabel[] = [];
+
 /* global SpeechSynthesisErrorEvent */
 function flattenStrings(series: SpeechType[] = []): SpeechType[] {
   const flattenedSeries = [];
@@ -40,6 +45,82 @@ function delay(pause: number)
   });
 }
 
+/**
+ * @description This function is called at the end of the speak series
+ * @param Phrase is an object containing the text and the language
+ */
+function addChildrenToAriaDiv(phrase: AriaLabel) {
+  if (phrase?.text?.trim().length === 0) return;
+  ariaLabelPhrases.push(phrase);
+}
+
+/**
+ * @description This function is triggered finally when the speak series is finished and we are to speak the aria labels
+ */
+function focusElementForAria() {
+  const element = createAriaElement();
+
+  if (!element) {
+    console.error(`ARIA div not found: ${ARIA_PARENT_ID}`);
+    return;
+  }
+
+  for (const object of ariaLabelPhrases) {
+    const span = document.createElement('span');
+
+    // TODO: Not sure LG or Samsung support lang attribute on span or switching language
+    span.setAttribute('lang', object.lang);
+    span.setAttribute('aria-label', object.text);
+    element.appendChild(span);
+  }
+
+  // Cleanup
+  setTimeout(() => {
+    ariaLabelPhrases = [];
+    cleanAriaLabelParent();
+    focusCanvas();
+  }, 100);
+}
+
+/**
+ * @description Clean the aria label parent after speaking
+ */
+function cleanAriaLabelParent(): void {
+  const parentTag = document.getElementById(ARIA_PARENT_ID);
+  if (parentTag) {
+    while (parentTag.firstChild) {
+      parentTag.removeChild(parentTag.firstChild);
+    }
+  }
+}
+
+/**
+ * @description Focus the canvas element
+ */
+function focusCanvas(): void {
+  const canvas = document.getElementById('app')?.firstChild as HTMLElement;
+  canvas?.focus();
+}
+
+/**
+ * @description Create the aria element in the DOM if it doesn't exist
+ * @private For xbox, we may need to create a different element each time we wanna use aria
+ */
+function createAriaElement(): HTMLDivElement | HTMLElement {
+  const aria_container = document.getElementById(ARIA_PARENT_ID);
+
+  if (!aria_container) {
+    const element = document.createElement('div');
+    element.setAttribute('id', ARIA_PARENT_ID);
+    element.setAttribute('aria-live', 'assertive');
+    element.setAttribute('tabindex', '0');
+    document.body.appendChild(element);
+    return element;
+  }
+
+  return aria_container;
+}
+
 /**
  * Speak a string
  *
@@ -82,6 +163,7 @@ function speak(
 
 function speakSeries(
   series: SpeechType,
+  aria: boolean,
   lang: string,
   voice?: string,
   root = true,
@@ -118,7 +200,8 @@ function speakSeries(
 
         while (active && retriesLeft > 0) {
           try {
-            await speak(phrase, utterances, lang, voice);
+            if (aria) addChildrenToAriaDiv({ text: phrase, lang });
+            else await speak(phrase, utterances, lang, voice);
             retriesLeft = 0; // Exit retry loop on success
           } catch (e) {
             if (e instanceof SpeechSynthesisErrorEvent) {
@@ -152,8 +235,12 @@ function speakSeries(
 
         while (active && retriesLeft > 0) {
           try {
-            await speak(text, utterances, objectLang, objectVoice?.name);
-            retriesLeft = 0; // Exit retry loop on success
+            if (text) {
+              if (aria) addChildrenToAriaDiv({ text, lang: objectLang });
+              else
+                await speak(text, utterances, objectLang, objectVoice?.name);
+              retriesLeft = 0; // Exit retry loop on success
+            }
           } catch (e) {
             if (e instanceof SpeechSynthesisErrorEvent) {
               if (e.error === 'network') {
@@ -178,18 +265,22 @@ function speakSeries(
           }
         } else if (typeof phrase === 'function') {
           // Handle functions
-          const seriesResult = speakSeries(phrase(), lang, voice, false);
+          const seriesResult = speakSeries(phrase(), aria, lang, voice, false);
           nestedSeriesResults.push(seriesResult);
           await seriesResult.series;
         } else if (Array.isArray(phrase)) {
           // Handle nested arrays
-          const seriesResult = speakSeries(phrase, lang, voice, false);
+          const seriesResult = speakSeries(phrase, aria, lang, voice, false);
           nestedSeriesResults.push(seriesResult);
           await seriesResult.series;
         }
       }
     } finally {
       active = false;
+      // Call completion logic only for the original (root) series
+      if (root && aria) {
+        focusElementForAria();
+      }
     }
   })();
 
@@ -205,7 +296,21 @@ function speakSeries(
       if (!active) {
         return;
       }
+
       if (root) {
+        if (aria) {
+          const element = createAriaElement();
+
+          if (element) {
+            ariaLabelPhrases = [];
+            cleanAriaLabelParent();
+            element.focus();
+            focusCanvas();
+          }
+
+          return;
+        }
+
         synth.cancel(); // Cancel all ongoing speech
       }
       nestedSeriesResults.forEach((nestedSeriesResult) => {
@@ -215,13 +320,15 @@ function speakSeries(
     },
   };
 }
+
 let currentSeries: SeriesResult | undefined;
 export default function (
   toSpeak: SpeechType,
+  aria: boolean,
   lang: string = 'en-US',
   voice?: string,
 ) {
   currentSeries && currentSeries.cancel();
-  currentSeries = speakSeries(toSpeak, lang, voice);
+  currentSeries = speakSeries(toSpeak, aria, lang, voice);
   return currentSeries;
 }
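Taken together, the speech.ts changes thread a new `aria` flag through the whole pipeline: when it is true, phrases are collected into the `aria-parent` live region (as `<span aria-label>` children read by the platform screen reader) instead of going through `speechSynthesis`, and the region is focused once the root series completes. A minimal sketch of calling the updated default export (the import path and phrases are illustrative, not taken from the diff):

import speak from './speech.js'; // hypothetical relative import of this module

// TTS path: phrases are spoken via window.speechSynthesis
const spoken = speak(['Now playing', 'Big Buck Bunny'], false, 'en-US');

// ARIA path: phrases land in the 'aria-parent' aria-live container instead
const announced = speak(['Now playing', 'Big Buck Bunny'], true, 'en-US');

// Both return a SeriesResult, so either series can still be cancelled mid-way
announced.cancel();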
package/src/primitives/index.ts
CHANGED

@@ -28,6 +28,7 @@ export {
 } from './utils/chainFunctions.js';
 export * from './utils/handleNavigation.js';
 export { createSpriteMap, type SpriteDef } from './utils/createSpriteMap.js';
+export { createBlurredImage } from './utils/createBlurredImage.js';
 
 export type * from './types.js';
 export type { KeyHandler } from '@lightningtv/core/focusManager';
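With this re-export in place, consumers can pull the helper from the primitives entry point rather than a deep path. A one-line sketch, assuming the package's published primitives subpath export:

import { createBlurredImage } from '@lightningtv/solid/primitives';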
package/src/primitives/utils/createBlurredImage.ts
ADDED

@@ -0,0 +1,366 @@
+import { type Accessor, type Resource, createResource } from 'solid-js';
+
+/**
+ * Represents a valid image source that can be used for blurring
+ */
+type ImageSource = string | URL;
+
+/**
+ * Represents a valid image source or null/undefined
+ */
+type NullableImageSource = ImageSource | null | undefined;
+
+/**
+ * Configuration options for Gaussian blur operation
+ */
+interface BlurOptions {
+  /**
+   * The blur radius in pixels
+   * @default 10
+   */
+  readonly radius?: number;
+  /**
+   * CORS setting for image loading
+   * @default 'anonymous'
+   */
+  readonly crossOrigin?: 'anonymous' | 'use-credentials' | '';
+  /**
+   * The resolution of the output image in pixels
+   * @default 1
+   */
+  readonly resolution?: number;
+}
+
+/**
+ * Default blur options
+ */
+const DEFAULT_BLUR_OPTIONS: Required<
+  Pick<BlurOptions, 'radius' | 'crossOrigin' | 'resolution'>
+> = {
+  radius: 10,
+  crossOrigin: 'anonymous',
+  resolution: 1,
+} as const;
+
+/**
+ * Type for Gaussian kernel array
+ * Represents a normalized array of weights
+ */
+type GaussianKernel = readonly number[];
+
+/**
+ * Type for image dimensions
+ */
+interface ImageDimensions {
+  readonly width: number;
+  readonly height: number;
+}
+
+/**
+ * Type for the resource return value from createBlurredImage
+ */
+type BlurredImageResource<T extends NullableImageSource> = Resource<
+  T extends null | undefined ? null : string
+>;
+
+/**
+ * Ensures a number is within valid range
+ */
+type ValidRadius = number & { __brand: 'ValidRadius' };
+
+/**
+ * Validates that radius is a positive number
+ */
+function isValidRadius(radius: number): radius is ValidRadius {
+  return radius > 0 && Number.isFinite(radius);
+}
+
+/**
+ * Ensures a resolution is a positive number
+ */
+function isValidResolution(resolution: number): boolean {
+  return resolution > 0 && resolution <= 1 && Number.isFinite(resolution);
+}
+
+/**
+ * Applies vertical Gaussian blur to image data
+ * @param input - Input pixel data
+ * @param output - Output pixel data buffer
+ * @param width - Image width
+ * @param height - Image height
+ * @param kernel - Gaussian kernel weights
+ * @param half - Half of kernel size
+ */
+function applyVerticalBlur(
+  input: Readonly<Uint8ClampedArray>,
+  output: Uint8ClampedArray,
+  width: number,
+  height: number,
+  kernel: Readonly<GaussianKernel>,
+  half: number,
+): void {
+  for (let y = 0; y < height; y++) {
+    for (let x = 0; x < width; x++) {
+      let r = 0,
+        g = 0,
+        b = 0,
+        a = 0;
+      let weightSum = 0;
+
+      for (let ky = -half; ky <= half; ky++) {
+        const py = y + ky;
+        if (py >= 0 && py < height) {
+          const pixelIndex = (py * width + x) * 4;
+          const weight = kernel[ky + half]!;
+
+          r += input[pixelIndex]! * weight;
+          g += input[pixelIndex + 1]! * weight;
+          b += input[pixelIndex + 2]! * weight;
+          a += input[pixelIndex + 3]! * weight;
+          weightSum += weight;
+        }
+      }
+
+      const outputIndex = (y * width + x) * 4;
+      output[outputIndex] = r / weightSum;
+      output[outputIndex + 1] = g / weightSum;
+      output[outputIndex + 2] = b / weightSum;
+      output[outputIndex + 3] = a / weightSum;
+    }
+  }
+}
+
+/**
+ * Applies horizontal Gaussian blur to image data
+ * @param input - Input pixel data
+ * @param output - Output pixel data buffer
+ * @param width - Image width
+ * @param height - Image height
+ * @param kernel - Gaussian kernel weights
+ * @param half - Half of kernel size
+ */
+function applyHorizontalBlur(
+  input: Readonly<Uint8ClampedArray>,
+  output: Uint8ClampedArray,
+  width: number,
+  height: number,
+  kernel: Readonly<GaussianKernel>,
+  half: number,
+): void {
+  for (let y = 0; y < height; y++) {
+    for (let x = 0; x < width; x++) {
+      let r = 0,
+        g = 0,
+        b = 0,
+        a = 0;
+      let weightSum = 0;
+
+      for (let kx = -half; kx <= half; kx++) {
+        const px = x + kx;
+        if (px >= 0 && px < width) {
+          const pixelIndex = (y * width + px) * 4;
+          const weight = kernel[kx + half]!;
+
+          r += input[pixelIndex]! * weight;
+          g += input[pixelIndex + 1]! * weight;
+          b += input[pixelIndex + 2]! * weight;
+          a += input[pixelIndex + 3]! * weight;
+          weightSum += weight;
+        }
+      }
+
+      const outputIndex = (y * width + x) * 4;
+      output[outputIndex] = r / weightSum;
+      output[outputIndex + 1] = g / weightSum;
+      output[outputIndex + 2] = b / weightSum;
+      output[outputIndex + 3] = a / weightSum;
+    }
+  }
+}
+
+/**
+ * Generates a normalized Gaussian kernel
+ * @param size - Kernel size (must be odd)
+ * @param sigma - Standard deviation
+ * @returns Normalized Gaussian kernel
+ */
+function generateGaussianKernel(
+  size: number,
+  sigma: number,
+): Readonly<GaussianKernel> {
+  const kernel: number[] = [];
+  const half = Math.floor(size / 2);
+  let sum = 0;
+
+  for (let i = -half; i <= half; i++) {
+    const value = Math.exp(-(i * i) / (2 * sigma * sigma));
+    kernel.push(value);
+    sum += value;
+  }
+
+  return Object.freeze(kernel.map((value) => value / sum));
+}
+
+/**
+ * Applies Gaussian blur convolution to image data
+ * @param imageData - Source image data
+ * @param dimensions - Image dimensions
+ * @param radius - Blur radius
+ * @returns Blurred image data
+ */
+function gaussianBlurConvolution(
+  imageData: Readonly<ImageData>,
+  dimensions: Readonly<ImageDimensions>,
+  radius: ValidRadius,
+): ImageData {
+  const { data } = imageData;
+  const { width, height } = dimensions;
+  const output = new Uint8ClampedArray(data.length);
+
+  const kernelSize = Math.ceil(radius * 2) * 2 + 1;
+  const kernel = generateGaussianKernel(kernelSize, radius);
+  const half = Math.floor(kernelSize / 2);
+
+  applyHorizontalBlur(data, output, width, height, kernel, half);
+
+  const tempData = new Uint8ClampedArray(output);
+  applyVerticalBlur(tempData, output, width, height, kernel, half);
+
+  return new ImageData(output, width, height);
+}
+
+/**
+ * Applies Gaussian blur to an image URL
+ * @param imageUrl - Image source (string or URL)
+ * @param options - Blur configuration options
+ * @returns Promise resolving to data URL of blurred image
+ * @throws {Error} If image fails to load or blur operation fails
+ */
+export async function applyGaussianBlur<TSource extends ImageSource>(
+  imageUrl: TSource,
+  options?: Readonly<BlurOptions>,
+): Promise<string> {
+  const opts = { ...DEFAULT_BLUR_OPTIONS, ...options };
+  const radius = opts.radius;
+  const resolution = opts.resolution;
+
+  if (!isValidRadius(radius)) {
+    throw new Error(
+      `Invalid blur radius: ${radius}. Must be a positive number.`,
+    );
+  }
+
+  if (!isValidResolution(resolution)) {
+    throw new Error(
+      `Invalid resolution: ${resolution}. Must be a number between 0 and 1.`,
+    );
+  }
+
+  return new Promise<string>((resolve, reject) => {
+    const img = new Image();
+    img.crossOrigin = opts.crossOrigin;
+
+    img.onload = (): void => {
+      try {
+        const canvas = document.createElement('canvas');
+        const ctx = canvas.getContext('2d', {
+          willReadFrequently: true,
+        });
+
+        if (ctx === null) {
+          reject(new Error('Failed to get canvas context'));
+          return;
+        }
+
+        const scaledWidth = Math.max(1, Math.round(img.width * resolution));
+        const scaledHeight = Math.max(1, Math.round(img.height * resolution));
+
+        const dimensions: ImageDimensions = {
+          width: scaledWidth,
+          height: scaledHeight,
+        };
+
+        canvas.width = dimensions.width;
+        canvas.height = dimensions.height;
+
+        const hasFilterSupport = ctx.filter !== undefined;
+        if (hasFilterSupport) {
+          ctx.filter = `blur(${radius}px)`;
+          ctx.drawImage(img, 0, 0, scaledWidth, scaledHeight);
+          ctx.filter = 'none';
+        } else {
+          ctx.drawImage(img, 0, 0, scaledWidth, scaledHeight);
+          const imageData = ctx.getImageData(
+            0,
+            0,
+            dimensions.width,
+            dimensions.height,
+          );
+          const blurredData = gaussianBlurConvolution(
+            imageData,
+            dimensions,
+            radius,
+          );
+          ctx.putImageData(blurredData, 0, 0);
+        }
+
+        const dataUrl = canvas.toDataURL();
+
+        if (dataUrl) {
+          resolve(dataUrl);
+        } else {
+          reject(new Error('Failed to create image data URL'));
+        }
+      } catch (error) {
+        reject(
+          error instanceof Error
+            ? error
+            : new Error('Unknown error during blur operation'),
+        );
+      }
+    };
+
+    img.onerror = (): void => {
+      reject(new Error('Failed to load image'));
+    };
+
+    const srcString: string =
+      typeof imageUrl === 'string' ? imageUrl : imageUrl.toString();
+    img.src = srcString;
+  });
+}
+
+/**
+ * Hook to create a blurred image resource
+ * @param imageUrl - Accessor function returning image source or null/undefined
+ * @param options - Blur configuration options
+ * @returns Resource containing blurred image data URL or null
+ * @template TSource - Type of image source (string, URL, or null/undefined)
+ *
+ * @example
+ * ```ts
+ * const imageUrl = () => 'https://example.com/image.jpg';
+ * const blurred = createBlurredImage(imageUrl, { radius: 15 });
+ * ```
+ */
+export function createBlurredImage<TSource extends NullableImageSource>(
+  imageUrl: Accessor<TSource>,
+  options?: Readonly<BlurOptions>,
+): BlurredImageResource<TSource> {
+  const imageUrlString: Accessor<string | null | undefined> = () => {
+    const url = imageUrl();
+    if (url === null || url === undefined) {
+      return url;
+    }
+    return typeof url === 'string' ? url : url.toString();
+  };
+
+  const [blurredImage] = createResource(
+    imageUrlString,
+    async (url: string): Promise<string> => {
+      return await applyGaussianBlur(url, options);
+    },
+  );
+
+  return blurredImage as BlurredImageResource<TSource>;
+}
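The new module pairs an exported `applyGaussianBlur` helper (using the native canvas `filter = 'blur(...)'` when available, and a separable two-pass Gaussian convolution otherwise; at the default radius of 10 the fallback builds a 41-tap kernel, since Math.ceil(10 * 2) * 2 + 1 = 41) with the `createBlurredImage` hook, which wraps it in a Solid resource keyed on the accessor. A hedged usage sketch in a component; the `<View>` element and its `src` prop mirror typical Lightning Solid markup but are assumptions, not part of this diff:

import { createSignal, onMount } from 'solid-js';
import { View } from '@lightningtv/solid';
import { createBlurredImage } from './createBlurredImage.js';

function BlurredBackdrop() {
  const [poster, setPoster] = createSignal<string | null>(null);

  // While poster() is null the resource fetcher never fires (normal
  // createResource semantics); it re-runs whenever poster() changes.
  // Halving the resolution first keeps the convolution fallback cheap.
  const blurred = createBlurredImage(poster, { radius: 15, resolution: 0.5 });

  onMount(() => setPoster('https://example.com/poster.jpg'));

  // blurred() is undefined until the blurred data URL resolves
  return <View src={blurred()} />;
}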
package/src/primitives/utils/handleNavigation.ts
CHANGED

@@ -10,7 +10,7 @@ declare module '@lightningtv/core' {
 }
 
 function idxInArray(idx: number, arr: readonly any[]): boolean {
-  return idx >= 0 && idx < arr.length;
+  return idx === 0 || (idx >= 0 && idx < arr.length);
 }
 
 function findFirstFocusableChildIdx(
@@ -77,20 +77,15 @@ export function onGridFocus(
 export const navigableForwardFocus: lng.ForwardFocusHandler = function () {
   const navigable = this as lngp.NavigableElement;
 
-  // Undo for now - We should only do this when setFocus is called rather than on forwardFocus
-  // needs some more research
-  // if (!lng.isFocused(this)) {
-  //   // if a child already has focus, assume that should be selected
-  //   for (let [i, child] of this.children.entries()) {
-  //     if (lng.isFocused(child)) {
-  //       this.selected = i;
-  //       break;
-  //     }
-  //   }
-  // }
-
   let selected = navigable.selected;
-
+
+  if (selected !== 0) {
+    selected = lng.clamp(selected, 0, this.children.length - 1);
+    while (!idxInArray(selected, this.children)) {
+      selected--;
+    }
+  }
+
   selected = findFirstFocusableChildIdx(navigable, selected);
   // update selected as firstfocusable maybe different if first element has skipFocus
   navigable.selected = selected;