@juicesharp/rpiv-voice 1.4.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. package/CHANGELOG.md +45 -0
  2. package/LICENSE +21 -0
  3. package/README.md +116 -0
  4. package/audio/error-log.ts +37 -0
  5. package/audio/hallucination-filter.ts +71 -0
  6. package/audio/mic-source.ts +38 -0
  7. package/audio/model-download.ts +268 -0
  8. package/audio/pcm.ts +45 -0
  9. package/audio/sherpa-onnx-node.d.ts +55 -0
  10. package/audio/stt-engine.ts +117 -0
  11. package/command/pipeline-runner.ts +238 -0
  12. package/command/splash-runner.ts +72 -0
  13. package/command/voice-command.ts +251 -0
  14. package/config/voice-config.ts +80 -0
  15. package/docs/cover.png +0 -0
  16. package/docs/cover.svg +173 -0
  17. package/docs/equalizer.svg +86 -0
  18. package/docs/overlay.jpg +0 -0
  19. package/docs/overlay.png +0 -0
  20. package/docs/vertical-cover.png +0 -0
  21. package/docs/vertical-cover.svg +239 -0
  22. package/index.ts +66 -0
  23. package/locales/de.json +39 -0
  24. package/locales/en.json +42 -0
  25. package/locales/es.json +39 -0
  26. package/locales/fr.json +39 -0
  27. package/locales/pt-BR.json +39 -0
  28. package/locales/pt.json +39 -0
  29. package/locales/ru.json +39 -0
  30. package/locales/uk.json +39 -0
  31. package/package.json +94 -0
  32. package/state/i18n-bridge.ts +51 -0
  33. package/state/key-router.ts +46 -0
  34. package/state/screen-intent.ts +27 -0
  35. package/state/selectors/contract.ts +13 -0
  36. package/state/selectors/derivations.ts +9 -0
  37. package/state/selectors/focus.ts +6 -0
  38. package/state/selectors/projections.ts +112 -0
  39. package/state/state-reducer.ts +197 -0
  40. package/state/state.ts +48 -0
  41. package/state/status-intent.ts +23 -0
  42. package/state/voice-session.ts +176 -0
  43. package/view/component-binding.ts +24 -0
  44. package/view/components/equalizer-view.ts +237 -0
  45. package/view/components/settings-field-view.ts +77 -0
  46. package/view/components/settings-form-view.ts +26 -0
  47. package/view/components/splash-view.ts +98 -0
  48. package/view/components/status-bar-view.ts +112 -0
  49. package/view/components/transcript-view.ts +50 -0
  50. package/view/overlay-view.ts +82 -0
  51. package/view/props-adapter.ts +29 -0
  52. package/view/screen-content-strategy.ts +58 -0
  53. package/view/stateful-view.ts +7 -0
package/audio/sherpa-onnx-node.d.ts
@@ -0,0 +1,55 @@
+ // Ambient type declarations for sherpa-onnx-node (no .d.ts shipped upstream).
+ // Mirrors `nodejs-addon-examples/test_asr_non_streaming_whisper.js` from
+ // k2-fsa/sherpa-onnx — top-level keys are camelCase; binding converts to
+ // snake_case C struct internally.
+
+ declare module "sherpa-onnx-node" {
+   export interface Samples {
+     samples: Float32Array;
+     sampleRate: number;
+   }
+   export interface Result {
+     text: string;
+     tokens: string[];
+     timestamps: number[];
+   }
+   export interface Stream {
+     acceptWaveform(input: Samples): void;
+   }
+   // Note: OfflineRecognizer has no release/destroy/free method in
+   // sherpa-onnx-node@1.13.0 — the native handle is GC-managed.
+   // We use the synchronous `decode` + `getResult` pair (the canonical
+   // upstream example uses sync exclusively).
+   export interface Recognizer {
+     createStream(): Stream;
+     decode(stream: Stream): void;
+     getResult(stream: Stream): Result;
+   }
+   // Whisper config: `language` and `task` are optional (and meaningless for
+   // the *.en monolingual variants — the upstream example omits them
+   // entirely). `tailPaddings` defaults to 0.
+   export interface WhisperModelConfig {
+     encoder: string;
+     decoder: string;
+     language?: string;
+     task?: string;
+     tailPaddings?: number;
+   }
+   export interface Config {
+     featConfig: { sampleRate: number; featureDim: number };
+     modelConfig: {
+       whisper: WhisperModelConfig;
+       tokens: string;
+       numThreads?: number;
+       provider?: string;
+     };
+   }
+   // The binding exposes both a sync constructor and an async factory.
+   // The canonical examples use the sync constructor; we keep both signatures
+   // here so consumers can pick.
+   export interface OfflineRecognizerCtor {
+     new (config: Config): Recognizer;
+     createAsync(config: Config): Promise<Recognizer>;
+   }
+   export const OfflineRecognizer: OfflineRecognizerCtor;
+ }
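For orientation, a minimal consumer of these declarations, shaped after the upstream non-streaming Whisper example (the model paths are illustrative placeholders, not files this package ships):

import { OfflineRecognizer } from "sherpa-onnx-node";
// Caveat: under ESM the named export may only exist on `.default`; see the
// loader fallback in stt-engine.ts below. A CJS require() sees it directly.

const recognizer = new OfflineRecognizer({
  featConfig: { sampleRate: 16000, featureDim: 80 },
  modelConfig: {
    whisper: { encoder: "base-encoder.int8.onnx", decoder: "base-decoder.int8.onnx" },
    tokens: "base-tokens.txt",
    numThreads: 4,
    provider: "cpu",
  },
});

const stream = recognizer.createStream();
stream.acceptWaveform({ samples: new Float32Array(16000), sampleRate: 16000 }); // 1 s of audio
recognizer.decode(stream); // synchronous, like the canonical upstream example
console.log(recognizer.getResult(stream).text);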
package/audio/stt-engine.ts
@@ -0,0 +1,117 @@
+ /**
+  * stt-engine — thin typed wrapper around sherpa-onnx-node.
+  *
+  * Type model: sherpa-onnx-node ships no .d.ts files; ambient types live in
+  * ./sherpa-onnx-node.d.ts. Config keys are camelCase; the binding maps to
+  * snake_case C structs internally.
+  *
+  * Model layout: Whisper base multilingual — `modelConfig.whisper.{encoder,
+  * decoder}`, matching the canonical upstream example
+  * `nodejs-addon-examples/test_asr_non_streaming_whisper.js`. We use the int8
+  * quantized variants (`base-encoder.int8.onnx`, `base-decoder.int8.onnx`) to
+  * keep CPU latency low.
+  *
+  * Language pre-set: optional `language` (ISO 639-1 like "en", "ru") biases
+  * Whisper toward that language for accuracy and skips the per-utterance
+  * auto-detect. Threaded from `getActiveLocale()` in voice-command. When
+  * undefined, the multilingual model's built-in auto-detect runs — the
+  * historical default behavior.
+  *
+  * Decode path: SYNCHRONOUS `recognizer.decode(stream)` + `getResult(stream)`,
+  * same as upstream's example.
+  */
+
+ import type { Config } from "sherpa-onnx-node";
+
+ // ── Whisper fixed input contract ─────────────────────────────────────────────
+ // 16 kHz mono PCM. featureDim 80 matches the model's mel-spectrogram output.
+ const WHISPER_SAMPLE_RATE = 16000;
+ const WHISPER_FEATURE_DIM = 80;
+
+ // ── Defaults ─────────────────────────────────────────────────────────────────
+ // 4 threads is the sweet spot for Whisper base.en on a modern multi-core CPU
+ // per upstream tuning guidance (whisper.cpp benchmarks; the sherpa-onnx ORT
+ // thread pool follows the same pattern). More than 4 shows diminishing
+ // returns and can starve other Pi work on smaller machines.
+ const DEFAULT_NUM_THREADS = 4;
+ const DEFAULT_PROVIDER = "cpu";
+ // `tailPaddings` is the only decoder-adjacent knob sherpa-onnx exposes for
+ // Whisper. Per maintainer guidance in k2-fsa/sherpa-onnx#2787, audio under
+ // 30 s makes Whisper miss EOS and hallucinate; padding the encoder input
+ // reduces the chunk-end EOT bias that produces spurious terminal punctuation.
+ // 1000 frames ≈ 100 mel-frame steps of trailing silence.
+ const DEFAULT_TAIL_PADDINGS = 1000;
+
+ // ── Types ────────────────────────────────────────────────────────────────────
+
+ export interface SttEngineConfig {
+   encoderPath: string;
+   decoderPath: string;
+   tokensPath: string;
+   /** ISO 639-1 hint (e.g. "en", "ru"). Undefined → Whisper auto-detects. */
+   language?: string;
+   numThreads?: number;
+   provider?: string;
+ }
+
+ export interface SttEngine {
+   recognize(samples: Float32Array, sampleRate: number): Promise<string>;
+   release(): void;
+ }
+
+ // ── Factory ──────────────────────────────────────────────────────────────────
+
+ export async function createSttEngine(config: SttEngineConfig): Promise<SttEngine> {
+   const ns = await loadSherpaNamespace();
+   const recognizer = new ns.OfflineRecognizer(buildRecognizerConfig(config));
+
+   return {
+     async recognize(samples: Float32Array, sampleRate: number): Promise<string> {
+       if (samples.length === 0) return "";
+       const stream = recognizer.createStream();
+       stream.acceptWaveform({ samples, sampleRate });
+       recognizer.decode(stream);
+       return recognizer.getResult(stream).text.trim();
+     },
+     release(): void {
+       // sherpa-onnx-node@1.13.0 exposes no destructor; the native handle is
+       // GC-managed. Kept as a no-op so the lifecycle contract is stable for
+       // callers and tests.
+     },
+   };
+ }
+
+ // ── Internal ─────────────────────────────────────────────────────────────────
+
+ // sherpa-onnx-node ships as CJS; under ESM dynamic import only
+ // `OnlineRecognizer` is auto-detected as a named export. Everything else
+ // (including `OfflineRecognizer`) lives on `.default`. We fall back to the
+ // namespace itself in case a future ESM build flattens the shape.
+ async function loadSherpaNamespace(): Promise<{
+   OfflineRecognizer: typeof import("sherpa-onnx-node").OfflineRecognizer;
+ }> {
+   const mod = (await import("sherpa-onnx-node")) as Record<string, unknown> & {
+     default?: Record<string, unknown>;
+   };
+   return (mod.default ?? mod) as { OfflineRecognizer: typeof import("sherpa-onnx-node").OfflineRecognizer };
+ }
+
+ function buildRecognizerConfig(config: SttEngineConfig): Config {
+   return {
+     featConfig: {
+       sampleRate: WHISPER_SAMPLE_RATE,
+       featureDim: WHISPER_FEATURE_DIM,
+     },
+     modelConfig: {
+       whisper: {
+         encoder: config.encoderPath,
+         decoder: config.decoderPath,
+         tailPaddings: DEFAULT_TAIL_PADDINGS,
+         ...(config.language ? { language: config.language } : {}),
+       },
+       tokens: config.tokensPath,
+       numThreads: config.numThreads ?? DEFAULT_NUM_THREADS,
+       provider: config.provider ?? DEFAULT_PROVIDER,
+     },
+   };
+ }
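A minimal calling sketch for the wrapper (the model paths are hypothetical stand-ins for whatever model-download.ts resolves locally):

import { createSttEngine } from "./stt-engine.js";

const engine = await createSttEngine({
  encoderPath: "models/base-encoder.int8.onnx", // hypothetical local path
  decoderPath: "models/base-decoder.int8.onnx", // hypothetical local path
  tokensPath: "models/base-tokens.txt",         // hypothetical local path
  language: "en", // omit to let the multilingual model auto-detect
});
// One second of (silent) PCM here; real callers pass mic audio.
const text = await engine.recognize(new Float32Array(16000), 16000);
engine.release(); // no-op today; kept for lifecycle symmetry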
package/command/pipeline-runner.ts
@@ -0,0 +1,238 @@
+ import { appendErrorLog } from "../audio/error-log.js";
+ import { isHallucination } from "../audio/hallucination-filter.js";
+ import type { DecibriLike } from "../audio/mic-source.js";
+ import { TARGET_SAMPLE_RATE } from "../audio/mic-source.js";
+ import { bufferToFloat32, computeRmsFloat32, computeRmsInt16, samplesInInt16Chunk } from "../audio/pcm.js";
+ import type { SttEngine } from "../audio/stt-engine.js";
+ import { isHallucinationFilterEnabled } from "../config/voice-config.js";
+ import type { VoiceSession } from "../state/voice-session.js";
+
+ // 12 s soft cap: Whisper trains on 30 s windows and degrades on very short
+ // inputs; 5 s force-flushes routinely bisect a clause mid-word. 12 s is the
+ // dictation-tool consensus (LiveKit, whisper_streaming) — long enough to fit
+ // most sentences, short enough to bound first-token latency.
+ const MAX_SEGMENT_MS = 12000;
+ const MAX_SEGMENT_SAMPLES = (TARGET_SAMPLE_RATE * MAX_SEGMENT_MS) / 1000;
+
+ // When the cap fires, scan the trailing 800 ms for the chunk with the lowest
+ // RMS and split there instead of at the wall-clock boundary. The "head" half
+ // goes to Whisper, the "tail" carries forward as the start of the next
+ // segment. Cuts mid-breath instead of mid-syllable.
+ const CAP_CUT_SCAN_MS = 800;
+ const CAP_CUT_SCAN_SAMPLES = (TARGET_SAMPLE_RATE * CAP_CUT_SCAN_MS) / 1000;
+
+ // Whisper hallucinates filler ("Thanks for watching", "♪", "1/2 1/2…") on
+ // near-silent input. sherpa-onnx-node doesn't expose the decoder thresholds
+ // that would suppress this, so we gate at the input: skip segments whose mean
+ // RMS is below a floor. ~-46 dBFS sits between room noise and quiet speech.
+ const MIN_SEGMENT_RMS = 0.005;
+
+ // Cadence of rolling partial-transcript decodes during an active utterance.
+ // 1 s gives the user a continuously-refining preview without saturating the
+ // CPU on Whisper-base (typical decode of a sub-12 s buffer is well under 1 s
+ // on modern silicon, leaving headroom for mic + render). Single-flight: a
+ // new tick is skipped if the previous decode hasn't returned.
+ const PARTIAL_DECODE_INTERVAL_MS = 1000;
+
+ export interface PipelineHandle {
+   finalTranscriptPromise: Promise<string>;
+   isPaused(): boolean;
+   setPaused(paused: boolean): void;
+   setHallucinationFilterEnabled(enabled: boolean): void;
+   stop(): void;
+ }
+
+ export interface PipelineOptions {
+   hallucinationFilterEnabled?: boolean;
+ }
+
+ export function startDictationPipeline(
+   mic: DecibriLike,
+   sttEngine: SttEngine,
+   session: VoiceSession,
+   signal: AbortSignal,
+   options: PipelineOptions = {},
+ ): PipelineHandle {
+   let speechBuffer: Buffer[] = [];
+   let speechBufferSamples = 0;
+   let transcript = "";
+   let recognizing: Promise<void> = Promise.resolve();
+   let paused = false;
+   let hallucinationFilterEnabled = isHallucinationFilterEnabled(options);
+
+   // Single-flight gate for partial decodes. Combined with the interval
+   // throttle this means at most one partial recognize() at a time and at
+   // most one per PARTIAL_DECODE_INTERVAL_MS — never queueing a backlog if
+   // the CPU stalls.
+   let partialInFlight = false;
+   let lastPartialAt = 0;
+   // Bumps every time the buffer is committed (silence flush, cap flush, or
+   // session shutdown). A partial whose snapshot was taken at an earlier
+   // epoch is dropped on dispatch — protects against a slow partial decode
+   // painting stale text after the final commit.
+   let utteranceEpoch = 0;
+
+   const recognizeFinal = async (chunks: Buffer[]): Promise<void> => {
+     if (chunks.length === 0) return;
+     const samples = bufferToFloat32(Buffer.concat(chunks));
+     if (computeRmsFloat32(samples) < MIN_SEGMENT_RMS) {
+       // No audible content — but still finalize so any in-flight partial
+       // gets cleared by the reducer's empty-append branch.
+       session.dispatchAction({ kind: "audio_transcript_appended", text: "" });
+       return;
+     }
+     try {
+       const text = await sttEngine.recognize(samples, TARGET_SAMPLE_RATE);
+       if (!text || (hallucinationFilterEnabled && isHallucination(text))) {
+         session.dispatchAction({ kind: "audio_transcript_appended", text: "" });
+         return;
+       }
+       transcript = transcript ? `${transcript} ${text}` : text;
+       session.dispatchAction({ kind: "audio_transcript_appended", text });
+     } catch (err) {
+       // We deliberately do not surface this to the TUI: writing to stderr
+       // corrupts the active render, and `notify` would churn the chat for
+       // every dropped segment. Instead, append a breadcrumb to a file the
+       // user can `cat` later when investigating transcript gaps.
+       appendErrorLog("stt.recognize", err);
+       session.dispatchAction({ kind: "audio_transcript_appended", text: "" });
+     }
+   };
+
+   const flushBuffer = (): void => {
+     if (speechBuffer.length === 0) return;
+     const chunks = speechBuffer;
+     speechBuffer = [];
+     speechBufferSamples = 0;
+     utteranceEpoch++;
+     recognizing = recognizing.then(() => recognizeFinal(chunks));
+   };
+
+   const queueCapFlush = (): void => {
+     const cutIdx = findLowestEnergyCutIndex(speechBuffer);
+     if (cutIdx <= 0 || cutIdx >= speechBuffer.length) {
+       flushBuffer();
+       return;
+     }
+     const head = speechBuffer.slice(0, cutIdx);
+     const tail = speechBuffer.slice(cutIdx);
+     speechBuffer = tail;
+     speechBufferSamples = countSamples(tail);
+     utteranceEpoch++;
+     recognizing = recognizing.then(() => recognizeFinal(head));
+   };
+
+   // Rolling partial preview. Runs *outside* the `recognizing` chain so the
+   // preview latency isn't queued behind pending finals. Best-effort: a
+   // snapshot of the current buffer is decoded, and the result is dispatched
+   // as the new partial only if the utterance epoch hasn't advanced under us.
+   const tryEmitPartial = (): void => {
+     if (partialInFlight) return;
+     if (speechBuffer.length === 0) return;
+     const now = Date.now();
+     if (now - lastPartialAt < PARTIAL_DECODE_INTERVAL_MS) return;
+     lastPartialAt = now;
+     partialInFlight = true;
+     const snapshotEpoch = utteranceEpoch;
+     const snapshot = speechBuffer.slice();
+     void (async () => {
+       try {
+         const samples = bufferToFloat32(Buffer.concat(snapshot));
+         if (computeRmsFloat32(samples) < MIN_SEGMENT_RMS) return;
+         const text = await sttEngine.recognize(samples, TARGET_SAMPLE_RATE);
+         if (snapshotEpoch !== utteranceEpoch) return;
+         if (hallucinationFilterEnabled && isHallucination(text)) return;
+         session.dispatchAction({ kind: "audio_partial_transcript_set", text });
+       } catch (err) {
+         appendErrorLog("stt.recognize.partial", err);
+       } finally {
+         partialInFlight = false;
+       }
+     })();
+   };
+
+   mic.on("data", (chunk: Buffer) => {
+     const level = computeRmsInt16(chunk);
+     session.dispatchAction({ kind: "audio_chunk", level });
+     if (paused) return;
+     speechBuffer.push(chunk);
+     speechBufferSamples += samplesInInt16Chunk(chunk);
+     if (speechBufferSamples >= MAX_SEGMENT_SAMPLES) {
+       queueCapFlush();
+     } else {
+       tryEmitPartial();
+     }
+   });
+   mic.on("silence", () => {
+     if (paused) return;
+     flushBuffer();
+   });
+
+   const finalTranscriptPromise = waitForMicShutdown(mic, signal, async () => {
+     flushBuffer();
+     await recognizing;
+   }).then(() => transcript);
+
+   return {
+     finalTranscriptPromise,
+     isPaused: () => paused,
+     setPaused: (v) => {
+       paused = v;
+     },
+     setHallucinationFilterEnabled: (v) => {
+       hallucinationFilterEnabled = v;
+     },
+     stop: () => {
+       mic.stop();
+     },
+   };
+ }
+
+ function countSamples(chunks: Buffer[]): number {
+   let total = 0;
+   for (const chunk of chunks) total += samplesInInt16Chunk(chunk);
+   return total;
+ }
+
+ // Walk chunks newest-first up to CAP_CUT_SCAN_SAMPLES of audio; return the
+ // index of the lowest-RMS chunk in that window. Returns chunks.length when
+ // the buffer is too short to scan, telling the caller to fall back to a full
+ // flush.
+ function findLowestEnergyCutIndex(chunks: Buffer[]): number {
+   if (chunks.length < 2) return chunks.length;
+   let scanned = 0;
+   let lowestRms = Number.POSITIVE_INFINITY;
+   let lowestIdx = chunks.length;
+   for (let i = chunks.length - 1; i >= 1; i--) {
+     const chunk = chunks[i];
+     if (!chunk) continue;
+     const rms = computeRmsInt16(chunk);
+     if (rms < lowestRms) {
+       lowestRms = rms;
+       lowestIdx = i;
+     }
+     scanned += samplesInInt16Chunk(chunk);
+     if (scanned >= CAP_CUT_SCAN_SAMPLES) break;
+   }
+   return lowestIdx;
+ }
+
+ function waitForMicShutdown(mic: DecibriLike, signal: AbortSignal, onFinish: () => Promise<void>): Promise<void> {
+   return new Promise<void>((resolve) => {
+     const onAbort = () => {
+       mic.stop();
+     };
+     const finish = async () => {
+       signal.removeEventListener("abort", onAbort);
+       await onFinish();
+       resolve();
+     };
+     mic.once("end", finish);
+     mic.once("error", finish);
+     if (signal.aborted) {
+       mic.stop();
+     } else {
+       signal.addEventListener("abort", onAbort, { once: true });
+     }
+   });
+ }
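To see the lifecycle end to end, a hedged wiring sketch with stub collaborators (FakeMic and the inline engine/session objects are illustrative stand-ins, not the package's real mic-source, stt-engine, or voice-session):

import { EventEmitter } from "node:events";
import { startDictationPipeline } from "./pipeline-runner.js";

// Stand-ins satisfying only the surfaces this file touches (on/once/stop,
// recognize/release, dispatchAction).
class FakeMic extends EventEmitter {
  stop(): void {
    this.emit("end");
  }
}
const engine = { recognize: async () => "hello", release: () => {} };
const session = { dispatchAction: (a: { kind: string }) => console.log(a.kind) };

const abort = new AbortController();
const mic = new FakeMic();
// Loose casts keep the sketch short; real types come from mic-source.ts and
// voice-session.ts.
const handle = startDictationPipeline(mic as any, engine as any, session as any, abort.signal);

mic.emit("data", Buffer.alloc(3200)); // 100 ms of all-zero 16 kHz int16 PCM
mic.emit("silence");                  // silence flush commits the segment
abort.abort();                        // -> mic.stop() -> "end" settles the pipeline

// Prints "": the all-zero segment sits below MIN_SEGMENT_RMS, so the engine
// is never consulted and nothing is appended.
console.log(JSON.stringify(await handle.finalTranscriptPromise));

A segment with real speech clears the floor (0.005 RMS ≈ -46 dBFS, since 20·log10(0.005) ≈ -46) and reaches sttEngine.recognize, so the stub engine's "hello" would be appended to the final transcript instead.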
package/command/splash-runner.ts
@@ -0,0 +1,72 @@
+ import type { ExtensionCommandContext } from "@earendil-works/pi-coding-agent";
+ import {
+   SPLASH_FRAME_INTERVAL_MS,
+   SPLASH_FRAMES,
+   type SplashPhase,
+   SplashView,
+ } from "../view/components/splash-view.js";
+
+ export interface SplashController {
+   setPhase(phase: SplashPhase): void;
+ }
+
+ export interface SplashRunnerConfig {
+   initialPhase: SplashPhase;
+ }
+
+ export async function runWithSplash<T>(
+   ctx: ExtensionCommandContext,
+   config: SplashRunnerConfig,
+   work: (controller: SplashController) => Promise<T>,
+ ): Promise<T> {
+   let workResult: T | undefined;
+   let workError: unknown;
+
+   // Render inline (replace the editor) rather than as a bottom-anchored overlay.
+   // Bottom-anchored overlays force pi-tui to pad the chat buffer to the full
+   // terminal height, which pushes short chat content to the very top of the
+   // screen and leaves a large gap above the overlay. Inline mode keeps the
+   // component in the chat flow — it appears exactly where the editor was.
+   await ctx.ui.custom<void>((tui, theme, _kb, done) => {
+     const splash = new SplashView(theme);
+     let phase: SplashPhase = config.initialPhase;
+     let frame = 0;
+     splash.setProps({ phase, frame });
+
+     const tick = setInterval(() => {
+       frame = (frame + 1) % SPLASH_FRAMES.length;
+       splash.setProps({ phase, frame });
+       tui.requestRender();
+     }, SPLASH_FRAME_INTERVAL_MS);
+
+     const controller: SplashController = {
+       setPhase(next: SplashPhase) {
+         phase = next;
+         splash.setProps({ phase, frame });
+         tui.requestRender();
+       },
+     };
+
+     work(controller).then(
+       (result) => {
+         workResult = result;
+         clearInterval(tick);
+         done(undefined);
+       },
+       (err) => {
+         workError = err instanceof Error ? err : new Error(String(err));
+         clearInterval(tick);
+         done(undefined);
+       },
+     );
+
+     return {
+       render: (w: number) => splash.render(w),
+       invalidate: () => splash.setProps({ phase, frame }),
+       handleInput: (_d: string) => {},
+     };
+   });
+
+   if (workError) throw workError;
+   return workResult as T;
+ }
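Typical call shape, as a sketch (the phase names and helpers are hypothetical; real SplashPhase values live in splash-view.ts):

const transcript = await runWithSplash(ctx, { initialPhase: "loading" }, async (splash) => {
  await ensureModelDownloaded(); // hypothetical helper
  splash.setPhase("listening");  // advance the animation without tearing down the view
  return captureDictation();     // hypothetical helper resolving to the final transcript
});

Because the work callback drives phase changes through the controller, the splash is created once and animates continuously across phases; errors from the work promise are captured and rethrown only after the inline view has been cleanly torn down.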