@designcrowd/fe-shared-lib 1.7.1 → 1.8.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/skills/playwright-cli/SKILL.md +278 -0
- package/.claude/skills/playwright-cli/references/request-mocking.md +87 -0
- package/.claude/skills/playwright-cli/references/running-code.md +232 -0
- package/.claude/skills/playwright-cli/references/session-management.md +169 -0
- package/.claude/skills/playwright-cli/references/storage-state.md +275 -0
- package/.claude/skills/playwright-cli/references/test-generation.md +88 -0
- package/.claude/skills/playwright-cli/references/tracing.md +139 -0
- package/.claude/skills/playwright-cli/references/video-recording.md +43 -0
- package/CLAUDE.md +35 -0
- package/dist/css/tailwind-brandCrowd.css +25 -0
- package/dist/css/tailwind-brandPage.css +21 -0
- package/dist/css/tailwind-crazyDomains.css +25 -0
- package/dist/css/tailwind-designCom.css +25 -0
- package/dist/css/tailwind-designCrowd.css +25 -0
- package/index.js +2 -0
- package/package.json +3 -1
- package/src/atoms/components/Icon/Icon.stories.js +1 -0
- package/src/atoms/components/Icon/Icon.vue +2 -0
- package/src/atoms/components/Icon/icons/microphone.vue +5 -0
- package/src/atoms/components/Icon/icons/select-all.vue +12 -4
- package/src/atoms/components/VoiceToTextButton/VoiceToTextButton.stories.ts +242 -0
- package/src/atoms/components/VoiceToTextButton/VoiceToTextButton.vue +147 -0
- package/src/types/speech-recognition.d.ts +8 -0
- package/src/useVoiceToText.ts +196 -0
|
<!--
  VoiceToTextButton — microphone toggle button backed by the shared
  useVoiceToText singleton (Web Speech API). Renders nothing when the
  browser does not support SpeechRecognition (v-if="isSupported").
-->
<template>
  <button
    v-if="isSupported"
    type="button"
    :disabled="disabled"
    :aria-label="isListening ? 'Stop voice input' : 'Start voice input'"
    data-test="voice-to-text-button"
    class="voice-prompt-button tw-rounded-full tw-border-0 tw-flex tw-items-center tw-justify-center tw-transition-all tw-duration-200 tw-cursor-pointer focus:tw-outline-none focus-visible:tw-ring-2 focus-visible:tw-ring-offset-2 focus-visible:tw-ring-primary-500"
    :class="[
      sizeClasses,
      variantClasses.button,
      isListening ? 'voice-prompt-listening' : '',
      disabled ? 'tw-opacity-50 tw-cursor-not-allowed' : '',
    ]"
    :style="isListening ? { '--voice-bg': variantClasses.recordingBg } : {}"
    @click="toggle"
  >
    <Icon
      name="microphone"
      :size="iconSize"
      :class="variantClasses.icon"
      aria-hidden="true"
    />
  </button>
</template>

<script setup lang="ts">
import { watch, toRef, computed } from 'vue';
import Icon from '../Icon/Icon.vue';
import { useVoiceToText } from '../../../useVoiceToText';

type ButtonSize = 'sm' | 'md' | 'lg';
type ButtonVariant = 'light' | 'dark';

interface VoiceToTextButtonProps {
  // BCP-47 language tag forwarded to SpeechRecognition (default 'en-US').
  lang?: string;
  disabled?: boolean;
  size?: ButtonSize;
  // 'light'/'dark' pick contrasting hover/icon/recording colors below.
  variant?: ButtonVariant;
}

const props = withDefaults(defineProps<VoiceToTextButtonProps>(), {
  lang: 'en-US',
  disabled: false,
  size: 'md',
  variant: 'dark',
});

// Tailwind width/height classes per button size.
const sizeClasses = computed(() => {
  const sizes: Record<ButtonSize, string> = {
    sm: 'tw-w-8 tw-h-8',
    md: 'tw-w-10 tw-h-10',
    lg: 'tw-w-12 tw-h-12',
  };
  return sizes[props.size];
});

// Icon size per button size; 'lg' buttons intentionally keep the 'md' icon.
const iconSize = computed(() => {
  const sizes: Record<ButtonSize, string> = {
    sm: 'sm',
    md: 'md',
    lg: 'md',
  };
  return sizes[props.size];
});

const emit = defineEmits<{
  'on-transcript': [transcript: string];
  'on-interim-transcript': [transcript: string];
  'on-start': [];
  'on-stop': [];
  'on-error': [error: string];
}>();

// Shared singleton — state here is common to every VoiceToTextButton on the page.
const { isSupported, isListening, transcript, isFinal, error, toggle, setLang } = useVoiceToText({
  lang: props.lang,
});

// Button/icon classes plus the recording background color used by the
// --voice-bg CSS variable while listening.
const variantClasses = computed(() => {
  if (props.variant === 'light') {
    return {
      button: isListening.value ? '' : 'tw-bg-transparent hover:tw-bg-grayscale-200',
      icon: isListening.value ? 'tw-text-grayscale-700' : 'tw-text-grayscale-600',
      recordingBg: '#EDEDED',
    };
  }
  return {
    button: isListening.value ? '' : 'tw-bg-transparent hover:tw-bg-grayscale-700',
    icon: isListening.value ? 'tw-text-white' : 'tw-text-grayscale-400',
    recordingBg: '#606060',
  };
});

// Keep recognition language in sync with prop
watch(toRef(props, 'lang'), setLang);

// Watch for transcript changes and emit appropriate events.
// NOTE(review): with flush:'sync' the composable assigns transcript before
// isFinal, so when a final result arrives this watcher fires once with the
// final text while isFinal is still false (emitting on-interim-transcript)
// and again for on-transcript — confirm consumers tolerate that ordering.
watch(
  [transcript, isFinal],
  ([newTranscript, newIsFinal]) => {
    if (newIsFinal && newTranscript) {
      emit('on-transcript', newTranscript);
    } else if (newTranscript) {
      emit('on-interim-transcript', newTranscript);
    }
  },
  { flush: 'sync' },
);

// Watch for listening state changes; emit on-start/on-stop on the edges only.
watch(isListening, (newVal, oldVal) => {
  if (newVal && !oldVal) {
    emit('on-start');
  }
  if (!newVal && oldVal) {
    emit('on-stop');
  }
});

// Watch for errors (error messages come pre-mapped from the composable).
watch(error, (newError) => {
  if (newError) {
    emit('on-error', newError);
  }
});
</script>

<style scoped>
.voice-prompt-button {
  /* Suppress the mobile-webkit tap flash on the circular button. */
  -webkit-tap-highlight-color: transparent;
}

/* Applied while listening; --voice-bg is set inline from variantClasses. */
.voice-prompt-listening {
  background-color: var(--voice-bg);
  animation: voice-bg-pulse 2s ease-in-out infinite;
}

@keyframes voice-bg-pulse {
  0%,
  100% {
    opacity: 1;
  }
  50% {
    opacity: 0.5;
  }
}
</style>
/* eslint-disable no-undef */
import { ref, computed, readonly, type Ref, type ComputedRef, type DeepReadonly } from 'vue';

/** Options accepted by {@link useVoiceToText}. */
export interface UseVoiceToTextOptions {
  /** BCP-47 language tag applied to the shared recognizer (default 'en-US'). */
  lang?: string;
}

/** Read-only reactive state and actions returned by {@link useVoiceToText}. */
export interface UseVoiceToTextReturn {
  /** True when the browser exposes SpeechRecognition (incl. webkit prefix). */
  isSupported: ComputedRef<boolean>;
  isListening: DeepReadonly<Ref<boolean>>;
  /** Latest interim OR final transcript text; see isFinal to tell which. */
  transcript: DeepReadonly<Ref<string>>;
  isFinal: DeepReadonly<Ref<boolean>>;
  /** Human-readable error message, auto-cleared after ERROR_CLEAR_DELAY ms. */
  error: DeepReadonly<Ref<string | null>>;
  start: () => void;
  stop: () => void;
  toggle: () => void;
  // eslint-disable-next-line no-unused-vars
  setLang: (lang: string) => void;
}

// Mutable (non-readonly) view of the shared refs, for internal writes.
interface VoiceToTextState {
  isListening: Ref<boolean>;
  transcript: Ref<string>;
  isFinal: Ref<boolean>;
  error: Ref<string | null>;
}

// Singleton instance and state (lazily initialized) — module-level so every
// caller of useVoiceToText() shares one recognizer and one set of refs.
let recognition: SpeechRecognition | null = null;
let isInitialized = false;
let errorClearTimeout: ReturnType<typeof setTimeout> | null = null;
let state: VoiceToTextState | null = null;

// Error message mapping per spec (keys are SpeechRecognitionErrorEvent.error codes)
const ERROR_MESSAGES: Record<string, string> = {
  'not-allowed': 'Microphone permission was denied. Please allow access.',
  'language-not-supported': 'This language is not supported.',
  network: 'A network error occurred. Please check your connection.',
  'audio-capture': 'No microphone was found or microphone is not working.',
};

// Milliseconds before a surfaced error message is cleared automatically.
const ERROR_CLEAR_DELAY = 5000;

// Lazily create the shared reactive state (refs are created only once).
function getState(): VoiceToTextState {
  if (!state) {
    state = {
      isListening: ref(false),
      transcript: ref(''),
      isFinal: ref(false),
      error: ref<string | null>(null),
    };
  }
  return state;
}

/**
 * Singleton composable that wraps the Web Speech API (SpeechRecognition).
 * All calls to useVoiceToText() return the same shared instance.
 *
 * NOTE(review): every call (e.g. every component mount) overwrites the shared
 * recognizer's lang with its own options.lang (default 'en-US'), so a later
 * mount can reset a language chosen earlier via setLang — confirm intended.
 */
export function useVoiceToText(options: UseVoiceToTextOptions = {}): UseVoiceToTextReturn {
  const { lang = 'en-US' } = options;

  // Get or create shared state
  const { isListening, transcript, isFinal, error } = getState();

  // Check for browser support (Safari/older Chrome use the webkit prefix).
  const SpeechRecognitionCtor: typeof SpeechRecognition | null =
    typeof window !== 'undefined' ? window.SpeechRecognition || window.webkitSpeechRecognition : null;

  const isSupported = computed(() => !!SpeechRecognitionCtor);

  // Initialize singleton once
  if (!isInitialized && SpeechRecognitionCtor) {
    recognition = new SpeechRecognitionCtor();
    recognition.continuous = true;
    recognition.interimResults = true;

    recognition.onresult = (event: SpeechRecognitionEvent) => {
      let interimTranscript = '';
      let finalTranscript = '';

      // Only walk results added since the last event (resultIndex onward).
      for (let i = event.resultIndex; i < event.results.length; i += 1) {
        const result = event.results[i];
        if (result.isFinal) {
          finalTranscript += result[0].transcript;
        } else {
          interimTranscript += result[0].transcript;
        }
      }

      // Final text wins over interim for the same event batch.
      if (finalTranscript) {
        transcript.value = finalTranscript;
        isFinal.value = true;
      } else {
        transcript.value = interimTranscript;
        isFinal.value = false;
      }
    };

    recognition.onerror = (event: SpeechRecognitionErrorEvent) => {
      // Suppress no-speech and aborted errors per spec
      if (event.error === 'no-speech' || event.error === 'aborted') {
        return;
      }

      const message = ERROR_MESSAGES[event.error] || 'An error occurred with speech recognition.';
      error.value = message;

      // eslint-disable-next-line no-console
      console.warn('[useVoiceToText]', event.error, message);

      // Auto-clear error after timeout (reset the timer if one is pending).
      if (errorClearTimeout) {
        clearTimeout(errorClearTimeout);
      }
      errorClearTimeout = setTimeout(() => {
        error.value = null;
      }, ERROR_CLEAR_DELAY);
    };

    // Browser may end recognition on its own (silence, tab switch, etc.);
    // keep the shared flag in sync either way.
    recognition.onend = () => {
      isListening.value = false;
    };

    isInitialized = true;
  }

  // Update language on existing instance
  if (recognition) {
    recognition.lang = lang;
  }

  // Begin listening; resets transcript/error state first. No-op when
  // unsupported or already listening.
  const start = () => {
    if (!recognition || isListening.value) return;

    // Clear previous state
    transcript.value = '';
    isFinal.value = false;
    error.value = null;
    if (errorClearTimeout) {
      clearTimeout(errorClearTimeout);
      errorClearTimeout = null;
    }

    try {
      recognition.start();
      isListening.value = true;
    } catch (e: unknown) {
      // Handle case where recognition is already started
      // eslint-disable-next-line no-console
      console.warn('[useVoiceToText] Failed to start:', (e as Error).message);
    }
  };

  // Stop listening. Flag is cleared before stop() so watchers see the edge
  // even if the browser never fires onend.
  const stop = () => {
    if (!recognition || !isListening.value) return;

    isListening.value = false;
    try {
      recognition.stop();
    } catch (e: unknown) {
      // Handle case where recognition is already stopped
      // eslint-disable-next-line no-console
      console.warn('[useVoiceToText] Failed to stop:', (e as Error).message);
    }
  };

  const toggle = () => {
    if (isListening.value) {
      stop();
    } else {
      start();
    }
  };

  // Change the recognizer language; takes effect on the next start() per the
  // Web Speech API (the current session keeps its language).
  const setLang = (newLang: string) => {
    if (recognition) {
      recognition.lang = newLang;
    }
  };

  return {
    // State (reactive, read-only)
    isSupported,
    isListening: readonly(isListening),
    transcript: readonly(transcript),
    isFinal: readonly(isFinal),
    error: readonly(error),

    // Actions
    start,
    stop,
    toggle,
    setLang,
  };
}