react-optimistic-chat 1.0.0 → 1.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.d.mts CHANGED
@@ -43,6 +43,65 @@ type Props$2<T> = {
  };
  declare function ChatList<T>({ messages, messageMapper, messageRenderer, className, loadingRenderer, }: Props$2<T>): react_jsx_runtime.JSX.Element;

+ type VoiceRecognitionController$1 = {
+   start: () => void;
+   stop: () => void;
+   isRecording: boolean;
+ };
+ type ButtonConfig = {
+   className?: string;
+   icon?: React.ReactNode;
+ };
+ type Props$1 = {
+   onSend: (value: string) => void | Promise<void>;
+   voice?: boolean | VoiceRecognitionController$1;
+   placeholder?: string;
+   className?: string;
+   inputClassName?: string;
+   micButton?: ButtonConfig;
+   recordingButton?: ButtonConfig;
+   sendButton?: ButtonConfig;
+   sendingButton?: ButtonConfig;
+   maxHeight?: number;
+   value?: string;
+   onChange?: (value: string) => void;
+   isSending: boolean;
+   submitOnEnter?: boolean;
+ };
+ declare function ChatInput({ onSend, voice, placeholder, className, inputClassName, micButton, recordingButton, sendButton, sendingButton, maxHeight, value, onChange, isSending, submitOnEnter, }: Props$1): react_jsx_runtime.JSX.Element;
+
+ type Props<T> = {
+   messages: T[];
+   messageMapper?: (msg: T) => Message;
+   messageRenderer?: (msg: Message) => React.ReactNode;
+   loadingRenderer?: React.ReactNode;
+   listClassName?: string;
+   onSend: (value: string) => void | Promise<void>;
+   isSending: boolean;
+   disableVoice?: boolean;
+   placeholder?: string;
+   inputClassName?: string;
+   className?: string;
+ };
+ declare function ChatContainer<T>({ messages, messageMapper, messageRenderer, loadingRenderer, listClassName, onSend, isSending, disableVoice, placeholder, inputClassName, className, }: Props<T>): react_jsx_runtime.JSX.Element;
+
+ type MessageMapper$1<TRaw> = (raw: TRaw) => Message;
+ type Options$2<TQueryRaw, TMutationRaw> = {
+   queryKey: readonly unknown[];
+   queryFn: () => Promise<TQueryRaw[]>;
+   mutationFn: (content: string) => Promise<TMutationRaw>;
+   map: MessageMapper$1<TQueryRaw | TMutationRaw>;
+   onError?: (error: unknown) => void;
+   staleTime?: number;
+   gcTime?: number;
+ };
+ declare function useOptimisticChat<TQeuryRaw, TMutationRaw>({ queryKey, queryFn, mutationFn, map, onError, staleTime, gcTime, }: Options$2<TQeuryRaw, TMutationRaw>): {
+   messages: Message[];
+   sendUserMessage: (content: string) => void;
+   isPending: boolean;
+   isInitialLoading: boolean;
+ };
+
  interface SpeechGrammar {
    src: string;
    weight: number;
@@ -100,59 +159,42 @@ declare global {
      webkitSpeechRecognition: new () => SpeechRecognition;
    }
  }
- type ButtonConfig = {
-   className?: string;
-   icon?: React.ReactNode;
- };
- type Props$1 = {
-   onSend: (value: string) => void | Promise<void>;
-   disableVoice?: boolean;
-   placeholder?: string;
-   className?: string;
-   inputClassName?: string;
-   micButton?: ButtonConfig;
-   recordingButton?: ButtonConfig;
-   sendButton?: ButtonConfig;
-   sendingButton?: ButtonConfig;
-   maxHeight?: number;
-   value?: string;
-   onChange?: (value: string) => void;
-   isSending: boolean;
-   submitOnEnter?: boolean;
-   speechLang?: string;
+ type Options$1 = {
+   lang?: string;
+   onStart?: () => void;
+   onEnd?: () => void;
+   onError?: (error: unknown) => void;
  };
- declare function ChatInput({ onSend, disableVoice, placeholder, className, inputClassName, micButton, recordingButton, sendButton, sendingButton, maxHeight, value, onChange, isSending, submitOnEnter, speechLang, }: Props$1): react_jsx_runtime.JSX.Element;
-
- type Props<T> = {
-   messages: T[];
-   messageMapper?: (msg: T) => Message;
-   messageRenderer?: (msg: Message) => React.ReactNode;
-   loadingRenderer?: React.ReactNode;
-   listClassName?: string;
-   onSend: (value: string) => void | Promise<void>;
-   isSending: boolean;
-   disableVoice?: boolean;
-   placeholder?: string;
-   inputClassName?: string;
-   className?: string;
+ declare function useBrowserSpeechRecognition({ lang, onStart, onEnd, onError, }?: Options$1): {
+   start: () => void;
+   stop: () => void;
+   isRecording: boolean;
+   onTranscript: (text: string) => void;
  };
- declare function ChatContainer<T>({ messages, messageMapper, messageRenderer, loadingRenderer, listClassName, onSend, isSending, disableVoice, placeholder, inputClassName, className, }: Props<T>): react_jsx_runtime.JSX.Element;

  type MessageMapper<TRaw> = (raw: TRaw) => Message;
+ type VoiceRecognitionController = {
+   start: () => void;
+   stop: () => void;
+   isRecording: boolean;
+   onTranscript: (text: string) => void;
+ };
  type Options<TQueryRaw, TMutationRaw> = {
    queryKey: readonly unknown[];
    queryFn: () => Promise<TQueryRaw[]>;
    mutationFn: (content: string) => Promise<TMutationRaw>;
    map: MessageMapper<TQueryRaw | TMutationRaw>;
+   voice: VoiceRecognitionController;
    onError?: (error: unknown) => void;
    staleTime?: number;
    gcTime?: number;
  };
- declare function useOptimisticChat<TQeuryRaw, TMutationRaw>({ queryKey, queryFn, mutationFn, map, onError, staleTime, gcTime, }: Options<TQeuryRaw, TMutationRaw>): {
+ declare function useVoiceOptimisticChat<TQeuryRaw, TMutationRaw>({ queryKey, queryFn, mutationFn, map, voice, onError, staleTime, gcTime, }: Options<TQeuryRaw, TMutationRaw>): {
    messages: Message[];
-   sendUserMessage: (content: string) => void;
    isPending: boolean;
    isInitialLoading: boolean;
+   startRecording: () => Promise<void>;
+   stopRecording: () => void;
  };

- export { ChatContainer, ChatInput, ChatList, ChatMessage, LoadingSpinner, type Message, SendingDots, useOptimisticChat };
+ export { ChatContainer, ChatInput, ChatList, ChatMessage, LoadingSpinner, type Message, SendingDots, useBrowserSpeechRecognition, useOptimisticChat, useVoiceOptimisticChat };
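Editor's note: the declaration diff above replaces ChatInput's `disableVoice`/`speechLang` props with a single `voice` prop that accepts either a boolean or an external `VoiceRecognitionController`. A minimal migration sketch follows; the `Composer` component, its handler, and the `en-US` language value are illustrative assumptions, not part of the package.

```tsx
// Sketch: adopting the new `voice` prop in 1.1.0 (hypothetical component).
import { ChatInput, useBrowserSpeechRecognition } from "react-optimistic-chat";

export function Composer({ onSend }: { onSend: (v: string) => Promise<void> }) {
  // 1.0.0: <ChatInput disableVoice speechLang="en-US" ... />
  // 1.1.0: pass `voice={false}` to disable voice, leave the default (`true`)
  // for the built-in recognizer, or hand in your own controller:
  const voice = useBrowserSpeechRecognition({ lang: "en-US" });
  return <ChatInput onSend={onSend} voice={voice} isSending={false} />;
}
```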
package/dist/index.d.ts CHANGED
@@ -43,6 +43,65 @@ type Props$2<T> = {
  };
  declare function ChatList<T>({ messages, messageMapper, messageRenderer, className, loadingRenderer, }: Props$2<T>): react_jsx_runtime.JSX.Element;

+ type VoiceRecognitionController$1 = {
+   start: () => void;
+   stop: () => void;
+   isRecording: boolean;
+ };
+ type ButtonConfig = {
+   className?: string;
+   icon?: React.ReactNode;
+ };
+ type Props$1 = {
+   onSend: (value: string) => void | Promise<void>;
+   voice?: boolean | VoiceRecognitionController$1;
+   placeholder?: string;
+   className?: string;
+   inputClassName?: string;
+   micButton?: ButtonConfig;
+   recordingButton?: ButtonConfig;
+   sendButton?: ButtonConfig;
+   sendingButton?: ButtonConfig;
+   maxHeight?: number;
+   value?: string;
+   onChange?: (value: string) => void;
+   isSending: boolean;
+   submitOnEnter?: boolean;
+ };
+ declare function ChatInput({ onSend, voice, placeholder, className, inputClassName, micButton, recordingButton, sendButton, sendingButton, maxHeight, value, onChange, isSending, submitOnEnter, }: Props$1): react_jsx_runtime.JSX.Element;
+
+ type Props<T> = {
+   messages: T[];
+   messageMapper?: (msg: T) => Message;
+   messageRenderer?: (msg: Message) => React.ReactNode;
+   loadingRenderer?: React.ReactNode;
+   listClassName?: string;
+   onSend: (value: string) => void | Promise<void>;
+   isSending: boolean;
+   disableVoice?: boolean;
+   placeholder?: string;
+   inputClassName?: string;
+   className?: string;
+ };
+ declare function ChatContainer<T>({ messages, messageMapper, messageRenderer, loadingRenderer, listClassName, onSend, isSending, disableVoice, placeholder, inputClassName, className, }: Props<T>): react_jsx_runtime.JSX.Element;
+
+ type MessageMapper$1<TRaw> = (raw: TRaw) => Message;
+ type Options$2<TQueryRaw, TMutationRaw> = {
+   queryKey: readonly unknown[];
+   queryFn: () => Promise<TQueryRaw[]>;
+   mutationFn: (content: string) => Promise<TMutationRaw>;
+   map: MessageMapper$1<TQueryRaw | TMutationRaw>;
+   onError?: (error: unknown) => void;
+   staleTime?: number;
+   gcTime?: number;
+ };
+ declare function useOptimisticChat<TQeuryRaw, TMutationRaw>({ queryKey, queryFn, mutationFn, map, onError, staleTime, gcTime, }: Options$2<TQeuryRaw, TMutationRaw>): {
+   messages: Message[];
+   sendUserMessage: (content: string) => void;
+   isPending: boolean;
+   isInitialLoading: boolean;
+ };
+
  interface SpeechGrammar {
    src: string;
    weight: number;
@@ -100,59 +159,42 @@ declare global {
      webkitSpeechRecognition: new () => SpeechRecognition;
    }
  }
- type ButtonConfig = {
-   className?: string;
-   icon?: React.ReactNode;
- };
- type Props$1 = {
-   onSend: (value: string) => void | Promise<void>;
-   disableVoice?: boolean;
-   placeholder?: string;
-   className?: string;
-   inputClassName?: string;
-   micButton?: ButtonConfig;
-   recordingButton?: ButtonConfig;
-   sendButton?: ButtonConfig;
-   sendingButton?: ButtonConfig;
-   maxHeight?: number;
-   value?: string;
-   onChange?: (value: string) => void;
-   isSending: boolean;
-   submitOnEnter?: boolean;
-   speechLang?: string;
+ type Options$1 = {
+   lang?: string;
+   onStart?: () => void;
+   onEnd?: () => void;
+   onError?: (error: unknown) => void;
  };
- declare function ChatInput({ onSend, disableVoice, placeholder, className, inputClassName, micButton, recordingButton, sendButton, sendingButton, maxHeight, value, onChange, isSending, submitOnEnter, speechLang, }: Props$1): react_jsx_runtime.JSX.Element;
-
- type Props<T> = {
-   messages: T[];
-   messageMapper?: (msg: T) => Message;
-   messageRenderer?: (msg: Message) => React.ReactNode;
-   loadingRenderer?: React.ReactNode;
-   listClassName?: string;
-   onSend: (value: string) => void | Promise<void>;
-   isSending: boolean;
-   disableVoice?: boolean;
-   placeholder?: string;
-   inputClassName?: string;
-   className?: string;
+ declare function useBrowserSpeechRecognition({ lang, onStart, onEnd, onError, }?: Options$1): {
+   start: () => void;
+   stop: () => void;
+   isRecording: boolean;
+   onTranscript: (text: string) => void;
  };
- declare function ChatContainer<T>({ messages, messageMapper, messageRenderer, loadingRenderer, listClassName, onSend, isSending, disableVoice, placeholder, inputClassName, className, }: Props<T>): react_jsx_runtime.JSX.Element;

  type MessageMapper<TRaw> = (raw: TRaw) => Message;
+ type VoiceRecognitionController = {
+   start: () => void;
+   stop: () => void;
+   isRecording: boolean;
+   onTranscript: (text: string) => void;
+ };
  type Options<TQueryRaw, TMutationRaw> = {
    queryKey: readonly unknown[];
    queryFn: () => Promise<TQueryRaw[]>;
    mutationFn: (content: string) => Promise<TMutationRaw>;
    map: MessageMapper<TQueryRaw | TMutationRaw>;
+   voice: VoiceRecognitionController;
    onError?: (error: unknown) => void;
    staleTime?: number;
    gcTime?: number;
  };
- declare function useOptimisticChat<TQeuryRaw, TMutationRaw>({ queryKey, queryFn, mutationFn, map, onError, staleTime, gcTime, }: Options<TQeuryRaw, TMutationRaw>): {
+ declare function useVoiceOptimisticChat<TQeuryRaw, TMutationRaw>({ queryKey, queryFn, mutationFn, map, voice, onError, staleTime, gcTime, }: Options<TQeuryRaw, TMutationRaw>): {
    messages: Message[];
-   sendUserMessage: (content: string) => void;
    isPending: boolean;
    isInitialLoading: boolean;
+   startRecording: () => Promise<void>;
+   stopRecording: () => void;
  };

- export { ChatContainer, ChatInput, ChatList, ChatMessage, LoadingSpinner, type Message, SendingDots, useOptimisticChat };
+ export { ChatContainer, ChatInput, ChatList, ChatMessage, LoadingSpinner, type Message, SendingDots, useBrowserSpeechRecognition, useOptimisticChat, useVoiceOptimisticChat };
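Editor's note: both declaration files (`.d.ts` and `.d.mts`) expose the same new surface: `useBrowserSpeechRecognition` returns a controller whose `onTranscript` handler is assignable, and `useVoiceOptimisticChat` consumes such a controller to stream a live transcript into an optimistic USER message. A sketch of how the two hooks might be wired together; the endpoints, the `ApiMessage` shape, and the mapper are hypothetical, the `Message` fields used (id, role, content) are inferred from the compiled output below, and a `@tanstack/react-query` QueryClientProvider is assumed.

```tsx
// Hypothetical wiring of the two new hooks; only the imported names
// come from the package, everything else is an assumption.
import {
  useBrowserSpeechRecognition,
  useVoiceOptimisticChat,
  type Message,
} from "react-optimistic-chat";

type ApiMessage = { id: string; author: "USER" | "AI"; text: string };

// Map a raw API record into the package's Message shape (fields inferred).
const toMessage = (raw: ApiMessage): Message => ({
  id: raw.id,
  role: raw.author,
  content: raw.text,
});

export function useVoiceChat() {
  const voice = useBrowserSpeechRecognition({ lang: "en-US" });
  // The hook takes over voice.onTranscript to update the optimistic USER
  // bubble while recording; stopRecording() submits the final transcript.
  return useVoiceOptimisticChat({
    queryKey: ["chat"] as const,
    queryFn: async (): Promise<ApiMessage[]> =>
      (await fetch("/api/messages")).json(),
    mutationFn: async (content: string): Promise<ApiMessage> =>
      (await fetch("/api/messages", {
        method: "POST",
        body: JSON.stringify({ content }),
      })).json(),
    map: toMessage,
    voice,
  });
  // returns { messages, isPending, isInitialLoading, startRecording, stopRecording }
}
```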
package/dist/index.js CHANGED
@@ -53,7 +53,9 @@ __export(index_exports, {
    ChatMessage: () => ChatMessage,
    LoadingSpinner: () => LoadingSpinner,
    SendingDots: () => SendingDots,
-   useOptimisticChat: () => useOptimisticChat
+   useBrowserSpeechRecognition: () => useBrowserSpeechRecognition,
+   useOptimisticChat: () => useOptimisticChat,
+   useVoiceOptimisticChat: () => useVoiceOptimisticChat
  });
  module.exports = __toCommonJS(index_exports);

@@ -220,11 +222,82 @@ function ChatList({
  }

  // src/components/ChatInput.tsx
+ var import_react4 = require("react");
+
+ // src/hooks/useBrowserSpeechRecognition.ts
  var import_react3 = require("react");
+ function useBrowserSpeechRecognition({
+   lang = "ko-KR",
+   onStart,
+   onEnd,
+   onError
+ } = {}) {
+   const [isRecording, setIsRecording] = (0, import_react3.useState)(false);
+   const recognitionRef = (0, import_react3.useRef)(null);
+   const onTranscriptRef = (0, import_react3.useRef)(void 0);
+   const start = () => {
+     const Speech = window.SpeechRecognition || window.webkitSpeechRecognition;
+     if (!Speech) {
+       onError == null ? void 0 : onError(new Error("SpeechRecognition not supported"));
+       return;
+     }
+     const recognition = new Speech();
+     recognition.lang = lang;
+     recognition.continuous = true;
+     recognition.interimResults = true;
+     recognition.onstart = () => {
+       setIsRecording(true);
+       onStart == null ? void 0 : onStart();
+     };
+     recognition.onend = () => {
+       setIsRecording(false);
+       onEnd == null ? void 0 : onEnd();
+     };
+     recognition.onresult = (event) => {
+       var _a;
+       const transcript = Array.from(event.results).map((r) => {
+         var _a2;
+         return (_a2 = r[0]) == null ? void 0 : _a2.transcript;
+       }).join("");
+       (_a = onTranscriptRef.current) == null ? void 0 : _a.call(onTranscriptRef, transcript);
+     };
+     recognition.onerror = (e) => {
+       onError == null ? void 0 : onError(e);
+     };
+     recognitionRef.current = recognition;
+     recognition.start();
+   };
+   const stop = () => {
+     var _a;
+     (_a = recognitionRef.current) == null ? void 0 : _a.stop();
+   };
+   (0, import_react3.useEffect)(() => {
+     return () => {
+       var _a;
+       (_a = recognitionRef.current) == null ? void 0 : _a.stop();
+       recognitionRef.current = null;
+     };
+   }, []);
+   return {
+     start,
+     // start speech recognition
+     stop,
+     // stop speech recognition
+     isRecording,
+     // current recognition state
+     // setter for injecting transcript-handling logic from outside;
+     // recognition events fire outside the React lifecycle, so the handler is kept in a ref
+     set onTranscript(fn) {
+       onTranscriptRef.current = fn;
+     }
+   };
+ }
+
+ // src/components/ChatInput.tsx
  var import_jsx_runtime5 = require("react/jsx-runtime");
  function ChatInput({
    onSend,
-   disableVoice = false,
+   voice = true,
    placeholder = "\uBA54\uC2DC\uC9C0\uB97C \uC785\uB825\uD558\uC138\uC694...",
    className = "",
    inputClassName = "",
@@ -236,34 +309,29 @@ function ChatInput({
    value,
    onChange,
    isSending,
-   submitOnEnter = false,
-   speechLang = "ko-KR"
+   submitOnEnter = false
  }) {
-   const [innerText, setInnerText] = (0, import_react3.useState)("");
-   const [isRecording, setIsRecording] = (0, import_react3.useState)(false);
-   const textareaRef = (0, import_react3.useRef)(null);
+   var _a;
+   const [innerText, setInnerText] = (0, import_react4.useState)("");
+   const textareaRef = (0, import_react4.useRef)(null);
    const isControlled = value !== void 0;
    const text = isControlled ? value : innerText;
    const isEmpty = text.trim().length === 0;
-   const recognition = (0, import_react3.useRef)(null);
-   const isVoiceMode = !disableVoice && !isSending && (isEmpty || isRecording);
-   (0, import_react3.useEffect)(() => {
-     return () => {
-       const r = recognition.current;
-       if (r) {
-         r.onresult = null;
-         r.onstart = null;
-         r.onend = null;
-         try {
-           r.stop();
-         } catch (e) {
-           console.warn("SpeechRecognition stop error:", e);
-         }
+   const defaultVoice = useBrowserSpeechRecognition();
+   (0, import_react4.useEffect)(() => {
+     if (!defaultVoice) return;
+     defaultVoice.onTranscript = (text2) => {
+       if (!isControlled) {
+         setInnerText(text2);
        }
-       recognition.current = null;
+       onChange == null ? void 0 : onChange(text2);
      };
-   }, []);
-   (0, import_react3.useEffect)(() => {
+   }, [defaultVoice, isControlled, onChange]);
+   const voiceController = voice === true ? defaultVoice : typeof voice === "object" ? voice : null;
+   const isRecording = (_a = voiceController == null ? void 0 : voiceController.isRecording) != null ? _a : false;
+   const isVoiceEnabled = Boolean(voiceController);
+   const isVoiceMode = isVoiceEnabled && !isSending && (isEmpty || isRecording);
+   (0, import_react4.useEffect)(() => {
      const el = textareaRef.current;
      if (!el) return;
      el.style.height = "auto";
@@ -299,53 +367,21 @@ function ChatInput({
      }
    };
    const handleRecord = () => {
-     var _a, _b;
-     try {
-       if (!isRecording) {
-         const Speech = window.SpeechRecognition || window.webkitSpeechRecognition;
-         if (!Speech) {
-           console.error("Browser does not support SpeechRecognition");
-           alert("\uD604\uC7AC \uBE0C\uB77C\uC6B0\uC800\uC5D0\uC11C\uB294 \uC74C\uC131 \uC778\uC2DD \uAE30\uB2A5\uC744 \uC0AC\uC6A9\uD560 \uC218 \uC5C6\uC2B5\uB2C8\uB2E4.");
-           return;
-         }
-         recognition.current = new Speech();
-         recognition.current.lang = speechLang;
-         recognition.current.continuous = true;
-         recognition.current.interimResults = true;
-         recognition.current.onstart = () => {
-           setIsRecording(true);
-         };
-         recognition.current.onend = () => {
-           setIsRecording(false);
-         };
-         recognition.current.onresult = (event) => {
-           const newTranscript = Array.from(event.results).map((r) => {
-             var _a2;
-             return (_a2 = r[0]) == null ? void 0 : _a2.transcript;
-           }).join("");
-           setInnerText(newTranscript);
-         };
-         (_a = recognition.current) == null ? void 0 : _a.start();
-       } else {
-         (_b = recognition.current) == null ? void 0 : _b.stop();
-       }
-     } catch (e) {
-       console.error("Speech Recognition error: ", e);
-       alert("\uC74C\uC131 \uC785\uB825\uC744 \uC0AC\uC6A9\uD560 \uC218 \uC5C6\uC2B5\uB2C8\uB2E4. \uD14D\uC2A4\uD2B8\uB85C \uC785\uB825\uD574\uC8FC\uC138\uC694.");
-       setIsRecording(false);
+     if (!voiceController) return;
+     if (isRecording) {
+       voiceController.stop();
+     } else {
+       voiceController.start();
      }
    };
    const getActivityLayer = () => {
      if (isSending) return "sending";
-     if (!disableVoice) {
+     if (isVoiceEnabled) {
        if (isRecording) return "recording";
        if (isVoiceMode) return "mic";
        return "send";
      }
-     if (disableVoice) {
-       if (!isEmpty) return "send";
-       return null;
-     }
+     if (!isEmpty) return "send";
      return null;
    };
    const activeLayer = getActivityLayer();
@@ -480,7 +516,7 @@ function ChatInput({
  }

  // src/components/ChatContainer.tsx
- var import_react4 = require("react");
+ var import_react5 = require("react");
  var import_jsx_runtime6 = require("react/jsx-runtime");
  function ChatContainer({
    messages,
@@ -495,9 +531,9 @@ function ChatContainer({
    inputClassName,
    className
  }) {
-   const [isAtBottom, setIsAtBottom] = (0, import_react4.useState)(true);
-   const scrollRef = (0, import_react4.useRef)(null);
-   (0, import_react4.useEffect)(() => {
+   const [isAtBottom, setIsAtBottom] = (0, import_react5.useState)(true);
+   const scrollRef = (0, import_react5.useRef)(null);
+   (0, import_react5.useEffect)(() => {
      const el = scrollRef.current;
      if (!el) return;
      el.scrollTop = el.scrollHeight;
@@ -508,7 +544,7 @@ function ChatContainer({
      el.addEventListener("scroll", handleScroll);
      return () => el.removeEventListener("scroll", handleScroll);
    }, []);
-   (0, import_react4.useEffect)(() => {
+   (0, import_react5.useEffect)(() => {
      const el = scrollRef.current;
      if (!el) return;
      if (isAtBottom) {
@@ -590,7 +626,7 @@ function ChatContainer({

  // src/hooks/useOptimisticChat.ts
  var import_react_query = require("@tanstack/react-query");
- var import_react5 = require("react");
+ var import_react6 = require("react");
  function useOptimisticChat({
    queryKey,
    queryFn,
@@ -600,7 +636,7 @@ function useOptimisticChat({
    staleTime = 0,
    gcTime = 0
  }) {
-   const [isPending, setIsPending] = (0, import_react5.useState)(false);
+   const [isPending, setIsPending] = (0, import_react6.useState)(false);
    const queryClient = (0, import_react_query.useQueryClient)();
    const {
      data: messages = [],
@@ -686,6 +722,145 @@ function useOptimisticChat({
      // initial loading state
    };
  }
+
+ // src/hooks/useVoiceOptimisticChat.ts
+ var import_react_query2 = require("@tanstack/react-query");
+ var import_react7 = require("react");
+ function useVoiceOptimisticChat({
+   queryKey,
+   queryFn,
+   mutationFn,
+   map,
+   voice,
+   onError,
+   staleTime = 0,
+   gcTime = 0
+ }) {
+   const [isPending, setIsPending] = (0, import_react7.useState)(false);
+   const queryClient = (0, import_react_query2.useQueryClient)();
+   const currentTextRef = (0, import_react7.useRef)("");
+   const rollbackRef = (0, import_react7.useRef)(void 0);
+   const {
+     data: messages = [],
+     isLoading: isInitialLoading
+   } = (0, import_react_query2.useQuery)({
+     queryKey,
+     queryFn: async () => {
+       const rawList = await queryFn();
+       return rawList.map(map);
+     },
+     staleTime,
+     gcTime
+   });
+   const mutation = (0, import_react_query2.useMutation)({
+     mutationFn,
+     // (content: string) => Promise<TMutationRaw>
+     onMutate: async () => {
+       setIsPending(true);
+       const prev = queryClient.getQueryData(queryKey);
+       if (prev) {
+         await queryClient.cancelQueries({ queryKey });
+       }
+       queryClient.setQueryData(queryKey, (old) => {
+         const base = old != null ? old : [];
+         return [
+           ...base,
+           // append an AI placeholder
+           {
+             id: crypto.randomUUID(),
+             role: "AI",
+             content: "",
+             isLoading: true
+           }
+         ];
+       });
+       return { prev };
+     },
+     onSuccess: (rawAiResponse) => {
+       const aiMessage = map(rawAiResponse);
+       queryClient.setQueryData(queryKey, (old) => {
+         if (!old || old.length === 0) {
+           return [aiMessage];
+         }
+         const next = [...old];
+         const lastIndex = next.length - 1;
+         next[lastIndex] = __spreadProps(__spreadValues(__spreadValues({}, next[lastIndex]), aiMessage), {
+           isLoading: false
+         });
+         return next;
+       });
+       setIsPending(false);
+     },
+     onError: (error, _variables, context) => {
+       setIsPending(false);
+       if (context == null ? void 0 : context.prev) {
+         queryClient.setQueryData(queryKey, context.prev);
+       }
+       onError == null ? void 0 : onError(error);
+     },
+     // re-sync with the latest server data after the mutation
+     onSettled: () => {
+       queryClient.invalidateQueries({ queryKey });
+     }
+   });
+   const startRecording = async () => {
+     currentTextRef.current = "";
+     const prev = queryClient.getQueryData(queryKey);
+     rollbackRef.current = prev;
+     if (prev) {
+       await queryClient.cancelQueries({ queryKey });
+     }
+     queryClient.setQueryData(queryKey, (old) => [
+       ...old != null ? old : [],
+       {
+         id: crypto.randomUUID(),
+         role: "USER",
+         content: ""
+       }
+     ]);
+     voice.start();
+   };
+   const onTranscript = (text) => {
+     currentTextRef.current = text;
+     queryClient.setQueryData(queryKey, (old) => {
+       var _a;
+       if (!old) return old;
+       const next = [...old];
+       const last = next.length - 1;
+       if (((_a = next[last]) == null ? void 0 : _a.role) !== "USER") return old;
+       next[last] = __spreadProps(__spreadValues({}, next[last]), {
+         content: text
+       });
+       return next;
+     });
+   };
+   (0, import_react7.useEffect)(() => {
+     voice.onTranscript = onTranscript;
+   }, [voice]);
+   const stopRecording = () => {
+     voice.stop();
+     const finalText = currentTextRef.current.trim();
+     if (!finalText) {
+       if (rollbackRef.current) {
+         queryClient.setQueryData(queryKey, rollbackRef.current);
+       }
+       return;
+     }
+     mutation.mutate(finalText);
+   };
+   return {
+     messages,
+     // Message[]
+     isPending,
+     // loading from when the user sends a message until the AI responds
+     isInitialLoading,
+     // initial loading state
+     startRecording,
+     // function to start speech recognition
+     stopRecording
+     // function to stop speech recognition
+   };
+ }
  // Annotate the CommonJS export names for ESM import in node:
  0 && (module.exports = {
    ChatContainer,
@@ -694,5 +869,7 @@ function useOptimisticChat({
    ChatMessage,
    LoadingSpinner,
    SendingDots,
-   useOptimisticChat
+   useBrowserSpeechRecognition,
+   useOptimisticChat,
+   useVoiceOptimisticChat
  });
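Editor's note: one implementation detail worth flagging in the compiled output above is that `useBrowserSpeechRecognition` exposes `onTranscript` as a property setter backed by a ref, so callers (ChatInput's effect, `useVoiceOptimisticChat`) can swap the transcript handler at any time without restarting recognition or triggering a re-render. A standalone sketch of that pattern, reduced to its essentials (all names here are illustrative, not from the package):

```ts
// Setter-backed-by-ref pattern (illustrative).
import { useRef } from "react";

function useTranscriptSink() {
  const handlerRef = useRef<((text: string) => void) | null>(null);
  return {
    // Called from non-React event handlers (e.g. recognition.onresult);
    // reads the ref each time, so it always sees the latest handler
    // without stale-closure issues.
    emit(text: string) {
      handlerRef.current?.(text);
    },
    // Assignment (`sink.onTranscript = fn`) just swaps the ref, so no
    // re-render and no event-listener teardown is needed.
    set onTranscript(fn: (text: string) => void) {
      handlerRef.current = fn;
    },
  };
}
```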
package/dist/index.mjs CHANGED
@@ -181,11 +181,82 @@ function ChatList({
  }

  // src/components/ChatInput.tsx
- import { useState, useRef, useEffect } from "react";
+ import { useState as useState2, useRef as useRef2, useEffect as useEffect2 } from "react";
+
+ // src/hooks/useBrowserSpeechRecognition.ts
+ import { useEffect, useRef, useState } from "react";
+ function useBrowserSpeechRecognition({
+   lang = "ko-KR",
+   onStart,
+   onEnd,
+   onError
+ } = {}) {
+   const [isRecording, setIsRecording] = useState(false);
+   const recognitionRef = useRef(null);
+   const onTranscriptRef = useRef(void 0);
+   const start = () => {
+     const Speech = window.SpeechRecognition || window.webkitSpeechRecognition;
+     if (!Speech) {
+       onError == null ? void 0 : onError(new Error("SpeechRecognition not supported"));
+       return;
+     }
+     const recognition = new Speech();
+     recognition.lang = lang;
+     recognition.continuous = true;
+     recognition.interimResults = true;
+     recognition.onstart = () => {
+       setIsRecording(true);
+       onStart == null ? void 0 : onStart();
+     };
+     recognition.onend = () => {
+       setIsRecording(false);
+       onEnd == null ? void 0 : onEnd();
+     };
+     recognition.onresult = (event) => {
+       var _a;
+       const transcript = Array.from(event.results).map((r) => {
+         var _a2;
+         return (_a2 = r[0]) == null ? void 0 : _a2.transcript;
+       }).join("");
+       (_a = onTranscriptRef.current) == null ? void 0 : _a.call(onTranscriptRef, transcript);
+     };
+     recognition.onerror = (e) => {
+       onError == null ? void 0 : onError(e);
+     };
+     recognitionRef.current = recognition;
+     recognition.start();
+   };
+   const stop = () => {
+     var _a;
+     (_a = recognitionRef.current) == null ? void 0 : _a.stop();
+   };
+   useEffect(() => {
+     return () => {
+       var _a;
+       (_a = recognitionRef.current) == null ? void 0 : _a.stop();
+       recognitionRef.current = null;
+     };
+   }, []);
+   return {
+     start,
+     // start speech recognition
+     stop,
+     // stop speech recognition
+     isRecording,
+     // current recognition state
+     // setter for injecting transcript-handling logic from outside;
+     // recognition events fire outside the React lifecycle, so the handler is kept in a ref
+     set onTranscript(fn) {
+       onTranscriptRef.current = fn;
+     }
+   };
+ }
+
+ // src/components/ChatInput.tsx
  import { jsx as jsx5, jsxs as jsxs3 } from "react/jsx-runtime";
  function ChatInput({
    onSend,
-   disableVoice = false,
+   voice = true,
    placeholder = "\uBA54\uC2DC\uC9C0\uB97C \uC785\uB825\uD558\uC138\uC694...",
    className = "",
    inputClassName = "",
@@ -197,34 +268,29 @@ function ChatInput({
    value,
    onChange,
    isSending,
-   submitOnEnter = false,
-   speechLang = "ko-KR"
+   submitOnEnter = false
  }) {
-   const [innerText, setInnerText] = useState("");
-   const [isRecording, setIsRecording] = useState(false);
-   const textareaRef = useRef(null);
+   var _a;
+   const [innerText, setInnerText] = useState2("");
+   const textareaRef = useRef2(null);
    const isControlled = value !== void 0;
    const text = isControlled ? value : innerText;
    const isEmpty = text.trim().length === 0;
-   const recognition = useRef(null);
-   const isVoiceMode = !disableVoice && !isSending && (isEmpty || isRecording);
-   useEffect(() => {
-     return () => {
-       const r = recognition.current;
-       if (r) {
-         r.onresult = null;
-         r.onstart = null;
-         r.onend = null;
-         try {
-           r.stop();
-         } catch (e) {
-           console.warn("SpeechRecognition stop error:", e);
-         }
+   const defaultVoice = useBrowserSpeechRecognition();
+   useEffect2(() => {
+     if (!defaultVoice) return;
+     defaultVoice.onTranscript = (text2) => {
+       if (!isControlled) {
+         setInnerText(text2);
        }
-       recognition.current = null;
+       onChange == null ? void 0 : onChange(text2);
      };
-   }, []);
-   useEffect(() => {
+   }, [defaultVoice, isControlled, onChange]);
+   const voiceController = voice === true ? defaultVoice : typeof voice === "object" ? voice : null;
+   const isRecording = (_a = voiceController == null ? void 0 : voiceController.isRecording) != null ? _a : false;
+   const isVoiceEnabled = Boolean(voiceController);
+   const isVoiceMode = isVoiceEnabled && !isSending && (isEmpty || isRecording);
+   useEffect2(() => {
      const el = textareaRef.current;
      if (!el) return;
      el.style.height = "auto";
@@ -260,53 +326,21 @@ function ChatInput({
      }
    };
    const handleRecord = () => {
-     var _a, _b;
-     try {
-       if (!isRecording) {
-         const Speech = window.SpeechRecognition || window.webkitSpeechRecognition;
-         if (!Speech) {
-           console.error("Browser does not support SpeechRecognition");
-           alert("\uD604\uC7AC \uBE0C\uB77C\uC6B0\uC800\uC5D0\uC11C\uB294 \uC74C\uC131 \uC778\uC2DD \uAE30\uB2A5\uC744 \uC0AC\uC6A9\uD560 \uC218 \uC5C6\uC2B5\uB2C8\uB2E4.");
-           return;
-         }
-         recognition.current = new Speech();
-         recognition.current.lang = speechLang;
-         recognition.current.continuous = true;
-         recognition.current.interimResults = true;
-         recognition.current.onstart = () => {
-           setIsRecording(true);
-         };
-         recognition.current.onend = () => {
-           setIsRecording(false);
-         };
-         recognition.current.onresult = (event) => {
-           const newTranscript = Array.from(event.results).map((r) => {
-             var _a2;
-             return (_a2 = r[0]) == null ? void 0 : _a2.transcript;
-           }).join("");
-           setInnerText(newTranscript);
-         };
-         (_a = recognition.current) == null ? void 0 : _a.start();
-       } else {
-         (_b = recognition.current) == null ? void 0 : _b.stop();
-       }
-     } catch (e) {
-       console.error("Speech Recognition error: ", e);
-       alert("\uC74C\uC131 \uC785\uB825\uC744 \uC0AC\uC6A9\uD560 \uC218 \uC5C6\uC2B5\uB2C8\uB2E4. \uD14D\uC2A4\uD2B8\uB85C \uC785\uB825\uD574\uC8FC\uC138\uC694.");
-       setIsRecording(false);
+     if (!voiceController) return;
+     if (isRecording) {
+       voiceController.stop();
+     } else {
+       voiceController.start();
      }
    };
    const getActivityLayer = () => {
      if (isSending) return "sending";
-     if (!disableVoice) {
+     if (isVoiceEnabled) {
        if (isRecording) return "recording";
        if (isVoiceMode) return "mic";
        return "send";
      }
-     if (disableVoice) {
-       if (!isEmpty) return "send";
-       return null;
-     }
+     if (!isEmpty) return "send";
      return null;
    };
    const activeLayer = getActivityLayer();
@@ -441,7 +475,7 @@ function ChatInput({
  }

  // src/components/ChatContainer.tsx
- import { useEffect as useEffect2, useRef as useRef2, useState as useState2 } from "react";
+ import { useEffect as useEffect3, useRef as useRef3, useState as useState3 } from "react";
  import { Fragment, jsx as jsx6, jsxs as jsxs4 } from "react/jsx-runtime";
  function ChatContainer({
    messages,
@@ -456,9 +490,9 @@ function ChatContainer({
    inputClassName,
    className
  }) {
-   const [isAtBottom, setIsAtBottom] = useState2(true);
-   const scrollRef = useRef2(null);
-   useEffect2(() => {
+   const [isAtBottom, setIsAtBottom] = useState3(true);
+   const scrollRef = useRef3(null);
+   useEffect3(() => {
      const el = scrollRef.current;
      if (!el) return;
      el.scrollTop = el.scrollHeight;
@@ -469,7 +503,7 @@ function ChatContainer({
      el.addEventListener("scroll", handleScroll);
      return () => el.removeEventListener("scroll", handleScroll);
    }, []);
-   useEffect2(() => {
+   useEffect3(() => {
      const el = scrollRef.current;
      if (!el) return;
      if (isAtBottom) {
@@ -551,7 +585,7 @@ function ChatContainer({

  // src/hooks/useOptimisticChat.ts
  import { useMutation, useQuery, useQueryClient } from "@tanstack/react-query";
- import { useState as useState3 } from "react";
+ import { useState as useState4 } from "react";
  function useOptimisticChat({
    queryKey,
    queryFn,
@@ -561,7 +595,7 @@ function useOptimisticChat({
    staleTime = 0,
    gcTime = 0
  }) {
-   const [isPending, setIsPending] = useState3(false);
+   const [isPending, setIsPending] = useState4(false);
    const queryClient = useQueryClient();
    const {
      data: messages = [],
@@ -647,6 +681,145 @@ function useOptimisticChat({
      // initial loading state
    };
  }
+
+ // src/hooks/useVoiceOptimisticChat.ts
+ import { useMutation as useMutation2, useQuery as useQuery2, useQueryClient as useQueryClient2 } from "@tanstack/react-query";
+ import { useEffect as useEffect4, useRef as useRef4, useState as useState5 } from "react";
+ function useVoiceOptimisticChat({
+   queryKey,
+   queryFn,
+   mutationFn,
+   map,
+   voice,
+   onError,
+   staleTime = 0,
+   gcTime = 0
+ }) {
+   const [isPending, setIsPending] = useState5(false);
+   const queryClient = useQueryClient2();
+   const currentTextRef = useRef4("");
+   const rollbackRef = useRef4(void 0);
+   const {
+     data: messages = [],
+     isLoading: isInitialLoading
+   } = useQuery2({
+     queryKey,
+     queryFn: async () => {
+       const rawList = await queryFn();
+       return rawList.map(map);
+     },
+     staleTime,
+     gcTime
+   });
+   const mutation = useMutation2({
+     mutationFn,
+     // (content: string) => Promise<TMutationRaw>
+     onMutate: async () => {
+       setIsPending(true);
+       const prev = queryClient.getQueryData(queryKey);
+       if (prev) {
+         await queryClient.cancelQueries({ queryKey });
+       }
+       queryClient.setQueryData(queryKey, (old) => {
+         const base = old != null ? old : [];
+         return [
+           ...base,
+           // append an AI placeholder
+           {
+             id: crypto.randomUUID(),
+             role: "AI",
+             content: "",
+             isLoading: true
+           }
+         ];
+       });
+       return { prev };
+     },
+     onSuccess: (rawAiResponse) => {
+       const aiMessage = map(rawAiResponse);
+       queryClient.setQueryData(queryKey, (old) => {
+         if (!old || old.length === 0) {
+           return [aiMessage];
+         }
+         const next = [...old];
+         const lastIndex = next.length - 1;
+         next[lastIndex] = __spreadProps(__spreadValues(__spreadValues({}, next[lastIndex]), aiMessage), {
+           isLoading: false
+         });
+         return next;
+       });
+       setIsPending(false);
+     },
+     onError: (error, _variables, context) => {
+       setIsPending(false);
+       if (context == null ? void 0 : context.prev) {
+         queryClient.setQueryData(queryKey, context.prev);
+       }
+       onError == null ? void 0 : onError(error);
+     },
+     // re-sync with the latest server data after the mutation
+     onSettled: () => {
+       queryClient.invalidateQueries({ queryKey });
+     }
+   });
+   const startRecording = async () => {
+     currentTextRef.current = "";
+     const prev = queryClient.getQueryData(queryKey);
+     rollbackRef.current = prev;
+     if (prev) {
+       await queryClient.cancelQueries({ queryKey });
+     }
+     queryClient.setQueryData(queryKey, (old) => [
+       ...old != null ? old : [],
+       {
+         id: crypto.randomUUID(),
+         role: "USER",
+         content: ""
+       }
+     ]);
+     voice.start();
+   };
+   const onTranscript = (text) => {
+     currentTextRef.current = text;
+     queryClient.setQueryData(queryKey, (old) => {
+       var _a;
+       if (!old) return old;
+       const next = [...old];
+       const last = next.length - 1;
+       if (((_a = next[last]) == null ? void 0 : _a.role) !== "USER") return old;
+       next[last] = __spreadProps(__spreadValues({}, next[last]), {
+         content: text
+       });
+       return next;
+     });
+   };
+   useEffect4(() => {
+     voice.onTranscript = onTranscript;
+   }, [voice]);
+   const stopRecording = () => {
+     voice.stop();
+     const finalText = currentTextRef.current.trim();
+     if (!finalText) {
+       if (rollbackRef.current) {
+         queryClient.setQueryData(queryKey, rollbackRef.current);
+       }
+       return;
+     }
+     mutation.mutate(finalText);
+   };
+   return {
+     messages,
+     // Message[]
+     isPending,
+     // loading from when the user sends a message until the AI responds
+     isInitialLoading,
+     // initial loading state
+     startRecording,
+     // function to start speech recognition
+     stopRecording
+     // function to stop speech recognition
+   };
+ }
  export {
    ChatContainer,
    ChatInput,
@@ -654,5 +827,7 @@ export {
    ChatMessage,
    LoadingSpinner,
    SendingDots,
-   useOptimisticChat
+   useBrowserSpeechRecognition,
+   useOptimisticChat,
+   useVoiceOptimisticChat
  };
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "react-optimistic-chat",
-   "version": "1.0.0",
+   "version": "1.1.0",
    "main": "./dist/index.js",
    "module": "./dist/index.mjs",
    "types": "./dist/index.d.ts",