@rimori/react-client 0.4.11-next.2 → 0.4.11-next.4

This diff shows the changes between two publicly released versions of this package as published to a supported registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in the registry.
@@ -10,6 +10,8 @@ interface Props {
10
10
  autoStartConversation?: FirstMessages;
11
11
  className?: string;
12
12
  knowledgeId?: string;
13
+ /** Set to true to disable automatic dialect TTS from userInfo. Default: false (dialect enabled). */
14
+ disableDialect?: boolean;
13
15
  }
14
- export declare function Avatar({ avatarImageUrl, voiceId, agentTools, autoStartConversation, children, circleSize, className, cache, knowledgeId, }: Props): import("react/jsx-runtime").JSX.Element;
16
+ export declare function Avatar({ avatarImageUrl, voiceId, agentTools, autoStartConversation, children, circleSize, className, cache, knowledgeId, disableDialect, }: Props): import("react/jsx-runtime").JSX.Element;
15
17
  export {};
@@ -7,12 +7,18 @@ import { useChat } from '../../hooks/UseChatHook';
7
7
  import { useRimori } from '../../providers/PluginProvider';
8
8
  import { getFirstMessages } from './utils';
9
9
  import { useTheme } from '../../hooks/ThemeSetter';
10
- export function Avatar({ avatarImageUrl, voiceId, agentTools, autoStartConversation, children, circleSize = '300px', className, cache = false, knowledgeId, }) {
11
- const { ai, event, plugin } = useRimori();
10
+ export function Avatar({ avatarImageUrl, voiceId, agentTools, autoStartConversation, children, circleSize = '300px', className, cache = false, knowledgeId, disableDialect = false, }) {
11
+ const { ai, event, plugin, userInfo } = useRimori();
12
12
  const { isDark: isDarkThemeValue } = useTheme(plugin.theme);
13
13
  const [agentReplying, setAgentReplying] = useState(false);
14
14
  const [isProcessingMessage, setIsProcessingMessage] = useState(false);
15
+ const dialectTtsInstruction = !disableDialect && (userInfo === null || userInfo === void 0 ? void 0 : userInfo.dialect)
16
+ ? `Speak with a ${userInfo.dialect} accent and pronunciation.`
17
+ : undefined;
15
18
  const sender = useMemo(() => new MessageSender((...args) => ai.getVoice(...args), voiceId, cache), [voiceId, ai, cache]);
19
+ useEffect(() => {
20
+ sender.setInstructions(dialectTtsInstruction);
21
+ }, [sender, dialectTtsInstruction]);
16
22
  const { messages, append, isLoading, lastMessage, setMessages } = useChat(agentTools, { knowledgeId });
17
23
  useEffect(() => {
18
24
  console.log('messages', messages);
@@ -7,9 +7,6 @@ export interface BuddyAssistantAutoStart {
7
7
  userMessage?: string;
8
8
  }
9
9
  export interface BuddyAssistantProps {
10
- buddyName: string;
11
- avatarImageUrl: string;
12
- voiceId: string;
13
10
  systemPrompt: string;
14
11
  autoStartConversation?: BuddyAssistantAutoStart;
15
12
  circleSize?: string;
@@ -18,5 +15,7 @@ export interface BuddyAssistantProps {
18
15
  className?: string;
19
16
  voiceSpeed?: number;
20
17
  tools?: Tool[];
18
+ /** Set to true to disable automatic dialect from userInfo. Default: false (dialect enabled). */
19
+ disableDialect?: boolean;
21
20
  }
22
- export declare function BuddyAssistant({ buddyName, avatarImageUrl, voiceId, systemPrompt, autoStartConversation, circleSize, chatPlaceholder, bottomAction, className, voiceSpeed, tools, }: BuddyAssistantProps): import("react/jsx-runtime").JSX.Element;
21
+ export declare function BuddyAssistant({ systemPrompt, autoStartConversation, circleSize, chatPlaceholder, bottomAction, className, voiceSpeed, tools, disableDialect, }: BuddyAssistantProps): JSX.Element;
@@ -9,9 +9,16 @@ import { HiMiniSpeakerWave, HiMiniSpeakerXMark } from 'react-icons/hi2';
9
9
  import { BiSolidRightArrow } from 'react-icons/bi';
10
10
  let idCounter = 0;
11
11
  const genId = () => `ba-${++idCounter}`;
12
- export function BuddyAssistant({ buddyName, avatarImageUrl, voiceId, systemPrompt, autoStartConversation, circleSize = '160px', chatPlaceholder, bottomAction, className, voiceSpeed = 1, tools, }) {
13
- const { ai, event, plugin } = useRimori();
12
+ export function BuddyAssistant({ systemPrompt, autoStartConversation, circleSize = '160px', chatPlaceholder, bottomAction, className, voiceSpeed = 1, tools, disableDialect = false, }) {
13
+ var _a;
14
+ const { ai, event, plugin, userInfo } = useRimori();
14
15
  const { isDark } = useTheme(plugin.theme);
16
+ const buddy = (_a = plugin.getUserInfo()) === null || _a === void 0 ? void 0 : _a.study_buddy;
17
+ const dialect = !disableDialect ? userInfo === null || userInfo === void 0 ? void 0 : userInfo.dialect : undefined;
18
+ const dialectSystemSuffix = dialect
19
+ ? `\n\nThe user is learning the regional ${dialect} dialect. Occasionally use typical regional vocabulary and expressions from this dialect to help them learn local language naturally.`
20
+ : '';
21
+ const dialectTtsInstruction = dialect ? `Speak with a ${dialect} accent and pronunciation.` : undefined;
15
22
  const [ttsEnabled, setTtsEnabled] = useState(true);
16
23
  const [chatInput, setChatInput] = useState('');
17
24
  const [messages, setMessages] = useState([]);
@@ -21,16 +28,20 @@ export function BuddyAssistant({ buddyName, avatarImageUrl, voiceId, systemPromp
21
28
  useEffect(() => {
22
29
  ttsEnabledRef.current = ttsEnabled;
23
30
  }, [ttsEnabled]);
24
- const sender = useMemo(() => new MessageSender((...args) => ai.getVoice(...args), voiceId), [voiceId, ai]);
31
+ const sender = useMemo(() => { var _a; return new MessageSender((...args) => ai.getVoice(...args), (_a = buddy === null || buddy === void 0 ? void 0 : buddy.voiceId) !== null && _a !== void 0 ? _a : ''); }, [buddy === null || buddy === void 0 ? void 0 : buddy.voiceId, ai]);
32
+ useEffect(() => {
33
+ sender.setInstructions(dialectTtsInstruction);
34
+ }, [sender, dialectTtsInstruction]);
25
35
  // Setup sender callbacks and cleanup
26
36
  useEffect(() => {
37
+ sender.setVoiceSpeed(voiceSpeed);
27
38
  sender.setOnLoudnessChange((value) => event.emit('self.avatar.triggerLoudness', { loudness: value }));
28
39
  sender.setOnEndOfSpeech(() => setIsSpeaking(false));
29
40
  return () => sender.cleanup();
30
41
  }, [sender]);
31
- // Build full API message list with system prompt
42
+ // Build full API message list with system prompt (dialect appended when enabled)
32
43
  const buildApiMessages = (history) => [
33
- { role: 'system', content: systemPrompt },
44
+ { role: 'system', content: systemPrompt + dialectSystemSuffix },
34
45
  ...history.map((m) => ({ role: m.role, content: m.content })),
35
46
  ];
36
47
  const triggerAI = (history) => {
@@ -73,6 +84,8 @@ export function BuddyAssistant({ buddyName, avatarImageUrl, voiceId, systemPromp
73
84
  }
74
85
  // eslint-disable-next-line react-hooks/exhaustive-deps
75
86
  }, []);
87
+ if (!buddy)
88
+ return _jsx("div", {});
76
89
  const sendMessage = (text) => {
77
90
  if (!text.trim() || isLoading)
78
91
  return;
@@ -89,13 +102,13 @@ export function BuddyAssistant({ buddyName, avatarImageUrl, voiceId, systemPromp
89
102
  setTtsEnabled((prev) => !prev);
90
103
  };
91
104
  const lastAssistantMessage = [...messages].filter((m) => m.role === 'assistant').pop();
92
- return (_jsxs("div", { className: `flex flex-col items-center ${className || ''}`, children: [_jsx(CircleAudioAvatar, { width: circleSize, imageUrl: avatarImageUrl, isDarkTheme: isDark, className: "mx-auto" }), _jsxs("div", { className: "flex items-center gap-2", children: [_jsx("span", { className: "text-3xl font-semibold", children: buddyName }), _jsx("button", { type: "button", onClick: handleToggleTts, className: "p-1 rounded-md hover:bg-gray-700/50 transition-colors", title: ttsEnabled ? 'Disable voice' : 'Enable voice', children: ttsEnabled ? (_jsx(HiMiniSpeakerWave, { className: `w-5 h-5 mt-0.5 ${isSpeaking ? 'text-blue-400' : 'text-gray-300'}` })) : (_jsx(HiMiniSpeakerXMark, { className: "w-5 h-5 mt-0.5 text-gray-500" })) })] }), !ttsEnabled && (_jsx("div", { className: "w-full max-w-md rounded-xl bg-gray-800/70 px-4 py-3 text-sm text-gray-200 leading-relaxed border border-gray-700/40 mt-4", children: !(lastAssistantMessage === null || lastAssistantMessage === void 0 ? void 0 : lastAssistantMessage.content) && isLoading ? (_jsxs("span", { className: "inline-flex gap-1 py-0.5", children: [_jsx("span", { className: "w-1.5 h-1.5 rounded-full bg-gray-400 animate-bounce" }), _jsx("span", { className: "w-1.5 h-1.5 rounded-full bg-gray-400 animate-bounce", style: { animationDelay: '0.15s' } }), _jsx("span", { className: "w-1.5 h-1.5 rounded-full bg-gray-400 animate-bounce", style: { animationDelay: '0.3s' } })] })) : (_jsx("span", { className: "whitespace-pre-wrap", children: lastAssistantMessage === null || lastAssistantMessage === void 0 ? void 0 : lastAssistantMessage.content })) })), _jsxs("div", { className: "w-full max-w-md relative mt-4", children: [_jsx("input", { value: chatInput, onChange: (e) => setChatInput(e.target.value), onKeyDown: (e) => {
105
+ return (_jsxs("div", { className: `flex flex-col items-center ${className || ''}`, children: [_jsx(CircleAudioAvatar, { width: circleSize, imageUrl: buddy.avatarUrl, isDarkTheme: isDark, className: "mx-auto" }), _jsxs("div", { className: "flex items-center gap-2 pl-10", children: [_jsx("span", { className: "text-3xl font-semibold", children: buddy.name }), _jsx("button", { type: "button", onClick: handleToggleTts, className: "p-1 rounded-md hover:bg-gray-700/50 transition-colors", title: ttsEnabled ? 'Disable voice' : 'Enable voice', children: ttsEnabled ? (_jsx(HiMiniSpeakerWave, { className: `w-5 h-5 mt-0.5 ${isSpeaking ? 'text-blue-400' : 'text-gray-300'}` })) : (_jsx(HiMiniSpeakerXMark, { className: "w-5 h-5 mt-0.5 text-gray-500" })) })] }), !ttsEnabled && (_jsx("div", { className: "w-full max-w-md rounded-xl bg-gray-800/70 px-4 py-3 text-sm text-gray-200 leading-relaxed border border-gray-700/40 mt-4", children: !(lastAssistantMessage === null || lastAssistantMessage === void 0 ? void 0 : lastAssistantMessage.content) && isLoading ? (_jsxs("span", { className: "inline-flex gap-1 py-0.5", children: [_jsx("span", { className: "w-1.5 h-1.5 rounded-full bg-gray-400 animate-bounce" }), _jsx("span", { className: "w-1.5 h-1.5 rounded-full bg-gray-400 animate-bounce", style: { animationDelay: '0.15s' } }), _jsx("span", { className: "w-1.5 h-1.5 rounded-full bg-gray-400 animate-bounce", style: { animationDelay: '0.3s' } })] })) : (_jsx("span", { className: "whitespace-pre-wrap", children: lastAssistantMessage === null || lastAssistantMessage === void 0 ? void 0 : lastAssistantMessage.content })) })), _jsxs("div", { className: "w-full max-w-md relative mt-4", children: [_jsx("input", { value: chatInput, onChange: (e) => setChatInput(e.target.value), onKeyDown: (e) => {
93
106
  if (e.key === 'Enter' && !e.shiftKey) {
94
107
  e.preventDefault();
95
108
  sendMessage(chatInput);
96
109
  setChatInput('');
97
110
  }
98
- }, placeholder: chatPlaceholder !== null && chatPlaceholder !== void 0 ? chatPlaceholder : `Ask ${buddyName} a question…`, disabled: isLoading, className: "w-full bg-gray-800/50 border border-gray-700 rounded-lg px-3 py-2 pr-16 text-sm text-gray-200 placeholder:text-gray-500 focus:outline-none focus:ring-1 focus:ring-blue-500 disabled:opacity-60" }), _jsxs("div", { className: "absolute right-2 top-1/2 -translate-y-1/2 flex items-center gap-1", children: [_jsx(VoiceRecorder, { iconSize: "14", className: "p-1 text-gray-400 hover:text-white transition-colors", disabled: isLoading, onVoiceRecorded: (text) => sendMessage(text), onRecordingStatusChange: () => { } }), _jsx("div", { className: "w-px h-3.5 bg-gray-600" }), _jsx("button", { type: "button", onClick: () => {
111
+ }, placeholder: chatPlaceholder !== null && chatPlaceholder !== void 0 ? chatPlaceholder : `Ask ${buddy.name} a question…`, disabled: isLoading, className: "w-full bg-gray-800/50 border border-gray-700 rounded-lg px-3 py-2 pr-16 text-sm text-gray-200 placeholder:text-gray-500 focus:outline-none focus:ring-1 focus:ring-blue-500 disabled:opacity-60" }), _jsxs("div", { className: "absolute right-2 top-1/2 -translate-y-1/2 flex items-center gap-1", children: [_jsx(VoiceRecorder, { iconSize: "14", className: "p-1 text-gray-400 hover:text-white transition-colors", disabled: isLoading, onVoiceRecorded: (text) => sendMessage(text), onRecordingStatusChange: () => { } }), _jsx("div", { className: "w-px h-3.5 bg-gray-600" }), _jsx("button", { type: "button", onClick: () => {
99
112
  sendMessage(chatInput);
100
113
  setChatInput('');
101
114
  }, disabled: isLoading || !chatInput.trim(), className: "p-1 text-gray-400 hover:text-white disabled:opacity-40 transition-colors", children: _jsx(BiSolidRightArrow, { className: "w-4 h-4" }) })] })] }), bottomAction && _jsx("div", { className: "w-full max-w-md border-t border-gray-700/60 pt-3", children: bottomAction })] }));
@@ -10,6 +10,10 @@ type AudioPlayerProps = {
10
10
  enableSpeedAdjustment?: boolean;
11
11
  playListenerEvent?: string;
12
12
  size?: string;
13
+ /** Explicit TTS instruction string. If provided, overrides auto-dialect. */
14
+ ttsInstructions?: string;
15
+ /** Set to true to disable automatic dialect from userInfo. Default: false (dialect enabled). */
16
+ disableDialect?: boolean;
13
17
  };
14
18
  export declare const AudioPlayOptions: number[];
15
19
  export type AudioPlayOptionType = 0.8 | 0.9 | 1.0 | 1.1 | 1.2 | 1.5;
@@ -14,12 +14,12 @@ import { useRimori } from '../../providers/PluginProvider';
14
14
  import { EventBus } from '@rimori/client';
15
15
  export const AudioPlayOptions = [0.8, 0.9, 1.0, 1.1, 1.2, 1.5];
16
16
  let isFetchingAudio = false;
17
- export const AudioPlayer = ({ text, voice, language, hide, playListenerEvent, initialSpeed = 1.0, playOnMount = false, enableSpeedAdjustment = false, cache = true, size = '25px', }) => {
17
+ export const AudioPlayer = ({ text, voice, language, hide, playListenerEvent, initialSpeed = 1.0, playOnMount = false, enableSpeedAdjustment = false, cache = true, size = '25px', ttsInstructions, disableDialect = false, }) => {
18
18
  const [audioUrl, setAudioUrl] = useState(null);
19
19
  const [speed, setSpeed] = useState(initialSpeed);
20
20
  const [isPlaying, setIsPlaying] = useState(false);
21
21
  const [isLoading, setIsLoading] = useState(false);
22
- const { ai } = useRimori();
22
+ const { ai, userInfo } = useRimori();
23
23
  const audioRef = useRef(null);
24
24
  const eventBusListenerRef = useRef(null);
25
25
  useEffect(() => {
@@ -33,7 +33,8 @@ export const AudioPlayer = ({ text, voice, language, hide, playListenerEvent, in
33
33
  // Function to generate audio from text using API
34
34
  const generateAudio = () => __awaiter(void 0, void 0, void 0, function* () {
35
35
  setIsLoading(true);
36
- const blob = yield ai.getVoice(text, voice || (language ? 'aws_default' : 'openai_alloy'), 1, language, cache);
36
+ const effectiveInstructions = ttsInstructions !== null && ttsInstructions !== void 0 ? ttsInstructions : (!disableDialect && (userInfo === null || userInfo === void 0 ? void 0 : userInfo.dialect) ? `Speak with a ${userInfo.dialect} accent and pronunciation.` : undefined);
37
+ const blob = yield ai.getVoice(text, voice || (language ? 'aws_default' : 'openai_alloy'), 1, language, cache, effectiveInstructions);
37
38
  setAudioUrl(URL.createObjectURL(blob));
38
39
  setIsLoading(false);
39
40
  });
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@rimori/react-client",
3
- "version": "0.4.11-next.2",
3
+ "version": "0.4.11-next.4",
4
4
  "license": "Apache-2.0",
5
5
  "repository": {
6
6
  "type": "git",
@@ -24,7 +24,7 @@
24
24
  "format": "prettier --write ."
25
25
  },
26
26
  "peerDependencies": {
27
- "@rimori/client": "2.5.19-next.5",
27
+ "@rimori/client": "2.5.20-next.1",
28
28
  "react": "^18.1.0",
29
29
  "react-dom": "^18.1.0"
30
30
  },
@@ -47,7 +47,7 @@
47
47
  },
48
48
  "devDependencies": {
49
49
  "@eslint/js": "^9.37.0",
50
- "@rimori/client": "2.5.19-next.5",
50
+ "@rimori/client": "2.5.20-next.1",
51
51
  "@types/react": "^18.3.21",
52
52
  "eslint-config-prettier": "^10.1.8",
53
53
  "eslint-plugin-prettier": "^5.5.4",
@@ -19,6 +19,8 @@ interface Props {
19
19
  autoStartConversation?: FirstMessages;
20
20
  className?: string;
21
21
  knowledgeId?: string;
22
+ /** Set to true to disable automatic dialect TTS from userInfo. Default: false (dialect enabled). */
23
+ disableDialect?: boolean;
22
24
  }
23
25
 
24
26
  export function Avatar({
@@ -31,15 +33,23 @@ export function Avatar({
31
33
  className,
32
34
  cache = false,
33
35
  knowledgeId,
36
+ disableDialect = false,
34
37
  }: Props) {
35
- const { ai, event, plugin } = useRimori();
38
+ const { ai, event, plugin, userInfo } = useRimori();
36
39
  const { isDark: isDarkThemeValue } = useTheme(plugin.theme);
37
40
  const [agentReplying, setAgentReplying] = useState(false);
38
41
  const [isProcessingMessage, setIsProcessingMessage] = useState(false);
42
+ const dialectTtsInstruction = !disableDialect && userInfo?.dialect
43
+ ? `Speak with a ${userInfo.dialect} accent and pronunciation.`
44
+ : undefined;
39
45
  const sender = useMemo(
40
46
  () => new MessageSender((...args) => ai.getVoice(...args), voiceId, cache),
41
47
  [voiceId, ai, cache],
42
48
  );
49
+
50
+ useEffect(() => {
51
+ sender.setInstructions(dialectTtsInstruction);
52
+ }, [sender, dialectTtsInstruction]);
43
53
  const { messages, append, isLoading, lastMessage, setMessages } = useChat(agentTools, { knowledgeId });
44
54
 
45
55
  useEffect(() => {
@@ -17,9 +17,6 @@ export interface BuddyAssistantAutoStart {
17
17
  }
18
18
 
19
19
  export interface BuddyAssistantProps {
20
- buddyName: string;
21
- avatarImageUrl: string;
22
- voiceId: string;
23
20
  systemPrompt: string;
24
21
  autoStartConversation?: BuddyAssistantAutoStart;
25
22
  circleSize?: string;
@@ -28,15 +25,14 @@ export interface BuddyAssistantProps {
28
25
  className?: string;
29
26
  voiceSpeed?: number;
30
27
  tools?: Tool[];
28
+ /** Set to true to disable automatic dialect from userInfo. Default: false (dialect enabled). */
29
+ disableDialect?: boolean;
31
30
  }
32
31
 
33
32
  let idCounter = 0;
34
33
  const genId = () => `ba-${++idCounter}`;
35
34
 
36
35
  export function BuddyAssistant({
37
- buddyName,
38
- avatarImageUrl,
39
- voiceId,
40
36
  systemPrompt,
41
37
  autoStartConversation,
42
38
  circleSize = '160px',
@@ -45,9 +41,16 @@ export function BuddyAssistant({
45
41
  className,
46
42
  voiceSpeed = 1,
47
43
  tools,
48
- }: BuddyAssistantProps) {
49
- const { ai, event, plugin } = useRimori();
44
+ disableDialect = false,
45
+ }: BuddyAssistantProps): JSX.Element {
46
+ const { ai, event, plugin, userInfo } = useRimori();
50
47
  const { isDark } = useTheme(plugin.theme);
48
+ const buddy = plugin.getUserInfo()?.study_buddy;
49
+ const dialect = !disableDialect ? userInfo?.dialect : undefined;
50
+ const dialectSystemSuffix = dialect
51
+ ? `\n\nThe user is learning the regional ${dialect} dialect. Occasionally use typical regional vocabulary and expressions from this dialect to help them learn local language naturally.`
52
+ : '';
53
+ const dialectTtsInstruction = dialect ? `Speak with a ${dialect} accent and pronunciation.` : undefined;
51
54
 
52
55
  const [ttsEnabled, setTtsEnabled] = useState(true);
53
56
  const [chatInput, setChatInput] = useState('');
@@ -60,18 +63,26 @@ export function BuddyAssistant({
60
63
  ttsEnabledRef.current = ttsEnabled;
61
64
  }, [ttsEnabled]);
62
65
 
63
- const sender = useMemo(() => new MessageSender((...args) => ai.getVoice(...args), voiceId), [voiceId, ai]);
66
+ const sender = useMemo(
67
+ () => new MessageSender((...args) => ai.getVoice(...args), buddy?.voiceId ?? ''),
68
+ [buddy?.voiceId, ai],
69
+ );
70
+
71
+ useEffect(() => {
72
+ sender.setInstructions(dialectTtsInstruction);
73
+ }, [sender, dialectTtsInstruction]);
64
74
 
65
75
  // Setup sender callbacks and cleanup
66
76
  useEffect(() => {
77
+ sender.setVoiceSpeed(voiceSpeed);
67
78
  sender.setOnLoudnessChange((value: number) => event.emit('self.avatar.triggerLoudness', { loudness: value }));
68
79
  sender.setOnEndOfSpeech(() => setIsSpeaking(false));
69
80
  return () => sender.cleanup();
70
81
  }, [sender]);
71
82
 
72
- // Build full API message list with system prompt
83
+ // Build full API message list with system prompt (dialect appended when enabled)
73
84
  const buildApiMessages = (history: ChatMessage[]) => [
74
- { role: 'system' as const, content: systemPrompt },
85
+ { role: 'system' as const, content: systemPrompt + dialectSystemSuffix },
75
86
  ...history.map((m) => ({ role: m.role, content: m.content })),
76
87
  ];
77
88
 
@@ -119,6 +130,8 @@ export function BuddyAssistant({
119
130
  // eslint-disable-next-line react-hooks/exhaustive-deps
120
131
  }, []);
121
132
 
133
+ if (!buddy) return <div />;
134
+
122
135
  const sendMessage = (text: string) => {
123
136
  if (!text.trim() || isLoading) return;
124
137
  const userMsg: ChatMessage = { id: genId(), role: 'user', content: text };
@@ -140,11 +153,11 @@ export function BuddyAssistant({
140
153
  return (
141
154
  <div className={`flex flex-col items-center ${className || ''}`}>
142
155
  {/* Animated circle avatar */}
143
- <CircleAudioAvatar width={circleSize} imageUrl={avatarImageUrl} isDarkTheme={isDark} className="mx-auto" />
156
+ <CircleAudioAvatar width={circleSize} imageUrl={buddy.avatarUrl} isDarkTheme={isDark} className="mx-auto" />
144
157
 
145
158
  {/* Buddy name + TTS toggle */}
146
- <div className="flex items-center gap-2">
147
- <span className="text-3xl font-semibold">{buddyName}</span>
159
+ <div className="flex items-center gap-2 pl-10">
160
+ <span className="text-3xl font-semibold">{buddy.name}</span>
148
161
  <button
149
162
  type="button"
150
163
  onClick={handleToggleTts}
@@ -192,7 +205,7 @@ export function BuddyAssistant({
192
205
  setChatInput('');
193
206
  }
194
207
  }}
195
- placeholder={chatPlaceholder ?? `Ask ${buddyName} a question…`}
208
+ placeholder={chatPlaceholder ?? `Ask ${buddy.name} a question…`}
196
209
  disabled={isLoading}
197
210
  className="w-full bg-gray-800/50 border border-gray-700 rounded-lg px-3 py-2 pr-16 text-sm text-gray-200 placeholder:text-gray-500 focus:outline-none focus:ring-1 focus:ring-blue-500 disabled:opacity-60"
198
211
  />
@@ -14,6 +14,10 @@ type AudioPlayerProps = {
14
14
  enableSpeedAdjustment?: boolean;
15
15
  playListenerEvent?: string;
16
16
  size?: string;
17
+ /** Explicit TTS instruction string. If provided, overrides auto-dialect. */
18
+ ttsInstructions?: string;
19
+ /** Set to true to disable automatic dialect from userInfo. Default: false (dialect enabled). */
20
+ disableDialect?: boolean;
17
21
  };
18
22
 
19
23
  export const AudioPlayOptions = [0.8, 0.9, 1.0, 1.1, 1.2, 1.5];
@@ -32,12 +36,14 @@ export const AudioPlayer: React.FC<AudioPlayerProps> = ({
32
36
  enableSpeedAdjustment = false,
33
37
  cache = true,
34
38
  size = '25px',
39
+ ttsInstructions,
40
+ disableDialect = false,
35
41
  }) => {
36
42
  const [audioUrl, setAudioUrl] = useState<string | null>(null);
37
43
  const [speed, setSpeed] = useState(initialSpeed);
38
44
  const [isPlaying, setIsPlaying] = useState(false);
39
45
  const [isLoading, setIsLoading] = useState(false);
40
- const { ai } = useRimori();
46
+ const { ai, userInfo } = useRimori();
41
47
  const audioRef = useRef<HTMLAudioElement | null>(null);
42
48
  const eventBusListenerRef = useRef<{ off: () => void } | null>(null);
43
49
 
@@ -52,7 +58,9 @@ export const AudioPlayer: React.FC<AudioPlayerProps> = ({
52
58
  const generateAudio = async () => {
53
59
  setIsLoading(true);
54
60
 
55
- const blob = await ai.getVoice(text, voice || (language ? 'aws_default' : 'openai_alloy'), 1, language, cache);
61
+ const effectiveInstructions = ttsInstructions
62
+ ?? (!disableDialect && userInfo?.dialect ? `Speak with a ${userInfo.dialect} accent and pronunciation.` : undefined);
63
+ const blob = await ai.getVoice(text, voice || (language ? 'aws_default' : 'openai_alloy'), 1, language, cache, effectiveInstructions);
56
64
  setAudioUrl(URL.createObjectURL(blob));
57
65
  setIsLoading(false);
58
66
  };