@rimori/client 1.2.0 → 1.3.0

Files changed (75)
  1. package/README.md +61 -18
  2. package/dist/cli/scripts/init/dev-registration.js +0 -1
  3. package/dist/cli/scripts/init/main.d.ts +1 -1
  4. package/dist/cli/scripts/init/main.js +1 -0
  5. package/dist/components/LoggerExample.d.ts +6 -0
  6. package/dist/components/LoggerExample.js +79 -0
  7. package/dist/components/ai/Assistant.js +2 -2
  8. package/dist/components/ai/Avatar.js +2 -2
  9. package/dist/components/ai/EmbeddedAssistent/VoiceRecoder.js +41 -32
  10. package/dist/components/audio/Playbutton.js +2 -2
  11. package/dist/components/components/ContextMenu.js +48 -9
  12. package/dist/core/controller/AIController.js +202 -69
  13. package/dist/core/controller/AudioController.d.ts +0 -0
  14. package/dist/core/controller/AudioController.js +1 -0
  15. package/dist/core/controller/ObjectController.d.ts +2 -2
  16. package/dist/core/controller/ObjectController.js +8 -8
  17. package/dist/core/controller/SettingsController.d.ts +16 -0
  18. package/dist/core/controller/SharedContentController.d.ts +30 -2
  19. package/dist/core/controller/SharedContentController.js +74 -23
  20. package/dist/core/controller/VoiceController.d.ts +2 -3
  21. package/dist/core/controller/VoiceController.js +11 -4
  22. package/dist/core/core.d.ts +1 -0
  23. package/dist/fromRimori/EventBus.js +1 -1
  24. package/dist/fromRimori/PluginTypes.d.ts +7 -4
  25. package/dist/hooks/UseChatHook.js +6 -4
  26. package/dist/hooks/UseLogger.d.ts +30 -0
  27. package/dist/hooks/UseLogger.js +122 -0
  28. package/dist/index.d.ts +1 -0
  29. package/dist/index.js +1 -0
  30. package/dist/plugin/AudioController.d.ts +37 -0
  31. package/dist/plugin/AudioController.js +68 -0
  32. package/dist/plugin/Logger.d.ts +68 -0
  33. package/dist/plugin/Logger.js +256 -0
  34. package/dist/plugin/LoggerExample.d.ts +16 -0
  35. package/dist/plugin/LoggerExample.js +140 -0
  36. package/dist/plugin/PluginController.d.ts +15 -3
  37. package/dist/plugin/PluginController.js +162 -39
  38. package/dist/plugin/RimoriClient.d.ts +55 -13
  39. package/dist/plugin/RimoriClient.js +60 -23
  40. package/dist/plugin/StandaloneClient.d.ts +1 -0
  41. package/dist/plugin/StandaloneClient.js +16 -5
  42. package/dist/plugin/ThemeSetter.d.ts +2 -2
  43. package/dist/plugin/ThemeSetter.js +8 -5
  44. package/dist/providers/PluginProvider.d.ts +1 -1
  45. package/dist/providers/PluginProvider.js +36 -10
  46. package/dist/utils/audioFormats.d.ts +26 -0
  47. package/dist/utils/audioFormats.js +67 -0
  48. package/dist/worker/WorkerSetup.d.ts +3 -2
  49. package/dist/worker/WorkerSetup.js +22 -67
  50. package/package.json +2 -1
  51. package/src/cli/scripts/init/dev-registration.ts +0 -1
  52. package/src/cli/scripts/init/main.ts +1 -0
  53. package/src/components/ai/Assistant.tsx +2 -2
  54. package/src/components/ai/Avatar.tsx +2 -2
  55. package/src/components/ai/EmbeddedAssistent/VoiceRecoder.tsx +39 -32
  56. package/src/components/audio/Playbutton.tsx +2 -2
  57. package/src/components/components/ContextMenu.tsx +53 -9
  58. package/src/core/controller/AIController.ts +236 -75
  59. package/src/core/controller/ObjectController.ts +8 -8
  60. package/src/core/controller/SettingsController.ts +16 -0
  61. package/src/core/controller/SharedContentController.ts +87 -25
  62. package/src/core/controller/VoiceController.ts +24 -19
  63. package/src/core/core.ts +1 -0
  64. package/src/fromRimori/EventBus.ts +1 -1
  65. package/src/fromRimori/PluginTypes.ts +6 -4
  66. package/src/hooks/UseChatHook.ts +6 -4
  67. package/src/index.ts +1 -0
  68. package/src/plugin/AudioController.ts +58 -0
  69. package/src/plugin/Logger.ts +324 -0
  70. package/src/plugin/PluginController.ts +171 -43
  71. package/src/plugin/RimoriClient.ts +95 -30
  72. package/src/plugin/StandaloneClient.ts +22 -6
  73. package/src/plugin/ThemeSetter.ts +8 -5
  74. package/src/providers/PluginProvider.tsx +40 -10
  75. package/src/worker/WorkerSetup.ts +14 -63

package/src/components/ai/EmbeddedAssistent/VoiceRecoder.tsx

@@ -1,6 +1,7 @@
-import { useState, useRef, forwardRef, useImperativeHandle, useEffect } from 'react';
+import { useRimori } from '../../../components';
 import { FaMicrophone, FaSpinner } from 'react-icons/fa6';
-import { usePlugin } from '../../../components';
+import { AudioController } from '../../../plugin/AudioController';
+import { useState, useRef, forwardRef, useImperativeHandle, useEffect } from 'react';
 
 interface Props {
   iconSize?: string;
@@ -15,10 +16,8 @@ interface Props {
 export const VoiceRecorder = forwardRef(({ onVoiceRecorded, iconSize, className, disabled, loading, onRecordingStatusChange, enablePushToTalk = false }: Props, ref) => {
   const [isRecording, setIsRecording] = useState(false);
   const [internalIsProcessing, setInternalIsProcessing] = useState(false);
-  const mediaRecorderRef = useRef<MediaRecorder | null>(null);
-  const audioChunksRef = useRef<Blob[]>([]);
-  const mediaStreamRef = useRef<MediaStream | null>(null);
-  const { ai: llm } = usePlugin();
+  const audioControllerRef = useRef<AudioController | null>(null);
+  const { ai, plugin } = useRimori();
 
   // Ref for latest onVoiceRecorded callback
   const onVoiceRecordedRef = useRef(onVoiceRecorded);
@@ -27,41 +26,49 @@ export const VoiceRecorder = forwardRef(({ onVoiceRecorded, iconSize, className,
   }, [onVoiceRecorded]);
 
   const startRecording = async () => {
-    const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
-    mediaStreamRef.current = stream;
-    const mediaRecorder = new MediaRecorder(stream);
-    mediaRecorderRef.current = mediaRecorder;
+    try {
+      if (!audioControllerRef.current) {
+        audioControllerRef.current = new AudioController(plugin.pluginId);
+      }
 
-    mediaRecorder.ondataavailable = (event) => {
-      audioChunksRef.current.push(event.data);
-    };
+      await audioControllerRef.current.startRecording();
+      setIsRecording(true);
+      onRecordingStatusChange(true);
+    } catch (error) {
+      console.error('Failed to start recording:', error);
+      // Handle permission denied or other errors
+    }
+  };
 
-    mediaRecorder.onstop = async () => {
-      const audioBlob = new Blob(audioChunksRef.current);
-      audioChunksRef.current = [];
 
 
-      setInternalIsProcessing(true);
-      const text = await llm.getTextFromVoice(audioBlob);
-      setInternalIsProcessing(false);
-      onVoiceRecordedRef.current(text);
-    };
+  const stopRecording = async () => {
+    try {
+      if (audioControllerRef.current && isRecording) {
+        const audioResult = await audioControllerRef.current.stopRecording();
+        // console.log("audioResult: ", audioResult);
 
-    mediaRecorder.start();
-    setIsRecording(true);
-    onRecordingStatusChange(true);
-  };
+        setInternalIsProcessing(true);
+
+        // Play the recorded audio from the Blob
+        // const blobUrl = URL.createObjectURL(audioResult.recording);
+        // const audioRef = new Audio(blobUrl);
+        // audioRef.onended = () => URL.revokeObjectURL(blobUrl);
+        // audioRef.play().catch((e) => console.error('Playback error:', e));
 
-  const stopRecording = () => {
-    if (mediaRecorderRef.current) {
-      mediaRecorderRef.current.stop();
+        // console.log("audioBlob: ", audioResult.recording);
+        const text = await ai.getTextFromVoice(audioResult.recording);
+        // console.log("stt result", text);
+        // throw new Error("test");
+        setInternalIsProcessing(false);
+        onVoiceRecordedRef.current(text);
+      }
+    } catch (error) {
+      console.error('Failed to stop recording:', error);
+    } finally {
       setIsRecording(false);
       onRecordingStatusChange(false);
     }
-    if (mediaStreamRef.current) {
-      mediaStreamRef.current.getTracks().forEach(track => track.stop());
-      mediaStreamRef.current = null;
-    }
   };
 
   useImperativeHandle(ref, () => ({
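
Taken together, these hunks swap the component's hand-rolled MediaRecorder plumbing for the new AudioController plugin class. Below is a minimal TypeScript sketch of the resulting flow. It assumes, as the hunks suggest, that startRecording() resolves once capture begins and that stopRecording() resolves to an object whose recording field is a Blob; the import path is an assumption based on the file list above, not something this diff confirms.

// Sketch only. The import path is an assumption; the diff shows the internal
// module plugin/AudioController but not what the package root re-exports.
import { AudioController } from '@rimori/client/dist/plugin/AudioController';

// getTextFromVoice stands in for ai.getTextFromVoice obtained via useRimori().
async function recordAndTranscribe(
  pluginId: string,
  getTextFromVoice: (audio: Blob) => Promise<string>,
): Promise<string> {
  const recorder = new AudioController(pluginId); // constructor shape shown in the diff
  await recorder.startRecording();                // may reject, e.g. when mic permission is denied

  // ...caller decides when the user has finished speaking...

  const result = await recorder.stopRecording();  // resolves with the captured audio
  return getTextFromVoice(result.recording);      // 'recording' is a Blob per the diff
}
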
package/src/components/audio/Playbutton.tsx

@@ -1,6 +1,6 @@
 import React, { useState, useEffect } from 'react';
 import { FaPlayCircle, FaStopCircle } from "react-icons/fa";
-import { usePlugin } from "../../providers/PluginProvider";
+import { useRimori } from "../../providers/PluginProvider";
 import { Spinner } from '../Spinner';
 import { EventBus } from '../../fromRimori/EventBus';
 
@@ -34,7 +34,7 @@ export const AudioPlayer: React.FC<AudioPlayerProps> = ({
   const [speed, setSpeed] = useState(initialSpeed);
   const [isPlaying, setIsPlaying] = useState(false);
   const [isLoading, setIsLoading] = useState(false);
-  const { ai } = usePlugin();
+  const { ai } = useRimori();
 
   useEffect(() => {
     if (!playListenerEvent) return;

package/src/components/components/ContextMenu.tsx

@@ -14,28 +14,64 @@ const ContextMenu = ({ client }: { client: RimoriClient }) => {
   const [actions, setActions] = useState<MenuEntry[]>([]);
   const [position, setPosition] = useState<Position>({ x: 0, y: 0 });
   const [openOnTextSelect, setOpenOnTextSelect] = useState(false);
+  const [menuWidth, setMenuWidth] = useState<number>(0);
   const menuRef = useRef<HTMLDivElement>(null);
+  const isMobile = window.innerWidth < 768;
+
+  /**
+   * Calculates position for mobile context menu based on selected text bounds.
+   * Centers the menu horizontally over the selected text and positions it 30px below the text's end.
+   * @param selectedText - The currently selected text
+   * @param menuWidth - The width of the menu to center properly
+   * @returns Position object with x and y coordinates
+   */
+  const calculateMobilePosition = (selectedText: string, menuWidth: number = 0): Position => {
+    const selection = window.getSelection();
+    if (!selection || !selectedText) {
+      return { x: 0, y: 0, text: selectedText };
+    }
+
+    const range = selection.getRangeAt(0);
+    const rect = range.getBoundingClientRect();
+
+    // Center horizontally over the selected text, accounting for menu width
+    const centerX = rect.left + (rect.width / 2) - (menuWidth / 2);
+
+    // Position 12px below where the text ends vertically
+    const textEndY = rect.bottom + 12;
+
+    return { x: centerX, y: textEndY, text: selectedText };
+  };
 
   useEffect(() => {
-    client.plugin.getInstalled().then(plugins => {
-      setActions(plugins.flatMap(p => p.context_menu_actions).filter(Boolean));
-    });
+    const actions = client.plugin.getPluginInfo().installedPlugins.flatMap(p => p.context_menu_actions).filter(Boolean);
+    setActions(actions);
+    setOpenOnTextSelect(client.plugin.getUserInfo().context_menu_on_select);
 
-    client.plugin.getUserInfo().then((userInfo) => {
-      setOpenOnTextSelect(userInfo.context_menu_on_select);
-    })
 
     EventBus.on<{ actions: MenuEntry[] }>("global.contextMenu.createActions", ({ data }) => {
       setActions([...data.actions, ...actions]);
     });
   }, []);
 
+  // Update menu width when menu is rendered
+  useEffect(() => {
+    if (isOpen && menuRef.current) {
+      setMenuWidth(menuRef.current.offsetWidth);
+    }
+  }, [isOpen, actions]);
+
   useEffect(() => {
     // Track mouse position globally
     const handleMouseMove = (e: MouseEvent) => {
       const selectedText = window.getSelection()?.toString().trim();
       if (isOpen && selectedText === position.text) return;
-      setPosition({ x: e.clientX, y: e.clientY, text: selectedText });
+
+      if (isMobile && selectedText) {
+        setPosition(calculateMobilePosition(selectedText, menuWidth));
+      } else {
+        setPosition({ x: e.clientX, y: e.clientY, text: selectedText });
+      }
     };
 
     const handleMouseUp = (e: MouseEvent) => {
@@ -64,18 +100,26 @@ const ContextMenu = ({ client }: { client: RimoriClient }) => {
        if (e.button === 2) {
          e.preventDefault();
        }
-        setPosition({ x: e.clientX, y: e.clientY, text: selectedText });
+
+        if (isMobile) {
+          setPosition(calculateMobilePosition(selectedText, menuWidth));
+        } else {
+          setPosition({ x: e.clientX, y: e.clientY, text: selectedText });
+        }
        setIsOpen(true);
      } else {
        setIsOpen(false);
      }
    };
 
-    // Add selectionchange listener to close menu if selection is cleared
+    // Add selectionchange listener to close menu if selection is cleared and update position for mobile
    const handleSelectionChange = () => {
      const selectedText = window.getSelection()?.toString().trim();
      if (!selectedText && isOpen) {
        setIsOpen(false);
+      } else if (selectedText && isOpen && isMobile) {
+        // Update position in real-time as text selection changes on mobile
+        setPosition(calculateMobilePosition(selectedText, menuWidth));
      }
    };
 
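To make the centering math in calculateMobilePosition concrete, here is a worked example with illustrative selection bounds. (Note that the function's doc comment above says 30px, while the code actually offsets by 12px.)

// Illustrative bounds, as range.getBoundingClientRect() might report them.
const rect = { left: 100, width: 200, bottom: 300 };
const menuWidth = 120;

// Center the menu horizontally over the selection:
const x = rect.left + rect.width / 2 - menuWidth / 2; // 100 + 100 - 60 = 140
// Place it 12px below the bottom edge of the selection:
const y = rect.bottom + 12;                           // 300 + 12 = 312
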
package/src/core/controller/AIController.ts

@@ -29,94 +29,255 @@ export async function streamChatGPT(backendUrl: string, messages: Message[], too
   const messageId = Math.random().toString(36).substring(3);
   let currentMessages: Message[] = [...messages];
 
+  console.log('Starting streamChatGPT with:', {
+    messageId,
+    messageCount: messages.length,
+    toolCount: tools.length,
+    backendUrl
+  });
+
   while (true) {
     const messagesForApi = currentMessages.map(({ id, ...rest }) => rest);
 
-    const response = await fetch(`${backendUrl}/ai/llm`, {
-      method: 'POST',
-      body: JSON.stringify({ messages: messagesForApi, tools, stream: true }),
-      headers: { 'Authorization': `Bearer ${token}`, 'Content-Type': 'application/json' }
-    });
+    try {
+      const response = await fetch(`${backendUrl}/ai/llm`, {
+        method: 'POST',
+        body: JSON.stringify({ messages: messagesForApi, tools, stream: true }),
+        headers: { 'Authorization': `Bearer ${token}`, 'Content-Type': 'application/json' }
+      });
 
-    if (!response.body) {
-      console.error('No response body.');
-      return;
-    }
+      if (!response.ok) {
+        throw new Error(`HTTP error! status: ${response.status}`);
+      }
+
+      if (!response.body) {
+        console.error('No response body.');
+        return;
+      }
 
-    const reader = response.body.getReader();
-    const decoder = new TextDecoder('utf-8');
-
-    let content = "";
-    let done = false;
-    let toolInvocations: { toolCallId: string, toolName: string, args: any }[] = [];
-    let finishReason = "";
-
-    while (!done) {
-      const { value, done: readerDone } = await reader.read();
-
-      if (value) {
-        const chunk = decoder.decode(value, { stream: true });
-        const lines = chunk.split('\n').filter(line => line.trim() !== '');
-
-        for (const line of lines) {
-          const command = line.substring(0, 1);
-
-          if (command === '0') {
-            const data = line.substring(3, line.length - 1);
-            content += data;
-            onResponse(messageId, content.replace(/\\n/g, '\n').replace(/\\+"/g, '"'), false);
-          } else if (command === 'd' || command === 'e') {
-            const eventData = JSON.parse(line.substring(2));
-            finishReason = eventData.finishReason;
-            done = true;
-            break;
-          } else if (command === '9') {
-            const toolInvocation = JSON.parse(line.substring(2));
-            toolInvocations.push(toolInvocation);
+      const reader = response.body.getReader();
+      const decoder = new TextDecoder('utf-8');
+
+      let content = "";
+      let done = false;
+      let toolInvocations: { toolCallId: string, toolName: string, args: any }[] = [];
+      let currentTextId = "";
+      let isToolCallMode = false;
+      let buffer = ""; // Buffer for incomplete chunks
+
+      while (!done) {
+        const { value, done: readerDone } = await reader.read();
+
+        if (value) {
+          const chunk = decoder.decode(value, { stream: true });
+          buffer += chunk;
+
+          // Split by lines, but handle incomplete lines
+          const lines = buffer.split('\n');
+
+          // Keep the last line in buffer if it's incomplete
+          if (lines.length > 1) {
+            buffer = lines.pop() || "";
+          }
+
+          for (const line of lines) {
+            if (line.trim() === '') continue;
+
+            // Handle the new streaming format
+            if (line.startsWith('data: ')) {
+              const dataStr = line.substring(6); // Remove 'data: ' prefix
+
+              // Handle [DONE] marker
+              if (dataStr === '[DONE]') {
+                done = true;
+                break;
+              }
+
+              try {
+                const data = JSON.parse(dataStr);
+
+                // Log the first message to understand the format
+                if (!content && !isToolCallMode) {
+                  console.log('First stream message received:', data);
+                }
+
+                switch (data.type) {
+                  case 'start':
+                    // Stream started, no action needed
+                    console.log('Stream started');
+                    break;
+
+                  case 'start-step':
+                    // Step started, no action needed
+                    console.log('Step started');
+                    break;
+
+                  case 'reasoning-start':
+                    // Reasoning started, no action needed
+                    console.log('Reasoning started:', data.id);
+                    break;
+
+                  case 'reasoning-end':
+                    // Reasoning ended, no action needed
+                    console.log('Reasoning ended:', data.id);
+                    break;
+
+                  case 'text-start':
+                    // Text generation started, store the ID
+                    currentTextId = data.id;
+                    console.log('Text generation started:', data.id);
+                    break;
+
+                  case 'text-delta':
+                    // Text delta received, append to content
+                    if (data.delta) {
+                      content += data.delta;
+                      onResponse(messageId, content, false);
+                    }
+                    break;
+
+                  case 'text-end':
+                    // Text generation ended
+                    console.log('Text generation ended:', data.id);
+                    break;
+
+                  case 'finish-step':
+                    // Step finished, no action needed
+                    console.log('Step finished');
+                    break;
+
+                  case 'finish':
+                    // Stream finished
+                    console.log('Stream finished');
+                    done = true;
+                    break;
+
+                  // Additional message types that might be present in the AI library
+                  case 'tool-call':
+                    // Tool call initiated
+                    console.log('Tool call initiated:', data);
+                    isToolCallMode = true;
+                    if (data.toolCallId && data.toolName && data.args) {
+                      toolInvocations.push({
+                        toolCallId: data.toolCallId,
+                        toolName: data.toolName,
+                        args: data.args
+                      });
+                    }
+                    break;
+
+                  case 'tool-call-delta':
+                    // Tool call delta (for streaming tool calls)
+                    console.log('Tool call delta:', data);
+                    break;
+
+                  case 'tool-call-end':
+                    // Tool call completed
+                    console.log('Tool call completed:', data);
+                    break;
+
+                  case 'tool-result':
+                    // Tool execution result
+                    console.log('Tool result:', data);
+                    break;
+
+                  case 'error':
+                    // Error occurred
+                    console.error('Stream error:', data);
+                    break;
+
+                  case 'usage':
+                    // Usage information
+                    console.log('Usage info:', data);
+                    break;
+
+                  case 'model':
+                    // Model information
+                    console.log('Model info:', data);
+                    break;
+
+                  case 'stop':
+                    // Stop signal
+                    console.log('Stop signal received');
+                    done = true;
+                    break;
+
+                  default:
+                    // Unknown type, log for debugging
+                    console.log('Unknown stream type:', data.type, data);
+                    break;
+                }
+              } catch (error) {
+                console.error('Error parsing stream data:', error, dataStr);
+              }
+            }
           }
         }
-      }
 
-      if (readerDone) {
-        done = true;
+        if (readerDone) {
+          done = true;
+        }
       }
-    }
-
-    if (content || toolInvocations.length > 0) {
-      currentMessages.push({
-        id: messageId,
-        role: "assistant",
-        content: content,
-        toolCalls: toolInvocations.length > 0 ? toolInvocations: undefined,
-      });
-    }
 
-    if (finishReason !== 'tool-calls') {
-      onResponse(messageId, content.replace(/\\n/g, '\n'), true, toolInvocations);
-      return;
-    }
+      // Check if we have content or if this was a tool call response
+      if (content || toolInvocations.length > 0) {
+        currentMessages.push({
+          id: messageId,
+          role: "assistant",
+          content: content,
+          toolCalls: toolInvocations.length > 0 ? toolInvocations: undefined,
+        });
+      }
 
-    const toolResults: Message[] = [];
-    for (const toolInvocation of toolInvocations) {
-      const tool = tools.find(t => t.name === toolInvocation.toolName);
-      if (tool && tool.execute) {
-        try {
-          const result = await tool.execute(toolInvocation.args);
-          toolResults.push({
-            id: Math.random().toString(36).substring(3),
-            role: "user",
-            content: `Tool '${toolInvocation.toolName}' returned: ${JSON.stringify(result)}`,
-          });
-        } catch (error) {
-          console.error(`Error executing tool ${toolInvocation.toolName}:`, error);
-          toolResults.push({
-            id: Math.random().toString(36).substring(3),
-            role: "user",
-            content: `Tool '${toolInvocation.toolName}' failed with error: ${error}`,
-          });
+      // Handle tool call scenario if tools were provided
+      if (tools.length > 0 && toolInvocations.length > 0) {
+        console.log('Tool calls detected, executing tools...');
+
+        const toolResults: Message[] = [];
+        for (const toolInvocation of toolInvocations) {
+          const tool = tools.find(t => t.name === toolInvocation.toolName);
+          if (tool && tool.execute) {
+            try {
+              const result = await tool.execute(toolInvocation.args);
+              toolResults.push({
+                id: Math.random().toString(36).substring(3),
+                role: "user",
+                content: `Tool '${toolInvocation.toolName}' returned: ${JSON.stringify(result)}`,
+              });
+            } catch (error) {
+              console.error(`Error executing tool ${toolInvocation.toolName}:`, error);
+              toolResults.push({
+                id: Math.random().toString(36).substring(3),
+                role: "user",
+                content: `Tool '${toolInvocation.toolName}' failed with error: ${error}`,
+              });
+            }
+          }
+        }
+
+        if (toolResults.length > 0) {
+          currentMessages.push(...toolResults);
+          // Continue the loop to handle the next response
+          continue;
         }
       }
+
+      // Since the new format doesn't seem to support tool calls in the same way,
+      // we'll assume the stream is complete when we reach the end
+      // If tools are provided and no content was generated, this might indicate a tool call
+      if (tools.length > 0 && !content && !isToolCallMode) {
+        // This might be a tool call scenario, but we need more information
+        // For now, we'll just finish the stream
+        console.log('No content generated, but tools provided - might be tool call scenario');
+      }
+
+      onResponse(messageId, content, true, toolInvocations);
+      return;
+
+    } catch (error) {
+      console.error('Error in streamChatGPT:', error);
+      onResponse(messageId, `Error: ${error instanceof Error ? error.message : String(error)}`, true, []);
+      return;
     }
-    currentMessages.push(...toolResults);
   }
 }
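
The rewrite replaces the old single-character command protocol ('0' for text, '9' for tool calls, 'd'/'e' for done) with SSE-style data: lines carrying typed JSON events, buffered so that chunks split mid-line are reassembled. Below is a sketch of a stream the new loop would accept, using only the fields the switch statement reads; real backend payloads may carry additional fields.

// Each element is one line of the response body; blank lines are skipped.
const sampleStream = [
  'data: {"type":"start"}',
  'data: {"type":"text-start","id":"t1"}',
  'data: {"type":"text-delta","delta":"Hel"}',
  'data: {"type":"text-delta","delta":"lo"}',
  'data: {"type":"text-end","id":"t1"}',
  'data: {"type":"finish"}',
  'data: [DONE]',
];

// Fed through the loop above, this yields two partial onResponse calls
// ("Hel", then "Hello") with finished = false, followed by a final
// onResponse(messageId, "Hello", true, []) once the stream ends.
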
package/src/core/controller/ObjectController.ts

@@ -41,8 +41,8 @@ export interface ObjectRequest {
   instructions: string;
 }
 
-export async function generateObject(supabaseUrl: string, request: ObjectRequest, token: string) {
-  return await fetch(`${supabaseUrl}/functions/v1/llm-object`, {
+export async function generateObject(backendUrl: string, request: ObjectRequest, token: string) {
+  return await fetch(`${backendUrl}/ai/llm-object`, {
     method: 'POST',
     body: JSON.stringify({
       stream: false,
@@ -50,16 +50,16 @@ export async function generateObject(supabaseUrl: string, request: ObjectRequest
       behaviour: request.behaviour,
       instructions: request.instructions,
     }),
-    headers: { 'Authorization': `Bearer ${token}` }
+    headers: { 'Authorization': `Bearer ${token}`, 'Content-Type': 'application/json' }
   }).then(response => response.json());
 }
 
 // TODO adjust stream to work with object
 export type OnLLMResponse = (id: string, response: string, finished: boolean, toolInvocations?: any[]) => void;
 
-export async function streamObject(supabaseUrl: string, request: ObjectRequest, onResponse: OnLLMResponse, token: string) {
+export async function streamObject(backendUrl: string, request: ObjectRequest, onResponse: OnLLMResponse, token: string) {
   const messageId = Math.random().toString(36).substring(3);
-  const response = await fetch(`${supabaseUrl}/functions/v1/llm-object`, {
+  const response = await fetch(`${backendUrl}/ai/llm-object`, {
     method: 'POST',
     body: JSON.stringify({
       stream: true,
@@ -67,7 +67,7 @@ export async function streamObject(supabaseUrl: string, request: ObjectRequest,
       systemInstructions: request.behaviour,
       secondaryInstructions: request.instructions,
     }),
-    headers: { 'Authorization': `Bearer ${token}` }
+    headers: { 'Authorization': `Bearer ${token}`, 'Content-Type': 'application/json' }
   });
 
   if (!response.body) {
@@ -98,7 +98,7 @@ export async function streamObject(supabaseUrl: string, request: ObjectRequest,
         // console.log("AI response:", content);
 
         //content \n\n should be real line break when message is displayed
-        onResponse(messageId, content.replace(/\\n/g, '\n'), false);
+        onResponse(messageId, content.replace(/\\n/g, '\n').replace(/\\+"/g, '"'), false);
       } else if (command === 'd') {
         // console.log("AI usage:", JSON.parse(line.substring(2)));
         done = true;
@@ -111,5 +111,5 @@ export async function streamObject(supabaseUrl: string, request: ObjectRequest,
       }
     }
   }
-  onResponse(messageId, content.replace(/\\n/g, '\n'), true, toolInvocations);
+  onResponse(messageId, content.replace(/\\n/g, '\n').replace(/\\+"/g, '"'), true, toolInvocations);
 }
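
Both functions now target ${backendUrl}/ai/llm-object instead of the Supabase functions endpoint and send an explicit Content-Type header. A hedged sketch of the new call shape follows; the hunks show only part of the ObjectRequest interface, so the request below is intentionally incomplete and the values are hypothetical.

// Hypothetical values; ObjectRequest fields beyond behaviour and
// instructions are not visible in these hunks and are omitted here.
const request = {
  behaviour: 'You are a vocabulary coach.',
  instructions: 'Extract the five hardest words from the given text.',
} as ObjectRequest;

// Resolves with the parsed JSON body returned by POST {backendUrl}/ai/llm-object.
const result = await generateObject('https://backend.example.com', request, token);
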
package/src/core/controller/SettingsController.ts

@@ -23,11 +23,27 @@ export interface UserInfo {
   study_buddy: Buddy;
   story_genre: string;
   study_duration: number;
+  /**
+   * The 2 letter language code of the language the user speaks natively.
+   * With the function getLanguageName, the language name can be retrieved.
+   */
   mother_tongue: Language;
+  /**
+   * The language the user targets to learn.
+   */
+  target_language: Language;
   motivation_type: string;
   onboarding_completed: boolean;
   context_menu_on_select: boolean;
   user_name?: string;
+  /**
+   * ISO 3166-1 alpha-2 country code of user's location (exposed to plugins)
+   */
+  location_country: string;
+  /**
+   * Optional: nearest big city (>100,000) near user's location
+   */
+  location_city?: string;
 }
 
 export class SettingsController {
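
Combined with the ContextMenu change above, where client.plugin.getUserInfo() is now called synchronously, plugins can read the new fields directly. A short sketch, assuming a connected RimoriClient instance named client:

const user = client.plugin.getUserInfo();

// Two-letter language codes, per the doc comments above.
console.log(`native: ${user.mother_tongue}, learning: ${user.target_language}`);

// location_country is an ISO 3166-1 alpha-2 code; location_city may be undefined.
const place = user.location_city
  ? `${user.location_city}, ${user.location_country}`
  : user.location_country;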