@rimori/client 2.4.0-next.6 → 2.4.0-next.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53) hide show
  1. package/package.json +5 -1
  2. package/.github/workflows/create-release-branch.yml +0 -226
  3. package/.github/workflows/pre-release.yml +0 -126
  4. package/.github/workflows/release-on-merge.yml +0 -195
  5. package/.prettierignore +0 -35
  6. package/eslint.config.js +0 -53
  7. package/example/docs/devdocs.md +0 -241
  8. package/example/docs/overview.md +0 -29
  9. package/example/docs/userdocs.md +0 -126
  10. package/example/rimori.config.ts +0 -91
  11. package/example/worker/vite.config.ts +0 -26
  12. package/example/worker/worker.ts +0 -11
  13. package/prettier.config.js +0 -8
  14. package/src/cli/scripts/init/dev-registration.ts +0 -191
  15. package/src/cli/scripts/init/env-setup.ts +0 -44
  16. package/src/cli/scripts/init/file-operations.ts +0 -58
  17. package/src/cli/scripts/init/html-cleaner.ts +0 -45
  18. package/src/cli/scripts/init/main.ts +0 -176
  19. package/src/cli/scripts/init/package-setup.ts +0 -113
  20. package/src/cli/scripts/init/router-transformer.ts +0 -332
  21. package/src/cli/scripts/init/tailwind-config.ts +0 -66
  22. package/src/cli/scripts/init/vite-config.ts +0 -73
  23. package/src/cli/scripts/release/detect-translation-languages.ts +0 -37
  24. package/src/cli/scripts/release/release-config-upload.ts +0 -119
  25. package/src/cli/scripts/release/release-db-update.ts +0 -97
  26. package/src/cli/scripts/release/release-file-upload.ts +0 -138
  27. package/src/cli/scripts/release/release.ts +0 -85
  28. package/src/cli/types/DatabaseTypes.ts +0 -125
  29. package/src/controller/AIController.ts +0 -295
  30. package/src/controller/AccomplishmentController.ts +0 -188
  31. package/src/controller/AudioController.ts +0 -64
  32. package/src/controller/ObjectController.ts +0 -120
  33. package/src/controller/SettingsController.ts +0 -186
  34. package/src/controller/SharedContentController.ts +0 -365
  35. package/src/controller/TranslationController.ts +0 -136
  36. package/src/controller/VoiceController.ts +0 -33
  37. package/src/fromRimori/EventBus.ts +0 -382
  38. package/src/fromRimori/PluginTypes.ts +0 -214
  39. package/src/fromRimori/readme.md +0 -2
  40. package/src/index.ts +0 -19
  41. package/src/plugin/CommunicationHandler.ts +0 -291
  42. package/src/plugin/Logger.ts +0 -394
  43. package/src/plugin/RimoriClient.ts +0 -199
  44. package/src/plugin/StandaloneClient.ts +0 -127
  45. package/src/plugin/module/AIModule.ts +0 -77
  46. package/src/plugin/module/DbModule.ts +0 -67
  47. package/src/plugin/module/EventModule.ts +0 -192
  48. package/src/plugin/module/ExerciseModule.ts +0 -131
  49. package/src/plugin/module/PluginModule.ts +0 -114
  50. package/src/utils/difficultyConverter.ts +0 -15
  51. package/src/utils/endpoint.ts +0 -3
  52. package/src/worker/WorkerSetup.ts +0 -35
  53. package/tsconfig.json +0 -17
@@ -1,295 +0,0 @@
1
- import { Tool } from '../fromRimori/PluginTypes';
2
-
3
- export interface ToolInvocation {
4
- toolCallId: string;
5
- toolName: string;
6
- args: Record<string, string>;
7
- }
8
-
9
- export interface Message {
10
- id?: string;
11
- role: 'user' | 'assistant' | 'system';
12
- content: string;
13
- toolCalls?: ToolInvocation[];
14
- }
15
-
16
- export async function generateText(backendUrl: string, messages: Message[], tools: Tool[], token: string) {
17
- const response = await fetch(`${backendUrl}/ai/llm`, {
18
- method: 'POST',
19
- body: JSON.stringify({ messages, tools }),
20
- headers: { Authorization: `Bearer ${token}`, 'Content-Type': 'application/json' },
21
- });
22
-
23
- return await response.json();
24
- }
25
-
26
- export type OnLLMResponse = (
27
- id: string,
28
- response: string,
29
- finished: boolean,
30
- toolInvocations?: ToolInvocation[],
31
- ) => void;
32
-
33
- export async function streamChatGPT(
34
- backendUrl: string,
35
- messages: Message[],
36
- tools: Tool[],
37
- onResponse: OnLLMResponse,
38
- token: string,
39
- ) {
40
- const messageId = Math.random().toString(36).substring(3);
41
- const currentMessages: Message[] = [...messages];
42
-
43
- console.log('Starting streamChatGPT with:', {
44
- messageId,
45
- messageCount: messages.length,
46
- toolCount: tools.length,
47
- backendUrl,
48
- });
49
-
50
- while (true) {
51
- const messagesForApi = currentMessages.map(({ id, ...rest }) => rest);
52
-
53
- try {
54
- const response = await fetch(`${backendUrl}/ai/llm`, {
55
- method: 'POST',
56
- body: JSON.stringify({ messages: messagesForApi, tools, stream: true }),
57
- headers: { Authorization: `Bearer ${token}`, 'Content-Type': 'application/json' },
58
- });
59
-
60
- if (!response.ok) {
61
- throw new Error(`HTTP error! status: ${response.status}`);
62
- }
63
-
64
- if (!response.body) {
65
- console.error('No response body.');
66
- return;
67
- }
68
-
69
- const reader = response.body.getReader();
70
- const decoder = new TextDecoder('utf-8');
71
-
72
- let content = '';
73
- let done = false;
74
- const toolInvocations: { toolCallId: string; toolName: string; args: any }[] = [];
75
- let currentTextId = '';
76
- let isToolCallMode = false;
77
- let buffer = ''; // Buffer for incomplete chunks
78
-
79
- while (!done) {
80
- const { value, done: readerDone } = await reader.read();
81
-
82
- if (value) {
83
- const chunk = decoder.decode(value, { stream: true });
84
- buffer += chunk;
85
-
86
- // Split by lines, but handle incomplete lines
87
- const lines = buffer.split('\n');
88
-
89
- // Keep the last line in buffer if it's incomplete
90
- if (lines.length > 1) {
91
- buffer = lines.pop() || '';
92
- }
93
-
94
- for (const line of lines) {
95
- if (line.trim() === '') continue;
96
-
97
- // Handle the new streaming format
98
- if (line.startsWith('data: ')) {
99
- const dataStr = line.substring(6); // Remove 'data: ' prefix
100
-
101
- // Handle [DONE] marker
102
- if (dataStr === '[DONE]') {
103
- done = true;
104
- break;
105
- }
106
-
107
- try {
108
- const data = JSON.parse(dataStr);
109
-
110
- // Log the first message to understand the format
111
- if (!content && !isToolCallMode) {
112
- // console.log('First stream message received:', data);
113
- }
114
-
115
- switch (data.type) {
116
- case 'start':
117
- // Stream started, no action needed
118
- // console.log('Stream started');
119
- break;
120
-
121
- case 'start-step':
122
- // Step started, no action needed
123
- // console.log('Step started');
124
- break;
125
-
126
- case 'reasoning-start':
127
- // Reasoning started, no action needed
128
- console.log('Reasoning started:', data.id);
129
- break;
130
-
131
- case 'reasoning-end':
132
- // Reasoning ended, no action needed
133
- console.log('Reasoning ended:', data.id);
134
- break;
135
-
136
- case 'text-start':
137
- // Text generation started, store the ID
138
- currentTextId = data.id;
139
- console.log('Text generation started:', data.id);
140
- break;
141
-
142
- case 'text-delta':
143
- // Text delta received, append to content
144
- if (data.delta) {
145
- content += data.delta;
146
- onResponse(messageId, content, false);
147
- }
148
- break;
149
-
150
- case 'text-end':
151
- // Text generation ended
152
- console.log('Text generation ended:', data.id);
153
- break;
154
-
155
- case 'finish-step':
156
- // Step finished, no action needed
157
- // console.log('Step finished');
158
- break;
159
-
160
- case 'finish':
161
- // Stream finished
162
- // console.log('Stream finished');
163
- done = true;
164
- break;
165
-
166
- // Additional message types that might be present in the AI library
167
- case 'tool-call':
168
- case 'tool-input-available': //for now input calls should be handled the same way as tool calls
169
- // Tool call initiated
170
- console.log('Tool call initiated:', data);
171
- isToolCallMode = true;
172
- if (data.toolCallId && data.toolName && (data.args || data.input)) {
173
- toolInvocations.push({
174
- toolCallId: data.toolCallId,
175
- toolName: data.toolName,
176
- args: data.args || data.input,
177
- });
178
- }
179
- break;
180
-
181
- case 'tool-input-delta': //for now input calls should be handled the same way as tool calls
182
- case 'tool-call-delta':
183
- // Tool call delta (for streaming tool calls)
184
- console.log('Tool call delta:', data);
185
- break;
186
-
187
- case 'tool-call-end':
188
- // Tool call completed
189
- console.log('Tool call completed:', data);
190
- break;
191
-
192
- case 'tool-result':
193
- // Tool execution result
194
- console.log('Tool result:', data);
195
- break;
196
-
197
- case 'error':
198
- // Error occurred
199
- console.error('Stream error:', data);
200
- break;
201
-
202
- case 'usage':
203
- // Usage information
204
- console.log('Usage info:', data);
205
- break;
206
-
207
- case 'model':
208
- // Model information
209
- console.log('Model info:', data);
210
- break;
211
-
212
- case 'stop':
213
- // Stop signal
214
- console.log('Stop signal received');
215
- done = true;
216
- break;
217
-
218
- default:
219
- // Unknown type, log for debugging
220
- console.log('Unknown stream type:', data.type, data);
221
- break;
222
- }
223
- } catch (error) {
224
- console.error('Error parsing stream data:', error, dataStr);
225
- }
226
- }
227
- }
228
- }
229
-
230
- if (readerDone) {
231
- done = true;
232
- }
233
- }
234
-
235
- // Check if we have content or if this was a tool call response
236
- if (content || toolInvocations.length > 0) {
237
- currentMessages.push({
238
- id: messageId,
239
- role: 'assistant',
240
- content: content,
241
- toolCalls: toolInvocations.length > 0 ? toolInvocations : undefined,
242
- });
243
- }
244
-
245
- // Handle tool call scenario if tools were provided
246
- if (tools.length > 0 && toolInvocations.length > 0) {
247
- console.log('Tool calls detected, executing tools...');
248
-
249
- const toolResults: Message[] = [];
250
- for (const toolInvocation of toolInvocations) {
251
- const tool = tools.find((t) => t.name === toolInvocation.toolName);
252
- if (tool && tool.execute) {
253
- try {
254
- const result = await tool.execute(toolInvocation.args);
255
- toolResults.push({
256
- id: Math.random().toString(36).substring(3),
257
- role: 'user',
258
- content: `Tool '${toolInvocation.toolName}' returned: ${JSON.stringify(result)}`,
259
- });
260
- } catch (error) {
261
- console.error(`Error executing tool ${toolInvocation.toolName}:`, error);
262
- toolResults.push({
263
- id: Math.random().toString(36).substring(3),
264
- role: 'user',
265
- content: `Tool '${toolInvocation.toolName}' failed with error: ${error}`,
266
- });
267
- }
268
- }
269
- }
270
-
271
- if (toolResults.length > 0) {
272
- currentMessages.push(...toolResults);
273
- // Continue the loop to handle the next response
274
- continue;
275
- }
276
- }
277
-
278
- // Since the new format doesn't seem to support tool calls in the same way,
279
- // we'll assume the stream is complete when we reach the end
280
- // If tools are provided and no content was generated, this might indicate a tool call
281
- if (tools.length > 0 && !content && !isToolCallMode) {
282
- // This might be a tool call scenario, but we need more information
283
- // For now, we'll just finish the stream
284
- console.log('No content generated, but tools provided - might be tool call scenario');
285
- }
286
-
287
- onResponse(messageId, content, true, toolInvocations);
288
- return;
289
- } catch (error) {
290
- console.error('Error in streamChatGPT:', error);
291
- onResponse(messageId, `Error: ${error instanceof Error ? error.message : String(error)}`, true, []);
292
- return;
293
- }
294
- }
295
- }
@@ -1,188 +0,0 @@
1
- import { EventBus, EventBusMessage } from '../fromRimori/EventBus';
2
-
3
- export type AccomplishmentMessage = EventBusMessage<MicroAccomplishmentPayload>;
4
-
5
- export const skillCategories = ['reading', 'listening', 'speaking', 'writing', 'learning', 'community'] as const;
6
-
7
- interface BaseAccomplishmentPayload {
8
- type: 'micro' | 'macro';
9
- skillCategory: (typeof skillCategories)[number];
10
- /*
11
- what is the accomplishment? e.g. chapter, flashcard, story, etc.
12
- only one keyword per skill category, written in lowercase without spaces, numbers, or special characters
13
- */
14
- accomplishmentKeyword: string;
15
- // the human readable description of the accomplishment. Important for other plugin developers to understand the accomplishment.
16
- description: string;
17
- meta?: {
18
- //the key of the meta data in snake_case
19
- key: string;
20
- //the value of the meta data
21
- value: string | number | boolean;
22
- //the human readable description of the meta data. Important for other plugin developers to understand the meta data.
23
- description: string;
24
- }[];
25
- }
26
-
27
- export interface MicroAccomplishmentPayload extends BaseAccomplishmentPayload {
28
- type: 'micro';
29
- }
30
-
31
- export interface MacroAccomplishmentPayload extends BaseAccomplishmentPayload {
32
- type: 'macro';
33
- errorRatio: number;
34
- durationMinutes: number;
35
- }
36
-
37
- export type AccomplishmentPayload = MicroAccomplishmentPayload | MacroAccomplishmentPayload;
38
-
39
- export class AccomplishmentController {
40
- private pluginId: string;
41
-
42
- public constructor(pluginId: string) {
43
- this.pluginId = pluginId;
44
- }
45
-
46
- emitAccomplishment(payload: Omit<AccomplishmentPayload, 'type'>) {
47
- const accomplishmentPayload = {
48
- ...payload,
49
- type: 'durationMinutes' in payload ? 'macro' : 'micro',
50
- } as AccomplishmentPayload;
51
-
52
- if (!this.validateAccomplishment(accomplishmentPayload)) {
53
- return;
54
- }
55
-
56
- const sanitizedPayload = this.sanitizeAccomplishment(accomplishmentPayload);
57
-
58
- const topic = 'global.accomplishment.trigger' + (accomplishmentPayload.type === 'macro' ? 'Macro' : 'Micro');
59
-
60
- EventBus.emit(this.pluginId, topic, sanitizedPayload);
61
- }
62
-
63
- private validateAccomplishment(payload: AccomplishmentPayload): boolean {
64
- if (!skillCategories.includes(payload.skillCategory)) {
65
- throw new Error(`Invalid skill category: ${payload.skillCategory}`);
66
- }
67
-
68
- //regex validate accomplishmentKeyword
69
- if (!/^[0-9a-z_.-]+$/.test(payload.accomplishmentKeyword)) {
70
- throw new Error(
71
- `The accomplishment keyword: ${payload.accomplishmentKeyword} is invalid. Only lowercase letters, minuses, underscores and periods are allowed`,
72
- );
73
- }
74
-
75
- //description is required
76
- if (payload.description.length < 10) {
77
- throw new Error('Description is too short');
78
- }
79
-
80
- //check that the type is valid
81
- if (!['micro', 'macro'].includes(payload.type)) {
82
- throw new Error('Invalid accomplishment type ' + payload.type);
83
- }
84
-
85
- // disabled detection temporarelly to determine how long exercises normally are
86
- //durationMinutes is required
87
- // if (payload.type === 'macro' && payload.durationMinutes < 4) {
88
- // console.warn('The duration must be at least 4 minutes');
89
- // return false;
90
- // }
91
-
92
- //errorRatio is required
93
- if (payload.type === 'macro' && (payload.errorRatio < 0 || payload.errorRatio > 1)) {
94
- throw new Error('The error ratio must be between 0 and 1');
95
- }
96
-
97
- //regex check meta data key
98
- if (payload.meta) {
99
- payload.meta.forEach((meta) => {
100
- if (!/^[a-z_]+$/.test(meta.key)) {
101
- throw new Error('Invalid meta data key ' + meta.key + ', only lowercase letters and underscores are allowed');
102
- }
103
- });
104
- }
105
- return true;
106
- }
107
-
108
- private sanitizeAccomplishment(payload: AccomplishmentPayload) {
109
- payload.description = payload.description.replace(/[^\x20-\x7E]/g, '');
110
-
111
- payload.meta?.forEach((meta) => {
112
- meta.description = meta.description.replace(/[^\x20-\x7E]/g, '');
113
- });
114
-
115
- return payload;
116
- }
117
-
118
- private getDecoupledTopic(topic: string) {
119
- const [plugin, skillCategory, accomplishmentKeyword] = topic.split('.');
120
-
121
- return {
122
- plugin: plugin || '*',
123
- skillCategory: skillCategory || '*',
124
- accomplishmentKeyword: accomplishmentKeyword || '*',
125
- };
126
- }
127
-
128
- /**
129
- * Subscribe to accomplishment events
130
- * @param accomplishmentTopic - The topic of the accomplishment event. The pattern can be any pattern of plugin.skillCategory.accomplishmentKeyword or an * as wildcard for any plugin, skill category or accomplishment keyword
131
- * @param callback - The callback function to be called when the accomplishment event is triggered
132
- */
133
- subscribe(
134
- accomplishmentTopics = '*' as string | string[],
135
- callback: (payload: EventBusMessage<AccomplishmentPayload>) => void,
136
- ) {
137
- if (typeof accomplishmentTopics === 'string') {
138
- accomplishmentTopics = [accomplishmentTopics];
139
- }
140
-
141
- accomplishmentTopics.forEach((accomplishmentTopic) => {
142
- const topicLength = accomplishmentTopic.split('.').length;
143
- if (topicLength === 1) {
144
- accomplishmentTopic += '.*.*';
145
- } else if (topicLength === 2) {
146
- accomplishmentTopic += '.*';
147
- } else if (topicLength !== 3) {
148
- throw new Error(
149
- 'Invalid accomplishment topic pattern. The pattern must be plugin.skillCategory.accomplishmentKeyword or an * as wildcard for any plugin, skill category or accomplishment keyword',
150
- );
151
- }
152
-
153
- EventBus.on<AccomplishmentPayload>(
154
- ['global.accomplishment.triggerMicro', 'global.accomplishment.triggerMacro'],
155
- (event) => {
156
- const { plugin, skillCategory, accomplishmentKeyword } = this.getDecoupledTopic(accomplishmentTopic);
157
-
158
- if (plugin !== '*' && event.sender !== plugin) return;
159
- if (skillCategory !== '*' && event.data.skillCategory !== skillCategory) return;
160
- if (accomplishmentKeyword !== '*' && event.data.accomplishmentKeyword !== accomplishmentKeyword) return;
161
-
162
- callback(event);
163
- },
164
- [this.pluginId],
165
- );
166
- });
167
- }
168
- }
169
-
170
// Example usage:
//
// const accomplishmentController = new AccomplishmentController('my-plugin');
//
// accomplishmentController.subscribe('*', (payload) => {
//   console.log(payload);
// });
//
// // `durationMinutes` makes this a macro accomplishment, so `errorRatio` is required too.
// accomplishmentController.emitAccomplishment({
//   skillCategory: 'reading',
//   accomplishmentKeyword: 'chapter',
//   description: 'Read chapter 1 of the book',
//   durationMinutes: 10,
//   errorRatio: 0,
//   meta: [
//     {
//       key: 'book',
//       value: 'The Great Gatsby',
//       description: 'The book I read',
//     },
//   ],
// });
@@ -1,64 +0,0 @@
1
- import { EventBus } from '../fromRimori/EventBus';
2
-
3
- /**
4
- * AudioController is a class that provides methods to record audio. It is a wrapper around the Capacitor Voice Recorder plugin. For more information, see https://github.com/tchvu3/capacitor-voice-recorder.
5
- *
6
- * @example
7
- * const audioController = new AudioController();
8
- * await audioController.startRecording();
9
- */
10
- export class AudioController {
11
- private pluginId: string;
12
-
13
- constructor(pluginId: string) {
14
- this.pluginId = pluginId;
15
- }
16
-
17
- /**
18
- * Start the recording.
19
- *
20
- * @example
21
- * const audioController = new AudioController();
22
- * await audioController.startRecording();
23
- * @returns void
24
- */
25
- public async startRecording(): Promise<void> {
26
- EventBus.emit(this.pluginId, 'global.microphone.triggerStartRecording');
27
- }
28
-
29
- /**
30
- * Stop the recording and return the audio data.
31
- * @returns The audio data.
32
- *
33
- * @example
34
- * const audioRef = new Audio(`data:${mimeType};base64,${base64Sound}`)
35
- * audioRef.oncanplaythrough = () => audioRef.play()
36
- * audioRef.load()
37
- */
38
- public async stopRecording(): Promise<{ recording: Blob; msDuration: number; mimeType: string }> {
39
- const result = await EventBus.request<{ recording: Blob; msDuration: number; mimeType: string }>(
40
- this.pluginId,
41
- 'global.microphone.triggerStopRecording',
42
- );
43
-
44
- return result.data;
45
- }
46
-
47
- public async pauseRecording(): Promise<boolean> {
48
- const result = await EventBus.request<boolean>(this.pluginId, 'global.microphone.triggerPauseRecording');
49
- return result.data;
50
- }
51
-
52
- public async resumeRecording(): Promise<boolean> {
53
- const result = await EventBus.request<boolean>(this.pluginId, 'global.microphone.triggerResumeRecording');
54
- return result.data;
55
- }
56
-
57
- public async getCurrentStatus(): Promise<'RECORDING' | 'PAUSED' | 'NONE'> {
58
- const result = await EventBus.request<'RECORDING' | 'PAUSED' | 'NONE'>(
59
- this.pluginId,
60
- 'global.microphone.triggerGetCurrentStatus',
61
- );
62
- return result.data;
63
- }
64
- }
@@ -1,120 +0,0 @@
1
- type PrimitiveType = 'string' | 'number' | 'boolean';
2
-
3
- // This is the type that can appear in the `type` property
4
- type ObjectToolParameterType =
5
- | PrimitiveType
6
- | { [key: string]: ObjectToolParameter } // for nested objects
7
- | [{ [key: string]: ObjectToolParameter }]; // for arrays of objects (notice the tuple type)
8
-
9
- interface ObjectToolParameter {
10
- type: ObjectToolParameterType;
11
- description?: string;
12
- enum?: string[];
13
- optional?: boolean;
14
- }
15
-
16
- /**
17
- * The tools that the AI can use.
18
- *
19
- * The key is the name of the tool.
20
- * The value is the parameter of the tool.
21
- *
22
- */
23
- export type ObjectTool = {
24
- [key: string]: ObjectToolParameter;
25
- };
26
-
27
- export interface ObjectRequest {
28
- /**
29
- * The tools that the AI can use.
30
- */
31
- tool: ObjectTool;
32
- /**
33
- * High level instructions for the AI to follow. Behaviour, tone, restrictions, etc.
34
- * Example: "Act like a recipe writer."
35
- */
36
- behaviour?: string;
37
- /**
38
- * The specific instruction for the AI to follow.
39
- * Example: "Generate a recipe using chicken, rice and vegetables."
40
- */
41
- instructions: string;
42
- }
43
-
44
- export async function generateObject<T = any>(backendUrl: string, request: ObjectRequest, token: string): Promise<T> {
45
- return await fetch(`${backendUrl}/ai/llm-object`, {
46
- method: 'POST',
47
- body: JSON.stringify({
48
- stream: false,
49
- tool: request.tool,
50
- behaviour: request.behaviour,
51
- instructions: request.instructions,
52
- }),
53
- headers: { Authorization: `Bearer ${token}`, 'Content-Type': 'application/json' },
54
- }).then((response) => response.json());
55
- }
56
-
57
- // TODO adjust stream to work with object
58
- export type OnLLMResponse = (id: string, response: string, finished: boolean, toolInvocations?: any[]) => void;
59
-
60
- export async function streamObject(
61
- backendUrl: string,
62
- request: ObjectRequest,
63
- onResponse: OnLLMResponse,
64
- token: string,
65
- ) {
66
- const messageId = Math.random().toString(36).substring(3);
67
- const response = await fetch(`${backendUrl}/ai/llm-object`, {
68
- method: 'POST',
69
- body: JSON.stringify({
70
- stream: true,
71
- tools: request.tool,
72
- systemInstructions: request.behaviour,
73
- secondaryInstructions: request.instructions,
74
- }),
75
- headers: { Authorization: `Bearer ${token}`, 'Content-Type': 'application/json' },
76
- });
77
-
78
- if (!response.body) {
79
- console.error('No response body.');
80
- return;
81
- }
82
-
83
- const reader = response.body.getReader();
84
- const decoder = new TextDecoder('utf-8');
85
-
86
- let content = '';
87
- let done = false;
88
- const toolInvocations: any[] = [];
89
- while (!done) {
90
- const { value } = await reader.read();
91
-
92
- if (value) {
93
- const chunk = decoder.decode(value, { stream: true });
94
- const lines = chunk.split('\n').filter((line) => line.trim() !== '');
95
-
96
- for (const line of lines) {
97
- const data = line.substring(3, line.length - 1);
98
- const command = line.substring(0, 1);
99
- // console.log("data: ", { line, data, command });
100
-
101
- if (command === '0') {
102
- content += data;
103
- // console.log("AI response:", content);
104
-
105
- //content \n\n should be real line break when message is displayed
106
- onResponse(messageId, content.replace(/\\n/g, '\n').replace(/\\+"/g, '"'), false);
107
- } else if (command === 'd') {
108
- // console.log("AI usage:", JSON.parse(line.substring(2)));
109
- done = true;
110
- break;
111
- } else if (command === '9') {
112
- // console.log("tool call:", JSON.parse(line.substring(2)));
113
- // console.log("tools", tools);
114
- toolInvocations.push(JSON.parse(line.substring(2)));
115
- }
116
- }
117
- }
118
- }
119
- onResponse(messageId, content.replace(/\\n/g, '\n').replace(/\\+"/g, '"'), true, toolInvocations);
120
- }