@rimori/client 2.5.10 → 2.5.11-next.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/controller/SettingsController.js +2 -2
- package/dist/controller/TranslationController.d.ts +0 -2
- package/dist/controller/TranslationController.js +4 -3
- package/dist/controller/VoiceController.d.ts +1 -0
- package/dist/fromRimori/PluginTypes.d.ts +6 -0
- package/dist/index.d.ts +1 -1
- package/dist/plugin/module/AIModule.d.ts +78 -8
- package/dist/plugin/module/AIModule.js +152 -14
- package/package.json +1 -1
- package/dist/controller/AIController.d.ts +0 -15
- package/dist/controller/AIController.js +0 -255
- package/dist/controller/ObjectController.d.ts +0 -42
- package/dist/controller/ObjectController.js +0 -105
|
@@ -63,8 +63,8 @@ export class SettingsController {
|
|
|
63
63
|
.eq('guild_id', this.guild.id)
|
|
64
64
|
.eq('is_guild_setting', isGuildSetting);
|
|
65
65
|
const { data: updatedRows, error: updateError } = yield (isGuildSetting
|
|
66
|
-
? updateQuery.is('user_id', null).select(
|
|
67
|
-
: updateQuery.select(
|
|
66
|
+
? updateQuery.is('user_id', null).select()
|
|
67
|
+
: updateQuery.select());
|
|
68
68
|
if (updateError) {
|
|
69
69
|
if (updateError.code === '42501' || ((_a = updateError.message) === null || _a === void 0 ? void 0 : _a.includes('policy'))) {
|
|
70
70
|
throw new Error(`Cannot set ${isGuildSetting ? 'guild' : 'user'} settings: Permission denied.`);
|
|
@@ -1,7 +1,5 @@
|
|
|
1
1
|
import { ThirdPartyModule, TOptions } from 'i18next';
|
|
2
|
-
import { ObjectRequest } from './ObjectController';
|
|
3
2
|
import { AIModule } from '../plugin/module/AIModule';
|
|
4
|
-
export type AIObjectGenerator = <T>(request: ObjectRequest) => Promise<T>;
|
|
5
3
|
/**
|
|
6
4
|
* Translator class for handling internationalization
|
|
7
5
|
*/
|
|
@@ -158,9 +158,10 @@ export class Translator {
|
|
|
158
158
|
if (!this.ai || this.currentLanguage === 'en')
|
|
159
159
|
return text;
|
|
160
160
|
const response = yield this.ai.getObject({
|
|
161
|
-
|
|
162
|
-
|
|
163
|
-
|
|
161
|
+
systemPrompt: 'You are a translation engine. Return only the translated text.' + additionalInstructions,
|
|
162
|
+
userPrompt: `Translate the following text into ${this.currentLanguage}: ${text}`,
|
|
163
|
+
cache: true,
|
|
164
|
+
responseSchema: {
|
|
164
165
|
translation: {
|
|
165
166
|
type: 'string',
|
|
166
167
|
description: `The translation of the input text into ${this.currentLanguage}.`,
|
package/dist/index.d.ts
CHANGED
|
@@ -12,7 +12,7 @@ export type { TOptions } from 'i18next';
|
|
|
12
12
|
export type { SharedContent, BasicSharedContent, ContentStatus } from './plugin/module/SharedContentController';
|
|
13
13
|
export type { Exercise } from './plugin/module/ExerciseModule';
|
|
14
14
|
export type { UserInfo, Language, UserRole } from './controller/SettingsController';
|
|
15
|
-
export type { Message, ToolInvocation } from './
|
|
15
|
+
export type { Message, ToolInvocation } from './plugin/module/AIModule';
|
|
16
16
|
export type { TriggerAction } from './plugin/module/ExerciseModule';
|
|
17
17
|
export type { MacroAccomplishmentPayload, MicroAccomplishmentPayload } from './controller/AccomplishmentController';
|
|
18
18
|
export type { EventBusMessage } from './fromRimori/EventBus';
|
|
@@ -1,7 +1,56 @@
|
|
|
1
|
-
import { RimoriCommunicationHandler, RimoriInfo } from '../CommunicationHandler';
|
|
2
|
-
import { Message, OnLLMResponse } from '../../controller/AIController';
|
|
3
|
-
import { ObjectRequest } from '../../controller/ObjectController';
|
|
4
1
|
import { Tool } from '../../fromRimori/PluginTypes';
|
|
2
|
+
import { RimoriCommunicationHandler, RimoriInfo } from '../CommunicationHandler';
|
|
3
|
+
export type OnStreamedObjectResult<T = any> = (result: T, isLoading: boolean) => void;
|
|
4
|
+
type PrimitiveType = 'string' | 'number' | 'boolean';
|
|
5
|
+
type ObjectToolParameterType = PrimitiveType | {
|
|
6
|
+
[key: string]: ObjectToolParameter;
|
|
7
|
+
} | [{
|
|
8
|
+
[key: string]: ObjectToolParameter;
|
|
9
|
+
}];
|
|
10
|
+
interface ObjectToolParameter {
|
|
11
|
+
type: ObjectToolParameterType;
|
|
12
|
+
description?: string;
|
|
13
|
+
enum?: string[];
|
|
14
|
+
optional?: boolean;
|
|
15
|
+
}
|
|
16
|
+
/**
|
|
17
|
+
* The tools that the AI can use.
|
|
18
|
+
*
|
|
19
|
+
* The key is the name of the tool.
|
|
20
|
+
* The value is the parameter of the tool.
|
|
21
|
+
*
|
|
22
|
+
*/
|
|
23
|
+
export type ObjectTool = {
|
|
24
|
+
[key: string]: ObjectToolParameter;
|
|
25
|
+
};
|
|
26
|
+
export interface ToolInvocation {
|
|
27
|
+
toolCallId: string;
|
|
28
|
+
toolName: string;
|
|
29
|
+
args: Record<string, string>;
|
|
30
|
+
}
|
|
31
|
+
export interface Message {
|
|
32
|
+
id?: string;
|
|
33
|
+
role: 'user' | 'assistant' | 'system';
|
|
34
|
+
content: string;
|
|
35
|
+
toolCalls?: ToolInvocation[];
|
|
36
|
+
}
|
|
37
|
+
export type OnLLMResponse = (id: string, response: string, finished: boolean, toolInvocations?: ToolInvocation[]) => void;
|
|
38
|
+
export interface ObjectRequest {
|
|
39
|
+
/**
|
|
40
|
+
* The tools that the AI can use.
|
|
41
|
+
*/
|
|
42
|
+
tool: ObjectTool;
|
|
43
|
+
/**
|
|
44
|
+
* High level instructions for the AI to follow. Behaviour, tone, restrictions, etc.
|
|
45
|
+
* Example: "Act like a recipe writer."
|
|
46
|
+
*/
|
|
47
|
+
behaviour?: string;
|
|
48
|
+
/**
|
|
49
|
+
* The specific instruction for the AI to follow.
|
|
50
|
+
* Example: "Generate a recipe using chicken, rice and vegetables."
|
|
51
|
+
*/
|
|
52
|
+
instructions: string;
|
|
53
|
+
}
|
|
5
54
|
/**
|
|
6
55
|
* Controller for AI-related operations.
|
|
7
56
|
* Provides access to text generation, voice synthesis, and object generation.
|
|
@@ -15,41 +64,62 @@ export declare class AIModule {
|
|
|
15
64
|
* Generate text from messages using AI.
|
|
16
65
|
* @param messages The messages to generate text from.
|
|
17
66
|
* @param tools Optional tools to use for generation.
|
|
67
|
+
* @param cache Whether to cache the result (default: false).
|
|
18
68
|
* @returns The generated text.
|
|
19
69
|
*/
|
|
20
|
-
getText(messages: Message[], tools?: Tool[]): Promise<string>;
|
|
70
|
+
getText(messages: Message[], tools?: Tool[], cache?: boolean): Promise<string>;
|
|
21
71
|
/**
|
|
22
72
|
* Stream text generation from messages using AI.
|
|
23
73
|
* @param messages The messages to generate text from.
|
|
24
74
|
* @param onMessage Callback for each message chunk.
|
|
25
75
|
* @param tools Optional tools to use for generation.
|
|
76
|
+
* @param cache Whether to cache the result (default: false).
|
|
26
77
|
*/
|
|
27
|
-
getSteamedText(messages: Message[], onMessage: OnLLMResponse, tools?: Tool[]): Promise<void>;
|
|
78
|
+
getSteamedText(messages: Message[], onMessage: OnLLMResponse, tools?: Tool[], cache?: boolean): Promise<void>;
|
|
28
79
|
/**
|
|
29
80
|
* Generate voice audio from text using AI.
|
|
30
81
|
* @param text The text to convert to voice.
|
|
31
82
|
* @param voice The voice to use (default: 'alloy').
|
|
32
83
|
* @param speed The speed of the voice (default: 1).
|
|
33
84
|
* @param language Optional language for the voice.
|
|
85
|
+
* @param cache Whether to cache the result (default: false).
|
|
34
86
|
* @returns The generated audio as a Blob.
|
|
35
87
|
*/
|
|
36
|
-
getVoice(text: string, voice?: string, speed?: number, language?: string): Promise<Blob>;
|
|
88
|
+
getVoice(text: string, voice?: string, speed?: number, language?: string, cache?: boolean): Promise<Blob>;
|
|
37
89
|
/**
|
|
38
90
|
* Convert voice audio to text using AI.
|
|
39
91
|
* @param file The audio file to convert.
|
|
40
92
|
* @returns The transcribed text.
|
|
41
93
|
*/
|
|
42
94
|
getTextFromVoice(file: Blob): Promise<string>;
|
|
95
|
+
private getChatMessage;
|
|
43
96
|
/**
|
|
44
97
|
* Generate a structured object from a request using AI.
|
|
45
98
|
* @param request The object generation request.
|
|
46
99
|
* @returns The generated object.
|
|
47
100
|
*/
|
|
48
|
-
getObject<T = any>(
|
|
101
|
+
getObject<T = any>(params: {
|
|
102
|
+
systemPrompt: string;
|
|
103
|
+
responseSchema: ObjectTool;
|
|
104
|
+
userPrompt?: string;
|
|
105
|
+
cache?: boolean;
|
|
106
|
+
tools?: Tool[];
|
|
107
|
+
}): Promise<T>;
|
|
49
108
|
/**
|
|
50
109
|
* Generate a streamed structured object from a request using AI.
|
|
51
110
|
* @param request The object generation request.
|
|
52
111
|
* @param onResult Callback for each result chunk.
|
|
112
|
+
* @param cache Whether to cache the result (default: false).
|
|
53
113
|
*/
|
|
54
|
-
getStreamedObject<T = any>(
|
|
114
|
+
getStreamedObject<T = any>(params: {
|
|
115
|
+
systemPrompt: string;
|
|
116
|
+
responseSchema: ObjectTool;
|
|
117
|
+
userPrompt?: string;
|
|
118
|
+
onResult: OnStreamedObjectResult<T>;
|
|
119
|
+
cache?: boolean;
|
|
120
|
+
tools?: Tool[];
|
|
121
|
+
}): Promise<void>;
|
|
122
|
+
private streamObject;
|
|
123
|
+
private sendToolResult;
|
|
55
124
|
}
|
|
125
|
+
export {};
|
|
@@ -7,8 +7,6 @@ var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, ge
|
|
|
7
7
|
step((generator = generator.apply(thisArg, _arguments || [])).next());
|
|
8
8
|
});
|
|
9
9
|
};
|
|
10
|
-
import { generateText, streamChatGPT } from '../../controller/AIController';
|
|
11
|
-
import { generateObject, streamObject } from '../../controller/ObjectController';
|
|
12
10
|
import { getSTTResponse, getTTSResponse } from '../../controller/VoiceController';
|
|
13
11
|
/**
|
|
14
12
|
* Controller for AI-related operations.
|
|
@@ -27,11 +25,22 @@ export class AIModule {
|
|
|
27
25
|
* Generate text from messages using AI.
|
|
28
26
|
* @param messages The messages to generate text from.
|
|
29
27
|
* @param tools Optional tools to use for generation.
|
|
28
|
+
* @param cache Whether to cache the result (default: false).
|
|
30
29
|
* @returns The generated text.
|
|
31
30
|
*/
|
|
32
|
-
getText(
|
|
33
|
-
return __awaiter(this,
|
|
34
|
-
|
|
31
|
+
getText(messages_1, tools_1) {
|
|
32
|
+
return __awaiter(this, arguments, void 0, function* (messages, tools, cache = false) {
|
|
33
|
+
const { result } = yield this.streamObject({
|
|
34
|
+
cache,
|
|
35
|
+
tools,
|
|
36
|
+
messages,
|
|
37
|
+
responseSchema: {
|
|
38
|
+
result: {
|
|
39
|
+
type: 'string',
|
|
40
|
+
},
|
|
41
|
+
},
|
|
42
|
+
});
|
|
43
|
+
return result;
|
|
35
44
|
});
|
|
36
45
|
}
|
|
37
46
|
/**
|
|
@@ -39,10 +48,23 @@ export class AIModule {
|
|
|
39
48
|
* @param messages The messages to generate text from.
|
|
40
49
|
* @param onMessage Callback for each message chunk.
|
|
41
50
|
* @param tools Optional tools to use for generation.
|
|
51
|
+
* @param cache Whether to cache the result (default: false).
|
|
42
52
|
*/
|
|
43
|
-
getSteamedText(
|
|
44
|
-
return __awaiter(this,
|
|
45
|
-
|
|
53
|
+
getSteamedText(messages_1, onMessage_1, tools_1) {
|
|
54
|
+
return __awaiter(this, arguments, void 0, function* (messages, onMessage, tools, cache = false) {
|
|
55
|
+
const messageId = Math.random().toString(36).substring(3);
|
|
56
|
+
const { result } = yield this.streamObject({
|
|
57
|
+
cache,
|
|
58
|
+
tools,
|
|
59
|
+
messages,
|
|
60
|
+
responseSchema: {
|
|
61
|
+
result: {
|
|
62
|
+
type: 'string',
|
|
63
|
+
},
|
|
64
|
+
},
|
|
65
|
+
onResult: ({ result }) => onMessage(messageId, result, false),
|
|
66
|
+
});
|
|
67
|
+
onMessage(messageId, result, true);
|
|
46
68
|
});
|
|
47
69
|
}
|
|
48
70
|
/**
|
|
@@ -51,11 +73,12 @@ export class AIModule {
|
|
|
51
73
|
* @param voice The voice to use (default: 'alloy').
|
|
52
74
|
* @param speed The speed of the voice (default: 1).
|
|
53
75
|
* @param language Optional language for the voice.
|
|
76
|
+
* @param cache Whether to cache the result (default: false).
|
|
54
77
|
* @returns The generated audio as a Blob.
|
|
55
78
|
*/
|
|
56
79
|
getVoice(text_1) {
|
|
57
|
-
return __awaiter(this, arguments, void 0, function* (text, voice = 'alloy', speed = 1, language) {
|
|
58
|
-
return getTTSResponse(this.backendUrl, { input: text, voice, speed, language }, this.token);
|
|
80
|
+
return __awaiter(this, arguments, void 0, function* (text, voice = 'alloy', speed = 1, language, cache = false) {
|
|
81
|
+
return getTTSResponse(this.backendUrl, { input: text, voice, speed, language, cache }, this.token);
|
|
59
82
|
});
|
|
60
83
|
}
|
|
61
84
|
/**
|
|
@@ -68,24 +91,139 @@ export class AIModule {
|
|
|
68
91
|
return getSTTResponse(this.backendUrl, file, this.token);
|
|
69
92
|
});
|
|
70
93
|
}
|
|
94
|
+
getChatMessage(systemPrompt, userPrompt) {
|
|
95
|
+
const messages = [{ role: 'system', content: systemPrompt }];
|
|
96
|
+
if (userPrompt) {
|
|
97
|
+
messages.push({ role: 'user', content: userPrompt });
|
|
98
|
+
}
|
|
99
|
+
return messages;
|
|
100
|
+
}
|
|
71
101
|
/**
|
|
72
102
|
* Generate a structured object from a request using AI.
|
|
73
103
|
* @param request The object generation request.
|
|
74
104
|
* @returns The generated object.
|
|
75
105
|
*/
|
|
76
|
-
getObject(
|
|
106
|
+
getObject(params) {
|
|
77
107
|
return __awaiter(this, void 0, void 0, function* () {
|
|
78
|
-
|
|
108
|
+
const { systemPrompt, responseSchema, userPrompt, cache = false, tools = [] } = params;
|
|
109
|
+
return yield this.streamObject({
|
|
110
|
+
responseSchema,
|
|
111
|
+
messages: this.getChatMessage(systemPrompt, userPrompt),
|
|
112
|
+
cache,
|
|
113
|
+
tools,
|
|
114
|
+
});
|
|
79
115
|
});
|
|
80
116
|
}
|
|
81
117
|
/**
|
|
82
118
|
* Generate a streamed structured object from a request using AI.
|
|
83
119
|
* @param request The object generation request.
|
|
84
120
|
* @param onResult Callback for each result chunk.
|
|
121
|
+
* @param cache Whether to cache the result (default: false).
|
|
85
122
|
*/
|
|
86
|
-
getStreamedObject(
|
|
123
|
+
getStreamedObject(params) {
|
|
124
|
+
return __awaiter(this, void 0, void 0, function* () {
|
|
125
|
+
const { systemPrompt, responseSchema, userPrompt, onResult, cache = false, tools = [] } = params;
|
|
126
|
+
yield this.streamObject({
|
|
127
|
+
responseSchema,
|
|
128
|
+
messages: this.getChatMessage(systemPrompt, userPrompt),
|
|
129
|
+
onResult,
|
|
130
|
+
cache,
|
|
131
|
+
tools,
|
|
132
|
+
});
|
|
133
|
+
});
|
|
134
|
+
}
|
|
135
|
+
streamObject(params) {
|
|
136
|
+
return __awaiter(this, void 0, void 0, function* () {
|
|
137
|
+
console.log('todo: move this function into the AI Module');
|
|
138
|
+
const { messages, responseSchema, onResult = () => null, cache = false, tools = [] } = params;
|
|
139
|
+
const chatMessages = messages.map((message, index) => (Object.assign(Object.assign({}, message), { id: `${index + 1}` })));
|
|
140
|
+
const response = yield fetch(`${this.backendUrl}/ai/llm`, {
|
|
141
|
+
method: 'POST',
|
|
142
|
+
body: JSON.stringify({
|
|
143
|
+
stream: true,
|
|
144
|
+
messages: chatMessages,
|
|
145
|
+
responseSchema,
|
|
146
|
+
cache,
|
|
147
|
+
tools,
|
|
148
|
+
}),
|
|
149
|
+
headers: { Authorization: `Bearer ${this.token}`, 'Content-Type': 'application/json' },
|
|
150
|
+
});
|
|
151
|
+
if (!response.ok) {
|
|
152
|
+
throw new Error(`Failed to stream object: ${response.status} ${response.statusText}`);
|
|
153
|
+
}
|
|
154
|
+
if (!response.body) {
|
|
155
|
+
throw new Error('No response body.');
|
|
156
|
+
}
|
|
157
|
+
const reader = response.body.getReader();
|
|
158
|
+
const decoder = new TextDecoder('utf-8');
|
|
159
|
+
let currentObject = {};
|
|
160
|
+
let isLoading = true;
|
|
161
|
+
while (isLoading) {
|
|
162
|
+
//wait 50ms to not overload the CPU
|
|
163
|
+
yield new Promise((resolve) => setTimeout(resolve, 30));
|
|
164
|
+
const { value, done: readerDone } = yield reader.read();
|
|
165
|
+
if (readerDone) {
|
|
166
|
+
isLoading = false;
|
|
167
|
+
onResult(currentObject, false);
|
|
168
|
+
return currentObject;
|
|
169
|
+
}
|
|
170
|
+
//the check needs to be behind readerDone because in closed connections the value is undefined
|
|
171
|
+
if (!value)
|
|
172
|
+
continue;
|
|
173
|
+
const chunk = decoder.decode(value, { stream: true });
|
|
174
|
+
const lines = chunk.split('\n').filter((line) => line.trim());
|
|
175
|
+
for (const line of lines) {
|
|
176
|
+
const command = line.substring(0, 5);
|
|
177
|
+
const dataStr = line.substring(5).trim();
|
|
178
|
+
if (dataStr === '[DONE]') {
|
|
179
|
+
isLoading = false;
|
|
180
|
+
onResult(currentObject, false);
|
|
181
|
+
return currentObject;
|
|
182
|
+
}
|
|
183
|
+
if (command === 'data:') {
|
|
184
|
+
currentObject = JSON.parse(dataStr);
|
|
185
|
+
onResult(currentObject, true);
|
|
186
|
+
}
|
|
187
|
+
else if (command === 'tool:') {
|
|
188
|
+
const { toolCallId, toolName, args } = JSON.parse(dataStr);
|
|
189
|
+
const tool = tools.find((tool) => tool.name === toolName);
|
|
190
|
+
if (tool && tool.execute) {
|
|
191
|
+
const result = yield tool.execute(args);
|
|
192
|
+
// Send the result to the backend
|
|
193
|
+
yield this.sendToolResult(toolCallId, result);
|
|
194
|
+
}
|
|
195
|
+
else if (tool && !tool.execute) {
|
|
196
|
+
console.error('Tool found but has no execute function:', toolName);
|
|
197
|
+
}
|
|
198
|
+
else {
|
|
199
|
+
console.error('Tool not found:', toolName);
|
|
200
|
+
}
|
|
201
|
+
}
|
|
202
|
+
else if (command === 'error') {
|
|
203
|
+
//error has 5 letters + the colon so we need to remove one character of the data string to get the error message
|
|
204
|
+
console.error('Error:', dataStr.substring(1));
|
|
205
|
+
}
|
|
206
|
+
else if (command === 'info:') {
|
|
207
|
+
//ignore info messages
|
|
208
|
+
}
|
|
209
|
+
else {
|
|
210
|
+
console.error('Unknown stream data:', line);
|
|
211
|
+
}
|
|
212
|
+
}
|
|
213
|
+
}
|
|
214
|
+
return currentObject;
|
|
215
|
+
});
|
|
216
|
+
}
|
|
217
|
+
sendToolResult(toolCallId, result) {
|
|
87
218
|
return __awaiter(this, void 0, void 0, function* () {
|
|
88
|
-
|
|
219
|
+
yield fetch(`${this.backendUrl}/ai/llm/tool_result`, {
|
|
220
|
+
method: 'POST',
|
|
221
|
+
body: JSON.stringify({
|
|
222
|
+
toolCallId,
|
|
223
|
+
result: result !== null && result !== void 0 ? result : '[DONE]',
|
|
224
|
+
}),
|
|
225
|
+
headers: { Authorization: `Bearer ${this.token} `, 'Content-Type': 'application/json' },
|
|
226
|
+
});
|
|
89
227
|
});
|
|
90
228
|
}
|
|
91
229
|
}
|
package/package.json
CHANGED
|
@@ -1,15 +0,0 @@
|
|
|
1
|
-
import { Tool } from '../fromRimori/PluginTypes';
|
|
2
|
-
export interface ToolInvocation {
|
|
3
|
-
toolCallId: string;
|
|
4
|
-
toolName: string;
|
|
5
|
-
args: Record<string, string>;
|
|
6
|
-
}
|
|
7
|
-
export interface Message {
|
|
8
|
-
id?: string;
|
|
9
|
-
role: 'user' | 'assistant' | 'system';
|
|
10
|
-
content: string;
|
|
11
|
-
toolCalls?: ToolInvocation[];
|
|
12
|
-
}
|
|
13
|
-
export declare function generateText(backendUrl: string, messages: Message[], tools: Tool[], token: string): Promise<any>;
|
|
14
|
-
export type OnLLMResponse = (id: string, response: string, finished: boolean, toolInvocations?: ToolInvocation[]) => void;
|
|
15
|
-
export declare function streamChatGPT(backendUrl: string, messages: Message[], tools: Tool[], onResponse: OnLLMResponse, token: string): Promise<void>;
|
|
@@ -1,255 +0,0 @@
|
|
|
1
|
-
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
|
|
2
|
-
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
|
|
3
|
-
return new (P || (P = Promise))(function (resolve, reject) {
|
|
4
|
-
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
|
|
5
|
-
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
|
|
6
|
-
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
|
|
7
|
-
step((generator = generator.apply(thisArg, _arguments || [])).next());
|
|
8
|
-
});
|
|
9
|
-
};
|
|
10
|
-
var __rest = (this && this.__rest) || function (s, e) {
|
|
11
|
-
var t = {};
|
|
12
|
-
for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p) && e.indexOf(p) < 0)
|
|
13
|
-
t[p] = s[p];
|
|
14
|
-
if (s != null && typeof Object.getOwnPropertySymbols === "function")
|
|
15
|
-
for (var i = 0, p = Object.getOwnPropertySymbols(s); i < p.length; i++) {
|
|
16
|
-
if (e.indexOf(p[i]) < 0 && Object.prototype.propertyIsEnumerable.call(s, p[i]))
|
|
17
|
-
t[p[i]] = s[p[i]];
|
|
18
|
-
}
|
|
19
|
-
return t;
|
|
20
|
-
};
|
|
21
|
-
export function generateText(backendUrl, messages, tools, token) {
|
|
22
|
-
return __awaiter(this, void 0, void 0, function* () {
|
|
23
|
-
const response = yield fetch(`${backendUrl}/ai/llm`, {
|
|
24
|
-
method: 'POST',
|
|
25
|
-
body: JSON.stringify({ messages, tools }),
|
|
26
|
-
headers: { Authorization: `Bearer ${token}`, 'Content-Type': 'application/json' },
|
|
27
|
-
});
|
|
28
|
-
return yield response.json();
|
|
29
|
-
});
|
|
30
|
-
}
|
|
31
|
-
export function streamChatGPT(backendUrl, messages, tools, onResponse, token) {
|
|
32
|
-
return __awaiter(this, void 0, void 0, function* () {
|
|
33
|
-
const messageId = Math.random().toString(36).substring(3);
|
|
34
|
-
const currentMessages = [...messages];
|
|
35
|
-
console.log('Starting streamChatGPT with:', {
|
|
36
|
-
messageId,
|
|
37
|
-
messageCount: messages.length,
|
|
38
|
-
toolCount: tools.length,
|
|
39
|
-
backendUrl,
|
|
40
|
-
});
|
|
41
|
-
while (true) {
|
|
42
|
-
const messagesForApi = currentMessages.map((_a) => {
|
|
43
|
-
var { id } = _a, rest = __rest(_a, ["id"]);
|
|
44
|
-
return rest;
|
|
45
|
-
});
|
|
46
|
-
try {
|
|
47
|
-
const response = yield fetch(`${backendUrl}/ai/llm`, {
|
|
48
|
-
method: 'POST',
|
|
49
|
-
body: JSON.stringify({ messages: messagesForApi, tools, stream: true }),
|
|
50
|
-
headers: { Authorization: `Bearer ${token}`, 'Content-Type': 'application/json' },
|
|
51
|
-
});
|
|
52
|
-
if (!response.ok) {
|
|
53
|
-
throw new Error(`HTTP error! status: ${response.status}`);
|
|
54
|
-
}
|
|
55
|
-
if (!response.body) {
|
|
56
|
-
console.error('No response body.');
|
|
57
|
-
return;
|
|
58
|
-
}
|
|
59
|
-
const reader = response.body.getReader();
|
|
60
|
-
const decoder = new TextDecoder('utf-8');
|
|
61
|
-
let content = '';
|
|
62
|
-
let done = false;
|
|
63
|
-
const toolInvocations = [];
|
|
64
|
-
let currentTextId = '';
|
|
65
|
-
let isToolCallMode = false;
|
|
66
|
-
let buffer = ''; // Buffer for incomplete chunks
|
|
67
|
-
while (!done) {
|
|
68
|
-
const { value, done: readerDone } = yield reader.read();
|
|
69
|
-
if (value) {
|
|
70
|
-
const chunk = decoder.decode(value, { stream: true });
|
|
71
|
-
buffer += chunk;
|
|
72
|
-
// Split by lines, but handle incomplete lines
|
|
73
|
-
const lines = buffer.split('\n');
|
|
74
|
-
// Keep the last line in buffer if it's incomplete
|
|
75
|
-
if (lines.length > 1) {
|
|
76
|
-
buffer = lines.pop() || '';
|
|
77
|
-
}
|
|
78
|
-
for (const line of lines) {
|
|
79
|
-
if (line.trim() === '')
|
|
80
|
-
continue;
|
|
81
|
-
// Handle the new streaming format
|
|
82
|
-
if (line.startsWith('data: ')) {
|
|
83
|
-
const dataStr = line.substring(6); // Remove 'data: ' prefix
|
|
84
|
-
// Handle [DONE] marker
|
|
85
|
-
if (dataStr === '[DONE]') {
|
|
86
|
-
done = true;
|
|
87
|
-
break;
|
|
88
|
-
}
|
|
89
|
-
try {
|
|
90
|
-
const data = JSON.parse(dataStr);
|
|
91
|
-
// Log the first message to understand the format
|
|
92
|
-
if (!content && !isToolCallMode) {
|
|
93
|
-
// console.log('First stream message received:', data);
|
|
94
|
-
}
|
|
95
|
-
switch (data.type) {
|
|
96
|
-
case 'start':
|
|
97
|
-
// Stream started, no action needed
|
|
98
|
-
// console.log('Stream started');
|
|
99
|
-
break;
|
|
100
|
-
case 'start-step':
|
|
101
|
-
// Step started, no action needed
|
|
102
|
-
// console.log('Step started');
|
|
103
|
-
break;
|
|
104
|
-
case 'reasoning-start':
|
|
105
|
-
// Reasoning started, no action needed
|
|
106
|
-
console.log('Reasoning started:', data.id);
|
|
107
|
-
break;
|
|
108
|
-
case 'reasoning-end':
|
|
109
|
-
// Reasoning ended, no action needed
|
|
110
|
-
console.log('Reasoning ended:', data.id);
|
|
111
|
-
break;
|
|
112
|
-
case 'text-start':
|
|
113
|
-
// Text generation started, store the ID
|
|
114
|
-
currentTextId = data.id;
|
|
115
|
-
console.log('Text generation started:', data.id);
|
|
116
|
-
break;
|
|
117
|
-
case 'text-delta':
|
|
118
|
-
// Text delta received, append to content
|
|
119
|
-
if (data.delta) {
|
|
120
|
-
content += data.delta;
|
|
121
|
-
onResponse(messageId, content, false);
|
|
122
|
-
}
|
|
123
|
-
break;
|
|
124
|
-
case 'text-end':
|
|
125
|
-
// Text generation ended
|
|
126
|
-
console.log('Text generation ended:', data.id);
|
|
127
|
-
break;
|
|
128
|
-
case 'finish-step':
|
|
129
|
-
// Step finished, no action needed
|
|
130
|
-
// console.log('Step finished');
|
|
131
|
-
break;
|
|
132
|
-
case 'finish':
|
|
133
|
-
// Stream finished
|
|
134
|
-
// console.log('Stream finished');
|
|
135
|
-
done = true;
|
|
136
|
-
break;
|
|
137
|
-
// Additional message types that might be present in the AI library
|
|
138
|
-
case 'tool-call':
|
|
139
|
-
case 'tool-input-available': //for now input calls should be handled the same way as tool calls
|
|
140
|
-
// Tool call initiated
|
|
141
|
-
console.log('Tool call initiated:', data);
|
|
142
|
-
isToolCallMode = true;
|
|
143
|
-
if (data.toolCallId && data.toolName && (data.args || data.input)) {
|
|
144
|
-
toolInvocations.push({
|
|
145
|
-
toolCallId: data.toolCallId,
|
|
146
|
-
toolName: data.toolName,
|
|
147
|
-
args: data.args || data.input,
|
|
148
|
-
});
|
|
149
|
-
}
|
|
150
|
-
break;
|
|
151
|
-
case 'tool-input-delta': //for now input calls should be handled the same way as tool calls
|
|
152
|
-
case 'tool-call-delta':
|
|
153
|
-
// Tool call delta (for streaming tool calls)
|
|
154
|
-
console.log('Tool call delta:', data);
|
|
155
|
-
break;
|
|
156
|
-
case 'tool-call-end':
|
|
157
|
-
// Tool call completed
|
|
158
|
-
console.log('Tool call completed:', data);
|
|
159
|
-
break;
|
|
160
|
-
case 'tool-result':
|
|
161
|
-
// Tool execution result
|
|
162
|
-
console.log('Tool result:', data);
|
|
163
|
-
break;
|
|
164
|
-
case 'error':
|
|
165
|
-
// Error occurred
|
|
166
|
-
console.error('Stream error:', data);
|
|
167
|
-
break;
|
|
168
|
-
case 'usage':
|
|
169
|
-
// Usage information
|
|
170
|
-
console.log('Usage info:', data);
|
|
171
|
-
break;
|
|
172
|
-
case 'model':
|
|
173
|
-
// Model information
|
|
174
|
-
console.log('Model info:', data);
|
|
175
|
-
break;
|
|
176
|
-
case 'stop':
|
|
177
|
-
// Stop signal
|
|
178
|
-
console.log('Stop signal received');
|
|
179
|
-
done = true;
|
|
180
|
-
break;
|
|
181
|
-
default:
|
|
182
|
-
// Unknown type, log for debugging
|
|
183
|
-
console.log('Unknown stream type:', data.type, data);
|
|
184
|
-
break;
|
|
185
|
-
}
|
|
186
|
-
}
|
|
187
|
-
catch (error) {
|
|
188
|
-
console.error('Error parsing stream data:', error, dataStr);
|
|
189
|
-
}
|
|
190
|
-
}
|
|
191
|
-
}
|
|
192
|
-
}
|
|
193
|
-
if (readerDone) {
|
|
194
|
-
done = true;
|
|
195
|
-
}
|
|
196
|
-
}
|
|
197
|
-
// Check if we have content or if this was a tool call response
|
|
198
|
-
if (content || toolInvocations.length > 0) {
|
|
199
|
-
currentMessages.push({
|
|
200
|
-
id: messageId,
|
|
201
|
-
role: 'assistant',
|
|
202
|
-
content: content,
|
|
203
|
-
toolCalls: toolInvocations.length > 0 ? toolInvocations : undefined,
|
|
204
|
-
});
|
|
205
|
-
}
|
|
206
|
-
// Handle tool call scenario if tools were provided
|
|
207
|
-
if (tools.length > 0 && toolInvocations.length > 0) {
|
|
208
|
-
console.log('Tool calls detected, executing tools...');
|
|
209
|
-
const toolResults = [];
|
|
210
|
-
for (const toolInvocation of toolInvocations) {
|
|
211
|
-
const tool = tools.find((t) => t.name === toolInvocation.toolName);
|
|
212
|
-
if (tool && tool.execute) {
|
|
213
|
-
try {
|
|
214
|
-
const result = yield tool.execute(toolInvocation.args);
|
|
215
|
-
toolResults.push({
|
|
216
|
-
id: Math.random().toString(36).substring(3),
|
|
217
|
-
role: 'user',
|
|
218
|
-
content: `Tool '${toolInvocation.toolName}' returned: ${JSON.stringify(result)}`,
|
|
219
|
-
});
|
|
220
|
-
}
|
|
221
|
-
catch (error) {
|
|
222
|
-
console.error(`Error executing tool ${toolInvocation.toolName}:`, error);
|
|
223
|
-
toolResults.push({
|
|
224
|
-
id: Math.random().toString(36).substring(3),
|
|
225
|
-
role: 'user',
|
|
226
|
-
content: `Tool '${toolInvocation.toolName}' failed with error: ${error}`,
|
|
227
|
-
});
|
|
228
|
-
}
|
|
229
|
-
}
|
|
230
|
-
}
|
|
231
|
-
if (toolResults.length > 0) {
|
|
232
|
-
currentMessages.push(...toolResults);
|
|
233
|
-
// Continue the loop to handle the next response
|
|
234
|
-
continue;
|
|
235
|
-
}
|
|
236
|
-
}
|
|
237
|
-
// Since the new format doesn't seem to support tool calls in the same way,
|
|
238
|
-
// we'll assume the stream is complete when we reach the end
|
|
239
|
-
// If tools are provided and no content was generated, this might indicate a tool call
|
|
240
|
-
if (tools.length > 0 && !content && !isToolCallMode) {
|
|
241
|
-
// This might be a tool call scenario, but we need more information
|
|
242
|
-
// For now, we'll just finish the stream
|
|
243
|
-
console.log('No content generated, but tools provided - might be tool call scenario');
|
|
244
|
-
}
|
|
245
|
-
onResponse(messageId, content, true, toolInvocations);
|
|
246
|
-
return;
|
|
247
|
-
}
|
|
248
|
-
catch (error) {
|
|
249
|
-
console.error('Error in streamChatGPT:', error);
|
|
250
|
-
onResponse(messageId, `Error: ${error instanceof Error ? error.message : String(error)}`, true, []);
|
|
251
|
-
return;
|
|
252
|
-
}
|
|
253
|
-
}
|
|
254
|
-
});
|
|
255
|
-
}
|
|
@@ -1,42 +0,0 @@
|
|
|
1
|
-
type PrimitiveType = 'string' | 'number' | 'boolean';
|
|
2
|
-
type ObjectToolParameterType = PrimitiveType | {
|
|
3
|
-
[key: string]: ObjectToolParameter;
|
|
4
|
-
} | [{
|
|
5
|
-
[key: string]: ObjectToolParameter;
|
|
6
|
-
}];
|
|
7
|
-
interface ObjectToolParameter {
|
|
8
|
-
type: ObjectToolParameterType;
|
|
9
|
-
description?: string;
|
|
10
|
-
enum?: string[];
|
|
11
|
-
optional?: boolean;
|
|
12
|
-
}
|
|
13
|
-
/**
|
|
14
|
-
* The tools that the AI can use.
|
|
15
|
-
*
|
|
16
|
-
* The key is the name of the tool.
|
|
17
|
-
* The value is the parameter of the tool.
|
|
18
|
-
*
|
|
19
|
-
*/
|
|
20
|
-
export type ObjectTool = {
|
|
21
|
-
[key: string]: ObjectToolParameter;
|
|
22
|
-
};
|
|
23
|
-
export interface ObjectRequest {
|
|
24
|
-
/**
|
|
25
|
-
* The tools that the AI can use.
|
|
26
|
-
*/
|
|
27
|
-
tool: ObjectTool;
|
|
28
|
-
/**
|
|
29
|
-
* High level instructions for the AI to follow. Behaviour, tone, restrictions, etc.
|
|
30
|
-
* Example: "Act like a recipe writer."
|
|
31
|
-
*/
|
|
32
|
-
behaviour?: string;
|
|
33
|
-
/**
|
|
34
|
-
* The specific instruction for the AI to follow.
|
|
35
|
-
* Example: "Generate a recipe using chicken, rice and vegetables."
|
|
36
|
-
*/
|
|
37
|
-
instructions: string;
|
|
38
|
-
}
|
|
39
|
-
export declare function generateObject<T = any>(backendUrl: string, request: ObjectRequest, token: string): Promise<T>;
|
|
40
|
-
export type OnStreamedObjectResult<T = any> = (result: T, isLoading: boolean) => void;
|
|
41
|
-
export declare function streamObject<T = any>(backendUrl: string, request: ObjectRequest, onResult: OnStreamedObjectResult<T>, token: string): Promise<void>;
|
|
42
|
-
export {};
|
|
@@ -1,105 +0,0 @@
|
|
|
1
|
-
// Compiler-emitted helper (TypeScript downlevel async/await): drives a
// generator function step-by-step, adopting each yielded value into a
// Promise, and resolves the outer Promise with the generator's final value.
// Do not edit by hand — this is the canonical tsc emit.
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
    return new (P || (P = Promise))(function (resolve, reject) {
        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
        step((generator = generator.apply(thisArg, _arguments || [])).next());
    });
};
|
|
10
|
-
/**
 * Requests a single, non-streamed structured object from the backend.
 *
 * Sends the tool schema, behaviour and instructions to the llm-object
 * endpoint with `stream: false` and resolves with the parsed JSON response.
 * The response is returned as-is; no runtime validation is performed.
 */
export function generateObject(backendUrl, request, token) {
    return __awaiter(this, void 0, void 0, function* () {
        const response = yield fetch(`${backendUrl}/ai/llm-object`, {
            method: 'POST',
            headers: { Authorization: `Bearer ${token}`, 'Content-Type': 'application/json' },
            body: JSON.stringify({
                stream: false,
                tool: request.tool,
                behaviour: request.behaviour,
                instructions: request.instructions,
            }),
        });
        return yield response.json();
    });
}
|
|
24
|
-
// Parse a JSON string, returning null instead of throwing on invalid input
// (e.g. a partially streamed chunk).
const tryParseJson = (value) => {
    let parsed = null;
    try {
        parsed = JSON.parse(value);
    }
    catch (_err) {
        // Swallow the parse error and signal failure with null.
        parsed = null;
    }
    return parsed;
};
|
|
32
|
-
// Deep-merges a streamed patch into a base value.
// Arrays are merged element-wise against the base, plain objects are merged
// key-by-key (recursively), and any other patch value replaces the base.
const mergeStreamObject = (base, patch) => {
    if (Array.isArray(patch)) {
        return patch.map((entry, idx) => mergeStreamObject(base === null || base === void 0 ? void 0 : base[idx], entry));
    }
    if (patch !== null && typeof patch === 'object') {
        // Start from a shallow copy of base only when it is itself a plain object.
        const merged = (base !== null && typeof base === 'object' && !Array.isArray(base)) ? Object.assign({}, base) : {};
        Object.keys(patch).forEach((key) => {
            merged[key] = mergeStreamObject(merged[key], patch[key]);
        });
        return merged;
    }
    // Primitives (and null) overwrite whatever was there before.
    return patch;
};
|
|
45
|
-
// Folds one parsed SSE chunk into the accumulated object.
// Supported chunk shapes: { object } replaces the accumulator wholesale,
// while { delta } or { value } (delta wins if both appear) are deep-merged
// into it. Any other chunk is ignored and leaves the accumulator untouched.
const applyStreamChunk = (current, chunk) => {
    const ignored = { next: current, updated: false };
    if (!chunk || typeof chunk !== 'object') {
        return ignored;
    }
    if (chunk.object && typeof chunk.object === 'object') {
        return { next: chunk.object, updated: true };
    }
    const patch = (chunk.delta && typeof chunk.delta === 'object')
        ? chunk.delta
        : (chunk.value && typeof chunk.value === 'object')
            ? chunk.value
            : null;
    if (patch) {
        return { next: mergeStreamObject(current, patch), updated: true };
    }
    return ignored;
};
|
|
60
|
-
/**
 * Streams a structured object from the backend's llm-object endpoint,
 * invoking onResult with each intermediate object (isLoading = true) and
 * once more with the final object (isLoading = false).
 *
 * Fixes over the previous version:
 * - The SSE "data:" prefix is only stripped when actually present; before,
 *   the first 5 characters of every line were discarded unconditionally,
 *   corrupting unprefixed JSON lines.
 * - Incoming bytes are buffered across reads so an event split over two
 *   reader chunks is reassembled instead of being parsed as two fragments.
 * - Lines are parsed with the module's tryParseJson helper (previously dead
 *   code); a malformed fragment is skipped rather than throwing and killing
 *   the whole stream.
 */
export function streamObject(backendUrl, request, onResult, token) {
    return __awaiter(this, void 0, void 0, function* () {
        const response = yield fetch(`${backendUrl}/ai/llm-object`, {
            method: 'POST',
            body: JSON.stringify({
                stream: true,
                tool: request.tool,
                behaviour: request.behaviour,
                instructions: request.instructions,
            }),
            headers: { Authorization: `Bearer ${token}`, 'Content-Type': 'application/json' },
        });
        if (!response.ok) {
            console.error('Failed to stream object:', response.status, response.statusText);
            return;
        }
        if (!response.body) {
            console.error('No response body.');
            return;
        }
        const reader = response.body.getReader();
        const decoder = new TextDecoder('utf-8');
        let done = false;
        let currentObject = {};
        // Carries a trailing partial line between reads: one SSE event may be
        // split across chunk boundaries.
        let buffer = '';
        while (!done) {
            const { value, done: readerDone } = yield reader.read();
            if (value) {
                buffer += decoder.decode(value, { stream: true });
                const lines = buffer.split('\n');
                // The last element may be an incomplete line; keep it for the next read.
                buffer = lines.pop() || '';
                for (const line of lines) {
                    if (!line.trim()) {
                        continue;
                    }
                    // Strip the SSE "data:" prefix only when it is present.
                    const dataStr = (line.startsWith('data:') ? line.substring(5) : line).trim();
                    if (dataStr === '[DONE]') {
                        done = true;
                        break;
                    }
                    const parsed = tryParseJson(dataStr);
                    // Skip fragments that are not valid JSON instead of crashing the stream.
                    if (parsed !== null) {
                        currentObject = parsed;
                        onResult(currentObject, true);
                    }
                }
            }
            if (readerDone) {
                done = true;
            }
        }
        // Flush a final buffered line that arrived without a trailing newline.
        const tail = (buffer.startsWith('data:') ? buffer.substring(5) : buffer).trim();
        if (tail && tail !== '[DONE]') {
            const parsed = tryParseJson(tail);
            if (parsed !== null) {
                currentObject = parsed;
            }
        }
        onResult(currentObject, false);
    });
}
|