@rimori/client 2.5.11 → 2.5.12
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/controller/TranslationController.d.ts +0 -2
- package/dist/controller/TranslationController.js +5 -4
- package/dist/fromRimori/PluginTypes.d.ts +30 -6
- package/dist/index.d.ts +1 -1
- package/dist/plugin/module/AIModule.d.ts +71 -5
- package/dist/plugin/module/AIModule.js +143 -10
- package/dist/plugin/module/SharedContentController.d.ts +3 -2
- package/dist/plugin/module/SharedContentController.js +11 -21
- package/package.json +1 -1
- package/dist/controller/AIController.d.ts +0 -15
- package/dist/controller/AIController.js +0 -255
- package/dist/controller/ObjectController.d.ts +0 -42
- package/dist/controller/ObjectController.js +0 -107
package/dist/controller/TranslationController.d.ts
CHANGED
@@ -1,7 +1,5 @@
 import { ThirdPartyModule, TOptions } from 'i18next';
-import { ObjectRequest } from './ObjectController';
 import { AIModule } from '../plugin/module/AIModule';
-export type AIObjectGenerator = <T>(request: ObjectRequest) => Promise<T>;
 /**
  * Translator class for handling internationalization
  */
package/dist/controller/TranslationController.js
CHANGED
@@ -158,15 +158,16 @@ export class Translator {
             if (!this.ai || this.currentLanguage === 'en')
                 return text;
             const response = yield this.ai.getObject({
-
-
-
+                systemPrompt: 'You are a translation engine. Return only the translated text.' + additionalInstructions,
+                userPrompt: `Translate the following text into ${this.currentLanguage}: ${text}`,
+                cache: true,
+                responseSchema: {
                     translation: {
                         type: 'string',
                         description: `The translation of the input text into ${this.currentLanguage}.`,
                     },
                 },
-            }
+            });
             const translation = response === null || response === void 0 ? void 0 : response.translation;
             if (translation) {
                 this.aiTranslationCache.set(text, translation);
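The Translator now goes through the consolidated AIModule.getObject API: the responseSchema above makes the backend return an object of the shape { translation: string }, which is why the code reads response.translation afterwards. For illustration, an equivalent standalone call, assuming `ai` is an AIModule instance (this sketch is not part of the package):

    // Sketch only: mirrors the request the Translator builds above.
    const response = await ai.getObject<{ translation: string }>({
        systemPrompt: 'You are a translation engine. Return only the translated text.',
        userPrompt: 'Translate the following text into de: Good morning',
        cache: true,
        responseSchema: {
            translation: { type: 'string', description: 'The translation of the input text into de.' },
        },
    });
    console.log(response.translation);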
package/dist/fromRimori/PluginTypes.d.ts
CHANGED
@@ -18,6 +18,12 @@ export interface PluginPage {
     action?: {
         key: string;
         parameters: ObjectTool;
+        supportive_tools?: {
+            key: string;
+            event: string;
+            description: string;
+            parameters?: ObjectTool;
+        }[];
     };
 }
 export interface SidebarPage {
@@ -108,14 +114,14 @@ export interface RimoriPluginConfig<T extends object = object> {
         topics?: string[];
     };
 }
+/**
+ * Tool definition for AI function calling.
+ * Used when the LLM needs to call external functions/APIs during generation.
+ */
 export interface Tool {
     name: string;
     description: string;
-    parameters: {
-        name: string;
-        description: string;
-        type: 'string' | 'number' | 'boolean';
-    }[];
+    parameters: FunctionToolParameter[];
     execute?: (args: Record<string, any>) => Promise<unknown> | unknown | void;
 }
 /**
@@ -179,5 +185,23 @@ type ToolParameterType = PrimitiveType | {
  * Primitive data types supported by the LLM tool system.
  * These align with JSON schema primitive types and TypeScript basic types.
  */
-type PrimitiveType = 'string' | 'number' | 'boolean';
+export type PrimitiveType = 'string' | 'number' | 'boolean';
+/**
+ * Parameter for function calling tools (flat structure with name).
+ * Uses the same optional properties as ToolParameter but restricted to primitive types.
+ */
+export interface FunctionToolParameter {
+    /** The parameter name used in function calls */
+    name: string;
+    /** Human-readable description of the parameter's purpose and usage */
+    description: string;
+    /** The primitive data type of the parameter */
+    type: PrimitiveType;
+    /** Optional array of allowed values for enumerated parameters */
+    enum?: string[];
+    /** Whether the parameter is optional (defaults to required) */
+    optional?: boolean;
+    /** Whether the parameter is an array of the specified type */
+    isArray?: boolean;
+}
 export {};
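Tool.parameters now reuses the exported FunctionToolParameter shape instead of an inline object literal, and PrimitiveType is exported alongside it. A hypothetical tool definition under the new typing (the tool name, its parameters, and the execute body are invented for illustration; the Tool import is assumed to come from this PluginTypes module):

    // Sketch only: all names below are illustrative, not part of the package.
    const weatherTool: Tool = {
        name: 'get_weather',
        description: 'Look up the current weather for a city.',
        parameters: [
            { name: 'city', description: 'City to look up', type: 'string' },
            { name: 'unit', description: 'Temperature unit', type: 'string', enum: ['celsius', 'fahrenheit'], optional: true },
        ],
        // Executed client-side when the model calls the tool; see the AIModule.js diff below.
        execute: async (args) => ({ city: args.city, temperatureC: 21 }),
    };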
package/dist/index.d.ts
CHANGED
@@ -12,7 +12,7 @@ export type { TOptions } from 'i18next';
 export type { SharedContent, BasicSharedContent, ContentStatus } from './plugin/module/SharedContentController';
 export type { Exercise } from './plugin/module/ExerciseModule';
 export type { UserInfo, Language, UserRole } from './controller/SettingsController';
-export type { Message, ToolInvocation } from './controller/AIController';
+export type { Message, ToolInvocation } from './plugin/module/AIModule';
 export type { TriggerAction } from './plugin/module/ExerciseModule';
 export type { MacroAccomplishmentPayload, MicroAccomplishmentPayload } from './controller/AccomplishmentController';
 export type { EventBusMessage } from './fromRimori/EventBus';
package/dist/plugin/module/AIModule.d.ts
CHANGED
@@ -1,7 +1,56 @@
-import { RimoriCommunicationHandler, RimoriInfo } from '../CommunicationHandler';
-import { Message, OnLLMResponse } from '../../controller/AIController';
-import { ObjectRequest } from '../../controller/ObjectController';
 import { Tool } from '../../fromRimori/PluginTypes';
+import { RimoriCommunicationHandler, RimoriInfo } from '../CommunicationHandler';
+export type OnStreamedObjectResult<T = any> = (result: T, isLoading: boolean) => void;
+type PrimitiveType = 'string' | 'number' | 'boolean';
+type ObjectToolParameterType = PrimitiveType | {
+    [key: string]: ObjectToolParameter;
+} | [{
+    [key: string]: ObjectToolParameter;
+}];
+interface ObjectToolParameter {
+    type: ObjectToolParameterType;
+    description?: string;
+    enum?: string[];
+    optional?: boolean;
+}
+/**
+ * The tools that the AI can use.
+ *
+ * The key is the name of the tool.
+ * The value is the parameter of the tool.
+ *
+ */
+export type ObjectTool = {
+    [key: string]: ObjectToolParameter;
+};
+export interface ToolInvocation {
+    toolCallId: string;
+    toolName: string;
+    args: Record<string, string>;
+}
+export interface Message {
+    id?: string;
+    role: 'user' | 'assistant' | 'system';
+    content: string;
+    toolCalls?: ToolInvocation[];
+}
+export type OnLLMResponse = (id: string, response: string, finished: boolean, toolInvocations?: ToolInvocation[]) => void;
+export interface ObjectRequest {
+    /**
+     * The tools that the AI can use.
+     */
+    tool: ObjectTool;
+    /**
+     * High level instructions for the AI to follow. Behaviour, tone, restrictions, etc.
+     * Example: "Act like a recipe writer."
+     */
+    behaviour?: string;
+    /**
+     * The specific instruction for the AI to follow.
+     * Example: "Generate a recipe using chicken, rice and vegetables."
+     */
+    instructions: string;
+}
 /**
  * Controller for AI-related operations.
  * Provides access to text generation, voice synthesis, and object generation.
@@ -43,17 +92,34 @@ export declare class AIModule {
      * @returns The transcribed text.
      */
     getTextFromVoice(file: Blob): Promise<string>;
+    private getChatMessage;
     /**
      * Generate a structured object from a request using AI.
      * @param request The object generation request.
      * @returns The generated object.
      */
-    getObject<T = any>(
+    getObject<T = any>(params: {
+        systemPrompt: string;
+        responseSchema: ObjectTool;
+        userPrompt?: string;
+        cache?: boolean;
+        tools?: Tool[];
+    }): Promise<T>;
     /**
      * Generate a streamed structured object from a request using AI.
      * @param request The object generation request.
      * @param onResult Callback for each result chunk.
      * @param cache Whether to cache the result (default: false).
      */
-    getStreamedObject<T = any>(
+    getStreamedObject<T = any>(params: {
+        systemPrompt: string;
+        responseSchema: ObjectTool;
+        userPrompt?: string;
+        onResult: OnStreamedObjectResult<T>;
+        cache?: boolean;
+        tools?: Tool[];
+    }): Promise<void>;
+    private streamObject;
+    private sendToolResult;
 }
+export {};
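Both object-generation entry points switch from positional arguments to a single params object, and Message/ToolInvocation now live here instead of in the deleted AIController. A usage sketch (the `ai` instance and the recipe schema are assumptions; the prompts reuse the examples from the ObjectRequest JSDoc above):

    // Non-streaming: resolves once with the finished object.
    const recipe = await ai.getObject<{ title: string; summary: string }>({
        systemPrompt: 'Act like a recipe writer.',
        userPrompt: 'Generate a recipe using chicken, rice and vegetables.',
        responseSchema: {
            title: { type: 'string', description: 'Name of the dish.' },
            summary: { type: 'string', description: 'Short description of the dish.' },
        },
    });

    // Streaming: onResult fires with progressively more complete objects.
    await ai.getStreamedObject<{ title: string; summary: string }>({
        systemPrompt: 'Act like a recipe writer.',
        userPrompt: 'Generate a recipe using chicken, rice and vegetables.',
        responseSchema: { title: { type: 'string' }, summary: { type: 'string' } },
        onResult: (partial, isLoading) => console.log(isLoading ? 'partial' : 'final', partial),
    });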
package/dist/plugin/module/AIModule.js
CHANGED
@@ -7,8 +7,6 @@ var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, ge
         step((generator = generator.apply(thisArg, _arguments || [])).next());
     });
 };
-import { generateText, streamChatGPT } from '../../controller/AIController';
-import { generateObject, streamObject } from '../../controller/ObjectController';
 import { getSTTResponse, getTTSResponse } from '../../controller/VoiceController';
 /**
  * Controller for AI-related operations.
@@ -32,7 +30,17 @@ export class AIModule {
      */
     getText(messages_1, tools_1) {
         return __awaiter(this, arguments, void 0, function* (messages, tools, cache = false) {
-
+            const { result } = yield this.streamObject({
+                cache,
+                tools,
+                messages,
+                responseSchema: {
+                    result: {
+                        type: 'string',
+                    },
+                },
+            });
+            return result;
         });
     }
     /**
@@ -44,7 +52,19 @@ export class AIModule {
      */
     getSteamedText(messages_1, onMessage_1, tools_1) {
         return __awaiter(this, arguments, void 0, function* (messages, onMessage, tools, cache = false) {
-
+            const messageId = Math.random().toString(36).substring(3);
+            const { result } = yield this.streamObject({
+                cache,
+                tools,
+                messages,
+                responseSchema: {
+                    result: {
+                        type: 'string',
+                    },
+                },
+                onResult: ({ result }) => onMessage(messageId, result, false),
+            });
+            onMessage(messageId, result, true);
         });
     }
     /**
@@ -71,14 +91,27 @@ export class AIModule {
             return getSTTResponse(this.backendUrl, file, this.token);
         });
     }
+    getChatMessage(systemPrompt, userPrompt) {
+        const messages = [{ role: 'system', content: systemPrompt }];
+        if (userPrompt) {
+            messages.push({ role: 'user', content: userPrompt });
+        }
+        return messages;
+    }
     /**
      * Generate a structured object from a request using AI.
      * @param request The object generation request.
      * @returns The generated object.
      */
-    getObject(
-        return __awaiter(this,
-
+    getObject(params) {
+        return __awaiter(this, void 0, void 0, function* () {
+            const { systemPrompt, responseSchema, userPrompt, cache = false, tools = [] } = params;
+            return yield this.streamObject({
+                responseSchema,
+                messages: this.getChatMessage(systemPrompt, userPrompt),
+                cache,
+                tools,
+            });
         });
     }
     /**
@@ -87,9 +120,109 @@ export class AIModule {
      * @param onResult Callback for each result chunk.
      * @param cache Whether to cache the result (default: false).
      */
-    getStreamedObject(
-        return __awaiter(this,
-
+    getStreamedObject(params) {
+        return __awaiter(this, void 0, void 0, function* () {
+            const { systemPrompt, responseSchema, userPrompt, onResult, cache = false, tools = [] } = params;
+            yield this.streamObject({
+                responseSchema,
+                messages: this.getChatMessage(systemPrompt, userPrompt),
+                onResult,
+                cache,
+                tools,
+            });
+        });
+    }
+    streamObject(params) {
+        return __awaiter(this, void 0, void 0, function* () {
+            const { messages, responseSchema, onResult = () => null, cache = false, tools = [] } = params;
+            const chatMessages = messages.map((message, index) => (Object.assign(Object.assign({}, message), { id: `${index + 1}` })));
+            const response = yield fetch(`${this.backendUrl}/ai/llm`, {
+                body: JSON.stringify({
+                    cache,
+                    tools,
+                    stream: true,
+                    responseSchema,
+                    messages: chatMessages,
+                }),
+                method: 'POST',
+                headers: { Authorization: `Bearer ${this.token}`, 'Content-Type': 'application/json' },
+            });
+            if (!response.ok) {
+                throw new Error(`Failed to stream object: ${response.status} ${response.statusText}`);
+            }
+            if (!response.body) {
+                throw new Error('No response body.');
+            }
+            const reader = response.body.getReader();
+            const decoder = new TextDecoder('utf-8');
+            let currentObject = {};
+            let isLoading = true;
+            while (isLoading) {
+                //wait 50ms to not overload the CPU
+                yield new Promise((resolve) => setTimeout(resolve, 30));
+                const { value, done: readerDone } = yield reader.read();
+                if (readerDone) {
+                    isLoading = false;
+                    onResult(currentObject, false);
+                    return currentObject;
+                }
+                //the check needs to be behind readerDone because in closed connections the value is undefined
+                if (!value)
+                    continue;
+                const chunk = decoder.decode(value, { stream: true });
+                const lines = chunk.split('\n').filter((line) => line.trim());
+                for (const line of lines) {
+                    const command = line.substring(0, 5);
+                    const dataStr = line.substring(5).trim();
+                    if (dataStr === '[DONE]') {
+                        isLoading = false;
+                        onResult(currentObject, false);
+                        return currentObject;
+                    }
+                    if (command === 'data:') {
+                        currentObject = JSON.parse(dataStr);
+                        onResult(currentObject, true);
+                    }
+                    else if (command === 'tool:') {
+                        const { toolCallId, toolName, args } = JSON.parse(dataStr);
+                        const tool = tools.find((tool) => tool.name === toolName);
+                        if (tool && tool.execute) {
+                            const result = yield tool.execute(args);
+                            // Send the result to the backend
+                            yield this.sendToolResult(toolCallId, result);
+                        }
+                        else if (tool && !tool.execute) {
+                            console.error('Tool found but has no execute function:', toolName);
+                        }
+                        else {
+                            console.error('Tool not found:', toolName);
+                        }
+                    }
+                    else if (command === 'error') {
+                        //error has 5 letters + the colon so we need to remove one character of the data string to get the error message
+                        console.error('Error:', dataStr.substring(1));
+                    }
+                    else if (command === 'info:') {
+                        //ignore info messages
+                    }
+                    else {
+                        console.error('Unknown stream data:', line);
+                    }
+                }
+            }
+            return currentObject;
+        });
+    }
+    sendToolResult(toolCallId, result) {
+        return __awaiter(this, void 0, void 0, function* () {
+            yield fetch(`${this.backendUrl}/ai/llm/tool_result`, {
+                method: 'POST',
+                body: JSON.stringify({
+                    toolCallId,
+                    result: result !== null && result !== void 0 ? result : '[DONE]',
+                }),
+                headers: { Authorization: `Bearer ${this.token} `, 'Content-Type': 'application/json' },
+            });
         });
     }
 }
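The parser above implies a line-delimited wire format on /ai/llm: the first five characters of each line select a handler ('data:', 'tool:', 'info:', or 'error'), and '[DONE]' terminates the stream. Reconstructed from those branches, a response body might look like this (the payloads are illustrative, not captured from the real backend):

    data: {"title":"Chicken"}
    data: {"title":"Chicken and rice bowl"}
    tool: {"toolCallId":"call_1","toolName":"get_weather","args":{"city":"Berlin"}}
    info: provider selected
    data: [DONE]

When a tool: frame names a registered tool that has an execute function, AIModule runs it locally and POSTs the return value to /ai/llm/tool_result together with the toolCallId, falling back to the literal '[DONE]' when the tool returns nothing.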
package/dist/plugin/module/SharedContentController.d.ts
CHANGED
@@ -58,10 +58,11 @@ export declare class SharedContentController {
     }): Promise<SharedContent<T>>;
     /**
      * Search for shared content by topic using RAG (semantic similarity).
+     * Returns the first matching content that hasn't been completed by the user.
      * @param tableName - Name of the shared content table
      * @param topic - Topic to search for
-     * @param limit - Maximum number of results
-     * @returns
+     * @param limit - Maximum number of results to return (default: 10)
+     * @returns Matching shared content
      */
     searchByTopic<T>(tableName: string, topic: string, limit?: number): Promise<SharedContent<T>[]>;
     /**
package/dist/plugin/module/SharedContentController.js
CHANGED
@@ -57,37 +57,27 @@ export class SharedContentController {
     }
     /**
      * Search for shared content by topic using RAG (semantic similarity).
+     * Returns the first matching content that hasn't been completed by the user.
      * @param tableName - Name of the shared content table
      * @param topic - Topic to search for
-     * @param limit - Maximum number of results
-     * @returns
+     * @param limit - Maximum number of results to return (default: 10)
+     * @returns Matching shared content
      */
     searchByTopic(tableName_1, topic_1) {
         return __awaiter(this, arguments, void 0, function* (tableName, topic, limit = 10) {
-            const
-            const completedTableName = this.getCompletedTableName(tableName);
-            // Generate embedding for search topic
-            const response = yield this.rimoriClient.runtime.fetchBackend('/ai/embedding', {
+            const response = yield this.rimoriClient.runtime.fetchBackend('/shared-content/get-by-topic', {
                 method: 'POST',
                 headers: { 'Content-Type': 'application/json' },
-                body: JSON.stringify({
+                body: JSON.stringify({
+                    tableName,
+                    limit,
+                    filter: { title: { filterType: 'rag', value: topic } },
+                }),
             });
             if (!response.ok) {
-                throw new Error(`Failed to
-            }
-            const { embedding } = yield response.json();
-            // RPC call for vector similarity search with completion filtering
-            const { data, error } = yield this.supabase.rpc('search_shared_content', {
-                p_table_name: fullTableName,
-                p_completed_table_name: completedTableName,
-                p_embedding: JSON.stringify(embedding),
-                p_limit: limit,
-            });
-            if (error) {
-                console.error('Error searching shared content:', error);
-                throw new Error('Error searching shared content');
+                throw new Error(`Failed to search shared content: ${response.statusText}`);
             }
-            return
+            return (yield response.json());
         });
     }
     /**
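The RAG search thus replaces the old client-side flow (an /ai/embedding request followed by a Supabase RPC) with a single call to /shared-content/get-by-topic, and the public signature is unchanged, so existing call sites keep working. A usage sketch (the controller instance, table name, and payload type are assumptions for illustration):

    const stories = await controller.searchByTopic<{ title: string }>('stories', 'space travel', 5);
    console.log(`Found ${stories.length} not-yet-completed matches.`);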
package/package.json
CHANGED
-  "version": "2.5.11",
+  "version": "2.5.12",
package/dist/controller/AIController.d.ts
DELETED
@@ -1,15 +0,0 @@
-import { Tool } from '../fromRimori/PluginTypes';
-export interface ToolInvocation {
-    toolCallId: string;
-    toolName: string;
-    args: Record<string, string>;
-}
-export interface Message {
-    id?: string;
-    role: 'user' | 'assistant' | 'system';
-    content: string;
-    toolCalls?: ToolInvocation[];
-}
-export declare function generateText(backendUrl: string, messages: Message[], tools: Tool[], token: string, cache?: boolean): Promise<any>;
-export type OnLLMResponse = (id: string, response: string, finished: boolean, toolInvocations?: ToolInvocation[]) => void;
-export declare function streamChatGPT(backendUrl: string, messages: Message[], tools: Tool[], onResponse: OnLLMResponse, token: string, cache?: boolean): Promise<void>;
package/dist/controller/AIController.js
DELETED
@@ -1,255 +0,0 @@
-var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
-    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
-    return new (P || (P = Promise))(function (resolve, reject) {
-        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
-        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
-        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
-        step((generator = generator.apply(thisArg, _arguments || [])).next());
-    });
-};
-var __rest = (this && this.__rest) || function (s, e) {
-    var t = {};
-    for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p) && e.indexOf(p) < 0)
-        t[p] = s[p];
-    if (s != null && typeof Object.getOwnPropertySymbols === "function")
-        for (var i = 0, p = Object.getOwnPropertySymbols(s); i < p.length; i++) {
-            if (e.indexOf(p[i]) < 0 && Object.prototype.propertyIsEnumerable.call(s, p[i]))
-                t[p[i]] = s[p[i]];
-        }
-    return t;
-};
-export function generateText(backendUrl_1, messages_1, tools_1, token_1) {
-    return __awaiter(this, arguments, void 0, function* (backendUrl, messages, tools, token, cache = false) {
-        const response = yield fetch(`${backendUrl}/ai/llm`, {
-            method: 'POST',
-            body: JSON.stringify({ messages, tools, cache }),
-            headers: { Authorization: `Bearer ${token}`, 'Content-Type': 'application/json' },
-        });
-        return yield response.json();
-    });
-}
-export function streamChatGPT(backendUrl_1, messages_1, tools_1, onResponse_1, token_1) {
-    return __awaiter(this, arguments, void 0, function* (backendUrl, messages, tools, onResponse, token, cache = false) {
-        const messageId = Math.random().toString(36).substring(3);
-        const currentMessages = [...messages];
-        console.log('Starting streamChatGPT with:', {
-            messageId,
-            messageCount: messages.length,
-            toolCount: tools.length,
-            backendUrl,
-        });
-        while (true) {
-            const messagesForApi = currentMessages.map((_a) => {
-                var { id } = _a, rest = __rest(_a, ["id"]);
-                return rest;
-            });
-            try {
-                const response = yield fetch(`${backendUrl}/ai/llm`, {
-                    method: 'POST',
-                    body: JSON.stringify({ messages: messagesForApi, tools, stream: true, cache }),
-                    headers: { Authorization: `Bearer ${token}`, 'Content-Type': 'application/json' },
-                });
-                if (!response.ok) {
-                    throw new Error(`HTTP error! status: ${response.status}`);
-                }
-                if (!response.body) {
-                    console.error('No response body.');
-                    return;
-                }
-                const reader = response.body.getReader();
-                const decoder = new TextDecoder('utf-8');
-                let content = '';
-                let done = false;
-                const toolInvocations = [];
-                let currentTextId = '';
-                let isToolCallMode = false;
-                let buffer = ''; // Buffer for incomplete chunks
-                while (!done) {
-                    const { value, done: readerDone } = yield reader.read();
-                    if (value) {
-                        const chunk = decoder.decode(value, { stream: true });
-                        buffer += chunk;
-                        // Split by lines, but handle incomplete lines
-                        const lines = buffer.split('\n');
-                        // Keep the last line in buffer if it's incomplete
-                        if (lines.length > 1) {
-                            buffer = lines.pop() || '';
-                        }
-                        for (const line of lines) {
-                            if (line.trim() === '')
-                                continue;
-                            // Handle the new streaming format
-                            if (line.startsWith('data: ')) {
-                                const dataStr = line.substring(6); // Remove 'data: ' prefix
-                                // Handle [DONE] marker
-                                if (dataStr === '[DONE]') {
-                                    done = true;
-                                    break;
-                                }
-                                try {
-                                    const data = JSON.parse(dataStr);
-                                    // Log the first message to understand the format
-                                    if (!content && !isToolCallMode) {
-                                        // console.log('First stream message received:', data);
-                                    }
-                                    switch (data.type) {
-                                        case 'start':
-                                            // Stream started, no action needed
-                                            // console.log('Stream started');
-                                            break;
-                                        case 'start-step':
-                                            // Step started, no action needed
-                                            // console.log('Step started');
-                                            break;
-                                        case 'reasoning-start':
-                                            // Reasoning started, no action needed
-                                            console.log('Reasoning started:', data.id);
-                                            break;
-                                        case 'reasoning-end':
-                                            // Reasoning ended, no action needed
-                                            console.log('Reasoning ended:', data.id);
-                                            break;
-                                        case 'text-start':
-                                            // Text generation started, store the ID
-                                            currentTextId = data.id;
-                                            console.log('Text generation started:', data.id);
-                                            break;
-                                        case 'text-delta':
-                                            // Text delta received, append to content
-                                            if (data.delta) {
-                                                content += data.delta;
-                                                onResponse(messageId, content, false);
-                                            }
-                                            break;
-                                        case 'text-end':
-                                            // Text generation ended
-                                            console.log('Text generation ended:', data.id);
-                                            break;
-                                        case 'finish-step':
-                                            // Step finished, no action needed
-                                            // console.log('Step finished');
-                                            break;
-                                        case 'finish':
-                                            // Stream finished
-                                            // console.log('Stream finished');
-                                            done = true;
-                                            break;
-                                        // Additional message types that might be present in the AI library
-                                        case 'tool-call':
-                                        case 'tool-input-available': //for now input calls should be handled the same way as tool calls
-                                            // Tool call initiated
-                                            console.log('Tool call initiated:', data);
-                                            isToolCallMode = true;
-                                            if (data.toolCallId && data.toolName && (data.args || data.input)) {
-                                                toolInvocations.push({
-                                                    toolCallId: data.toolCallId,
-                                                    toolName: data.toolName,
-                                                    args: data.args || data.input,
-                                                });
-                                            }
-                                            break;
-                                        case 'tool-input-delta': //for now input calls should be handled the same way as tool calls
-                                        case 'tool-call-delta':
-                                            // Tool call delta (for streaming tool calls)
-                                            console.log('Tool call delta:', data);
-                                            break;
-                                        case 'tool-call-end':
-                                            // Tool call completed
-                                            console.log('Tool call completed:', data);
-                                            break;
-                                        case 'tool-result':
-                                            // Tool execution result
-                                            console.log('Tool result:', data);
-                                            break;
-                                        case 'error':
-                                            // Error occurred
-                                            console.error('Stream error:', data);
-                                            break;
-                                        case 'usage':
-                                            // Usage information
-                                            console.log('Usage info:', data);
-                                            break;
-                                        case 'model':
-                                            // Model information
-                                            console.log('Model info:', data);
-                                            break;
-                                        case 'stop':
-                                            // Stop signal
-                                            console.log('Stop signal received');
-                                            done = true;
-                                            break;
-                                        default:
-                                            // Unknown type, log for debugging
-                                            console.log('Unknown stream type:', data.type, data);
-                                            break;
-                                    }
-                                }
-                                catch (error) {
-                                    console.error('Error parsing stream data:', error, dataStr);
-                                }
-                            }
-                        }
-                    }
-                    if (readerDone) {
-                        done = true;
-                    }
-                }
-                // Check if we have content or if this was a tool call response
-                if (content || toolInvocations.length > 0) {
-                    currentMessages.push({
-                        id: messageId,
-                        role: 'assistant',
-                        content: content,
-                        toolCalls: toolInvocations.length > 0 ? toolInvocations : undefined,
-                    });
-                }
-                // Handle tool call scenario if tools were provided
-                if (tools.length > 0 && toolInvocations.length > 0) {
-                    console.log('Tool calls detected, executing tools...');
-                    const toolResults = [];
-                    for (const toolInvocation of toolInvocations) {
-                        const tool = tools.find((t) => t.name === toolInvocation.toolName);
-                        if (tool && tool.execute) {
-                            try {
-                                const result = yield tool.execute(toolInvocation.args);
-                                toolResults.push({
-                                    id: Math.random().toString(36).substring(3),
-                                    role: 'user',
-                                    content: `Tool '${toolInvocation.toolName}' returned: ${JSON.stringify(result)}`,
-                                });
-                            }
-                            catch (error) {
-                                console.error(`Error executing tool ${toolInvocation.toolName}:`, error);
-                                toolResults.push({
-                                    id: Math.random().toString(36).substring(3),
-                                    role: 'user',
-                                    content: `Tool '${toolInvocation.toolName}' failed with error: ${error}`,
-                                });
-                            }
-                        }
-                    }
-                    if (toolResults.length > 0) {
-                        currentMessages.push(...toolResults);
-                        // Continue the loop to handle the next response
-                        continue;
-                    }
-                }
-                // Since the new format doesn't seem to support tool calls in the same way,
-                // we'll assume the stream is complete when we reach the end
-                // If tools are provided and no content was generated, this might indicate a tool call
-                if (tools.length > 0 && !content && !isToolCallMode) {
-                    // This might be a tool call scenario, but we need more information
-                    // For now, we'll just finish the stream
-                    console.log('No content generated, but tools provided - might be tool call scenario');
-                }
-                onResponse(messageId, content, true, toolInvocations);
-                return;
-            }
-            catch (error) {
-                console.error('Error in streamChatGPT:', error);
-                onResponse(messageId, `Error: ${error instanceof Error ? error.message : String(error)}`, true, []);
-                return;
-            }
-        }
-    });
-}
package/dist/controller/ObjectController.d.ts
DELETED
@@ -1,42 +0,0 @@
-type PrimitiveType = 'string' | 'number' | 'boolean';
-type ObjectToolParameterType = PrimitiveType | {
-    [key: string]: ObjectToolParameter;
-} | [{
-    [key: string]: ObjectToolParameter;
-}];
-interface ObjectToolParameter {
-    type: ObjectToolParameterType;
-    description?: string;
-    enum?: string[];
-    optional?: boolean;
-}
-/**
- * The tools that the AI can use.
- *
- * The key is the name of the tool.
- * The value is the parameter of the tool.
- *
- */
-export type ObjectTool = {
-    [key: string]: ObjectToolParameter;
-};
-export interface ObjectRequest {
-    /**
-     * The tools that the AI can use.
-     */
-    tool: ObjectTool;
-    /**
-     * High level instructions for the AI to follow. Behaviour, tone, restrictions, etc.
-     * Example: "Act like a recipe writer."
-     */
-    behaviour?: string;
-    /**
-     * The specific instruction for the AI to follow.
-     * Example: "Generate a recipe using chicken, rice and vegetables."
-     */
-    instructions: string;
-}
-export declare function generateObject<T = any>(backendUrl: string, request: ObjectRequest, token: string, cache?: boolean): Promise<T>;
-export type OnStreamedObjectResult<T = any> = (result: T, isLoading: boolean) => void;
-export declare function streamObject<T = any>(backendUrl: string, request: ObjectRequest, onResult: OnStreamedObjectResult<T>, token: string, cache?: boolean): Promise<void>;
-export {};
package/dist/controller/ObjectController.js
DELETED
@@ -1,107 +0,0 @@
-var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
-    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
-    return new (P || (P = Promise))(function (resolve, reject) {
-        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
-        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
-        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
-        step((generator = generator.apply(thisArg, _arguments || [])).next());
-    });
-};
-export function generateObject(backendUrl_1, request_1, token_1) {
-    return __awaiter(this, arguments, void 0, function* (backendUrl, request, token, cache = false) {
-        return yield fetch(`${backendUrl}/ai/llm-object`, {
-            method: 'POST',
-            body: JSON.stringify({
-                stream: false,
-                tool: request.tool,
-                behaviour: request.behaviour,
-                instructions: request.instructions,
-                cache,
-            }),
-            headers: { Authorization: `Bearer ${token}`, 'Content-Type': 'application/json' },
-        }).then((response) => response.json());
-    });
-}
-const tryParseJson = (value) => {
-    try {
-        return JSON.parse(value);
-    }
-    catch (_a) {
-        return null;
-    }
-};
-const mergeStreamObject = (base, patch) => {
-    if (Array.isArray(patch)) {
-        return patch.map((item, index) => mergeStreamObject(base === null || base === void 0 ? void 0 : base[index], item));
-    }
-    if (patch && typeof patch === 'object') {
-        const result = base && typeof base === 'object' && !Array.isArray(base) ? Object.assign({}, base) : {};
-        for (const [key, value] of Object.entries(patch)) {
-            result[key] = mergeStreamObject(result[key], value);
-        }
-        return result;
-    }
-    return patch;
-};
-const applyStreamChunk = (current, chunk) => {
-    if (!chunk || typeof chunk !== 'object') {
-        return { next: current, updated: false };
-    }
-    if (chunk.object && typeof chunk.object === 'object') {
-        return { next: chunk.object, updated: true };
-    }
-    if (chunk.delta && typeof chunk.delta === 'object') {
-        return { next: mergeStreamObject(current, chunk.delta), updated: true };
-    }
-    if (chunk.value && typeof chunk.value === 'object') {
-        return { next: mergeStreamObject(current, chunk.value), updated: true };
-    }
-    return { next: current, updated: false };
-};
-export function streamObject(backendUrl_1, request_1, onResult_1, token_1) {
-    return __awaiter(this, arguments, void 0, function* (backendUrl, request, onResult, token, cache = false) {
-        const response = yield fetch(`${backendUrl}/ai/llm-object`, {
-            method: 'POST',
-            body: JSON.stringify({
-                stream: true,
-                tool: request.tool,
-                behaviour: request.behaviour,
-                instructions: request.instructions,
-                cache,
-            }),
-            headers: { Authorization: `Bearer ${token}`, 'Content-Type': 'application/json' },
-        });
-        if (!response.ok) {
-            console.error('Failed to stream object:', response.status, response.statusText);
-            return;
-        }
-        if (!response.body) {
-            console.error('No response body.');
-            return;
-        }
-        const reader = response.body.getReader();
-        const decoder = new TextDecoder('utf-8');
-        let done = false;
-        let currentObject = {};
-        while (!done) {
-            const { value, done: readerDone } = yield reader.read();
-            if (value) {
-                const chunk = decoder.decode(value, { stream: true });
-                const lines = chunk.split('\n').filter((line) => line.trim());
-                for (const line of lines) {
-                    const dataStr = line.substring(5).trim();
-                    if (dataStr === '[DONE]') {
-                        done = true;
-                        break;
-                    }
-                    currentObject = JSON.parse(dataStr);
-                    onResult(currentObject, true);
-                }
-            }
-            if (readerDone) {
-                done = true;
-            }
-        }
-        onResult(currentObject, false);
-    });
-}