@rimori/client 2.5.10 → 2.5.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -10,6 +10,6 @@ export interface Message {
     content: string;
     toolCalls?: ToolInvocation[];
 }
-export declare function generateText(backendUrl: string, messages: Message[], tools: Tool[], token: string): Promise<any>;
+export declare function generateText(backendUrl: string, messages: Message[], tools: Tool[], token: string, cache?: boolean): Promise<any>;
 export type OnLLMResponse = (id: string, response: string, finished: boolean, toolInvocations?: ToolInvocation[]) => void;
-export declare function streamChatGPT(backendUrl: string, messages: Message[], tools: Tool[], onResponse: OnLLMResponse, token: string): Promise<void>;
+export declare function streamChatGPT(backendUrl: string, messages: Message[], tools: Tool[], onResponse: OnLLMResponse, token: string, cache?: boolean): Promise<void>;
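
The change is additive: the new trailing `cache` parameter is optional and defaults to `false`, so call sites from 2.5.10 keep compiling and behaving as before. A minimal sketch of opting in; the URL, token, and message shape are illustrative placeholders, and the root import path is an assumption since the package's export map is not part of this diff:

    // Hypothetical usage of the new flag (not from the package's docs).
    import { generateText, streamChatGPT, Message } from '@rimori/client';

    async function demo(backendUrl: string, token: string) {
      // Message is abbreviated; only `content` and `toolCalls` are visible in this diff.
      const messages = [{ content: 'Hello!' }] as Message[];

      // Same behaviour as 2.5.10: flag omitted, defaults to false.
      const fresh = await generateText(backendUrl, messages, [], token);

      // New in 2.5.11: forwards cache: true in the request body.
      const cached = await generateText(backendUrl, messages, [], token, true);

      // The streaming variant takes the flag in the same trailing position.
      await streamChatGPT(backendUrl, messages, [], (id, response, finished) => {
        if (finished) console.log(id, response);
      }, token, true);

      return { fresh, cached };
    }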
@@ -18,18 +18,18 @@ var __rest = (this && this.__rest) || function (s, e) {
         }
     return t;
 };
-export function generateText(backendUrl, messages, tools, token) {
-    return __awaiter(this, void 0, void 0, function* () {
+export function generateText(backendUrl_1, messages_1, tools_1, token_1) {
+    return __awaiter(this, arguments, void 0, function* (backendUrl, messages, tools, token, cache = false) {
         const response = yield fetch(`${backendUrl}/ai/llm`, {
             method: 'POST',
-            body: JSON.stringify({ messages, tools }),
+            body: JSON.stringify({ messages, tools, cache }),
             headers: { Authorization: `Bearer ${token}`, 'Content-Type': 'application/json' },
         });
         return yield response.json();
     });
 }
-export function streamChatGPT(backendUrl, messages, tools, onResponse, token) {
-    return __awaiter(this, void 0, void 0, function* () {
+export function streamChatGPT(backendUrl_1, messages_1, tools_1, onResponse_1, token_1) {
+    return __awaiter(this, arguments, void 0, function* (backendUrl, messages, tools, onResponse, token, cache = false) {
         const messageId = Math.random().toString(36).substring(3);
         const currentMessages = [...messages];
         console.log('Starting streamChatGPT with:', {
@@ -46,7 +46,7 @@ export function streamChatGPT(backendUrl, messages, tools, onResponse, token) {
         try {
             const response = yield fetch(`${backendUrl}/ai/llm`, {
                 method: 'POST',
-                body: JSON.stringify({ messages: messagesForApi, tools, stream: true }),
+                body: JSON.stringify({ messages: messagesForApi, tools, stream: true, cache }),
                 headers: { Authorization: `Bearer ${token}`, 'Content-Type': 'application/json' },
             });
             if (!response.ok) {
@@ -36,7 +36,7 @@ export interface ObjectRequest {
      */
     instructions: string;
 }
-export declare function generateObject<T = any>(backendUrl: string, request: ObjectRequest, token: string): Promise<T>;
+export declare function generateObject<T = any>(backendUrl: string, request: ObjectRequest, token: string, cache?: boolean): Promise<T>;
 export type OnStreamedObjectResult<T = any> = (result: T, isLoading: boolean) => void;
-export declare function streamObject<T = any>(backendUrl: string, request: ObjectRequest, onResult: OnStreamedObjectResult<T>, token: string): Promise<void>;
+export declare function streamObject<T = any>(backendUrl: string, request: ObjectRequest, onResult: OnStreamedObjectResult<T>, token: string, cache?: boolean): Promise<void>;
 export {};
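
The object-generation API gains the same trailing flag, which the implementation below forwards to the `/ai/llm-object` endpoint in the request body. A hedged sketch along the same lines; the `ObjectRequest` literal is abbreviated (per this diff it also carries `tool` and `behaviour`):

    // Hypothetical usage; import path assumed as above.
    import { generateObject, streamObject, ObjectRequest } from '@rimori/client';

    async function extractCity(backendUrl: string, token: string) {
      const request = {
        instructions: 'Extract the city from: "I live in Oslo."',
      } as ObjectRequest;

      // Trailing `true` enables backend caching; omit it for the old behaviour.
      const result = await generateObject<{ city: string }>(backendUrl, request, token, true);

      // Streamed variant: the callback receives partial objects until isLoading is false.
      await streamObject<{ city: string }>(backendUrl, request, (partial, isLoading) => {
        if (!isLoading) console.log('final object:', partial);
      }, token, true);

      return result.city;
    }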
@@ -7,8 +7,8 @@ var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
         step((generator = generator.apply(thisArg, _arguments || [])).next());
     });
 };
-export function generateObject(backendUrl, request, token) {
-    return __awaiter(this, void 0, void 0, function* () {
+export function generateObject(backendUrl_1, request_1, token_1) {
+    return __awaiter(this, arguments, void 0, function* (backendUrl, request, token, cache = false) {
         return yield fetch(`${backendUrl}/ai/llm-object`, {
             method: 'POST',
             body: JSON.stringify({
@@ -16,6 +16,7 @@ export function generateObject(backendUrl, request, token) {
                 tool: request.tool,
                 behaviour: request.behaviour,
                 instructions: request.instructions,
+                cache,
             }),
             headers: { Authorization: `Bearer ${token}`, 'Content-Type': 'application/json' },
         }).then((response) => response.json());
@@ -57,8 +58,8 @@ const applyStreamChunk = (current, chunk) => {
     }
     return { next: current, updated: false };
 };
-export function streamObject(backendUrl, request, onResult, token) {
-    return __awaiter(this, void 0, void 0, function* () {
+export function streamObject(backendUrl_1, request_1, onResult_1, token_1) {
+    return __awaiter(this, arguments, void 0, function* (backendUrl, request, onResult, token, cache = false) {
         const response = yield fetch(`${backendUrl}/ai/llm-object`, {
             method: 'POST',
             body: JSON.stringify({
@@ -66,6 +67,7 @@ export function streamObject(backendUrl, request, onResult, token) {
                 tool: request.tool,
                 behaviour: request.behaviour,
                 instructions: request.instructions,
+                cache,
             }),
             headers: { Authorization: `Bearer ${token}`, 'Content-Type': 'application/json' },
         });
@@ -63,8 +63,8 @@ export class SettingsController {
                 .eq('guild_id', this.guild.id)
                 .eq('is_guild_setting', isGuildSetting);
             const { data: updatedRows, error: updateError } = yield (isGuildSetting
-                ? updateQuery.is('user_id', null).select('id')
-                : updateQuery.select('id'));
+                ? updateQuery.is('user_id', null).select()
+                : updateQuery.select());
             if (updateError) {
                 if (updateError.code === '42501' || ((_a = updateError.message) === null || _a === void 0 ? void 0 : _a.includes('policy'))) {
                     throw new Error(`Cannot set ${isGuildSetting ? 'guild' : 'user'} settings: Permission denied.`);
@@ -166,7 +166,7 @@ export class Translator {
                     description: `The translation of the input text into ${this.currentLanguage}.`,
                 },
             },
-        });
+        }, true);
         const translation = response === null || response === void 0 ? void 0 : response.translation;
         if (translation) {
             this.aiTranslationCache.set(text, translation);
@@ -5,5 +5,6 @@ interface TTSRequest {
     voice: string;
     speed: number;
     language?: string;
+    cache?: boolean;
 }
 export {};
@@ -15,25 +15,28 @@ export declare class AIModule {
      * Generate text from messages using AI.
      * @param messages The messages to generate text from.
      * @param tools Optional tools to use for generation.
+     * @param cache Whether to cache the result (default: false).
      * @returns The generated text.
      */
-    getText(messages: Message[], tools?: Tool[]): Promise<string>;
+    getText(messages: Message[], tools?: Tool[], cache?: boolean): Promise<string>;
     /**
      * Stream text generation from messages using AI.
      * @param messages The messages to generate text from.
      * @param onMessage Callback for each message chunk.
      * @param tools Optional tools to use for generation.
+     * @param cache Whether to cache the result (default: false).
      */
-    getSteamedText(messages: Message[], onMessage: OnLLMResponse, tools?: Tool[]): Promise<void>;
+    getSteamedText(messages: Message[], onMessage: OnLLMResponse, tools?: Tool[], cache?: boolean): Promise<void>;
     /**
      * Generate voice audio from text using AI.
      * @param text The text to convert to voice.
      * @param voice The voice to use (default: 'alloy').
      * @param speed The speed of the voice (default: 1).
      * @param language Optional language for the voice.
+     * @param cache Whether to cache the result (default: false).
      * @returns The generated audio as a Blob.
      */
-    getVoice(text: string, voice?: string, speed?: number, language?: string): Promise<Blob>;
+    getVoice(text: string, voice?: string, speed?: number, language?: string, cache?: boolean): Promise<Blob>;
     /**
      * Convert voice audio to text using AI.
      * @param file The audio file to convert.
@@ -45,11 +48,12 @@ export declare class AIModule {
      * @param request The object generation request.
      * @returns The generated object.
      */
-    getObject<T = any>(request: ObjectRequest): Promise<T>;
+    getObject<T = any>(request: ObjectRequest, cache?: boolean): Promise<T>;
     /**
      * Generate a streamed structured object from a request using AI.
      * @param request The object generation request.
      * @param onResult Callback for each result chunk.
+     * @param cache Whether to cache the result (default: false).
      */
-    getStreamedObject<T = any>(request: ObjectRequest, onResult: (result: T, isLoading: boolean) => void): Promise<void>;
+    getStreamedObject<T = any>(request: ObjectRequest, onResult: (result: T, isLoading: boolean) => void, cache?: boolean): Promise<void>;
 }
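
At the `AIModule` surface, `cache` is simply the last optional parameter of each method and is forwarded to the low-level helpers shown earlier. A sketch under the assumption that an `AIModule` instance is already available; its construction is not part of this diff:

    // Import path assumed; the package's export map is not shown in this diff.
    import { AIModule, Message, ObjectRequest } from '@rimori/client';

    // Stand-in for an existing AIModule instance.
    declare const ai: AIModule;

    async function cachedCalls() {
      // Text generation with caching enabled (empty tools list).
      const text = await ai.getText([{ content: 'Summarize the README.' }] as Message[], [], true);

      // Structured output with caching; request fields abbreviated.
      const obj = await ai.getObject<{ translation: string }>(
        { instructions: 'Translate "Hallo" to English.' } as ObjectRequest, true);

      // TTS: cache is the fifth parameter, after text, voice, speed, and language.
      const audio = await ai.getVoice('Hello world', 'alloy', 1, undefined, true);

      console.log(text, obj.translation, audio.size);
    }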
@@ -27,11 +27,12 @@ export class AIModule {
      * Generate text from messages using AI.
      * @param messages The messages to generate text from.
      * @param tools Optional tools to use for generation.
+     * @param cache Whether to cache the result (default: false).
      * @returns The generated text.
      */
-    getText(messages, tools) {
-        return __awaiter(this, void 0, void 0, function* () {
-            return generateText(this.backendUrl, messages, tools || [], this.token).then(({ messages }) => messages[0].content[0].text);
+    getText(messages_1, tools_1) {
+        return __awaiter(this, arguments, void 0, function* (messages, tools, cache = false) {
+            return generateText(this.backendUrl, messages, tools || [], this.token, cache).then(({ messages }) => messages[0].content[0].text);
         });
     }
     /**
@@ -39,10 +40,11 @@ export class AIModule {
      * @param messages The messages to generate text from.
      * @param onMessage Callback for each message chunk.
      * @param tools Optional tools to use for generation.
+     * @param cache Whether to cache the result (default: false).
      */
-    getSteamedText(messages, onMessage, tools) {
-        return __awaiter(this, void 0, void 0, function* () {
-            streamChatGPT(this.backendUrl, messages, tools || [], onMessage, this.token);
+    getSteamedText(messages_1, onMessage_1, tools_1) {
+        return __awaiter(this, arguments, void 0, function* (messages, onMessage, tools, cache = false) {
+            streamChatGPT(this.backendUrl, messages, tools || [], onMessage, this.token, cache);
         });
     }
     /**
@@ -51,11 +53,12 @@ export class AIModule {
      * @param voice The voice to use (default: 'alloy').
      * @param speed The speed of the voice (default: 1).
      * @param language Optional language for the voice.
+     * @param cache Whether to cache the result (default: false).
      * @returns The generated audio as a Blob.
      */
     getVoice(text_1) {
-        return __awaiter(this, arguments, void 0, function* (text, voice = 'alloy', speed = 1, language) {
-            return getTTSResponse(this.backendUrl, { input: text, voice, speed, language }, this.token);
+        return __awaiter(this, arguments, void 0, function* (text, voice = 'alloy', speed = 1, language, cache = false) {
+            return getTTSResponse(this.backendUrl, { input: text, voice, speed, language, cache }, this.token);
         });
     }
     /**
@@ -73,19 +76,20 @@ export class AIModule {
      * @param request The object generation request.
      * @returns The generated object.
      */
-    getObject(request) {
-        return __awaiter(this, void 0, void 0, function* () {
-            return generateObject(this.backendUrl, request, this.token);
+    getObject(request_1) {
+        return __awaiter(this, arguments, void 0, function* (request, cache = false) {
+            return generateObject(this.backendUrl, request, this.token, cache);
         });
     }
     /**
      * Generate a streamed structured object from a request using AI.
      * @param request The object generation request.
      * @param onResult Callback for each result chunk.
+     * @param cache Whether to cache the result (default: false).
      */
-    getStreamedObject(request, onResult) {
-        return __awaiter(this, void 0, void 0, function* () {
-            return streamObject(this.backendUrl, request, onResult, this.token);
+    getStreamedObject(request_1, onResult_1) {
+        return __awaiter(this, arguments, void 0, function* (request, onResult, cache = false) {
+            return streamObject(this.backendUrl, request, onResult, this.token, cache);
         });
     }
 }
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@rimori/client",
-  "version": "2.5.10",
+  "version": "2.5.11",
   "main": "dist/index.js",
   "types": "dist/index.d.ts",
   "repository": {