@rimori/client 2.5.12 → 2.5.13

@@ -101,8 +101,13 @@ export interface DbSharedContentTableDefinition {
     description: string;
     /** AI prompt for generating content. Supports placeholders like {{topic}}, {{level}}, etc. */
     instructions: string;
-    /** Optional AI prompt to verify content quality and set content_status to 'community' */
-    verification_prompt: string;
+    /** Verification settings for the content. */
+    verification: {
+        /** AI prompt to verify content quality. Supports placeholders like {{topic}}, {{level}}, etc. */
+        prompt: string;
+        /** Whether to automatically verify the content. If true, the content will be verified automatically when it is inserted and shared with the community. */
+        auto_verify: boolean;
+    };
     /** Column definitions for the table (excluding auto-generated columns) */
     columns: {
         [column_name: string]: DbColumnDefinition & {
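To illustrate the new shape, a partial table definition using the `verification` block might look like the sketch below. The table purpose, column shapes, and prompt wording are assumptions; fields of DbSharedContentTableDefinition not shown in this hunk are omitted.

// Partial sketch of a table definition with the new verification block.
// Column shapes and prompt text are hypothetical.
const flashcardsTable: Partial<DbSharedContentTableDefinition> = {
    description: 'Community-created flashcards',
    instructions: 'Create a flashcard about {{topic}} suitable for {{level}} learners.',
    verification: {
        // Prompt used by the backend to judge quality before sharing with the community.
        prompt: 'Verify that this flashcard about {{topic}} is accurate and appropriate for {{level}}.',
        // With auto_verify, verification runs automatically when the content is inserted.
        auto_verify: true,
    },
    columns: {
        front: { type: 'string', description: 'Question side' },
        back: { type: 'string', description: 'Answer side' },
    },
};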
@@ -62,14 +62,11 @@ export class RimoriCommunicationHandler {
             if (sender !== this.pluginId) {
                 EventBus.emit(sender, topic, eventData, eventId);
             }
+            else {
+                console.log('[PluginController] event from self', event);
+            }
         }
     };
-    // Set theme from MessageChannel query params
-    if (!worker) {
-        // const theme = this.queryParams['rm_theme'];
-        // setTheme(theme);
-        // console.log('TODO: set theme from MessageChannel query params');
-    }
     // Forward plugin events to parent (only after MessageChannel is ready)
     EventBus.on('*', (ev) => {
         var _a;
@@ -1,5 +1,6 @@
 import { Tool } from '../../fromRimori/PluginTypes';
 import { RimoriCommunicationHandler, RimoriInfo } from '../CommunicationHandler';
+import { Language } from '../../controller/SettingsController';
 export type OnStreamedObjectResult<T = any> = (result: T, isLoading: boolean) => void;
 type PrimitiveType = 'string' | 'number' | 'boolean';
 type ObjectToolParameterType = PrimitiveType | {
@@ -65,17 +66,19 @@ export declare class AIModule {
      * @param messages The messages to generate text from.
      * @param tools Optional tools to use for generation.
      * @param cache Whether to cache the result (default: false).
+     * @param model The model to use for generation.
      * @returns The generated text.
      */
-    getText(messages: Message[], tools?: Tool[], cache?: boolean): Promise<string>;
+    getText(messages: Message[], tools?: Tool[], cache?: boolean, model?: string): Promise<string>;
     /**
      * Stream text generation from messages using AI.
      * @param messages The messages to generate text from.
      * @param onMessage Callback for each message chunk.
      * @param tools Optional tools to use for generation.
      * @param cache Whether to cache the result (default: false).
+     * @param model The model to use for generation.
      */
-    getSteamedText(messages: Message[], onMessage: OnLLMResponse, tools?: Tool[], cache?: boolean): Promise<void>;
+    getSteamedText(messages: Message[], onMessage: OnLLMResponse, tools?: Tool[], cache?: boolean, model?: string): Promise<void>;
     /**
      * Generate voice audio from text using AI.
      * @param text The text to convert to voice.
@@ -89,13 +92,20 @@ export declare class AIModule {
     /**
      * Convert voice audio to text using AI.
      * @param file The audio file to convert.
+     * @param language Optional language for the voice.
      * @returns The transcribed text.
      */
-    getTextFromVoice(file: Blob): Promise<string>;
+    getTextFromVoice(file: Blob, language?: Language): Promise<string>;
     private getChatMessage;
     /**
      * Generate a structured object from a request using AI.
      * @param request The object generation request.
+     * @param request.systemPrompt The system prompt to use for generation.
+     * @param request.responseSchema The response schema to use for generation.
+     * @param request.userPrompt The user prompt to use for generation.
+     * @param request.cache Whether to cache the result (default: false).
+     * @param request.tools The tools to use for generation.
+     * @param request.model The model to use for generation.
      * @returns The generated object.
      */
     getObject<T = any>(params: {
@@ -104,12 +114,18 @@ export declare class AIModule {
         userPrompt?: string;
         cache?: boolean;
         tools?: Tool[];
+        model?: string;
     }): Promise<T>;
     /**
      * Generate a streamed structured object from a request using AI.
      * @param request The object generation request.
-     * @param onResult Callback for each result chunk.
-     * @param cache Whether to cache the result (default: false).
+     * @param request.systemPrompt The system prompt to use for generation.
+     * @param request.responseSchema The response schema to use for generation.
+     * @param request.userPrompt The user prompt to use for generation.
+     * @param request.onResult Callback for each result chunk.
+     * @param request.cache Whether to cache the result (default: false).
+     * @param request.tools The tools to use for generation.
+     * @param request.model The model to use for generation.
      */
     getStreamedObject<T = any>(params: {
         systemPrompt: string;
@@ -118,6 +134,7 @@ export declare class AIModule {
         onResult: OnStreamedObjectResult<T>;
         cache?: boolean;
         tools?: Tool[];
+        model?: string;
     }): Promise<void>;
     private streamObject;
     private sendToolResult;
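For plugin authors, the new optional parameters could be used roughly as in the sketch below. The `client.ai` accessor, the message shape, the model identifier, and the Language value are assumptions based on this diff, not a documented example.

// Rough usage sketch of the extended signatures (accessor and identifiers are assumptions).
async function demo(client: any, recording: Blob) {
    // The optional model override is now the fourth argument of getText.
    const summary = await client.ai.getText(
        [{ role: 'user', content: 'Summarize the last lesson in one sentence.' }],
        undefined,       // tools
        false,           // cache
        'gpt-4o-mini',   // model (hypothetical identifier)
    );
    // getTextFromVoice accepts an optional Language; the implementation only reads language.code.
    const transcript = await client.ai.getTextFromVoice(recording, { code: 'de' } as any);
    return { summary, transcript };
}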
@@ -7,7 +7,6 @@ var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
     step((generator = generator.apply(thisArg, _arguments || [])).next());
     });
 };
-import { getSTTResponse, getTTSResponse } from '../../controller/VoiceController';
 /**
  * Controller for AI-related operations.
  * Provides access to text generation, voice synthesis, and object generation.
@@ -26,13 +25,15 @@ export class AIModule {
      * @param messages The messages to generate text from.
      * @param tools Optional tools to use for generation.
      * @param cache Whether to cache the result (default: false).
+     * @param model The model to use for generation.
      * @returns The generated text.
      */
     getText(messages_1, tools_1) {
-        return __awaiter(this, arguments, void 0, function* (messages, tools, cache = false) {
+        return __awaiter(this, arguments, void 0, function* (messages, tools, cache = false, model) {
             const { result } = yield this.streamObject({
                 cache,
                 tools,
+                model,
                 messages,
                 responseSchema: {
                     result: {
@@ -49,13 +50,15 @@ export class AIModule {
      * @param onMessage Callback for each message chunk.
      * @param tools Optional tools to use for generation.
      * @param cache Whether to cache the result (default: false).
+     * @param model The model to use for generation.
      */
     getSteamedText(messages_1, onMessage_1, tools_1) {
-        return __awaiter(this, arguments, void 0, function* (messages, onMessage, tools, cache = false) {
+        return __awaiter(this, arguments, void 0, function* (messages, onMessage, tools, cache = false, model) {
             const messageId = Math.random().toString(36).substring(3);
             const { result } = yield this.streamObject({
                 cache,
                 tools,
+                model,
                 messages,
                 responseSchema: {
                     result: {
@@ -78,17 +81,39 @@ export class AIModule {
      */
     getVoice(text_1) {
         return __awaiter(this, arguments, void 0, function* (text, voice = 'alloy', speed = 1, language, cache = false) {
-            return getTTSResponse(this.backendUrl, { input: text, voice, speed, language, cache }, this.token);
+            return yield fetch(`${this.backendUrl}/voice/tts`, {
+                method: 'POST',
+                headers: {
+                    'Content-Type': 'application/json',
+                    Authorization: `Bearer ${this.token}`,
+                },
+                body: JSON.stringify({ input: text, voice, speed, language, cache }),
+            }).then((r) => r.blob());
         });
     }
     /**
      * Convert voice audio to text using AI.
      * @param file The audio file to convert.
+     * @param language Optional language for the voice.
      * @returns The transcribed text.
      */
-    getTextFromVoice(file) {
+    getTextFromVoice(file, language) {
         return __awaiter(this, void 0, void 0, function* () {
-            return getSTTResponse(this.backendUrl, file, this.token);
+            const formData = new FormData();
+            formData.append('file', file);
+            if (language) {
+                formData.append('language', language.code);
+            }
+            return yield fetch(`${this.backendUrl}/voice/stt`, {
+                method: 'POST',
+                headers: { Authorization: `Bearer ${this.token}` },
+                body: formData,
+            })
+                .then((r) => r.json())
+                .then((r) => {
+                    // console.log("STT response: ", r);
+                    return r.text;
+                });
         });
     }
     getChatMessage(systemPrompt, userPrompt) {
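Since getVoice still resolves to an audio Blob (now fetched inline rather than via the removed helper), browser playback could look like the sketch below; the `client.ai` accessor is assumed as above and the voice and speed values are illustrative.

// Play TTS output in a browser context (sketch; requires a DOM environment).
async function speak(client: any, text: string) {
    const audio: Blob = await client.ai.getVoice(text, 'alloy', 1);
    const url = URL.createObjectURL(audio);
    const player = new Audio(url);
    player.onended = () => URL.revokeObjectURL(url);
    await player.play();
}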
@@ -101,40 +126,53 @@ export class AIModule {
     /**
      * Generate a structured object from a request using AI.
      * @param request The object generation request.
+     * @param request.systemPrompt The system prompt to use for generation.
+     * @param request.responseSchema The response schema to use for generation.
+     * @param request.userPrompt The user prompt to use for generation.
+     * @param request.cache Whether to cache the result (default: false).
+     * @param request.tools The tools to use for generation.
+     * @param request.model The model to use for generation.
      * @returns The generated object.
      */
     getObject(params) {
         return __awaiter(this, void 0, void 0, function* () {
-            const { systemPrompt, responseSchema, userPrompt, cache = false, tools = [] } = params;
+            const { systemPrompt, responseSchema, userPrompt, cache = false, tools = [], model = undefined } = params;
             return yield this.streamObject({
                 responseSchema,
                 messages: this.getChatMessage(systemPrompt, userPrompt),
                 cache,
                 tools,
+                model,
             });
         });
     }
     /**
      * Generate a streamed structured object from a request using AI.
      * @param request The object generation request.
-     * @param onResult Callback for each result chunk.
-     * @param cache Whether to cache the result (default: false).
+     * @param request.systemPrompt The system prompt to use for generation.
+     * @param request.responseSchema The response schema to use for generation.
+     * @param request.userPrompt The user prompt to use for generation.
+     * @param request.onResult Callback for each result chunk.
+     * @param request.cache Whether to cache the result (default: false).
+     * @param request.tools The tools to use for generation.
+     * @param request.model The model to use for generation.
      */
     getStreamedObject(params) {
         return __awaiter(this, void 0, void 0, function* () {
-            const { systemPrompt, responseSchema, userPrompt, onResult, cache = false, tools = [] } = params;
+            const { systemPrompt, responseSchema, userPrompt, onResult, cache = false, tools = [], model = undefined } = params;
             yield this.streamObject({
                 responseSchema,
                 messages: this.getChatMessage(systemPrompt, userPrompt),
                 onResult,
                 cache,
                 tools,
+                model,
             });
         });
     }
     streamObject(params) {
         return __awaiter(this, void 0, void 0, function* () {
-            const { messages, responseSchema, onResult = () => null, cache = false, tools = [] } = params;
+            const { messages, responseSchema, onResult = () => null, cache = false, tools = [], model = undefined } = params;
             const chatMessages = messages.map((message, index) => (Object.assign(Object.assign({}, message), { id: `${index + 1}` })));
             const response = yield fetch(`${this.backendUrl}/ai/llm`, {
                 body: JSON.stringify({
@@ -143,6 +181,7 @@ export class AIModule {
                     stream: true,
                     responseSchema,
                     messages: chatMessages,
+                    model,
                 }),
                 method: 'POST',
                 headers: { Authorization: `Bearer ${this.token}`, 'Content-Type': 'application/json' },
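A streamed structured-object call that forwards the new model field to the /ai/llm request might look like the following sketch; the schema keys, prompts, and model identifier are illustrative, and the `client.ai` accessor is an assumption.

// Streamed object generation with the new model override (sketch).
async function outlineLesson(client: any) {
    await client.ai.getStreamedObject({
        systemPrompt: 'You create concise lesson outlines.',
        userPrompt: 'Outline a beginner lesson on German articles.',
        responseSchema: { title: 'string', summary: 'string' },  // primitive schema types, per ObjectToolParameterType
        onResult: (partial: any, isLoading: boolean) => {
            // Invoked for each chunk; isLoading turns false once the object is complete.
            console.log(isLoading ? 'partial' : 'final', partial);
        },
        model: 'gpt-4o-mini',  // forwarded in the /ai/llm request body as shown above
    });
}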
@@ -41,6 +41,11 @@ export declare class PluginModule {
      * @returns The user info.
      */
     getUserInfo(): UserInfo;
+    getGuildInfo(): {
+        id: string;
+        name: string;
+        description: string | null;
+    };
     /**
      * Register a callback to be notified when RimoriInfo is updated.
      * This is useful for reacting to changes in user info, tokens, or other rimori data.
@@ -53,6 +53,13 @@ export class PluginModule {
     getUserInfo() {
         return this.rimoriInfo.profile;
     }
+    getGuildInfo() {
+        return {
+            id: this.rimoriInfo.guild.id,
+            name: this.rimoriInfo.guild.name,
+            description: this.rimoriInfo.guild.description,
+        };
+    }
     /**
      * Register a callback to be notified when RimoriInfo is updated.
      * This is useful for reacting to changes in user info, tokens, or other rimori data.
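A plugin could read the new guild accessor as in this short sketch; the `client.plugin` accessor name is an assumption.

// Reading the new guild info (sketch).
function logGuild(client: any) {
    const guild = client.plugin.getGuildInfo();
    console.log(`Running in guild ${guild.name} (${guild.id})`);
    if (guild.description) {
        console.log('Description:', guild.description);
    }
}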
@@ -135,13 +135,28 @@ export declare class SharedContentController {
      */
     create<T = any>(tableName: string, content: Omit<SharedContent<T>, 'id' | 'created_at' | 'created_by'>): Promise<SharedContent<T>>;
     /**
-     * Update existing shared content.
+     * Update existing shared content via backend.
+     * If content was already validated (community/featured) and user is not a moderator,
+     * the status is reset to 'unverified'.
      * @param tableName - Name of the shared content table
      * @param contentId - ID of the content to update
      * @param updates - Updates to apply
      * @returns Updated content
      */
     update<T = any>(tableName: string, contentId: string, updates: Partial<SharedContent<T>>): Promise<SharedContent<T>>;
+    /**
+     * Request validation for shared content.
+     * Triggers the backend verification process to potentially upgrade content_status to 'community'.
+     * Only the content creator can request validation.
+     * @param tableName - Name of the shared content table
+     * @param contentId - ID of the content to validate
+     * @returns Validation result with new content_status
+     */
+    validate(tableName: string, contentId: string): Promise<{
+        success: boolean;
+        content_status: ContentStatus;
+        reason?: string;
+    }>;
     /**
      * Delete shared content.
      * @param tableName - Name of the shared content table
@@ -273,7 +273,9 @@ export class SharedContentController {
         });
     }
     /**
-     * Update existing shared content.
+     * Update existing shared content via backend.
+     * If content was already validated (community/featured) and user is not a moderator,
+     * the status is reset to 'unverified'.
      * @param tableName - Name of the shared content table
      * @param contentId - ID of the content to update
      * @param updates - Updates to apply
@@ -281,18 +283,45 @@
      */
     update(tableName, contentId, updates) {
         return __awaiter(this, void 0, void 0, function* () {
-            const fullTableName = this.getTableName(tableName);
-            const { data, error } = yield this.supabase
-                .from(fullTableName)
-                .update(updates)
-                .eq('id', contentId)
-                .select()
-                .single();
-            if (error) {
-                console.error('Error updating shared content:', error);
-                throw new Error('Error updating shared content');
+            const response = yield this.rimoriClient.runtime.fetchBackend('/shared-content/update', {
+                method: 'POST',
+                headers: { 'Content-Type': 'application/json' },
+                body: JSON.stringify({
+                    tableName,
+                    contentId,
+                    updates,
+                }),
+            });
+            if (!response.ok) {
+                console.error('Error updating shared content:', response.statusText);
+                throw new Error(`Failed to update shared content: ${response.statusText}`);
             }
-            return data;
+            return (yield response.json());
+        });
+    }
+    /**
+     * Request validation for shared content.
+     * Triggers the backend verification process to potentially upgrade content_status to 'community'.
+     * Only the content creator can request validation.
+     * @param tableName - Name of the shared content table
+     * @param contentId - ID of the content to validate
+     * @returns Validation result with new content_status
+     */
+    validate(tableName, contentId) {
+        return __awaiter(this, void 0, void 0, function* () {
+            const response = yield this.rimoriClient.runtime.fetchBackend('/shared-content/validate', {
+                method: 'POST',
+                headers: { 'Content-Type': 'application/json' },
+                body: JSON.stringify({
+                    tableName,
+                    contentId,
+                }),
+            });
+            if (!response.ok) {
+                console.error('Error validating shared content:', response.statusText);
+                throw new Error(`Failed to validate shared content: ${response.statusText}`);
+            }
+            return yield response.json();
         });
     }
     /**
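Taken together, an edit followed by a re-validation request could look like the sketch below; the `client.sharedContent` accessor and the 'stories' table name are assumptions.

// Update content and ask the backend to re-validate it (sketch).
async function reviseAndRevalidate(client: any, storyId: string) {
    // A non-moderator edit of already validated content resets it to 'unverified'.
    await client.sharedContent.update('stories', storyId, { title: 'Revised title' });
    // Only the creator can request validation; success may upgrade the status to 'community'.
    const verdict = await client.sharedContent.validate('stories', storyId);
    if (!verdict.success) {
        console.warn('Validation declined:', verdict.reason);
    }
    return verdict.content_status;
}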
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@rimori/client",
-  "version": "2.5.12",
+  "version": "2.5.13",
   "main": "dist/index.js",
   "types": "dist/index.d.ts",
   "repository": {
@@ -1,10 +0,0 @@
-export declare function getSTTResponse(backendUrl: string, audio: Blob, token: string): Promise<string>;
-export declare function getTTSResponse(backendUrl: string, request: TTSRequest, token: string): Promise<Blob>;
-interface TTSRequest {
-    input: string;
-    voice: string;
-    speed: number;
-    language?: string;
-    cache?: boolean;
-}
-export {};
@@ -1,37 +0,0 @@
-var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
-    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
-    return new (P || (P = Promise))(function (resolve, reject) {
-        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
-        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
-        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
-        step((generator = generator.apply(thisArg, _arguments || [])).next());
-    });
-};
-export function getSTTResponse(backendUrl, audio, token) {
-    return __awaiter(this, void 0, void 0, function* () {
-        const formData = new FormData();
-        formData.append('file', audio);
-        return yield fetch(`${backendUrl}/voice/stt`, {
-            method: 'POST',
-            headers: { Authorization: `Bearer ${token}` },
-            body: formData,
-        })
-            .then((r) => r.json())
-            .then((r) => {
-                // console.log("STT response: ", r);
-                return r.text;
-            });
-    });
-}
-export function getTTSResponse(backendUrl, request, token) {
-    return __awaiter(this, void 0, void 0, function* () {
-        return yield fetch(`${backendUrl}/voice/tts`, {
-            method: 'POST',
-            headers: {
-                'Content-Type': 'application/json',
-                Authorization: `Bearer ${token}`,
-            },
-            body: JSON.stringify(request),
-        }).then((r) => r.blob());
-    });
-}
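With the standalone VoiceController helpers deleted, any code that called them directly would move to the AIModule methods shown earlier. A minimal sketch, assuming an already-initialized client exposing `client.ai` (the accessor name is an assumption):

// Replacement for the removed getSTTResponse/getTTSResponse helpers (sketch).
async function voiceRoundTrip(client: any, recording: Blob) {
    const text = await client.ai.getTextFromVoice(recording);   // was getSTTResponse(backendUrl, audio, token)
    const audio = await client.ai.getVoice(text, 'alloy');      // was getTTSResponse(backendUrl, request, token)
    return audio;
}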