@rimori/client 2.5.12-next.1 → 2.5.12-next.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -62,14 +62,11 @@ export class RimoriCommunicationHandler {
  if (sender !== this.pluginId) {
  EventBus.emit(sender, topic, eventData, eventId);
  }
+ else {
+ console.log('[PluginController] event from self', event);
+ }
  }
  };
- // Set theme from MessageChannel query params
- if (!worker) {
- // const theme = this.queryParams['rm_theme'];
- // setTheme(theme);
- // console.log('TODO: set theme from MessageChannel query params');
- }
  // Forward plugin events to parent (only after MessageChannel is ready)
  EventBus.on('*', (ev) => {
  var _a;
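
In effect, inbound messages from other plugins are still re-emitted on the local EventBus, while messages whose sender matches the plugin's own id are now logged instead of being silently dropped; the commented-out theme-setup placeholder is removed. A minimal listener sketch, assuming the `EventBus.on('*', ...)` wildcard form visible in this hunk (the callback body and names are illustrative, not part of the package docs):

// Hypothetical subscriber mirroring the wildcard listener above;
// the exact event payload shape is not shown in this diff.
EventBus.on('*', (event) => {
  console.log('[MyPlugin] received forwarded event', event);
});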
@@ -65,17 +65,19 @@ export declare class AIModule {
  * @param messages The messages to generate text from.
  * @param tools Optional tools to use for generation.
  * @param cache Whether to cache the result (default: false).
+ * @param model The model to use for generation.
  * @returns The generated text.
  */
- getText(messages: Message[], tools?: Tool[], cache?: boolean): Promise<string>;
+ getText(messages: Message[], tools?: Tool[], cache?: boolean, model?: string): Promise<string>;
  /**
  * Stream text generation from messages using AI.
  * @param messages The messages to generate text from.
  * @param onMessage Callback for each message chunk.
  * @param tools Optional tools to use for generation.
  * @param cache Whether to cache the result (default: false).
+ * @param model The model to use for generation.
  */
- getSteamedText(messages: Message[], onMessage: OnLLMResponse, tools?: Tool[], cache?: boolean): Promise<void>;
+ getSteamedText(messages: Message[], onMessage: OnLLMResponse, tools?: Tool[], cache?: boolean, model?: string): Promise<void>;
  /**
  * Generate voice audio from text using AI.
  * @param text The text to convert to voice.
@@ -96,6 +98,12 @@ export declare class AIModule {
  /**
  * Generate a structured object from a request using AI.
  * @param request The object generation request.
+ * @param request.systemPrompt The system prompt to use for generation.
+ * @param request.responseSchema The response schema to use for generation.
+ * @param request.userPrompt The user prompt to use for generation.
+ * @param request.cache Whether to cache the result (default: false).
+ * @param request.tools The tools to use for generation.
+ * @param request.model The model to use for generation.
  * @returns The generated object.
  */
  getObject<T = any>(params: {
@@ -104,12 +112,18 @@ export declare class AIModule {
  userPrompt?: string;
  cache?: boolean;
  tools?: Tool[];
+ model?: string;
  }): Promise<T>;
  /**
  * Generate a streamed structured object from a request using AI.
  * @param request The object generation request.
- * @param onResult Callback for each result chunk.
- * @param cache Whether to cache the result (default: false).
+ * @param request.systemPrompt The system prompt to use for generation.
+ * @param request.responseSchema The response schema to use for generation.
+ * @param request.userPrompt The user prompt to use for generation.
+ * @param request.onResult Callback for each result chunk.
+ * @param request.cache Whether to cache the result (default: false).
+ * @param request.tools The tools to use for generation.
+ * @param request.model The model to use for generation.
  */
  getStreamedObject<T = any>(params: {
  systemPrompt: string;
@@ -118,6 +132,7 @@ export declare class AIModule {
  onResult: OnStreamedObjectResult<T>;
  cache?: boolean;
  tools?: Tool[];
+ model?: string;
  }): Promise<void>;
  private streamObject;
  private sendToolResult;
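
The declaration changes above boil down to one addition: every generation method now accepts an optional `model` string that overrides the default model. A minimal consumer-side sketch in TypeScript, assuming an `AIModule` instance named `ai` and a placeholder model identifier (neither appears in this diff; the `Message` shape is likewise assumed):

// Hypothetical usage of the optional `model` parameter added in this release.
// Omitting the argument keeps the previous default-model behavior.
const text = await ai.getText(
  [{ role: 'user', content: 'Summarize this lesson.' }], // assumed Message shape
  undefined,        // tools: none
  false,            // cache (default)
  'example-model',  // placeholder identifier, not from this diff
);

The same trailing parameter applies to `getSteamedText`; the object APIs instead take `model` as a field on their params object, as the implementation diff below shows.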
@@ -26,13 +26,15 @@ export class AIModule {
  * @param messages The messages to generate text from.
  * @param tools Optional tools to use for generation.
  * @param cache Whether to cache the result (default: false).
+ * @param model The model to use for generation.
  * @returns The generated text.
  */
  getText(messages_1, tools_1) {
- return __awaiter(this, arguments, void 0, function* (messages, tools, cache = false) {
+ return __awaiter(this, arguments, void 0, function* (messages, tools, cache = false, model) {
  const { result } = yield this.streamObject({
  cache,
  tools,
+ model,
  messages,
  responseSchema: {
  result: {
@@ -49,13 +51,15 @@ export class AIModule {
  * @param onMessage Callback for each message chunk.
  * @param tools Optional tools to use for generation.
  * @param cache Whether to cache the result (default: false).
+ * @param model The model to use for generation.
  */
  getSteamedText(messages_1, onMessage_1, tools_1) {
- return __awaiter(this, arguments, void 0, function* (messages, onMessage, tools, cache = false) {
+ return __awaiter(this, arguments, void 0, function* (messages, onMessage, tools, cache = false, model) {
  const messageId = Math.random().toString(36).substring(3);
  const { result } = yield this.streamObject({
  cache,
  tools,
+ model,
  messages,
  responseSchema: {
  result: {
@@ -101,40 +105,53 @@ export class AIModule {
  /**
  * Generate a structured object from a request using AI.
  * @param request The object generation request.
+ * @param request.systemPrompt The system prompt to use for generation.
+ * @param request.responseSchema The response schema to use for generation.
+ * @param request.userPrompt The user prompt to use for generation.
+ * @param request.cache Whether to cache the result (default: false).
+ * @param request.tools The tools to use for generation.
+ * @param request.model The model to use for generation.
  * @returns The generated object.
  */
  getObject(params) {
  return __awaiter(this, void 0, void 0, function* () {
- const { systemPrompt, responseSchema, userPrompt, cache = false, tools = [] } = params;
+ const { systemPrompt, responseSchema, userPrompt, cache = false, tools = [], model = undefined } = params;
  return yield this.streamObject({
  responseSchema,
  messages: this.getChatMessage(systemPrompt, userPrompt),
  cache,
  tools,
+ model,
  });
  });
  }
  /**
  * Generate a streamed structured object from a request using AI.
  * @param request The object generation request.
- * @param onResult Callback for each result chunk.
- * @param cache Whether to cache the result (default: false).
+ * @param request.systemPrompt The system prompt to use for generation.
+ * @param request.responseSchema The response schema to use for generation.
+ * @param request.userPrompt The user prompt to use for generation.
+ * @param request.onResult Callback for each result chunk.
+ * @param request.cache Whether to cache the result (default: false).
+ * @param request.tools The tools to use for generation.
+ * @param request.model The model to use for generation.
  */
  getStreamedObject(params) {
  return __awaiter(this, void 0, void 0, function* () {
- const { systemPrompt, responseSchema, userPrompt, onResult, cache = false, tools = [] } = params;
+ const { systemPrompt, responseSchema, userPrompt, onResult, cache = false, tools = [], model = undefined } = params;
  yield this.streamObject({
  responseSchema,
  messages: this.getChatMessage(systemPrompt, userPrompt),
  onResult,
  cache,
  tools,
+ model,
  });
  });
  }
  streamObject(params) {
  return __awaiter(this, void 0, void 0, function* () {
- const { messages, responseSchema, onResult = () => null, cache = false, tools = [] } = params;
+ const { messages, responseSchema, onResult = () => null, cache = false, tools = [], model = undefined } = params;
  const chatMessages = messages.map((message, index) => (Object.assign(Object.assign({}, message), { id: `${index + 1}` })));
  const response = yield fetch(`${this.backendUrl}/ai/llm`, {
  body: JSON.stringify({
@@ -143,6 +160,7 @@ export class AIModule {
  stream: true,
  responseSchema,
  messages: chatMessages,
+ model,
  }),
  method: 'POST',
  headers: { Authorization: `Bearer ${this.token}`, 'Content-Type': 'application/json' },
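
In the implementation, `model` is simply threaded from each public method's params into `streamObject`, which forwards it in the JSON body of the POST to `${backendUrl}/ai/llm`; when omitted it stays `undefined`, leaving model selection to the backend. A sketch of the object API under the same assumptions as above (hypothetical `ai` instance and model name; the response-schema format is inferred, not documented in this diff):

// Hypothetical call; `model` is forwarded verbatim to the /ai/llm request body.
const answer = await ai.getObject<{ result: string }>({
  systemPrompt: 'You are a concise assistant.',
  userPrompt: 'Name one benefit of response caching.',
  responseSchema: { result: { type: 'string' } }, // schema format assumed
  model: 'example-model', // optional; omit to use the backend default
});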
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@rimori/client",
- "version": "2.5.12-next.1",
+ "version": "2.5.12-next.3",
  "main": "dist/index.js",
  "types": "dist/index.d.ts",
  "repository": {