@lobehub/chat 0.149.2 → 0.149.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +50 -0
- package/locales/ar/setting.json +1 -0
- package/locales/bg-BG/setting.json +1 -0
- package/locales/de-DE/setting.json +1 -0
- package/locales/en-US/setting.json +1 -0
- package/locales/es-ES/setting.json +1 -0
- package/locales/fr-FR/setting.json +1 -0
- package/locales/it-IT/setting.json +1 -0
- package/locales/ja-JP/setting.json +1 -0
- package/locales/ko-KR/setting.json +1 -0
- package/locales/nl-NL/setting.json +1 -0
- package/locales/pl-PL/setting.json +1 -0
- package/locales/pt-BR/setting.json +1 -0
- package/locales/ru-RU/setting.json +1 -0
- package/locales/tr-TR/setting.json +1 -0
- package/locales/vi-VN/setting.json +1 -0
- package/locales/zh-CN/setting.json +2 -1
- package/locales/zh-TW/setting.json +1 -0
- package/package.json +1 -1
- package/src/app/api/chat/google/route.test.ts +1 -2
- package/src/app/api/chat/google/route.ts +1 -18
- package/src/app/settings/common/Common.tsx +1 -0
- package/src/libs/agent-runtime/anthropic/index.test.ts +44 -32
- package/src/libs/agent-runtime/anthropic/index.ts +12 -9
- package/src/libs/agent-runtime/azureOpenai/index.ts +3 -4
- package/src/libs/agent-runtime/bedrock/index.ts +1 -1
- package/src/libs/agent-runtime/ollama/index.ts +7 -0
- package/src/libs/agent-runtime/perplexity/index.ts +1 -0
- package/src/libs/agent-runtime/types/chat.ts +2 -1
- package/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.ts +1 -0
- package/src/locales/default/setting.ts +2 -1
- package/src/services/chat.ts +16 -13
package/CHANGELOG.md
CHANGED
|
@@ -2,6 +2,56 @@
|
|
|
2
2
|
|
|
3
3
|
# Changelog
|
|
4
4
|
|
|
5
|
+
### [Version 0.149.4](https://github.com/lobehub/lobe-chat/compare/v0.149.3...v0.149.4)
|
|
6
|
+
|
|
7
|
+
<sup>Released on **2024-04-25**</sup>
|
|
8
|
+
|
|
9
|
+
#### 🐛 Bug Fixes
|
|
10
|
+
|
|
11
|
+
- **misc**: Fix chat client request not support abort.
|
|
12
|
+
|
|
13
|
+
<br/>
|
|
14
|
+
|
|
15
|
+
<details>
|
|
16
|
+
<summary><kbd>Improvements and Fixes</kbd></summary>
|
|
17
|
+
|
|
18
|
+
#### What's fixed
|
|
19
|
+
|
|
20
|
+
- **misc**: Fix chat client request not support abort, closes [#2193](https://github.com/lobehub/lobe-chat/issues/2193) ([d22ef2c](https://github.com/lobehub/lobe-chat/commit/d22ef2c))
|
|
21
|
+
|
|
22
|
+
</details>
|
|
23
|
+
|
|
24
|
+
<div align="right">
|
|
25
|
+
|
|
26
|
+
[](#readme-top)
|
|
27
|
+
|
|
28
|
+
</div>
|
|
29
|
+
|
|
30
|
+
### [Version 0.149.3](https://github.com/lobehub/lobe-chat/compare/v0.149.2...v0.149.3)
|
|
31
|
+
|
|
32
|
+
<sup>Released on **2024-04-25**</sup>
|
|
33
|
+
|
|
34
|
+
#### 💄 Styles
|
|
35
|
+
|
|
36
|
+
- **misc**: Add displaying the message "Reset Successfully.".
|
|
37
|
+
|
|
38
|
+
<br/>
|
|
39
|
+
|
|
40
|
+
<details>
|
|
41
|
+
<summary><kbd>Improvements and Fixes</kbd></summary>
|
|
42
|
+
|
|
43
|
+
#### Styles
|
|
44
|
+
|
|
45
|
+
- **misc**: Add displaying the message "Reset Successfully.", closes [#2158](https://github.com/lobehub/lobe-chat/issues/2158) ([27913ef](https://github.com/lobehub/lobe-chat/commit/27913ef))
|
|
46
|
+
|
|
47
|
+
</details>
|
|
48
|
+
|
|
49
|
+
<div align="right">
|
|
50
|
+
|
|
51
|
+
[](#readme-top)
|
|
52
|
+
|
|
53
|
+
</div>
|
|
54
|
+
|
|
5
55
|
### [Version 0.149.2](https://github.com/lobehub/lobe-chat/compare/v0.149.1...v0.149.2)
|
|
6
56
|
|
|
7
57
|
<sup>Released on **2024-04-24**</sup>
|
package/locales/bg-BG/setting.json
CHANGED
|
@@ -22,6 +22,7 @@
|
|
|
22
22
|
"confirm": "Потвърдете нулирането на всички настройки?",
|
|
23
23
|
"currentVersion": "Текуща версия",
|
|
24
24
|
"desc": "Нулирайте всички настройки до стойностите по подразбиране",
|
|
25
|
+
"success": "Всички настройки са нулирани успешно",
|
|
25
26
|
"title": "Нулиране на всички настройки"
|
|
26
27
|
}
|
|
27
28
|
},
|
|
@@ -22,6 +22,7 @@
|
|
|
22
22
|
"confirm": "Alle Einstellungen zurücksetzen?",
|
|
23
23
|
"currentVersion": "Aktuelle Version",
|
|
24
24
|
"desc": "Alle Einstellungen auf Standardwerte zurücksetzen",
|
|
25
|
+
"success": "Alle Einstellungen wurden zurückgesetzt",
|
|
25
26
|
"title": "Alle Einstellungen zurücksetzen"
|
|
26
27
|
}
|
|
27
28
|
},
|
|
@@ -22,6 +22,7 @@
|
|
|
22
22
|
"confirm": "¿Confirmar el restablecimiento de todas las configuraciones?",
|
|
23
23
|
"currentVersion": "Versión actual",
|
|
24
24
|
"desc": "Restablecer todas las opciones de configuración a sus valores predeterminados",
|
|
25
|
+
"success": "Se han restablecido todas las configuraciones",
|
|
25
26
|
"title": "Restablecer todas las configuraciones"
|
|
26
27
|
}
|
|
27
28
|
},
|
|
@@ -22,6 +22,7 @@
|
|
|
22
22
|
"confirm": "Confirmer la réinitialisation de tous les paramètres ?",
|
|
23
23
|
"currentVersion": "Version actuelle",
|
|
24
24
|
"desc": "Réinitialiser tous les paramètres aux valeurs par défaut",
|
|
25
|
+
"success": "Toutes les configurations ont été réinitialisées avec succès",
|
|
25
26
|
"title": "Réinitialiser tous les paramètres"
|
|
26
27
|
}
|
|
27
28
|
},
|
|
@@ -22,6 +22,7 @@
|
|
|
22
22
|
"confirm": "Confermi di ripristinare tutte le impostazioni?",
|
|
23
23
|
"currentVersion": "Versione corrente",
|
|
24
24
|
"desc": "Ripristina tutte le impostazioni ai valori predefiniti",
|
|
25
|
+
"success": "Tutte le impostazioni sono state ripristinate con successo",
|
|
25
26
|
"title": "Ripristina tutte le impostazioni"
|
|
26
27
|
}
|
|
27
28
|
},
|
|
@@ -22,6 +22,7 @@
|
|
|
22
22
|
"confirm": "Alle instellingen resetten bevestigen?",
|
|
23
23
|
"currentVersion": "Huidige versie",
|
|
24
24
|
"desc": "Alle instellingen worden teruggezet naar de standaardwaarden",
|
|
25
|
+
"success": "Alle instellingen zijn succesvol gereset",
|
|
25
26
|
"title": "Alle instellingen resetten"
|
|
26
27
|
}
|
|
27
28
|
},
|
|
@@ -22,6 +22,7 @@
|
|
|
22
22
|
"confirm": "Potwierdź zresetowanie wszystkich ustawień?",
|
|
23
23
|
"currentVersion": "Aktualna wersja",
|
|
24
24
|
"desc": "Zresetuj wszystkie ustawienia do wartości domyślnych",
|
|
25
|
+
"success": "Wszystkie ustawienia zostały zresetowane",
|
|
25
26
|
"title": "Zresetuj wszystkie ustawienia"
|
|
26
27
|
}
|
|
27
28
|
},
|
|
@@ -22,6 +22,7 @@
|
|
|
22
22
|
"confirm": "Confirmar a redefinição de todas as configurações?",
|
|
23
23
|
"currentVersion": "Versão Atual",
|
|
24
24
|
"desc": "Redefinir todas as configurações para os valores padrão",
|
|
25
|
+
"success": "Todas as configurações foram redefinidas com sucesso",
|
|
25
26
|
"title": "Redefinir Todas as Configurações"
|
|
26
27
|
}
|
|
27
28
|
},
|
|
@@ -22,6 +22,7 @@
|
|
|
22
22
|
"confirm": "Вы уверены, что хотите сбросить все настройки?",
|
|
23
23
|
"currentVersion": "Текущая версия",
|
|
24
24
|
"desc": "Сброс всех параметров настройки до значений по умолчанию",
|
|
25
|
+
"success": "Все настройки были успешно сброшены",
|
|
25
26
|
"title": "Сброс всех настроек"
|
|
26
27
|
}
|
|
27
28
|
},
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@lobehub/chat",
|
|
3
|
-
"version": "0.149.2",
|
|
3
|
+
"version": "0.149.4",
|
|
4
4
|
"description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
|
|
5
5
|
"keywords": [
|
|
6
6
|
"framework",
|
|
@@ -15,8 +15,7 @@ describe('Configuration tests', () => {
|
|
|
15
15
|
});
|
|
16
16
|
|
|
17
17
|
it('should contain specific regions in preferredRegion', () => {
|
|
18
|
-
expect(preferredRegion).not.contain(['hk1']);
|
|
19
|
-
expect(preferredRegion).not.contain(['dub1']);
|
|
18
|
+
expect(preferredRegion).not.contain(['hk1']);
|
|
20
19
|
});
|
|
21
20
|
});
|
|
22
21
|
|
|
@@ -15,23 +15,6 @@ export const runtime = 'edge';
|
|
|
15
15
|
|
|
16
16
|
// due to Gemini-1.5-pro is not available in Hong Kong and Ireland, we need to set the preferred region to exclude "Hong Kong" or "Ireland".
|
|
17
17
|
// refs: https://github.com/lobehub/lobe-chat/pull/2149
|
|
18
|
-
export const preferredRegion = [
|
|
19
|
-
'icn1',
|
|
20
|
-
'sin1',
|
|
21
|
-
'hnd1',
|
|
22
|
-
'kix1',
|
|
23
|
-
'bom1',
|
|
24
|
-
'cdg1',
|
|
25
|
-
'lhr1',
|
|
26
|
-
'cpt1',
|
|
27
|
-
'pdx1',
|
|
28
|
-
'arn1',
|
|
29
|
-
'cle1',
|
|
30
|
-
'syd1',
|
|
31
|
-
'iad1',
|
|
32
|
-
'fra1',
|
|
33
|
-
'sfo1',
|
|
34
|
-
'gru1'
|
|
35
|
-
];
|
|
18
|
+
export const preferredRegion = ['cle1', 'iad1', 'pdx1', 'sfo1'];
|
|
36
19
|
|
|
37
20
|
export const POST = async (req: Request) => UniverseRoute(req, { params: { provider: 'google' } });
|
|
@@ -72,14 +72,17 @@ describe('LobeAnthropicAI', () => {
|
|
|
72
72
|
});
|
|
73
73
|
|
|
74
74
|
// Assert
|
|
75
|
-
expect(instance['client'].messages.create).toHaveBeenCalledWith(
|
|
76
|
-
|
|
77
|
-
|
|
78
|
-
|
|
79
|
-
|
|
80
|
-
|
|
81
|
-
|
|
82
|
-
|
|
75
|
+
expect(instance['client'].messages.create).toHaveBeenCalledWith(
|
|
76
|
+
{
|
|
77
|
+
max_tokens: 4096,
|
|
78
|
+
messages: [{ content: 'Hello', role: 'user' }],
|
|
79
|
+
model: 'claude-3-haiku-20240307',
|
|
80
|
+
stream: true,
|
|
81
|
+
temperature: 0,
|
|
82
|
+
top_p: 1,
|
|
83
|
+
},
|
|
84
|
+
{},
|
|
85
|
+
);
|
|
83
86
|
expect(result).toBeInstanceOf(Response);
|
|
84
87
|
});
|
|
85
88
|
|
|
@@ -105,14 +108,17 @@ describe('LobeAnthropicAI', () => {
|
|
|
105
108
|
});
|
|
106
109
|
|
|
107
110
|
// Assert
|
|
108
|
-
expect(instance['client'].messages.create).toHaveBeenCalledWith(
|
|
109
|
-
|
|
110
|
-
|
|
111
|
-
|
|
112
|
-
|
|
113
|
-
|
|
114
|
-
|
|
115
|
-
|
|
111
|
+
expect(instance['client'].messages.create).toHaveBeenCalledWith(
|
|
112
|
+
{
|
|
113
|
+
max_tokens: 4096,
|
|
114
|
+
messages: [{ content: 'Hello', role: 'user' }],
|
|
115
|
+
model: 'claude-3-haiku-20240307',
|
|
116
|
+
stream: true,
|
|
117
|
+
system: 'You are an awesome greeter',
|
|
118
|
+
temperature: 0,
|
|
119
|
+
},
|
|
120
|
+
{},
|
|
121
|
+
);
|
|
116
122
|
expect(result).toBeInstanceOf(Response);
|
|
117
123
|
});
|
|
118
124
|
|
|
@@ -137,14 +143,17 @@ describe('LobeAnthropicAI', () => {
|
|
|
137
143
|
});
|
|
138
144
|
|
|
139
145
|
// Assert
|
|
140
|
-
expect(instance['client'].messages.create).toHaveBeenCalledWith(
|
|
141
|
-
|
|
142
|
-
|
|
143
|
-
|
|
144
|
-
|
|
145
|
-
|
|
146
|
-
|
|
147
|
-
|
|
146
|
+
expect(instance['client'].messages.create).toHaveBeenCalledWith(
|
|
147
|
+
{
|
|
148
|
+
max_tokens: 2048,
|
|
149
|
+
messages: [{ content: 'Hello', role: 'user' }],
|
|
150
|
+
model: 'claude-3-haiku-20240307',
|
|
151
|
+
stream: true,
|
|
152
|
+
temperature: 0.5,
|
|
153
|
+
top_p: 1,
|
|
154
|
+
},
|
|
155
|
+
{},
|
|
156
|
+
);
|
|
148
157
|
expect(result).toBeInstanceOf(Response);
|
|
149
158
|
});
|
|
150
159
|
|
|
@@ -171,14 +180,17 @@ describe('LobeAnthropicAI', () => {
|
|
|
171
180
|
});
|
|
172
181
|
|
|
173
182
|
// Assert
|
|
174
|
-
expect(instance['client'].messages.create).toHaveBeenCalledWith(
|
|
175
|
-
|
|
176
|
-
|
|
177
|
-
|
|
178
|
-
|
|
179
|
-
|
|
180
|
-
|
|
181
|
-
|
|
183
|
+
expect(instance['client'].messages.create).toHaveBeenCalledWith(
|
|
184
|
+
{
|
|
185
|
+
max_tokens: 2048,
|
|
186
|
+
messages: [{ content: 'Hello', role: 'user' }],
|
|
187
|
+
model: 'claude-3-haiku-20240307',
|
|
188
|
+
stream: true,
|
|
189
|
+
temperature: 0.5,
|
|
190
|
+
top_p: 1,
|
|
191
|
+
},
|
|
192
|
+
{},
|
|
193
|
+
);
|
|
182
194
|
expect(result).toBeInstanceOf(Response);
|
|
183
195
|
});
|
|
184
196
|
|
|
@@ -32,15 +32,18 @@ export class LobeAnthropicAI implements LobeRuntimeAI {
|
|
|
32
32
|
const user_messages = messages.filter((m) => m.role !== 'system');
|
|
33
33
|
|
|
34
34
|
try {
|
|
35
|
-
const response = await this.client.messages.create(
|
|
36
|
-
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
|
|
35
|
+
const response = await this.client.messages.create(
|
|
36
|
+
{
|
|
37
|
+
max_tokens: max_tokens || 4096,
|
|
38
|
+
messages: buildAnthropicMessages(user_messages),
|
|
39
|
+
model: model,
|
|
40
|
+
stream: true,
|
|
41
|
+
system: system_message?.content as string,
|
|
42
|
+
temperature: temperature,
|
|
43
|
+
top_p: top_p,
|
|
44
|
+
},
|
|
45
|
+
{ signal: options?.signal },
|
|
46
|
+
);
|
|
44
47
|
|
|
45
48
|
const [prod, debug] = response.tee();
|
|
46
49
|
|
|
@@ -8,7 +8,7 @@ import { OpenAIStream, StreamingTextResponse } from 'ai';
|
|
|
8
8
|
|
|
9
9
|
import { LobeRuntimeAI } from '../BaseAI';
|
|
10
10
|
import { AgentRuntimeErrorType } from '../error';
|
|
11
|
-
import { ChatStreamPayload, ModelProvider } from '../types';
|
|
11
|
+
import { ChatCompetitionOptions, ChatStreamPayload, ModelProvider } from '../types';
|
|
12
12
|
import { AgentRuntimeError } from '../utils/createError';
|
|
13
13
|
import { debugStream } from '../utils/debugStream';
|
|
14
14
|
|
|
@@ -26,7 +26,7 @@ export class LobeAzureOpenAI implements LobeRuntimeAI {
|
|
|
26
26
|
|
|
27
27
|
baseURL: string;
|
|
28
28
|
|
|
29
|
-
async chat(payload: ChatStreamPayload) {
|
|
29
|
+
async chat(payload: ChatStreamPayload, options?: ChatCompetitionOptions) {
|
|
30
30
|
// ============ 1. preprocess messages ============ //
|
|
31
31
|
const { messages, model, ...params } = payload;
|
|
32
32
|
|
|
@@ -36,10 +36,9 @@ export class LobeAzureOpenAI implements LobeRuntimeAI {
|
|
|
36
36
|
const response = await this.client.streamChatCompletions(
|
|
37
37
|
model,
|
|
38
38
|
messages as ChatRequestMessage[],
|
|
39
|
-
params as GetChatCompletionsOptions,
|
|
39
|
+
{ ...params, abortSignal: options?.signal } as GetChatCompletionsOptions,
|
|
40
40
|
);
|
|
41
41
|
|
|
42
|
-
// TODO: we need to refactor this part in the future
|
|
43
42
|
const stream = OpenAIStream(response as any);
|
|
44
43
|
|
|
45
44
|
const [debug, prod] = stream.tee();
|
|
@@ -68,7 +68,7 @@ export class LobeBedrockAI implements LobeRuntimeAI {
|
|
|
68
68
|
|
|
69
69
|
try {
|
|
70
70
|
// Ask Claude for a streaming chat completion given the prompt
|
|
71
|
-
const bedrockResponse = await this.client.send(command);
|
|
71
|
+
const bedrockResponse = await this.client.send(command, { abortSignal: options?.signal });
|
|
72
72
|
|
|
73
73
|
// Convert the response into a friendly text-stream
|
|
74
74
|
const stream = AWSBedrockStream(
|
|
@@ -31,6 +31,13 @@ export class LobeOllamaAI implements LobeRuntimeAI {
|
|
|
31
31
|
|
|
32
32
|
async chat(payload: ChatStreamPayload, options?: ChatCompetitionOptions) {
|
|
33
33
|
try {
|
|
34
|
+
const abort = () => {
|
|
35
|
+
this.client.abort();
|
|
36
|
+
options?.signal?.removeEventListener('abort', abort);
|
|
37
|
+
};
|
|
38
|
+
|
|
39
|
+
options?.signal?.addEventListener('abort', abort);
|
|
40
|
+
|
|
34
41
|
const response = await this.client.chat({
|
|
35
42
|
messages: this.buildOllamaMessages(payload.messages),
|
|
36
43
|
model: payload.model,
|
|
@@ -33,6 +33,7 @@ export class LobePerplexityAI implements LobeRuntimeAI {
|
|
|
33
33
|
};
|
|
34
34
|
const response = await this.client.chat.completions.create(
|
|
35
35
|
chatPayload as unknown as OpenAI.ChatCompletionCreateParamsStreaming,
|
|
36
|
+
{ signal: options?.signal },
|
|
36
37
|
);
|
|
37
38
|
const [prod, debug] = response.tee();
|
|
38
39
|
|
|
@@ -90,8 +90,9 @@ export interface ChatStreamPayload {
|
|
|
90
90
|
}
|
|
91
91
|
|
|
92
92
|
export interface ChatCompetitionOptions {
|
|
93
|
-
callback?: ChatStreamCallbacks;
|
|
93
|
+
callback?: ChatStreamCallbacks;
|
|
94
94
|
headers?: Record<string, any>;
|
|
95
|
+
signal?: AbortSignal;
|
|
95
96
|
}
|
|
96
97
|
|
|
97
98
|
export interface ChatCompletionFunctions {
|
|
@@ -76,6 +76,7 @@ export const LobeOpenAICompatibleFactory = ({
|
|
|
76
76
|
const response = await this.client.chat.completions.create(postPayload, {
|
|
77
77
|
// https://github.com/lobehub/lobe-chat/pull/318
|
|
78
78
|
headers: { Accept: '*/*' },
|
|
79
|
+
signal: options?.signal,
|
|
79
80
|
});
|
|
80
81
|
|
|
81
82
|
const [prod, useForDebug] = response.tee();
|
package/src/services/chat.ts
CHANGED
|
@@ -168,18 +168,6 @@ export function initializeWithClientStore(provider: string, payload: any) {
|
|
|
168
168
|
});
|
|
169
169
|
}
|
|
170
170
|
|
|
171
|
-
/**
|
|
172
|
-
* Fetch chat completion on the client side.
|
|
173
|
-
* @param provider - The provider name.
|
|
174
|
-
* @param payload - The payload data for the chat stream.
|
|
175
|
-
* @returns A promise that resolves to the chat response.
|
|
176
|
-
*/
|
|
177
|
-
export async function fetchOnClient(provider: string, payload: Partial<ChatStreamPayload>) {
|
|
178
|
-
const agentRuntime = await initializeWithClientStore(provider, payload);
|
|
179
|
-
const data = payload as ChatStreamPayload;
|
|
180
|
-
return await agentRuntime.chat(data);
|
|
181
|
-
}
|
|
182
|
-
|
|
183
171
|
class ChatService {
|
|
184
172
|
createAssistantMessage = async (
|
|
185
173
|
{ plugins: enabledPlugins, messages, ...params }: GetChatCompletionPayload,
|
|
@@ -279,7 +267,7 @@ class ChatService {
|
|
|
279
267
|
*/
|
|
280
268
|
if (enableFetchOnClient) {
|
|
281
269
|
try {
|
|
282
|
-
return fetchOnClient(provider, payload);
|
|
270
|
+
return this.fetchOnClient({ payload, provider, signal });
|
|
283
271
|
} catch (e) {
|
|
284
272
|
const {
|
|
285
273
|
errorType = ChatErrorType.BadRequest,
|
|
@@ -472,6 +460,21 @@ class ChatService {
|
|
|
472
460
|
userId: commonSelectors.userId(useGlobalStore.getState()),
|
|
473
461
|
};
|
|
474
462
|
}
|
|
463
|
+
|
|
464
|
+
/**
|
|
465
|
+
* Fetch chat completion on the client side.
|
|
466
|
+
|
|
467
|
+
*/
|
|
468
|
+
private fetchOnClient = async (params: {
|
|
469
|
+
payload: Partial<ChatStreamPayload>;
|
|
470
|
+
provider: string;
|
|
471
|
+
signal?: AbortSignal;
|
|
472
|
+
}) => {
|
|
473
|
+
const agentRuntime = await initializeWithClientStore(params.provider, params.payload);
|
|
474
|
+
const data = params.payload as ChatStreamPayload;
|
|
475
|
+
|
|
476
|
+
return agentRuntime.chat(data, { signal: params.signal });
|
|
477
|
+
};
|
|
475
478
|
}
|
|
476
479
|
|
|
477
480
|
export const chatService = new ChatService();
|