@lobehub/chat 1.77.16 → 1.77.18
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +50 -0
- package/changelog/v1.json +18 -0
- package/contributing/Basic/Architecture.md +1 -1
- package/contributing/Basic/Architecture.zh-CN.md +1 -1
- package/contributing/Basic/Chat-API.md +326 -108
- package/contributing/Basic/Chat-API.zh-CN.md +313 -133
- package/contributing/Basic/Contributing-Guidelines.md +7 -4
- package/contributing/Basic/Contributing-Guidelines.zh-CN.md +7 -6
- package/contributing/Home.md +5 -5
- package/contributing/State-Management/State-Management-Intro.md +1 -1
- package/contributing/State-Management/State-Management-Intro.zh-CN.md +1 -1
- package/docker-compose/local/docker-compose.yml +2 -1
- package/locales/ar/components.json +4 -0
- package/locales/ar/modelProvider.json +1 -0
- package/locales/ar/models.json +8 -5
- package/locales/ar/tool.json +21 -1
- package/locales/bg-BG/components.json +4 -0
- package/locales/bg-BG/modelProvider.json +1 -0
- package/locales/bg-BG/models.json +8 -5
- package/locales/bg-BG/tool.json +21 -1
- package/locales/de-DE/components.json +4 -0
- package/locales/de-DE/modelProvider.json +1 -0
- package/locales/de-DE/models.json +8 -5
- package/locales/de-DE/tool.json +21 -1
- package/locales/en-US/components.json +4 -0
- package/locales/en-US/modelProvider.json +1 -0
- package/locales/en-US/models.json +8 -5
- package/locales/en-US/tool.json +21 -1
- package/locales/es-ES/components.json +4 -0
- package/locales/es-ES/modelProvider.json +1 -0
- package/locales/es-ES/models.json +7 -4
- package/locales/es-ES/tool.json +21 -1
- package/locales/fa-IR/components.json +4 -0
- package/locales/fa-IR/modelProvider.json +1 -0
- package/locales/fa-IR/models.json +7 -4
- package/locales/fa-IR/tool.json +21 -1
- package/locales/fr-FR/components.json +4 -0
- package/locales/fr-FR/modelProvider.json +1 -0
- package/locales/fr-FR/models.json +8 -5
- package/locales/fr-FR/tool.json +21 -1
- package/locales/it-IT/components.json +4 -0
- package/locales/it-IT/modelProvider.json +1 -0
- package/locales/it-IT/models.json +7 -4
- package/locales/it-IT/tool.json +21 -1
- package/locales/ja-JP/components.json +4 -0
- package/locales/ja-JP/modelProvider.json +1 -0
- package/locales/ja-JP/models.json +8 -5
- package/locales/ja-JP/tool.json +21 -1
- package/locales/ko-KR/components.json +4 -0
- package/locales/ko-KR/modelProvider.json +1 -0
- package/locales/ko-KR/models.json +8 -5
- package/locales/ko-KR/tool.json +21 -1
- package/locales/nl-NL/components.json +4 -0
- package/locales/nl-NL/modelProvider.json +1 -0
- package/locales/nl-NL/models.json +8 -5
- package/locales/nl-NL/tool.json +21 -1
- package/locales/pl-PL/components.json +4 -0
- package/locales/pl-PL/modelProvider.json +1 -0
- package/locales/pl-PL/models.json +8 -5
- package/locales/pl-PL/tool.json +21 -1
- package/locales/pt-BR/components.json +4 -0
- package/locales/pt-BR/modelProvider.json +1 -0
- package/locales/pt-BR/models.json +7 -4
- package/locales/pt-BR/tool.json +21 -1
- package/locales/ru-RU/components.json +4 -0
- package/locales/ru-RU/modelProvider.json +1 -0
- package/locales/ru-RU/models.json +7 -4
- package/locales/ru-RU/tool.json +21 -1
- package/locales/tr-TR/components.json +4 -0
- package/locales/tr-TR/modelProvider.json +1 -0
- package/locales/tr-TR/models.json +8 -5
- package/locales/tr-TR/tool.json +21 -1
- package/locales/vi-VN/components.json +4 -0
- package/locales/vi-VN/modelProvider.json +1 -0
- package/locales/vi-VN/models.json +8 -5
- package/locales/vi-VN/tool.json +21 -1
- package/locales/zh-CN/components.json +4 -0
- package/locales/zh-CN/modelProvider.json +1 -0
- package/locales/zh-CN/models.json +9 -6
- package/locales/zh-CN/tool.json +30 -1
- package/locales/zh-TW/components.json +4 -0
- package/locales/zh-TW/modelProvider.json +1 -0
- package/locales/zh-TW/models.json +7 -4
- package/locales/zh-TW/tool.json +21 -1
- package/package.json +1 -1
- package/src/app/(backend)/webapi/models/[provider]/pull/route.ts +34 -0
- package/src/app/(backend)/webapi/{chat/models → models}/[provider]/route.ts +1 -2
- package/src/app/[variants]/(main)/settings/llm/ProviderList/Ollama/index.tsx +0 -7
- package/src/app/[variants]/(main)/settings/provider/(detail)/ollama/CheckError.tsx +1 -1
- package/src/components/FormAction/index.tsx +1 -1
- package/src/database/models/__tests__/aiProvider.test.ts +100 -0
- package/src/database/models/aiProvider.ts +11 -1
- package/src/features/Conversation/Error/OllamaBizError/InvalidOllamaModel.tsx +43 -0
- package/src/features/Conversation/Error/OllamaDesktopSetupGuide/index.tsx +61 -0
- package/src/features/Conversation/Error/index.tsx +7 -0
- package/src/features/DevPanel/SystemInspector/ServerConfig.tsx +18 -2
- package/src/features/DevPanel/SystemInspector/index.tsx +25 -6
- package/src/features/OllamaModelDownloader/index.tsx +149 -0
- package/src/libs/agent-runtime/AgentRuntime.ts +6 -0
- package/src/libs/agent-runtime/BaseAI.ts +7 -0
- package/src/libs/agent-runtime/ollama/index.ts +84 -2
- package/src/libs/agent-runtime/openrouter/__snapshots__/index.test.ts.snap +24 -3263
- package/src/libs/agent-runtime/openrouter/fixtures/frontendModels.json +25 -0
- package/src/libs/agent-runtime/openrouter/fixtures/models.json +0 -3353
- package/src/libs/agent-runtime/openrouter/index.test.ts +56 -1
- package/src/libs/agent-runtime/openrouter/index.ts +9 -4
- package/src/libs/agent-runtime/types/index.ts +1 -0
- package/src/libs/agent-runtime/types/model.ts +44 -0
- package/src/libs/agent-runtime/utils/streams/index.ts +1 -0
- package/src/libs/agent-runtime/utils/streams/model.ts +110 -0
- package/src/locales/default/components.ts +4 -0
- package/src/locales/default/modelProvider.ts +1 -0
- package/src/locales/default/tool.ts +30 -1
- package/src/server/modules/SearXNG.ts +10 -2
- package/src/server/routers/tools/__test__/search.test.ts +3 -1
- package/src/server/routers/tools/search.ts +10 -2
- package/src/services/__tests__/models.test.ts +21 -0
- package/src/services/_url.ts +4 -1
- package/src/services/chat.ts +1 -1
- package/src/services/models.ts +153 -7
- package/src/services/search.ts +2 -2
- package/src/store/aiInfra/slices/aiModel/action.ts +1 -1
- package/src/store/aiInfra/slices/aiProvider/action.ts +2 -1
- package/src/store/chat/slices/builtinTool/actions/searXNG.test.ts +28 -8
- package/src/store/chat/slices/builtinTool/actions/searXNG.ts +22 -5
- package/src/store/user/slices/modelList/action.test.ts +2 -2
- package/src/store/user/slices/modelList/action.ts +1 -1
- package/src/tools/web-browsing/Portal/Search/index.tsx +1 -1
- package/src/tools/web-browsing/Render/Search/SearchQuery/SearchView.tsx +1 -1
- package/src/tools/web-browsing/Render/Search/SearchQuery/index.tsx +1 -1
- package/src/tools/web-browsing/Render/Search/SearchResult/index.tsx +1 -1
- package/src/tools/web-browsing/components/CategoryAvatar.tsx +27 -0
- package/src/tools/web-browsing/components/SearchBar.tsx +84 -4
- package/src/tools/web-browsing/const.ts +26 -0
- package/src/tools/web-browsing/index.ts +58 -28
- package/src/tools/web-browsing/systemRole.ts +62 -1
- package/src/types/tool/search.ts +10 -1
- package/src/app/[variants]/(main)/settings/llm/ProviderList/Ollama/Checker.tsx +0 -73
- package/src/app/[variants]/(main)/settings/provider/(detail)/ollama/OllamaModelDownloader/index.tsx +0 -127
- package/src/features/Conversation/Error/OllamaBizError/InvalidOllamaModel/index.tsx +0 -154
- package/src/features/Conversation/Error/OllamaBizError/InvalidOllamaModel/useDownloadMonitor.ts +0 -29
- package/src/helpers/url.ts +0 -17
- package/src/services/__tests__/ollama.test.ts +0 -28
- package/src/services/ollama.ts +0 -83
- /package/src/{app/[variants]/(main)/settings/provider/(detail)/ollama → features}/OllamaModelDownloader/useDownloadMonitor.ts +0 -0
package/CHANGELOG.md
CHANGED
@@ -2,6 +2,56 @@
 
 # Changelog
 
+### [Version 1.77.18](https://github.com/lobehub/lobe-chat/compare/v1.77.17...v1.77.18)
+
+<sup>Released on **2025-04-09**</sup>
+
+#### 💄 Styles
+
+- **misc**: Add `time_range` & `categories` support for SearXNG.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### Styles
+
+- **misc**: Add `time_range` & `categories` support for SearXNG, closes [#6813](https://github.com/lobehub/lobe-chat/issues/6813) ([9e4cd8c](https://github.com/lobehub/lobe-chat/commit/9e4cd8c))
+
+</details>
+
+<div align="right">
+
+[](#readme-top)
+
+</div>
+
+### [Version 1.77.17](https://github.com/lobehub/lobe-chat/compare/v1.77.16...v1.77.17)
+
+<sup>Released on **2025-04-08**</sup>
+
+#### 🐛 Bug Fixes
+
+- **misc**: Refactor ollama pull flow and model service.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### What's fixed
+
+- **misc**: Refactor ollama pull flow and model service, closes [#7330](https://github.com/lobehub/lobe-chat/issues/7330) ([44d63b8](https://github.com/lobehub/lobe-chat/commit/44d63b8))
+
+</details>
+
+<div align="right">
+
+[](#readme-top)
+
+</div>
+
 ### [Version 1.77.16](https://github.com/lobehub/lobe-chat/compare/v1.77.15...v1.77.16)
 
 <sup>Released on **2025-04-06**</sup>
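For context on the SearXNG entry above: SearXNG's `/search` endpoint accepts `time_range` and `categories` as plain query parameters, which is what this release surfaces in the search tool. A minimal sketch of such a request, assuming a self-hosted instance with JSON output enabled in its `search.formats` setting (the instance URL and the `searchSearXNG` helper name are illustrative, not from this package):

```ts
// Illustrative helper: query a SearXNG instance with the newly surfaced parameters.
const searchSearXNG = async (query: string) => {
  const params = new URLSearchParams({
    categories: 'news', // e.g. 'general', 'images', 'news', 'science'
    format: 'json', // requires JSON to be enabled on the instance
    q: query,
    time_range: 'month', // e.g. 'day', 'month', 'year'
  });

  const res = await fetch(`https://searxng.example.com/search?${params}`);
  if (!res.ok) throw new Error(`SearXNG request failed: ${res.status}`);

  return res.json(); // { query, results: [{ title, url, content, ... }], ... }
};
```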
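Likewise, the ollama fix above (see the new `webapi/models/[provider]/pull` route and `OllamaModelDownloader` component in the file list) routes model pulling through the server. For reference, Ollama's public `POST /api/pull` endpoint streams progress as newline-delimited JSON; a simplified client sketch against a local daemon (a robust client would buffer partial lines across chunks):

```ts
// Illustrative sketch: stream pull progress from a local Ollama daemon.
const pullOllamaModel = async (model: string) => {
  const res = await fetch('http://localhost:11434/api/pull', {
    body: JSON.stringify({ model }),
    headers: { 'Content-Type': 'application/json' },
    method: 'POST',
  });

  const reader = res.body!.getReader();
  const decoder = new TextDecoder();

  while (true) {
    const { done, value } = await reader.read();
    if (done) break;

    // Each line is a progress object, e.g. {"status":"pulling ...","total":n,"completed":m}
    for (const line of decoder.decode(value, { stream: true }).split('\n').filter(Boolean)) {
      const progress = JSON.parse(line);
      console.log(progress.status, progress.completed ?? 0, progress.total ?? 0);
    }
  }
};
```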
package/changelog/v1.json
CHANGED
@@ -1,4 +1,22 @@
 [
+  {
+    "children": {
+      "improvements": [
+        "Add time_range & categories support for SearXNG."
+      ]
+    },
+    "date": "2025-04-09",
+    "version": "1.77.18"
+  },
+  {
+    "children": {
+      "fixes": [
+        "Refactor ollama pull flow and model service."
+      ]
+    },
+    "date": "2025-04-08",
+    "version": "1.77.17"
+  },
   {
     "children": {
       "improvements": [
package/contributing/Basic/Architecture.md
CHANGED
@@ -1,6 +1,6 @@
 # Architecture Design
 
-LobeChat is an AI
+LobeChat is an AI chat application built on the Next.js framework, aiming to provide an AI productivity platform that enables users to interact with AI through natural language. The following is an overview of the architecture design of LobeChat:
 
 #### TOC
 
package/contributing/Basic/Chat-API.md
CHANGED
@@ -1,136 +1,354 @@
-#
+# Lobe Chat API Client-Server Interaction Logic
 
-
+This document explains the implementation logic of Lobe Chat API in client-server interactions, including event sequences and core components involved.
 
-
+## Table of Contents
 
-- [
-
-
-- [Frontend Implementation](#frontend-implementation)
-- [Frontend Integration](#frontend-integration)
-- [Using Streaming to Get Results](#using-streaming-to-get-results)
+- [Interaction Sequence Diagram](#interaction-sequence-diagram)
+- [Main Process Steps](#main-process-steps)
+- [AgentRuntime Overview](#agentruntime-overview)
 
-##
+## Interaction Sequence Diagram
 
-
+```mermaid
+sequenceDiagram
+  participant Client as Frontend Client
+  participant ChatService as Frontend ChatService
+  participant ChatAPI as Backend Chat API
+  participant AgentRuntime as AgentRuntime
+  participant ModelProvider as Model Provider API
+  participant PluginGateway as Plugin Gateway
 
-
+  Client->>ChatService: Call createAssistantMessage
+  Note over ChatService: Process messages, tools, and parameters
 
-
+  ChatService->>ChatService: Call getChatCompletion
+  Note over ChatService: Prepare request parameters
 
-
-export const POST = async (req: Request) => {
-  const payload = await req.json();
+  ChatService->>ChatAPI: Send POST request to /webapi/chat/[provider]
 
-
+  ChatAPI->>AgentRuntime: Initialize AgentRuntime
+  Note over AgentRuntime: Create runtime with provider and user config
 
-
+  ChatAPI->>AgentRuntime: Call chat method
+  AgentRuntime->>ModelProvider: Send chat completion request
 
-
-
-```
+  ModelProvider-->>AgentRuntime: Return streaming response
+  AgentRuntime-->>ChatAPI: Process response and return stream
 
-
-
-
-
-
-
-
-
-
-
-
-    content: m.content,
-    name: m.name,
-    role: m.role,
-  }));
-
-  const response = await openai.chat.completions.create(
-    {
-      messages: formatMessages,
-      ...params,
-      stream: true,
-    },
-    { headers: { Accept: '*/*' } },
-  );
-  const stream = OpenAIStream(response);
-  return new StreamingTextResponse(stream);
-};
-```
+  ChatAPI-->>ChatService: Stream back SSE response
+
+  ChatService->>ChatService: Handle streaming response with fetchSSE
+  Note over ChatService: Process event stream with fetchEventSource
+
+  loop For each data chunk
+    ChatService->>ChatService: Handle different event types (text, tool_calls, reasoning, etc.)
+    ChatService-->>Client: Return current chunk via onMessageHandle callback
+  end
+
+  ChatService-->>Client: Return complete result via onFinish callback
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-      ...initialLobeAgentConfig.params,
-    },
-    params,
-  );
-
-  const filterFunctions: ChatCompletionFunctions[] = pluginSelectors.enabledSchema(enabledPlugins)(
-    usePluginStore.getState(),
-  );
-
-  const functions = filterFunctions.length === 0 ? undefined : filterFunctions;
-
-  return fetch(OPENAI_URLS.chat, {
-    body: JSON.stringify({ ...payload, functions }),
-    headers: createHeaderWithOpenAI({ 'Content-Type': 'application/json' }),
-    method: 'POST',
-    signal: options?.signal,
-  });
-};
+  Note over ChatService,ModelProvider: Plugin calling scenario
+  ModelProvider-->>ChatService: Return response with tool_calls
+  ChatService->>ChatService: Parse tool calls
+  ChatService->>ChatService: Call runPluginApi
+  ChatService->>PluginGateway: Send plugin request to gateway
+  PluginGateway-->>ChatService: Return plugin execution result
+  ChatService->>ModelProvider: Return plugin result to model
+  ModelProvider-->>ChatService: Generate final response based on plugin result
+
+  Note over ChatService,ModelProvider: Preset task scenario
+  Client->>ChatService: Trigger preset task (e.g., translation, search)
+  ChatService->>ChatService: Call fetchPresetTaskResult
+  ChatService->>ChatAPI: Send preset task request
+  ChatAPI-->>ChatService: Return task result
+  ChatService-->>Client: Return result via callback function
 ```
 
-
+## Main Process Steps
 
-
+1. **Client Initiates Request**: The client calls the createAssistantMessage method of the frontend ChatService.
 
-
-export const fetchSSE = async (fetchFn: () => Promise<Response>, options: FetchSSEOptions = {}) => {
-  const response = await fetchFn();
+2. **Frontend Processes Request**:
 
-
-
+   - `src/services/chat.ts` preprocesses messages, tools, and parameters
+   - Calls getChatCompletion to prepare request parameters
+   - Uses `src/utils/fetch/fetchSSE.ts` to send request to backend API
 
-
-    return;
-  }
+3. **Backend Processes Request**:
 
-
+   - `src/app/(backend)/webapi/chat/[provider]/route.ts` receives the request
+   - Initializes AgentRuntime
+   - Creates the appropriate model instance based on user configuration and provider
 
-
+4. **Model Call**:
 
-
+   - `src/libs/agent-runtime/AgentRuntime.ts` calls the respective model provider's API
+   - Returns streaming response
 
-
-
+5. **Process Response**:
+
+   - Backend converts model response to Stream and returns it
+   - Frontend processes streaming response via fetchSSE and [fetchEventSource](https://github.com/Azure/fetch-event-source)
+   - Handles different types of events (text, tool calls, reasoning, etc.)
+   - Passes results back to client through callback functions
 
-
+6. **Plugin Calling Scenario**:
 
-
-    const { value, done: doneReading } = await reader.read();
-    done = doneReading;
-    const chunkValue = decoder.decode(value);
+   When the AI model returns a `tool_calls` field in its response, it triggers the plugin calling process:
 
-
-
+   - AI model returns response containing `tool_calls`, indicating a need to call tools
+   - Frontend handles tool calls via the `internal_callPluginApi` method
+   - Calls `runPluginApi` method to execute plugin functionality, including retrieving plugin settings and manifest, creating authentication headers, and sending requests to the plugin gateway
+   - After plugin execution completes, the result is returned to the AI model, which generates the final response based on the result
 
-
-
-
+   **Real-world Examples**:
+
+   - **Search Plugin**: When a user needs real-time information, the AI calls a web search plugin to retrieve the latest data
+   - **DALL-E Plugin**: When a user requests image generation, the AI calls the DALL-E plugin to create images
+   - **Midjourney Plugin**: Provides higher quality image generation capabilities by calling the Midjourney service via API
+
+7. **Preset Task Processing**:
+
+   Preset tasks are specific predefined functions that are typically triggered when users perform specific actions (rather than being part of the regular chat flow). These tasks use the `fetchPresetTaskResult` method, which is similar to the normal chat flow but uses specially designed prompt chains.
+
+   **Execution Timing**: Preset tasks are mainly triggered in the following scenarios:
+
+   1. **Agent Information Auto-generation**: Triggered when users create or edit an agent
+
+      - Agent avatar generation (via `autoPickEmoji` method)
+      - Agent description generation (via `autocompleteAgentDescription` method)
+      - Agent tag generation (via `autocompleteAgentTags` method)
+      - Agent title generation (via `autocompleteAgentTitle` method)
+
+   2. **Message Translation**: Triggered when users manually click the translate button (via `translateMessage` method)
+
+   3. **Web Search**: When search is enabled but the model doesn't support tool calling, search functionality is implemented via `fetchPresetTaskResult`
+
+   **Code Examples**:
+
+   Agent avatar auto-generation implementation:
 
-
+   ```typescript
+   // src/features/AgentSetting/store/action.ts
+   autoPickEmoji: async () => {
+     const { config, meta, dispatchMeta } = get();
+     const systemRole = config.systemRole;
+
+     chatService.fetchPresetTaskResult({
+       onFinish: async (emoji) => {
+         dispatchMeta({ type: 'update', value: { avatar: emoji } });
+       },
+       onLoadingChange: (loading) => {
+         get().updateLoadingState('avatar', loading);
+       },
+       params: merge(
+         get().internal_getSystemAgentForMeta(),
+         chainPickEmoji([meta.title, meta.description, systemRole].filter(Boolean).join(',')),
+       ),
+       trace: get().getCurrentTracePayload({ traceName: TraceNameMap.EmojiPicker }),
+     });
+   };
+   ```
+
+   Translation feature implementation:
+
+   ```typescript
+   // src/store/chat/slices/translate/action.ts
+   translateMessage: async (id, targetLang) => {
+     // ...omitted code...
+
+     // Detect language
+     chatService.fetchPresetTaskResult({
+       onFinish: async (data) => {
+         if (data && supportLocales.includes(data)) from = data;
+         await updateMessageTranslate(id, { content, from, to: targetLang });
+       },
+       params: merge(translationSetting, chainLangDetect(message.content)),
+       trace: get().getCurrentTracePayload({ traceName: TraceNameMap.LanguageDetect }),
+     });
+
+     // Perform translation
+     chatService.fetchPresetTaskResult({
+       onMessageHandle: (chunk) => {
+         if (chunk.type === 'text') {
+           content = chunk.text;
+           internal_dispatchMessage({
+             id,
+             type: 'updateMessageTranslate',
+             value: { content, from, to: targetLang },
+           });
+         }
+       },
+       onFinish: async () => {
+         await updateMessageTranslate(id, { content, from, to: targetLang });
+         internal_toggleChatLoading(false, id, n('translateMessage(end)', { id }) as string);
+       },
+       params: merge(translationSetting, chainTranslate(message.content, targetLang)),
+       trace: get().getCurrentTracePayload({ traceName: TraceNameMap.Translation }),
+     });
+   };
+   ```
+
+8. **Completion**:
+   - When the stream ends, the onFinish callback is called, providing the complete response result
+
+## AgentRuntime Overview
+
+AgentRuntime is a core abstraction layer in Lobe Chat that encapsulates a unified interface for interacting with different AI model providers. Its main responsibilities and features include:
+
+1. **Unified Abstraction Layer**: AgentRuntime provides a unified interface that hides the implementation details and differences between various AI provider APIs (such as OpenAI, Anthropic, Bedrock, etc.).
+
+2. **Model Initialization**: Through the static `initializeWithProvider` method, it initializes the corresponding runtime instance based on the specified provider and configuration parameters.
+
+3. **Capability Encapsulation**:
+
+   - `chat` method: Handles chat streaming requests
+   - `models` method: Retrieves model lists
+   - Supports text embedding, text-to-image, text-to-speech, and other functionalities (if supported by the model provider)
+
+4. **Plugin Architecture**: Through the `src/libs/agent-runtime/runtimeMap.ts` mapping table, it implements an extensible plugin architecture, making it easy to add new model providers. Currently, it supports over 40 different model providers:
+
+   ```typescript
+   export const providerRuntimeMap = {
+     openai: LobeOpenAI,
+     anthropic: LobeAnthropicAI,
+     google: LobeGoogleAI,
+     azure: LobeAzureOpenAI,
+     bedrock: LobeBedrockAI,
+     ollama: LobeOllamaAI,
+     // ...over 40 other model providers
+   };
+   ```
+
+5. **Adapter Pattern**: Internally, it uses the adapter pattern to adapt different provider APIs to the unified `src/libs/agent-runtime/BaseAI.ts` interface:
+
+   ```typescript
+   export interface LobeRuntimeAI {
+     baseURL?: string;
+     chat(payload: ChatStreamPayload, options?: ChatCompetitionOptions): Promise<Response>;
+     embeddings?(payload: EmbeddingsPayload, options?: EmbeddingsOptions): Promise<Embeddings[]>;
+     models?(): Promise<any>;
+     textToImage?: (payload: TextToImagePayload) => Promise<string[]>;
+     textToSpeech?: (
+       payload: TextToSpeechPayload,
+       options?: TextToSpeechOptions,
+     ) => Promise<ArrayBuffer>;
+   }
+   ```
+
+**Adapter Implementation Examples**:
+
+1. **OpenRouter Adapter**:
+   OpenRouter is a unified API that allows access to AI models from multiple providers. Lobe Chat implements support for OpenRouter through an adapter:
+
+   ```typescript
+   // OpenRouter adapter implementation
+   class LobeOpenRouterAI implements LobeRuntimeAI {
+     client: OpenAI;
+     baseURL: string;
+
+     constructor(options: OpenAICompatibleOptions) {
+       // Initialize OpenRouter client using OpenAI-compatible API
+       this.client = new OpenAI({
+         apiKey: options.apiKey,
+         baseURL: OPENROUTER_BASE_URL,
+         defaultHeaders: {
+           'HTTP-Referer': 'https://github.com/lobehub/lobe-chat',
+           'X-Title': 'LobeChat',
+         },
+       });
+       this.baseURL = OPENROUTER_BASE_URL;
+     }
+
+     // Implement chat functionality
+     async chat(payload: ChatCompletionCreateParamsBase, options?: RequestOptions) {
+       // Convert Lobe Chat request format to OpenRouter format
+       // Handle model mapping, message format, etc.
+       return this.client.chat.completions.create(
+         {
+           ...payload,
+           model: payload.model || 'openai/gpt-4-turbo', // Default model
+         },
+         options,
+       );
+     }
+
+     // Implement other LobeRuntimeAI interface methods
+   }
+   ```
+
+2. **Google Gemini Adapter**:
+   Gemini is Google's large language model. Lobe Chat supports Gemini series models through a dedicated adapter:
+
+   ```typescript
+   import { GoogleGenerativeAI } from '@google/generative-ai';
+
+   // Gemini adapter implementation
+   class LobeGoogleAI implements LobeRuntimeAI {
+     client: GoogleGenerativeAI;
+     baseURL: string;
+     apiKey: string;
+
+     constructor(options: GoogleAIOptions) {
+       // Initialize Google Generative AI client
+       this.client = new GoogleGenerativeAI(options.apiKey);
+       this.apiKey = options.apiKey;
+       this.baseURL = options.baseURL || GOOGLE_AI_BASE_URL;
+     }
+
+     // Implement chat functionality
+     async chat(payload: ChatCompletionCreateParamsBase, options?: RequestOptions) {
+       // Select appropriate model (supports Gemini Pro, Gemini Flash, etc.)
+       const modelName = payload.model || 'gemini-pro';
+       const model = this.client.getGenerativeModel({ model: modelName });
+
+       // Process multimodal inputs (e.g., images)
+       const contents = this.processMessages(payload.messages);
+
+       // Set generation parameters
+       const generationConfig = {
+         temperature: payload.temperature,
+         topK: payload.top_k,
+         topP: payload.top_p,
+         maxOutputTokens: payload.max_tokens,
+       };
+
+       // Create chat session and get response
+       const chat = model.startChat({
+         generationConfig,
+         history: contents.slice(0, -1),
+         safetySettings: this.getSafetySettings(payload),
+       });
+
+       // Handle streaming response
+       return this.handleStreamResponse(chat, contents, options?.signal);
+     }
+
+     // Implement other processing methods
+     private processMessages(messages) {
+       /* ... */
+     }
+     private getSafetySettings(payload) {
+       /* ... */
+     }
+     private handleStreamResponse(chat, contents, signal) {
+       /* ... */
+     }
+   }
+   ```
+
+**Different Model Implementations**:
+
+- `src/libs/agent-runtime/openai/index.ts` - OpenAI implementation
+- `src/libs/agent-runtime/anthropic/index.ts` - Anthropic implementation
+- `src/libs/agent-runtime/google/index.ts` - Google implementation
+- `src/libs/agent-runtime/openrouter/index.ts` - OpenRouter implementation
+
+For detailed implementation, see:
+
+- `src/libs/agent-runtime/AgentRuntime.ts` - Core runtime class
+- `src/libs/agent-runtime/BaseAI.ts` - Define base interface
+- `src/libs/agent-runtime/runtimeMap.ts` - Provider mapping table
+- `src/libs/agent-runtime/UniformRuntime/index.ts` - Handle multi-model unified runtime
+- `src/libs/agent-runtime/utils/openaiCompatibleFactory/index.ts` - OpenAI compatible adapter factory