@lobehub/chat 0.150.6 → 0.150.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +50 -0
- package/package.json +1 -1
- package/src/app/settings/llm/Ollama/index.tsx +1 -1
- package/src/components/ModelIcon/index.tsx +1 -0
- package/src/components/ModelTag/ModelIcon.tsx +1 -0
- package/src/config/modelProviders/ollama.ts +6 -0
- package/src/libs/agent-runtime/ollama/index.ts +7 -6
- package/src/services/models.ts +14 -0
package/CHANGELOG.md
CHANGED

@@ -2,6 +2,56 @@

 # Changelog

+### [Version 0.150.8](https://github.com/lobehub/lobe-chat/compare/v0.150.7...v0.150.8)
+
+<sup>Released on **2024-04-28**</sup>
+
+#### 💄 Styles
+
+- **ollama**: Phi3 Instruct models and its model icons.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### Styles
+
+- **ollama**: Phi3 Instruct models and its model icons, closes [#2254](https://github.com/lobehub/lobe-chat/issues/2254) ([c9b55cc](https://github.com/lobehub/lobe-chat/commit/c9b55cc))
+
+</details>
+
+<div align="right">
+
+[](#readme-top)
+
+</div>
+
+### [Version 0.150.7](https://github.com/lobehub/lobe-chat/compare/v0.150.6...v0.150.7)
+
+<sup>Released on **2024-04-28**</sup>
+
+#### 🐛 Bug Fixes
+
+- **misc**: Suport to fetch model list on client.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### What's fixed
+
+- **misc**: Suport to fetch model list on client, closes [#2252](https://github.com/lobehub/lobe-chat/issues/2252) ([76310a8](https://github.com/lobehub/lobe-chat/commit/76310a8))
+
+</details>
+
+<div align="right">
+
+[](#readme-top)
+
+</div>
+
 ### [Version 0.150.6](https://github.com/lobehub/lobe-chat/compare/v0.150.5...v0.150.6)

 <sup>Released on **2024-04-28**</sup>
package/package.json
CHANGED

@@ -1,6 +1,6 @@
 {
   "name": "@lobehub/chat",
-  "version": "0.150.6",
+  "version": "0.150.8",
   "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
   "keywords": [
     "framework",
package/src/app/settings/llm/Ollama/index.tsx
CHANGED

@@ -18,12 +18,12 @@ const OllamaProvider = memo(() => {
         label: t('llm.checker.title'),
         minWidth: undefined,
       }}
+      modelList={{ showModelFetcher: true }}
       provider={ModelProvider.Ollama}
       showApiKey={false}
       showBrowserRequest
       showEndpoint
       title={<Ollama.Combine size={24} />}
-      // modelList={{ showModelFetcher: true }}
     />
   );
 });
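This hunk uncomments a prop that was already in place: the shared provider-config form supports an optional model-fetcher control, and `modelList={{ showModelFetcher: true }}` switches it on for Ollama. A minimal sketch of the prop surface implied by the JSX above (the interface name and comments are editorial inference; the real definition lives in lobe-chat's shared provider settings component):

```ts
import type { ReactNode } from 'react';

// Hypothetical prop shape, inferred from the JSX in this hunk; the real
// definition lives in lobe-chat's shared provider settings component.
interface ProviderConfigProps {
  modelList?: {
    showModelFetcher?: boolean; // when true, render a "fetch model list" action
  };
  provider: string;
  showApiKey?: boolean;
  showBrowserRequest?: boolean;
  showEndpoint?: boolean;
  title?: ReactNode;
}
```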
package/src/components/ModelIcon/index.tsx
CHANGED

@@ -95,6 +95,7 @@ const ModelIcon = memo<ModelProviderIconProps>(({ model: originModel, size = 12
     return <Stability.Avatar size={size} />;

   if (model.includes('wizardlm')) return <Azure.Avatar size={size} />;
+  if (model.includes('phi3')) return <Azure.Avatar size={size} />;
   if (model.includes('firefly')) return <Adobe.Avatar size={size} />;
   if (model.includes('jamba') ||
     model.includes('j2-'))
package/src/components/ModelTag/ModelIcon.tsx
CHANGED

@@ -82,6 +82,7 @@ const ModelIcon = memo<ModelIconProps>(({ model, size = 12 }) => {
     return <Stability size={size} />;

   if (model.includes('wizardlm')) return <Azure size={size} />;
+  if (model.includes('phi3')) return <Azure size={size} />;
   if (model.includes('firefly')) return <AdobeFirefly size={size} />;
   if (model.includes('jamba') || model.includes('j2-')) return <Ai21 size={size} />;
 });
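Both icon components resolve icons by substring, so the new line maps every Ollama tag containing `phi3` (`phi3:instruct`, `phi3:3.8b`, ...) to the Azure icon, Phi-3 being a Microsoft model. A condensed sketch of the lookup pattern (return values abbreviated to strings; the real code returns `@lobehub/icons` components):

```ts
// Condensed sketch of the substring-based icon lookup in both components;
// the real code returns @lobehub/icons components instead of strings.
const pickIcon = (model: string): string => {
  if (model.includes('wizardlm')) return 'azure';
  if (model.includes('phi3')) return 'azure'; // new: covers phi3:instruct, phi3:3.8b, ...
  if (model.includes('firefly')) return 'adobe-firefly';
  if (model.includes('jamba') || model.includes('j2-')) return 'ai21';
  return 'default';
};

pickIcon('phi3:instruct'); // => 'azure'
```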
package/src/config/modelProviders/ollama.ts
CHANGED

@@ -91,6 +91,12 @@ const Ollama: ModelProviderCard = {
       id: 'codellama:python',
       tokens: 16_000,
     },
+    {
+      displayName: 'Phi3-Instruct 3.8B',
+      enabled: true,
+      id: 'phi3:instruct',
+      tokens: 128_000,
+    },
     {
       displayName: 'Mistral',
       enabled: true,
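The new entry registers Phi-3 Instruct in the built-in Ollama catalog, enabled by default with a 128K-token context window. The field meanings below are inferred from the surrounding entries (`ChatModelCard` from `@/types/llm` is the real type; this local interface is an annotated stand-in):

```ts
// Annotated stand-in for the model-card shape used in this config file;
// field names come from the diff, comments are editorial inference.
interface OllamaModelCard {
  displayName: string; // label shown in the model picker, e.g. 'Phi3-Instruct 3.8B'
  enabled?: boolean;   // listed by default when true
  id: string;          // the Ollama model tag sent to the daemon, e.g. 'phi3:instruct'
  tokens?: number;     // context window size in tokens
}

const phi3Instruct: OllamaModelCard = {
  displayName: 'Phi3-Instruct 3.8B',
  enabled: true,
  id: 'phi3:instruct',
  tokens: 128_000,
};
```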
package/src/libs/agent-runtime/ollama/index.ts
CHANGED

@@ -4,6 +4,7 @@ import { ClientOptions } from 'openai';

 import { OpenAIChatMessage } from '@/libs/agent-runtime';
 import { OllamaStream } from '@/libs/agent-runtime/ollama/stream';
+import { ChatModelCard } from '@/types/llm';

 import { LobeRuntimeAI } from '../BaseAI';
 import { AgentRuntimeErrorType } from '../error';

@@ -64,12 +65,12 @@ export class LobeOllamaAI implements LobeRuntimeAI {
     }
   }

-
-
-
-
-
-
+  async models(): Promise<ChatModelCard[]> {
+    const list = await this.client.list();
+    return list.models.map((model) => ({
+      id: model.name,
+    }));
+  }

   private buildOllamaMessages(messages: OpenAIChatMessage[]) {
     return messages.map((message) => this.convertContentToOllamaMessage(message));
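The replaced lines (content not captured in this diff view) give way to a concrete `models()` implementation: it calls the Ollama client's `list()`, which queries the daemon's installed-models endpoint, and maps each entry to a minimal `ChatModelCard` carrying only the tag name. A rough usage sketch (the constructor options here are an assumption, not the verified signature):

```ts
// Rough usage sketch: ask a locally running Ollama daemon which models
// are installed. Constructor options are assumed for illustration.
const runtime = new LobeOllamaAI({ baseURL: 'http://127.0.0.1:11434' });

const cards = await runtime.models();
// => e.g. [{ id: 'phi3:instruct' }, { id: 'mistral:latest' }]
```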
package/src/services/models.ts
CHANGED

@@ -1,7 +1,10 @@
 import { createHeaderWithAuth } from '@/services/_auth';
+import { useGlobalStore } from '@/store/global';
+import { modelConfigSelectors } from '@/store/global/selectors';
 import { ChatModelCard } from '@/types/llm';

 import { API_ENDPOINTS } from './_url';
+import { initializeWithClientStore } from './chat';

 class ModelsService {
   getChatModels = async (provider: string): Promise<ChatModelCard[] | undefined> => {

@@ -10,6 +13,17 @@ class ModelsService {
       provider,
     });
     try {
+      /**
+       * Use browser agent runtime
+       */
+      const enableFetchOnClient = modelConfigSelectors.isProviderFetchOnClient(provider)(
+        useGlobalStore.getState(),
+      );
+      if (enableFetchOnClient) {
+        const agentRuntime = await initializeWithClientStore(provider, {});
+        return agentRuntime.models();
+      }
+
       const res = await fetch(API_ENDPOINTS.chatModels(provider), { headers });
       if (!res.ok) return;
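With this change, `getChatModels` gains a client-side path: when fetch-on-client is enabled for a provider, the browser builds an agent runtime from locally stored settings via `initializeWithClientStore` and calls its `models()` method directly (this is what makes a `localhost` Ollama daemon reachable at all); otherwise it falls through to the existing server endpoint. A simplified, self-contained sketch of the dispatch (store selector and endpoint stubbed out):

```ts
// Simplified sketch of the new dispatch in getChatModels. The selector and
// endpoint are stubbed; the real code reads the app's global store and
// uses API_ENDPOINTS.chatModels(provider).
type ChatModelCard = { id: string };

declare function isProviderFetchOnClient(provider: string): boolean;
declare function initializeWithClientStore(
  provider: string,
  payload: object,
): Promise<{ models(): Promise<ChatModelCard[]> }>;

async function getChatModels(provider: string): Promise<ChatModelCard[] | undefined> {
  if (isProviderFetchOnClient(provider)) {
    // client path: talk to the provider (e.g. a local Ollama daemon) from the browser
    const runtime = await initializeWithClientStore(provider, {});
    return runtime.models();
  }
  // server path: unchanged fallback through the app's API route
  const res = await fetch(`/api/chat/models/${provider}`); // illustrative URL
  if (!res.ok) return;
  return res.json();
}
```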