@lobehub/chat 1.44.2 → 1.44.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -2,6 +2,31 @@
 
 # Changelog
 
+### [Version 1.44.3](https://github.com/lobehub/lobe-chat/compare/v1.44.2...v1.44.3)
+
+<sup>Released on **2025-01-08**</sup>
+
+#### 🐛 Bug Fixes
+
+- **misc**: Fix provider enabled issue.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### What's fixed
+
+- **misc**: Fix provider enabled issue, closes [#5337](https://github.com/lobehub/lobe-chat/issues/5337) ([8e0b634](https://github.com/lobehub/lobe-chat/commit/8e0b634))
+
+</details>
+
+<div align="right">
+
+[![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+</div>
+
 ### [Version 1.44.2](https://github.com/lobehub/lobe-chat/compare/v1.44.1...v1.44.2)
 
 <sup>Released on **2025-01-08**</sup>
package/changelog/v1.json CHANGED
@@ -1,4 +1,13 @@
 [
+  {
+    "children": {
+      "fixes": [
+        "Fix provider enabled issue."
+      ]
+    },
+    "date": "2025-01-08",
+    "version": "1.44.3"
+  },
   {
     "children": {
       "fixes": [
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@lobehub/chat",
-  "version": "1.44.2",
+  "version": "1.44.3",
   "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
   "keywords": [
     "framework",
@@ -69,19 +69,28 @@ export class AiInfraRepos {
     const providers = await this.getAiProviderList();
     const enabledProviders = providers.filter((item) => item.enabled);
 
-    const userEnabledModels = await this.aiModelModel.getEnabledModels();
+    const allModels = await this.aiModelModel.getAllModels();
+    const userEnabledModels = allModels.filter((item) => item.enabled);
+
     const modelList = await pMap(
       enabledProviders,
       async (provider) => {
         const aiModels = await this.fetchBuiltinModels(provider.id);
 
         return (aiModels || [])
-          .filter((i) => i.enabled)
-          .map<EnabledAiModel>((item) => ({
-            ...item,
-            abilities: item.abilities || {},
-            providerId: provider.id,
-          }));
+          .map<EnabledAiModel & { enabled?: boolean | null }>((item) => {
+            const user = allModels.find((m) => m.id === item.id && m.providerId === provider.id);
+
+            const enabled = !!user ? user.enabled : item.enabled;
+
+            return {
+              ...item,
+              abilities: item.abilities || {},
+              enabled,
+              providerId: provider.id,
+            };
+          })
+          .filter((i) => i.enabled);
       },
       { concurrency: 10 },
     );
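The hunk above is the substance of the 1.44.3 fix: instead of filtering builtin models on their builtin `enabled` default alone, the repository now looks up a matching user-level record and lets it override that default, falling back to the builtin flag only when no user row exists. A minimal sketch of that resolution rule, using hypothetical types and sample data rather than the real lobe-chat modules:

```ts
// Sketch only — hypothetical shapes that mirror the diff above, not the real imports.
interface ModelRecord {
  enabled?: boolean | null;
  id: string;
  providerId: string;
}

// Builtin catalog entry: enabled by default.
const builtin: ModelRecord = { enabled: true, id: 'some-model', providerId: 'openai' };

// User rows as returned by getAllModels(); here the user has switched the model off.
const userModels: ModelRecord[] = [{ enabled: false, id: 'some-model', providerId: 'openai' }];

// Same lookup as in the diff: match on both model id and provider id.
const user = userModels.find((m) => m.id === builtin.id && m.providerId === builtin.providerId);

// The user record wins when present; otherwise fall back to the builtin default.
const enabled = user ? user.enabled : builtin.enabled;

console.log(enabled); // false — the user's choice is no longer discarded
```

In the old code the builtin list was filtered on the builtin `enabled` flag before user state was consulted, so a user's toggle on a builtin model could be ignored; the new mapping folds the user state in first and filters afterwards.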
@@ -100,6 +109,9 @@
     return mergeArrayById(defaultModels, aiModels) as AiProviderModelListItem[];
   };
 
+  /**
+   * Fetch builtin models from config
+   */
   private fetchBuiltinModels = async (
     providerId: string,
   ): Promise<AiProviderModelListItem[] | undefined> => {
@@ -193,17 +193,15 @@ describe('AiModelModel', () => {
     });
   });
 
-  describe('getEnabledModels', () => {
+  describe('getAllModels', () => {
     it('should only return enabled models', async () => {
       await serverDB.insert(aiModels).values([
         { id: 'model1', providerId: 'openai', enabled: true, source: 'custom', userId },
-        { id: 'model2', providerId: 'openai', enabled: false, source: 'custom', userId },
+        { id: 'model2', providerId: 'b', enabled: false, source: 'custom', userId },
       ]);
 
-      const models = await aiProviderModel.getEnabledModels();
-      expect(models).toHaveLength(1);
-      expect(models[0].id).toBe('model1');
-      expect(models[0].enabled).toBe(true);
+      const models = await aiProviderModel.getAllModels();
+      expect(models).toHaveLength(2);
     });
   });
 
@@ -8,6 +8,7 @@ import {
   AiProviderModelListItem,
   ToggleAiModelEnableParams,
 } from '@/types/aiModel';
+import { EnabledAiModel } from '@/types/aiProvider';
 
 import { AiModelSelectItem, NewAiModelItem, aiModels } from '../../schemas';
 
@@ -83,8 +84,8 @@ export class AiModelModel {
     return result as AiProviderModelListItem[];
   };
 
-  getEnabledModels = async () => {
-    return this.db
+  getAllModels = async () => {
+    const data = await this.db
       .select({
         abilities: aiModels.abilities,
         config: aiModels.config,
@@ -98,7 +99,9 @@
         type: aiModels.type,
       })
       .from(aiModels)
-      .where(and(eq(aiModels.userId, this.userId), eq(aiModels.enabled, true)));
+      .where(and(eq(aiModels.userId, this.userId)));
+
+    return data as EnabledAiModel[];
   };
 
   findById = async (id: string) => {
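The model-layer change above is what feeds the override: `getAllModels` deliberately drops the `enabled = true` condition from the query, because the repository now needs the user's disabled rows as well in order to override a builtin default. A rough sketch of the difference, with made-up rows:

```ts
// Hypothetical user rows — the second one records an explicit "disabled" choice.
const rows = [
  { enabled: true, id: 'model1', providerId: 'openai' },
  { enabled: false, id: 'model2', providerId: 'openai' },
];

// Old behaviour (getEnabledModels): the WHERE enabled = true clause drops the
// explicit "disabled" record, so later code cannot tell "disabled" from "unset".
const enabledOnly = rows.filter((r) => r.enabled); // only model1 survives

// New behaviour (getAllModels): return every row; callers such as AiInfraRepos
// filter for enabled models themselves after merging with builtin defaults.
const all = rows; // both rows survive, including the explicit "disabled" one

console.log(enabledOnly.length, all.length); // 1 2
```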
@@ -215,7 +215,7 @@ export const createAiProviderSlice: StateCreator<
           enabledChatModelList,
         },
         false,
-        'useInitAiProviderKeyVaults',
+        'useFetchAiProviderRuntimeState',
       );
     },
   },
@@ -192,6 +192,7 @@ export interface EnabledAiModel {
   config?: AiModelConfig;
   contextWindowTokens?: number;
   displayName?: string;
+  enabled?: boolean;
   id: string;
   providerId: string;
   sort?: number;