@lobehub/chat 1.45.2 → 1.45.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -2,6 +2,31 @@

   # Changelog

+ ### [Version 1.45.3](https://github.com/lobehub/lobe-chat/compare/v1.45.2...v1.45.3)
+
+ <sup>Released on **2025-01-09**</sup>
+
+ #### 🐛 Bug Fixes
+
+ - **misc**: Fix some ai provider known issues.
+
+ <br/>
+
+ <details>
+ <summary><kbd>Improvements and Fixes</kbd></summary>
+
+ #### What's fixed
+
+ - **misc**: Fix some ai provider known issues, closes [#5361](https://github.com/lobehub/lobe-chat/issues/5361) ([b2775b5](https://github.com/lobehub/lobe-chat/commit/b2775b5))
+
+ </details>
+
+ <div align="right">
+
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+ </div>
+
   ### [Version 1.45.2](https://github.com/lobehub/lobe-chat/compare/v1.45.1...v1.45.2)

   <sup>Released on **2025-01-09**</sup>
package/changelog/v1.json CHANGED
@@ -1,4 +1,13 @@
   [
+   {
+     "children": {
+       "fixes": [
+         "Fix some ai provider known issues."
+       ]
+     },
+     "date": "2025-01-09",
+     "version": "1.45.3"
+   },
     {
       "children": {
         "improvements": [
package/package.json CHANGED
@@ -1,6 +1,6 @@
   {
     "name": "@lobehub/chat",
-    "version": "1.45.2",
+    "version": "1.45.3",
     "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
     "keywords": [
       "framework",
@@ -10,6 +10,7 @@ import { memo } from 'react';
   import { useTranslation } from 'react-i18next';
   import { FlexboxProps } from 'react-layout-kit';

+  import { isServerMode } from '@/const/version';
   import { DiscoverProviderItem } from '@/types/discover';

   const useStyles = createStyles(({ css }) => ({
@@ -25,13 +26,13 @@ interface ProviderConfigProps extends FlexboxProps {
     identifier: string;
   }

-  const ProviderConfig = memo<ProviderConfigProps>(({ data }) => {
+  const ProviderConfig = memo<ProviderConfigProps>(({ data, identifier }) => {
     const { styles } = useStyles();
     const { t } = useTranslation('discover');

     const router = useRouter();
     const openSettings = () => {
-      router.push('/settings/llm');
+      router.push(!isServerMode ? '/settings/llm' : `/settings/provider/${identifier}`);
     };

     const icon = <Icon icon={SquareArrowOutUpRight} size={{ fontSize: 16 }} />;
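The two hunks above change where the provider card's settings button navigates: in server mode it now opens the per-provider settings page, otherwise it keeps the legacy `/settings/llm` page. A minimal sketch of that rule, assuming a Next.js-style `useRouter` (the hook's actual import is not shown in this diff) and using a hypothetical `useOpenProviderSettings` helper name:

```ts
// Sketch only, not the component's exact code.
import { useRouter } from 'next/navigation'; // assumption: Next.js app router
import { isServerMode } from '@/const/version';

// Hypothetical helper illustrating the routing rule from the diff above.
export const useOpenProviderSettings = (identifier: string) => {
  const router = useRouter();
  return () =>
    // Server deployments get a dedicated page per provider; client-only
    // builds keep the aggregated LLM settings page.
    router.push(isServerMode ? `/settings/provider/${identifier}` : '/settings/llm');
};
```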
@@ -81,13 +81,16 @@ export class AiInfraRepos {
         .map<EnabledAiModel & { enabled?: boolean | null }>((item) => {
           const user = allModels.find((m) => m.id === item.id && m.providerId === provider.id);

-          const enabled = !!user ? user.enabled : item.enabled;
-
           return {
-            ...item,
-            abilities: item.abilities || {},
-            enabled,
+            abilities: !!user ? user.abilities : item.abilities || {},
+            config: !!user ? user.config : item.config,
+            contextWindowTokens: !!user ? user.contextWindowTokens : item.contextWindowTokens,
+            displayName: user?.displayName || item.displayName,
+            enabled: !!user ? user.enabled : item.enabled,
+            id: item.id,
             providerId: provider.id,
+            sort: !!user ? user.sort : undefined,
+            type: item.type,
           };
         })
         .filter((i) => i.enabled);
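In plain terms, the hunk above replaces spreading the builtin item with an explicit field-by-field merge: when a user-level row exists for the same model and provider, its values win for abilities, config, context window, display name, enabled state, and sort, while the id and type always come from the builtin definition. A rough sketch of that merge rule with simplified types (the real `EnabledAiModel` shape is not part of this diff; `ModelEntry` and `mergeModel` are illustrative names):

```ts
// Simplified sketch of the merge rule; field names follow the diff above,
// the types are assumptions for illustration only.
interface ModelEntry {
  abilities?: Record<string, boolean>;
  config?: unknown;
  contextWindowTokens?: number;
  displayName?: string;
  enabled?: boolean | null;
  id: string;
  sort?: number;
  type?: string;
}

const mergeModel = (builtin: ModelEntry, providerId: string, user?: ModelEntry) => ({
  abilities: user ? user.abilities : builtin.abilities || {},
  config: user ? user.config : builtin.config,
  contextWindowTokens: user ? user.contextWindowTokens : builtin.contextWindowTokens,
  displayName: user?.displayName || builtin.displayName,
  enabled: user ? user.enabled : builtin.enabled,
  id: builtin.id, // identity always comes from the builtin list
  providerId,
  sort: user ? user.sort : undefined,
  type: builtin.type,
});
```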
@@ -248,7 +248,7 @@ describe('AiModelModel', () => {

       const allModels = await aiProviderModel.query();
       expect(allModels).toHaveLength(2);
-      expect(allModels.find((m) => m.id === 'existing-model')?.displayName).toBe('Updated Name');
+      expect(allModels.find((m) => m.id === 'existing-model')?.displayName).toBe('Old Name');
       expect(allModels.find((m) => m.id === 'new-model')?.displayName).toBe('New Model');
     });
   });
@@ -1,5 +1,4 @@
   import { and, asc, desc, eq, inArray } from 'drizzle-orm/expressions';
-  import pMap from 'p-map';

   import { LobeChatDatabase } from '@/database/type';
   import {
@@ -131,51 +130,21 @@ export class AiModelModel {
   };

   batchUpdateAiModels = async (providerId: string, models: AiProviderModelListItem[]) => {
-    return this.db.transaction(async (trx) => {
-      const records = models.map(({ id, ...model }) => ({
-        ...model,
-        id,
-        providerId,
-        updatedAt: new Date(),
-        userId: this.userId,
-      }));
+    const records = models.map(({ id, ...model }) => ({
+      ...model,
+      id,
+      providerId,
+      updatedAt: new Date(),
+      userId: this.userId,
+    }));

-      // Step 1: try to insert all records, ignoring conflicts
-      const insertedRecords = await trx
-        .insert(aiModels)
-        .values(records)
-        .onConflictDoNothing({
-          target: [aiModels.id, aiModels.userId, aiModels.providerId],
-        })
-        .returning();
-      // Step 2: find the records that need updating (i.e. the ones that conflicted on insert)
-      // Find the records that could not be inserted (the ones to update)
-      const insertedIds = new Set(insertedRecords.map((r) => r.id));
-      const recordsToUpdate = records.filter((r) => !insertedIds.has(r.id));
-
-      // Step 3: update the existing records
-      if (recordsToUpdate.length > 0) {
-        await pMap(
-          recordsToUpdate,
-          async (record) => {
-            await trx
-              .update(aiModels)
-              .set({
-                ...record,
-                updatedAt: new Date(),
-              })
-              .where(
-                and(
-                  eq(aiModels.id, record.id),
-                  eq(aiModels.userId, this.userId),
-                  eq(aiModels.providerId, providerId),
-                ),
-              );
-          },
-          { concurrency: 10 }, // limit concurrency to 10
-        );
-      }
-    });
+    return this.db
+      .insert(aiModels)
+      .values(records)
+      .onConflictDoNothing({
+        target: [aiModels.id, aiModels.userId, aiModels.providerId],
+      })
+      .returning();
   };

   batchToggleAiModels = async (providerId: string, models: string[], enabled: boolean) => {
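Net effect of the last two hunks: `batchUpdateAiModels` no longer upserts inside a transaction (insert, then update the conflicting rows via `pMap`); it now performs a single insert-if-missing and leaves rows the user has already customized untouched, which is why the test above now expects the existing model to keep 'Old Name'. A standalone sketch of that insert-if-missing pattern with Drizzle ORM, using a simplified, illustrative schema (the real `aiModels` table definition is not part of this diff):

```ts
// Minimal sketch of the insert-if-missing pattern; table and column names
// here are illustrative only, not the repository's actual schema.
import { pgTable, primaryKey, text, timestamp } from 'drizzle-orm/pg-core';
import type { NodePgDatabase } from 'drizzle-orm/node-postgres';

const aiModels = pgTable(
  'ai_models',
  {
    displayName: text('display_name'),
    id: text('id').notNull(),
    providerId: text('provider_id').notNull(),
    updatedAt: timestamp('updated_at'),
    userId: text('user_id').notNull(),
  },
  // The composite key mirrors the conflict target used in the diff above.
  (t) => ({ pk: primaryKey({ columns: [t.id, t.userId, t.providerId] }) }),
);

// Rows that already exist for (id, userId, providerId) are skipped rather
// than overwritten; only newly inserted rows are returned.
const insertMissingModels = (db: NodePgDatabase, rows: (typeof aiModels.$inferInsert)[]) =>
  db
    .insert(aiModels)
    .values(rows)
    .onConflictDoNothing({ target: [aiModels.id, aiModels.userId, aiModels.providerId] })
    .returning();
```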