@lobehub/chat 0.147.16 → 0.147.18

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58)
  1. package/CHANGELOG.md +51 -0
  2. package/contributing/Basic/Feature-Development.md +7 -7
  3. package/contributing/Basic/Feature-Development.zh-CN.md +7 -7
  4. package/next.config.mjs +61 -13
  5. package/package.json +2 -1
  6. package/sentry.client.config.ts +30 -0
  7. package/sentry.edge.config.ts +17 -0
  8. package/sentry.server.config.ts +19 -0
  9. package/src/app/home/Redirect.tsx +2 -2
  10. package/src/config/modelProviders/anthropic.ts +1 -0
  11. package/src/config/modelProviders/bedrock.ts +10 -0
  12. package/src/config/modelProviders/google.ts +1 -0
  13. package/src/config/modelProviders/groq.ts +1 -0
  14. package/src/config/modelProviders/mistral.ts +0 -5
  15. package/src/config/modelProviders/moonshot.ts +0 -7
  16. package/src/config/modelProviders/ollama.ts +0 -43
  17. package/src/config/modelProviders/openai.ts +0 -8
  18. package/src/config/modelProviders/openrouter.ts +10 -1
  19. package/src/config/modelProviders/perplexity.ts +16 -15
  20. package/src/config/modelProviders/togetherai.ts +1 -0
  21. package/src/database/client/models/__tests__/session.test.ts +1 -3
  22. package/src/database/client/models/session.ts +4 -4
  23. package/src/services/config.ts +4 -4
  24. package/src/services/file/client.test.ts +2 -2
  25. package/src/services/file/client.ts +35 -33
  26. package/src/services/file/index.ts +8 -2
  27. package/src/services/file/type.ts +11 -0
  28. package/src/services/message/client.test.ts +6 -32
  29. package/src/services/message/client.ts +24 -37
  30. package/src/services/message/index.test.ts +48 -0
  31. package/src/services/message/index.ts +22 -2
  32. package/src/services/message/type.ts +33 -0
  33. package/src/services/plugin/client.test.ts +2 -2
  34. package/src/services/plugin/client.ts +1 -1
  35. package/src/services/plugin/index.ts +9 -3
  36. package/src/services/session/client.test.ts +37 -44
  37. package/src/services/session/client.ts +30 -22
  38. package/src/services/session/index.ts +9 -2
  39. package/src/services/session/type.ts +44 -0
  40. package/src/services/topic/client.test.ts +18 -22
  41. package/src/services/topic/client.ts +31 -23
  42. package/src/services/topic/index.ts +10 -2
  43. package/src/services/topic/type.ts +32 -0
  44. package/src/services/user/client.ts +1 -1
  45. package/src/services/user/index.ts +10 -2
  46. package/src/store/chat/slices/message/action.test.ts +12 -12
  47. package/src/store/chat/slices/message/action.ts +4 -4
  48. package/src/store/chat/slices/plugin/action.test.ts +5 -6
  49. package/src/store/chat/slices/plugin/action.ts +1 -1
  50. package/src/store/chat/slices/topic/action.test.ts +11 -6
  51. package/src/store/chat/slices/topic/action.ts +7 -5
  52. package/src/store/global/slices/settings/selectors/modelProvider.test.ts +2 -2
  53. package/src/store/session/slices/agent/action.test.ts +175 -0
  54. package/src/store/session/slices/agent/action.ts +1 -1
  55. package/src/store/session/slices/session/action.test.ts +14 -15
  56. package/src/store/session/slices/session/action.ts +4 -4
  57. package/src/store/session/slices/sessionGroup/action.test.ts +6 -4
  58. package/src/store/session/slices/sessionGroup/action.ts +3 -3
package/CHANGELOG.md CHANGED
@@ -2,6 +2,57 @@
2
2
 
3
3
  # Changelog
4
4
 
5
+ ### [Version 0.147.18](https://github.com/lobehub/lobe-chat/compare/v0.147.17...v0.147.18)
6
+
7
+ <sup>Released on **2024-04-17**</sup>
8
+
9
+ #### 💄 Styles
10
+
11
+ - **misc**: Add claude 3 opus to AWS Bedrock, remove custom models from providers, and update Perplexity model names.
12
+
13
+ <br/>
14
+
15
+ <details>
16
+ <summary><kbd>Improvements and Fixes</kbd></summary>
17
+
18
+ #### Styles
19
+
20
+ - **misc**: Add claude 3 opus to AWS Bedrock, closes [#2072](https://github.com/lobehub/lobe-chat/issues/2072) ([479f562](https://github.com/lobehub/lobe-chat/commit/479f562))
21
+ - **misc**: Remove custom models from providers, and update Perplexity model names, closes [#2069](https://github.com/lobehub/lobe-chat/issues/2069) ([e04754d](https://github.com/lobehub/lobe-chat/commit/e04754d))
22
+
23
+ </details>
24
+
25
+ <div align="right">
26
+
27
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
28
+
29
+ </div>
30
+
31
+ ### [Version 0.147.17](https://github.com/lobehub/lobe-chat/compare/v0.147.16...v0.147.17)
32
+
33
+ <sup>Released on **2024-04-16**</sup>
34
+
35
+ #### ♻ Code Refactoring
36
+
37
+ - **misc**: Refactor service to a uniform interface.
38
+
39
+ <br/>
40
+
41
+ <details>
42
+ <summary><kbd>Improvements and Fixes</kbd></summary>
43
+
44
+ #### Code refactoring
45
+
46
+ - **misc**: Refactor service to a uniform interface, closes [#2062](https://github.com/lobehub/lobe-chat/issues/2062) ([86779e2](https://github.com/lobehub/lobe-chat/commit/86779e2))
47
+
48
+ </details>
49
+
50
+ <div align="right">
51
+
52
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
53
+
54
+ </div>
55
+
5
56
  ### [Version 0.147.16](https://github.com/lobehub/lobe-chat/compare/v0.147.15...v0.147.16)
6
57
 
7
58
  <sup>Released on **2024-04-14**</sup>
@@ -231,7 +231,7 @@ This requirement involves upgrading the Sessions feature to transform it from a
231
231
 
232
232
  To handle these groups, we need to refactor the implementation logic of `useFetchSessions`. Here are the key changes:
233
233
 
234
- 1. Use the `sessionService.getSessionsWithGroup` method to call the backend API and retrieve the grouped session data.
234
+ 1. Use the `sessionService.getGroupedSessions` method to call the backend API and retrieve the grouped session data.
235
235
  2. Save the retrieved data into three different state fields: `pinnedSessions`, `customSessionGroups`, and `defaultSessions`.
236
236
 
237
237
  #### `useFetchSessions` Method
@@ -247,7 +247,7 @@ export const createSessionSlice: StateCreator<
247
247
  > = (set, get) => ({
248
248
  // ... other methods
249
249
  useFetchSessions: () =>
250
- useSWR<ChatSessionList>(FETCH_SESSIONS_KEY, sessionService.getSessionsWithGroup, {
250
+ useSWR<ChatSessionList>(FETCH_SESSIONS_KEY, sessionService.getGroupedSessions, {
251
251
  onSuccess: (data) => {
252
252
  set(
253
253
  {
@@ -267,15 +267,15 @@ export const createSessionSlice: StateCreator<
267
267
 
268
268
  After successfully retrieving the data, we use the `set` method to update the `customSessionGroups`, `defaultSessions`, `pinnedSessions`, and `sessions` states. This ensures that the states are synchronized with the latest session data.
269
269
 
270
- #### `sessionService.getSessionsWithGroup` Method
270
+ #### `sessionService.getGroupedSessions` Method
271
271
 
272
- The `sessionService.getSessionsWithGroup` method is responsible for calling the backend API `SessionModel.queryWithGroups()`.
272
+ The `sessionService.getGroupedSessions` method is responsible for calling the backend API `SessionModel.queryWithGroups()`.
273
273
 
274
274
  ```typescript
275
275
  class SessionService {
276
276
  // ... other SessionGroup related implementations
277
277
 
278
- async getSessionsWithGroup(): Promise<ChatSessionList> {
278
+ async getGroupedSessions(): Promise<ChatSessionList> {
279
279
  return SessionModel.queryWithGroups();
280
280
  }
281
281
  }
@@ -283,7 +283,7 @@ class SessionService {
283
283
 
284
284
  #### `SessionModel.queryWithGroups` Method
285
285
 
286
- This method is the core method called by `sessionService.getSessionsWithGroup`, and it is responsible for querying and organizing session data. The code is as follows:
286
+ This method is the core method called by `sessionService.getGroupedSessions`, and it is responsible for querying and organizing session data. The code is as follows:
287
287
 
288
288
  ```typescript
289
289
  class _SessionModel extends BaseModel {
@@ -617,7 +617,7 @@ class ConfigService {
617
617
  // ... Other code omitted
618
618
 
619
619
  exportSessions = async () => {
620
- const sessions = await sessionService.getSessions();
620
+ const sessions = await sessionService.getAllSessions();
621
621
  + const sessionGroups = await sessionService.getSessionGroups();
622
622
  const messages = await messageService.getAllMessages();
623
623
  const topics = await topicService.getAllTopics();
@@ -231,7 +231,7 @@ export const createSessionGroupSlice: StateCreator<
231
231
 
232
232
  为了处理这些分组,我们需要改造 `useFetchSessions` 的实现逻辑。以下是关键的改动点:
233
233
 
234
- 1. 使用 `sessionService.getSessionsWithGroup` 方法负责调用后端接口来获取分组后的会话数据;
234
+ 1. 使用 `sessionService.getGroupedSessions` 方法负责调用后端接口来获取分组后的会话数据;
235
235
  2. 将获取后的数据保存到三个不同的状态字段中:`pinnedSessions`、`customSessionGroups` 和 `defaultSessions`;
236
236
 
237
237
  #### `useFetchSessions` 方法
@@ -247,7 +247,7 @@ export const createSessionSlice: StateCreator<
247
247
  > = (set, get) => ({
248
248
  // ... 其他方法
249
249
  useFetchSessions: () =>
250
- useSWR<ChatSessionList>(FETCH_SESSIONS_KEY, sessionService.getSessionsWithGroup, {
250
+ useSWR<ChatSessionList>(FETCH_SESSIONS_KEY, sessionService.getGroupedSessions, {
251
251
  onSuccess: (data) => {
252
252
  set(
253
253
  {
@@ -267,15 +267,15 @@ export const createSessionSlice: StateCreator<
267
267
 
268
268
  在成功获取数据后,我们使用 `set` 方法来更新 `customSessionGroups`、`defaultSessions`、`pinnedSessions` 和 `sessions` 状态。这将保证状态与最新的会话数据同步。
269
269
 
270
- #### getSessionsWithGroup
270
+ #### getGroupedSessions
271
271
 
272
- 使用 `sessionService.getSessionsWithGroup` 方法负责调用后端接口 `SessionModel.queryWithGroups()`
272
+ 使用 `sessionService.getGroupedSessions` 方法负责调用后端接口 `SessionModel.queryWithGroups()`
273
273
 
274
274
  ```typescript
275
275
  class SessionService {
276
276
  // ... 其他 SessionGroup 相关的实现
277
277
 
278
- async getSessionsWithGroup(): Promise<ChatSessionList> {
278
+ async getGroupedSessions(): Promise<ChatSessionList> {
279
279
  return SessionModel.queryWithGroups();
280
280
  }
281
281
  }
@@ -283,7 +283,7 @@ class SessionService {
283
283
 
284
284
  #### `SessionModel.queryWithGroups` 方法
285
285
 
286
- 此方法是 `sessionService.getSessionsWithGroup` 调用的核心方法,它负责查询和组织会话数据,代码如下:
286
+ 此方法是 `sessionService.getGroupedSessions` 调用的核心方法,它负责查询和组织会话数据,代码如下:
287
287
 
288
288
  ```typescript
289
289
  class _SessionModel extends BaseModel {
@@ -611,7 +611,7 @@ class ConfigService {
611
611
  // ... 省略其他
612
612
 
613
613
  exportSessions = async () => {
614
- const sessions = await sessionService.getSessions();
614
+ const sessions = await sessionService.getAllSessions();
615
615
  + const sessionGroups = await sessionService.getSessionGroups();
616
616
  const messages = await messageService.getAllMessages();
617
617
  const topics = await topicService.getAllTopics();
package/next.config.mjs CHANGED
@@ -1,5 +1,6 @@
1
1
  import nextPWA from '@ducanh2912/next-pwa';
2
2
  import analyzer from '@next/bundle-analyzer';
3
+ import { withSentryConfig } from '@sentry/nextjs';
3
4
 
4
5
  const isProd = process.env.NODE_ENV === 'production';
5
6
  const buildWithDocker = process.env.DOCKER === 'true';
@@ -9,18 +10,6 @@ const API_PROXY_ENDPOINT = process.env.API_PROXY_ENDPOINT || '';
9
10
 
10
11
  const basePath = process.env.NEXT_PUBLIC_BASE_PATH;
11
12
 
12
- const withBundleAnalyzer = analyzer({
13
- enabled: process.env.ANALYZE === 'true',
14
- });
15
-
16
- const withPWA = nextPWA({
17
- dest: 'public',
18
- register: true,
19
- workboxOptions: {
20
- skipWaiting: true,
21
- },
22
- });
23
-
24
13
  /** @type {import('next').NextConfig} */
25
14
  const nextConfig = {
26
15
  compress: isProd,
@@ -67,4 +56,63 @@ const nextConfig = {
67
56
  },
68
57
  };
69
58
 
70
- export default isProd ? withBundleAnalyzer(withPWA(nextConfig)) : nextConfig;
59
+ const noWrapper = (config) => config;
60
+
61
+ const withBundleAnalyzer = process.env.ANALYZE === 'true' ? analyzer() : noWrapper;
62
+
63
+ const withPWA = isProd
64
+ ? nextPWA({
65
+ dest: 'public',
66
+ register: true,
67
+ workboxOptions: {
68
+ skipWaiting: true,
69
+ },
70
+ })
71
+ : noWrapper;
72
+
73
+ const hasSentry = !!process.env.NEXT_PUBLIC_SENTRY_DSN;
74
+ const withSentry =
75
+ isProd && hasSentry
76
+ ? (c) =>
77
+ withSentryConfig(
78
+ c,
79
+ {
80
+ // For all available options, see:
81
+ // https://github.com/getsentry/sentry-webpack-plugin#options
82
+
83
+ // Suppresses source map uploading logs during build
84
+ silent: true,
85
+ org: process.env.SENTRY_ORG,
86
+ project: process.env.SENTRY_PROJECT,
87
+ },
88
+ {
89
+ // For all available options, see:
90
+ // https://docs.sentry.io/platforms/javascript/guides/nextjs/manual-setup/
91
+
92
+ // Upload a larger set of source maps for prettier stack traces (increases build time)
93
+ widenClientFileUpload: true,
94
+
95
+ // Transpiles SDK to be compatible with IE11 (increases bundle size)
96
+ transpileClientSDK: true,
97
+
98
+ // Routes browser requests to Sentry through a Next.js rewrite to circumvent ad-blockers. (increases server load)
99
+ // Note: Check that the configured route will not match with your Next.js middleware, otherwise reporting of client-
100
+ // side errors will fail.
101
+ tunnelRoute: '/monitoring',
102
+
103
+ // Hides source maps from generated client bundles
104
+ hideSourceMaps: true,
105
+
106
+ // Automatically tree-shake Sentry logger statements to reduce bundle size
107
+ disableLogger: true,
108
+
109
+ // Enables automatic instrumentation of Vercel Cron Monitors.
110
+ // See the following for more information:
111
+ // https://docs.sentry.io/product/crons/
112
+ // https://vercel.com/docs/cron-jobs
113
+ automaticVercelMonitors: true,
114
+ },
115
+ )
116
+ : noWrapper;
117
+
118
+ export default withBundleAnalyzer(withPWA(withSentry(nextConfig)));
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@lobehub/chat",
3
- "version": "0.147.16",
3
+ "version": "0.147.18",
4
4
  "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
5
5
  "keywords": [
6
6
  "framework",
@@ -94,6 +94,7 @@
94
94
  "@lobehub/tts": "latest",
95
95
  "@lobehub/ui": "^1.137.7",
96
96
  "@next/third-parties": "^14.1.4",
97
+ "@sentry/nextjs": "^7.105.0",
97
98
  "@vercel/analytics": "^1.2.2",
98
99
  "@vercel/speed-insights": "^1.0.10",
99
100
  "ahooks": "^3.7.11",
@@ -0,0 +1,30 @@
1
+ // This file configures the initialization of Sentry on the client.
2
+ // The config you add here will be used whenever a users loads a page in their browser.
3
+ // https://docs.sentry.io/platforms/javascript/guides/nextjs/
4
+ import * as Sentry from '@sentry/nextjs';
5
+
6
+ if (!!process.env.NEXT_PUBLIC_SENTRY_DSN) {
7
+ Sentry.init({
8
+ // Setting this option to true will print useful information to the console while you're setting up Sentry.
9
+ debug: false,
10
+
11
+ dsn: process.env.NEXT_PUBLIC_SENTRY_DSN,
12
+ // You can remove this option if you're not planning to use the Sentry Session Replay feature:
13
+ integrations: [
14
+ Sentry.replayIntegration({
15
+ blockAllMedia: true,
16
+ // Additional Replay configuration goes in here, for example:
17
+ maskAllText: true,
18
+ }),
19
+ ],
20
+
21
+ replaysOnErrorSampleRate: 1,
22
+
23
+ // This sets the sample rate to be 10%. You may want this to be 100% while
24
+ // in development and sample at a lower rate in production
25
+ replaysSessionSampleRate: 0.1,
26
+
27
+ // Adjust this value in production, or use tracesSampler for greater control
28
+ tracesSampleRate: 1,
29
+ });
30
+ }
@@ -0,0 +1,17 @@
1
+ // This file configures the initialization of Sentry for edge features (middleware, edge routes, and so on).
2
+ // The config you add here will be used whenever one of the edge features is loaded.
3
+ // Note that this config is unrelated to the Vercel Edge Runtime and is also required when running locally.
4
+ // https://docs.sentry.io/platforms/javascript/guides/nextjs/
5
+ import * as Sentry from '@sentry/nextjs';
6
+
7
+ if (!!process.env.NEXT_PUBLIC_SENTRY_DSN) {
8
+ Sentry.init({
9
+ // Setting this option to true will print useful information to the console while you're setting up Sentry.
10
+ debug: false,
11
+
12
+ dsn: process.env.NEXT_PUBLIC_SENTRY_DSN,
13
+
14
+ // Adjust this value in production, or use tracesSampler for greater control
15
+ tracesSampleRate: 1,
16
+ });
17
+ }
@@ -0,0 +1,19 @@
1
+ // This file configures the initialization of Sentry on the server.
2
+ // The config you add here will be used whenever the server handles a request.
3
+ // https://docs.sentry.io/platforms/javascript/guides/nextjs/
4
+ import * as Sentry from '@sentry/nextjs';
5
+
6
+ if (!!process.env.NEXT_PUBLIC_SENTRY_DSN) {
7
+ Sentry.init({
8
+ // Setting this option to true will print useful information to the console while you're setting up Sentry.
9
+ debug: false,
10
+
11
+ dsn: process.env.NEXT_PUBLIC_SENTRY_DSN,
12
+
13
+ // Adjust this value in production, or use tracesSampler for greater control
14
+ tracesSampleRate: 1,
15
+
16
+ // uncomment the line below to enable Spotlight (https://spotlightjs.com)
17
+ // spotlight: process.env.NODE_ENV === 'development',
18
+ });
19
+ }
@@ -8,8 +8,8 @@ import { sessionService } from '@/services/session';
8
8
 
9
9
  const checkHasConversation = async () => {
10
10
  const hasMessages = await messageService.hasMessages();
11
- const hasAgents = await sessionService.hasSessions();
12
- return hasMessages || hasAgents;
11
+ const hasAgents = await sessionService.countSessions();
12
+ return hasMessages || hasAgents > 0;
13
13
  };
14
14
 
15
15
  const Redirect = memo(() => {
@@ -1,5 +1,6 @@
1
1
  import { ModelProviderCard } from '@/types/llm';
2
2
 
3
+ // ref https://docs.anthropic.com/claude/docs/models-overview
3
4
  const Anthropic: ModelProviderCard = {
4
5
  chatModels: [
5
6
  {
@@ -1,5 +1,6 @@
1
1
  import { ModelProviderCard } from '@/types/llm';
2
2
 
3
+ // ref https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html
3
4
  const Bedrock: ModelProviderCard = {
4
5
  chatModels: [
5
6
  {
@@ -9,6 +10,15 @@ const Bedrock: ModelProviderCard = {
9
10
  id: 'amazon.titan-text-express-v1:0:8k',
10
11
  tokens: 8000,
11
12
  },
13
+ {
14
+ description:
15
+ 'Claude 3 Opus 是 Anthropic 最强大的人工智能模型,在处理高度复杂的任务方面具备顶尖性能。该模型能够以非凡的流畅性和类似人类的理解能力引导开放式的提示和未可见的场景。Claude 3 Opus 向我们展示生成式人工智能的美好前景。 Claude 3 Opus 可以处理图像和返回文本输出,并且提供 200K 上下文窗口。',
16
+ displayName: 'Claude 3 Opus',
17
+ enabled: true,
18
+ id: 'anthropic.claude-3-opus-20240229-v1:0',
19
+ tokens: 200_000,
20
+ vision: true,
21
+ },
12
22
  {
13
23
  description:
14
24
  'Anthropic 推出的 Claude 3 Sonnet 模型在智能和速度之间取得理想的平衡,尤其是在处理企业工作负载方面。该模型提供最大的效用,同时价格低于竞争产品,并且其经过精心设计,是大规模部署人工智能的可信赖、高耐久性骨干模型。 Claude 3 Sonnet 可以处理图像和返回文本输出,并且提供 200K 上下文窗口。',
@@ -1,5 +1,6 @@
1
1
  import { ModelProviderCard } from '@/types/llm';
2
2
 
3
+ // ref https://ai.google.dev/models/gemini
3
4
  const Google: ModelProviderCard = {
4
5
  chatModels: [
5
6
  {
@@ -1,5 +1,6 @@
1
1
  import { ModelProviderCard } from '@/types/llm';
2
2
 
3
+ // ref https://console.groq.com/docs/models
3
4
  const Groq: ModelProviderCard = {
4
5
  chatModels: [
5
6
  {
@@ -33,11 +33,6 @@ const Mistral: ModelProviderCard = {
33
33
  id: 'mistral-large-latest',
34
34
  tokens: 32_768,
35
35
  },
36
- {
37
- displayName: 'Mixtral 8x22B',
38
- id: 'mixtral-8x22b',
39
- tokens: 32_768,
40
- },
41
36
  ],
42
37
  id: 'mistral',
43
38
  };
@@ -20,13 +20,6 @@ const Moonshot: ModelProviderCard = {
20
20
  id: 'moonshot-v1-128k',
21
21
  tokens: 128_000,
22
22
  },
23
- {
24
- displayName: 'Moonshot Kimi Reverse',
25
- files: true,
26
- id: 'moonshot-v1',
27
- tokens: 200_000,
28
- vision: true,
29
- },
30
23
  ],
31
24
  id: 'moonshot',
32
25
  };
@@ -148,49 +148,6 @@ const Ollama: ModelProviderCard = {
148
148
  tokens: 4000,
149
149
  vision: true,
150
150
  },
151
- // TODO: 在单独支持千问之后这些 Qwen 模型需要移动到千问的配置中
152
- {
153
- displayName: 'Qwen Plus',
154
- functionCall: true,
155
- id: 'qwen-plus',
156
- tokens: 30_000,
157
- vision: false,
158
- },
159
- {
160
- displayName: 'Qwen Turbo',
161
- functionCall: true,
162
- id: 'qwen-turbo',
163
- tokens: 6000,
164
- vision: false,
165
- },
166
- {
167
- displayName: 'Qwen Max',
168
- functionCall: true,
169
- id: 'qwen-max',
170
- tokens: 6000,
171
- vision: false,
172
- },
173
- {
174
- displayName: 'Qwen Max Long',
175
- functionCall: true,
176
- id: 'qwen-max-longcontext',
177
- tokens: 28_000,
178
- vision: false,
179
- },
180
- {
181
- displayName: 'Qwen VL Max',
182
- functionCall: false,
183
- id: 'qwen-vl-max',
184
- tokens: 6000,
185
- vision: true,
186
- },
187
- {
188
- displayName: 'Qwen VL Plus',
189
- functionCall: false,
190
- id: 'qwen-vl-plus',
191
- tokens: 30_000,
192
- vision: true,
193
- },
194
151
  ],
195
152
  id: 'ollama',
196
153
  };
@@ -117,14 +117,6 @@ const OpenAI: ModelProviderCard = {
117
117
  tokens: 128_000,
118
118
  vision: true,
119
119
  },
120
- {
121
- displayName: 'GPT-4 ALL',
122
- files: true,
123
- functionCall: true,
124
- id: 'gpt-4-all',
125
- tokens: 32_768,
126
- vision: true,
127
- },
128
120
  ],
129
121
  enabled: true,
130
122
  id: 'openai',
@@ -1,5 +1,6 @@
1
1
  import { ModelProviderCard } from '@/types/llm';
2
2
 
3
+ // ref https://openrouter.ai/docs#models
3
4
  const OpenRouter: ModelProviderCard = {
4
5
  chatModels: [
5
6
  {
@@ -99,13 +100,21 @@ const OpenRouter: ModelProviderCard = {
99
100
  vision: false,
100
101
  },
101
102
  {
102
- displayName: 'Mistral: Mixtral 8x22B (base) (free)',
103
+ displayName: 'Mistral: Mixtral 8x22B (base)',
103
104
  enabled: true,
104
105
  functionCall: false,
105
106
  id: 'mistralai/mixtral-8x22b',
106
107
  tokens: 64_000,
107
108
  vision: false,
108
109
  },
110
+ {
111
+ displayName: 'Microsoft: WizardLM-2 8x22B',
112
+ enabled: true,
113
+ functionCall: false,
114
+ id: 'microsoft/wizardlm-2-8x22b',
115
+ tokens: 65_536,
116
+ vision: false,
117
+ },
109
118
  ],
110
119
  id: 'openrouter',
111
120
  };
@@ -1,43 +1,44 @@
1
1
  import { ModelProviderCard } from '@/types/llm';
2
2
 
3
+ // ref https://docs.perplexity.ai/docs/model-cards
3
4
  const Perplexity: ModelProviderCard = {
4
5
  chatModels: [
5
6
  {
6
7
  displayName: 'Perplexity 7B Chat',
7
- id: 'pplx-7b-chat',
8
- tokens: 8192,
8
+ id: 'sonar-small-chat',
9
+ tokens: 16_384,
9
10
  },
10
11
  {
11
- displayName: 'Perplexity 70B Chat',
12
+ displayName: 'Perplexity 8x7B Chat',
12
13
  enabled: true,
13
- id: 'pplx-70b-chat',
14
- tokens: 8192,
14
+ id: 'sonar-medium-chat',
15
+ tokens: 16_384,
15
16
  },
16
17
  {
17
18
  displayName: 'Perplexity 7B Online',
18
- id: 'pplx-7b-online',
19
- tokens: 8192,
19
+ id: 'sonar-small-online',
20
+ tokens: 12_000,
20
21
  },
21
22
  {
22
- displayName: 'Perplexity 70B Online',
23
+ displayName: 'Perplexity 8x7B Online',
23
24
  enabled: true,
24
- id: 'pplx-70b-online',
25
- tokens: 8192,
25
+ id: 'sonar-medium-online',
26
+ tokens: 12_000,
26
27
  },
27
28
  {
28
- displayName: 'Codellama 34B Instruct',
29
- id: 'codellama-34b-instruct',
29
+ displayName: 'Codellama 70B Instruct',
30
+ id: 'codellama-70b-instruct',
30
31
  tokens: 16_384,
31
32
  },
32
33
  {
33
- displayName: 'Codellama 70B Instruct',
34
- id: 'codellama-70b-instruct',
34
+ displayName: 'Mistral 7B Instruct',
35
+ id: 'mistral-7b-instruct',
35
36
  tokens: 16_384,
36
37
  },
37
38
  {
38
39
  displayName: 'Mixtral 8x7B Instruct',
39
40
  id: 'mixtral-8x7b-instruct',
40
- tokens: 8192,
41
+ tokens: 16_384,
41
42
  },
42
43
  ],
43
44
  id: 'perplexity',
@@ -1,5 +1,6 @@
1
1
  import { ModelProviderCard } from '@/types/llm';
2
2
 
3
+ // ref https://api.together.xyz/models
3
4
  const TogetherAI: ModelProviderCard = {
4
5
  chatModels: [
5
6
  {
@@ -111,12 +111,10 @@ describe('SessionModel', () => {
111
111
 
112
112
  expect(updatedSession).toHaveProperty('group', 'newGroup');
113
113
  });
114
- });
115
114
 
116
- describe('updatePinned', () => {
117
115
  it('should update pinned status of a session', async () => {
118
116
  const createdSession = await SessionModel.create('agent', sessionData);
119
- await SessionModel.updatePinned(createdSession.id, true);
117
+ await SessionModel.update(createdSession.id, { pinned: 1 });
120
118
  const updatedSession = await SessionModel.findById(createdSession.id);
121
119
  expect(updatedSession).toHaveProperty('pinned', 1);
122
120
  });
@@ -171,6 +171,10 @@ class _SessionModel extends BaseModel {
171
171
  return (await this.table.count()) === 0;
172
172
  }
173
173
 
174
+ async count() {
175
+ return this.table.count();
176
+ }
177
+
174
178
  // **************** Create *************** //
175
179
 
176
180
  async create(type: 'agent' | 'group', defaultValue: Partial<LobeAgentSession>, id = uuid()) {
@@ -238,10 +242,6 @@ class _SessionModel extends BaseModel {
238
242
  return super._updateWithSync(id, data);
239
243
  }
240
244
 
241
- async updatePinned(id: string, pinned: boolean) {
242
- return this.update(id, { pinned: pinned ? 1 : 0 });
243
- }
244
-
245
245
  async updateConfig(id: string, data: DeepPartial<LobeAgentConfig>) {
246
246
  const session = await this.findById(id);
247
247
  if (!session) return;
@@ -33,7 +33,7 @@ class ConfigService {
33
33
  return await sessionService.batchCreateSessions(sessions);
34
34
  };
35
35
  importMessages = async (messages: ChatMessage[]) => {
36
- return messageService.batchCreate(messages);
36
+ return messageService.batchCreateMessages(messages);
37
37
  };
38
38
  importSettings = async (settings: GlobalSettings) => {
39
39
  useGlobalStore.getState().importAppSettings(settings);
@@ -105,7 +105,7 @@ class ConfigService {
105
105
  * export all agents
106
106
  */
107
107
  exportAgents = async () => {
108
- const agents = await sessionService.getAllAgents();
108
+ const agents = await sessionService.getSessionsByType('agent');
109
109
  const sessionGroups = await sessionService.getSessionGroups();
110
110
 
111
111
  const config = createConfigFile('agents', { sessionGroups, sessions: agents });
@@ -117,7 +117,7 @@ class ConfigService {
117
117
  * export all sessions
118
118
  */
119
119
  exportSessions = async () => {
120
- const sessions = await sessionService.getSessions();
120
+ const sessions = await sessionService.getSessionsByType();
121
121
  const sessionGroups = await sessionService.getSessionGroups();
122
122
  const messages = await messageService.getAllMessages();
123
123
  const topics = await topicService.getAllTopics();
@@ -188,7 +188,7 @@ class ConfigService {
188
188
  * export all data
189
189
  */
190
190
  exportAll = async () => {
191
- const sessions = await sessionService.getSessions();
191
+ const sessions = await sessionService.getSessionsByType();
192
192
  const sessionGroups = await sessionService.getSessionGroups();
193
193
  const messages = await messageService.getAllMessages();
194
194
  const topics = await topicService.getAllTopics();