@lobehub/chat 1.90.2 → 1.90.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (91)
  1. package/.cursor/rules/backend-architecture.mdc +12 -9
  2. package/.cursor/rules/cursor-ux-optimize.mdc +1 -1
  3. package/.cursor/rules/define-database-model.mdc +1 -1
  4. package/.cursor/rules/drizzle-schema-style-guide.mdc +1 -1
  5. package/.cursor/rules/i18n/i18n.mdc +1 -1
  6. package/.cursor/rules/project-introduce.mdc +2 -1
  7. package/.cursor/rules/system-role.mdc +42 -0
  8. package/.cursor/rules/zustand-action-patterns.mdc +318 -0
  9. package/.cursor/rules/zustand-slice-organization.mdc +300 -0
  10. package/CHANGELOG.md +58 -0
  11. package/README.md +2 -2
  12. package/README.zh-CN.md +2 -2
  13. package/changelog/v1.json +21 -0
  14. package/docs/self-hosting/advanced/model-list.mdx +1 -1
  15. package/docs/self-hosting/advanced/model-list.zh-CN.mdx +1 -1
  16. package/docs/self-hosting/environment-variables/model-provider.mdx +2 -2
  17. package/docs/self-hosting/environment-variables/model-provider.zh-CN.mdx +2 -2
  18. package/package.json +44 -44
  19. package/src/config/aiModels/qwen.ts +64 -42
  20. package/src/config/modelProviders/qwen.ts +2 -5
  21. package/src/config/modelProviders/xai.ts +1 -1
  22. package/src/features/PluginsUI/Render/utils/iframeOnReady.test.ts +1 -1
  23. package/src/features/PluginsUI/Render/utils/pluginSettings.test.ts +1 -1
  24. package/src/features/PluginsUI/Render/utils/pluginState.test.ts +1 -1
  25. package/src/libs/model-runtime/BaseAI.ts +3 -3
  26. package/src/libs/model-runtime/ModelRuntime.ts +2 -2
  27. package/src/libs/model-runtime/UniformRuntime/index.ts +2 -2
  28. package/src/libs/model-runtime/ai21/index.ts +2 -2
  29. package/src/libs/model-runtime/ai360/index.ts +2 -2
  30. package/src/libs/model-runtime/anthropic/index.ts +15 -11
  31. package/src/libs/model-runtime/azureOpenai/index.ts +2 -2
  32. package/src/libs/model-runtime/azureai/index.ts +4 -4
  33. package/src/libs/model-runtime/baichuan/index.ts +2 -2
  34. package/src/libs/model-runtime/bedrock/index.ts +4 -4
  35. package/src/libs/model-runtime/cloudflare/index.ts +2 -2
  36. package/src/libs/model-runtime/cohere/index.ts +2 -2
  37. package/src/libs/model-runtime/deepseek/index.ts +2 -2
  38. package/src/libs/model-runtime/fireworksai/index.ts +2 -2
  39. package/src/libs/model-runtime/giteeai/index.ts +2 -2
  40. package/src/libs/model-runtime/github/index.ts +2 -2
  41. package/src/libs/model-runtime/google/index.ts +7 -5
  42. package/src/libs/model-runtime/groq/index.ts +2 -2
  43. package/src/libs/model-runtime/higress/index.ts +2 -2
  44. package/src/libs/model-runtime/huggingface/index.ts +2 -2
  45. package/src/libs/model-runtime/hunyuan/index.ts +2 -2
  46. package/src/libs/model-runtime/index.ts +1 -1
  47. package/src/libs/model-runtime/infiniai/index.ts +2 -2
  48. package/src/libs/model-runtime/internlm/index.ts +7 -9
  49. package/src/libs/model-runtime/jina/index.ts +2 -2
  50. package/src/libs/model-runtime/lmstudio/index.ts +2 -2
  51. package/src/libs/model-runtime/minimax/index.ts +2 -2
  52. package/src/libs/model-runtime/mistral/index.ts +2 -2
  53. package/src/libs/model-runtime/modelscope/index.ts +2 -3
  54. package/src/libs/model-runtime/moonshot/index.ts +2 -2
  55. package/src/libs/model-runtime/novita/index.ts +2 -2
  56. package/src/libs/model-runtime/nvidia/index.ts +2 -2
  57. package/src/libs/model-runtime/ollama/index.ts +2 -2
  58. package/src/libs/model-runtime/openai/index.ts +3 -3
  59. package/src/libs/model-runtime/openrouter/index.ts +2 -2
  60. package/src/libs/model-runtime/perplexity/index.ts +2 -2
  61. package/src/libs/model-runtime/ppio/index.ts +2 -2
  62. package/src/libs/model-runtime/qiniu/index.ts +2 -2
  63. package/src/libs/model-runtime/qwen/index.ts +2 -2
  64. package/src/libs/model-runtime/sambanova/index.ts +2 -2
  65. package/src/libs/model-runtime/search1api/index.ts +2 -2
  66. package/src/libs/model-runtime/sensenova/index.ts +2 -2
  67. package/src/libs/model-runtime/siliconcloud/index.ts +2 -2
  68. package/src/libs/model-runtime/spark/index.ts +15 -13
  69. package/src/libs/model-runtime/stepfun/index.ts +2 -2
  70. package/src/libs/model-runtime/taichu/index.ts +2 -2
  71. package/src/libs/model-runtime/tencentcloud/index.ts +2 -2
  72. package/src/libs/model-runtime/togetherai/index.ts +2 -2
  73. package/src/libs/model-runtime/types/chat.ts +1 -1
  74. package/src/libs/model-runtime/upstage/index.ts +2 -2
  75. package/src/libs/model-runtime/utils/openaiCompatibleFactory/index.test.ts +7 -7
  76. package/src/libs/model-runtime/utils/openaiCompatibleFactory/index.ts +3 -3
  77. package/src/libs/model-runtime/vllm/index.ts +2 -2
  78. package/src/libs/model-runtime/volcengine/index.ts +2 -2
  79. package/src/libs/model-runtime/wenxin/index.ts +2 -2
  80. package/src/libs/model-runtime/xai/index.ts +6 -3
  81. package/src/libs/model-runtime/xinference/index.ts +2 -2
  82. package/src/libs/model-runtime/zeroone/index.ts +2 -2
  83. package/src/libs/model-runtime/zhipu/index.ts +2 -2
  84. package/src/middleware.ts +3 -1
  85. package/src/server/globalConfig/index.ts +3 -0
  86. package/src/server/routers/tools/search.test.ts +2 -4
  87. package/src/services/chat.ts +1 -0
  88. package/src/store/chat/slices/aiChat/actions/__tests__/generateAIChat.test.ts +4 -2
  89. package/src/store/chat/slices/message/action.test.ts +2 -1
  90. package/src/store/chat/slices/topic/action.test.ts +3 -2
  91. package/src/types/aiProvider.ts +1 -0
package/src/libs/model-runtime/openai/index.ts CHANGED
@@ -1,7 +1,7 @@
  import type { ChatModelCard } from '@/types/llm';

  import { ModelProvider } from '../types';
- import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+ import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
  import { pruneReasoningPayload } from '../utils/openaiHelpers';

  export interface OpenAIModelCard {
@@ -10,13 +10,13 @@ export interface OpenAIModelCard {

  const prunePrefixes = ['o1', 'o3', 'o4'];

- export const LobeOpenAI = LobeOpenAICompatibleFactory({
+ export const LobeOpenAI = createOpenAICompatibleRuntime({
    baseURL: 'https://api.openai.com/v1',
    chatCompletion: {
      handlePayload: (payload) => {
        const { model } = payload;

-       if (prunePrefixes.some(prefix => model.startsWith(prefix))) {
+       if (prunePrefixes.some((prefix) => model.startsWith(prefix))) {
          return pruneReasoningPayload(payload) as any;
        }

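Note: the dominant change in this release is a mechanical rename of the OpenAI-compatible factory: `LobeOpenAICompatibleFactory` becomes `createOpenAICompatibleRuntime`, and every provider module below updates its import and call site the same way. A minimal sketch of what a provider module looks like after the rename — the `example` provider id, URL, and env var are illustrative placeholders, not part of the package:

// Sketch of a provider module after the rename; 'example' is a placeholder provider.
import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';

export const LobeExampleAI = createOpenAICompatibleRuntime({
  baseURL: 'https://api.example.com/v1',
  chatCompletion: {
    // providers hook in here to rewrite model/tool fields before dispatch
    handlePayload: (payload) => payload as any,
  },
  debug: {
    chatCompletion: () => process.env.DEBUG_EXAMPLE_CHAT_COMPLETION === '1',
  },
  provider: 'example',
});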
package/src/libs/model-runtime/openrouter/index.ts CHANGED
@@ -1,7 +1,7 @@
  import type { ChatModelCard } from '@/types/llm';

  import { ModelProvider } from '../types';
- import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+ import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
  import { OpenRouterModelCard, OpenRouterModelExtraInfo, OpenRouterReasoning } from './type';

  const formatPrice = (price: string) => {
@@ -9,7 +9,7 @@ const formatPrice = (price: string) => {
    return Number((Number(price) * 1e6).toPrecision(5));
  };

- export const LobeOpenRouterAI = LobeOpenAICompatibleFactory({
+ export const LobeOpenRouterAI = createOpenAICompatibleRuntime({
    baseURL: 'https://openrouter.ai/api/v1',
    chatCompletion: {
      handlePayload: (payload) => {
package/src/libs/model-runtime/perplexity/index.ts CHANGED
@@ -1,9 +1,9 @@
  import OpenAI from 'openai';

  import { ChatStreamPayload, ModelProvider } from '../types';
- import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+ import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';

- export const LobePerplexityAI = LobeOpenAICompatibleFactory({
+ export const LobePerplexityAI = createOpenAICompatibleRuntime({
    baseURL: 'https://api.perplexity.ai',
    chatCompletion: {
      handlePayload: (payload: ChatStreamPayload) => {
package/src/libs/model-runtime/ppio/index.ts CHANGED
@@ -1,10 +1,10 @@
  import type { ChatModelCard } from '@/types/llm';

  import { ModelProvider } from '../types';
- import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+ import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
  import { PPIOModelCard } from './type';

- export const LobePPIOAI = LobeOpenAICompatibleFactory({
+ export const LobePPIOAI = createOpenAICompatibleRuntime({
    baseURL: 'https://api.ppinfra.com/v3/openai',
    constructorOptions: {
      defaultHeaders: {
package/src/libs/model-runtime/qiniu/index.ts CHANGED
@@ -1,13 +1,13 @@
  import type { ChatModelCard } from '@/types/llm';

  import { ModelProvider } from '../types';
- import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+ import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';

  export interface QiniuModelCard {
    id: string;
  }

- export const LobeQiniuAI = LobeOpenAICompatibleFactory({
+ export const LobeQiniuAI = createOpenAICompatibleRuntime({
    apiKey: 'placeholder-to-avoid-error',
    baseURL: 'https://api.qnaigc.com/v1',
    debug: {
package/src/libs/model-runtime/qwen/index.ts CHANGED
@@ -1,7 +1,7 @@
  import type { ChatModelCard } from '@/types/llm';

  import { ModelProvider } from '../types';
- import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+ import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
  import { QwenAIStream } from '../utils/streams';

  export interface QwenModelCard {
@@ -20,7 +20,7 @@ export const QwenLegacyModels = new Set([
    'qwen-1.8b-longcontext-chat',
  ]);

- export const LobeQwenAI = LobeOpenAICompatibleFactory({
+ export const LobeQwenAI = createOpenAICompatibleRuntime({
    baseURL: 'https://dashscope.aliyuncs.com/compatible-mode/v1',
    chatCompletion: {
      handlePayload: (payload) => {
package/src/libs/model-runtime/sambanova/index.ts CHANGED
@@ -1,7 +1,7 @@
  import { ModelProvider } from '../types';
- import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+ import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';

- export const LobeSambaNovaAI = LobeOpenAICompatibleFactory({
+ export const LobeSambaNovaAI = createOpenAICompatibleRuntime({
    baseURL: 'https://api.sambanova.ai/v1',
    debug: {
      chatCompletion: () => process.env.DEBUG_SAMBANOVA_CHAT_COMPLETION === '1',
package/src/libs/model-runtime/search1api/index.ts CHANGED
@@ -3,13 +3,13 @@ import OpenAI from 'openai';
  import type { ChatModelCard } from '@/types/llm';

  import { ChatStreamPayload, ModelProvider } from '../types';
- import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+ import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';

  export interface Search1APIModelCard {
    id: string;
  }

- export const LobeSearch1API = LobeOpenAICompatibleFactory({
+ export const LobeSearch1API = createOpenAICompatibleRuntime({
    baseURL: 'https://api.search1api.com/v1',
    chatCompletion: {
      handlePayload: (payload: ChatStreamPayload) => {
package/src/libs/model-runtime/sensenova/index.ts CHANGED
@@ -1,14 +1,14 @@
  import type { ChatModelCard } from '@/types/llm';

  import { ModelProvider } from '../types';
- import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+ import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
  import { convertSenseNovaMessage } from '../utils/sensenovaHelpers';

  export interface SenseNovaModelCard {
    id: string;
  }

- export const LobeSenseNovaAI = LobeOpenAICompatibleFactory({
+ export const LobeSenseNovaAI = createOpenAICompatibleRuntime({
    baseURL: 'https://api.sensenova.cn/compatible-mode/v1',
    chatCompletion: {
      handlePayload: (payload) => {
package/src/libs/model-runtime/siliconcloud/index.ts CHANGED
@@ -2,13 +2,13 @@ import type { ChatModelCard } from '@/types/llm';

  import { AgentRuntimeErrorType } from '../error';
  import { ChatCompletionErrorPayload, ModelProvider } from '../types';
- import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+ import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';

  export interface SiliconCloudModelCard {
    id: string;
  }

- export const LobeSiliconCloudAI = LobeOpenAICompatibleFactory({
+ export const LobeSiliconCloudAI = createOpenAICompatibleRuntime({
    baseURL: 'https://api.siliconflow.cn/v1',
    chatCompletion: {
      handleError: (error: any): Omit<ChatCompletionErrorPayload, 'provider'> | undefined => {
package/src/libs/model-runtime/spark/index.ts CHANGED
@@ -1,26 +1,28 @@
  import { ChatStreamPayload, ModelProvider } from '../types';
- import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+ import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
  import { SparkAIStream, transformSparkResponseToStream } from '../utils/streams';

- export const LobeSparkAI = LobeOpenAICompatibleFactory({
+ export const LobeSparkAI = createOpenAICompatibleRuntime({
    baseURL: 'https://spark-api-open.xf-yun.com/v1',
    chatCompletion: {
      handlePayload: (payload: ChatStreamPayload) => {
        const { enabledSearch, tools, ...rest } = payload;

-       const sparkTools = enabledSearch ? [
-         ...(tools || []),
-         {
-           type: "web_search",
-           web_search: {
-             enable: true,
-             search_mode: process.env.SPARK_SEARCH_MODE || "normal", // normal or deep
-             /*
+       const sparkTools = enabledSearch
+         ? [
+             ...(tools || []),
+             {
+               type: 'web_search',
+               web_search: {
+                 enable: true,
+                 search_mode: process.env.SPARK_SEARCH_MODE || 'normal', // normal or deep
+                 /*
                  show_ref_label: true,
                  */
-           },
-         }
-       ] : tools;
+               },
+             },
+           ]
+         : tools;

        return {
          ...rest,
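Note: the Spark hunk above is a Prettier reflow plus a quote-style change; behavior is unchanged. The pattern it reformats is worth calling out: when `enabledSearch` is set, a provider-native `web_search` tool is appended to whatever tools the caller passed. A sketch of that pattern in isolation — `SparkTool` and `withWebSearch` are illustrative names, not exports of the package:

// Conditionally append Spark's native web_search tool to the caller's tool list.
type SparkTool =
  | { function: unknown; type: 'function' }
  | { type: 'web_search'; web_search: { enable: boolean; search_mode: string } };

const withWebSearch = (enabledSearch?: boolean, tools?: SparkTool[]) =>
  enabledSearch
    ? [
        ...(tools || []),
        {
          type: 'web_search' as const,
          // SPARK_SEARCH_MODE may be 'normal' (default) or 'deep'
          web_search: { enable: true, search_mode: process.env.SPARK_SEARCH_MODE || 'normal' },
        },
      ]
    : tools;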
package/src/libs/model-runtime/stepfun/index.ts CHANGED
@@ -1,13 +1,13 @@
  import type { ChatModelCard } from '@/types/llm';

  import { ModelProvider } from '../types';
- import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+ import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';

  export interface StepfunModelCard {
    id: string;
  }

- export const LobeStepfunAI = LobeOpenAICompatibleFactory({
+ export const LobeStepfunAI = createOpenAICompatibleRuntime({
    baseURL: 'https://api.stepfun.com/v1',
    chatCompletion: {
      handlePayload: (payload) => {
package/src/libs/model-runtime/taichu/index.ts CHANGED
@@ -1,9 +1,9 @@
  import OpenAI from 'openai';

  import { ChatStreamPayload, ModelProvider } from '../types';
- import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+ import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';

- export const LobeTaichuAI = LobeOpenAICompatibleFactory({
+ export const LobeTaichuAI = createOpenAICompatibleRuntime({
    baseURL: 'https://ai-maas.wair.ac.cn/maas/v1',
    chatCompletion: {
      handlePayload: (payload: ChatStreamPayload) => {
package/src/libs/model-runtime/tencentcloud/index.ts CHANGED
@@ -1,13 +1,13 @@
  import type { ChatModelCard } from '@/types/llm';

  import { ModelProvider } from '../types';
- import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+ import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';

  export interface TencentCloudModelCard {
    id: string;
  }

- export const LobeTencentCloudAI = LobeOpenAICompatibleFactory({
+ export const LobeTencentCloudAI = createOpenAICompatibleRuntime({
    baseURL: 'https://api.lkeap.cloud.tencent.com/v1',
    debug: {
      chatCompletion: () => process.env.DEBUG_TENCENT_CLOUD_CHAT_COMPLETION === '1',
package/src/libs/model-runtime/togetherai/index.ts CHANGED
@@ -1,10 +1,10 @@
  import type { ChatModelCard } from '@/types/llm';

  import { ModelProvider } from '../types';
- import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+ import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
  import { TogetherAIModel } from './type';

- export const LobeTogetherAI = LobeOpenAICompatibleFactory({
+ export const LobeTogetherAI = createOpenAICompatibleRuntime({
    baseURL: 'https://api.together.xyz/v1',
    constructorOptions: {
      defaultHeaders: {
package/src/libs/model-runtime/types/chat.ts CHANGED
@@ -137,7 +137,7 @@ export interface ChatStreamPayload {
    top_p?: number;
  }

- export interface ChatCompetitionOptions {
+ export interface ChatMethodOptions {
    callback?: ChatStreamCallbacks;
    /**
     * response headers
package/src/libs/model-runtime/upstage/index.ts CHANGED
@@ -1,7 +1,7 @@
  import { ModelProvider } from '../types';
- import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+ import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';

- export const LobeUpstageAI = LobeOpenAICompatibleFactory({
+ export const LobeUpstageAI = createOpenAICompatibleRuntime({
    baseURL: 'https://api.upstage.ai/v1/solar',
    debug: {
      chatCompletion: () => process.env.DEBUG_UPSTAGE_CHAT_COMPLETION === '1',
package/src/libs/model-runtime/utils/openaiCompatibleFactory/index.test.ts CHANGED
@@ -14,7 +14,7 @@ import officalOpenAIModels from '@/libs/model-runtime/openai/fixtures/openai-mod
  import { sleep } from '@/utils/sleep';

  import * as debugStreamModule from '../debugStream';
- import { LobeOpenAICompatibleFactory } from './index';
+ import { createOpenAICompatibleRuntime } from './index';

  const provider = 'groq';
  const defaultBaseURL = 'https://api.groq.com/openai/v1';
@@ -26,7 +26,7 @@ vi.spyOn(console, 'error').mockImplementation(() => {});

  let instance: LobeOpenAICompatibleRuntime;

- const LobeMockProvider = LobeOpenAICompatibleFactory({
+ const LobeMockProvider = createOpenAICompatibleRuntime({
    baseURL: defaultBaseURL,
    chatCompletion: {
      handleError: (error) => {
@@ -426,7 +426,7 @@ describe('LobeOpenAICompatibleFactory', () => {

  describe('noUserId option', () => {
    it('should not add user to payload when noUserId is true', async () => {
-     const LobeMockProvider = LobeOpenAICompatibleFactory({
+     const LobeMockProvider = createOpenAICompatibleRuntime({
        baseURL: 'https://api.mistral.ai/v1',
        chatCompletion: {
          noUserId: true,
@@ -457,7 +457,7 @@ describe('LobeOpenAICompatibleFactory', () => {
    });

    it('should add user to payload when noUserId is false', async () => {
-     const LobeMockProvider = LobeOpenAICompatibleFactory({
+     const LobeMockProvider = createOpenAICompatibleRuntime({
        baseURL: 'https://api.mistral.ai/v1',
        chatCompletion: {
          noUserId: false,
@@ -488,7 +488,7 @@ describe('LobeOpenAICompatibleFactory', () => {
    });

    it('should add user to payload when noUserId is not set in chatCompletion', async () => {
-     const LobeMockProvider = LobeOpenAICompatibleFactory({
+     const LobeMockProvider = createOpenAICompatibleRuntime({
        baseURL: 'https://api.mistral.ai/v1',
        provider: ModelProvider.Mistral,
      });
@@ -831,7 +831,7 @@ describe('LobeOpenAICompatibleFactory', () => {
      },
    );

-   const LobeMockProvider = LobeOpenAICompatibleFactory({
+   const LobeMockProvider = createOpenAICompatibleRuntime({
      baseURL: 'https://api.test.com/v1',
      chatCompletion: {
        handleStream: customStreamHandler,
@@ -890,7 +890,7 @@ describe('LobeOpenAICompatibleFactory', () => {
      });
    });

-   const LobeMockProvider = LobeOpenAICompatibleFactory({
+   const LobeMockProvider = createOpenAICompatibleRuntime({
      baseURL: 'https://api.test.com/v1',
      chatCompletion: {
        handleTransformResponseToStream: customTransformHandler,
package/src/libs/model-runtime/utils/openaiCompatibleFactory/index.ts CHANGED
@@ -9,8 +9,8 @@ import type { ChatModelCard } from '@/types/llm';
  import { LobeRuntimeAI } from '../../BaseAI';
  import { AgentRuntimeErrorType, ILobeAgentRuntimeErrorType } from '../../error';
  import type {
-   ChatCompetitionOptions,
    ChatCompletionErrorPayload,
+   ChatMethodOptions,
    ChatStreamPayload,
    Embeddings,
    EmbeddingsOptions,
@@ -150,7 +150,7 @@ export function transformResponseToStream(data: OpenAI.ChatCompletion) {
    });
  }

- export const LobeOpenAICompatibleFactory = <T extends Record<string, any> = any>({
+ export const createOpenAICompatibleRuntime = <T extends Record<string, any> = any>({
    provider,
    baseURL: DEFAULT_BASE_URL,
    apiKey: DEFAULT_API_LEY,
@@ -199,7 +199,7 @@ export const LobeOpenAICompatibleFactory = <T extends Record<string, any> = any>
      this.id = options.id || provider;
    }

-   async chat({ responseMode, ...payload }: ChatStreamPayload, options?: ChatCompetitionOptions) {
+   async chat({ responseMode, ...payload }: ChatStreamPayload, options?: ChatMethodOptions) {
      try {
        const inputStartAt = Date.now();
        const postPayload = chatCompletion?.handlePayload
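Note: this is the hunk all the provider-level renames trace back to: the factory itself becomes `createOpenAICompatibleRuntime`, and `ChatCompetitionOptions` (a long-standing misspelling of "Completion") becomes `ChatMethodOptions`. Only names change; a hedged sketch of a caller after the rename, assuming the runtime classes keep their constructor shape and the `callback` field referenced in types/chat.ts:

// Sketch of a caller after the rename; constructor options and callback shape
// are assumptions based on the diffs above, not verified signatures.
const runtime = new LobeOpenAI({ apiKey: process.env.OPENAI_API_KEY });

await runtime.chat(
  { messages: [{ content: 'Hello', role: 'user' }], model: 'gpt-4o-mini', temperature: 1 },
  // the second parameter is now typed ChatMethodOptions (formerly ChatCompetitionOptions)
  { callback: { onCompletion: async () => {} } },
);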
package/src/libs/model-runtime/vllm/index.ts CHANGED
@@ -1,13 +1,13 @@
  import type { ChatModelCard } from '@/types/llm';

  import { ModelProvider } from '../types';
- import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+ import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';

  export interface VLLMModelCard {
    id: string;
  }

- export const LobeVLLMAI = LobeOpenAICompatibleFactory({
+ export const LobeVLLMAI = createOpenAICompatibleRuntime({
    baseURL: 'http://localhost:8000/v1',
    debug: {
      chatCompletion: () => process.env.DEBUG_VLLM_CHAT_COMPLETION === '1',
package/src/libs/model-runtime/volcengine/index.ts CHANGED
@@ -1,7 +1,7 @@
  import { ModelProvider } from '../types';
- import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+ import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';

- export const LobeVolcengineAI = LobeOpenAICompatibleFactory({
+ export const LobeVolcengineAI = createOpenAICompatibleRuntime({
    baseURL: 'https://ark.cn-beijing.volces.com/api/v3',
    chatCompletion: {
      handlePayload: (payload) => {
package/src/libs/model-runtime/wenxin/index.ts CHANGED
@@ -1,7 +1,7 @@
  import { ModelProvider } from '../types';
- import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+ import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';

- export const LobeWenxinAI = LobeOpenAICompatibleFactory({
+ export const LobeWenxinAI = createOpenAICompatibleRuntime({
    baseURL: 'https://qianfan.baidubce.com/v2',
    chatCompletion: {
      handlePayload: (payload) => {
package/src/libs/model-runtime/xai/index.ts CHANGED
@@ -1,13 +1,13 @@
  import type { ChatModelCard } from '@/types/llm';

  import { ModelProvider } from '../types';
- import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+ import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';

  export interface XAIModelCard {
    id: string;
  }

- export const LobeXAI = LobeOpenAICompatibleFactory({
+ export const LobeXAI = createOpenAICompatibleRuntime({
    baseURL: 'https://api.x.ai/v1',
    chatCompletion: {
      handlePayload: (payload) => {
@@ -21,7 +21,10 @@ export const LobeXAI = LobeOpenAICompatibleFactory({
        stream: true,
        ...(enabledSearch && {
          search_parameters: {
-           max_search_results: Math.min(Math.max(parseInt(process.env.XAI_MAX_SEARCH_RESULTS ?? '15', 10), 1), 30),
+           max_search_results: Math.min(
+             Math.max(parseInt(process.env.XAI_MAX_SEARCH_RESULTS ?? '15', 10), 1),
+             30,
+           ),
            mode: 'auto',
            return_citations: true,
            sources: [
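Note: the xAI change is formatting-only; the expression still clamps `XAI_MAX_SEARCH_RESULTS` (default 15) into the 1–30 range. Extracted for readability, with one caveat worth knowing:

// Equivalent clamp, extracted. Caveat: a non-numeric env value parses to NaN,
// and Math.min/Math.max propagate NaN rather than falling back to the default.
const clamp = (value: number, min: number, max: number) => Math.min(Math.max(value, min), max);
const maxSearchResults = clamp(parseInt(process.env.XAI_MAX_SEARCH_RESULTS ?? '15', 10), 1, 30);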
package/src/libs/model-runtime/xinference/index.ts CHANGED
@@ -1,7 +1,7 @@
  import type { ChatModelCard } from '@/types/llm';

  import { ModelProvider } from '../types';
- import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+ import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';

  export interface XinferenceModelCard {
    context_length: number;
@@ -12,7 +12,7 @@ export interface XinferenceModelCard {
    name: string;
  }

- export const LobeXinferenceAI = LobeOpenAICompatibleFactory({
+ export const LobeXinferenceAI = createOpenAICompatibleRuntime({
    baseURL: 'http://localhost:9997/v1',
    debug: {
      chatCompletion: () => process.env.DEBUG_XINFERENCE_CHAT_COMPLETION === '1',
package/src/libs/model-runtime/zeroone/index.ts CHANGED
@@ -1,13 +1,13 @@
  import type { ChatModelCard } from '@/types/llm';

  import { ModelProvider } from '../types';
- import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+ import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';

  export interface ZeroOneModelCard {
    id: string;
  }

- export const LobeZeroOneAI = LobeOpenAICompatibleFactory({
+ export const LobeZeroOneAI = createOpenAICompatibleRuntime({
    baseURL: 'https://api.lingyiwanwu.com/v1',
    debug: {
      chatCompletion: () => process.env.DEBUG_ZEROONE_CHAT_COMPLETION === '1',
package/src/libs/model-runtime/zhipu/index.ts CHANGED
@@ -1,7 +1,7 @@
  import type { ChatModelCard } from '@/types/llm';

  import { ModelProvider } from '../types';
- import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+ import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';

  export interface ZhipuModelCard {
    description: string;
@@ -9,7 +9,7 @@ export interface ZhipuModelCard {
    modelName: string;
  }

- export const LobeZhipuAI = LobeOpenAICompatibleFactory({
+ export const LobeZhipuAI = createOpenAICompatibleRuntime({
    baseURL: 'https://open.bigmodel.cn/api/paas/v4',
    chatCompletion: {
      handlePayload: (payload) => {
package/src/middleware.ts CHANGED
@@ -206,7 +206,9 @@ const clerkAuthMiddleware = clerkMiddleware(
  async (auth, req) => {
    logClerk('Clerk middleware processing request: %s %s', req.method, req.url);

-   const isProtected = isProtectedRoute(req);
+   // when auth protection is enabled, only public routes stay open; everything else is protected
+   const isProtected = appEnv.ENABLE_AUTH_PROTECTION ? !isPublicRoute(req) : isProtectedRoute(req);
+
    logClerk('Route protection status: %s, %s', req.url, isProtected ? 'protected' : 'public');

    if (isProtected) {
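Note: this is a behavioral fix. The Clerk branch previously consulted only `isProtectedRoute`, ignoring ENABLE_AUTH_PROTECTION; the flag now flips the model from a denylist to an allowlist. A sketch of the two modes using Clerk's `createRouteMatcher` and `auth.protect()` — the route patterns are illustrative, and the real code reads the flag from a parsed `appEnv` rather than raw `process.env`:

// Allowlist vs. denylist route protection under clerkMiddleware.
import { clerkMiddleware, createRouteMatcher } from '@clerk/nextjs/server';

const isPublicRoute = createRouteMatcher(['/login(.*)', '/signup(.*)']); // illustrative
const isProtectedRoute = createRouteMatcher(['/settings(.*)', '/files(.*)']); // illustrative

export default clerkMiddleware(async (auth, req) => {
  const enableAuthProtection = process.env.ENABLE_AUTH_PROTECTION === '1';
  const isProtected = enableAuthProtection
    ? !isPublicRoute(req) // allowlist: everything not explicitly public needs auth
    : isProtectedRoute(req); // denylist: only matched routes need auth

  if (isProtected) await auth.protect();
});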
package/src/server/globalConfig/index.ts CHANGED
@@ -40,6 +40,9 @@ export const getServerGlobalConfig = async () => {
      enabled: isDesktop ? true : undefined,
      fetchOnClient: isDesktop ? false : !process.env.OLLAMA_PROXY_URL,
    },
+   qwen: {
+     withDeploymentName: true,
+   },
    tencentcloud: {
      enabledKey: 'ENABLED_TENCENT_CLOUD',
      modelListKey: 'TENCENT_CLOUD_MODEL_LIST',
package/src/server/routers/tools/search.test.ts CHANGED
@@ -104,8 +104,7 @@ describe('searchRouter', () => {
        query: 'test query',
      });

-     expect(result).toEqual({
-       costTime: 0,
+     expect(result).toMatchObject({
        query: 'test query',
        results: [
          {
@@ -139,8 +138,7 @@ describe('searchRouter', () => {
        query: 'test query',
      });

-     expect(result).toEqual({
-       costTime: 0,
+     expect(result).toMatchObject({
        query: 'test query',
        results: [
          {
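Note: switching from `toEqual` to `toMatchObject` drops the brittle `costTime: 0` pin. `toMatchObject` asserts a subset of the received object, so a real, nonzero measured duration no longer fails the test:

// toMatchObject passes even when extra fields (like a nonzero costTime) are present.
expect({ costTime: 42, query: 'test query', results: [] }).toMatchObject({
  query: 'test query',
  results: [],
});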
package/src/services/chat.ts CHANGED
@@ -300,6 +300,7 @@ class ChatService {
      ModelProvider.Azure,
      ModelProvider.Volcengine,
      ModelProvider.AzureAI,
+     ModelProvider.Qwen,
    ] as string[];

    if (providersWithDeploymentName.includes(provider)) {
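Note: together with the `qwen: { withDeploymentName: true }` entry in globalConfig above and the new `Qwen` member of AiProviderSDKEnum at the end of this diff, this line enables Azure-style deployment names for Qwen model entries. A sketch of the resolution step, assuming the `modelId->deploymentName` convention described in the model-list docs touched by this release — the helper name is hypothetical:

// Hypothetical helper: if the configured id embeds a deployment name,
// send the deployment name as the `model` field instead of the raw id.
const resolveDeploymentModel = (model: string): string => {
  const [, deploymentName] = model.split('->'); // e.g. 'qwen-max->my-deploy'
  return deploymentName ?? model;
};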
package/src/store/chat/slices/aiChat/actions/__tests__/generateAIChat.test.ts CHANGED
@@ -671,14 +671,16 @@ describe('chatMessage actions', () => {
      (chatService.createAssistantMessage as Mock).mockResolvedValue(aiResponse);
      const spy = vi.spyOn(chatService, 'createAssistantMessageStream');
      // mock the message creation
-     (messageService.createMessage as Mock).mockResolvedValue('assistant-message-id');
+     const createMessageSpyOn = vi
+       .spyOn(messageService, 'createMessage')
+       .mockResolvedValue('assistant-message-id');

      await act(async () => {
        await result.current.internal_coreProcessMessage(messages, userMessage.id);
      });

      // verify that a message representing the AI response was created
-     expect(messageService.createMessage).toHaveBeenCalledWith(
+     expect(createMessageSpyOn).toHaveBeenCalledWith(
        expect.objectContaining({
          role: 'assistant',
          content: LOADING_FLAT,
package/src/store/chat/slices/message/action.test.ts CHANGED
@@ -369,11 +369,12 @@ describe('chatMessage actions', () => {
      const messageId = 'message-id';
      const newContent = 'Updated content';

+     const spy = vi.spyOn(messageService, 'updateMessage');
      await act(async () => {
        await result.current.internal_updateMessageContent(messageId, newContent);
      });

-     expect(messageService.updateMessage).toHaveBeenCalledWith(messageId, { content: newContent });
+     expect(spy).toHaveBeenCalledWith(messageId, { content: newContent });
    });

    it('should dispatch message update action', async () => {
package/src/store/chat/slices/topic/action.test.ts CHANGED
@@ -264,7 +264,8 @@ describe('topic action', () => {
      const topicId = 'topic-id';
      const newTitle = 'Updated Topic Title';
      // Mock topicService.updateTopic to resolve immediately
-     (topicService.updateTopic as Mock).mockResolvedValue(undefined);
+
+     const spyOn = vi.spyOn(topicService, 'updateTopic');

      const { result } = renderHook(() => useChatStore());

@@ -276,7 +277,7 @@
      });

      // Verify that topicService.updateTopic was called with correct parameters
-     expect(topicService.updateTopic).toHaveBeenCalledWith(topicId, {
+     expect(spyOn).toHaveBeenCalledWith(topicId, {
        title: 'Updated Topic Title',
      });

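Note: the three store-test hunks above share one cleanup: replacing `(service.method as Mock).mockResolvedValue(...)` casts with `vi.spyOn`, which preserves the method's real typing and returns a handle to assert on. A sketch of the pattern — imports shown for completeness, and the resolved value is simplified:

// vi.spyOn returns a typed mock handle scoped to this test.
import { expect, vi } from 'vitest';
import { topicService } from '@/services/topic';

const spy = vi.spyOn(topicService, 'updateTopic').mockResolvedValue(undefined as any);
// ...run the store action under test...
expect(spy).toHaveBeenCalledWith('topic-id', { title: 'Updated Topic Title' });
vi.restoreAllMocks();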
package/src/types/aiProvider.ts CHANGED
@@ -23,6 +23,7 @@ export const AiProviderSDKEnum = {
    Huggingface: 'huggingface',
    Ollama: 'ollama',
    Openai: 'openai',
+   Qwen: 'qwen',
    Volcengine: 'volcengine',
  } as const;
