@lobehub/chat 1.90.3 → 1.90.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (85)
  1. package/.cursor/rules/backend-architecture.mdc +12 -9
  2. package/.cursor/rules/cursor-ux-optimize.mdc +1 -1
  3. package/.cursor/rules/define-database-model.mdc +1 -1
  4. package/.cursor/rules/drizzle-schema-style-guide.mdc +1 -1
  5. package/.cursor/rules/i18n/i18n.mdc +1 -1
  6. package/.cursor/rules/project-introduce.mdc +2 -1
  7. package/.cursor/rules/system-role.mdc +42 -0
  8. package/.cursor/rules/zustand-action-patterns.mdc +318 -0
  9. package/.cursor/rules/zustand-slice-organization.mdc +300 -0
  10. package/CHANGELOG.md +33 -0
  11. package/README.md +2 -2
  12. package/README.zh-CN.md +2 -2
  13. package/changelog/v1.json +12 -0
  14. package/docs/self-hosting/advanced/model-list.mdx +1 -1
  15. package/docs/self-hosting/advanced/model-list.zh-CN.mdx +1 -1
  16. package/docs/self-hosting/environment-variables/model-provider.mdx +2 -2
  17. package/docs/self-hosting/environment-variables/model-provider.zh-CN.mdx +2 -2
  18. package/package.json +44 -44
  19. package/src/features/PluginsUI/Render/utils/iframeOnReady.test.ts +1 -1
  20. package/src/features/PluginsUI/Render/utils/pluginSettings.test.ts +1 -1
  21. package/src/features/PluginsUI/Render/utils/pluginState.test.ts +1 -1
  22. package/src/libs/model-runtime/BaseAI.ts +3 -3
  23. package/src/libs/model-runtime/ModelRuntime.ts +2 -2
  24. package/src/libs/model-runtime/UniformRuntime/index.ts +2 -2
  25. package/src/libs/model-runtime/ai21/index.ts +2 -2
  26. package/src/libs/model-runtime/ai360/index.ts +2 -2
  27. package/src/libs/model-runtime/anthropic/index.ts +15 -11
  28. package/src/libs/model-runtime/azureOpenai/index.ts +2 -2
  29. package/src/libs/model-runtime/azureai/index.ts +4 -4
  30. package/src/libs/model-runtime/baichuan/index.ts +2 -2
  31. package/src/libs/model-runtime/bedrock/index.ts +4 -4
  32. package/src/libs/model-runtime/cloudflare/index.ts +2 -2
  33. package/src/libs/model-runtime/cohere/index.ts +2 -2
  34. package/src/libs/model-runtime/deepseek/index.ts +2 -2
  35. package/src/libs/model-runtime/fireworksai/index.ts +2 -2
  36. package/src/libs/model-runtime/giteeai/index.ts +2 -2
  37. package/src/libs/model-runtime/github/index.ts +2 -2
  38. package/src/libs/model-runtime/google/index.ts +7 -5
  39. package/src/libs/model-runtime/groq/index.ts +2 -2
  40. package/src/libs/model-runtime/higress/index.ts +2 -2
  41. package/src/libs/model-runtime/huggingface/index.ts +2 -2
  42. package/src/libs/model-runtime/hunyuan/index.ts +2 -2
  43. package/src/libs/model-runtime/index.ts +1 -1
  44. package/src/libs/model-runtime/infiniai/index.ts +2 -2
  45. package/src/libs/model-runtime/internlm/index.ts +7 -9
  46. package/src/libs/model-runtime/jina/index.ts +2 -2
  47. package/src/libs/model-runtime/lmstudio/index.ts +2 -2
  48. package/src/libs/model-runtime/minimax/index.ts +2 -2
  49. package/src/libs/model-runtime/mistral/index.ts +2 -2
  50. package/src/libs/model-runtime/modelscope/index.ts +2 -3
  51. package/src/libs/model-runtime/moonshot/index.ts +2 -2
  52. package/src/libs/model-runtime/novita/index.ts +2 -2
  53. package/src/libs/model-runtime/nvidia/index.ts +2 -2
  54. package/src/libs/model-runtime/ollama/index.ts +2 -2
  55. package/src/libs/model-runtime/openai/index.ts +3 -3
  56. package/src/libs/model-runtime/openrouter/index.ts +2 -2
  57. package/src/libs/model-runtime/perplexity/index.ts +2 -2
  58. package/src/libs/model-runtime/ppio/index.ts +2 -2
  59. package/src/libs/model-runtime/qiniu/index.ts +2 -2
  60. package/src/libs/model-runtime/qwen/index.ts +2 -2
  61. package/src/libs/model-runtime/sambanova/index.ts +2 -2
  62. package/src/libs/model-runtime/search1api/index.ts +2 -2
  63. package/src/libs/model-runtime/sensenova/index.ts +2 -2
  64. package/src/libs/model-runtime/siliconcloud/index.ts +2 -2
  65. package/src/libs/model-runtime/spark/index.ts +15 -13
  66. package/src/libs/model-runtime/stepfun/index.ts +2 -2
  67. package/src/libs/model-runtime/taichu/index.ts +2 -2
  68. package/src/libs/model-runtime/tencentcloud/index.ts +2 -2
  69. package/src/libs/model-runtime/togetherai/index.ts +2 -2
  70. package/src/libs/model-runtime/types/chat.ts +1 -1
  71. package/src/libs/model-runtime/upstage/index.ts +2 -2
  72. package/src/libs/model-runtime/utils/openaiCompatibleFactory/index.test.ts +7 -7
  73. package/src/libs/model-runtime/utils/openaiCompatibleFactory/index.ts +3 -3
  74. package/src/libs/model-runtime/vllm/index.ts +2 -2
  75. package/src/libs/model-runtime/volcengine/index.ts +2 -2
  76. package/src/libs/model-runtime/wenxin/index.ts +2 -2
  77. package/src/libs/model-runtime/xai/index.ts +6 -3
  78. package/src/libs/model-runtime/xinference/index.ts +2 -2
  79. package/src/libs/model-runtime/zeroone/index.ts +2 -2
  80. package/src/libs/model-runtime/zhipu/index.ts +2 -2
  81. package/src/middleware.ts +3 -1
  82. package/src/server/routers/tools/search.test.ts +2 -4
  83. package/src/store/chat/slices/aiChat/actions/__tests__/generateAIChat.test.ts +4 -2
  84. package/src/store/chat/slices/message/action.test.ts +2 -1
  85. package/src/store/chat/slices/topic/action.test.ts +3 -2
package/src/libs/model-runtime/mistral/index.ts CHANGED
@@ -1,7 +1,7 @@
  import type { ChatModelCard } from '@/types/llm';

  import { ModelProvider } from '../types';
- import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+ import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';

  export interface MistralModelCard {
    capabilities: {
@@ -13,7 +13,7 @@ export interface MistralModelCard {
    max_context_length: number;
  }

- export const LobeMistralAI = LobeOpenAICompatibleFactory({
+ export const LobeMistralAI = createOpenAICompatibleRuntime({
    baseURL: 'https://api.mistral.ai/v1',
    chatCompletion: {
      // Mistral API does not support stream_options: { include_usage: true }
package/src/libs/model-runtime/modelscope/index.ts CHANGED
@@ -1,8 +1,7 @@
-
  import type { ChatModelCard } from '@/types/llm';

  import { ModelProvider } from '../types';
- import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+ import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';

  export interface ModelScopeModelCard {
    created: number;
@@ -11,7 +10,7 @@ export interface ModelScopeModelCard {
    owned_by: string;
  }

- export const LobeModelScopeAI = LobeOpenAICompatibleFactory({
+ export const LobeModelScopeAI = createOpenAICompatibleRuntime({
    baseURL: 'https://api-inference.modelscope.cn/v1',
    debug: {
      chatCompletion: () => process.env.DEBUG_MODELSCOPE_CHAT_COMPLETION === '1',
package/src/libs/model-runtime/moonshot/index.ts CHANGED
@@ -1,13 +1,13 @@
  import type { ChatModelCard } from '@/types/llm';

  import { ChatStreamPayload, ModelProvider } from '../types';
- import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+ import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';

  export interface MoonshotModelCard {
    id: string;
  }

- export const LobeMoonshotAI = LobeOpenAICompatibleFactory({
+ export const LobeMoonshotAI = createOpenAICompatibleRuntime({
    baseURL: 'https://api.moonshot.cn/v1',
    chatCompletion: {
      handlePayload: (payload: ChatStreamPayload) => {
package/src/libs/model-runtime/novita/index.ts CHANGED
@@ -1,10 +1,10 @@
  import type { ChatModelCard } from '@/types/llm';

  import { ModelProvider } from '../types';
- import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+ import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
  import { NovitaModelCard } from './type';

- export const LobeNovitaAI = LobeOpenAICompatibleFactory({
+ export const LobeNovitaAI = createOpenAICompatibleRuntime({
    baseURL: 'https://api.novita.ai/v3/openai',
    constructorOptions: {
      defaultHeaders: {
package/src/libs/model-runtime/nvidia/index.ts CHANGED
@@ -1,13 +1,13 @@
  import type { ChatModelCard } from '@/types/llm';

  import { ModelProvider } from '../types';
- import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+ import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';

  export interface NvidiaModelCard {
    id: string;
  }

- export const LobeNvidiaAI = LobeOpenAICompatibleFactory({
+ export const LobeNvidiaAI = createOpenAICompatibleRuntime({
    baseURL: 'https://integrate.api.nvidia.com/v1',
    debug: {
      chatCompletion: () => process.env.DEBUG_NVIDIA_CHAT_COMPLETION === '1',
package/src/libs/model-runtime/ollama/index.ts CHANGED
@@ -8,7 +8,7 @@ import { createErrorResponse } from '@/utils/errorResponse';
  import { LobeRuntimeAI } from '../BaseAI';
  import { AgentRuntimeErrorType } from '../error';
  import {
-   ChatCompetitionOptions,
+   ChatMethodOptions,
    ChatStreamPayload,
    Embeddings,
    EmbeddingsPayload,
@@ -43,7 +43,7 @@ export class LobeOllamaAI implements LobeRuntimeAI {
      if (baseURL) this.baseURL = baseURL;
    }

-   async chat(payload: ChatStreamPayload, options?: ChatCompetitionOptions) {
+   async chat(payload: ChatStreamPayload, options?: ChatMethodOptions) {
      try {
        const abort = () => {
          this.client.abort();
package/src/libs/model-runtime/openai/index.ts CHANGED
@@ -1,7 +1,7 @@
  import type { ChatModelCard } from '@/types/llm';

  import { ModelProvider } from '../types';
- import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+ import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
  import { pruneReasoningPayload } from '../utils/openaiHelpers';

  export interface OpenAIModelCard {
@@ -10,13 +10,13 @@ export interface OpenAIModelCard {

  const prunePrefixes = ['o1', 'o3', 'o4'];

- export const LobeOpenAI = LobeOpenAICompatibleFactory({
+ export const LobeOpenAI = createOpenAICompatibleRuntime({
    baseURL: 'https://api.openai.com/v1',
    chatCompletion: {
      handlePayload: (payload) => {
        const { model } = payload;

-       if (prunePrefixes.some(prefix => model.startsWith(prefix))) {
+       if (prunePrefixes.some((prefix) => model.startsWith(prefix))) {
          return pruneReasoningPayload(payload) as any;
        }

package/src/libs/model-runtime/openrouter/index.ts CHANGED
@@ -1,7 +1,7 @@
  import type { ChatModelCard } from '@/types/llm';

  import { ModelProvider } from '../types';
- import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+ import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
  import { OpenRouterModelCard, OpenRouterModelExtraInfo, OpenRouterReasoning } from './type';

  const formatPrice = (price: string) => {
@@ -9,7 +9,7 @@ const formatPrice = (price: string) => {
    return Number((Number(price) * 1e6).toPrecision(5));
  };

- export const LobeOpenRouterAI = LobeOpenAICompatibleFactory({
+ export const LobeOpenRouterAI = createOpenAICompatibleRuntime({
    baseURL: 'https://openrouter.ai/api/v1',
    chatCompletion: {
      handlePayload: (payload) => {
package/src/libs/model-runtime/perplexity/index.ts CHANGED
@@ -1,9 +1,9 @@
  import OpenAI from 'openai';

  import { ChatStreamPayload, ModelProvider } from '../types';
- import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+ import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';

- export const LobePerplexityAI = LobeOpenAICompatibleFactory({
+ export const LobePerplexityAI = createOpenAICompatibleRuntime({
    baseURL: 'https://api.perplexity.ai',
    chatCompletion: {
      handlePayload: (payload: ChatStreamPayload) => {
package/src/libs/model-runtime/ppio/index.ts CHANGED
@@ -1,10 +1,10 @@
  import type { ChatModelCard } from '@/types/llm';

  import { ModelProvider } from '../types';
- import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+ import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
  import { PPIOModelCard } from './type';

- export const LobePPIOAI = LobeOpenAICompatibleFactory({
+ export const LobePPIOAI = createOpenAICompatibleRuntime({
    baseURL: 'https://api.ppinfra.com/v3/openai',
    constructorOptions: {
      defaultHeaders: {
package/src/libs/model-runtime/qiniu/index.ts CHANGED
@@ -1,13 +1,13 @@
  import type { ChatModelCard } from '@/types/llm';

  import { ModelProvider } from '../types';
- import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+ import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';

  export interface QiniuModelCard {
    id: string;
  }

- export const LobeQiniuAI = LobeOpenAICompatibleFactory({
+ export const LobeQiniuAI = createOpenAICompatibleRuntime({
    apiKey: 'placeholder-to-avoid-error',
    baseURL: 'https://api.qnaigc.com/v1',
    debug: {
package/src/libs/model-runtime/qwen/index.ts CHANGED
@@ -1,7 +1,7 @@
  import type { ChatModelCard } from '@/types/llm';

  import { ModelProvider } from '../types';
- import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+ import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
  import { QwenAIStream } from '../utils/streams';

  export interface QwenModelCard {
@@ -20,7 +20,7 @@ export const QwenLegacyModels = new Set([
    'qwen-1.8b-longcontext-chat',
  ]);

- export const LobeQwenAI = LobeOpenAICompatibleFactory({
+ export const LobeQwenAI = createOpenAICompatibleRuntime({
    baseURL: 'https://dashscope.aliyuncs.com/compatible-mode/v1',
    chatCompletion: {
      handlePayload: (payload) => {
package/src/libs/model-runtime/sambanova/index.ts CHANGED
@@ -1,7 +1,7 @@
  import { ModelProvider } from '../types';
- import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+ import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';

- export const LobeSambaNovaAI = LobeOpenAICompatibleFactory({
+ export const LobeSambaNovaAI = createOpenAICompatibleRuntime({
    baseURL: 'https://api.sambanova.ai/v1',
    debug: {
      chatCompletion: () => process.env.DEBUG_SAMBANOVA_CHAT_COMPLETION === '1',
package/src/libs/model-runtime/search1api/index.ts CHANGED
@@ -3,13 +3,13 @@ import OpenAI from 'openai';
  import type { ChatModelCard } from '@/types/llm';

  import { ChatStreamPayload, ModelProvider } from '../types';
- import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+ import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';

  export interface Search1APIModelCard {
    id: string;
  }

- export const LobeSearch1API = LobeOpenAICompatibleFactory({
+ export const LobeSearch1API = createOpenAICompatibleRuntime({
    baseURL: 'https://api.search1api.com/v1',
    chatCompletion: {
      handlePayload: (payload: ChatStreamPayload) => {
package/src/libs/model-runtime/sensenova/index.ts CHANGED
@@ -1,14 +1,14 @@
  import type { ChatModelCard } from '@/types/llm';

  import { ModelProvider } from '../types';
- import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+ import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
  import { convertSenseNovaMessage } from '../utils/sensenovaHelpers';

  export interface SenseNovaModelCard {
    id: string;
  }

- export const LobeSenseNovaAI = LobeOpenAICompatibleFactory({
+ export const LobeSenseNovaAI = createOpenAICompatibleRuntime({
    baseURL: 'https://api.sensenova.cn/compatible-mode/v1',
    chatCompletion: {
      handlePayload: (payload) => {
package/src/libs/model-runtime/siliconcloud/index.ts CHANGED
@@ -2,13 +2,13 @@ import type { ChatModelCard } from '@/types/llm';

  import { AgentRuntimeErrorType } from '../error';
  import { ChatCompletionErrorPayload, ModelProvider } from '../types';
- import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+ import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';

  export interface SiliconCloudModelCard {
    id: string;
  }

- export const LobeSiliconCloudAI = LobeOpenAICompatibleFactory({
+ export const LobeSiliconCloudAI = createOpenAICompatibleRuntime({
    baseURL: 'https://api.siliconflow.cn/v1',
    chatCompletion: {
      handleError: (error: any): Omit<ChatCompletionErrorPayload, 'provider'> | undefined => {
package/src/libs/model-runtime/spark/index.ts CHANGED
@@ -1,26 +1,28 @@
  import { ChatStreamPayload, ModelProvider } from '../types';
- import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+ import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
  import { SparkAIStream, transformSparkResponseToStream } from '../utils/streams';

- export const LobeSparkAI = LobeOpenAICompatibleFactory({
+ export const LobeSparkAI = createOpenAICompatibleRuntime({
    baseURL: 'https://spark-api-open.xf-yun.com/v1',
    chatCompletion: {
      handlePayload: (payload: ChatStreamPayload) => {
        const { enabledSearch, tools, ...rest } = payload;

-       const sparkTools = enabledSearch ? [
-         ...(tools || []),
-         {
-           type: "web_search",
-           web_search: {
-             enable: true,
-             search_mode: process.env.SPARK_SEARCH_MODE || "normal", // normal or deep
-             /*
+       const sparkTools = enabledSearch
+         ? [
+             ...(tools || []),
+             {
+               type: 'web_search',
+               web_search: {
+                 enable: true,
+                 search_mode: process.env.SPARK_SEARCH_MODE || 'normal', // normal or deep
+                 /*
                  show_ref_label: true,
                  */
-           },
-         }
-       ] : tools;
+               },
+             },
+           ]
+         : tools;

        return {
          ...rest,
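
The Spark change above is a Prettier-style reflow of the web-search branch; behavior is unchanged. A minimal standalone sketch of that branch for readability (the SparkTool type and buildSparkTools name are hypothetical, invented for illustration):

```ts
// Hypothetical stand-ins; the real payload types live in '../types'.
type SparkTool = { type: string; [key: string]: unknown };

const buildSparkTools = (enabledSearch: boolean, tools?: SparkTool[]) =>
  enabledSearch
    ? [
        ...(tools ?? []),
        {
          type: 'web_search',
          // SPARK_SEARCH_MODE may be 'normal' or 'deep'; defaults to 'normal'
          web_search: { enable: true, search_mode: process.env.SPARK_SEARCH_MODE || 'normal' },
        },
      ]
    : tools;

console.log(buildSparkTools(true));  // appends the web_search tool to the caller's tools
console.log(buildSparkTools(false)); // undefined: tools are passed through unchanged
```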
package/src/libs/model-runtime/stepfun/index.ts CHANGED
@@ -1,13 +1,13 @@
  import type { ChatModelCard } from '@/types/llm';

  import { ModelProvider } from '../types';
- import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+ import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';

  export interface StepfunModelCard {
    id: string;
  }

- export const LobeStepfunAI = LobeOpenAICompatibleFactory({
+ export const LobeStepfunAI = createOpenAICompatibleRuntime({
    baseURL: 'https://api.stepfun.com/v1',
    chatCompletion: {
      handlePayload: (payload) => {
package/src/libs/model-runtime/taichu/index.ts CHANGED
@@ -1,9 +1,9 @@
  import OpenAI from 'openai';

  import { ChatStreamPayload, ModelProvider } from '../types';
- import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+ import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';

- export const LobeTaichuAI = LobeOpenAICompatibleFactory({
+ export const LobeTaichuAI = createOpenAICompatibleRuntime({
    baseURL: 'https://ai-maas.wair.ac.cn/maas/v1',
    chatCompletion: {
      handlePayload: (payload: ChatStreamPayload) => {
package/src/libs/model-runtime/tencentcloud/index.ts CHANGED
@@ -1,13 +1,13 @@
  import type { ChatModelCard } from '@/types/llm';

  import { ModelProvider } from '../types';
- import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+ import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';

  export interface TencentCloudModelCard {
    id: string;
  }

- export const LobeTencentCloudAI = LobeOpenAICompatibleFactory({
+ export const LobeTencentCloudAI = createOpenAICompatibleRuntime({
    baseURL: 'https://api.lkeap.cloud.tencent.com/v1',
    debug: {
      chatCompletion: () => process.env.DEBUG_TENCENT_CLOUD_CHAT_COMPLETION === '1',
package/src/libs/model-runtime/togetherai/index.ts CHANGED
@@ -1,10 +1,10 @@
  import type { ChatModelCard } from '@/types/llm';

  import { ModelProvider } from '../types';
- import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+ import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
  import { TogetherAIModel } from './type';

- export const LobeTogetherAI = LobeOpenAICompatibleFactory({
+ export const LobeTogetherAI = createOpenAICompatibleRuntime({
    baseURL: 'https://api.together.xyz/v1',
    constructorOptions: {
      defaultHeaders: {
package/src/libs/model-runtime/types/chat.ts CHANGED
@@ -137,7 +137,7 @@ export interface ChatStreamPayload {
    top_p?: number;
  }

- export interface ChatCompetitionOptions {
+ export interface ChatMethodOptions {
    callback?: ChatStreamCallbacks;
    /**
     * response headers
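
The old name read like a typo for "completion"; the rename to ChatMethodOptions keeps the interface shape intact, so callers only need to update the identifier. If downstream code still imports the old name, a hedged migration shim (not part of this package) could bridge the gap:

```ts
// Hypothetical compatibility shim; the relative path mirrors the imports above.
import type { ChatMethodOptions } from '../types';

/** @deprecated Renamed to ChatMethodOptions in @lobehub/chat 1.90.4. */
export type ChatCompetitionOptions = ChatMethodOptions;
```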
package/src/libs/model-runtime/upstage/index.ts CHANGED
@@ -1,7 +1,7 @@
  import { ModelProvider } from '../types';
- import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+ import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';

- export const LobeUpstageAI = LobeOpenAICompatibleFactory({
+ export const LobeUpstageAI = createOpenAICompatibleRuntime({
    baseURL: 'https://api.upstage.ai/v1/solar',
    debug: {
      chatCompletion: () => process.env.DEBUG_UPSTAGE_CHAT_COMPLETION === '1',
package/src/libs/model-runtime/utils/openaiCompatibleFactory/index.test.ts CHANGED
@@ -14,7 +14,7 @@ import officalOpenAIModels from '@/libs/model-runtime/openai/fixtures/openai-mod
  import { sleep } from '@/utils/sleep';

  import * as debugStreamModule from '../debugStream';
- import { LobeOpenAICompatibleFactory } from './index';
+ import { createOpenAICompatibleRuntime } from './index';

  const provider = 'groq';
  const defaultBaseURL = 'https://api.groq.com/openai/v1';
@@ -26,7 +26,7 @@ vi.spyOn(console, 'error').mockImplementation(() => {});

  let instance: LobeOpenAICompatibleRuntime;

- const LobeMockProvider = LobeOpenAICompatibleFactory({
+ const LobeMockProvider = createOpenAICompatibleRuntime({
    baseURL: defaultBaseURL,
    chatCompletion: {
      handleError: (error) => {
@@ -426,7 +426,7 @@ describe('LobeOpenAICompatibleFactory', () => {

  describe('noUserId option', () => {
    it('should not add user to payload when noUserId is true', async () => {
-     const LobeMockProvider = LobeOpenAICompatibleFactory({
+     const LobeMockProvider = createOpenAICompatibleRuntime({
        baseURL: 'https://api.mistral.ai/v1',
        chatCompletion: {
          noUserId: true,
@@ -457,7 +457,7 @@ describe('LobeOpenAICompatibleFactory', () => {
    });

    it('should add user to payload when noUserId is false', async () => {
-     const LobeMockProvider = LobeOpenAICompatibleFactory({
+     const LobeMockProvider = createOpenAICompatibleRuntime({
        baseURL: 'https://api.mistral.ai/v1',
        chatCompletion: {
          noUserId: false,
@@ -488,7 +488,7 @@ describe('LobeOpenAICompatibleFactory', () => {
    });

    it('should add user to payload when noUserId is not set in chatCompletion', async () => {
-     const LobeMockProvider = LobeOpenAICompatibleFactory({
+     const LobeMockProvider = createOpenAICompatibleRuntime({
        baseURL: 'https://api.mistral.ai/v1',
        provider: ModelProvider.Mistral,
      });
@@ -831,7 +831,7 @@ describe('LobeOpenAICompatibleFactory', () => {
      },
    );

-   const LobeMockProvider = LobeOpenAICompatibleFactory({
+   const LobeMockProvider = createOpenAICompatibleRuntime({
      baseURL: 'https://api.test.com/v1',
      chatCompletion: {
        handleStream: customStreamHandler,
@@ -890,7 +890,7 @@ describe('LobeOpenAICompatibleFactory', () => {
    });
  });

- const LobeMockProvider = LobeOpenAICompatibleFactory({
+ const LobeMockProvider = createOpenAICompatibleRuntime({
    baseURL: 'https://api.test.com/v1',
    chatCompletion: {
      handleTransformResponseToStream: customTransformHandler,
package/src/libs/model-runtime/utils/openaiCompatibleFactory/index.ts CHANGED
@@ -9,8 +9,8 @@ import type { ChatModelCard } from '@/types/llm';
  import { LobeRuntimeAI } from '../../BaseAI';
  import { AgentRuntimeErrorType, ILobeAgentRuntimeErrorType } from '../../error';
  import type {
-   ChatCompetitionOptions,
    ChatCompletionErrorPayload,
+   ChatMethodOptions,
    ChatStreamPayload,
    Embeddings,
    EmbeddingsOptions,
@@ -150,7 +150,7 @@ export function transformResponseToStream(data: OpenAI.ChatCompletion) {
    });
  }

- export const LobeOpenAICompatibleFactory = <T extends Record<string, any> = any>({
+ export const createOpenAICompatibleRuntime = <T extends Record<string, any> = any>({
    provider,
    baseURL: DEFAULT_BASE_URL,
    apiKey: DEFAULT_API_LEY,
@@ -199,7 +199,7 @@ export const LobeOpenAICompatibleFactory = <T extends Record<string, any> = any>
      this.id = options.id || provider;
    }

-   async chat({ responseMode, ...payload }: ChatStreamPayload, options?: ChatCompetitionOptions) {
+   async chat({ responseMode, ...payload }: ChatStreamPayload, options?: ChatMethodOptions) {
      try {
        const inputStartAt = Date.now();
        const postPayload = chatCompletion?.handlePayload
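
This file is the source of the rename that threads through every provider above: the factory is now named for what it does (create a runtime). The call signature is unchanged, so each provider edit is a two-line swap of the import and the call. A minimal sketch of a provider built with the renamed factory (the provider itself is hypothetical; option names mirror the real diffs):

```ts
import { ModelProvider } from '../types';
import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';

// Hypothetical provider: the baseURL and debug env flag are invented for illustration.
export const LobeExampleAI = createOpenAICompatibleRuntime({
  baseURL: 'https://api.example.com/v1',
  debug: {
    chatCompletion: () => process.env.DEBUG_EXAMPLE_CHAT_COMPLETION === '1',
  },
  provider: ModelProvider.OpenAI, // a real provider would pass its own enum member
});
```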
package/src/libs/model-runtime/vllm/index.ts CHANGED
@@ -1,13 +1,13 @@
  import type { ChatModelCard } from '@/types/llm';

  import { ModelProvider } from '../types';
- import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+ import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';

  export interface VLLMModelCard {
    id: string;
  }

- export const LobeVLLMAI = LobeOpenAICompatibleFactory({
+ export const LobeVLLMAI = createOpenAICompatibleRuntime({
    baseURL: 'http://localhost:8000/v1',
    debug: {
      chatCompletion: () => process.env.DEBUG_VLLM_CHAT_COMPLETION === '1',
package/src/libs/model-runtime/volcengine/index.ts CHANGED
@@ -1,7 +1,7 @@
  import { ModelProvider } from '../types';
- import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+ import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';

- export const LobeVolcengineAI = LobeOpenAICompatibleFactory({
+ export const LobeVolcengineAI = createOpenAICompatibleRuntime({
    baseURL: 'https://ark.cn-beijing.volces.com/api/v3',
    chatCompletion: {
      handlePayload: (payload) => {
package/src/libs/model-runtime/wenxin/index.ts CHANGED
@@ -1,7 +1,7 @@
  import { ModelProvider } from '../types';
- import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+ import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';

- export const LobeWenxinAI = LobeOpenAICompatibleFactory({
+ export const LobeWenxinAI = createOpenAICompatibleRuntime({
    baseURL: 'https://qianfan.baidubce.com/v2',
    chatCompletion: {
      handlePayload: (payload) => {
package/src/libs/model-runtime/xai/index.ts CHANGED
@@ -1,13 +1,13 @@
  import type { ChatModelCard } from '@/types/llm';

  import { ModelProvider } from '../types';
- import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+ import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';

  export interface XAIModelCard {
    id: string;
  }

- export const LobeXAI = LobeOpenAICompatibleFactory({
+ export const LobeXAI = createOpenAICompatibleRuntime({
    baseURL: 'https://api.x.ai/v1',
    chatCompletion: {
      handlePayload: (payload) => {
@@ -21,7 +21,10 @@ export const LobeXAI = LobeOpenAICompatibleFactory({
          stream: true,
          ...(enabledSearch && {
            search_parameters: {
-             max_search_results: Math.min(Math.max(parseInt(process.env.XAI_MAX_SEARCH_RESULTS ?? '15', 10), 1), 30),
+             max_search_results: Math.min(
+               Math.max(parseInt(process.env.XAI_MAX_SEARCH_RESULTS ?? '15', 10), 1),
+               30,
+             ),
              mode: 'auto',
              return_citations: true,
              sources: [
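
Beyond the import swap, the xAI diff only reflows the clamp that bounds XAI_MAX_SEARCH_RESULTS to the range [1, 30], defaulting to 15 when unset. A standalone sketch of that clamp (the helper name is hypothetical):

```ts
// Clamp an env-provided count into [1, 30], defaulting to 15 when unset.
const clampSearchResults = (raw?: string): number =>
  Math.min(Math.max(parseInt(raw ?? '15', 10), 1), 30);

clampSearchResults();      // 15 - default
clampSearchResults('0');   // 1  - floored
clampSearchResults('100'); // 30 - capped
```

Note that non-numeric input falls through as NaN, matching the original expression.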
package/src/libs/model-runtime/xinference/index.ts CHANGED
@@ -1,7 +1,7 @@
  import type { ChatModelCard } from '@/types/llm';

  import { ModelProvider } from '../types';
- import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+ import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';

  export interface XinferenceModelCard {
    context_length: number;
@@ -12,7 +12,7 @@ export interface XinferenceModelCard {
    name: string;
  }

- export const LobeXinferenceAI = LobeOpenAICompatibleFactory({
+ export const LobeXinferenceAI = createOpenAICompatibleRuntime({
    baseURL: 'http://localhost:9997/v1',
    debug: {
      chatCompletion: () => process.env.DEBUG_XINFERENCE_CHAT_COMPLETION === '1',
package/src/libs/model-runtime/zeroone/index.ts CHANGED
@@ -1,13 +1,13 @@
  import type { ChatModelCard } from '@/types/llm';

  import { ModelProvider } from '../types';
- import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+ import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';

  export interface ZeroOneModelCard {
    id: string;
  }

- export const LobeZeroOneAI = LobeOpenAICompatibleFactory({
+ export const LobeZeroOneAI = createOpenAICompatibleRuntime({
    baseURL: 'https://api.lingyiwanwu.com/v1',
    debug: {
      chatCompletion: () => process.env.DEBUG_ZEROONE_CHAT_COMPLETION === '1',
package/src/libs/model-runtime/zhipu/index.ts CHANGED
@@ -1,7 +1,7 @@
  import type { ChatModelCard } from '@/types/llm';

  import { ModelProvider } from '../types';
- import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+ import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';

  export interface ZhipuModelCard {
    description: string;
@@ -9,7 +9,7 @@ export interface ZhipuModelCard {
    modelName: string;
  }

- export const LobeZhipuAI = LobeOpenAICompatibleFactory({
+ export const LobeZhipuAI = createOpenAICompatibleRuntime({
    baseURL: 'https://open.bigmodel.cn/api/paas/v4',
    chatCompletion: {
      handlePayload: (payload) => {
package/src/middleware.ts CHANGED
@@ -206,7 +206,9 @@ const clerkAuthMiddleware = clerkMiddleware(
    async (auth, req) => {
      logClerk('Clerk middleware processing request: %s %s', req.method, req.url);

-     const isProtected = isProtectedRoute(req);
+     // when enable auth protection, only public route is not protected, others are all protected
+     const isProtected = appEnv.ENABLE_AUTH_PROTECTION ? !isPublicRoute(req) : isProtectedRoute(req);
+
      logClerk('Route protection status: %s, %s', req.url, isProtected ? 'protected' : 'public');

      if (isProtected) {
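
The middleware fix makes ENABLE_AUTH_PROTECTION invert the default: with protection on, everything except explicitly public routes requires auth; with it off, only routes matching the protected list do. A truth-table sketch of the decision (the boolean arguments are hypothetical stand-ins for appEnv.ENABLE_AUTH_PROTECTION, isPublicRoute(req), and isProtectedRoute(req)):

```ts
// Hedged re-statement of the decision rule; not the actual middleware code.
const isRouteProtected = (
  enableAuthProtection: boolean,
  isPublic: boolean,
  matchesProtectedList: boolean,
): boolean => (enableAuthProtection ? !isPublic : matchesProtectedList);

isRouteProtected(true, true, false);  // false: public routes stay open
isRouteProtected(true, false, false); // true: every other route now requires auth
isRouteProtected(false, false, true); // true: default behavior, the matcher decides
```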