@lobehub/chat 1.120.1 → 1.120.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.env.example +3 -0
- package/CHANGELOG.md +50 -0
- package/Dockerfile +2 -0
- package/Dockerfile.database +2 -0
- package/Dockerfile.pglite +2 -0
- package/changelog/v1.json +18 -0
- package/docs/self-hosting/environment-variables/basic.mdx +0 -7
- package/docs/self-hosting/environment-variables/basic.zh-CN.mdx +0 -7
- package/next.config.ts +0 -2
- package/package.json +3 -3
- package/packages/const/src/url.ts +1 -2
- package/packages/model-bank/package.json +1 -0
- package/packages/model-bank/src/aiModels/index.ts +3 -0
- package/packages/model-bank/src/aiModels/nebius.ts +1046 -0
- package/packages/model-bank/src/aiModels/openrouter.ts +7 -37
- package/packages/model-runtime/src/ModelRuntime.test.ts +1 -1
- package/packages/model-runtime/src/index.ts +1 -0
- package/packages/model-runtime/src/nebius/index.ts +78 -0
- package/packages/model-runtime/src/runtimeMap.ts +2 -0
- package/packages/model-runtime/src/types/type.ts +1 -0
- package/packages/types/src/user/settings/keyVaults.ts +1 -0
- package/packages/web-crawler/package.json +1 -1
- package/src/app/[variants]/metadata.ts +1 -7
- package/src/components/Analytics/Google.tsx +1 -1
- package/src/components/Analytics/LobeAnalyticsProviderWrapper.tsx +1 -1
- package/src/components/Analytics/Vercel.tsx +1 -1
- package/src/components/Analytics/index.tsx +1 -1
- package/src/config/__tests__/analytics.test.ts +1 -1
- package/src/config/__tests__/client.test.ts +1 -1
- package/src/config/llm.ts +6 -0
- package/src/config/modelProviders/index.ts +3 -0
- package/src/config/modelProviders/nebius.ts +20 -0
- package/src/envs/app.ts +0 -3
- package/src/libs/analytics/index.ts +1 -1
- package/src/libs/traces/index.test.ts +1 -1
- package/src/libs/traces/index.ts +1 -1
- package/src/libs/trpc/client/edge.ts +1 -2
- package/src/libs/unstructured/index.ts +1 -1
- package/src/locales/create.ts +1 -1
- package/src/server/globalConfig/index.test.ts +1 -2
- package/src/server/globalConfig/index.ts +2 -2
- package/src/server/modules/ContentChunk/index.ts +1 -1
- package/src/server/routers/tools/search.test.ts +1 -1
- package/src/server/services/search/impls/searxng/index.test.ts +1 -1
- package/src/server/services/search/impls/searxng/index.ts +1 -1
- package/src/server/services/search/index.ts +1 -1
- package/src/services/_url.ts +6 -19
- package/src/services/share.ts +1 -2
- package/packages/utils/src/basePath.ts +0 -3
- /package/src/{config → envs}/analytics.ts +0 -0
- /package/src/{config → envs}/debug.ts +0 -0
- /package/src/{config → envs}/knowledge.ts +0 -0
- /package/src/{config → envs}/langfuse.ts +0 -0
- /package/src/{config → envs}/tools.ts +0 -0
package/packages/model-bank/src/aiModels/openrouter.ts
CHANGED
@@ -3,7 +3,7 @@ import { AIChatModelCard } from '../types/aiModel';
 // https://openrouter.ai/docs/api-reference/list-available-models
 const openrouterChatModels: AIChatModelCard[] = [
   {
-    contextWindowTokens:
+    contextWindowTokens: 2_000_000,
     description:
      'Your request will be routed to Llama 3 70B Instruct, Claude 3.5 Sonnet (self-moderated), or GPT-4o, depending on context length, topic, and complexity.',
     displayName: 'Auto (best for prompt)',
@@ -184,7 +184,7 @@ const openrouterChatModels: AIChatModelCard[] = [
     abilities: {
       reasoning: true,
     },
-    contextWindowTokens:
+    contextWindowTokens: 131_072,
     description:
      'Qwen3-235B-A22B is a 235B-parameter mixture-of-experts (MoE) model developed by Qwen, activating 22B parameters per forward pass. It supports seamless switching between a "thinking" mode for complex reasoning, math, and code tasks and a "non-thinking" mode for efficient general-purpose dialogue. The model demonstrates strong reasoning, multilingual support (100+ languages and dialects), advanced instruction following, and agentic tool-calling capabilities. It natively handles a 32K-token context window and extends to 131K tokens via YaRN-based scaling.',
     displayName: 'Qwen3 235B A22B (Free)',
@@ -237,41 +237,11 @@ const openrouterChatModels: AIChatModelCard[] = [
     },
     type: 'chat',
   },
-  {
-    abilities: {
-      reasoning: true,
-    },
-    contextWindowTokens: 32_000,
-    description:
-      'GLM-Z1-9B-0414 is a 9B-parameter language model in the GLM-4 family developed by THUDM. It adopts techniques originally applied to the larger GLM-Z1 models, including extended reinforcement learning, pairwise ranking alignment, and training on reasoning-intensive tasks such as math, code, and logic. Despite its smaller scale, it shows strong performance on general reasoning tasks and outperforms many open-source models at its weight class.',
-    displayName: 'GLM Z1 9B (Free)',
-    id: 'thudm/glm-z1-9b:free',
-    type: 'chat',
-  },
-  {
-    contextWindowTokens: 32_000,
-    description:
-      'GLM-4-9B-0414 is a 9-billion-parameter language model in the GLM-4 family developed by THUDM. Trained with the same reinforcement learning and alignment strategies as its larger 32B counterpart, it achieves high performance relative to its size, making it suitable for resource-constrained deployments that still require strong language understanding and generation.',
-    displayName: 'GLM 4 9B (Free)',
-    id: 'thudm/glm-4-9b:free',
-    type: 'chat',
-  },
   {
     abilities: {
       reasoning: true,
     },
     contextWindowTokens: 32_768,
-    description:
-      'GLM-Z1-32B-0414 is an enhanced reasoning variant of GLM-4-32B, built for deep math, logic, and code-oriented problem solving. It applies extended reinforcement learning (task-specific and based on general pairwise preferences) to improve performance on complex multi-step tasks. Compared with the base GLM-4-32B model, Z1 significantly improves structured reasoning and capabilities in formal domains.\n\nThe model supports enforcing "thinking" steps via prompt engineering and offers improved coherence for long-form output. It is optimized for agentic workflows and supports long context (via YaRN), JSON tool calling, and fine-grained sampling configuration for stable inference. Ideal for use cases that require deliberate, multi-step reasoning or formal derivations.',
-    displayName: 'GLM Z1 32B (Free)',
-    id: 'thudm/glm-z1-32b:free',
-    type: 'chat',
-  },
-  {
-    abilities: {
-      reasoning: true,
-    },
-    contextWindowTokens: 32_000,
     description:
      'GLM-Z1-32B-0414 is an enhanced reasoning variant of GLM-4-32B, built for deep math, logic, and code-oriented problem solving. It applies extended reinforcement learning (task-specific and based on general pairwise preferences) to improve performance on complex multi-step tasks. Compared with the base GLM-4-32B model, Z1 significantly improves structured reasoning and capabilities in formal domains.\n\nThe model supports enforcing "thinking" steps via prompt engineering and offers improved coherence for long-form output. It is optimized for agentic workflows and supports long context (via YaRN), JSON tool calling, and fine-grained sampling configuration for stable inference. Ideal for use cases that require deliberate, multi-step reasoning or formal derivations.',
     displayName: 'GLM Z1 32B',
@@ -288,7 +258,7 @@ const openrouterChatModels: AIChatModelCard[] = [
     abilities: {
       reasoning: true,
     },
-    contextWindowTokens:
+    contextWindowTokens: 32_000,
     description:
      'GLM-4-32B-0414 is a 32B bilingual (Chinese-English) open-weight language model optimized for code generation, function calling, and agentic tasks. It was pretrained on 15T of high-quality and reasoning-heavy data, then further refined with human preference alignment, rejection sampling, and reinforcement learning. The model excels at complex reasoning, artifact generation, and structured output tasks, reaching performance comparable to GPT-4o and DeepSeek-V3-0324 on several benchmarks.',
     displayName: 'GLM 4 32B (Free)',
@@ -715,7 +685,7 @@ const openrouterChatModels: AIChatModelCard[] = [
     type: 'chat',
   },
   {
-    contextWindowTokens:
+    contextWindowTokens: 163_840,
     description:
      'DeepSeek V3 is a 685B-parameter mixture-of-experts model and the latest iteration of the flagship chat model series from the DeepSeek team.\n\nIt succeeds the [DeepSeek V3](/deepseek/deepseek-chat-v3) model and performs well across a wide range of tasks.',
     displayName: 'DeepSeek V3 0324',
@@ -1018,7 +988,7 @@ const openrouterChatModels: AIChatModelCard[] = [
     abilities: {
       functionCall: true,
     },
-    contextWindowTokens:
+    contextWindowTokens: 131_072,
     description:
      'Llama 3.3 is the most advanced multilingual open-source large language model in the Llama series, offering performance comparable to a 405B model at a fraction of the cost. Built on the Transformer architecture, it is refined for helpfulness and safety through supervised fine-tuning (SFT) and reinforcement learning from human feedback (RLHF). Its instruction-tuned version is optimized for multilingual dialogue and outperforms many open-source and closed chat models on several industry benchmarks. Knowledge cutoff: December 2023.',
     displayName: 'Llama 3.3 70B Instruct',
@@ -1035,7 +1005,7 @@ const openrouterChatModels: AIChatModelCard[] = [
     abilities: {
       functionCall: true,
     },
-    contextWindowTokens:
+    contextWindowTokens: 65_536,
     description:
      'Llama 3.3 is the most advanced multilingual open-source large language model in the Llama series, offering performance comparable to a 405B model at a fraction of the cost. Built on the Transformer architecture, it is refined for helpfulness and safety through supervised fine-tuning (SFT) and reinforcement learning from human feedback (RLHF). Its instruction-tuned version is optimized for multilingual dialogue and outperforms many open-source and closed chat models on several industry benchmarks. Knowledge cutoff: December 2023.',
     displayName: 'Llama 3.3 70B Instruct (Free)',
@@ -1050,7 +1020,7 @@ const openrouterChatModels: AIChatModelCard[] = [
     type: 'chat',
   },
   {
-    contextWindowTokens:
+    contextWindowTokens: 131_072,
     description: 'LLaMA 3.1 offers multilingual support and is one of the leading generative models in the industry.',
     displayName: 'Llama 3.1 8B (Free)',
     id: 'meta-llama/llama-3.1-8b-instruct:free',
package/packages/model-runtime/src/ModelRuntime.test.ts
CHANGED
@@ -5,7 +5,7 @@ import { Langfuse } from 'langfuse';
 import { LangfuseGenerationClient, LangfuseTraceClient } from 'langfuse-core';
 import { beforeEach, describe, expect, it, vi } from 'vitest';
 
-import * as langfuseCfg from '@/config/langfuse';
+import * as langfuseCfg from '@/envs/langfuse';
 import { createTraceOptions } from '@/server/modules/ModelRuntime';
 
 import { ChatStreamPayload, LobeOpenAI, ModelProvider, ModelRuntime } from '.';
package/packages/model-runtime/src/index.ts
CHANGED
@@ -13,6 +13,7 @@ export { LobeMinimaxAI } from './minimax';
 export { LobeMistralAI } from './mistral';
 export { ModelRuntime } from './ModelRuntime';
 export { LobeMoonshotAI } from './moonshot';
+export { LobeNebiusAI } from './nebius';
 export { LobeOllamaAI } from './ollama';
 export { LobeOpenAI } from './openai';
 export { LobeOpenRouterAI } from './openrouter';
package/packages/model-runtime/src/nebius/index.ts
ADDED
@@ -0,0 +1,78 @@
+import { ModelProvider } from '../types';
+import { processMultiProviderModelList } from '../utils/modelParse';
+import { createOpenAICompatibleRuntime } from '../utils/openaiCompatibleFactory';
+
+export interface NebiusModelCard {
+  id: string;
+}
+
+export const LobeNebiusAI = createOpenAICompatibleRuntime({
+  baseURL: 'https://api.studio.nebius.com/v1',
+  chatCompletion: {
+    handlePayload: (payload) => {
+      const { model, ...rest } = payload;
+
+      return {
+        ...rest,
+        model,
+        stream: true,
+      } as any;
+    },
+  },
+  debug: {
+    chatCompletion: () => process.env.DEBUG_NEBIUS_CHAT_COMPLETION === '1',
+  },
+  models: async ({ client }) => {
+    const base = (client as any).baseURL || 'https://api.studio.nebius.com/v1';
+    const url = `${base.replace(/\/+$/, '')}/models?verbose=true`;
+
+    const res = await fetch(url, {
+      headers: {
+        Accept: 'application/json',
+        Authorization: `Bearer ${client.apiKey}`,
+      },
+      method: 'GET',
+    });
+
+    if (!res.ok) {
+      throw new Error(`Failed to fetch Nebius models: ${res.status} ${res.statusText}`);
+    }
+
+    const body = (await res.json()) as any;
+    const rawList = body?.data ?? [];
+
+    const standardList = rawList.map((m: any) => {
+      const modality = m.architecture?.modality;
+      let inferredType: string | undefined = undefined;
+
+      if (typeof modality === 'string' && modality.includes('->')) {
+        const parts = modality.split('->');
+        const right = parts[1]?.trim().toLowerCase();
+        if (right === 'image') {
+          inferredType = 'image';
+        }
+        if (right === 'embedding') {
+          inferredType = 'embedding';
+        }
+      }
+
+      return {
+        contextWindowTokens: m.context_length ?? undefined,
+        description: m.description ?? '',
+        displayName: m.name ?? m.id,
+        functionCall: m.features?.includes('function-calling'),
+        id: m.id,
+        pricing: {
+          input: m.pricing.prompt * 1_000_000,
+          output: m.pricing.completion * 1_000_000,
+        },
+        reasoning: m.features?.includes('reasoning'),
+        type: inferredType,
+        vision: m.features?.includes('vision'),
+      };
+    });
+
+    return processMultiProviderModelList(standardList, 'nebius');
+  },
+  provider: ModelProvider.Nebius,
+});
package/packages/model-runtime/src/runtimeMap.ts
CHANGED
@@ -29,6 +29,7 @@ import { LobeMinimaxAI } from './minimax';
 import { LobeMistralAI } from './mistral';
 import { LobeModelScopeAI } from './modelscope';
 import { LobeMoonshotAI } from './moonshot';
+import { LobeNebiusAI } from './nebius';
 import { LobeNovitaAI } from './novita';
 import { LobeNvidiaAI } from './nvidia';
 import { LobeOllamaAI } from './ollama';
@@ -89,6 +90,7 @@ export const providerRuntimeMap = {
   mistral: LobeMistralAI,
   modelscope: LobeModelScopeAI,
   moonshot: LobeMoonshotAI,
+  nebius: LobeNebiusAI,
   novita: LobeNovitaAI,
   nvidia: LobeNvidiaAI,
   ollama: LobeOllamaAI,
package/packages/types/src/user/settings/keyVaults.ts
CHANGED
@@ -68,6 +68,7 @@ export interface UserKeyVaults extends SearchEngineKeyVaults {
   mistral?: OpenAICompatibleKeyVault;
   modelscope?: OpenAICompatibleKeyVault;
   moonshot?: OpenAICompatibleKeyVault;
+  nebius?: OpenAICompatibleKeyVault;
   novita?: OpenAICompatibleKeyVault;
   nvidia?: OpenAICompatibleKeyVault;
   ollama?: OpenAICompatibleKeyVault;
package/src/app/[variants]/metadata.ts
CHANGED
@@ -2,16 +2,10 @@ import { BRANDING_LOGO_URL, BRANDING_NAME, ORG_NAME } from '@/const/branding';
 import { DEFAULT_LANG } from '@/const/locale';
 import { OFFICIAL_URL, OG_URL } from '@/const/url';
 import { isCustomBranding, isCustomORG } from '@/const/version';
-import { appEnv } from '@/envs/app';
 import { translation } from '@/server/translation';
 import { DynamicLayoutProps } from '@/types/next';
 import { RouteVariants } from '@/utils/server/routeVariants';
 
-const BASE_PATH = appEnv.NEXT_PUBLIC_BASE_PATH;
-
-// if there is a base path, then we don't need the manifest
-const noManifest = !!BASE_PATH;
-
 export const generateMetadata = async (props: DynamicLayoutProps) => {
   const locale = await RouteVariants.getLocale(props);
   const { t } = await translation('metadata', locale);
@@ -32,7 +26,7 @@ export const generateMetadata = async (props: DynamicLayoutProps) => {
       icon: '/favicon.ico?v=1',
       shortcut: '/favicon-32x32.ico?v=1',
     },
-    manifest:
+    manifest: '/manifest.json',
     metadataBase: new URL(OFFICIAL_URL),
     openGraph: {
       description: t('chat.description', { appName: BRANDING_NAME }),
package/src/components/Analytics/Google.tsx
CHANGED
@@ -1,6 +1,6 @@
 import { GoogleAnalytics as GA } from '@next/third-parties/google';
 
-import { analyticsEnv } from '@/config/analytics';
+import { analyticsEnv } from '@/envs/analytics';
 
 const GoogleAnalytics = () => <GA gaId={analyticsEnv.GOOGLE_ANALYTICS_MEASUREMENT_ID!} />;
 
package/src/components/Analytics/LobeAnalyticsProviderWrapper.tsx
CHANGED
@@ -1,7 +1,7 @@
 import { ReactNode, memo } from 'react';
 
 import { LobeAnalyticsProvider } from '@/components/Analytics/LobeAnalyticsProvider';
-import { analyticsEnv } from '@/config/analytics';
+import { analyticsEnv } from '@/envs/analytics';
 import { isDev } from '@/utils/env';
 
 type Props = {
package/src/components/Analytics/Vercel.tsx
CHANGED
@@ -1,7 +1,7 @@
 import { Analytics } from '@vercel/analytics/react';
 import { memo } from 'react';
 
-import { analyticsEnv } from '@/config/analytics';
+import { analyticsEnv } from '@/envs/analytics';
 
 const VercelAnalytics = memo(() => <Analytics debug={analyticsEnv.DEBUG_VERCEL_ANALYTICS} />);
 
package/src/config/__tests__/analytics.test.ts
CHANGED
@@ -1,7 +1,7 @@
 // @vitest-environment node
 import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
 
-import { analyticsEnv, getAnalyticsConfig } from '../analytics';
+import { analyticsEnv, getAnalyticsConfig } from '../../envs/analytics';
 
 beforeEach(() => {
   // Clear all console.warn mocks before each test case
package/src/config/llm.ts
CHANGED
@@ -87,6 +87,9 @@ export const getLLMConfig = () => {
       ENABLED_STEPFUN: z.boolean(),
       STEPFUN_API_KEY: z.string().optional(),
 
+      ENABLED_NEBIUS: z.boolean(),
+      NEBIUS_API_KEY: z.string().optional(),
+
       ENABLED_NOVITA: z.boolean(),
       NOVITA_API_KEY: z.string().optional(),
 
@@ -364,6 +367,9 @@ export const getLLMConfig = () => {
 
       ENABLED_AIHUBMIX: !!process.env.AIHUBMIX_API_KEY,
       AIHUBMIX_API_KEY: process.env.AIHUBMIX_API_KEY,
+
+      ENABLED_NEBIUS: !!process.env.NEBIUS_API_KEY,
+      NEBIUS_API_KEY: process.env.NEBIUS_API_KEY,
     },
   });
 };
package/src/config/modelProviders/index.ts
CHANGED
@@ -31,6 +31,7 @@ import MinimaxProvider from './minimax';
 import MistralProvider from './mistral';
 import ModelScopeProvider from './modelscope';
 import MoonshotProvider from './moonshot';
+import NebiusProvider from './nebius';
 import NovitaProvider from './novita';
 import NvidiaProvider from './nvidia';
 import OllamaProvider from './ollama';
@@ -175,6 +176,7 @@ export const DEFAULT_MODEL_PROVIDER_LIST = [
   InfiniAIProvider,
   AkashChatProvider,
   QiniuProvider,
+  NebiusProvider,
 ];
 
 export const filterEnabledModels = (provider: ModelProviderCard) => {
@@ -218,6 +220,7 @@ export { default as MinimaxProviderCard } from './minimax';
 export { default as MistralProviderCard } from './mistral';
 export { default as ModelScopeProviderCard } from './modelscope';
 export { default as MoonshotProviderCard } from './moonshot';
+export { default as NebiusProviderCard } from './nebius';
 export { default as NovitaProviderCard } from './novita';
 export { default as NvidiaProviderCard } from './nvidia';
 export { default as OllamaProviderCard } from './ollama';
package/src/config/modelProviders/nebius.ts
ADDED
@@ -0,0 +1,20 @@
+import { ModelProviderCard } from '@/types/llm';
+
+const Nebius: ModelProviderCard = {
+  chatModels: [],
+  checkModel: 'Qwen/Qwen2.5-Coder-7B',
+  description: 'Nebius delivers high-performance infrastructure for AI innovators worldwide by building large-scale GPU clusters and a vertically integrated cloud platform.',
+  id: 'nebius',
+  modelsUrl: 'https://studio.nebius.com/',
+  name: 'Nebius',
+  settings: {
+    proxyUrl: {
+      placeholder: 'https://api.studio.nebius.com/v1',
+    },
+    sdkType: 'openai',
+    showModelFetcher: true,
+  },
+  url: 'https://nebius.com/',
+};
+
+export default Nebius;
package/src/envs/app.ts
CHANGED
@@ -32,7 +32,6 @@ export const getAppConfig = () => {
 
   return createEnv({
     client: {
-      NEXT_PUBLIC_BASE_PATH: z.string(),
       NEXT_PUBLIC_ENABLE_SENTRY: z.boolean(),
     },
     server: {
@@ -59,8 +58,6 @@
       SSRF_ALLOW_IP_ADDRESS_LIST: z.string().optional(),
     },
     runtimeEnv: {
-      NEXT_PUBLIC_BASE_PATH: process.env.NEXT_PUBLIC_BASE_PATH || '',
-
       // Sentry
       NEXT_PUBLIC_ENABLE_SENTRY: !!process.env.NEXT_PUBLIC_SENTRY_DSN,
 
package/src/libs/analytics/index.ts
CHANGED
@@ -1,7 +1,7 @@
 import { createServerAnalytics } from '@lobehub/analytics/server';
 
-import { analyticsEnv } from '@/config/analytics';
 import { BUSINESS_LINE } from '@/const/analytics';
+import { analyticsEnv } from '@/envs/analytics';
 import { isDev } from '@/utils/env';
 
 export const serverAnalytics = createServerAnalytics({
package/src/libs/traces/index.test.ts
CHANGED
@@ -3,7 +3,7 @@ import { Langfuse } from 'langfuse';
 import { CreateLangfuseTraceBody } from 'langfuse-core';
 import { describe, expect, it, vi } from 'vitest';
 
-import * as server from '@/config/langfuse';
+import * as server from '@/envs/langfuse';
 
 import { TraceClient } from './index';
 
package/src/libs/traces/index.ts
CHANGED
@@ -1,8 +1,8 @@
 import { Langfuse } from 'langfuse';
 import { CreateLangfuseTraceBody } from 'langfuse-core';
 
-import { getLangfuseConfig } from '@/config/langfuse';
 import { CURRENT_VERSION } from '@/const/version';
+import { getLangfuseConfig } from '@/envs/langfuse';
 import { TraceEventClient } from '@/libs/traces/event';
 
 /**
package/src/libs/trpc/client/edge.ts
CHANGED
@@ -3,7 +3,6 @@ import superjson from 'superjson';
 
 import { isDesktop } from '@/const/version';
 import type { EdgeRouter } from '@/server/routers/edge';
-import { withBasePath } from '@/utils/basePath';
 import { fetchWithDesktopRemoteRPC } from '@/utils/electron/desktopRemoteRPCFetch';
 
 export const edgeClient = createTRPCClient<EdgeRouter>({
@@ -21,7 +20,7 @@ export const edgeClient = createTRPCClient<EdgeRouter>({
       },
       maxURLLength: 2083,
       transformer: superjson,
-      url:
+      url: '/trpc/edge',
     }),
   ],
 });
package/src/libs/unstructured/index.ts
CHANGED
@@ -4,7 +4,7 @@ import { UnstructuredClient } from 'unstructured-client';
 import { Strategy } from 'unstructured-client/sdk/models/shared';
 import { PartitionResponse } from 'unstructured-client/src/sdk/models/operations';
 
-import { knowledgeEnv } from '@/config/knowledge';
+import { knowledgeEnv } from '@/envs/knowledge';
 
 export enum ChunkingStrategy {
   Basic = 'basic',
package/src/locales/create.ts
CHANGED
@@ -4,8 +4,8 @@ import resourcesToBackend from 'i18next-resources-to-backend';
 import { initReactI18next } from 'react-i18next';
 import { isRtlLang } from 'rtl-detect';
 
-import { getDebugConfig } from '@/config/debug';
 import { DEFAULT_LANG } from '@/const/locale';
+import { getDebugConfig } from '@/envs/debug';
 import { normalizeLocale } from '@/locales/resources';
 import { isDev, isOnServerSide } from '@/utils/env';
 
package/src/server/globalConfig/index.test.ts
CHANGED
@@ -1,6 +1,5 @@
 import { describe, expect, it, vi } from 'vitest';
 
-import { knowledgeEnv } from '@/config/knowledge';
 import { getAppConfig } from '@/envs/app';
 import { SystemEmbeddingConfig } from '@/types/knowledgeBase';
 import { FilesConfigItem } from '@/types/user/settings/filesConfig';
@@ -13,7 +12,7 @@ vi.mock('@/envs/app', () => ({
   getAppConfig: vi.fn(),
 }));
 
-vi.mock('@/config/knowledge', () => ({
+vi.mock('@/envs/knowledge', () => ({
   knowledgeEnv: {
     DEFAULT_FILES_CONFIG: 'test_config',
   },
package/src/server/globalConfig/index.ts
CHANGED
@@ -1,10 +1,10 @@
 import { authEnv } from '@/config/auth';
 import { fileEnv } from '@/config/file';
-import { knowledgeEnv } from '@/config/knowledge';
-import { langfuseEnv } from '@/config/langfuse';
 import { enableNextAuth } from '@/const/auth';
 import { isDesktop } from '@/const/version';
 import { appEnv, getAppConfig } from '@/envs/app';
+import { knowledgeEnv } from '@/envs/knowledge';
+import { langfuseEnv } from '@/envs/langfuse';
 import { parseSystemAgent } from '@/server/globalConfig/parseSystemAgent';
 import { GlobalServerConfig } from '@/types/serverConfig';
 
package/src/server/modules/ContentChunk/index.ts
CHANGED
@@ -1,7 +1,7 @@
 import { Strategy } from 'unstructured-client/sdk/models/shared';
 
-import { knowledgeEnv } from '@/config/knowledge';
 import type { NewChunkItem, NewUnstructuredChunkItem } from '@/database/schemas';
+import { knowledgeEnv } from '@/envs/knowledge';
 import { ChunkingLoader } from '@/libs/langchain';
 import { ChunkingStrategy, Unstructured } from '@/libs/unstructured';
 
package/src/server/routers/tools/search.test.ts
CHANGED
@@ -2,7 +2,7 @@
 import { TRPCError } from '@trpc/server';
 import { beforeEach, describe, expect, it, vi } from 'vitest';
 
-import { toolsEnv } from '@/config/tools';
+import { toolsEnv } from '@/envs/tools';
 import { SearXNGClient } from '@/server/services/search/impls/searxng/client';
 import { SEARCH_SEARXNG_NOT_CONFIG } from '@/types/tool/search';
 
package/src/server/services/search/impls/searxng/index.ts
CHANGED
@@ -1,6 +1,6 @@
 import { TRPCError } from '@trpc/server';
 
-import { toolsEnv } from '@/config/tools';
+import { toolsEnv } from '@/envs/tools';
 import { SearXNGClient } from '@/server/services/search/impls/searxng/client';
 import { SEARCH_SEARXNG_NOT_CONFIG, UniformSearchResponse } from '@/types/tool/search';
 
package/src/server/services/search/index.ts
CHANGED
@@ -1,7 +1,7 @@
 import { CrawlImplType, Crawler } from '@lobechat/web-crawler';
 import pMap from 'p-map';
 
-import { toolsEnv } from '@/config/tools';
+import { toolsEnv } from '@/envs/tools';
 import { SearchParams } from '@/types/tool/search';
 
 import { SearchImplType, SearchServiceImpl, createSearchServiceImpl } from './impls';
package/src/services/_url.ts
CHANGED
@@ -1,20 +1,7 @@
 /* eslint-disable sort-keys-fix/sort-keys-fix */
-import { transform } from 'lodash-es';
-
-import { withBasePath } from '@/utils/basePath';
-
-const mapWithBasePath = <T extends object>(apis: T): T => {
-  return transform(apis, (result, value, key) => {
-    if (typeof value === 'string') {
-      // @ts-ignore
-      result[key] = withBasePath(value);
-    } else {
-      result[key] = value;
-    }
-  });
-};
 
-export const API_ENDPOINTS = mapWithBasePath({
+
+export const API_ENDPOINTS = {
   oauth: '/api/auth',
 
   proxy: '/webapi/proxy',
@@ -26,11 +13,11 @@ export const API_ENDPOINTS = mapWithBasePath({
   trace: '/webapi/trace',
 
   // chat
-  chat: (provider: string) =>
+  chat: (provider: string) => `/webapi/chat/${provider}`,
 
   // models
-  models: (provider: string) =>
-  modelPull: (provider: string) =>
+  models: (provider: string) => `/webapi/models/${provider}`,
+  modelPull: (provider: string) => `/webapi/models/${provider}/pull`,
 
   // image
   images: (provider: string) => `/webapi/text-to-image/${provider}`,
@@ -42,4 +29,4 @@ export const API_ENDPOINTS = mapWithBasePath({
   tts: '/webapi/tts/openai',
   edge: '/webapi/tts/edge',
   microsoft: '/webapi/tts/microsoft',
-}
+};
package/src/services/share.ts
CHANGED
@@ -2,7 +2,6 @@ import type { PartialDeep } from 'type-fest';
 
 import { LOBE_URL_IMPORT_NAME } from '@/const/url';
 import { UserSettings } from '@/types/user/settings';
-import { withBasePath } from '@/utils/basePath';
 
 class ShareService {
   /**
@@ -11,7 +10,7 @@ class ShareService {
    * @returns The share settings URL.
    */
   public createShareSettingsUrl = (settings: PartialDeep<UserSettings>) => {
-    return
+    return `/?${LOBE_URL_IMPORT_NAME}=${encodeURI(JSON.stringify(settings))}`;
   };
 
   /**