@lobehub/chat 1.136.13 → 1.137.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.cursor/rules/add-setting-env.mdc +175 -0
- package/.cursor/rules/db-migrations.mdc +25 -0
- package/.env.example +7 -0
- package/CHANGELOG.md +50 -0
- package/Dockerfile +3 -2
- package/Dockerfile.database +15 -3
- package/Dockerfile.pglite +3 -2
- package/changelog/v1.json +18 -0
- package/docs/development/database-schema.dbml +1 -0
- package/docs/self-hosting/advanced/feature-flags.mdx +25 -15
- package/docs/self-hosting/advanced/feature-flags.zh-CN.mdx +25 -15
- package/docs/self-hosting/environment-variables/basic.mdx +12 -0
- package/docs/self-hosting/environment-variables/basic.zh-CN.mdx +12 -0
- package/locales/ar/setting.json +8 -0
- package/locales/bg-BG/setting.json +8 -0
- package/locales/de-DE/setting.json +8 -0
- package/locales/en-US/setting.json +8 -0
- package/locales/es-ES/setting.json +8 -0
- package/locales/fa-IR/setting.json +8 -0
- package/locales/fr-FR/setting.json +8 -0
- package/locales/it-IT/setting.json +8 -0
- package/locales/ja-JP/setting.json +8 -0
- package/locales/ko-KR/setting.json +8 -0
- package/locales/nl-NL/setting.json +8 -0
- package/locales/pl-PL/setting.json +8 -0
- package/locales/pt-BR/setting.json +8 -0
- package/locales/ru-RU/setting.json +8 -0
- package/locales/tr-TR/setting.json +8 -0
- package/locales/vi-VN/setting.json +8 -0
- package/locales/zh-CN/setting.json +8 -0
- package/locales/zh-TW/setting.json +8 -0
- package/package.json +1 -1
- package/packages/agent-runtime/examples/tools-calling.ts +4 -3
- package/packages/agent-runtime/src/core/__tests__/runtime.test.ts +559 -29
- package/packages/agent-runtime/src/core/runtime.ts +171 -43
- package/packages/agent-runtime/src/types/instruction.ts +32 -6
- package/packages/agent-runtime/src/types/runtime.ts +2 -2
- package/packages/agent-runtime/src/types/state.ts +1 -8
- package/packages/agent-runtime/vitest.config.mts +14 -0
- package/packages/const/src/settings/image.ts +8 -0
- package/packages/const/src/settings/index.ts +3 -0
- package/packages/context-engine/src/__tests__/pipeline.test.ts +485 -0
- package/packages/context-engine/src/base/__tests__/BaseProcessor.test.ts +381 -0
- package/packages/context-engine/src/base/__tests__/BaseProvider.test.ts +392 -0
- package/packages/context-engine/src/processors/__tests__/MessageCleanup.test.ts +346 -0
- package/packages/context-engine/src/processors/__tests__/ToolCall.test.ts +552 -0
- package/packages/database/migrations/0038_add_image_user_settings.sql +1 -0
- package/packages/database/migrations/meta/0038_snapshot.json +7580 -0
- package/packages/database/migrations/meta/_journal.json +7 -0
- package/packages/database/src/core/migrations.json +6 -0
- package/packages/database/src/models/user.ts +3 -1
- package/packages/database/src/schemas/user.ts +1 -0
- package/packages/file-loaders/src/loaders/docx/index.test.ts +0 -1
- package/packages/file-loaders/src/loaders/excel/__snapshots__/index.test.ts.snap +30 -0
- package/packages/file-loaders/src/loaders/excel/index.test.ts +8 -0
- package/packages/file-loaders/src/loaders/pptx/index.test.ts +25 -0
- package/packages/file-loaders/src/utils/parser-utils.test.ts +155 -0
- package/packages/file-loaders/vitest.config.mts +8 -0
- package/packages/model-runtime/CLAUDE.md +5 -0
- package/packages/model-runtime/docs/test-coverage.md +706 -0
- package/packages/model-runtime/src/core/ModelRuntime.test.ts +231 -0
- package/packages/model-runtime/src/core/RouterRuntime/createRuntime.ts +1 -1
- package/packages/model-runtime/src/core/openaiCompatibleFactory/createImage.test.ts +799 -0
- package/packages/model-runtime/src/core/openaiCompatibleFactory/index.test.ts +188 -4
- package/packages/model-runtime/src/core/openaiCompatibleFactory/index.ts +41 -10
- package/packages/model-runtime/src/core/streams/openai/__snapshots__/responsesStream.test.ts.snap +439 -0
- package/packages/model-runtime/src/core/streams/openai/openai.test.ts +789 -0
- package/packages/model-runtime/src/core/streams/openai/responsesStream.test.ts +551 -0
- package/packages/model-runtime/src/core/usageConverters/utils/computeChatCost.test.ts +230 -0
- package/packages/model-runtime/src/core/usageConverters/utils/computeImageCost.test.ts +334 -37
- package/packages/model-runtime/src/providerTestUtils.ts +148 -145
- package/packages/model-runtime/src/providers/ai302/index.test.ts +60 -0
- package/packages/model-runtime/src/providers/ai302/index.ts +9 -4
- package/packages/model-runtime/src/providers/ai360/index.test.ts +1213 -1
- package/packages/model-runtime/src/providers/ai360/index.ts +9 -4
- package/packages/model-runtime/src/providers/aihubmix/index.test.ts +73 -0
- package/packages/model-runtime/src/providers/aihubmix/index.ts +6 -9
- package/packages/model-runtime/src/providers/akashchat/index.test.ts +433 -3
- package/packages/model-runtime/src/providers/akashchat/index.ts +12 -7
- package/packages/model-runtime/src/providers/anthropic/generateObject.test.ts +183 -29
- package/packages/model-runtime/src/providers/anthropic/generateObject.ts +40 -24
- package/packages/model-runtime/src/providers/azureai/index.test.ts +102 -0
- package/packages/model-runtime/src/providers/baichuan/index.test.ts +416 -26
- package/packages/model-runtime/src/providers/baichuan/index.ts +23 -20
- package/packages/model-runtime/src/providers/bedrock/index.test.ts +420 -2
- package/packages/model-runtime/src/providers/cerebras/index.test.ts +465 -0
- package/packages/model-runtime/src/providers/cerebras/index.ts +8 -3
- package/packages/model-runtime/src/providers/cohere/index.test.ts +1074 -1
- package/packages/model-runtime/src/providers/cohere/index.ts +8 -3
- package/packages/model-runtime/src/providers/cometapi/index.test.ts +439 -3
- package/packages/model-runtime/src/providers/cometapi/index.ts +8 -3
- package/packages/model-runtime/src/providers/deepseek/index.test.ts +116 -1
- package/packages/model-runtime/src/providers/deepseek/index.ts +8 -3
- package/packages/model-runtime/src/providers/fireworksai/index.test.ts +264 -3
- package/packages/model-runtime/src/providers/fireworksai/index.ts +8 -3
- package/packages/model-runtime/src/providers/giteeai/index.test.ts +325 -3
- package/packages/model-runtime/src/providers/giteeai/index.ts +23 -6
- package/packages/model-runtime/src/providers/github/index.test.ts +532 -3
- package/packages/model-runtime/src/providers/github/index.ts +8 -3
- package/packages/model-runtime/src/providers/groq/index.test.ts +344 -31
- package/packages/model-runtime/src/providers/groq/index.ts +8 -3
- package/packages/model-runtime/src/providers/higress/index.test.ts +142 -0
- package/packages/model-runtime/src/providers/higress/index.ts +8 -3
- package/packages/model-runtime/src/providers/huggingface/index.test.ts +612 -1
- package/packages/model-runtime/src/providers/huggingface/index.ts +9 -4
- package/packages/model-runtime/src/providers/hunyuan/index.test.ts +365 -1
- package/packages/model-runtime/src/providers/hunyuan/index.ts +9 -3
- package/packages/model-runtime/src/providers/infiniai/index.test.ts +71 -0
- package/packages/model-runtime/src/providers/internlm/index.test.ts +369 -2
- package/packages/model-runtime/src/providers/internlm/index.ts +10 -5
- package/packages/model-runtime/src/providers/jina/index.test.ts +164 -3
- package/packages/model-runtime/src/providers/jina/index.ts +8 -3
- package/packages/model-runtime/src/providers/lmstudio/index.test.ts +182 -3
- package/packages/model-runtime/src/providers/lmstudio/index.ts +8 -3
- package/packages/model-runtime/src/providers/mistral/index.test.ts +779 -27
- package/packages/model-runtime/src/providers/mistral/index.ts +8 -3
- package/packages/model-runtime/src/providers/modelscope/index.test.ts +232 -1
- package/packages/model-runtime/src/providers/modelscope/index.ts +8 -3
- package/packages/model-runtime/src/providers/moonshot/index.test.ts +489 -2
- package/packages/model-runtime/src/providers/moonshot/index.ts +8 -3
- package/packages/model-runtime/src/providers/nebius/index.test.ts +381 -3
- package/packages/model-runtime/src/providers/nebius/index.ts +8 -3
- package/packages/model-runtime/src/providers/newapi/index.test.ts +667 -3
- package/packages/model-runtime/src/providers/newapi/index.ts +6 -3
- package/packages/model-runtime/src/providers/nvidia/index.test.ts +168 -1
- package/packages/model-runtime/src/providers/nvidia/index.ts +12 -7
- package/packages/model-runtime/src/providers/ollama/index.test.ts +797 -1
- package/packages/model-runtime/src/providers/ollama/index.ts +8 -0
- package/packages/model-runtime/src/providers/ollamacloud/index.test.ts +411 -0
- package/packages/model-runtime/src/providers/ollamacloud/index.ts +8 -3
- package/packages/model-runtime/src/providers/openai/index.test.ts +171 -2
- package/packages/model-runtime/src/providers/openai/index.ts +8 -3
- package/packages/model-runtime/src/providers/openrouter/index.test.ts +1647 -95
- package/packages/model-runtime/src/providers/openrouter/index.ts +12 -7
- package/packages/model-runtime/src/providers/qiniu/index.test.ts +294 -1
- package/packages/model-runtime/src/providers/qiniu/index.ts +8 -3
- package/packages/model-runtime/src/providers/search1api/index.test.ts +1131 -11
- package/packages/model-runtime/src/providers/search1api/index.ts +10 -4
- package/packages/model-runtime/src/providers/sensenova/index.test.ts +1069 -1
- package/packages/model-runtime/src/providers/sensenova/index.ts +8 -3
- package/packages/model-runtime/src/providers/siliconcloud/index.test.ts +196 -0
- package/packages/model-runtime/src/providers/siliconcloud/index.ts +8 -3
- package/packages/model-runtime/src/providers/spark/index.test.ts +293 -1
- package/packages/model-runtime/src/providers/spark/index.ts +8 -3
- package/packages/model-runtime/src/providers/stepfun/index.test.ts +322 -3
- package/packages/model-runtime/src/providers/stepfun/index.ts +8 -3
- package/packages/model-runtime/src/providers/tencentcloud/index.test.ts +182 -3
- package/packages/model-runtime/src/providers/tencentcloud/index.ts +8 -3
- package/packages/model-runtime/src/providers/togetherai/index.test.ts +359 -4
- package/packages/model-runtime/src/providers/togetherai/index.ts +12 -5
- package/packages/model-runtime/src/providers/v0/index.test.ts +341 -0
- package/packages/model-runtime/src/providers/v0/index.ts +20 -6
- package/packages/model-runtime/src/providers/vercelaigateway/index.test.ts +710 -0
- package/packages/model-runtime/src/providers/vercelaigateway/index.ts +19 -13
- package/packages/model-runtime/src/providers/vllm/index.test.ts +45 -1
- package/packages/model-runtime/src/providers/volcengine/index.test.ts +75 -0
- package/packages/model-runtime/src/providers/wenxin/index.test.ts +144 -1
- package/packages/model-runtime/src/providers/wenxin/index.ts +8 -3
- package/packages/model-runtime/src/providers/xai/index.test.ts +105 -1
- package/packages/model-runtime/src/providers/xinference/index.test.ts +70 -1
- package/packages/model-runtime/src/providers/zeroone/index.test.ts +327 -3
- package/packages/model-runtime/src/providers/zeroone/index.ts +23 -6
- package/packages/model-runtime/src/providers/zhipu/index.test.ts +908 -236
- package/packages/model-runtime/src/providers/zhipu/index.ts +8 -3
- package/packages/model-runtime/src/types/structureOutput.ts +5 -1
- package/packages/model-runtime/vitest.config.mts +7 -1
- package/packages/types/src/aiChat.ts +20 -2
- package/packages/types/src/serverConfig.ts +7 -1
- package/packages/types/src/tool/index.ts +1 -0
- package/packages/types/src/tool/tool.ts +33 -0
- package/packages/types/src/user/settings/image.ts +3 -0
- package/packages/types/src/user/settings/index.ts +3 -0
- package/src/app/[variants]/(main)/settings/_layout/SettingsContent.tsx +3 -0
- package/src/app/[variants]/(main)/settings/hooks/useCategory.tsx +8 -3
- package/src/app/[variants]/(main)/settings/image/index.tsx +74 -0
- package/src/components/FormInput/FormSliderWithInput.tsx +40 -0
- package/src/components/FormInput/index.ts +1 -0
- package/src/envs/image.ts +27 -0
- package/src/features/Conversation/Messages/Assistant/index.tsx +1 -1
- package/src/features/Conversation/Messages/User/index.tsx +2 -2
- package/src/hooks/useFetchAiImageConfig.ts +12 -17
- package/src/locales/default/setting.ts +8 -0
- package/src/server/globalConfig/index.ts +5 -0
- package/src/server/routers/lambda/aiChat.ts +2 -0
- package/src/store/global/initialState.ts +1 -0
- package/src/store/image/slices/generationConfig/action.test.ts +17 -0
- package/src/store/image/slices/generationConfig/action.ts +18 -21
- package/src/store/image/slices/generationConfig/initialState.ts +3 -2
- package/src/store/user/slices/common/action.ts +1 -0
- package/src/store/user/slices/settings/selectors/settings.ts +3 -0

package/packages/model-runtime/src/providers/vercelaigateway/index.ts

```diff
@@ -1,6 +1,9 @@
 import { ModelProvider } from 'model-bank';
 
-import { createOpenAICompatibleRuntime } from '../../core/openaiCompatibleFactory';
+import {
+  type OpenAICompatibleFactoryOptions,
+  createOpenAICompatibleRuntime,
+} from '../../core/openaiCompatibleFactory';
 import { processMultiProviderModelList } from '../../utils/modelParse';
 
 export interface VercelAIGatewayModelCard {
@@ -12,15 +15,15 @@ export interface VercelAIGatewayModelCard {
   name?: string;
   pricing?: {
     input?: string | number;
-    output?: string | number;
     input_cache_read?: string | number;
     input_cache_write?: string | number;
+    output?: string | number;
   };
   tags?: string[];
   type?: string;
 }
 
-const formatPrice = (price?: string | number) => {
+export const formatPrice = (price?: string | number) => {
   if (price === undefined || price === null) return undefined;
   const n = typeof price === 'number' ? price : Number(price);
   if (Number.isNaN(n)) return undefined;
@@ -28,21 +31,22 @@ const formatPrice = (price?: string | number) => {
   return Number((n * 1e6).toPrecision(5));
 };
 
-export const LobeVercelAIGatewayAI = createOpenAICompatibleRuntime({
+export const params = {
   baseURL: 'https://ai-gateway.vercel.sh/v1',
   chatCompletion: {
     handlePayload: (payload) => {
       const { model, reasoning_effort, verbosity, ...rest } = payload;
 
       const providerOptions: any = {};
-      if (reasoning_effort) {
-        providerOptions.openai = {
-
-
-
-
-
-
+      if (reasoning_effort || verbosity) {
+        providerOptions.openai = {};
+        if (reasoning_effort) {
+          providerOptions.openai.reasoningEffort = reasoning_effort;
+          providerOptions.openai.reasoningSummary = 'auto';
+        }
+        if (verbosity) {
+          providerOptions.openai.textVerbosity = verbosity;
+        }
       }
 
       return {
@@ -101,4 +105,6 @@ export const LobeVercelAIGatewayAI = createOpenAICompatibleRuntime({
     return await processMultiProviderModelList(formattedModels, 'vercelaigateway');
   },
   provider: ModelProvider.VercelAIGateway,
-});
+} satisfies OpenAICompatibleFactoryOptions;
+
+export const LobeVercelAIGatewayAI = createOpenAICompatibleRuntime(params);
```
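
The same refactor shows up in several of the provider modules listed above (wenxin below, and others): the factory options move into an exported `params` object checked with `satisfies OpenAICompatibleFactoryOptions`, and the runtime is then created from that object. A minimal sketch of what `satisfies` buys here, using a stand-in `FactoryOptions` type and env var rather than the real `OpenAICompatibleFactoryOptions`:

```ts
// Stand-in type for illustration; the real interface is OpenAICompatibleFactoryOptions.
interface FactoryOptions {
  baseURL: string;
  debug?: { chatCompletion: () => boolean };
}

// `satisfies` type-checks the object against FactoryOptions without widening it,
// so the literal `baseURL` and the concrete `debug.chatCompletion` function stay
// visible to callers of the exported `params`.
export const params = {
  baseURL: 'https://example.invalid/v1',
  debug: { chatCompletion: () => process.env.DEBUG_EXAMPLE_CHAT_COMPLETION === '1' },
} satisfies FactoryOptions;
```

Because `params` is exported separately from the runtime, unit tests can call things like `params.debug.chatCompletion()` or `params.chatCompletion!.handlePayload!(payload)` directly, which is exactly what the new wenxin tests later in this diff do.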
package/packages/model-runtime/src/providers/vllm/index.test.ts

```diff
@@ -1,8 +1,9 @@
 // @vitest-environment node
 import { ModelProvider } from 'model-bank';
+import { beforeEach, describe, expect, it, vi } from 'vitest';
 
 import { testProvider } from '../../providerTestUtils';
-import { LobeVLLMAI } from './index';
+import { LobeVLLMAI, VLLMModelCard } from './index';
 
 testProvider({
   Runtime: LobeVLLMAI,
@@ -11,3 +12,46 @@ testProvider({
   chatDebugEnv: 'DEBUG_VLLM_CHAT_COMPLETION',
   chatModel: 'llama-2-7b-chat',
 });
+
+describe('LobeVLLMAI - custom features', () => {
+  let instance: InstanceType<typeof LobeVLLMAI>;
+
+  beforeEach(() => {
+    instance = new LobeVLLMAI({ apiKey: 'test_api_key' });
+  });
+
+  describe('models', () => {
+    it('should fetch and process model list correctly', async () => {
+      const mockModelList: VLLMModelCard[] = [
+        { id: 'meta-llama/Llama-2-7b-chat-hf' },
+        { id: 'mistralai/Mistral-7B-Instruct-v0.1' },
+        { id: 'qwen/Qwen-7B-Chat' },
+      ];
+
+      vi.spyOn(instance['client'].models, 'list').mockResolvedValue({
+        data: mockModelList,
+      } as any);
+
+      const models = await instance.models();
+
+      expect(instance['client'].models.list).toHaveBeenCalled();
+      expect(models).toHaveLength(3);
+      expect(models[0]).toMatchObject({
+        id: 'meta-llama/Llama-2-7b-chat-hf',
+      });
+      expect(models[1]).toMatchObject({
+        id: 'mistralai/Mistral-7B-Instruct-v0.1',
+      });
+    });
+
+    it('should handle empty model list', async () => {
+      vi.spyOn(instance['client'].models, 'list').mockResolvedValue({
+        data: [],
+      } as any);
+
+      const models = await instance.models();
+
+      expect(models).toEqual([]);
+    });
+  });
+});
```
package/packages/model-runtime/src/providers/volcengine/index.test.ts

```diff
@@ -0,0 +1,75 @@
+// @vitest-environment node
+import { ModelProvider } from 'model-bank';
+import { beforeEach, describe, expect, it, vi } from 'vitest';
+
+import { testProvider } from '../../providerTestUtils';
+import { LobeVolcengineAI } from './index';
+
+testProvider({
+  Runtime: LobeVolcengineAI,
+  provider: ModelProvider.Volcengine,
+  defaultBaseURL: 'https://ark.cn-beijing.volces.com/api/v3',
+  chatDebugEnv: 'DEBUG_VOLCENGINE_CHAT_COMPLETION',
+  chatModel: 'doubao-pro-32k',
+  invalidErrorType: 'InvalidProviderAPIKey',
+  bizErrorType: 'ProviderBizError',
+  test: {
+    skipAPICall: true,
+    skipErrorHandle: true,
+  },
+});
+
+describe('LobeVolcengineAI - custom features', () => {
+  let instance: InstanceType<typeof LobeVolcengineAI>;
+
+  beforeEach(() => {
+    instance = new LobeVolcengineAI({ apiKey: 'test_api_key' });
+    vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(
+      new ReadableStream() as any,
+    );
+  });
+
+  describe('handlePayload', () => {
+    it('should add thinking for thinking-vision-pro model', async () => {
+      await instance.chat({
+        messages: [{ content: 'Hello', role: 'user' }],
+        model: 'thinking-vision-pro',
+        thinking: {
+          type: 'enabled',
+          budget_tokens: 1000,
+        },
+      });
+
+      const calledPayload = (instance['client'].chat.completions.create as any).mock.calls[0][0];
+      expect(calledPayload.thinking).toEqual({ type: 'enabled' });
+    });
+
+    it('should add thinking for deepseek-v3-1 model', async () => {
+      await instance.chat({
+        messages: [{ content: 'Hello', role: 'user' }],
+        model: 'deepseek-v3-1',
+        thinking: {
+          type: 'enabled',
+          budget_tokens: 2000,
+        },
+      });
+
+      const calledPayload = (instance['client'].chat.completions.create as any).mock.calls[0][0];
+      expect(calledPayload.thinking).toEqual({ type: 'enabled' });
+    });
+
+    it('should not add thinking for non-thinking models', async () => {
+      await instance.chat({
+        messages: [{ content: 'Hello', role: 'user' }],
+        model: 'doubao-pro-32k',
+        thinking: {
+          type: 'enabled',
+          budget_tokens: 1000,
+        },
+      });
+
+      const calledPayload = (instance['client'].chat.completions.create as any).mock.calls[0][0];
+      expect(calledPayload.thinking).toBeUndefined();
+    });
+  });
+});
```
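
These tests assert only on the payload that reaches `client.chat.completions.create`. Taken together they imply a mapping roughly like the sketch below; the allow-list and the `mapThinking` helper are hypothetical names inferred from the test cases, not the provider's actual implementation:

```ts
// Hedged sketch of the thinking-parameter handling the tests above pin down.
// THINKING_MODELS and mapThinking are illustrative names, not real exports.
export const THINKING_MODELS = ['thinking-vision-pro', 'deepseek-v3-1'];

interface VolcenginePayload {
  model: string;
  thinking?: { budget_tokens?: number; type: 'disabled' | 'enabled' };
  [key: string]: unknown;
}

export const mapThinking = ({ thinking, ...rest }: VolcenginePayload) => {
  // Allow-listed models keep the enable flag, but budget_tokens is dropped.
  if (thinking && THINKING_MODELS.includes(rest.model)) {
    return { ...rest, thinking: { type: thinking.type } };
  }
  // Other models get no thinking field at all.
  return rest;
};
```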
package/packages/model-runtime/src/providers/wenxin/index.test.ts

```diff
@@ -4,7 +4,7 @@ import { ModelProvider } from 'model-bank';
 import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
 
 import { testProvider } from '../../providerTestUtils';
-import { LobeWenxinAI } from './index';
+import { LobeWenxinAI, params } from './index';
 
 testProvider({
   Runtime: LobeWenxinAI,
@@ -119,3 +119,146 @@ describe('LobeWenxinAI', () => {
     });
   });
 });
+
+describe('LobeWenxinAI - Custom Features', () => {
+  describe('Debug Configuration', () => {
+    it('should disable debug by default', () => {
+      delete process.env.DEBUG_WENXIN_CHAT_COMPLETION;
+      const result = params.debug.chatCompletion();
+      expect(result).toBe(false);
+    });
+
+    it('should enable debug when env is set to 1', () => {
+      process.env.DEBUG_WENXIN_CHAT_COMPLETION = '1';
+      const result = params.debug.chatCompletion();
+      expect(result).toBe(true);
+    });
+
+    it('should disable debug when env is set to other values', () => {
+      process.env.DEBUG_WENXIN_CHAT_COMPLETION = '0';
+      const result = params.debug.chatCompletion();
+      expect(result).toBe(false);
+    });
+  });
+
+  describe('handlePayload', () => {
+    it('should transform payload without enabledSearch', () => {
+      const payload = {
+        model: 'ernie-4.0-8k',
+        messages: [{ role: 'user', content: 'Hello' }],
+        temperature: 0.7,
+      };
+
+      const result = params.chatCompletion!.handlePayload!(payload as any);
+
+      expect(result).toEqual({
+        model: 'ernie-4.0-8k',
+        messages: [{ role: 'user', content: 'Hello' }],
+        temperature: 0.7,
+        stream: true,
+      });
+      expect(result).not.toHaveProperty('web_search');
+    });
+
+    it('should transform payload with enabledSearch set to false', () => {
+      const payload = {
+        model: 'ernie-4.0-8k',
+        messages: [{ role: 'user', content: 'Hello' }],
+        temperature: 0.7,
+        enabledSearch: false,
+      };
+
+      const result = params.chatCompletion!.handlePayload!(payload as any);
+
+      expect(result).toEqual({
+        model: 'ernie-4.0-8k',
+        messages: [{ role: 'user', content: 'Hello' }],
+        temperature: 0.7,
+        stream: true,
+      });
+      expect(result).not.toHaveProperty('web_search');
+    });
+
+    it('should transform payload with enabledSearch set to true', () => {
+      const payload = {
+        model: 'ernie-4.0-8k',
+        messages: [{ role: 'user', content: 'Hello' }],
+        temperature: 0.7,
+        enabledSearch: true,
+      };
+
+      const result = params.chatCompletion!.handlePayload!(payload as any);
+
+      expect(result).toEqual({
+        model: 'ernie-4.0-8k',
+        messages: [{ role: 'user', content: 'Hello' }],
+        temperature: 0.7,
+        stream: true,
+        web_search: {
+          enable: true,
+          enable_citation: true,
+          enable_trace: true,
+        },
+      });
+    });
+
+    it('should add web_search config when enabledSearch is true', () => {
+      const payload = {
+        model: 'ernie-4.0-8k',
+        messages: [{ role: 'user', content: 'What is the weather today?' }],
+        enabledSearch: true,
+      };
+
+      const result = params.chatCompletion!.handlePayload!(payload as any);
+
+      expect(result.web_search).toEqual({
+        enable: true,
+        enable_citation: true,
+        enable_trace: true,
+      });
+    });
+
+    it('should preserve all original payload properties except enabledSearch', () => {
+      const payload = {
+        model: 'ernie-4.0-8k',
+        messages: [{ role: 'user', content: 'Hello' }],
+        temperature: 0.8,
+        max_tokens: 2048,
+        top_p: 0.9,
+        enabledSearch: true,
+        custom_field: 'test',
+      };
+
+      const result = params.chatCompletion!.handlePayload!(payload as any);
+
+      expect(result).toHaveProperty('model', 'ernie-4.0-8k');
+      expect(result).toHaveProperty('messages');
+      expect(result).toHaveProperty('temperature', 0.8);
+      expect(result).toHaveProperty('max_tokens', 2048);
+      expect(result).toHaveProperty('top_p', 0.9);
+      expect(result).toHaveProperty('custom_field', 'test');
+      expect(result).toHaveProperty('stream', true);
+      expect(result).toHaveProperty('web_search');
+      expect(result).not.toHaveProperty('enabledSearch');
+    });
+
+    it('should always set stream to true', () => {
+      const payloadWithoutStream = {
+        model: 'ernie-4.0-8k',
+        messages: [{ role: 'user', content: 'Hello' }],
+      };
+
+      const result1 = params.chatCompletion!.handlePayload!(payloadWithoutStream as any);
+      expect(result1.stream).toBe(true);
+
+      const payloadWithStreamFalse = {
+        model: 'ernie-4.0-8k',
+        messages: [{ role: 'user', content: 'Hello' }],
+        stream: false,
+      };
+
+      const result2 = params.chatCompletion!.handlePayload!(payloadWithStreamFalse as any);
+      expect(result2.stream).toBe(true);
+    });
+  });
+});
```
package/packages/model-runtime/src/providers/wenxin/index.ts

```diff
@@ -1,8 +1,11 @@
 import { ModelProvider } from 'model-bank';
 
-import { createOpenAICompatibleRuntime } from '../../core/openaiCompatibleFactory';
+import {
+  OpenAICompatibleFactoryOptions,
+  createOpenAICompatibleRuntime,
+} from '../../core/openaiCompatibleFactory';
 
-export const LobeWenxinAI = createOpenAICompatibleRuntime({
+export const params = {
   baseURL: 'https://qianfan.baidubce.com/v2',
   chatCompletion: {
     handlePayload: (payload) => {
@@ -25,4 +28,6 @@ export const LobeWenxinAI = createOpenAICompatibleRuntime({
     chatCompletion: () => process.env.DEBUG_WENXIN_CHAT_COMPLETION === '1',
   },
   provider: ModelProvider.Wenxin,
-});
+} satisfies OpenAICompatibleFactoryOptions;
+
+export const LobeWenxinAI = createOpenAICompatibleRuntime(params);
```
package/packages/model-runtime/src/providers/xai/index.test.ts

```diff
@@ -1,8 +1,9 @@
 // @vitest-environment node
 import { ModelProvider } from 'model-bank';
+import { beforeEach, describe, expect, it, vi } from 'vitest';
 
 import { testProvider } from '../../providerTestUtils';
-import { LobeXAI } from './index';
+import { LobeXAI, XAIModelCard, isGrokReasoningModel } from './index';
 
 testProvider({
   Runtime: LobeXAI,
@@ -11,3 +12,106 @@ testProvider({
   chatDebugEnv: 'DEBUG_XAI_CHAT_COMPLETION',
   chatModel: 'grok',
 });
+
+describe('LobeXAI - custom features', () => {
+  let instance: InstanceType<typeof LobeXAI>;
+
+  beforeEach(() => {
+    instance = new LobeXAI({ apiKey: 'test_api_key' });
+    vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(
+      new ReadableStream() as any,
+    );
+  });
+
+  describe('isGrokReasoningModel', () => {
+    it('should identify Grok reasoning models correctly', () => {
+      expect(isGrokReasoningModel('grok-3-mini')).toBe(true);
+      expect(isGrokReasoningModel('grok-4')).toBe(true);
+      expect(isGrokReasoningModel('grok-code')).toBe(true);
+      expect(isGrokReasoningModel('grok-2')).toBe(false);
+      expect(isGrokReasoningModel('other-model')).toBe(false);
+    });
+  });
+
+  describe('chat with handlePayload', () => {
+    it('should handle Grok reasoning models by removing frequency_penalty and presence_penalty', async () => {
+      await instance.chat({
+        messages: [{ content: 'Hello', role: 'user' }],
+        model: 'grok-4',
+        frequency_penalty: 0.5,
+        presence_penalty: 0.3,
+        temperature: 0.7,
+      });
+
+      const calledPayload = (instance['client'].chat.completions.create as any).mock.calls[0][0];
+      expect(calledPayload.frequency_penalty).toBeUndefined();
+      expect(calledPayload.presence_penalty).toBeUndefined();
+      expect(calledPayload.model).toBe('grok-4');
+    });
+
+    it('should keep frequency_penalty and presence_penalty for non-reasoning models', async () => {
+      await instance.chat({
+        messages: [{ content: 'Hello', role: 'user' }],
+        model: 'grok-2',
+        frequency_penalty: 0.5,
+        presence_penalty: 0.3,
+        temperature: 0.7,
+      });
+
+      const calledPayload = (instance['client'].chat.completions.create as any).mock.calls[0][0];
+      expect(calledPayload.frequency_penalty).toBe(0.5);
+      expect(calledPayload.presence_penalty).toBe(0.3);
+    });
+
+    it('should add search_parameters when enabledSearch is true', async () => {
+      process.env.XAI_MAX_SEARCH_RESULTS = '10';
+      process.env.XAI_SAFE_SEARCH = '1';
+
+      await instance.chat({
+        messages: [{ content: 'Hello', role: 'user' }],
+        model: 'grok-2',
+        enabledSearch: true,
+      });
+
+      const calledPayload = (instance['client'].chat.completions.create as any).mock.calls[0][0];
+      expect(calledPayload.search_parameters).toBeDefined();
+      expect(calledPayload.search_parameters.max_search_results).toBe(10);
+      expect(calledPayload.search_parameters.mode).toBe('auto');
+      expect(calledPayload.search_parameters.return_citations).toBe(true);
+      expect(calledPayload.search_parameters.sources).toHaveLength(3);
+
+      delete process.env.XAI_MAX_SEARCH_RESULTS;
+      delete process.env.XAI_SAFE_SEARCH;
+    });
+
+    it('should not add search_parameters when enabledSearch is false', async () => {
+      await instance.chat({
+        messages: [{ content: 'Hello', role: 'user' }],
+        model: 'grok-2',
+        enabledSearch: false,
+      });
+
+      const calledPayload = (instance['client'].chat.completions.create as any).mock.calls[0][0];
+      expect(calledPayload.search_parameters).toBeUndefined();
+    });
+  });
+
+  describe('models', () => {
+    it('should fetch and process model list correctly', async () => {
+      const mockModelList: XAIModelCard[] = [
+        { id: 'grok-2' },
+        { id: 'grok-3-mini' },
+        { id: 'grok-4' },
+      ];
+
+      vi.spyOn(instance['client'].models, 'list').mockResolvedValue({
+        data: mockModelList,
+      } as any);
+
+      const models = await instance.models();
+
+      expect(instance['client'].models.list).toHaveBeenCalled();
+      expect(models.length).toBeGreaterThan(0);
+    });
+  });
+});
```
package/packages/model-runtime/src/providers/xinference/index.test.ts

```diff
@@ -1,8 +1,9 @@
 // @vitest-environment node
 import { ModelProvider } from 'model-bank';
+import { beforeEach, describe, expect, it, vi } from 'vitest';
 
 import { testProvider } from '../../providerTestUtils';
-import { LobeXinferenceAI } from './index';
+import { LobeXinferenceAI, XinferenceModelCard } from './index';
 
 testProvider({
   Runtime: LobeXinferenceAI,
@@ -11,3 +12,71 @@ testProvider({
   chatDebugEnv: 'DEBUG_XINFERENCE_CHAT_COMPLETION',
   chatModel: 'llama-2-7b-chat',
 });
+
+describe('LobeXinferenceAI - custom features', () => {
+  let instance: InstanceType<typeof LobeXinferenceAI>;
+
+  beforeEach(() => {
+    instance = new LobeXinferenceAI({ apiKey: 'test_api_key' });
+  });
+
+  describe('models', () => {
+    it('should fetch and process model list correctly', async () => {
+      const mockModelList: XinferenceModelCard[] = [
+        {
+          context_length: 4096,
+          id: 'qwen-7b',
+          model_ability: ['chat', 'tools', 'vision'],
+          model_description: 'Qwen 7B model',
+          model_type: 'LLM',
+          name: 'Qwen 7B',
+        },
+        {
+          context_length: 8192,
+          id: 'llama-2-13b',
+          model_ability: ['chat', 'reasoning'],
+          model_description: 'Llama 2 13B model',
+          model_type: 'LLM',
+          name: 'Llama 2 13B',
+        },
+      ];
+
+      vi.spyOn(instance['client'].models, 'list').mockResolvedValue({
+        data: mockModelList,
+      } as any);
+
+      const models = await instance.models();
+
+      expect(instance['client'].models.list).toHaveBeenCalled();
+      expect(models).toHaveLength(2);
+      expect(models[0]).toMatchObject({
+        id: 'qwen-7b',
+        displayName: 'Qwen 7B',
+        contextWindowTokens: 4096,
+        description: 'Qwen 7B model',
+        functionCall: true,
+        vision: true,
+        reasoning: false,
+      });
+      expect(models[1]).toMatchObject({
+        id: 'llama-2-13b',
+        displayName: 'Llama 2 13B',
+        contextWindowTokens: 8192,
+        description: 'Llama 2 13B model',
+        functionCall: false,
+        vision: false,
+        reasoning: true,
+      });
+    });
+
+    it('should handle empty model list', async () => {
+      vi.spyOn(instance['client'].models, 'list').mockResolvedValue({
+        data: [],
+      } as any);
+
+      const models = await instance.models();
+
+      expect(models).toEqual([]);
+    });
+  });
+});
```