@lobehub/chat 1.45.16 → 1.46.0

This diff compares the contents of two publicly released versions of the package as published to their public registry, and is provided for informational purposes only.
Files changed (58)
  1. package/.env.example +4 -1
  2. package/CHANGELOG.md +51 -0
  3. package/README.ja-JP.md +2 -2
  4. package/README.md +2 -2
  5. package/README.zh-CN.md +2 -2
  6. package/changelog/v1.json +18 -0
  7. package/docs/self-hosting/advanced/knowledge-base.mdx +9 -0
  8. package/docs/self-hosting/advanced/knowledge-base.zh-CN.mdx +9 -0
  9. package/locales/ar/providers.json +3 -0
  10. package/locales/bg-BG/providers.json +3 -0
  11. package/locales/de-DE/providers.json +3 -0
  12. package/locales/en-US/providers.json +3 -0
  13. package/locales/es-ES/providers.json +3 -0
  14. package/locales/fa-IR/providers.json +3 -0
  15. package/locales/fr-FR/providers.json +3 -0
  16. package/locales/it-IT/providers.json +3 -0
  17. package/locales/ja-JP/providers.json +3 -0
  18. package/locales/ko-KR/providers.json +3 -0
  19. package/locales/nl-NL/providers.json +3 -0
  20. package/locales/pl-PL/providers.json +3 -0
  21. package/locales/pt-BR/providers.json +3 -0
  22. package/locales/ru-RU/providers.json +3 -0
  23. package/locales/tr-TR/providers.json +3 -0
  24. package/locales/vi-VN/providers.json +3 -0
  25. package/locales/zh-CN/providers.json +3 -0
  26. package/locales/zh-TW/providers.json +3 -0
  27. package/package.json +4 -4
  28. package/src/app/(main)/settings/provider/(detail)/[id]/index.tsx +0 -1
  29. package/src/config/aiModels/index.ts +3 -0
  30. package/src/config/aiModels/lmstudio.ts +27 -0
  31. package/src/config/aiModels/minimax.ts +50 -0
  32. package/src/config/knowledge.ts +2 -0
  33. package/src/config/modelProviders/index.ts +6 -3
  34. package/src/config/modelProviders/lmstudio.ts +25 -0
  35. package/src/const/settings/knowledge.ts +25 -0
  36. package/src/const/settings/llm.ts +9 -0
  37. package/src/database/schemas/ragEvals.ts +2 -2
  38. package/src/libs/agent-runtime/AgentRuntime.ts +7 -0
  39. package/src/libs/agent-runtime/bedrock/index.ts +64 -3
  40. package/src/libs/agent-runtime/lmstudio/index.test.ts +255 -0
  41. package/src/libs/agent-runtime/lmstudio/index.ts +11 -0
  42. package/src/libs/agent-runtime/minimax/index.ts +39 -183
  43. package/src/libs/agent-runtime/ollama/index.ts +37 -1
  44. package/src/libs/agent-runtime/types/type.ts +1 -0
  45. package/src/libs/agent-runtime/utils/streams/index.ts +0 -1
  46. package/src/server/globalConfig/index.ts +6 -0
  47. package/src/server/globalConfig/parseFilesConfig.test.ts +17 -0
  48. package/src/server/globalConfig/parseFilesConfig.ts +57 -0
  49. package/src/server/routers/async/file.ts +8 -8
  50. package/src/server/routers/lambda/chunk.ts +12 -16
  51. package/src/types/knowledgeBase/index.ts +8 -0
  52. package/src/types/user/settings/filesConfig.ts +9 -0
  53. package/src/types/user/settings/keyVaults.ts +1 -0
  54. package/src/app/(backend)/webapi/chat/minimax/route.test.ts +0 -26
  55. package/src/app/(backend)/webapi/chat/minimax/route.ts +0 -6
  56. package/src/libs/agent-runtime/minimax/index.test.ts +0 -275
  57. package/src/libs/agent-runtime/utils/streams/minimax.test.ts +0 -27
  58. package/src/libs/agent-runtime/utils/streams/minimax.ts +0 -57
package/src/config/modelProviders/index.ts

@@ -17,6 +17,7 @@ import HigressProvider from './higress';
  import HuggingFaceProvider from './huggingface';
  import HunyuanProvider from './hunyuan';
  import InternLMProvider from './internlm';
+ import LMStudioProvider from './lmstudio';
  import MinimaxProvider from './minimax';
  import MistralProvider from './mistral';
  import MoonshotProvider from './moonshot';

@@ -104,20 +105,21 @@ export const DEFAULT_MODEL_PROVIDER_LIST = [
    QwenProvider,
    WenxinProvider,
    HunyuanProvider,
-   SparkProvider,
    ZhiPuProvider,
+   SiliconCloudProvider,
    ZeroOneProvider,
+   SparkProvider,
    SenseNovaProvider,
    StepfunProvider,
    MoonshotProvider,
    BaichuanProvider,
    MinimaxProvider,
-   Ai360Provider,
-   SiliconCloudProvider,
+   LMStudioProvider,
    InternLMProvider,
    HigressProvider,
    GiteeAIProvider,
    TaichuProvider,
+   Ai360Provider,
  ];

  export const filterEnabledModels = (provider: ModelProviderCard) => {

@@ -146,6 +148,7 @@ export { default as HigressProviderCard } from './higress';
  export { default as HuggingFaceProviderCard } from './huggingface';
  export { default as HunyuanProviderCard } from './hunyuan';
  export { default as InternLMProviderCard } from './internlm';
+ export { default as LMStudioProviderCard } from './lmstudio';
  export { default as MinimaxProviderCard } from './minimax';
  export { default as MistralProviderCard } from './mistral';
  export { default as MoonshotProviderCard } from './moonshot';
package/src/config/modelProviders/lmstudio.ts

@@ -0,0 +1,25 @@
+ import { ModelProviderCard } from '@/types/llm';
+
+ // ref: https://ollama.com/library
+ const LMStudio: ModelProviderCard = {
+   chatModels: [],
+   description: 'LM Studio 是一个用于在您的计算机上开发和实验 LLMs 的桌面应用程序。',
+   id: 'lmstudio',
+   modelsUrl: 'https://lmstudio.ai/models',
+   name: 'LM Studio',
+   settings: {
+     defaultShowBrowserRequest: true,
+     proxyUrl: {
+       placeholder: 'http://127.0.0.1:1234/v1',
+     },
+     showApiKey: false,
+     showModelFetcher: true,
+     smoothing: {
+       speed: 2,
+       text: true,
+     },
+   },
+   url: 'https://lmstudio.ai',
+ };
+
+ export default LMStudio;
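The `proxyUrl` placeholder above points at LM Studio's local OpenAI-compatible server, which is what `showModelFetcher: true` queries for available models. A minimal sketch of that check (the `/models` path follows the OpenAI API convention; everything else here is illustrative):

```ts
// Sketch: confirm a local LM Studio server is reachable at the default
// placeholder URL and list the model ids it currently exposes.
const baseURL = 'http://127.0.0.1:1234/v1';

const listLocalModels = async (): Promise<string[]> => {
  const res = await fetch(`${baseURL}/models`); // OpenAI-compatible model list endpoint
  if (!res.ok) throw new Error(`LM Studio server not reachable: ${res.status}`);
  const { data } = await res.json();
  return data.map((m: { id: string }) => m.id);
};

listLocalModels().then(console.log).catch(console.error);
```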
package/src/const/settings/knowledge.ts

@@ -0,0 +1,25 @@
+ import { FilesConfig, FilesConfigItem } from '@/types/user/settings/filesConfig';
+
+ import {
+   DEFAULT_EMBEDDING_MODEL,
+   DEFAULT_PROVIDER,
+   DEFAULT_RERANK_MODEL,
+   DEFAULT_RERANK_PROVIDER,
+   DEFAULT_RERANK_QUERY_MODE,
+ } from './llm';
+
+ export const DEFAULT_FILE_EMBEDDING_MODEL_ITEM: FilesConfigItem = {
+   model: DEFAULT_EMBEDDING_MODEL,
+   provider: DEFAULT_PROVIDER,
+ };
+
+ export const DEFAULT_FILE_RERANK_MODEL_ITEM: FilesConfigItem = {
+   model: DEFAULT_RERANK_MODEL,
+   provider: DEFAULT_RERANK_PROVIDER,
+ };
+
+ export const DEFAULT_FILES_CONFIG: FilesConfig = {
+   embeddingModel: DEFAULT_FILE_EMBEDDING_MODEL_ITEM,
+   queryModel: DEFAULT_RERANK_QUERY_MODE,
+   rerankerModel: DEFAULT_FILE_RERANK_MODEL_ITEM,
+ };
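These defaults type-check against `FilesConfigItem` and `FilesConfig` from `@/types/user/settings/filesConfig` (also added in this release, but not shown in this excerpt). A plausible shape, inferred from the usage above rather than copied from the actual file:

```ts
// Inferred sketch only: the real definitions live in
// package/src/types/user/settings/filesConfig.ts and may differ in detail.
export interface FilesConfigItem {
  model: string; // e.g. 'text-embedding-3-small' or 'rerank-english-v3.0'
  provider: string; // e.g. DEFAULT_PROVIDER or DEFAULT_RERANK_PROVIDER
}

export interface FilesConfig {
  embeddingModel: FilesConfigItem;
  queryModel: string; // e.g. DEFAULT_RERANK_QUERY_MODE = 'full_text'
  rerankerModel: FilesConfigItem;
}
```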
package/src/const/settings/llm.ts

@@ -2,6 +2,9 @@ import { ModelProvider } from '@/libs/agent-runtime';
  import { genUserLLMConfig } from '@/utils/genUserLLMConfig';

  export const DEFAULT_LLM_CONFIG = genUserLLMConfig({
+   lmstudio: {
+     fetchOnClient: true,
+   },
    ollama: {
      enabled: true,
      fetchOnClient: true,

@@ -12,6 +15,12 @@ export const DEFAULT_LLM_CONFIG = genUserLLMConfig({
  });

  export const DEFAULT_MODEL = 'gpt-4o-mini';
+
  export const DEFAULT_EMBEDDING_MODEL = 'text-embedding-3-small';
+ export const DEFAULT_EMBEDDING_PROVIDER = ModelProvider.OpenAI;
+
+ export const DEFAULT_RERANK_MODEL = 'rerank-english-v3.0';
+ export const DEFAULT_RERANK_PROVIDER = 'cohere';
+ export const DEFAULT_RERANK_QUERY_MODE = 'full_text';

  export const DEFAULT_PROVIDER = ModelProvider.OpenAI;
package/src/database/schemas/ragEvals.ts

@@ -1,7 +1,7 @@
  /* eslint-disable sort-keys-fix/sort-keys-fix */
  import { integer, jsonb, pgTable, text, uuid } from 'drizzle-orm/pg-core';

- import { DEFAULT_EMBEDDING_MODEL, DEFAULT_MODEL } from '@/const/settings';
+ import { DEFAULT_MODEL } from '@/const/settings';
  import { EvalEvaluationStatus } from '@/types/eval';

  import { timestamps } from './_helpers';

@@ -60,7 +60,7 @@ export const evalEvaluation = pgTable('rag_eval_evaluations', {
      onDelete: 'cascade',
    }),
    languageModel: text('language_model').$defaultFn(() => DEFAULT_MODEL),
-   embeddingModel: text('embedding_model').$defaultFn(() => DEFAULT_EMBEDDING_MODEL),
+   embeddingModel: text('embedding_model'),

    userId: text('user_id').references(() => users.id, { onDelete: 'cascade' }),
    ...timestamps,
package/src/libs/agent-runtime/AgentRuntime.ts

@@ -20,6 +20,7 @@ import { LobeHigressAI } from './higress';
  import { LobeHuggingFaceAI } from './huggingface';
  import { LobeHunyuanAI } from './hunyuan';
  import { LobeInternLMAI } from './internlm';
+ import { LobeLMStudioAI } from './lmstudio';
  import { LobeMinimaxAI } from './minimax';
  import { LobeMistralAI } from './mistral';
  import { LobeMoonshotAI } from './moonshot';

@@ -147,6 +148,7 @@ class AgentRuntime {
      huggingface: { apiKey?: string; baseURL?: string };
      hunyuan: Partial<ClientOptions>;
      internlm: Partial<ClientOptions>;
+     lmstudio: Partial<ClientOptions>;
      minimax: Partial<ClientOptions>;
      mistral: Partial<ClientOptions>;
      moonshot: Partial<ClientOptions>;

@@ -207,6 +209,11 @@
        break;
      }

+     case ModelProvider.LMStudio: {
+       runtimeModel = new LobeLMStudioAI(params.lmstudio);
+       break;
+     }
+
      case ModelProvider.Ollama: {
        runtimeModel = new LobeOllamaAI(params.ollama);
        break;
package/src/libs/agent-runtime/bedrock/index.ts

@@ -1,12 +1,20 @@
  import {
    BedrockRuntimeClient,
+   InvokeModelCommand,
    InvokeModelWithResponseStreamCommand,
  } from '@aws-sdk/client-bedrock-runtime';
  import { experimental_buildLlama2Prompt } from 'ai/prompts';

  import { LobeRuntimeAI } from '../BaseAI';
  import { AgentRuntimeErrorType } from '../error';
- import { ChatCompetitionOptions, ChatStreamPayload, ModelProvider } from '../types';
+ import {
+   ChatCompetitionOptions,
+   ChatStreamPayload,
+   Embeddings,
+   EmbeddingsOptions,
+   EmbeddingsPayload,
+   ModelProvider,
+ } from '../types';
  import { buildAnthropicMessages, buildAnthropicTools } from '../utils/anthropicHelpers';
  import { AgentRuntimeError } from '../utils/createError';
  import { debugStream } from '../utils/debugStream';

@@ -32,9 +40,7 @@ export class LobeBedrockAI implements LobeRuntimeAI {
    constructor({ region, accessKeyId, accessKeySecret, sessionToken }: LobeBedrockAIParams = {}) {
      if (!(accessKeyId && accessKeySecret))
        throw AgentRuntimeError.createError(AgentRuntimeErrorType.InvalidBedrockCredentials);
-
      this.region = region ?? 'us-east-1';
-
      this.client = new BedrockRuntimeClient({
        credentials: {
          accessKeyId: accessKeyId,

@@ -50,6 +56,61 @@ export class LobeBedrockAI implements LobeRuntimeAI {

      return this.invokeClaudeModel(payload, options);
    }
+   /**
+    * Supports the Amazon Titan Text models series.
+    * Cohere Embed models are not supported
+    * because the current text size per request
+    * exceeds the maximum 2048 characters limit
+    * for a single request for this series of models.
+    * [bedrock embed guide] https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-embed.html
+    */
+   async embeddings(payload: EmbeddingsPayload, options?: EmbeddingsOptions): Promise<Embeddings[]> {
+     const input = Array.isArray(payload.input) ? payload.input : [payload.input];
+     const promises = input.map((inputText: string) =>
+       this.invokeEmbeddingModel(
+         {
+           dimensions: payload.dimensions,
+           input: inputText,
+           model: payload.model,
+         },
+         options,
+       ),
+     );
+     return Promise.all(promises);
+   }
+
+   private invokeEmbeddingModel = async (
+     payload: EmbeddingsPayload,
+     options?: EmbeddingsOptions,
+   ): Promise<Embeddings> => {
+     const command = new InvokeModelCommand({
+       accept: 'application/json',
+       body: JSON.stringify({
+         dimensions: payload.dimensions,
+         inputText: payload.input,
+         normalize: true,
+       }),
+       contentType: 'application/json',
+       modelId: payload.model,
+     });
+     try {
+       const res = await this.client.send(command, { abortSignal: options?.signal });
+       const responseBody = JSON.parse(new TextDecoder().decode(res.body));
+       return responseBody.embedding;
+     } catch (e) {
+       const err = e as Error & { $metadata: any };
+       throw AgentRuntimeError.chat({
+         error: {
+           body: err.$metadata,
+           message: err.message,
+           type: err.name,
+         },
+         errorType: AgentRuntimeErrorType.ProviderBizError,
+         provider: ModelProvider.Bedrock,
+         region: this.region,
+       });
+     }
+   };

    private invokeClaudeModel = async (
      payload: ChatStreamPayload,
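A rough usage sketch for the new `embeddings()` method: it issues one `InvokeModelCommand` per input string and returns the vectors in input order. The Titan model id below is only an illustrative example and does not come from this diff.

```ts
// Hedged sketch, assuming AWS credentials are provided via environment variables.
const bedrock = new LobeBedrockAI({
  accessKeyId: process.env.AWS_ACCESS_KEY_ID,
  accessKeySecret: process.env.AWS_SECRET_ACCESS_KEY,
  region: 'us-east-1',
});

const vectors = await bedrock.embeddings({
  dimensions: 1024,
  input: ['first chunk of text', 'second chunk of text'],
  model: 'amazon.titan-embed-text-v2:0', // example Titan Text Embeddings model id
});
// vectors: Embeddings[] — one numeric embedding per input string
```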
package/src/libs/agent-runtime/lmstudio/index.test.ts

@@ -0,0 +1,255 @@
+ // @vitest-environment node
+ import OpenAI from 'openai';
+ import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
+
+ import {
+   ChatStreamCallbacks,
+   LobeOpenAICompatibleRuntime,
+   ModelProvider,
+ } from '@/libs/agent-runtime';
+
+ import * as debugStreamModule from '../utils/debugStream';
+ import { LobeLMStudioAI } from './index';
+
+ const provider = ModelProvider.LMStudio;
+ const defaultBaseURL = 'http://localhost:1234/v1';
+
+ const bizErrorType = 'ProviderBizError';
+ const invalidErrorType = 'InvalidProviderAPIKey';
+
+ // Mock the console.error to avoid polluting test output
+ vi.spyOn(console, 'error').mockImplementation(() => {});
+
+ let instance: LobeOpenAICompatibleRuntime;
+
+ beforeEach(() => {
+   instance = new LobeLMStudioAI({ apiKey: 'test' });
+
+   // Use vi.spyOn to mock the chat.completions.create method
+   vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(
+     new ReadableStream() as any,
+   );
+ });
+
+ afterEach(() => {
+   vi.clearAllMocks();
+ });
+
+ describe('LobeLMStudioAI', () => {
+   describe('init', () => {
+     it('should correctly initialize with an API key', async () => {
+       const instance = new LobeLMStudioAI({ apiKey: 'test_api_key' });
+       expect(instance).toBeInstanceOf(LobeLMStudioAI);
+       expect(instance.baseURL).toEqual(defaultBaseURL);
+     });
+   });
+
+   describe('chat', () => {
+     describe('Error', () => {
+       it('should return OpenAIBizError with an openai error response when OpenAI.APIError is thrown', async () => {
+         // Arrange
+         const apiError = new OpenAI.APIError(
+           400,
+           {
+             status: 400,
+             error: {
+               message: 'Bad Request',
+             },
+           },
+           'Error message',
+           {},
+         );
+
+         vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
+
+         // Act
+         try {
+           await instance.chat({
+             messages: [{ content: 'Hello', role: 'user' }],
+             model: 'deepseek-chat',
+             temperature: 0,
+           });
+         } catch (e) {
+           expect(e).toEqual({
+             endpoint: defaultBaseURL,
+             error: {
+               error: { message: 'Bad Request' },
+               status: 400,
+             },
+             errorType: bizErrorType,
+             provider,
+           });
+         }
+       });
+
+       it('should throw AgentRuntimeError with NoOpenAIAPIKey if no apiKey is provided', async () => {
+         try {
+           new LobeLMStudioAI({});
+         } catch (e) {
+           expect(e).toEqual({ errorType: invalidErrorType });
+         }
+       });
+
+       it('should return OpenAIBizError with the cause when OpenAI.APIError is thrown with cause', async () => {
+         // Arrange
+         const errorInfo = {
+           stack: 'abc',
+           cause: {
+             message: 'api is undefined',
+           },
+         };
+         const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
+
+         vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
+
+         // Act
+         try {
+           await instance.chat({
+             messages: [{ content: 'Hello', role: 'user' }],
+             model: 'deepseek-chat',
+             temperature: 0,
+           });
+         } catch (e) {
+           expect(e).toEqual({
+             endpoint: defaultBaseURL,
+             error: {
+               cause: { message: 'api is undefined' },
+               stack: 'abc',
+             },
+             errorType: bizErrorType,
+             provider,
+           });
+         }
+       });
+
+       it('should return OpenAIBizError with an cause response with desensitize Url', async () => {
+         // Arrange
+         const errorInfo = {
+           stack: 'abc',
+           cause: { message: 'api is undefined' },
+         };
+         const apiError = new OpenAI.APIError(400, errorInfo, 'module error', {});
+
+         instance = new LobeLMStudioAI({
+           apiKey: 'test',
+
+           baseURL: 'https://api.abc.com/v1',
+         });
+
+         vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(apiError);
+
+         // Act
+         try {
+           await instance.chat({
+             messages: [{ content: 'Hello', role: 'user' }],
+             model: 'deepseek-chat',
+             temperature: 0,
+           });
+         } catch (e) {
+           expect(e).toEqual({
+             endpoint: 'https://api.***.com/v1',
+             error: {
+               cause: { message: 'api is undefined' },
+               stack: 'abc',
+             },
+             errorType: bizErrorType,
+             provider,
+           });
+         }
+       });
+
+       it('should throw an InvalidDeepSeekAPIKey error type on 401 status code', async () => {
+         // Mock the API call to simulate a 401 error
+         const error = new Error('Unauthorized') as any;
+         error.status = 401;
+         vi.mocked(instance['client'].chat.completions.create).mockRejectedValue(error);
+
+         try {
+           await instance.chat({
+             messages: [{ content: 'Hello', role: 'user' }],
+             model: 'deepseek-chat',
+             temperature: 0,
+           });
+         } catch (e) {
+           // Expect the chat method to throw an error with InvalidDeepSeekAPIKey
+           expect(e).toEqual({
+             endpoint: defaultBaseURL,
+             error: new Error('Unauthorized'),
+             errorType: invalidErrorType,
+             provider,
+           });
+         }
+       });
+
+       it('should return AgentRuntimeError for non-OpenAI errors', async () => {
+         // Arrange
+         const genericError = new Error('Generic Error');
+
+         vi.spyOn(instance['client'].chat.completions, 'create').mockRejectedValue(genericError);
+
+         // Act
+         try {
+           await instance.chat({
+             messages: [{ content: 'Hello', role: 'user' }],
+             model: 'deepseek-chat',
+             temperature: 0,
+           });
+         } catch (e) {
+           expect(e).toEqual({
+             endpoint: defaultBaseURL,
+             errorType: 'AgentRuntimeError',
+             provider,
+             error: {
+               name: genericError.name,
+               cause: genericError.cause,
+               message: genericError.message,
+               stack: genericError.stack,
+             },
+           });
+         }
+       });
+     });
+
+     describe('DEBUG', () => {
+       it('should call debugStream and return StreamingTextResponse when DEBUG_LMSTUDIO_CHAT_COMPLETION is 1', async () => {
+         // Arrange
+         const mockProdStream = new ReadableStream() as any; // mocked prod stream
+         const mockDebugStream = new ReadableStream({
+           start(controller) {
+             controller.enqueue('Debug stream content');
+             controller.close();
+           },
+         }) as any;
+         mockDebugStream.toReadableStream = () => mockDebugStream; // attach a toReadableStream method
+
+         // Mock the return value of chat.completions.create, including a mocked tee method
+         (instance['client'].chat.completions.create as Mock).mockResolvedValue({
+           tee: () => [mockProdStream, { toReadableStream: () => mockDebugStream }],
+         });
+
+         // Save the original environment variable value
+         const originalDebugValue = process.env.DEBUG_LMSTUDIO_CHAT_COMPLETION;
+
+         // Mock the environment variable
+         process.env.DEBUG_LMSTUDIO_CHAT_COMPLETION = '1';
+         vi.spyOn(debugStreamModule, 'debugStream').mockImplementation(() => Promise.resolve());
+
+         // Run the test
+         // Run the function under test and make sure it calls debugStream when the condition is met
+         // Adjust this call to the actual implementation if needed
+         await instance.chat({
+           messages: [{ content: 'Hello', role: 'user' }],
+           model: 'deepseek-chat',
+           stream: true,
+           temperature: 0,
+         });
+
+         // Verify that debugStream was called
+         expect(debugStreamModule.debugStream).toHaveBeenCalled();
+
+         // Restore the original environment variable value
+         process.env.DEBUG_LMSTUDIO_CHAT_COMPLETION = originalDebugValue;
+       });
+     });
+   });
+ });
package/src/libs/agent-runtime/lmstudio/index.ts

@@ -0,0 +1,11 @@
+ import { ModelProvider } from '../types';
+ import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
+
+ export const LobeLMStudioAI = LobeOpenAICompatibleFactory({
+   apiKey: 'placeholder-to-avoid-error',
+   baseURL: 'http://localhost:1234/v1',
+   debug: {
+     chatCompletion: () => process.env.DEBUG_LMSTUDIO_CHAT_COMPLETION === '1',
+   },
+   provider: ModelProvider.LMStudio,
+ });
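Because the runtime is produced by the OpenAI-compatible factory, calling it looks like any other OpenAI-style runtime. A minimal sketch (the model name is whatever model is currently loaded in LM Studio and is only an example; the factory's placeholder `apiKey` means no real key is needed):

```ts
// Sketch: stream a chat completion from a local LM Studio server.
const lmstudio = new LobeLMStudioAI({ baseURL: 'http://127.0.0.1:1234/v1' });

const response = await lmstudio.chat({
  messages: [{ content: 'Hello', role: 'user' }],
  model: 'qwen2.5-7b-instruct', // example: whichever model LM Studio has loaded
  stream: true,
  temperature: 0,
});
```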