@lobehub/chat 1.124.0 → 1.124.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.env.example +5 -0
- package/.github/scripts/pr-comment.js +11 -2
- package/.github/workflows/desktop-pr-build.yml +86 -12
- package/.github/workflows/release-desktop-beta.yml +91 -20
- package/CHANGELOG.md +58 -0
- package/Dockerfile +2 -0
- package/Dockerfile.database +2 -0
- package/Dockerfile.pglite +2 -0
- package/apps/desktop/electron-builder.js +8 -4
- package/changelog/v1.json +21 -0
- package/docs/self-hosting/environment-variables/model-provider.mdx +18 -0
- package/docs/self-hosting/environment-variables/model-provider.zh-CN.mdx +20 -0
- package/locales/ar/chat.json +2 -0
- package/locales/bg-BG/chat.json +2 -0
- package/locales/de-DE/chat.json +2 -0
- package/locales/en-US/chat.json +2 -0
- package/locales/es-ES/chat.json +2 -0
- package/locales/fa-IR/chat.json +2 -0
- package/locales/fr-FR/chat.json +2 -0
- package/locales/it-IT/chat.json +2 -0
- package/locales/ja-JP/chat.json +2 -0
- package/locales/ko-KR/chat.json +2 -0
- package/locales/nl-NL/chat.json +2 -0
- package/locales/pl-PL/chat.json +2 -0
- package/locales/pt-BR/chat.json +2 -0
- package/locales/ru-RU/chat.json +2 -0
- package/locales/tr-TR/chat.json +2 -0
- package/locales/vi-VN/chat.json +2 -0
- package/locales/zh-CN/chat.json +2 -0
- package/locales/zh-CN/modelProvider.json +1 -1
- package/locales/zh-TW/chat.json +2 -0
- package/package.json +1 -1
- package/packages/const/src/hotkeys.ts +1 -1
- package/packages/const/src/index.ts +1 -0
- package/packages/const/src/settings/hotkey.ts +3 -2
- package/packages/const/src/trace.ts +1 -1
- package/packages/const/src/user.ts +1 -2
- package/packages/database/src/client/db.test.ts +19 -13
- package/packages/electron-server-ipc/src/ipcClient.test.ts +783 -1
- package/packages/file-loaders/src/loadFile.test.ts +61 -0
- package/packages/file-loaders/src/utils/isTextReadableFile.test.ts +43 -0
- package/packages/file-loaders/src/utils/parser-utils.test.ts +155 -0
- package/packages/model-bank/src/aiModels/aihubmix.ts +38 -4
- package/packages/model-bank/src/aiModels/groq.ts +26 -8
- package/packages/model-bank/src/aiModels/hunyuan.ts +3 -3
- package/packages/model-bank/src/aiModels/modelscope.ts +13 -2
- package/packages/model-bank/src/aiModels/moonshot.ts +25 -5
- package/packages/model-bank/src/aiModels/novita.ts +40 -9
- package/packages/model-bank/src/aiModels/openrouter.ts +0 -13
- package/packages/model-bank/src/aiModels/qwen.ts +62 -1
- package/packages/model-bank/src/aiModels/siliconcloud.ts +20 -0
- package/packages/model-bank/src/aiModels/volcengine.ts +141 -15
- package/packages/model-runtime/package.json +2 -1
- package/packages/model-runtime/src/ai21/index.test.ts +2 -2
- package/packages/model-runtime/src/ai360/index.test.ts +2 -2
- package/packages/model-runtime/src/akashchat/index.test.ts +19 -0
- package/packages/model-runtime/src/anthropic/index.test.ts +1 -2
- package/packages/model-runtime/src/baichuan/index.test.ts +1 -2
- package/packages/model-runtime/src/bedrock/index.test.ts +1 -2
- package/packages/model-runtime/src/bfl/createImage.test.ts +1 -2
- package/packages/model-runtime/src/bfl/index.test.ts +1 -2
- package/packages/model-runtime/src/cloudflare/index.test.ts +1 -2
- package/packages/model-runtime/src/cohere/index.test.ts +19 -0
- package/packages/model-runtime/src/deepseek/index.test.ts +2 -2
- package/packages/model-runtime/src/fireworksai/index.test.ts +2 -2
- package/packages/model-runtime/src/giteeai/index.test.ts +2 -2
- package/packages/model-runtime/src/github/index.test.ts +2 -2
- package/packages/model-runtime/src/google/createImage.test.ts +1 -2
- package/packages/model-runtime/src/google/index.test.ts +1 -1
- package/packages/model-runtime/src/groq/index.test.ts +2 -3
- package/packages/model-runtime/src/huggingface/index.test.ts +40 -0
- package/packages/model-runtime/src/hunyuan/index.test.ts +2 -3
- package/packages/model-runtime/src/internlm/index.test.ts +2 -2
- package/packages/model-runtime/src/jina/index.test.ts +19 -0
- package/packages/model-runtime/src/lmstudio/index.test.ts +2 -2
- package/packages/model-runtime/src/minimax/index.test.ts +19 -0
- package/packages/model-runtime/src/mistral/index.test.ts +2 -3
- package/packages/model-runtime/src/modelscope/index.test.ts +19 -0
- package/packages/model-runtime/src/moonshot/index.test.ts +1 -2
- package/packages/model-runtime/src/nebius/index.test.ts +19 -0
- package/packages/model-runtime/src/newapi/index.test.ts +49 -42
- package/packages/model-runtime/src/newapi/index.ts +124 -143
- package/packages/model-runtime/src/novita/index.test.ts +3 -4
- package/packages/model-runtime/src/nvidia/index.test.ts +19 -0
- package/packages/model-runtime/src/openrouter/index.test.ts +2 -3
- package/packages/model-runtime/src/perplexity/index.test.ts +2 -3
- package/packages/model-runtime/src/ppio/index.test.ts +3 -4
- package/packages/model-runtime/src/qwen/index.test.ts +2 -2
- package/packages/model-runtime/src/sambanova/index.test.ts +19 -0
- package/packages/model-runtime/src/search1api/index.test.ts +19 -0
- package/packages/model-runtime/src/sensenova/index.test.ts +2 -2
- package/packages/model-runtime/src/spark/index.test.ts +2 -2
- package/packages/model-runtime/src/stepfun/index.test.ts +2 -2
- package/packages/model-runtime/src/taichu/index.test.ts +4 -5
- package/packages/model-runtime/src/tencentcloud/index.test.ts +1 -1
- package/packages/model-runtime/src/togetherai/index.test.ts +1 -2
- package/packages/model-runtime/src/upstage/index.test.ts +1 -2
- package/packages/model-runtime/src/utils/openaiCompatibleFactory/index.test.ts +9 -7
- package/packages/model-runtime/src/utils/streams/anthropic.ts +2 -2
- package/packages/model-runtime/src/utils/streams/openai/openai.ts +20 -13
- package/packages/model-runtime/src/utils/streams/openai/responsesStream.test.ts +1 -2
- package/packages/model-runtime/src/utils/streams/openai/responsesStream.ts +2 -2
- package/packages/model-runtime/src/utils/streams/protocol.ts +2 -2
- package/packages/model-runtime/src/wenxin/index.test.ts +2 -3
- package/packages/model-runtime/src/xai/index.test.ts +2 -2
- package/packages/model-runtime/src/zeroone/index.test.ts +1 -2
- package/packages/model-runtime/src/zhipu/index.test.ts +2 -3
- package/packages/model-runtime/vitest.config.mts +0 -7
- package/packages/types/src/index.ts +2 -0
- package/packages/types/src/message/base.ts +1 -1
- package/packages/types/src/openai/chat.ts +2 -3
- package/packages/utils/package.json +2 -1
- package/packages/utils/src/_deprecated/parseModels.test.ts +1 -1
- package/packages/utils/src/_deprecated/parseModels.ts +1 -1
- package/packages/utils/src/client/topic.test.ts +1 -2
- package/packages/utils/src/client/topic.ts +1 -2
- package/packages/utils/src/electron/desktopRemoteRPCFetch.ts +1 -1
- package/packages/utils/src/fetch/fetchSSE.ts +7 -8
- package/packages/utils/src/fetch/parseError.ts +1 -3
- package/packages/utils/src/format.test.ts +1 -2
- package/packages/utils/src/index.ts +1 -0
- package/packages/utils/src/toolManifest.ts +1 -2
- package/packages/utils/src/trace.ts +1 -1
- package/packages/utils/vitest.config.mts +1 -1
- package/packages/web-crawler/src/__tests__/urlRules.test.ts +275 -0
- package/packages/web-crawler/src/crawImpl/__tests__/exa.test.ts +269 -0
- package/packages/web-crawler/src/crawImpl/__tests__/firecrawl.test.ts +284 -0
- package/packages/web-crawler/src/crawImpl/__tests__/naive.test.ts +234 -0
- package/packages/web-crawler/src/crawImpl/__tests__/tavily.test.ts +359 -0
- package/packages/web-crawler/src/utils/__tests__/errorType.test.ts +217 -0
- package/packages/web-crawler/vitest.config.mts +3 -0
- package/scripts/electronWorkflow/mergeMacReleaseFiles.ts +207 -0
- package/src/app/[variants]/(main)/settings/provider/(detail)/newapi/page.tsx +1 -1
- package/src/components/Thinking/index.tsx +2 -3
- package/src/config/llm.ts +8 -0
- package/src/features/ChatInput/Desktop/index.tsx +16 -4
- package/src/features/ChatInput/StoreUpdater.tsx +2 -0
- package/src/libs/traces/index.ts +1 -1
- package/src/locales/default/chat.ts +1 -0
- package/src/locales/default/modelProvider.ts +1 -1
- package/src/server/modules/ModelRuntime/trace.ts +1 -2
- package/src/store/chat/slices/aiChat/actions/__tests__/cancel-functionality.test.ts +107 -0
- package/src/store/chat/slices/aiChat/actions/__tests__/generateAIChatV2.test.ts +352 -7
- package/src/store/chat/slices/aiChat/actions/generateAIChatV2.ts +2 -1
- package/packages/model-runtime/src/openrouter/__snapshots__/index.test.ts.snap +0 -113
package/packages/model-runtime/src/newapi/index.ts

@@ -1,3 +1,4 @@
+import { LOBE_DEFAULT_MODEL_LIST } from 'model-bank';
 import urlJoin from 'url-join';
 
 import { createRouterRuntime } from '../RouterRuntime';
@@ -54,9 +55,6 @@ const getProviderFromOwnedBy = (ownedBy: string): string => {
   return 'openai';
 };
 
-// Global model route map, populated after the models function has run
-let globalModelRouteMap: Map<string, string> = new Map();
-
 export const LobeNewAPIAI = createRouterRuntime({
   debug: {
     chatCompletion: () => process.env.DEBUG_NEWAPI_CHAT_COMPLETION === '1',
@@ -66,180 +64,163 @@ export const LobeNewAPIAI = createRouterRuntime({
   },
   id: ModelProvider.NewAPI,
   models: async ({ client: openAIClient }) => {
-    //
-    …
-    const
-    …
-    //
-    …
-          (pricingData.data as NewAPIPricing[]).forEach((pricing) => {
-            pricingMap.set(pricing.model_name, pricing);
-          });
-        }
+    // Get the base URL (strip a trailing API version segment such as /v1 or /v1beta)
+    const baseURL = openAIClient.baseURL.replace(/\/v\d+[a-z]*\/?$/, '');
+
+    const modelsPage = (await openAIClient.models.list()) as any;
+    const modelList: NewAPIModelCard[] = modelsPage.data || [];
+
+    // Try to fetch pricing info to enrich the model details
+    let pricingMap: Map<string, NewAPIPricing> = new Map();
+    try {
+      // Use the saved baseURL
+      const pricingResponse = await fetch(`${baseURL}/api/pricing`, {
+        headers: {
+          Authorization: `Bearer ${openAIClient.apiKey}`,
+        },
+      });
+
+      if (pricingResponse.ok) {
+        const pricingData = await pricingResponse.json();
+        if (pricingData.success && pricingData.data) {
+          (pricingData.data as NewAPIPricing[]).forEach((pricing) => {
+            pricingMap.set(pricing.model_name, pricing);
+          });
         }
-    } catch (error) {
-      // If fetching pricing information fails, continue using the basic model information
-      console.debug('Failed to fetch NewAPI pricing info:', error);
       }
-    …
-    //
-    …
-            outputPrice = inputPrice * (pricing.completion_ratio || 1);
-    …
-            enhancedModel.pricing = {
-              input: inputPrice,
-              output: outputPrice,
-            };
-          }
+    } catch (error) {
+      // If fetching pricing information fails, continue using the basic model information
+      console.debug('Failed to fetch NewAPI pricing info:', error);
+    }
+
+    // Process the model list: determine the provider for each model based on priority rules
+    const enrichedModelList = modelList.map((model) => {
+      let enhancedModel: any = { ...model };
+
+      // 1. Attach pricing info
+      const pricing = pricingMap.get(model.id);
+      if (pricing) {
+        // NewAPI pricing logic:
+        // - quota_type: 0 = usage-based (per-token) billing, 1 = per-call billing
+        // - model_ratio: multiplier on the base price (base price = $0.002/1K tokens)
+        // - model_price: directly specified price (takes precedence)
+        // - completion_ratio: output price as a multiple of the input price
+        //
+        // Format LobeChat expects: USD per million tokens
+
+        let inputPrice: number | undefined;
+        let outputPrice: number | undefined;
+
+        if (pricing.quota_type === 0) {
+          // Usage-based billing
+          if (pricing.model_price && pricing.model_price > 0) {
+            // model_price is a direct price value; need to confirm its unit.
+            // Assumption: model_price is the price per 1,000 tokens (i.e., $/1K tokens).
+            // To convert to price per 1,000,000 tokens ($/1M tokens), multiply by 1,000,000 / 1,000 = 1,000.
+            // Since the base price is $0.002/1K tokens, multiplying by 2 gives $2/1M tokens.
+            // Therefore, inputPrice = model_price * 2 converts the price to $/1M tokens for LobeChat.
+            inputPrice = pricing.model_price * 2;
+          } else if (pricing.model_ratio) {
+            // model_ratio × $0.002/1K = model_ratio × $2/1M
+            inputPrice = pricing.model_ratio * 2; // convert to $/1M tokens
           }
-          // quota_type === 1 (per-call billing) is not supported yet
-        }
 
-        …
-            detectedProvider = 'google';
-          } else if (model.supported_endpoint_types.includes('xai')) {
-            detectedProvider = 'xai';
+          if (inputPrice !== undefined) {
+            // Compute the output price
+            outputPrice = inputPrice * (pricing.completion_ratio || 1);
+
+            enhancedModel.pricing = {
+              input: inputPrice,
+              output: outputPrice,
+            };
          }
        }
-      //
-      …
+        // quota_type === 1 (per-call billing) is not supported yet
+      }
+
+      // 2. Resolve the provider by priority and cache the route
+      let detectedProvider = 'openai'; // default
+
+      // Priority 1: use supported_endpoint_types
+      if (model.supported_endpoint_types && model.supported_endpoint_types.length > 0) {
+        if (model.supported_endpoint_types.includes('anthropic')) {
+          detectedProvider = 'anthropic';
+        } else if (model.supported_endpoint_types.includes('gemini')) {
+          detectedProvider = 'google';
+        } else if (model.supported_endpoint_types.includes('xai')) {
+          detectedProvider = 'xai';
        }
+      }
+      // Priority 2: use the owned_by field
+      else if (model.owned_by) {
+        detectedProvider = getProviderFromOwnedBy(model.owned_by);
+      }
+      // Priority 3: detect from the model name
+      else {
+        detectedProvider = detectModelProvider(model.id);
+      }
 
-      …
-      // Also update the global route map
-      globalModelRouteMap.set(model.id, detectedProvider);
+      // Attach the detected provider info to the model
+      enhancedModel._detectedProvider = detectedProvider;
 
-      …
+      return enhancedModel;
+    });
 
-      …
+    // Use processMultiProviderModelList to resolve model capabilities
+    const processedModels = await processMultiProviderModelList(enrichedModelList, 'newapi');
 
-    …
-    // Use the global model route map
-    const userBaseURL = options.baseURL?.replace(/\/v1\/?$/, '') || '';
-    …
-    return [
+    // Clean up the temporary field
+    return processedModels.map((model: any) => {
+      if (model._detectedProvider) {
+        delete model._detectedProvider;
+      }
+      return model;
+    });
+  },
+  routers: (options) => {
+    const userBaseURL = options.baseURL?.replace(/\/v\d+[a-z]*\/?$/, '') || '';
+
+    return [
      {
        apiType: 'anthropic',
-        models: () =>
-          …
-            .filter(([, provider]) => provider === 'anthropic')
-            .map(([modelId]) => modelId),
-        ),
+        models: LOBE_DEFAULT_MODEL_LIST.map((m) => m.id).filter(
+          (id) => detectModelProvider(id) === 'anthropic',
+        ),
        options: {
-          …
-          baseURL:
+          ...options,
+          baseURL: userBaseURL,
        },
      },
      {
        apiType: 'google',
-        models: () =>
-          …
-            .filter(([, provider]) => provider === 'google')
-            .map(([modelId]) => modelId),
-        ),
+        models: LOBE_DEFAULT_MODEL_LIST.map((m) => m.id).filter(
+          (id) => detectModelProvider(id) === 'google',
+        ),
        options: {
-          …
-          baseURL:
+          ...options,
+          baseURL: userBaseURL,
        },
      },
      {
        apiType: 'xai',
-        models: () =>
-          …
-            .filter(([, provider]) => provider === 'xai')
-            .map(([modelId]) => modelId),
-        ),
+        models: LOBE_DEFAULT_MODEL_LIST.map((m) => m.id).filter(
+          (id) => detectModelProvider(id) === 'xai',
+        ),
        options: {
-          …
+          ...options,
          baseURL: urlJoin(userBaseURL, '/v1'),
        },
      },
      {
        apiType: 'openai',
        options: {
+          ...options,
          baseURL: urlJoin(userBaseURL, '/v1'),
          chatCompletion: {
            handlePayload,
          },
        },
      },
-    …
+    ];
+  },
 });
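In short, the rewritten `models` hook fetches `/api/pricing` from the NewAPI deployment and converts each entry into the USD-per-million-token figures LobeChat stores, while provider routing now tags each model with a temporary `_detectedProvider` field instead of a module-level route map. The sketch below restates only the pricing conversion as a standalone function so the arithmetic is easier to follow; it is illustrative, not code from the package — `NewAPIPricingSubset` and `toLobeChatPricing` are hypothetical names, and the field list is trimmed to what the diff actually reads.

```ts
// Illustrative sketch of the conversion shown in the diff above (not part of the package).
// quota_type 0 = per-token billing; the base price is $0.002/1K tokens, so a
// ratio of 1 corresponds to $2 per million tokens.
interface NewAPIPricingSubset {
  completion_ratio?: number; // output price as a multiple of the input price
  model_price?: number; // directly specified price (takes precedence)
  model_ratio?: number; // multiplier on the $0.002/1K-token base price
  quota_type: 0 | 1; // 0 = per token, 1 = per call
}

const toLobeChatPricing = (
  pricing: NewAPIPricingSubset,
): { input: number; output: number } | undefined => {
  // Per-call billing (quota_type === 1) is not mapped.
  if (pricing.quota_type !== 0) return undefined;

  let input: number | undefined;
  if (pricing.model_price && pricing.model_price > 0) {
    input = pricing.model_price * 2; // direct price, scaled the same way as model_ratio
  } else if (pricing.model_ratio) {
    input = pricing.model_ratio * 2; // ratio × $0.002/1K tokens = ratio × $2/1M tokens
  }
  if (input === undefined) return undefined;

  return { input, output: input * (pricing.completion_ratio || 1) };
};

// Example: model_ratio = 5 and completion_ratio = 3 yield $10/1M input and $30/1M output tokens.
```

The same hunk also normalizes the client's base URL with `/\/v\d+[a-z]*\/?$/`, so both `https://example.com/v1` and `https://example.com/v1beta/` reduce to `https://example.com` before `/api/pricing` is appended.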
package/packages/model-runtime/src/novita/index.test.ts

@@ -1,10 +1,9 @@
 // @vitest-environment node
+import { LobeOpenAICompatibleRuntime } from '@lobechat/model-runtime';
+import { ModelProvider } from '@lobechat/model-runtime';
 import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
 
-import {
-import { ModelProvider } from '@/libs/model-runtime';
-import { testProvider } from '@/libs/model-runtime/providerTestUtils';
-
+import { testProvider } from '../providerTestUtils';
 import models from './fixtures/models.json';
 import { LobeNovitaAI } from './index';
 
package/packages/model-runtime/src/nvidia/index.test.ts

@@ -0,0 +1,19 @@
+// @vitest-environment node
+import { ModelProvider } from '@lobechat/model-runtime';
+
+import { testProvider } from '../providerTestUtils';
+import { LobeNvidiaAI } from './index';
+
+const provider = ModelProvider.Nvidia;
+const defaultBaseURL = 'https://integrate.api.nvidia.com/v1';
+
+testProvider({
+  Runtime: LobeNvidiaAI,
+  provider,
+  defaultBaseURL,
+  chatDebugEnv: 'DEBUG_NVIDIA_CHAT_COMPLETION',
+  chatModel: 'meta/llama-3.1-8b-instruct',
+  test: {
+    skipAPICall: true,
+  },
+});
package/packages/model-runtime/src/openrouter/index.test.ts

@@ -1,9 +1,8 @@
 // @vitest-environment node
+import { LobeOpenAICompatibleRuntime } from '@lobechat/model-runtime';
 import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
 
-import {
-import { testProvider } from '@/libs/model-runtime/providerTestUtils';
-
+import { testProvider } from '../providerTestUtils';
 import models from './fixtures/models.json';
 import { LobeOpenRouterAI } from './index';
 
package/packages/model-runtime/src/perplexity/index.test.ts

@@ -1,9 +1,8 @@
 // @vitest-environment node
+import { LobeOpenAICompatibleRuntime, ModelProvider } from '@lobechat/model-runtime';
 import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
 
-import {
-import { testProvider } from '@/libs/model-runtime/providerTestUtils';
-
+import { testProvider } from '../providerTestUtils';
 import { LobePerplexityAI } from './index';
 
 testProvider({
package/packages/model-runtime/src/ppio/index.test.ts

@@ -1,10 +1,9 @@
 // @vitest-environment node
+import { LobeOpenAICompatibleRuntime } from '@lobechat/model-runtime';
+import { ModelProvider } from '@lobechat/model-runtime';
 import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
 
-import {
-import { ModelProvider } from '@/libs/model-runtime';
-import { testProvider } from '@/libs/model-runtime/providerTestUtils';
-
+import { testProvider } from '../providerTestUtils';
 import models from './fixtures/models.json';
 import { LobePPIOAI } from './index';
 
package/packages/model-runtime/src/qwen/index.test.ts

@@ -1,7 +1,7 @@
 // @vitest-environment node
-import { ModelProvider } from '
-import { testProvider } from '@/libs/model-runtime/providerTestUtils';
+import { ModelProvider } from '@lobechat/model-runtime';
 
+import { testProvider } from '../providerTestUtils';
 import { LobeQwenAI } from './index';
 
 const provider = ModelProvider.Qwen;
package/packages/model-runtime/src/sambanova/index.test.ts

@@ -0,0 +1,19 @@
+// @vitest-environment node
+import { ModelProvider } from '@lobechat/model-runtime';
+
+import { testProvider } from '../providerTestUtils';
+import { LobeSambaNovaAI } from './index';
+
+const provider = ModelProvider.SambaNova;
+const defaultBaseURL = 'https://api.sambanova.ai/v1';
+
+testProvider({
+  Runtime: LobeSambaNovaAI,
+  provider,
+  defaultBaseURL,
+  chatDebugEnv: 'DEBUG_SAMBANOVA_CHAT_COMPLETION',
+  chatModel: 'Meta-Llama-3.1-8B-Instruct',
+  test: {
+    skipAPICall: true,
+  },
+});
package/packages/model-runtime/src/search1api/index.test.ts

@@ -0,0 +1,19 @@
+// @vitest-environment node
+import { ModelProvider } from '@lobechat/model-runtime';
+
+import { testProvider } from '../providerTestUtils';
+import { LobeSearch1API } from './index';
+
+const provider = ModelProvider.Search1API;
+const defaultBaseURL = 'https://api.search1api.com/v1';
+
+testProvider({
+  Runtime: LobeSearch1API,
+  provider,
+  defaultBaseURL,
+  chatDebugEnv: 'DEBUG_SEARCH1API_CHAT_COMPLETION',
+  chatModel: 'gpt-4o-mini',
+  test: {
+    skipAPICall: true,
+  },
+});
package/packages/model-runtime/src/sensenova/index.test.ts

@@ -1,7 +1,7 @@
 // @vitest-environment node
-import { ModelProvider } from '
-import { testProvider } from '@/libs/model-runtime/providerTestUtils';
+import { ModelProvider } from '@lobechat/model-runtime';
 
+import { testProvider } from '../providerTestUtils';
 import { LobeSenseNovaAI } from './index';
 
 const provider = ModelProvider.SenseNova;
package/packages/model-runtime/src/spark/index.test.ts

@@ -1,7 +1,7 @@
 // @vitest-environment node
-import { ModelProvider } from '
-import { testProvider } from '@/libs/model-runtime/providerTestUtils';
+import { ModelProvider } from '@lobechat/model-runtime';
 
+import { testProvider } from '../providerTestUtils';
 import { LobeSparkAI } from './index';
 
 const provider = ModelProvider.Spark;
package/packages/model-runtime/src/stepfun/index.test.ts

@@ -1,7 +1,7 @@
 // @vitest-environment node
-import { ModelProvider } from '
-import { testProvider } from '@/libs/model-runtime/providerTestUtils';
+import { ModelProvider } from '@lobechat/model-runtime';
 
+import { testProvider } from '../providerTestUtils';
 import { LobeStepfunAI } from './index';
 
 const provider = ModelProvider.Stepfun;
package/packages/model-runtime/src/taichu/index.test.ts

@@ -1,15 +1,14 @@
 // @vitest-environment node
-import OpenAI from 'openai';
-import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
-
 import {
   ChatStreamCallbacks,
   LobeMoonshotAI,
   LobeOpenAICompatibleRuntime,
   ModelProvider,
-} from '
-import
+} from '@lobechat/model-runtime';
+import OpenAI from 'openai';
+import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
 
+import { testProvider } from '../providerTestUtils';
 import * as debugStreamModule from '../utils/debugStream';
 import { LobeTaichuAI } from './index';
 
package/packages/model-runtime/src/utils/openaiCompatibleFactory/index.test.ts

@@ -1,22 +1,24 @@
 // @vitest-environment node
-import OpenAI from 'openai';
-import type { Stream } from 'openai/streaming';
-import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
-
 import {
   AgentRuntimeErrorType,
   ChatStreamCallbacks,
   ChatStreamPayload,
   LobeOpenAICompatibleRuntime,
   ModelProvider,
-} from '
-import
-import {
+} from '@lobechat/model-runtime';
+import OpenAI from 'openai';
+import type { Stream } from 'openai/streaming';
+import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
 
 import * as debugStreamModule from '../debugStream';
 import * as openaiHelpers from '../openaiHelpers';
 import { createOpenAICompatibleRuntime } from './index';
 
+const sleep = async (ms: number) =>
+  await new Promise((resolve) => {
+    setTimeout(resolve, ms);
+  });
+
 const provider = 'groq';
 const defaultBaseURL = 'https://api.groq.com/openai/v1';
 const bizErrorType = 'ProviderBizError';
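The `sleep` helper added here is a plain timer wrapper for tests that need to let pending timers or stream callbacks finish before asserting. A minimal, hypothetical usage (not a test from this suite) looks like this:

```ts
// Hypothetical example only — shows how a helper like `sleep` is typically awaited in a test.
import { describe, expect, it } from 'vitest';

const sleep = async (ms: number) =>
  await new Promise((resolve) => {
    setTimeout(resolve, ms);
  });

describe('sleep helper', () => {
  it('lets queued timer work finish before asserting', async () => {
    const received: string[] = [];
    setTimeout(() => received.push('chunk'), 5); // stand-in for async stream handling

    await sleep(20); // give the timer a chance to fire
    expect(received).toContain('chunk');
  });
});
```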
package/packages/model-runtime/src/utils/streams/anthropic.ts

@@ -1,7 +1,7 @@
 import Anthropic from '@anthropic-ai/sdk';
 import type { Stream } from '@anthropic-ai/sdk/streaming';
 
-import {
+import { ChatCitationItem, ModelTokensUsage } from '@/types/message';
 
 import { ChatStreamCallbacks } from '../../types';
 import {
@@ -180,7 +180,7 @@ export const transformAnthropicStream = (
         context.returnedCitationArray.push({
           title: citations.title,
           url: citations.url,
-        } as
+        } as ChatCitationItem);
       }
 
       return { data: null, id: context.id, type: 'text' };