@lobehub/chat 1.19.27 → 1.19.29

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -2,6 +2,56 @@
2
2
 
3
3
  # Changelog
4
4
 
5
+ ### [Version 1.19.29](https://github.com/lobehub/lobe-chat/compare/v1.19.28...v1.19.29)
6
+
7
+ <sup>Released on **2024-09-24**</sup>
8
+
9
+ #### 💄 Styles
10
+
11
+ - **misc**: Update taichu provider info & add taichu vision model.
12
+
13
+ <br/>
14
+
15
+ <details>
16
+ <summary><kbd>Improvements and Fixes</kbd></summary>
17
+
18
+ #### Styles
19
+
20
+ - **misc**: Update taichu provider info & add taichu vision model, closes [#4114](https://github.com/lobehub/lobe-chat/issues/4114) ([e5331db](https://github.com/lobehub/lobe-chat/commit/e5331db))
21
+
22
+ </details>
23
+
24
+ <div align="right">
25
+
26
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
27
+
28
+ </div>
29
+
30
+ ### [Version 1.19.28](https://github.com/lobehub/lobe-chat/compare/v1.19.27...v1.19.28)
31
+
32
+ <sup>Released on **2024-09-24**</sup>
33
+
34
+ #### 💄 Styles
35
+
36
+ - **misc**: Add function call support for Stepfun.
37
+
38
+ <br/>
39
+
40
+ <details>
41
+ <summary><kbd>Improvements and Fixes</kbd></summary>
42
+
43
+ #### Styles
44
+
45
+ - **misc**: Add function call support for Stepfun, closes [#4101](https://github.com/lobehub/lobe-chat/issues/4101) ([8d7d96e](https://github.com/lobehub/lobe-chat/commit/8d7d96e))
46
+
47
+ </details>
48
+
49
+ <div align="right">
50
+
51
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
52
+
53
+ </div>
54
+
5
55
  ### [Version 1.19.27](https://github.com/lobehub/lobe-chat/compare/v1.19.26...v1.19.27)
6
56
 
7
57
  <sup>Released on **2024-09-24**</sup>
@@ -114,6 +114,13 @@ If you need to use Azure OpenAI to provide model services, you can refer to the
114
114
  - Default: `https://generativelanguage.googleapis.com`
115
115
  - Example: `https://api.genai.gd.edu.kg/google`
116
116
 
117
+ ### `GOOGLE_MODEL_LIST`
118
+
119
+ - Type: Optional
120
+ - Description: Used to control the model list, use `+` to add a model, use `-` to hide a model, use `model_name=display_name` to customize the display name of a model, separated by commas. Definition syntax rules see [model-list][model-list]
121
+ - Default: `-`
122
+ - Example: `-all,+gemini-1.5-flash-latest,+gemini-1.5-pro-latest`
123
+
117
124
  ## Anthropic AI
118
125
 
119
126
  ### `ANTHROPIC_API_KEY`
@@ -185,6 +192,22 @@ If you need to use Azure OpenAI to provide model services, you can refer to the
185
192
  - Default: `-`
186
193
  - Example: `-all,+01-ai/yi-34b-chat,+huggingfaceh4/zephyr-7b-beta`
187
194
 
195
+ ## Github
196
+
197
+ ### `GITHUB_TOKEN`
198
+
199
+ - Type: Required
200
+ - Description: This is the personal access token you obtained from GitHub
201
+ - Default: `-`
202
+ - Example:`ghp_xxxxxx...xxxxxx=`
203
+
204
+ ### `GITHUB_MODEL_LIST`
205
+
206
+ - Type: Optional
207
+ - Description: Used to control the model list, use `+` to add a model, use `-` to hide a model, use `model_name=display_name` to customize the display name of a model, separated by commas. Definition syntax rules see [model-list][model-list]
208
+ - Default: `-`
209
+ - Example: `-all,+gpt-4o,+gpt-4o-mini`
210
+
188
211
  ## TogetherAI
189
212
 
190
213
  ### `TOGETHERAI_API_KEY`
@@ -199,7 +222,23 @@ If you need to use Azure OpenAI to provide model services, you can refer to the
199
222
  - Type: Optional
200
223
  - Description: Used to specify a custom TogetherAI model list. Model definition syntax rules see [model-list][model-list]
201
224
  - Default: `-`
202
- - Example: `01-ai/yi-34b-chat`
225
+ - Example: `-all,+meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo,+meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo`
226
+
227
+ ## Fireworks AI
228
+
229
+ ### `FIREWORKSAI_API_KEY`
230
+
231
+ - Type: Required
232
+ - Description: This is the API key you applied for in the Fireworks AI service
233
+ - Default: `-`
234
+ - Example:`xxxxxx...xxxxxx`
235
+
236
+ ### `FIREWORKSAI_MODEL_LIST`
237
+
238
+ - Type: Optional
239
+ - Description: Used to control the model list, use `+` to add a model, use `-` to hide a model, use `model_name=display_name` to customize the display name of a model, separated by commas. Definition syntax rules see [model-list][model-list]
240
+ - Default: `-`
241
+ - Example: `-all,+accounts/fireworks/models/firefunction-v2,+accounts/fireworks/models/firefunction-v1`
203
242
 
204
243
  ## Ollama
205
244
 
@@ -242,6 +281,13 @@ If you need to use Azure OpenAI to provide model services, you can refer to the
242
281
  - Default: -
243
282
  - Example: `pplx-xxxxxx...xxxxxx`
244
283
 
284
+ ### `PERPLEXITY_PROXY_URL`
285
+
286
+ - Type: Optional
287
+ - Description: If you manually configure the Perplexity API proxy, you can use this configuration item to override the default Perplexity API request base URL
288
+ - Default: `https://api.perplexity.ai`
289
+ - Example: `https://my-perplexity-proxy.com`
290
+
245
291
  ## Minimax AI
246
292
 
247
293
  ### `MINIMAX_API_KEY`
@@ -269,6 +315,20 @@ If you need to use Azure OpenAI to provide model services, you can refer to the
269
315
  - Default: -
270
316
  - Example: `gsk_xxxxxx...xxxxxx`
271
317
 
318
+ ### `GROQ_PROXY_URL`
319
+
320
+ - Type: Optional
321
+ - Description: If you manually configure the Groq API proxy, you can use this configuration item to override the default Groq API request base URL
322
+ - Default: `https://api.groq.com/openai/v1`
323
+ - Example: `https://my-groq-proxy.com/v1`
324
+
325
+ ### `GROQ_MODEL_LIST`
326
+
327
+ - Type: Optional
328
+ - Description: Used to control the model list, use `+` to add a model, use `-` to hide a model, use `model_name=display_name` to customize the display name of a model, separated by commas. Definition syntax rules see [model-list][model-list]
329
+ - Default: `-`
330
+ - Example: `-all,+gemma2-9b-it,+llama-3.1-8b-instant`
331
+
272
332
  ## ZHIPU AI
273
333
 
274
334
  ### `ZHIPU_API_KEY`
@@ -278,6 +338,13 @@ If you need to use Azure OpenAI to provide model services, you can refer to the
278
338
  - Default: -
279
339
  - Example: `4582d332441a313f5c2ed9824d1798ca.rC8EcTAhgbOuAuVT`
280
340
 
341
+ ### `ZHIPU_MODEL_LIST`
342
+
343
+ - Type: Optional
344
+ - Description: Used to control the model list, use `+` to add a model, use `-` to hide a model, use `model_name=display_name` to customize the display name of a model, separated by commas. Definition syntax rules see [model-list][model-list]
345
+ - Default: `-`
346
+ - Example: `-all,+glm-4-alltools,+glm-4-plus`
347
+
281
348
  ## 01.AI
282
349
 
283
350
  ### `ZEROONE_API_KEY`
@@ -287,6 +354,13 @@ If you need to use Azure OpenAI to provide model services, you can refer to the
287
354
  - Default: -
288
355
  - Example:`xxxxxx...xxxxxx`
289
356
 
357
+ ### `ZEROONE_MODEL_LIST`
358
+
359
+ - Type: Optional
360
+ - Description: Used to control the model list, use `+` to add a model, use `-` to hide a model, use `model_name=display_name` to customize the display name of a model, separated by commas. Definition syntax rules see [model-list][model-list]
361
+ - Default: `-`
362
+ - Example: `-all,+yi-large,+yi-large-rag`
363
+
290
364
  ## Qwen
291
365
 
292
366
  ### `QWEN_API_KEY`
@@ -296,4 +370,113 @@ If you need to use Azure OpenAI to provide model services, you can refer to the
296
370
  - Default: -
297
371
  - Example:`sk-xxxxx...xxxxx`
298
372
 
373
+ ### `QWEN_MODEL_LIST`
374
+
375
+ - Type: Optional
376
+ - Description: Used to control the model list, use `+` to add a model, use `-` to hide a model, use `model_name=display_name` to customize the display name of a model, separated by commas. Definition syntax rules see [model-list][model-list]
377
+ - Default: `-`
378
+ - Example: `-all,+qwen-turbo-latest,+qwen-plus-latest`
379
+
380
+ ## Stepfun AI
381
+
382
+ ### `STEPFUN_API_KEY`
383
+
384
+ - Type: Required
385
+ - Description: This is the API key you can obtain from the Stepfun AI service
386
+ - Default: -
387
+ - Example:`sk-xxxxx...xxxxx`
388
+
389
+ ## Novita AI
390
+
391
+ ### `NOVITA_API_KEY`
392
+
393
+ - Type: Required
394
+ - Description: This is the API key you applied for in the Novita AI service
395
+ - Default: -
396
+ - Example:`xxxxxx...xxxxxx`
397
+
398
+ ### `NOVITA_MODEL_LIST`
399
+
400
+ - Type: Optional
401
+ - Description: Used to control the model list, use `+` to add a model, use `-` to hide a model, use `model_name=display_name` to customize the display name of a model, separated by commas. Definition syntax rules see [model-list][model-list]
402
+ - Default: `-`
403
+ - Example: `-all,+meta-llama/llama-3.1-8b-instruct,+meta-llama/llama-3.1-70b-instruct`
404
+
405
+ ## BAICHUAN
406
+
407
+ ### `BAICHUAN_API_KEY`
408
+
409
+ - Type: Required
410
+ - Description: This is the API key you applied for in the BAICHUAN service
411
+ - Default: -
412
+ - Example:`xxxxxx...xxxxxx`
413
+
414
+ ## TAICHU
415
+
416
+ ### `TAICHU_API_KEY`
417
+
418
+ - Type: Required
419
+ - Description: This is the API key you applied for in the TAICHU service
420
+ - Default: -
421
+ - Example:`xxxxxx...xxxxxx`
422
+
423
+ ## 360 AI
424
+
425
+ ### `AI360_API_KEY`
426
+
427
+ - Type: Required
428
+ - Description: This is the API key you applied for in the 360 AI service
429
+ - Default: -
430
+ - Example:`xxxxxx...xxxxxx`
431
+
432
+ ## Siliconflow
433
+
434
+ ### `SILICONCLOUD_API_KEY`
435
+
436
+ - Type: Required
437
+ - Description: This is the API key you applied from Siliconflow service
438
+ - Default: -
439
+ - Example: `xxxxxx...xxxxxx`
440
+
441
+ ### `SILICONCLOUD_PROXY_URL`
442
+
443
+ - Type: Optional
444
+ - Description: If you manually configure the Siliconflow API proxy, you can use this configuration item to override the default Siliconflow API request base URL
445
+ - Default: `https://api.siliconflow.cn/v1`
446
+ - Example: `https://my-siliconflow-proxy.com/v1`
447
+
448
+ ### `SILICONCLOUD_MODEL_LIST`
449
+
450
+ - Type: Optional
451
+ - Description: Used to control the model list, use `+` to add a model, use `-` to hide a model, use `model_name=display_name` to customize the display name of a model, separated by commas. Definition syntax rules see [model-list][model-list]
452
+ - Default: `-`
453
+ - Example: `-all,+deepseek-ai/DeepSeek-V2.5,+Qwen/Qwen2.5-7B-Instruct`
454
+
455
+ ## Upstage AI
456
+
457
+ ### `UPSTAGE_API_KEY`
458
+
459
+ - Type: Required
460
+ - Description: This is the API key you applied from Upstage AI service
461
+ - Default: -
462
+ - Example: `xxxxxx...xxxxxx`
463
+
464
+ ## Spark AI
465
+
466
+ ### `SPARK_API_KEY`
467
+
468
+ - Type: Required
469
+ - Description: This is the API key you applied from Spark AI service
470
+ - Default: -
471
+ - Example: `xxxxxx...xxxxxx`
472
+
473
+ ## AI21 Labs
474
+
475
+ ### `AI21_API_KEY`
476
+
477
+ - Type: Required
478
+ - Description: This is the API key you applied for in the AI21 Labs service
479
+ - Default: -
480
+ - Example: `xxxxxx...xxxxxx`
481
+
299
482
  [model-list]: /docs/self-hosting/advanced/model-list
@@ -112,6 +112,13 @@ LobeChat 在部署时提供了丰富的模型服务商相关的环境变量,
112
112
  - 默认值:`https://generativelanguage.googleapis.com`
113
113
  - 示例:`https://api.genai.gd.edu.kg/google`
114
114
 
115
+ ### `GOOGLE_MODEL_LIST`
116
+
117
+ - 类型:可选
118
+ - 描述:用来控制模型列表,使用 `+` 增加一个模型,使用 `-` 来隐藏一个模型,使用 `模型名=展示名<扩展配置>` 来自定义模型的展示名,用英文逗号隔开。模型定义语法规则见 [模型列表][model-list]
119
+ - 默认值:`-`
120
+ - 示例:`-all,+gemini-1.5-flash-latest,+gemini-1.5-pro-latest`
121
+
115
122
  ## Anthropic AI
116
123
 
117
124
  ### `ANTHROPIC_API_KEY`
@@ -179,10 +186,26 @@ LobeChat 在部署时提供了丰富的模型服务商相关的环境变量,
179
186
  ### `OPENROUTER_MODEL_LIST`
180
187
 
181
188
  - 类型:可选
182
- - 描述:用于指定自定义 OpenRouter 模型列表。模型定义语法规则见 [模型列表][model-list]
189
+ - 描述:用来控制模型列表,使用 `+` 增加一个模型,使用 `-` 来隐藏一个模型,使用 `模型名=展示名<扩展配置>` 来自定义模型的展示名,用英文逗号隔开。模型定义语法规则见 [模型列表][model-list]
183
190
  - 默认值:`-`
184
191
  - 示例:`-all,+01-ai/yi-34b-chat,+huggingfaceh4/zephyr-7b-beta`
185
192
 
193
+ ## Github
194
+
195
+ ### `GITHUB_TOKEN`
196
+
197
+ - 类型:必选
198
+ - 描述:这是你在 Github 申请的 Personal access tokens
199
+ - 默认值:-
200
+ - 示例:`ghp_xxxxxx...xxxxxx=`
201
+
202
+ ### `GITHUB_MODEL_LIST`
203
+
204
+ - 类型:可选
205
+ - 描述:用来控制模型列表,使用 `+` 增加一个模型,使用 `-` 来隐藏一个模型,使用 `模型名=展示名<扩展配置>` 来自定义模型的展示名,用英文逗号隔开。模型定义语法规则见 [模型列表][model-list]
206
+ - 默认值:`-`
207
+ - 示例:`-all,+gpt-4o,+gpt-4o-mini`
208
+
186
209
  ## TogetherAI
187
210
 
188
211
  ### `TOGETHERAI_API_KEY`
@@ -195,9 +218,25 @@ LobeChat 在部署时提供了丰富的模型服务商相关的环境变量,
195
218
  ### `TOGETHERAI_MODEL_LIST`
196
219
 
197
220
  - 类型:可选
198
- - 描述:用于指定自定义 Together AI 的模型列表。模型定义语法规则见 [模型列表][model-list]
221
+ - 描述:用来控制模型列表,使用 `+` 增加一个模型,使用 `-` 来隐藏一个模型,使用 `模型名=展示名<扩展配置>` 来自定义模型的展示名,用英文逗号隔开。模型定义语法规则见 [模型列表][model-list]
199
222
  - 默认值:`-`
200
- - 示例:`01-ai/yi-34b-chat`
223
+ - 示例:`-all,+meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo,+meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo`
224
+
225
+ ## Fireworks AI
226
+
227
+ ### `FIREWORKSAI_API_KEY`
228
+
229
+ - 类型:必选
230
+ - 描述:这是你在 Fireworks AI 服务中申请的 API 密钥
231
+ - 默认值:-
232
+ - 示例:`xxxxxx...xxxxxx`
233
+
234
+ ### `FIREWORKSAI_MODEL_LIST`
235
+
236
+ - 类型:可选
237
+ - 描述:用来控制模型列表,使用 `+` 增加一个模型,使用 `-` 来隐藏一个模型,使用 `模型名=展示名<扩展配置>` 来自定义模型的展示名,用英文逗号隔开。模型定义语法规则见 [模型列表][model-list]
238
+ - 默认值:`-`
239
+ - 示例:`-all,+accounts/fireworks/models/firefunction-v2,+accounts/fireworks/models/firefunction-v1`
201
240
 
202
241
  ## Ollama
203
242
 
@@ -240,6 +279,13 @@ LobeChat 在部署时提供了丰富的模型服务商相关的环境变量,
240
279
  - 默认值:-
241
280
  - 示例:`pplx-xxxxxx...xxxxxx`
242
281
 
282
+ ### `PERPLEXITY_PROXY_URL`
283
+
284
+ - 类型:可选
285
+ - 描述:如果你手动配置了 Perplexity 接口代理,可以使用此配置项来覆盖默认的 Perplexity API 请求基础 URL
286
+ - 默认值:`https://api.perplexity.ai`
287
+ - 示例:`https://my-perplexity-proxy.com`
288
+
243
289
  ## Minimax AI
244
290
 
245
291
  ### `MINIMAX_API_KEY`
@@ -267,6 +313,20 @@ LobeChat 在部署时提供了丰富的模型服务商相关的环境变量,
267
313
  - 默认值:-
268
314
  - 示例:`gsk_xxxxxx...xxxxxx`
269
315
 
316
+ ### `GROQ_MODEL_LIST`
317
+
318
+ - 类型:可选
319
+ - 描述:用来控制模型列表,使用 `+` 增加一个模型,使用 `-` 来隐藏一个模型,使用 `模型名=展示名<扩展配置>` 来自定义模型的展示名,用英文逗号隔开。模型定义语法规则见 [模型列表][model-list]
320
+ - 默认值:`-`
321
+ - 示例:`-all,+gemma2-9b-it,+llama-3.1-8b-instant`
322
+
323
+ ### `GROQ_PROXY_URL`
324
+
325
+ - 类型:可选
326
+ - 描述:如果你手动配置了 Groq 接口代理,可以使用此配置项来覆盖默认的 Groq API 请求基础 URL
327
+ - 默认值:`https://api.groq.com/openai/v1`
328
+ - 示例:`https://my-groq-proxy.com/v1`
329
+
270
330
  ## 智谱 AI
271
331
 
272
332
  ### `ZHIPU_API_KEY`
@@ -276,6 +336,13 @@ LobeChat 在部署时提供了丰富的模型服务商相关的环境变量,
276
336
  - 默认值:-
277
337
  - 示例:`4582d332441a313f5c2ed9824d1798ca.rC8EcTAhgbOuAuVT`
278
338
 
339
+ ### `ZHIPU_MODEL_LIST`
340
+
341
+ - 类型:可选
342
+ - 描述:用来控制模型列表,使用 `+` 增加一个模型,使用 `-` 来隐藏一个模型,使用 `模型名=展示名<扩展配置>` 来自定义模型的展示名,用英文逗号隔开。模型定义语法规则见 [模型列表][model-list]
343
+ - 默认值:`-`
344
+ - 示例:`-all,+glm-4-alltools,+glm-4-plus`
345
+
279
346
  ## 01 AI
280
347
 
281
348
  ### `ZEROONE_API_KEY`
@@ -285,7 +352,12 @@ LobeChat 在部署时提供了丰富的模型服务商相关的环境变量,
285
352
  - 默认值:-
286
353
  - 示例:`xxxxxx...xxxxxx`
287
354
 
288
- [model-list]: /zh/docs/self-hosting/advanced/model-list
355
+ ### `ZEROONE_MODEL_LIST`
356
+
357
+ - 类型:可选
358
+ - 描述:用来控制模型列表,使用 `+` 增加一个模型,使用 `-` 来隐藏一个模型,使用 `模型名=展示名<扩展配置>` 来自定义模型的展示名,用英文逗号隔开。模型定义语法规则见 [模型列表][model-list]
359
+ - 默认值:`-`
360
+ - 示例:`-all,+yi-large,+yi-large-rag`
289
361
 
290
362
  ## 通义千问
291
363
 
@@ -295,3 +367,114 @@ LobeChat 在部署时提供了丰富的模型服务商相关的环境变量,
295
367
  - 描述:这是你在阿里云百炼平台上获取的 DashScope API 密钥
296
368
  - 默认值:-
297
369
  - 示例:`sk-xxxxx...xxxxx`
370
+
371
+ ### `QWEN_MODEL_LIST`
372
+
373
+ - 类型:可选
374
+ - 描述:用来控制模型列表,使用 `+` 增加一个模型,使用 `-` 来隐藏一个模型,使用 `模型名=展示名<扩展配置>` 来自定义模型的展示名,用英文逗号隔开。模型定义语法规则见 [模型列表][model-list]
375
+ - 默认值:`-`
376
+ - 示例:`-all,+qwen-turbo-latest,+qwen-plus-latest`
377
+
378
+ ## Stepfun AI
379
+
380
+ ### `STEPFUN_API_KEY`
381
+
382
+ - 类型:必选
383
+ - 描述:这是你在 Stepfun AI 服务中申请的 API 密钥
384
+ - 默认值:-
385
+ - 示例:`xxxxxx...xxxxxx`
386
+
387
+ ## Novita AI
388
+
389
+ ### `NOVITA_API_KEY`
390
+
391
+ - 类型:必选
392
+ - 描述:这是你在 Novita AI 服务中申请的 API 密钥
393
+ - 默认值:-
394
+ - 示例:`xxxxxx...xxxxxx`
395
+
396
+ ### `NOVITA_MODEL_LIST`
397
+
398
+ - 类型:可选
399
+ - 描述:用来控制模型列表,使用 `+` 增加一个模型,使用 `-` 来隐藏一个模型,使用 `模型名=展示名<扩展配置>` 来自定义模型的展示名,用英文逗号隔开。模型定义语法规则见 [模型列表][model-list]
400
+ - 默认值:`-`
401
+ - 示例:`-all,+meta-llama/llama-3.1-8b-instruct,+meta-llama/llama-3.1-70b-instruct`
402
+
403
+ ## 百川
404
+
405
+ ### `BAICHUAN_API_KEY`
406
+
407
+ - 类型:必选
408
+ - 描述:这是你在 百川智能 服务平台申请的 API 密钥
409
+ - 默认值:-
410
+ - 示例:`xxxxxx...xxxxxx`
411
+
412
+ ## 紫东太初
413
+
414
+ ### `TAICHU_API_KEY`
415
+
416
+ - 类型:必选
417
+ - 描述:这是你在 紫东太初 服务平台申请的 API 密钥
418
+ - 默认值:-
419
+ - 示例:`xxxxxx...xxxxxx`
420
+
421
+ ## 360 AI
422
+
423
+ ### `AI360_API_KEY`
424
+
425
+ - 类型:必选
426
+ - 描述:这是你在 360智脑 服务平台申请的 API 密钥
427
+ - 默认值:-
428
+ - 示例:`xxxxxx...xxxxxx`
429
+
430
+ ## Siliconflow
431
+
432
+ ### `SILICONCLOUD_API_KEY`
433
+
434
+ - 类型:必选
435
+ - 描述:这是你在 Siliconflow 服务中申请的 API 密钥
436
+ - 默认值:-
437
+ - 示例:`xxxxxx...xxxxxx`
438
+
439
+ ### `SILICONCLOUD_MODEL_LIST`
440
+
441
+ - 类型:可选
442
+ - 描述:用来控制模型列表,使用 `+` 增加一个模型,使用 `-` 来隐藏一个模型,使用 `模型名=展示名<扩展配置>` 来自定义模型的展示名,用英文逗号隔开。模型定义语法规则见 [模型列表][model-list]
443
+ - 默认值:`-`
444
+ - 示例:`-all,+deepseek-ai/DeepSeek-V2.5,+Qwen/Qwen2.5-7B-Instruct`
445
+
446
+ ### `SILICONCLOUD_PROXY_URL`
447
+
448
+ - 类型:可选
449
+ - 描述:如果你手动配置了 Siliconflow 接口代理,可以使用此配置项来覆盖默认的 Siliconflow API 请求基础 URL
450
+ - 默认值:`https://api.siliconflow.cn/v1`
451
+ - 示例:`https://my-siliconflow-proxy.com/v1`
452
+
453
+ ## Upstage AI
454
+
455
+ ### `UPSTAGE_API_KEY`
456
+
457
+ - 类型:必选
458
+ - 描述:这是你在 Upstage AI 服务中申请的 API 密钥
459
+ - 默认值:-
460
+ - 示例:`xxxxxx...xxxxxx`
461
+
462
+ ## Spark AI
463
+
464
+ ### `SPARK_API_KEY`
465
+
466
+ - 类型:必选
467
+ - 描述:这是你在 Spark AI 服务中申请的 API 密钥
468
+ - 默认值:-
469
+ - 示例:`xxxxxx...xxxxxx`
470
+
471
+ ## AI21 Labs
472
+
473
+ ### `AI21_API_KEY`
474
+
475
+ - 类型:必选
476
+ - 描述:这是你在 AI21 Labs 服务中申请的 API 密钥
477
+ - 默认值:-
478
+ - 示例:`xxxxxx...xxxxxx`
479
+
480
+ [model-list]: /zh/docs/self-hosting/advanced/model-list
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@lobehub/chat",
3
- "version": "1.19.27",
3
+ "version": "1.19.29",
4
4
  "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
5
5
  "keywords": [
6
6
  "framework",
@@ -68,8 +68,8 @@ export const useProviderList = (): ProviderItem[] => {
68
68
  BaichuanProviderCard,
69
69
  MinimaxProviderCard,
70
70
  Ai360ProviderCard,
71
- SiliconCloudProviderCard,
72
71
  TaichuProviderCard,
72
+ SiliconCloudProviderCard,
73
73
  ],
74
74
  [AzureProvider, OllamaProvider, OpenAIProvider, BedrockProvider, GithubProvider],
75
75
  );
@@ -86,8 +86,8 @@ export const DEFAULT_MODEL_PROVIDER_LIST = [
86
86
  BaichuanProvider,
87
87
  MinimaxProvider,
88
88
  Ai360Provider,
89
- SiliconCloudProvider,
90
89
  TaichuProvider,
90
+ SiliconCloudProvider,
91
91
  ];
92
92
 
93
93
  export const filterEnabledModels = (provider: ModelProviderCard) => {
@@ -8,12 +8,14 @@ const Stepfun: ModelProviderCard = {
8
8
  description: '支持大规模上下文交互,适合复杂对话场景。',
9
9
  displayName: 'Step 2 16K',
10
10
  enabled: true,
11
+ functionCall: true,
11
12
  id: 'step-2-16k',
12
13
  tokens: 16_000,
13
14
  },
14
15
  {
15
16
  description: '具备超长上下文处理能力,尤其适合长文档分析。',
16
17
  displayName: 'Step 1 256K',
18
+ functionCall: true,
17
19
  id: 'step-1-256k',
18
20
  tokens: 256_000,
19
21
  },
@@ -21,6 +23,7 @@ const Stepfun: ModelProviderCard = {
21
23
  description: '平衡性能与成本,适合一般场景。',
22
24
  displayName: 'Step 1 128K',
23
25
  enabled: true,
26
+ functionCall: true,
24
27
  id: 'step-1-128k',
25
28
  tokens: 128_000,
26
29
  },
@@ -28,6 +31,7 @@ const Stepfun: ModelProviderCard = {
28
31
  description: '支持中等长度的对话,适用于多种应用场景。',
29
32
  displayName: 'Step 1 32K',
30
33
  enabled: true,
34
+ functionCall: true,
31
35
  id: 'step-1-32k',
32
36
  tokens: 32_000,
33
37
  },
@@ -35,6 +39,7 @@ const Stepfun: ModelProviderCard = {
35
39
  description: '小型模型,适合轻量级任务。',
36
40
  displayName: 'Step 1 8K',
37
41
  enabled: true,
42
+ functionCall: true,
38
43
  id: 'step-1-8k',
39
44
  tokens: 8000,
40
45
  },
@@ -42,6 +47,7 @@ const Stepfun: ModelProviderCard = {
42
47
  description: '高速模型,适合实时对话。',
43
48
  displayName: 'Step 1 Flash',
44
49
  enabled: true,
50
+ functionCall: true,
45
51
  id: 'step-1-flash',
46
52
  tokens: 8000,
47
53
  },
@@ -49,6 +55,7 @@ const Stepfun: ModelProviderCard = {
49
55
  description: '支持视觉输入,增强多模态交互体验。',
50
56
  displayName: 'Step 1V 32K',
51
57
  enabled: true,
58
+ functionCall: true,
52
59
  id: 'step-1v-32k',
53
60
  tokens: 32_000,
54
61
  vision: true,
@@ -57,6 +64,7 @@ const Stepfun: ModelProviderCard = {
57
64
  description: '小型视觉模型,适合基本的图文任务。',
58
65
  displayName: 'Step 1V 8K',
59
66
  enabled: true,
67
+ functionCall: true,
60
68
  id: 'step-1v-8k',
61
69
  tokens: 8000,
62
70
  vision: true,
@@ -5,13 +5,22 @@ const Taichu: ModelProviderCard = {
5
5
  chatModels: [
6
6
  {
7
7
  description:
8
- '紫东太初语言大模型具备超强语言理解能力以及文本创作、知识问答、代码编程、数学计算、逻辑推理、情感分析、文本摘要等能力。创新性地将大数据预训练与多源丰富知识相结合,通过持续打磨算法技术,并不断吸收海量文本数据中词汇、结构、语法、语义等方面的新知识,实现模型效果不断进化。为用户提供更加便捷的信息和服务以及更为智能化的体验。',
9
- displayName: 'Taichu-2.0',
8
+ 'Taichu 2.0 基于海量高质数据训练,具有更强的文本理解、内容创作、对话问答等能力',
9
+ displayName: 'Taichu 2.0',
10
10
  enabled: true,
11
- functionCall: false,
11
+ functionCall: true,
12
12
  id: 'taichu_llm',
13
13
  tokens: 32_768,
14
14
  },
15
+ {
16
+ description:
17
+ 'Taichu 2.0V 融合了图像理解、知识迁移、逻辑归因等能力,在图文问答领域表现突出',
18
+ displayName: 'Taichu 2.0V',
19
+ enabled: true,
20
+ id: 'taichu_vqa',
21
+ tokens: 4096,
22
+ vision: true,
23
+ },
15
24
  ],
16
25
  checkModel: 'taichu_llm',
17
26
  description:
@@ -3,6 +3,14 @@ import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
3
3
 
4
4
  export const LobeStepfunAI = LobeOpenAICompatibleFactory({
5
5
  baseURL: 'https://api.stepfun.com/v1',
6
+ chatCompletion: {
7
+ handlePayload: (payload) => {
8
+ return {
9
+ ...payload,
10
+ stream: !payload.tools,
11
+ } as any;
12
+ },
13
+ },
6
14
  debug: {
7
15
  chatCompletion: () => process.env.DEBUG_STEPFUN_CHAT_COMPLETION === '1',
8
16
  },