@lobehub/chat 1.7.8 → 1.7.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -2,6 +2,65 @@
 
  # Changelog
 
+ ### [Version 1.7.10](https://github.com/lobehub/lobe-chat/compare/v1.7.9...v1.7.10)
+
+ <sup>Released on **2024-08-02**</sup>
+
+ #### 💄 Styles
+
+ - **misc**: Add Gemini 1.5 Pro Exp model.
+
+ <br/>
+
+ <details>
+ <summary><kbd>Improvements and Fixes</kbd></summary>
+
+ #### Styles
+
+ - **misc**: Add Gemini 1.5 Pro Exp model, closes [#3384](https://github.com/lobehub/lobe-chat/issues/3384) ([0de8b7b](https://github.com/lobehub/lobe-chat/commit/0de8b7b))
+
+ </details>
+
+ <div align="right">
+
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+ </div>
+
+ ### [Version 1.7.9](https://github.com/lobehub/lobe-chat/compare/v1.7.8...v1.7.9)
+
+ <sup>Released on **2024-08-01**</sup>
+
+ #### 🐛 Bug Fixes
+
+ - **misc**: Fix Mistral models calling & update model info.
+
+ #### 💄 Styles
+
+ - **misc**: Fix stepfun & baichuan model tag icon missing, update Perplexity models.
+
+ <br/>
+
+ <details>
+ <summary><kbd>Improvements and Fixes</kbd></summary>
+
+ #### What's fixed
+
+ - **misc**: Fix Mistral models calling & update model info, closes [#3377](https://github.com/lobehub/lobe-chat/issues/3377) [#3098](https://github.com/lobehub/lobe-chat/issues/3098) ([66274d0](https://github.com/lobehub/lobe-chat/commit/66274d0))
+
+ #### Styles
+
+ - **misc**: Fix stepfun & baichuan model tag icon missing, closes [#3379](https://github.com/lobehub/lobe-chat/issues/3379) ([e283ef4](https://github.com/lobehub/lobe-chat/commit/e283ef4))
+ - **misc**: Update Perplexity models, closes [#3380](https://github.com/lobehub/lobe-chat/issues/3380) ([06cb946](https://github.com/lobehub/lobe-chat/commit/06cb946))
+
+ </details>
+
+ <div align="right">
+
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+ </div>
+
  ### [Version 1.7.8](https://github.com/lobehub/lobe-chat/compare/v1.7.7...v1.7.8)
 
  <sup>Released on **2024-07-30**</sup>
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@lobehub/chat",
-   "version": "1.7.8",
+   "version": "1.7.10",
    "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
    "keywords": [
      "framework",
@@ -62,7 +62,7 @@ const ModelIcon = memo<ModelProviderIconProps>(({ model: originModel, size = 12
    if (model.includes('moonshot')) return <Moonshot.Avatar size={size} />;
    if (model.includes('qwen')) return <Tongyi.Avatar background={Tongyi.colorPrimary} size={size} />;
    if (model.includes('minmax') || model.includes('abab')) return <Minimax.Avatar size={size} />;
-   if (model.includes('mistral') || model.includes('mixtral')) return <Mistral.Avatar size={size} />;
+   if (model.includes('mistral') || model.includes('mixtral') || model.includes('codestral')) return <Mistral.Avatar size={size} />;
    if (model.includes('pplx') || model.includes('sonar')) return <Perplexity.Avatar size={size} />;
    if (model.includes('yi-')) return <Yi.Avatar size={size} />;
    if (model.startsWith('openrouter')) return <OpenRouter.Avatar size={size} />; // only for Cinematika and Auto
@@ -29,6 +29,7 @@ import {
    Rwkv,
    Spark,
    Stability,
+   Stepfun,
    Tongyi,
    Wenxin,
    Yi,
@@ -40,9 +41,12 @@ interface ModelIconProps {
    size?: number;
  }
 
- const ModelIcon = memo<ModelIconProps>(({ model, size = 12 }) => {
-   if (!model) return;
+ const ModelIcon = memo<ModelIconProps>(({ model: originModel, size = 12 }) => {
+   if (!originModel) return;
 
+   // lower case the origin model so to better match more model id case
+   const model = originModel.toLowerCase();
+
    // currently supported models, maybe not in its own provider
    if (model.startsWith('gpt')) return <OpenAI size={size} />;
    if (model.startsWith('glm') || model.includes('chatglm')) return <ChatGLM size={size} />;
@@ -58,13 +62,14 @@ const ModelIcon = memo<ModelIconProps>(({ model, size = 12 }) => {
    if (model.includes('qwen')) return <Tongyi size={size} />;
    if (model.includes('minmax')) return <Minimax size={size} />;
    if (model.includes('abab')) return <Minimax size={size} />;
-   if (model.includes('mistral') || model.includes('mixtral')) return <Mistral size={size} />;
+   if (model.includes('mistral') || model.includes('mixtral') || model.includes('codestral')) return <Mistral size={size} />;
    if (model.includes('pplx') || model.includes('sonar')) return <Perplexity size={size} />;
    if (model.includes('yi-')) return <Yi size={size} />;
    if (model.startsWith('openrouter')) return <OpenRouter size={size} />; // only for Cinematika and Auto
    if (model.startsWith('openchat')) return <OpenChat size={size} />;
    if (model.includes('command')) return <Cohere size={size} />;
    if (model.includes('dbrx')) return <Dbrx size={size} />;
+   if (model.includes('step')) return <Stepfun size={size} />;
    if (model.includes('taichu')) return <AiMass size={size} />;
    if (model.includes('360gpt')) return <Ai360 size={size} />;
 
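For readers skimming the icon changes above, here is a minimal standalone sketch of the matching idea (not the package's actual component, which imports icon sets and returns React elements): lower-casing the incoming id first is what lets mixed-case ids reach the new `codestral` and `step` branches.

```ts
// Hypothetical, simplified matcher illustrating the lower-casing plus the new branches above.
// Returns a provider name instead of an icon element so the sketch stays self-contained.
const matchProviderIcon = (originModel?: string): string | undefined => {
  if (!originModel) return undefined;

  // lower-case once so mixed-case ids still match the substring checks
  const model = originModel.toLowerCase();

  if (model.startsWith('gpt')) return 'openai';
  if (model.includes('mistral') || model.includes('mixtral') || model.includes('codestral')) return 'mistral';
  if (model.includes('step')) return 'stepfun';
  return undefined;
};

console.log(matchProviderIcon('Codestral-22B')); // 'mistral' — illustrative id; mixed case would have been missed before
console.log(matchProviderIcon('step-1-128k'));   // 'stepfun' — the newly added branch
```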
@@ -24,7 +24,7 @@ const Google: ModelProviderCard = {
      vision: true,
    },
    {
-     description: 'Mid-size multimodal model that supports up to 1 million tokens',
+     description: 'Mid-size multimodal model that supports up to 2 million tokens',
      displayName: 'Gemini 1.5 Pro',
      enabled: true,
      functionCall: true,
@@ -34,7 +34,7 @@ const Google: ModelProviderCard = {
      vision: true,
    },
    {
-     description: 'Mid-size multimodal model that supports up to 1 million tokens',
+     description: 'Mid-size multimodal model that supports up to 2 million tokens',
      displayName: 'Gemini 1.5 Pro 001',
      functionCall: true,
      id: 'gemini-1.5-pro-001',
@@ -42,6 +42,16 @@ const Google: ModelProviderCard = {
      tokens: 2_097_152 + 8192,
      vision: true,
    },
+   {
+     description: 'Mid-size multimodal model that supports up to 2 million tokens',
+     displayName: 'Gemini 1.5 Pro Experimental 0801',
+     enabled: true,
+     functionCall: true,
+     id: 'gemini-1.5-pro-exp-0801',
+     maxOutput: 8192,
+     tokens: 2_097_152 + 8192,
+     vision: true,
+   },
    {
      description:
        'The best model for scaling across a wide range of tasks. This is the latest model.',
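A quick note on the figures in the new `gemini-1.5-pro-exp-0801` entry: `2_097_152` is 2 × 1,024 × 1,024 tokens, i.e. the 2-million-token context window named in the description, and the `+ 8192` term mirrors `maxOutput: 8192`, so the `tokens` expression reads as context window plus maximum output budget.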
@@ -1,6 +1,7 @@
  import { ModelProviderCard } from '@/types/llm';
 
  // ref https://docs.mistral.ai/getting-started/models/
+ // ref https://docs.mistral.ai/capabilities/function_calling/
  const Mistral: ModelProviderCard = {
    chatModels: [
      {
@@ -23,23 +24,31 @@ const Mistral: ModelProviderCard = {
        tokens: 65_536,
      },
      {
-       displayName: 'Mistral Small',
+       displayName: 'Mistral Nemo',
        enabled: true,
-       id: 'mistral-small-latest',
-       tokens: 32_768,
+       functionCall: true,
+       id: 'open-mistral-nemo',
+       tokens: 128_000,
      },
      {
-       displayName: 'Mistral Medium',
+       displayName: 'Mistral Large',
        enabled: true,
-       id: 'mistral-medium-latest',
-       tokens: 32_768,
+       functionCall: true,
+       id: 'mistral-large-latest',
+       tokens: 128_000,
      },
      {
-       displayName: 'Mistral Large',
+       displayName: 'Codestral',
        enabled: true,
-       id: 'mistral-large-latest',
+       id: 'codestral-latest',
        tokens: 32_768,
      },
+     {
+       displayName: 'Codestral Mamba',
+       enabled: true,
+       id: 'open-codestral-mamba',
+       tokens: 256_000,
+     },
    ],
    checkModel: 'open-mistral-7b',
    id: 'mistral',
@@ -4,44 +4,41 @@ import { ModelProviderCard } from '@/types/llm';
  const Perplexity: ModelProviderCard = {
    chatModels: [
      {
-       displayName: 'Perplexity 8B Chat',
-       id: 'llama-3-sonar-small-32k-chat',
-       tokens: 32_768,
-     },
-     {
-       displayName: 'Perplexity 70B Chat',
+       displayName: 'Llama3.1 Sonar Small Chat',
        enabled: true,
-       id: 'llama-3-sonar-large-32k-chat',
-       tokens: 32_768,
+       id: 'llama-3.1-sonar-small-128k-chat',
+       tokens: 128_000,
      },
      {
-       displayName: 'Perplexity 8B Online',
-       id: 'llama-3-sonar-small-32k-online',
-       tokens: 28_000,
+       displayName: 'Llama3.1 Sonar Large Chat',
+       enabled: true,
+       id: 'llama-3.1-sonar-large-128k-chat',
+       tokens: 128_000,
      },
      {
-       displayName: 'Perplexity 70B Online',
+       displayName: 'Llama3.1 Sonar Small Online',
        enabled: true,
-       id: 'llama-3-sonar-large-32k-online',
-       tokens: 28_000,
+       id: 'llama-3.1-sonar-small-128k-online',
+       tokens: 128_000,
      },
      {
-       displayName: 'Llama3 8B Instruct',
-       id: 'llama-3-8b-instruct',
-       tokens: 8192,
+       displayName: 'Llama3.1 Sonar Large Online',
+       enabled: true,
+       id: 'llama-3.1-sonar-large-128k-online',
+       tokens: 128_000,
      },
      {
-       displayName: 'Llama3 70B Instruct',
-       id: 'llama-3-70b-instruct',
-       tokens: 8192,
+       displayName: 'Llama3.1 8B Instruct',
+       id: 'llama-3.1-8b-instruct',
+       tokens: 128_000,
      },
      {
-       displayName: 'Mixtral 8x7B Instruct',
-       id: 'mixtral-8x7b-instruct',
-       tokens: 16_384,
+       displayName: 'Llama3.1 70B Instruct',
+       id: 'llama-3.1-70b-instruct',
+       tokens: 128_000,
      },
    ],
-   checkModel: 'llama-3-8b-instruct',
+   checkModel: 'llama-3.1-8b-instruct',
    id: 'perplexity',
    name: 'Perplexity',
    proxyUrl: {
@@ -5,14 +5,15 @@ export const LobeMistralAI = LobeOpenAICompatibleFactory({
    baseURL: 'https://api.mistral.ai/v1',
    chatCompletion: {
      handlePayload: (payload) => ({
-       max_tokens: payload.max_tokens,
+       ...payload.max_tokens !== undefined && { max_tokens: payload.max_tokens },
        messages: payload.messages as any,
        model: payload.model,
        stream: true,
        temperature: payload.temperature,
-       tools: payload.tools,
+       ...payload.tools && { tools: payload.tools },
        top_p: payload.top_p,
      }),
+     noUserId: true,
    },
    debug: {
      chatCompletion: () => process.env.DEBUG_MISTRAL_CHAT_COMPLETION === '1',
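A standalone sketch of the conditional-spread pattern introduced above (the `Payload` type and values are illustrative stand-ins, not the package's `ChatStreamPayload`): spreading a falsy operand contributes no keys, so `max_tokens` and `tools` are omitted from the request body entirely rather than sent as `undefined`.

```ts
// Illustrative only: mirrors the `...cond && { key }` idiom used in handlePayload above.
interface Payload {
  max_tokens?: number;
  model: string;
  tools?: unknown[];
}

const buildBody = (payload: Payload) => ({
  ...(payload.max_tokens !== undefined && { max_tokens: payload.max_tokens }),
  model: payload.model,
  stream: true,
  ...(payload.tools && { tools: payload.tools }),
});

console.log(buildBody({ model: 'open-mistral-7b' }));
// { model: 'open-mistral-7b', stream: true } — no max_tokens / tools keys at all
console.log(buildBody({ max_tokens: 1024, model: 'open-mistral-7b' }));
// { max_tokens: 1024, model: 'open-mistral-7b', stream: true }
```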
@@ -417,6 +417,92 @@ describe('LobeOpenAICompatibleFactory', () => {
      });
    });
 
+   describe('noUserId option', () => {
+     it('should not add user to payload when noUserId is true', async () => {
+       const LobeMockProvider = LobeOpenAICompatibleFactory({
+         baseURL: 'https://api.mistral.ai/v1',
+         chatCompletion: {
+           noUserId: true,
+         },
+         provider: ModelProvider.Mistral,
+       });
+
+       const instance = new LobeMockProvider({ apiKey: 'test' });
+       const mockCreateMethod = vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(new ReadableStream() as any);
+
+       await instance.chat(
+         {
+           messages: [{ content: 'Hello', role: 'user' }],
+           model: 'open-mistral-7b',
+           temperature: 0,
+         },
+         { user: 'testUser' }
+       );
+
+       expect(mockCreateMethod).toHaveBeenCalledWith(
+         expect.not.objectContaining({
+           user: 'testUser',
+         }),
+         expect.anything()
+       );
+     });
+
+     it('should add user to payload when noUserId is false', async () => {
+       const LobeMockProvider = LobeOpenAICompatibleFactory({
+         baseURL: 'https://api.mistral.ai/v1',
+         chatCompletion: {
+           noUserId: false,
+         },
+         provider: ModelProvider.Mistral,
+       });
+
+       const instance = new LobeMockProvider({ apiKey: 'test' });
+       const mockCreateMethod = vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(new ReadableStream() as any);
+
+       await instance.chat(
+         {
+           messages: [{ content: 'Hello', role: 'user' }],
+           model: 'open-mistral-7b',
+           temperature: 0,
+         },
+         { user: 'testUser' }
+       );
+
+       expect(mockCreateMethod).toHaveBeenCalledWith(
+         expect.objectContaining({
+           user: 'testUser',
+         }),
+         expect.anything()
+       );
+     });
+
+     it('should add user to payload when noUserId is not set in chatCompletion', async () => {
+       const LobeMockProvider = LobeOpenAICompatibleFactory({
+         baseURL: 'https://api.mistral.ai/v1',
+         provider: ModelProvider.Mistral,
+       });
+
+       const instance = new LobeMockProvider({ apiKey: 'test' });
+       const mockCreateMethod = vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue(new ReadableStream() as any);
+
+       await instance.chat(
+         {
+           messages: [{ content: 'Hello', role: 'user' }],
+           model: 'open-mistral-7b',
+           temperature: 0,
+         },
+         { user: 'testUser' }
+       );
+
+       expect(mockCreateMethod).toHaveBeenCalledWith(
+         expect.objectContaining({
+           user: 'testUser',
+         }),
+         expect.anything()
+       );
+     });
+   });
+
    describe('cancel request', () => {
      it('should cancel ongoing request correctly', async () => {
        const controller = new AbortController();
@@ -40,6 +40,7 @@ interface OpenAICompatibleFactoryOptions<T extends Record<string, any> = any> {
      payload: ChatStreamPayload,
      options: ConstructorOptions<T>,
    ) => OpenAI.ChatCompletionCreateParamsStreaming;
+   noUserId?: boolean;
    };
    constructorOptions?: ConstructorOptions<T>;
    debug?: {
@@ -151,7 +152,10 @@ export const LobeOpenAICompatibleFactory = <T extends Record<string, any> = any>
      } as OpenAI.ChatCompletionCreateParamsStreaming);
 
      const response = await this.client.chat.completions.create(
-       { ...postPayload, user: options?.user },
+       {
+         ...postPayload,
+         ...(chatCompletion?.noUserId ? {} : { user: options?.user })
+       },
        {
          // https://github.com/lobehub/lobe-chat/pull/318
          headers: { Accept: '*/*' },
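Taken together with the `noUserId: true` set in the Mistral runtime above, the conditional in the factory means providers that opt out never receive the OpenAI-style `user` field. A minimal sketch with illustrative values (not the factory's real call site):

```ts
// Illustrative stand-ins for postPayload and options?.user in the factory code above.
const postPayload = { model: 'open-mistral-7b', stream: true };
const user = 'testUser';

const buildRequest = (noUserId?: boolean) => ({
  ...postPayload,
  ...(noUserId ? {} : { user }),
});

console.log(buildRequest(true));  // { model: 'open-mistral-7b', stream: true } — user dropped, as the new tests assert
console.log(buildRequest(false)); // { model: 'open-mistral-7b', stream: true, user: 'testUser' }
console.log(buildRequest());      // same as false — leaving the option unset keeps the previous behavior
```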