@lobehub/chat 1.20.6 → 1.20.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +25 -0
- package/package.json +1 -1
- package/src/config/modelProviders/groq.ts +33 -8
package/CHANGELOG.md
CHANGED
@@ -2,6 +2,31 @@
 
 # Changelog
 
+### [Version 1.20.7](https://github.com/lobehub/lobe-chat/compare/v1.20.6...v1.20.7)
+
+<sup>Released on **2024-09-29**</sup>
+
+#### 💄 Styles
+
+- **misc**: Update groq model list.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### Styles
+
+- **misc**: Update groq model list, closes [#4195](https://github.com/lobehub/lobe-chat/issues/4195) ([ef5164d](https://github.com/lobehub/lobe-chat/commit/ef5164d))
+
+</details>
+
+<div align="right">
+
+[](#readme-top)
+
+</div>
+
 ### [Version 1.20.6](https://github.com/lobehub/lobe-chat/compare/v1.20.5...v1.20.6)
 
 <sup>Released on **2024-09-29**</sup>
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@lobehub/chat",
-  "version": "1.20.6",
+  "version": "1.20.7",
   "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
   "keywords": [
     "framework",
package/src/config/modelProviders/groq.ts
CHANGED
@@ -3,11 +3,39 @@ import { ModelProviderCard } from '@/types/llm';
 // ref https://console.groq.com/docs/tool-use
 const Groq: ModelProviderCard = {
   chatModels: [
-    // TODO: During preview launch, Groq is limiting 3.
+    // TODO: During preview launch, Groq is limiting 3.2 models to max_tokens of 8k.
+    {
+      description:
+        'Llama 3.2 旨在处理结合视觉和文本数据的任务。它在图像描述和视觉问答等任务中表现出色,跨越了语言生成和视觉推理之间的鸿沟。',
+      displayName: 'Llama 3.2 11B Vision (Preview)',
+      enabled: true,
+      id: 'llama-3.2-11b-vision-preview',
+      maxOutput: 8192,
+      pricing: {
+        input: 0.05,
+        output: 0.08,
+      },
+      tokens: 8192,
+      vision: true,
+    },
+    {
+      description:
+        'Llama 3.2 旨在处理结合视觉和文本数据的任务。它在图像描述和视觉问答等任务中表现出色,跨越了语言生成和视觉推理之间的鸿沟。',
+      displayName: 'Llama 3.2 90B Vision (Preview)',
+      enabled: true,
+      id: 'llama-3.2-90b-vision-preview',
+      maxOutput: 8192,
+      pricing: {
+        input: 0.59,
+        output: 0.79,
+      },
+      tokens: 8192,
+      vision: true,
+    },
     {
       description:
         'Llama 3.1 8B 是一款高效能模型,提供了快速的文本生成能力,非常适合需要大规模效率和成本效益的应用场景。',
-      displayName: 'Llama 3.1 8B
+      displayName: 'Llama 3.1 8B',
       enabled: true,
       functionCall: true,
       id: 'llama-3.1-8b-instant',
@@ -21,7 +49,7 @@ const Groq: ModelProviderCard = {
     {
       description:
         'Llama 3.1 70B 提供更强大的AI推理能力,适合复杂应用,支持超多的计算处理并保证高效和准确率。',
-      displayName: 'Llama 3.1 70B
+      displayName: 'Llama 3.1 70B',
       enabled: true,
       functionCall: true,
       id: 'llama-3.1-70b-versatile',
@@ -35,7 +63,7 @@ const Groq: ModelProviderCard = {
     /*
     // Offline due to overwhelming demand! Stay tuned for updates.
     {
-      displayName: 'Llama 3.1 405B
+      displayName: 'Llama 3.1 405B',
       functionCall: true,
       id: 'llama-3.1-405b-reasoning',
       tokens: 8_192,
@@ -44,7 +72,6 @@ const Groq: ModelProviderCard = {
     {
       description: 'Llama 3 Groq 8B Tool Use 是针对高效工具使用优化的模型,支持快速并行计算。',
       displayName: 'Llama 3 Groq 8B Tool Use (Preview)',
-      enabled: true,
       functionCall: true,
       id: 'llama3-groq-8b-8192-tool-use-preview',
       pricing: {
@@ -56,7 +83,6 @@ const Groq: ModelProviderCard = {
     {
       description: 'Llama 3 Groq 70B Tool Use 提供强大的工具调用能力,支持复杂任务的高效处理。',
       displayName: 'Llama 3 Groq 70B Tool Use (Preview)',
-      enabled: true,
       functionCall: true,
       id: 'llama3-groq-70b-8192-tool-use-preview',
       pricing: {
@@ -124,13 +150,12 @@ const Groq: ModelProviderCard = {
     {
       description: 'LLaVA 1.5 7B 提供视觉处理能力融合,通过视觉信息输入生成复杂输出。',
       displayName: 'LLaVA 1.5 7B',
-      enabled: true,
       id: 'llava-v1.5-7b-4096-preview',
       tokens: 4096,
       vision: true,
     },
   ],
-  checkModel: '
+  checkModel: 'llama-3.1-8b-instant',
   description:
     'Groq 的 LPU 推理引擎在最新的独立大语言模型(LLM)基准测试中表现卓越,以其惊人的速度和效率重新定义了 AI 解决方案的标准。Groq 是一种即时推理速度的代表,在基于云的部署中展现了良好的性能。',
   id: 'groq',
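For context on the groq.ts changes: each object in `chatModels` is one chat-model entry of the provider's `ModelProviderCard`. The sketch below is a minimal, hypothetical mirror of only the fields visible in this diff; the real type lives in `@/types/llm` and may define more fields, the `ChatModelEntry` interface name is invented here, and the field comments are inferred from how the values are used in the diff rather than stated by it.

```ts
// Hypothetical, simplified shape of one chatModels entry as seen in this diff.
// Not the actual ModelProviderCard type from '@/types/llm'.
interface ChatModelEntry {
  description?: string;
  displayName: string;
  enabled?: boolean;      // assumed: listed/enabled by default for the provider
  functionCall?: boolean; // assumed: supports tool/function calling
  id: string;             // model id sent to the Groq API
  maxOutput?: number;     // assumed: max tokens generated per response
  pricing?: { input: number; output: number }; // units not stated in the diff
  tokens?: number;        // assumed: context window size
  vision?: boolean;       // assumed: accepts image input
}

// One of the two entries added in 1.20.7, restated against the sketch above:
const llama32Vision11b: ChatModelEntry = {
  displayName: 'Llama 3.2 11B Vision (Preview)',
  enabled: true,
  id: 'llama-3.2-11b-vision-preview',
  maxOutput: 8192,
  pricing: { input: 0.05, output: 0.08 },
  tokens: 8192,
  vision: true,
};
```

Read this way, the release adds the two Llama 3.2 vision preview models, drops the trailing `(Preview)`-style suffixes from the truncated Llama 3.1 display names, disables the Groq Tool Use and LLaVA preview models by default, and points `checkModel` at `llama-3.1-8b-instant`.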