@lobehub/chat 1.6.11 → 1.6.12
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +25 -0
- package/package.json +2 -2
- package/src/config/modelProviders/groq.ts +21 -0
package/CHANGELOG.md
CHANGED
@@ -2,6 +2,31 @@
 
 # Changelog
 
+### [Version 1.6.12](https://github.com/lobehub/lobe-chat/compare/v1.6.11...v1.6.12)
+
+<sup>Released on **2024-07-25**</sup>
+
+#### 💄 Styles
+
+- **misc**: Add new models to groq which are llama 3.1.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### Styles
+
+- **misc**: Add new models to groq which are llama 3.1, closes [#3301](https://github.com/lobehub/lobe-chat/issues/3301) ([ec20fd0](https://github.com/lobehub/lobe-chat/commit/ec20fd0))
+
+</details>
+
+<div align="right">
+
+[](#readme-top)
+
+</div>
+
 ### [Version 1.6.11](https://github.com/lobehub/lobe-chat/compare/v1.6.10...v1.6.11)
 
 <sup>Released on **2024-07-24**</sup>
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@lobehub/chat",
-  "version": "1.6.11",
+  "version": "1.6.12",
   "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
   "keywords": [
     "framework",
@@ -55,7 +55,7 @@
     "release": "semantic-release",
     "self-hosting:docker": "docker build -t lobe-chat:local .",
     "self-hosting:docker-cn": "docker build -t lobe-chat:local --build-arg USE_NPM_CN_MIRROR=true .",
-    "start": "next start",
+    "start": "next start -p 3210",
     "stylelint": "stylelint \"src/**/*.{js,jsx,ts,tsx}\" --fix",
     "test": "npm run test-app && npm run test-server",
     "test-app": "vitest run --config vitest.config.ts",
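Beyond the version bump, the `start` script now pins the production server to port 3210 via Next.js's `-p` flag, rather than the framework's default port 3000. A minimal smoke check against a locally running instance started with `npm run start` — the `smokeCheck` helper below is illustrative only, not part of the package:

```ts
// smoke-check.ts — a minimal sketch, assuming `npm run start` is running locally.
// With the updated script ("next start -p 3210") the server listens on 3210,
// not on Next.js's default port 3000.
async function smokeCheck(): Promise<void> {
  const res = await fetch('http://localhost:3210');
  console.log(`status: ${res.status}`); // expect 200 once the server is up
}

smokeCheck().catch((err) => {
  console.error('server not reachable on port 3210:', err);
});
```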
package/src/config/modelProviders/groq.ts
CHANGED

@@ -3,6 +3,27 @@ import { ModelProviderCard } from '@/types/llm';
 // ref https://console.groq.com/docs/models
 const Groq: ModelProviderCard = {
   chatModels: [
+    {
+      displayName: 'LLaMA3.1 405B (Preview)',
+      enabled: false,
+      functionCall: true,
+      id: 'llama-3.1-405b-reasoning',
+      tokens: 16_000,
+    },
+    {
+      displayName: 'LLaMA3.1 70B (Preview)',
+      enabled: true,
+      functionCall: true,
+      id: 'llama-3.1-70b-versatile',
+      tokens: 8000,
+    },
+    {
+      displayName: 'LLaMA3.1 8B (Preview)',
+      enabled: true,
+      functionCall: true,
+      id: 'llama-3.1-8b-instant',
+      tokens: 8000,
+    },
     {
       displayName: 'LLaMA3 70B',
       enabled: true,
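Each new entry added above is a plain object in the provider's `chatModels` array. A minimal sketch of the fields those entries use — the real `ModelProviderCard` type is imported from `@/types/llm` and may define more fields than shown here; `ChatModelCard` is an assumed name for illustration:

```ts
// A minimal sketch of the fields used by the new Groq entries; the actual
// ModelProviderCard type in '@/types/llm' may define more than this.
interface ChatModelCard {
  displayName: string;   // label shown in the model picker
  enabled: boolean;      // whether the model is on by default
  functionCall: boolean; // whether the model supports function calling
  id: string;            // model id passed to the Groq API
  tokens: number;        // context window size, in tokens
}

interface ModelProviderCardSketch {
  chatModels: ChatModelCard[];
}

// One of the newly added entries, standalone:
const llama31_8b: ChatModelCard = {
  displayName: 'LLaMA3.1 8B (Preview)',
  enabled: true,
  functionCall: true,
  id: 'llama-3.1-8b-instant',
  tokens: 8000,
};
```

Note that only the 70B and 8B previews ship enabled by default; the 405B preview is present but disabled (`enabled: false`), so users must opt in to it.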