@lobehub/chat 1.6.10 → 1.6.12

This diff reflects the changes between publicly released versions of this package as they appear in the supported public registries, and is provided for informational purposes only.
package/CHANGELOG.md CHANGED
@@ -2,6 +2,56 @@

  # Changelog

+ ### [Version 1.6.12](https://github.com/lobehub/lobe-chat/compare/v1.6.11...v1.6.12)
+
+ <sup>Released on **2024-07-25**</sup>
+
+ #### 💄 Styles
+
+ - **misc**: Add new models to groq which are llama 3.1.
+
+ <br/>
+
+ <details>
+ <summary><kbd>Improvements and Fixes</kbd></summary>
+
+ #### Styles
+
+ - **misc**: Add new models to groq which are llama 3.1, closes [#3301](https://github.com/lobehub/lobe-chat/issues/3301) ([ec20fd0](https://github.com/lobehub/lobe-chat/commit/ec20fd0))
+
+ </details>
+
+ <div align="right">
+
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+ </div>
+
+ ### [Version 1.6.11](https://github.com/lobehub/lobe-chat/compare/v1.6.10...v1.6.11)
+
+ <sup>Released on **2024-07-24**</sup>
+
+ #### 🐛 Bug Fixes
+
+ - **misc**: Fix `UNAUTHORIZED` issue with clerk auth provider.
+
+ <br/>
+
+ <details>
+ <summary><kbd>Improvements and Fixes</kbd></summary>
+
+ #### What's fixed
+
+ - **misc**: Fix `UNAUTHORIZED` issue with clerk auth provider, closes [#3299](https://github.com/lobehub/lobe-chat/issues/3299) ([97bea09](https://github.com/lobehub/lobe-chat/commit/97bea09))
+
+ </details>
+
+ <div align="right">
+
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+ </div>
+
  ### [Version 1.6.10](https://github.com/lobehub/lobe-chat/compare/v1.6.9...v1.6.10)

  <sup>Released on **2024-07-23**</sup>
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@lobehub/chat",
- "version": "1.6.10",
+ "version": "1.6.12",
  "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
  "keywords": [
  "framework",
@@ -55,7 +55,7 @@
  "release": "semantic-release",
  "self-hosting:docker": "docker build -t lobe-chat:local .",
  "self-hosting:docker-cn": "docker build -t lobe-chat:local --build-arg USE_NPM_CN_MIRROR=true .",
- "start": "next start",
+ "start": "next start -p 3210",
  "stylelint": "stylelint \"src/**/*.{js,jsx,ts,tsx}\" --fix",
  "test": "npm run test-app && npm run test-server",
  "test-app": "vitest run --config vitest.config.ts",
@@ -108,7 +108,7 @@
  "@azure/openai": "1.0.0-beta.12",
  "@cfworker/json-schema": "^1.12.8",
  "@clerk/localizations": "2.0.0",
- "@clerk/nextjs": "^5.2.2",
+ "@clerk/nextjs": "^5.2.6",
  "@clerk/themes": "^2.1.10",
  "@google/generative-ai": "^0.14.1",
  "@icons-pack/react-simple-icons": "^9.6.0",
@@ -3,6 +3,27 @@ import { ModelProviderCard } from '@/types/llm';
  // ref https://console.groq.com/docs/models
  const Groq: ModelProviderCard = {
  chatModels: [
+ {
+ displayName: 'LLaMA3.1 405B (Preview)',
+ enabled: false,
+ functionCall: true,
+ id: 'llama-3.1-405b-reasoning',
+ tokens: 16_000,
+ },
+ {
+ displayName: 'LLaMA3.1 70B (Preview)',
+ enabled: true,
+ functionCall: true,
+ id: 'llama-3.1-70b-versatile',
+ tokens: 8000,
+ },
+ {
+ displayName: 'LLaMA3.1 8B (Preview)',
+ enabled: true,
+ functionCall: true,
+ id: 'llama-3.1-8b-instant',
+ tokens: 8000,
+ },
  {
  displayName: 'LLaMA3 70B',
  enabled: true,
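For context on the hunk above (its file header is omitted in this diff view, but the `ModelProviderCard` import from `@/types/llm` identifies it as the Groq model-provider config): three Llama 3.1 entries are prepended to the provider's `chatModels` list. Below is a minimal, self-contained sketch of the data those entries carry, using an illustrative `ChatModelCard` interface as a stand-in for the real `@/types/llm` types:

```ts
// Illustrative stand-in for the chat-model entry shape used above; the real
// types live in lobe-chat's '@/types/llm' and may carry additional fields.
interface ChatModelCard {
  displayName: string;
  enabled: boolean; // whether the model is turned on by default
  functionCall?: boolean; // model supports tool / function calling
  id: string; // model id sent to the Groq API
  tokens: number; // context window configured for the UI
}

// The three entries added in this release, as plain data.
const newGroqModels: ChatModelCard[] = [
  { displayName: 'LLaMA3.1 405B (Preview)', enabled: false, functionCall: true, id: 'llama-3.1-405b-reasoning', tokens: 16_000 },
  { displayName: 'LLaMA3.1 70B (Preview)', enabled: true, functionCall: true, id: 'llama-3.1-70b-versatile', tokens: 8000 },
  { displayName: 'LLaMA3.1 8B (Preview)', enabled: true, functionCall: true, id: 'llama-3.1-8b-instant', tokens: 8000 },
];

// Example: ids enabled out of the box in this version.
console.log(newGroqModels.filter((m) => m.enabled).map((m) => m.id));
// -> ['llama-3.1-70b-versatile', 'llama-3.1-8b-instant']
```

Only the 70B and 8B previews ship enabled by default; the 405B preview entry is present but disabled.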