@lobehub/chat 1.32.7 → 1.32.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. package/CHANGELOG.md +50 -0
  2. package/Dockerfile +1 -1
  3. package/Dockerfile.database +1 -1
  4. package/docs/self-hosting/platform/alibaba-cloud.mdx +4 -1
  5. package/docs/self-hosting/platform/btpanel.mdx +5 -1
  6. package/docs/self-hosting/platform/btpanel.zh-CN.mdx +3 -1
  7. package/docs/self-hosting/server-database.mdx +1 -1
  8. package/locales/ar/models.json +2 -0
  9. package/locales/ar/setting.json +1 -0
  10. package/locales/bg-BG/models.json +2 -0
  11. package/locales/bg-BG/setting.json +1 -0
  12. package/locales/de-DE/models.json +2 -0
  13. package/locales/de-DE/setting.json +1 -0
  14. package/locales/en-US/models.json +2 -0
  15. package/locales/en-US/setting.json +1 -0
  16. package/locales/es-ES/models.json +2 -0
  17. package/locales/es-ES/setting.json +1 -0
  18. package/locales/fa-IR/models.json +2 -0
  19. package/locales/fa-IR/setting.json +1 -0
  20. package/locales/fr-FR/models.json +2 -0
  21. package/locales/fr-FR/setting.json +1 -0
  22. package/locales/it-IT/models.json +2 -0
  23. package/locales/it-IT/setting.json +1 -0
  24. package/locales/ja-JP/models.json +2 -0
  25. package/locales/ja-JP/setting.json +1 -0
  26. package/locales/ko-KR/models.json +2 -0
  27. package/locales/ko-KR/setting.json +1 -0
  28. package/locales/nl-NL/models.json +2 -0
  29. package/locales/nl-NL/setting.json +1 -0
  30. package/locales/pl-PL/models.json +2 -0
  31. package/locales/pl-PL/setting.json +1 -0
  32. package/locales/pt-BR/models.json +2 -0
  33. package/locales/pt-BR/setting.json +1 -0
  34. package/locales/ru-RU/models.json +2 -0
  35. package/locales/ru-RU/setting.json +1 -0
  36. package/locales/tr-TR/models.json +2 -0
  37. package/locales/tr-TR/setting.json +1 -0
  38. package/locales/vi-VN/models.json +2 -0
  39. package/locales/vi-VN/setting.json +1 -0
  40. package/locales/zh-CN/models.json +4 -2
  41. package/locales/zh-CN/setting.json +1 -0
  42. package/locales/zh-TW/models.json +2 -0
  43. package/locales/zh-TW/setting.json +1 -0
  44. package/package.json +1 -1
  45. package/src/app/(backend)/_deprecated/createBizOpenAI/createAzureOpenai.ts +2 -1
  46. package/src/app/(backend)/_deprecated/createBizOpenAI/createOpenai.ts +2 -1
  47. package/src/app/(main)/settings/llm/components/ProviderModelList/ModelFetcher.tsx +23 -6
  48. package/src/app/layout.tsx +4 -2
  49. package/src/config/llm.ts +0 -18
  50. package/src/config/modelProviders/moonshot.ts +3 -0
  51. package/src/locales/default/setting.ts +1 -0
  52. package/src/server/globalConfig/genServerLLMConfig.test.ts +1 -1
  53. package/src/server/globalConfig/index.ts +1 -2
  54. package/src/server/modules/AgentRuntime/index.ts +31 -33
  55. package/src/store/user/slices/modelList/action.ts +16 -1
package/CHANGELOG.md CHANGED
@@ -2,6 +2,56 @@
2
2
 
3
3
  # Changelog
4
4
 
5
+ ### [Version 1.32.9](https://github.com/lobehub/lobe-chat/compare/v1.32.8...v1.32.9)
6
+
7
+ <sup>Released on **2024-11-24**</sup>
8
+
9
+ #### 💄 Styles
10
+
11
+ - **misc**: Support to reset fetched models.
12
+
13
+ <br/>
14
+
15
+ <details>
16
+ <summary><kbd>Improvements and Fixes</kbd></summary>
17
+
18
+ #### Styles
19
+
20
+ - **misc**: Support to reset fetched models, closes [#4260](https://github.com/lobehub/lobe-chat/issues/4260) ([22480a9](https://github.com/lobehub/lobe-chat/commit/22480a9))
21
+
22
+ </details>
23
+
24
+ <div align="right">
25
+
26
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
27
+
28
+ </div>
29
+
30
+ ### [Version 1.32.8](https://github.com/lobehub/lobe-chat/compare/v1.32.7...v1.32.8)
31
+
32
+ <sup>Released on **2024-11-24**</sup>
33
+
34
+ #### 🐛 Bug Fixes
35
+
36
+ - **misc**: Fix `XAI_PROXY_URL` env missing.
37
+
38
+ <br/>
39
+
40
+ <details>
41
+ <summary><kbd>Improvements and Fixes</kbd></summary>
42
+
43
+ #### What's fixed
44
+
45
+ - **misc**: Fix `XAI_PROXY_URL` env missing, closes [#4747](https://github.com/lobehub/lobe-chat/issues/4747) ([7c9e88e](https://github.com/lobehub/lobe-chat/commit/7c9e88e))
46
+
47
+ </details>
48
+
49
+ <div align="right">
50
+
51
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
52
+
53
+ </div>
54
+
5
55
  ### [Version 1.32.7](https://github.com/lobehub/lobe-chat/compare/v1.32.6...v1.32.7)
6
56
 
7
57
  <sup>Released on **2024-11-24**</sup>
package/Dockerfile CHANGED
@@ -205,7 +205,7 @@ ENV \
205
205
  # Wenxin
206
206
  WENXIN_ACCESS_KEY="" WENXIN_SECRET_KEY="" WENXIN_MODEL_LIST="" \
207
207
  # xAI
208
- XAI_API_KEY="" XAI_MODEL_LIST="" \
208
+ XAI_API_KEY="" XAI_MODEL_LIST="" XAI_PROXY_URL="" \
209
209
  # 01.AI
210
210
  ZEROONE_API_KEY="" ZEROONE_MODEL_LIST="" \
211
211
  # Zhipu
@@ -240,7 +240,7 @@ ENV \
240
240
  # Wenxin
241
241
  WENXIN_ACCESS_KEY="" WENXIN_SECRET_KEY="" WENXIN_MODEL_LIST="" \
242
242
  # xAI
243
- XAI_API_KEY="" XAI_MODEL_LIST="" \
243
+ XAI_API_KEY="" XAI_MODEL_LIST="" XAI_PROXY_URL="" \
244
244
  # 01.AI
245
245
  ZEROONE_API_KEY="" ZEROONE_MODEL_LIST="" \
246
246
  # Zhipu
@@ -1,6 +1,9 @@
1
1
  ---
2
2
  title: Deploy LobeChat on Alibaba Cloud
3
- description: Learn how to deploy the LobeChat application on Alibaba Cloud, including preparing the large model API Key, clicking the deploy button, and other operations.
3
+ description: >-
4
+ Learn how to deploy the LobeChat application on Alibaba Cloud, including
5
+ preparing the large model API Key, clicking the deploy button, and other
6
+ operations.
4
7
  tags:
5
8
  - Alibaba Cloud
6
9
  - LobeChat
@@ -1,6 +1,10 @@
1
1
  ---
2
2
  title: Deploy LobeChat using aaPanel
3
- description: Learn how to deploy the LobeChat service using aaPanel-Docker, including installing the Docker container environment and using the command to start the service with one click. Detailed instructions on how to configure environment variables and use proxy addresses.
3
+ description: >-
4
+ Learn how to deploy the LobeChat service using aaPanel-Docker, including
5
+ installing the Docker container environment and using the command to start the
6
+ service with one click. Detailed instructions on how to configure environment
7
+ variables and use proxy addresses.
4
8
  tags:
5
9
  - Docker
6
10
  - LobeChat
@@ -1,6 +1,8 @@
1
1
  ---
2
2
  title: 通过 宝塔面板Docker应用商店 部署 LobeChat
3
- description: 学习如何使用 宝塔面板Docker应用 部署 LobeChat 服务,包括安装 Docker 容器环境和使用指令一键启动服务。详细说明如何配置环境变量和使用代理地址。
3
+ description: >-
4
+ 学习如何使用 宝塔面板Docker应用 部署 LobeChat 服务,包括安装 Docker
5
+ 容器环境和使用指令一键启动服务。详细说明如何配置环境变量和使用代理地址。
4
6
  tags:
5
7
  - Docker
6
8
  - LobeChat
@@ -141,4 +141,4 @@ For detailed configuration guidelines on S3, please refer to [S3 Object Storage]
141
141
 
142
142
  The above is a detailed explanation of configuring LobeChat with a server-side database. You can configure it according to your actual situation and then choose a deployment platform that suits you to start deployment:
143
143
 
144
- <PlatformCards urlPrefix={'server-database'} />
144
+ <PlatformCards urlPrefix={'server-database'} />
@@ -94,6 +94,7 @@
94
94
  "Nous-Hermes-2-Mixtral-8x7B-DPO": {
95
95
  "description": "Hermes 2 Mixtral 8x7B DPO هو دمج متعدد النماذج مرن للغاية، يهدف إلى تقديم تجربة إبداعية ممتازة."
96
96
  },
97
+ "NousResearch/Hermes-3-Llama-3.1-8B": {},
97
98
  "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO": {
98
99
  "description": "Nous Hermes 2 - Mixtral 8x7B-DPO (46.7B) هو نموذج تعليمات عالي الدقة، مناسب للحسابات المعقدة."
99
100
  },
@@ -846,6 +847,7 @@
846
847
  "meta.llama3-8b-instruct-v1:0": {
847
848
  "description": "Meta Llama 3 هو نموذج لغوي كبير مفتوح (LLM) موجه للمطورين والباحثين والشركات، يهدف إلى مساعدتهم في بناء وتجربة وتوسيع أفكارهم في الذكاء الاصطناعي بشكل مسؤول. كجزء من نظام الابتكار المجتمعي العالمي، فهو مثالي للأجهزة ذات القدرة الحاسوبية والموارد المحدودة، والأجهزة الطرفية، وأوقات التدريب الأسرع."
848
849
  },
850
+ "microsoft/Phi-3.5-mini-instruct": {},
849
851
  "microsoft/wizardlm 2-7b": {
850
852
  "description": "WizardLM 2 7B هو أحدث نموذج خفيف الوزن وسريع من Microsoft AI، ويقترب أداؤه من 10 أضعاف النماذج الرائدة المفتوحة المصدر الحالية."
851
853
  },
@@ -98,6 +98,7 @@
98
98
  "title": "استخدام طريقة طلب العميل"
99
99
  },
100
100
  "fetcher": {
101
+ "clear": "مسح النموذج المستخرج",
101
102
  "fetch": "احصل على قائمة النماذج",
102
103
  "fetching": "جاري الحصول على قائمة النماذج...",
103
104
  "latestTime": "آخر تحديث: {{time}}",
@@ -94,6 +94,7 @@
94
94
  "Nous-Hermes-2-Mixtral-8x7B-DPO": {
95
95
  "description": "Hermes 2 Mixtral 8x7B DPO е високо гъвкава многомоделна комбинация, предназначена да предостави изключителен креативен опит."
96
96
  },
97
+ "NousResearch/Hermes-3-Llama-3.1-8B": {},
97
98
  "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO": {
98
99
  "description": "Nous Hermes 2 - Mixtral 8x7B-DPO (46.7B) е модел с висока точност за инструкции, подходящ за сложни изчисления."
99
100
  },
@@ -846,6 +847,7 @@
846
847
  "meta.llama3-8b-instruct-v1:0": {
847
848
  "description": "Meta Llama 3 е отворен голям езиков модел (LLM), насочен към разработчици, изследователи и предприятия, предназначен да им помогне да изградят, експериментират и отговорно разширят своите идеи за генеративен ИИ. Като част от основната система на глобалната общност за иновации, той е особено подходящ за устройства с ограничени изчислителни ресурси и по-бързо време за обучение."
848
849
  },
850
+ "microsoft/Phi-3.5-mini-instruct": {},
849
851
  "microsoft/wizardlm 2-7b": {
850
852
  "description": "WizardLM 2 7B е най-новият бърз и лек модел на Microsoft AI, с производителност, близка до 10 пъти на съществуващите водещи отворени модели."
851
853
  },
@@ -98,6 +98,7 @@
98
98
  "title": "Използване на режим на заявка от клиента"
99
99
  },
100
100
  "fetcher": {
101
+ "clear": "Изчисти получената модел",
101
102
  "fetch": "Изтегляне на списъка с модели",
102
103
  "fetching": "Изтегляне на списъка с модели...",
103
104
  "latestTime": "Последно актуализирано: {{time}}",
@@ -94,6 +94,7 @@
94
94
  "Nous-Hermes-2-Mixtral-8x7B-DPO": {
95
95
  "description": "Hermes 2 Mixtral 8x7B DPO ist eine hochflexible Multi-Modell-Kombination, die darauf abzielt, außergewöhnliche kreative Erlebnisse zu bieten."
96
96
  },
97
+ "NousResearch/Hermes-3-Llama-3.1-8B": {},
97
98
  "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO": {
98
99
  "description": "Nous Hermes 2 - Mixtral 8x7B-DPO (46.7B) ist ein hochpräzises Anweisungsmodell, das für komplexe Berechnungen geeignet ist."
99
100
  },
@@ -846,6 +847,7 @@
846
847
  "meta.llama3-8b-instruct-v1:0": {
847
848
  "description": "Meta Llama 3 ist ein offenes großes Sprachmodell (LLM), das sich an Entwickler, Forscher und Unternehmen richtet und ihnen hilft, ihre Ideen für generative KI zu entwickeln, zu experimentieren und verantwortungsbewusst zu skalieren. Als Teil eines globalen Innovationssystems ist es besonders geeignet für Umgebungen mit begrenzter Rechenleistung und Ressourcen, für Edge-Geräte und schnellere Trainingszeiten."
848
849
  },
850
+ "microsoft/Phi-3.5-mini-instruct": {},
849
851
  "microsoft/wizardlm 2-7b": {
850
852
  "description": "WizardLM 2 7B ist das neueste schnelle und leichte Modell von Microsoft AI, dessen Leistung fast zehnmal so hoch ist wie die bestehender führender Open-Source-Modelle."
851
853
  },
@@ -98,6 +98,7 @@
98
98
  "title": "Client Fetch-Modus verwenden"
99
99
  },
100
100
  "fetcher": {
101
+ "clear": "Abgerufenes Modell löschen",
101
102
  "fetch": "Modelle abrufen",
102
103
  "fetching": "Modelle werden abgerufen...",
103
104
  "latestTime": "Letzte Aktualisierung: {{time}}",
@@ -94,6 +94,7 @@
94
94
  "Nous-Hermes-2-Mixtral-8x7B-DPO": {
95
95
  "description": "Hermes 2 Mixtral 8x7B DPO is a highly flexible multi-model fusion designed to provide an exceptional creative experience."
96
96
  },
97
+ "NousResearch/Hermes-3-Llama-3.1-8B": {},
97
98
  "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO": {
98
99
  "description": "Nous Hermes 2 - Mixtral 8x7B-DPO (46.7B) is a high-precision instruction model suitable for complex computations."
99
100
  },
@@ -846,6 +847,7 @@
846
847
  "meta.llama3-8b-instruct-v1:0": {
847
848
  "description": "Meta Llama 3 is an open large language model (LLM) aimed at developers, researchers, and enterprises, designed to help them build, experiment, and responsibly scale their generative AI ideas. As part of a foundational system for global community innovation, it is particularly suitable for those with limited computational power and resources, edge devices, and faster training times."
848
849
  },
850
+ "microsoft/Phi-3.5-mini-instruct": {},
849
851
  "microsoft/wizardlm 2-7b": {
850
852
  "description": "WizardLM 2 7B is Microsoft's latest lightweight AI model, performing nearly ten times better than existing leading open-source models."
851
853
  },
@@ -98,6 +98,7 @@
98
98
  "title": "Use Client-Side Fetching Mode"
99
99
  },
100
100
  "fetcher": {
101
+ "clear": "Clear fetched model",
101
102
  "fetch": "Get Model List",
102
103
  "fetching": "Fetching Model List...",
103
104
  "latestTime": "Last Updated: {{time}}",
@@ -94,6 +94,7 @@
94
94
  "Nous-Hermes-2-Mixtral-8x7B-DPO": {
95
95
  "description": "Hermes 2 Mixtral 8x7B DPO es una fusión de múltiples modelos altamente flexible, diseñada para ofrecer una experiencia creativa excepcional."
96
96
  },
97
+ "NousResearch/Hermes-3-Llama-3.1-8B": {},
97
98
  "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO": {
98
99
  "description": "Nous Hermes 2 - Mixtral 8x7B-DPO (46.7B) es un modelo de instrucciones de alta precisión, adecuado para cálculos complejos."
99
100
  },
@@ -846,6 +847,7 @@
846
847
  "meta.llama3-8b-instruct-v1:0": {
847
848
  "description": "Meta Llama 3 es un modelo de lenguaje de gran tamaño (LLM) abierto dirigido a desarrolladores, investigadores y empresas, diseñado para ayudarles a construir, experimentar y escalar de manera responsable sus ideas de IA generativa. Como parte de un sistema base para la innovación de la comunidad global, es ideal para dispositivos de borde con recursos y capacidades computacionales limitadas, así como para tiempos de entrenamiento más rápidos."
848
849
  },
850
+ "microsoft/Phi-3.5-mini-instruct": {},
849
851
  "microsoft/wizardlm 2-7b": {
850
852
  "description": "WizardLM 2 7B es el último modelo ligero y rápido de Microsoft AI, con un rendimiento cercano a 10 veces el de los modelos líderes de código abierto existentes."
851
853
  },
@@ -98,6 +98,7 @@
98
98
  "title": "Usar el modo de solicitud en el cliente"
99
99
  },
100
100
  "fetcher": {
101
+ "clear": "Eliminar el modelo obtenido",
101
102
  "fetch": "Obtener lista de modelos",
102
103
  "fetching": "Obteniendo lista de modelos...",
103
104
  "latestTime": "Última actualización: {{time}}",
@@ -94,6 +94,7 @@
94
94
  "Nous-Hermes-2-Mixtral-8x7B-DPO": {
95
95
  "description": "Hermes 2 Mixtral 8x7B DPO یک ترکیب چندمدلی بسیار انعطاف‌پذیر است که برای ارائه تجربه‌ای خلاقانه و برجسته طراحی شده است."
96
96
  },
97
+ "NousResearch/Hermes-3-Llama-3.1-8B": {},
97
98
  "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO": {
98
99
  "description": "Nous Hermes 2 - Mixtral 8x7B-DPO (46.7B) یک مدل دستورالعمل با دقت بالا است که برای محاسبات پیچیده مناسب است."
99
100
  },
@@ -846,6 +847,7 @@
846
847
  "meta.llama3-8b-instruct-v1:0": {
847
848
  "description": "Meta Llama 3 یک مدل زبان بزرگ باز (LLM) است که برای توسعه‌دهندگان، پژوهشگران و شرکت‌ها طراحی شده است تا به آن‌ها در ساخت، آزمایش و گسترش مسئولانه ایده‌های هوش مصنوعی مولد کمک کند. به عنوان بخشی از سیستم پایه نوآوری جامعه جهانی، این مدل برای دستگاه‌های با توان محاسباتی و منابع محدود، دستگاه‌های لبه و زمان‌های آموزش سریع‌تر بسیار مناسب است."
848
849
  },
850
+ "microsoft/Phi-3.5-mini-instruct": {},
849
851
  "microsoft/wizardlm 2-7b": {
850
852
  "description": "WizardLM 2 7B مدل جدید و سبک وزن AI مایکروسافت است که عملکرد آن نزدیک به 10 برابر مدل‌های پیشرو متن‌باز موجود است."
851
853
  },
@@ -98,6 +98,7 @@
98
98
  "title": "استفاده از حالت درخواست از سمت کلاینت"
99
99
  },
100
100
  "fetcher": {
101
+ "clear": "پاک کردن مدل‌های دریافت شده",
101
102
  "fetch": "دریافت لیست مدل‌ها",
102
103
  "fetching": "در حال دریافت لیست مدل‌ها...",
103
104
  "latestTime": "آخرین زمان به‌روزرسانی: {{time}}",
@@ -94,6 +94,7 @@
94
94
  "Nous-Hermes-2-Mixtral-8x7B-DPO": {
95
95
  "description": "Hermes 2 Mixtral 8x7B DPO est une fusion de modèles hautement flexible, visant à offrir une expérience créative exceptionnelle."
96
96
  },
97
+ "NousResearch/Hermes-3-Llama-3.1-8B": {},
97
98
  "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO": {
98
99
  "description": "Nous Hermes 2 - Mixtral 8x7B-DPO (46.7B) est un modèle d'instructions de haute précision, adapté aux calculs complexes."
99
100
  },
@@ -846,6 +847,7 @@
846
847
  "meta.llama3-8b-instruct-v1:0": {
847
848
  "description": "Meta Llama 3 est un modèle de langage ouvert (LLM) destiné aux développeurs, chercheurs et entreprises, conçu pour les aider à construire, expérimenter et étendre de manière responsable leurs idées d'IA générative. En tant que partie intégrante d'un système de base pour l'innovation de la communauté mondiale, il est particulièrement adapté aux appareils à capacité de calcul et de ressources limitées, ainsi qu'à des temps d'entraînement plus rapides."
848
849
  },
850
+ "microsoft/Phi-3.5-mini-instruct": {},
849
851
  "microsoft/wizardlm 2-7b": {
850
852
  "description": "WizardLM 2 7B est le dernier modèle léger et rapide de Microsoft AI, offrant des performances proches de dix fois celles des modèles leaders open source existants."
851
853
  },
@@ -98,6 +98,7 @@
98
98
  "title": "Utiliser le mode de requête client"
99
99
  },
100
100
  "fetcher": {
101
+ "clear": "Effacer le modèle récupéré",
101
102
  "fetch": "Obtenir la liste des modèles",
102
103
  "fetching": "Récupération de la liste des modèles en cours...",
103
104
  "latestTime": "Dernière mise à jour : {{time}}",
@@ -94,6 +94,7 @@
94
94
  "Nous-Hermes-2-Mixtral-8x7B-DPO": {
95
95
  "description": "Hermes 2 Mixtral 8x7B DPO è un modello altamente flessibile, progettato per offrire un'esperienza creativa eccezionale."
96
96
  },
97
+ "NousResearch/Hermes-3-Llama-3.1-8B": {},
97
98
  "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO": {
98
99
  "description": "Nous Hermes 2 - Mixtral 8x7B-DPO (46.7B) è un modello di istruzioni ad alta precisione, adatto per calcoli complessi."
99
100
  },
@@ -846,6 +847,7 @@
846
847
  "meta.llama3-8b-instruct-v1:0": {
847
848
  "description": "Meta Llama 3 è un modello di linguaggio di grandi dimensioni (LLM) open source progettato per sviluppatori, ricercatori e aziende, per aiutarli a costruire, sperimentare e scalare responsabilmente le loro idee di AI generativa. Come parte di un sistema di base per l'innovazione della comunità globale, è particolarmente adatto per dispositivi a bassa potenza e risorse limitate, oltre a garantire tempi di addestramento più rapidi."
848
849
  },
850
+ "microsoft/Phi-3.5-mini-instruct": {},
849
851
  "microsoft/wizardlm 2-7b": {
850
852
  "description": "WizardLM 2 7B è il modello leggero e veloce più recente di Microsoft AI, con prestazioni vicine a quelle dei modelli leader open source esistenti."
851
853
  },
@@ -98,6 +98,7 @@
98
98
  "title": "Utilizzo del modo di richiesta del client"
99
99
  },
100
100
  "fetcher": {
101
+ "clear": "Cancella il modello ottenuto",
101
102
  "fetch": "Ottenere l'elenco dei modelli",
102
103
  "fetching": "Recupero dell'elenco dei modelli in corso...",
103
104
  "latestTime": "Ultimo aggiornamento: {{time}}",
@@ -94,6 +94,7 @@
94
94
  "Nous-Hermes-2-Mixtral-8x7B-DPO": {
95
95
  "description": "Hermes 2 Mixtral 8x7B DPOは非常に柔軟なマルチモデル統合で、卓越した創造的体験を提供することを目的としています。"
96
96
  },
97
+ "NousResearch/Hermes-3-Llama-3.1-8B": {},
97
98
  "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO": {
98
99
  "description": "Nous Hermes 2 - Mixtral 8x7B-DPO (46.7B)は、高精度の指示モデルであり、複雑な計算に適しています。"
99
100
  },
@@ -846,6 +847,7 @@
846
847
  "meta.llama3-8b-instruct-v1:0": {
847
848
  "description": "Meta Llama 3は、開発者、研究者、企業向けのオープンな大規模言語モデル(LLM)であり、生成AIのアイデアを構築、実験、責任を持って拡張するのを支援することを目的としています。世界的なコミュニティの革新の基盤システムの一部として、計算能力とリソースが限られたエッジデバイスや、より迅速なトレーニング時間に非常に適しています。"
848
849
  },
850
+ "microsoft/Phi-3.5-mini-instruct": {},
849
851
  "microsoft/wizardlm 2-7b": {
850
852
  "description": "WizardLM 2 7BはMicrosoft AIの最新の高速軽量モデルで、既存のオープンソースリーダーモデルの10倍に近い性能を持っています。"
851
853
  },
@@ -98,6 +98,7 @@
98
98
  "title": "クライアントサイドリクエストモードの使用"
99
99
  },
100
100
  "fetcher": {
101
+ "clear": "取得したモデルをクリア",
101
102
  "fetch": "モデルリストを取得する",
102
103
  "fetching": "モデルリストを取得中...",
103
104
  "latestTime": "最終更新時間:{{time}}",
@@ -94,6 +94,7 @@
94
94
  "Nous-Hermes-2-Mixtral-8x7B-DPO": {
95
95
  "description": "Hermes 2 Mixtral 8x7B DPO는 뛰어난 창의적 경험을 제공하기 위해 설계된 고도로 유연한 다중 모델 통합입니다."
96
96
  },
97
+ "NousResearch/Hermes-3-Llama-3.1-8B": {},
97
98
  "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO": {
98
99
  "description": "Nous Hermes 2 - Mixtral 8x7B-DPO (46.7B)는 고정밀 지시 모델로, 복잡한 계산에 적합합니다."
99
100
  },
@@ -846,6 +847,7 @@
846
847
  "meta.llama3-8b-instruct-v1:0": {
847
848
  "description": "Meta Llama 3은 개발자, 연구자 및 기업을 위한 오픈 대형 언어 모델(LLM)로, 생성 AI 아이디어를 구축하고 실험하며 책임감 있게 확장하는 데 도움을 주기 위해 설계되었습니다. 전 세계 커뮤니티 혁신의 기초 시스템의 일환으로, 계산 능력과 자원이 제한된 환경, 엣지 장치 및 더 빠른 훈련 시간에 매우 적합합니다."
848
849
  },
850
+ "microsoft/Phi-3.5-mini-instruct": {},
849
851
  "microsoft/wizardlm 2-7b": {
850
852
  "description": "WizardLM 2 7B는 Microsoft AI의 최신 경량 모델로, 기존 오픈 소스 선도 모델의 성능에 근접합니다."
851
853
  },
@@ -98,6 +98,7 @@
98
98
  "title": "클라이언트 요청 모드 사용"
99
99
  },
100
100
  "fetcher": {
101
+ "clear": "가져온 모델 지우기",
101
102
  "fetch": "모델 목록 가져오기",
102
103
  "fetching": "모델 목록을 가져오는 중...",
103
104
  "latestTime": "마지막 업데이트 시간: {{time}}",
@@ -94,6 +94,7 @@
94
94
  "Nous-Hermes-2-Mixtral-8x7B-DPO": {
95
95
  "description": "Hermes 2 Mixtral 8x7B DPO is een zeer flexibele multi-model combinatie, ontworpen om een uitstekende creatieve ervaring te bieden."
96
96
  },
97
+ "NousResearch/Hermes-3-Llama-3.1-8B": {},
97
98
  "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO": {
98
99
  "description": "Nous Hermes 2 - Mixtral 8x7B-DPO (46.7B) is een hoogprecisie instructiemodel, geschikt voor complexe berekeningen."
99
100
  },
@@ -846,6 +847,7 @@
846
847
  "meta.llama3-8b-instruct-v1:0": {
847
848
  "description": "Meta Llama 3 is een open groot taalmodel (LLM) gericht op ontwikkelaars, onderzoekers en bedrijven, ontworpen om hen te helpen bij het bouwen, experimenteren en verantwoordelijk opschalen van hun generatieve AI-ideeën. Als onderdeel van het basis systeem voor wereldwijde gemeenschapsinnovatie is het zeer geschikt voor apparaten met beperkte rekenkracht en middelen, edge-apparaten en snellere trainingstijden."
848
849
  },
850
+ "microsoft/Phi-3.5-mini-instruct": {},
849
851
  "microsoft/wizardlm 2-7b": {
850
852
  "description": "WizardLM 2 7B is het nieuwste snelle en lichte model van Microsoft AI, met prestaties die bijna 10 keer beter zijn dan de huidige toonaangevende open-source modellen."
851
853
  },
@@ -98,6 +98,7 @@
98
98
  "title": "Gebruik de ophaalmodus aan de clientzijde"
99
99
  },
100
100
  "fetcher": {
101
+ "clear": "Verwijder opgehaalde model",
101
102
  "fetch": "Haal model lijst op",
102
103
  "fetching": "Model lijst wordt opgehaald...",
103
104
  "latestTime": "Laatst bijgewerkt: {{time}}",
@@ -94,6 +94,7 @@
94
94
  "Nous-Hermes-2-Mixtral-8x7B-DPO": {
95
95
  "description": "Hermes 2 Mixtral 8x7B DPO to wysoce elastyczna fuzja wielu modeli, mająca na celu zapewnienie doskonałego doświadczenia twórczego."
96
96
  },
97
+ "NousResearch/Hermes-3-Llama-3.1-8B": {},
97
98
  "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO": {
98
99
  "description": "Nous Hermes 2 - Mixtral 8x7B-DPO (46.7B) to model poleceń o wysokiej precyzji, idealny do złożonych obliczeń."
99
100
  },
@@ -846,6 +847,7 @@
846
847
  "meta.llama3-8b-instruct-v1:0": {
847
848
  "description": "Meta Llama 3 to otwarty duży model językowy (LLM) skierowany do deweloperów, badaczy i przedsiębiorstw, mający na celu pomoc w budowaniu, eksperymentowaniu i odpowiedzialnym rozwijaniu ich pomysłów na generatywną sztuczną inteligencję. Jako część podstawowego systemu innowacji globalnej społeczności, jest idealny dla urządzeń o ograniczonej mocy obliczeniowej i zasobach, a także dla szybszego czasu szkolenia."
848
849
  },
850
+ "microsoft/Phi-3.5-mini-instruct": {},
849
851
  "microsoft/wizardlm 2-7b": {
850
852
  "description": "WizardLM 2 7B to najnowszy szybki i lekki model AI od Microsoftu, osiągający wydajność bliską 10-krotności istniejących wiodących modeli open source."
851
853
  },
@@ -98,6 +98,7 @@
98
98
  "title": "使用客户端请求模式"
99
99
  },
100
100
  "fetcher": {
101
+ "clear": "Wyczyść pobrany model",
101
102
  "fetch": "Pobierz listę modeli",
102
103
  "fetching": "Trwa pobieranie listy modeli...",
103
104
  "latestTime": "Ostatnia aktualizacja: {{time}}",
@@ -94,6 +94,7 @@
94
94
  "Nous-Hermes-2-Mixtral-8x7B-DPO": {
95
95
  "description": "Hermes 2 Mixtral 8x7B DPO é uma fusão de múltiplos modelos altamente flexível, projetada para oferecer uma experiência criativa excepcional."
96
96
  },
97
+ "NousResearch/Hermes-3-Llama-3.1-8B": {},
97
98
  "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO": {
98
99
  "description": "Nous Hermes 2 - Mixtral 8x7B-DPO (46.7B) é um modelo de instrução de alta precisão, adequado para cálculos complexos."
99
100
  },
@@ -846,6 +847,7 @@
846
847
  "meta.llama3-8b-instruct-v1:0": {
847
848
  "description": "Meta Llama 3 é um modelo de linguagem de grande escala (LLM) aberto voltado para desenvolvedores, pesquisadores e empresas, projetado para ajudá-los a construir, experimentar e expandir suas ideias de IA geradora de forma responsável. Como parte de um sistema de base para inovação da comunidade global, é ideal para dispositivos de borda com capacidade de computação e recursos limitados, além de tempos de treinamento mais rápidos."
848
849
  },
850
+ "microsoft/Phi-3.5-mini-instruct": {},
849
851
  "microsoft/wizardlm 2-7b": {
850
852
  "description": "WizardLM 2 7B é o modelo leve e rápido mais recente da Microsoft AI, com desempenho próximo a 10 vezes o de modelos de código aberto existentes."
851
853
  },
@@ -98,6 +98,7 @@
98
98
  "title": "Usar o modo de solicitação do cliente"
99
99
  },
100
100
  "fetcher": {
101
+ "clear": "Limpar o modelo obtido",
101
102
  "fetch": "Obter lista de modelos",
102
103
  "fetching": "Obtendo lista de modelos...",
103
104
  "latestTime": "Última atualização: {{time}}",
@@ -94,6 +94,7 @@
94
94
  "Nous-Hermes-2-Mixtral-8x7B-DPO": {
95
95
  "description": "Hermes 2 Mixtral 8x7B DPO — это высокоадаптивная многомодельная комбинация, предназначенная для предоставления выдающегося творческого опыта."
96
96
  },
97
+ "NousResearch/Hermes-3-Llama-3.1-8B": {},
97
98
  "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO": {
98
99
  "description": "Nous Hermes 2 - Mixtral 8x7B-DPO (46.7B) — это высокоточная модель команд, подходящая для сложных вычислений."
99
100
  },
@@ -846,6 +847,7 @@
846
847
  "meta.llama3-8b-instruct-v1:0": {
847
848
  "description": "Meta Llama 3 — это открытая большая языковая модель (LLM), ориентированная на разработчиков, исследователей и предприятия, предназначенная для помощи в создании, экспериментировании и ответственном масштабировании их идей по генеративному ИИ. В качестве части базовой системы для инноваций глобального сообщества она идеально подходит для устройств с ограниченными вычислительными мощностями и ресурсами, а также для более быстрого времени обучения."
848
849
  },
850
+ "microsoft/Phi-3.5-mini-instruct": {},
849
851
  "microsoft/wizardlm 2-7b": {
850
852
  "description": "WizardLM 2 7B — это новая быстрая и легкая модель от Microsoft AI, производительность которой близка к 10-кратной производительности существующих открытых моделей."
851
853
  },
@@ -98,6 +98,7 @@
98
98
  "title": "Использовать режим запроса с клиента"
99
99
  },
100
100
  "fetcher": {
101
+ "clear": "Очистить полученную модель",
101
102
  "fetch": "Получить список моделей",
102
103
  "fetching": "Идет получение списка моделей...",
103
104
  "latestTime": "Последнее обновление: {{time}}",
@@ -94,6 +94,7 @@
94
94
  "Nous-Hermes-2-Mixtral-8x7B-DPO": {
95
95
  "description": "Hermes 2 Mixtral 8x7B DPO, olağanüstü yaratıcı deneyimler sunmak için tasarlanmış son derece esnek bir çoklu model birleşimidir."
96
96
  },
97
+ "NousResearch/Hermes-3-Llama-3.1-8B": {},
97
98
  "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO": {
98
99
  "description": "Nous Hermes 2 - Mixtral 8x7B-DPO (46.7B), karmaşık hesaplamalar için yüksek hassasiyetli bir talimat modelidir."
99
100
  },
@@ -846,6 +847,7 @@
846
847
  "meta.llama3-8b-instruct-v1:0": {
847
848
  "description": "Meta Llama 3, geliştiriciler, araştırmacılar ve işletmeler için açık bir büyük dil modelidir (LLM) ve onların üretken AI fikirlerini inşa etmelerine, denemelerine ve sorumlu bir şekilde genişletmelerine yardımcı olmak için tasarlanmıştır. Küresel topluluk yeniliğinin temel sistemlerinden biri olarak, sınırlı hesaplama gücü ve kaynaklara sahip, kenar cihazları ve daha hızlı eğitim süreleri için son derece uygundur."
848
849
  },
850
+ "microsoft/Phi-3.5-mini-instruct": {},
849
851
  "microsoft/wizardlm 2-7b": {
850
852
  "description": "WizardLM 2 7B, Microsoft AI'nın en son hızlı ve hafif modelidir ve mevcut açık kaynak lider modellerin performansına yakın bir performans sunmaktadır."
851
853
  },
@@ -98,6 +98,7 @@
98
98
  "title": "İstemci Tarafından Veri Alımı"
99
99
  },
100
100
  "fetcher": {
101
+ "clear": "Alınan modeli temizle",
101
102
  "fetch": "Modelleri Al",
102
103
  "fetching": "Modelleri alınıyor...",
103
104
  "latestTime": "Son güncelleme zamanı: {{time}}",
@@ -94,6 +94,7 @@
94
94
  "Nous-Hermes-2-Mixtral-8x7B-DPO": {
95
95
  "description": "Hermes 2 Mixtral 8x7B DPO là một mô hình kết hợp đa dạng, nhằm cung cấp trải nghiệm sáng tạo xuất sắc."
96
96
  },
97
+ "NousResearch/Hermes-3-Llama-3.1-8B": {},
97
98
  "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO": {
98
99
  "description": "Nous Hermes 2 - Mixtral 8x7B-DPO (46.7B) là mô hình chỉ dẫn chính xác cao, phù hợp cho tính toán phức tạp."
99
100
  },
@@ -846,6 +847,7 @@
846
847
  "meta.llama3-8b-instruct-v1:0": {
847
848
  "description": "Meta Llama 3 là một mô hình ngôn ngữ lớn (LLM) mở dành cho các nhà phát triển, nhà nghiên cứu và doanh nghiệp, nhằm giúp họ xây dựng, thử nghiệm và mở rộng ý tưởng AI sinh một cách có trách nhiệm. Là một phần của hệ thống cơ sở hạ tầng đổi mới toàn cầu, nó rất phù hợp cho các thiết bị biên và thời gian huấn luyện nhanh hơn với khả năng tính toán và tài nguyên hạn chế."
848
849
  },
850
+ "microsoft/Phi-3.5-mini-instruct": {},
849
851
  "microsoft/wizardlm 2-7b": {
850
852
  "description": "WizardLM 2 7B là mô hình nhẹ và nhanh mới nhất của Microsoft AI, hiệu suất gần gấp 10 lần so với các mô hình mở nguồn hiện có."
851
853
  },
@@ -98,6 +98,7 @@
98
98
  "title": "Sử dụng chế độ yêu cầu từ khách hàng"
99
99
  },
100
100
  "fetcher": {
101
+ "clear": "Xóa mô hình đã lấy",
101
102
  "fetch": "Lấy danh sách mô hình",
102
103
  "fetching": "Đang lấy danh sách mô hình...",
103
104
  "latestTime": "Thời gian cập nhật lần cuối: {{time}}",
@@ -94,6 +94,7 @@
94
94
  "Nous-Hermes-2-Mixtral-8x7B-DPO": {
95
95
  "description": "Hermes 2 Mixtral 8x7B DPO 是一款高度灵活的多模型合并,旨在提供卓越的创造性体验。"
96
96
  },
97
+ "NousResearch/Hermes-3-Llama-3.1-8B": {},
97
98
  "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO": {
98
99
  "description": "Nous Hermes 2 - Mixtral 8x7B-DPO (46.7B) 是高精度的指令模型,适用于复杂计算。"
99
100
  },
@@ -846,6 +847,7 @@
846
847
  "meta.llama3-8b-instruct-v1:0": {
847
848
  "description": "Meta Llama 3 是一款面向开发者、研究人员和企业的开放大型语言模型 (LLM),旨在帮助他们构建、实验并负责任地扩展他们的生成 AI 想法。作为全球社区创新的基础系统的一部分,它非常适合计算能力和资源有限、边缘设备和更快的训练时间。"
848
849
  },
850
+ "microsoft/Phi-3.5-mini-instruct": {},
849
851
  "microsoft/wizardlm 2-7b": {
850
852
  "description": "WizardLM 2 7B 是微软AI最新的快速轻量化模型,性能接近于现有开源领导模型的10倍。"
851
853
  },
@@ -883,10 +885,10 @@
883
885
  "description": "Mistral (7B) Instruct 以高性能著称,适用于多种语言任务。"
884
886
  },
885
887
  "mistralai/Mistral-7B-Instruct-v0.2": {
886
- "description": "Mistral AI的指令调优模型"
888
+ "description": "Mistral (7B) Instruct v0.2 提供改进的指令处理能力和更精确的结果。"
887
889
  },
888
890
  "mistralai/Mistral-7B-Instruct-v0.3": {
889
- "description": "Mistral (7B) Instruct v0.3 提供高效的计算能力和自然语言理解,适合广泛的应用。"
891
+ "description": "Mistral AI的指令调优模型"
890
892
  },
891
893
  "mistralai/Mistral-7B-v0.1": {
892
894
  "description": "Mistral 7B是一款紧凑但高性能的模型,擅长批量处理和简单任务,如分类和文本生成,具有良好的推理能力。"
@@ -98,6 +98,7 @@
98
98
  "title": "使用客户端请求模式"
99
99
  },
100
100
  "fetcher": {
101
+ "clear": "清除获取的模型",
101
102
  "fetch": "获取模型列表",
102
103
  "fetching": "正在获取模型列表...",
103
104
  "latestTime": "上次更新时间:{{time}}",
@@ -94,6 +94,7 @@
94
94
  "Nous-Hermes-2-Mixtral-8x7B-DPO": {
95
95
  "description": "Hermes 2 Mixtral 8x7B DPO 是一款高度靈活的多模型合併,旨在提供卓越的創造性體驗。"
96
96
  },
97
+ "NousResearch/Hermes-3-Llama-3.1-8B": {},
97
98
  "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO": {
98
99
  "description": "Nous Hermes 2 - Mixtral 8x7B-DPO (46.7B) 是高精度的指令模型,適用於複雜計算。"
99
100
  },
@@ -846,6 +847,7 @@
846
847
  "meta.llama3-8b-instruct-v1:0": {
847
848
  "description": "Meta Llama 3 是一款面向開發者、研究人員和企業的開放大型語言模型 (LLM),旨在幫助他們構建、實驗並負責任地擴展他們的生成 AI 想法。作為全球社區創新的基礎系統的一部分,它非常適合計算能力和資源有限、邊緣設備和更快的訓練時間。"
848
849
  },
850
+ "microsoft/Phi-3.5-mini-instruct": {},
849
851
  "microsoft/wizardlm 2-7b": {
850
852
  "description": "WizardLM 2 7B 是微軟AI最新的快速輕量化模型,性能接近於現有開源領導模型的10倍。"
851
853
  },
@@ -98,6 +98,7 @@
98
98
  "title": "使用客戶端請求模式"
99
99
  },
100
100
  "fetcher": {
101
+ "clear": "清除獲取的模型",
101
102
  "fetch": "獲取模型列表",
102
103
  "fetching": "正在獲取模型列表...",
103
104
  "latestTime": "上次更新時間:{{time}}",
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@lobehub/chat",
3
- "version": "1.32.7",
3
+ "version": "1.32.9",
4
4
  "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
5
5
  "keywords": [
6
6
  "framework",
@@ -11,7 +11,8 @@ export const createAzureOpenai = (params: {
11
11
  model: string;
12
12
  userApiKey?: string | null;
13
13
  }) => {
14
- const { OPENAI_PROXY_URL = '', AZURE_API_VERSION, AZURE_API_KEY } = getLLMConfig();
14
+ const { AZURE_API_VERSION, AZURE_API_KEY } = getLLMConfig();
15
+ const OPENAI_PROXY_URL = process.env.OPENAI_PROXY_URL || '';
15
16
 
16
17
  const endpoint = !params.endpoint ? OPENAI_PROXY_URL : params.endpoint;
17
18
  const baseURL = urlJoin(endpoint, `/openai/deployments/${params.model.replace('.', '')}`); // refs: https://test-001.openai.azure.com/openai/deployments/gpt-35-turbo
@@ -5,7 +5,8 @@ import { ChatErrorType } from '@/types/fetch';
5
5
 
6
6
  // create OpenAI instance
7
7
  export const createOpenai = (userApiKey: string | null, endpoint?: string | null) => {
8
- const { OPENAI_API_KEY, OPENAI_PROXY_URL } = getLLMConfig();
8
+ const { OPENAI_API_KEY } = getLLMConfig();
9
+ const OPENAI_PROXY_URL = process.env.OPENAI_PROXY_URL;
9
10
 
10
11
  const baseURL = endpoint ? endpoint : OPENAI_PROXY_URL ? OPENAI_PROXY_URL : undefined;
11
12
 
@@ -1,8 +1,9 @@
1
- import { Icon, Tooltip } from '@lobehub/ui';
1
+ import { ActionIcon, Icon, Tooltip } from '@lobehub/ui';
2
2
  import { Typography } from 'antd';
3
3
  import { createStyles } from 'antd-style';
4
4
  import dayjs from 'dayjs';
5
- import { LucideLoaderCircle, LucideRefreshCcwDot } from 'lucide-react';
5
+ import isEqual from 'fast-deep-equal';
6
+ import { CircleX, LucideLoaderCircle, LucideRefreshCcwDot } from 'lucide-react';
6
7
  import { memo } from 'react';
7
8
  import { useTranslation } from 'react-i18next';
8
9
  import { Flexbox } from 'react-layout-kit';
@@ -40,8 +41,9 @@ interface ModelFetcherProps {
40
41
  const ModelFetcher = memo<ModelFetcherProps>(({ provider }) => {
41
42
  const { styles } = useStyles();
42
43
  const { t } = useTranslation('setting');
43
- const [useFetchProviderModelList] = useUserStore((s) => [
44
+ const [useFetchProviderModelList, clearObtainedModels] = useUserStore((s) => [
44
45
  s.useFetchProviderModelList,
46
+ s.clearObtainedModels,
45
47
  s.setModelProviderConfig,
46
48
  ]);
47
49
  const enabledAutoFetch = useUserStore(modelConfigSelectors.isAutoFetchModelsEnabled(provider));
@@ -52,19 +54,34 @@ const ModelFetcher = memo<ModelFetcherProps>(({ provider }) => {
52
54
  (s) => modelProviderSelectors.getModelCardsById(provider)(s).length,
53
55
  );
54
56
 
57
+ const remoteModels = useUserStore(
58
+ modelProviderSelectors.remoteProviderModelCards(provider),
59
+ isEqual,
60
+ );
61
+
55
62
  const { mutate, isValidating } = useFetchProviderModelList(provider, enabledAutoFetch);
56
63
 
57
64
  return (
58
65
  <Typography.Text style={{ fontSize: 12 }} type={'secondary'}>
59
66
  <Flexbox align={'center'} gap={0} horizontal justify={'space-between'}>
60
- <div>{t('llm.modelList.total', { count: totalModels })}</div>
67
+ <div style={{ display: 'flex', lineHeight: '24px' }}>
68
+ {t('llm.modelList.total', { count: totalModels })}
69
+ {remoteModels && remoteModels.length > 0 && (
70
+ <ActionIcon
71
+ icon={CircleX}
72
+ onClick={() => clearObtainedModels(provider)}
73
+ size={'small'}
74
+ title={t('llm.fetcher.clear')}
75
+ />
76
+ )}
77
+ </div>
61
78
  <Tooltip
62
79
  overlayStyle={{ pointerEvents: 'none' }}
63
80
  title={
64
81
  latestFetchTime
65
82
  ? t('llm.fetcher.latestTime', {
66
- time: dayjs(latestFetchTime).format('YYYY-MM-DD HH:mm:ss'),
67
- })
83
+ time: dayjs(latestFetchTime).format('YYYY-MM-DD HH:mm:ss'),
84
+ })
68
85
  : t('llm.fetcher.noLatestTime')
69
86
  }
70
87
  >
@@ -22,11 +22,13 @@ const RootLayout = async ({ children, modal }: RootLayoutProps) => {
22
22
  const cookieStore = await cookies();
23
23
 
24
24
  const lang = cookieStore.get(LOBE_LOCALE_COOKIE);
25
- const direction = isRtlLang(lang?.value || DEFAULT_LANG) ? 'rtl' : 'ltr';
25
+ const locale = lang?.value || DEFAULT_LANG;
26
+
27
+ const direction = isRtlLang(locale) ? 'rtl' : 'ltr';
26
28
  const mobile = isMobileDevice();
27
29
 
28
30
  return (
29
- <html dir={direction} lang={lang?.value || DEFAULT_LANG} suppressHydrationWarning>
31
+ <html dir={direction} lang={locale} suppressHydrationWarning>
30
32
  <body>
31
33
  <GlobalProvider>
32
34
  <AuthProvider>
package/src/config/llm.ts CHANGED
@@ -9,7 +9,6 @@ export const getLLMConfig = () => {
9
9
 
10
10
  ENABLED_OPENAI: z.boolean(),
11
11
  OPENAI_API_KEY: z.string().optional(),
12
- OPENAI_PROXY_URL: z.string().optional(),
13
12
 
14
13
  ENABLED_AZURE_OPENAI: z.boolean(),
15
14
  AZURE_API_KEY: z.string().optional(),
@@ -24,19 +23,15 @@ export const getLLMConfig = () => {
24
23
 
25
24
  ENABLED_GOOGLE: z.boolean(),
26
25
  GOOGLE_API_KEY: z.string().optional(),
27
- GOOGLE_PROXY_URL: z.string().optional(),
28
26
 
29
27
  ENABLED_MOONSHOT: z.boolean(),
30
28
  MOONSHOT_API_KEY: z.string().optional(),
31
- MOONSHOT_PROXY_URL: z.string().optional(),
32
29
 
33
30
  ENABLED_PERPLEXITY: z.boolean(),
34
31
  PERPLEXITY_API_KEY: z.string().optional(),
35
- PERPLEXITY_PROXY_URL: z.string().optional(),
36
32
 
37
33
  ENABLED_ANTHROPIC: z.boolean(),
38
34
  ANTHROPIC_API_KEY: z.string().optional(),
39
- ANTHROPIC_PROXY_URL: z.string().optional(),
40
35
 
41
36
  ENABLED_MINIMAX: z.boolean(),
42
37
  MINIMAX_API_KEY: z.string().optional(),
@@ -46,7 +41,6 @@ export const getLLMConfig = () => {
46
41
 
47
42
  ENABLED_GROQ: z.boolean(),
48
43
  GROQ_API_KEY: z.string().optional(),
49
- GROQ_PROXY_URL: z.string().optional(),
50
44
 
51
45
  ENABLED_GITHUB: z.boolean(),
52
46
  GITHUB_TOKEN: z.string().optional(),
@@ -74,7 +68,6 @@ export const getLLMConfig = () => {
74
68
  WENXIN_SECRET_KEY: z.string().optional(),
75
69
 
76
70
  ENABLED_OLLAMA: z.boolean(),
77
- OLLAMA_PROXY_URL: z.string().optional(),
78
71
 
79
72
  ENABLED_QWEN: z.boolean(),
80
73
  QWEN_API_KEY: z.string().optional(),
@@ -100,7 +93,6 @@ export const getLLMConfig = () => {
100
93
 
101
94
  ENABLED_SILICONCLOUD: z.boolean(),
102
95
  SILICONCLOUD_API_KEY: z.string().optional(),
103
- SILICONCLOUD_PROXY_URL: z.string().optional(),
104
96
 
105
97
  ENABLED_UPSTAGE: z.boolean(),
106
98
  UPSTAGE_API_KEY: z.string().optional(),
@@ -116,7 +108,6 @@ export const getLLMConfig = () => {
116
108
 
117
109
  ENABLED_HUGGINGFACE: z.boolean(),
118
110
  HUGGINGFACE_API_KEY: z.string().optional(),
119
- HUGGINGFACE_PROXY_URL: z.string().optional(),
120
111
 
121
112
  ENABLED_SENSENOVA: z.boolean(),
122
113
  SENSENOVA_ACCESS_KEY_ID: z.string().optional(),
@@ -133,7 +124,6 @@ export const getLLMConfig = () => {
133
124
 
134
125
  ENABLED_OPENAI: process.env.ENABLED_OPENAI !== '0',
135
126
  OPENAI_API_KEY: process.env.OPENAI_API_KEY,
136
- OPENAI_PROXY_URL: process.env.OPENAI_PROXY_URL,
137
127
 
138
128
  ENABLED_AZURE_OPENAI: !!process.env.AZURE_API_KEY,
139
129
  AZURE_API_KEY: process.env.AZURE_API_KEY,
@@ -148,15 +138,12 @@ export const getLLMConfig = () => {
148
138
 
149
139
  ENABLED_GOOGLE: !!process.env.GOOGLE_API_KEY,
150
140
  GOOGLE_API_KEY: process.env.GOOGLE_API_KEY,
151
- GOOGLE_PROXY_URL: process.env.GOOGLE_PROXY_URL,
152
141
 
153
142
  ENABLED_PERPLEXITY: !!process.env.PERPLEXITY_API_KEY,
154
143
  PERPLEXITY_API_KEY: process.env.PERPLEXITY_API_KEY,
155
- PERPLEXITY_PROXY_URL: process.env.PERPLEXITY_PROXY_URL,
156
144
 
157
145
  ENABLED_ANTHROPIC: !!process.env.ANTHROPIC_API_KEY,
158
146
  ANTHROPIC_API_KEY: process.env.ANTHROPIC_API_KEY,
159
- ANTHROPIC_PROXY_URL: process.env.ANTHROPIC_PROXY_URL,
160
147
 
161
148
  ENABLED_MINIMAX: !!process.env.MINIMAX_API_KEY,
162
149
  MINIMAX_API_KEY: process.env.MINIMAX_API_KEY,
@@ -175,11 +162,9 @@ export const getLLMConfig = () => {
175
162
 
176
163
  ENABLED_MOONSHOT: !!process.env.MOONSHOT_API_KEY,
177
164
  MOONSHOT_API_KEY: process.env.MOONSHOT_API_KEY,
178
- MOONSHOT_PROXY_URL: process.env.MOONSHOT_PROXY_URL,
179
165
 
180
166
  ENABLED_GROQ: !!process.env.GROQ_API_KEY,
181
167
  GROQ_API_KEY: process.env.GROQ_API_KEY,
182
- GROQ_PROXY_URL: process.env.GROQ_PROXY_URL,
183
168
 
184
169
  ENABLED_GITHUB: !!process.env.GITHUB_TOKEN,
185
170
  GITHUB_TOKEN: process.env.GITHUB_TOKEN,
@@ -198,7 +183,6 @@ export const getLLMConfig = () => {
198
183
  WENXIN_SECRET_KEY: process.env.WENXIN_SECRET_KEY,
199
184
 
200
185
  ENABLED_OLLAMA: process.env.ENABLED_OLLAMA !== '0',
201
- OLLAMA_PROXY_URL: process.env.OLLAMA_PROXY_URL || '',
202
186
 
203
187
  ENABLED_QWEN: !!process.env.QWEN_API_KEY,
204
188
  QWEN_API_KEY: process.env.QWEN_API_KEY,
@@ -225,7 +209,6 @@ export const getLLMConfig = () => {
225
209
 
226
210
  ENABLED_SILICONCLOUD: !!process.env.SILICONCLOUD_API_KEY,
227
211
  SILICONCLOUD_API_KEY: process.env.SILICONCLOUD_API_KEY,
228
- SILICONCLOUD_PROXY_URL: process.env.SILICONCLOUD_PROXY_URL,
229
212
 
230
213
  ENABLED_UPSTAGE: !!process.env.UPSTAGE_API_KEY,
231
214
  UPSTAGE_API_KEY: process.env.UPSTAGE_API_KEY,
@@ -241,7 +224,6 @@ export const getLLMConfig = () => {
241
224
 
242
225
  ENABLED_HUGGINGFACE: !!process.env.HUGGINGFACE_API_KEY,
243
226
  HUGGINGFACE_API_KEY: process.env.HUGGINGFACE_API_KEY,
244
- HUGGINGFACE_PROXY_URL: process.env.HUGGINGFACE_PROXY_URL,
245
227
 
246
228
  ENABLED_SENSENOVA: !!process.env.SENSENOVA_ACCESS_KEY_ID && !!process.env.SENSENOVA_ACCESS_KEY_SECRET,
247
229
  SENSENOVA_ACCESS_KEY_ID: process.env.SENSENOVA_ACCESS_KEY_ID,
@@ -37,6 +37,9 @@ const Moonshot: ModelProviderCard = {
37
37
  id: 'moonshot',
38
38
  modelsUrl: 'https://platform.moonshot.cn/docs/intro',
39
39
  name: 'Moonshot',
40
+ proxyUrl: {
41
+ placeholder: 'https://api.moonshot.cn/v1',
42
+ },
40
43
  smoothing: {
41
44
  speed: 2,
42
45
  text: true,
@@ -100,6 +100,7 @@ export default {
100
100
  title: '使用客户端请求模式',
101
101
  },
102
102
  fetcher: {
103
+ clear: '清除获取的模型',
103
104
  fetch: '获取模型列表',
104
105
  fetching: '正在获取模型列表...',
105
106
  latestTime: '上次更新时间:{{time}}',
@@ -67,7 +67,7 @@ describe('genServerLLMConfig', () => {
67
67
  modelListKey: 'AWS_BEDROCK_MODEL_LIST',
68
68
  },
69
69
  ollama: {
70
- fetchOnClient: !getLLMConfig().OLLAMA_PROXY_URL,
70
+ fetchOnClient: !process.env.OLLAMA_PROXY_URL,
71
71
  },
72
72
  };
73
73
  const config = genServerLLMConfig(specificConfig);
@@ -2,7 +2,6 @@ import { appEnv, getAppConfig } from '@/config/app';
2
2
  import { authEnv } from '@/config/auth';
3
3
  import { fileEnv } from '@/config/file';
4
4
  import { langfuseEnv } from '@/config/langfuse';
5
- import { getLLMConfig } from '@/config/llm';
6
5
  import { enableNextAuth } from '@/const/auth';
7
6
  import { parseSystemAgent } from '@/server/globalConfig/parseSystemAgent';
8
7
  import { GlobalServerConfig } from '@/types/serverConfig';
@@ -31,7 +30,7 @@ export const getServerGlobalConfig = () => {
31
30
  modelListKey: 'AWS_BEDROCK_MODEL_LIST',
32
31
  },
33
32
  ollama: {
34
- fetchOnClient: !getLLMConfig().OLLAMA_PROXY_URL,
33
+ fetchOnClient: !process.env.OLLAMA_PROXY_URL,
35
34
  },
36
35
  }),
37
36
  oAuthSSOProviders: authEnv.NEXT_AUTH_SSO_PROVIDERS.trim().split(/[,,]/),
@@ -30,14 +30,13 @@ const getLlmOptionsFromPayload = (provider: string, payload: JWTPayload) => {
30
30
  switch (provider) {
31
31
  default: // Use Openai options as default
32
32
  case ModelProvider.OpenAI: {
33
- const { OPENAI_API_KEY, OPENAI_PROXY_URL } = getLLMConfig();
33
+ const { OPENAI_API_KEY } = getLLMConfig();
34
+
34
35
  const openaiApiKey = payload?.apiKey || OPENAI_API_KEY;
35
- const baseURL = payload?.endpoint || OPENAI_PROXY_URL;
36
+ const baseURL = payload?.endpoint || process.env.OPENAI_PROXY_URL;
36
37
  const apiKey = apiKeyManager.pick(openaiApiKey);
37
- return {
38
- apiKey,
39
- baseURL,
40
- };
38
+
39
+ return { apiKey, baseURL };
41
40
  }
42
41
  case ModelProvider.Azure: {
43
42
  const { AZURE_API_KEY, AZURE_API_VERSION, AZURE_ENDPOINT } = getLLMConfig();
@@ -52,27 +51,26 @@ const getLlmOptionsFromPayload = (provider: string, payload: JWTPayload) => {
52
51
  }
53
52
  case ModelProvider.ZhiPu: {
54
53
  const { ZHIPU_API_KEY } = getLLMConfig();
54
+
55
55
  const apiKey = apiKeyManager.pick(payload?.apiKey || ZHIPU_API_KEY);
56
- return {
57
- apiKey,
58
- };
56
+
57
+ return { apiKey };
59
58
  }
60
59
  case ModelProvider.Google: {
61
- const { GOOGLE_API_KEY, GOOGLE_PROXY_URL } = getLLMConfig();
60
+ const { GOOGLE_API_KEY } = getLLMConfig();
61
+
62
62
  const apiKey = apiKeyManager.pick(payload?.apiKey || GOOGLE_API_KEY);
63
- const baseURL = payload?.endpoint || GOOGLE_PROXY_URL;
64
- return {
65
- apiKey,
66
- baseURL,
67
- };
63
+ const baseURL = payload?.endpoint || process.env.GOOGLE_PROXY_URL;
64
+
65
+ return { apiKey, baseURL };
68
66
  }
69
67
  case ModelProvider.Moonshot: {
70
- const { MOONSHOT_API_KEY, MOONSHOT_PROXY_URL } = getLLMConfig();
68
+ const { MOONSHOT_API_KEY } = getLLMConfig();
69
+
71
70
  const apiKey = apiKeyManager.pick(payload?.apiKey || MOONSHOT_API_KEY);
72
- return {
73
- apiKey,
74
- baseURL: MOONSHOT_PROXY_URL,
75
- };
71
+ const baseURL = payload?.endpoint || process.env.MOONSHOT_PROXY_URL;
72
+
73
+ return { apiKey, baseURL };
76
74
  }
77
75
  case ModelProvider.Bedrock: {
78
76
  const { AWS_SECRET_ACCESS_KEY, AWS_ACCESS_KEY_ID, AWS_REGION, AWS_SESSION_TOKEN } =
@@ -91,23 +89,22 @@ const getLlmOptionsFromPayload = (provider: string, payload: JWTPayload) => {
91
89
  return { accessKeyId, accessKeySecret, region, sessionToken };
92
90
  }
93
91
  case ModelProvider.Ollama: {
94
- const { OLLAMA_PROXY_URL } = getLLMConfig();
95
- const baseURL = payload?.endpoint || OLLAMA_PROXY_URL;
92
+ const baseURL = payload?.endpoint || process.env.OLLAMA_PROXY_URL;
96
93
  return { baseURL };
97
94
  }
98
95
  case ModelProvider.Perplexity: {
99
- const { PERPLEXITY_API_KEY, PERPLEXITY_PROXY_URL } = getLLMConfig();
96
+ const { PERPLEXITY_API_KEY } = getLLMConfig();
100
97
 
101
98
  const apiKey = apiKeyManager.pick(payload?.apiKey || PERPLEXITY_API_KEY);
102
- const baseURL = payload?.endpoint || PERPLEXITY_PROXY_URL;
99
+ const baseURL = payload?.endpoint || process.env.PERPLEXITY_PROXY_URL;
103
100
 
104
101
  return { apiKey, baseURL };
105
102
  }
106
103
  case ModelProvider.Anthropic: {
107
- const { ANTHROPIC_API_KEY, ANTHROPIC_PROXY_URL } = getLLMConfig();
104
+ const { ANTHROPIC_API_KEY } = getLLMConfig();
108
105
 
109
106
  const apiKey = apiKeyManager.pick(payload?.apiKey || ANTHROPIC_API_KEY);
110
- const baseURL = payload?.endpoint || ANTHROPIC_PROXY_URL;
107
+ const baseURL = payload?.endpoint || process.env.ANTHROPIC_PROXY_URL;
111
108
 
112
109
  return { apiKey, baseURL };
113
110
  }
@@ -126,10 +123,10 @@ const getLlmOptionsFromPayload = (provider: string, payload: JWTPayload) => {
126
123
  return { apiKey };
127
124
  }
128
125
  case ModelProvider.Groq: {
129
- const { GROQ_API_KEY, GROQ_PROXY_URL } = getLLMConfig();
126
+ const { GROQ_API_KEY } = getLLMConfig();
130
127
 
131
128
  const apiKey = apiKeyManager.pick(payload?.apiKey || GROQ_API_KEY);
132
- const baseURL = payload?.endpoint || GROQ_PROXY_URL;
129
+ const baseURL = payload?.endpoint || process.env.GROQ_PROXY_URL;
133
130
 
134
131
  return { apiKey, baseURL };
135
132
  }
@@ -229,19 +226,19 @@ const getLlmOptionsFromPayload = (provider: string, payload: JWTPayload) => {
229
226
  return { apiKey };
230
227
  }
231
228
  case ModelProvider.SiliconCloud: {
232
- const { SILICONCLOUD_API_KEY, SILICONCLOUD_PROXY_URL } = getLLMConfig();
229
+ const { SILICONCLOUD_API_KEY } = getLLMConfig();
233
230
 
234
231
  const apiKey = apiKeyManager.pick(payload?.apiKey || SILICONCLOUD_API_KEY);
235
- const baseURL = payload?.endpoint || SILICONCLOUD_PROXY_URL;
232
+ const baseURL = payload?.endpoint || process.env.SILICONCLOUD_PROXY_URL;
236
233
 
237
234
  return { apiKey, baseURL };
238
235
  }
239
236
 
240
237
  case ModelProvider.HuggingFace: {
241
- const { HUGGINGFACE_PROXY_URL, HUGGINGFACE_API_KEY } = getLLMConfig();
238
+ const { HUGGINGFACE_API_KEY } = getLLMConfig();
242
239
 
243
240
  const apiKey = apiKeyManager.pick(payload?.apiKey || HUGGINGFACE_API_KEY);
244
- const baseURL = payload?.endpoint || HUGGINGFACE_PROXY_URL;
241
+ const baseURL = payload?.endpoint || process.env.HUGGINGFACE_PROXY_URL;
245
242
 
246
243
  return { apiKey, baseURL };
247
244
  }
@@ -292,8 +289,9 @@ const getLlmOptionsFromPayload = (provider: string, payload: JWTPayload) => {
292
289
  const { XAI_API_KEY } = getLLMConfig();
293
290
 
294
291
  const apiKey = apiKeyManager.pick(payload?.apiKey || XAI_API_KEY);
292
+ const baseURL = payload?.endpoint || process.env.XAI_PROXY_URL;
295
293
 
296
- return { apiKey };
294
+ return { apiKey, baseURL };
297
295
  }
298
296
  case ModelProvider.InternLM: {
299
297
  const { INTERNLM_API_KEY } = getLLMConfig();
@@ -20,6 +20,7 @@ import { modelProviderSelectors } from './selectors/modelProvider';
20
20
  * 设置操作
21
21
  */
22
22
  export interface ModelListAction {
23
+ clearObtainedModels: (provider: GlobalLLMProviderKey) => Promise<void>;
23
24
  dispatchCustomModelCards: (
24
25
  provider: GlobalLLMProviderKey,
25
26
  payload: CustomModelCardDispatch,
@@ -61,6 +62,13 @@ export const createModelListSlice: StateCreator<
61
62
  [],
62
63
  ModelListAction
63
64
  > = (set, get) => ({
65
+ clearObtainedModels: async (provider: GlobalLLMProviderKey) => {
66
+ await get().setModelProviderConfig(provider, {
67
+ remoteModelCards: [],
68
+ });
69
+
70
+ get().refreshDefaultModelProviderList();
71
+ },
64
72
  dispatchCustomModelCards: async (provider, payload) => {
65
73
  const prevState = settingsSelectors.providerConfig(provider)(get());
66
74
 
@@ -86,7 +94,14 @@ export const createModelListSlice: StateCreator<
86
94
  ? modelProviderSelectors.remoteProviderModelCards(providerKey)(get())
87
95
  : undefined;
88
96
 
89
- return serverChatModels ?? remoteChatModels ?? providerCard.chatModels;
97
+ if (serverChatModels && serverChatModels.length > 0) {
98
+ return serverChatModels;
99
+ }
100
+ if (remoteChatModels && remoteChatModels.length > 0) {
101
+ return remoteChatModels;
102
+ }
103
+
104
+ return providerCard.chatModels;
90
105
  };
91
106
 
92
107
  const defaultModelProviderList = produce(DEFAULT_MODEL_PROVIDER_LIST, (draft) => {