@lobehub/chat 1.49.6 → 1.49.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +50 -0
- package/changelog/v1.json +18 -0
- package/locales/ar/components.json +2 -2
- package/locales/ar/providers.json +3 -0
- package/locales/bg-BG/components.json +2 -2
- package/locales/bg-BG/providers.json +3 -0
- package/locales/de-DE/components.json +2 -2
- package/locales/de-DE/providers.json +3 -0
- package/locales/en-US/components.json +2 -2
- package/locales/en-US/providers.json +3 -0
- package/locales/es-ES/components.json +2 -2
- package/locales/es-ES/providers.json +3 -0
- package/locales/fa-IR/components.json +2 -2
- package/locales/fa-IR/providers.json +3 -0
- package/locales/fr-FR/components.json +2 -2
- package/locales/fr-FR/providers.json +3 -0
- package/locales/it-IT/components.json +2 -2
- package/locales/it-IT/providers.json +3 -0
- package/locales/ja-JP/components.json +2 -2
- package/locales/ja-JP/providers.json +3 -0
- package/locales/ko-KR/components.json +2 -2
- package/locales/ko-KR/providers.json +3 -0
- package/locales/nl-NL/components.json +2 -2
- package/locales/nl-NL/providers.json +3 -0
- package/locales/pl-PL/components.json +2 -2
- package/locales/pl-PL/providers.json +3 -0
- package/locales/pt-BR/components.json +2 -2
- package/locales/pt-BR/providers.json +3 -0
- package/locales/ru-RU/components.json +2 -2
- package/locales/ru-RU/providers.json +3 -0
- package/locales/tr-TR/components.json +2 -2
- package/locales/tr-TR/providers.json +3 -0
- package/locales/vi-VN/components.json +2 -2
- package/locales/vi-VN/providers.json +3 -0
- package/locales/zh-CN/components.json +2 -2
- package/locales/zh-CN/providers.json +3 -0
- package/locales/zh-TW/components.json +2 -2
- package/locales/zh-TW/providers.json +3 -0
- package/package.json +3 -2
- package/src/components/Thinking/index.tsx +31 -17
- package/src/const/plugin.ts +2 -0
- package/src/features/Conversation/components/ChatItem/index.tsx +7 -3
- package/src/features/Conversation/components/ChatItem/utils.ts +15 -0
- package/src/features/Conversation/components/MarkdownElements/LobeArtifact/index.ts +5 -2
- package/src/features/Conversation/components/MarkdownElements/LobeThinking/index.ts +2 -1
- package/src/features/Conversation/components/MarkdownElements/Thinking/Render.tsx +27 -0
- package/src/features/Conversation/components/MarkdownElements/Thinking/index.ts +11 -0
- package/src/features/Conversation/components/MarkdownElements/Thinking/remarkPlugin.ts +57 -0
- package/src/features/Conversation/components/MarkdownElements/index.ts +3 -1
- package/src/features/Conversation/components/MarkdownElements/type.ts +8 -1
- package/src/libs/agent-runtime/deepseek/index.test.ts +135 -0
- package/src/libs/agent-runtime/deepseek/index.ts +28 -3
- package/src/locales/default/components.ts +2 -2
package/CHANGELOG.md
CHANGED
@@ -2,6 +2,56 @@

 # Changelog

+### [Version 1.49.8](https://github.com/lobehub/lobe-chat/compare/v1.49.7...v1.49.8)
+
+<sup>Released on **2025-02-01**</sup>
+
+#### 💄 Styles
+
+- **misc**: Support thinking for all non DeepSeek official api R1 models.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### Styles
+
+- **misc**: Support thinking for all non DeepSeek official api R1 models, closes [#5654](https://github.com/lobehub/lobe-chat/issues/5654) ([9b32137](https://github.com/lobehub/lobe-chat/commit/9b32137))
+
+</details>
+
+<div align="right">
+
+[](#readme-top)
+
+</div>
+
+### [Version 1.49.7](https://github.com/lobehub/lobe-chat/compare/v1.49.6...v1.49.7)
+
+<sup>Released on **2025-02-01**</sup>
+
+#### 🐛 Bug Fixes
+
+- **misc**: Multiple deepseek-reasoner request errors.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### What's fixed
+
+- **misc**: Multiple deepseek-reasoner request errors, closes [#5601](https://github.com/lobehub/lobe-chat/issues/5601) ([71cc32b](https://github.com/lobehub/lobe-chat/commit/71cc32b))
+
+</details>
+
+<div align="right">
+
+[](#readme-top)
+
+</div>
+
 ### [Version 1.49.6](https://github.com/lobehub/lobe-chat/compare/v1.49.5...v1.49.6)

 <sup>Released on **2025-01-30**</sup>
package/changelog/v1.json
CHANGED
@@ -1,4 +1,22 @@
 [
+  {
+    "children": {
+      "improvements": [
+        "Support thinking for all non DeepSeek official api R1 models."
+      ]
+    },
+    "date": "2025-02-01",
+    "version": "1.49.8"
+  },
+  {
+    "children": {
+      "fixes": [
+        "Multiple deepseek-reasoner request errors."
+      ]
+    },
+    "date": "2025-02-01",
+    "version": "1.49.7"
+  },
   {
     "children": {
       "fixes": [
package/locales/ar/components.json
CHANGED
@@ -88,7 +88,7 @@
   },
   "Thinking": {
     "thinking": "في حالة تفكير عميق...",
-    "thought": "
-    "thoughtWithDuration": "
+    "thought": "لقد فكرت بعمق (استغرق الأمر {{duration}} ثانية)",
+    "thoughtWithDuration": "لقد فكرت بعمق"
   }
 }
package/locales/ar/providers.json
CHANGED
@@ -23,6 +23,9 @@
   "deepseek": {
     "description": "DeepSeek هي شركة تركز على أبحاث وتطبيقات تقنيات الذكاء الاصطناعي، حيث يجمع نموذجها الأحدث DeepSeek-V2.5 بين قدرات الحوار العامة ومعالجة الشيفرات، وقد حقق تحسينات ملحوظة في محاذاة تفضيلات البشر، ومهام الكتابة، واتباع التعليمات."
   },
+  "doubao": {
+    "description": "نموذج كبير تم تطويره داخليًا بواسطة بايت دانس. تم التحقق من صحته من خلال أكثر من 50 سيناريو عمل داخلي، مع استخدام يومي يتجاوز تريليون توكن، مما يتيح تقديم قدرات متعددة الأنماط، ويعمل على توفير تجربة عمل غنية للشركات من خلال نموذج عالي الجودة."
+  },
   "fireworksai": {
     "description": "Fireworks AI هي شركة رائدة في تقديم خدمات نماذج اللغة المتقدمة، تركز على استدعاء الوظائف والمعالجة متعددة الوسائط. نموذجها الأحدث Firefunction V2 مبني على Llama-3، مُحسّن لاستدعاء الوظائف، والحوار، واتباع التعليمات. يدعم نموذج اللغة البصرية FireLLaVA-13B إدخال الصور والنصوص المختلطة. تشمل النماذج البارزة الأخرى سلسلة Llama وسلسلة Mixtral، مما يوفر دعمًا فعالًا لاتباع التعليمات وتوليدها بلغات متعددة."
   },
package/locales/bg-BG/components.json
CHANGED
@@ -88,7 +88,7 @@
   },
   "Thinking": {
     "thinking": "В процес на дълбочинно размисъл...",
-    "thought": "
-    "thoughtWithDuration": "
+    "thought": "Дълбоко размислих (отне ми {{duration}} секунди)",
+    "thoughtWithDuration": "Дълбоко размислих"
   }
 }
package/locales/bg-BG/providers.json
CHANGED
@@ -23,6 +23,9 @@
   "deepseek": {
     "description": "DeepSeek е компания, специализирана в изследвания и приложения на технологии за изкуствен интелект, чийто най-нов модел DeepSeek-V2.5 комбинира способности за общи диалози и обработка на код, постигайки значителни подобрения в съответствието с човешките предпочитания, писателските задачи и следването на инструкции."
   },
+  "doubao": {
+    "description": "Модел, разработен от ByteDance. Проверен в над 50 бизнес сценария в рамките на компанията, с ежедневна употреба на триллиони токени, който продължава да се усъвършенства, предоставяйки множество модални възможности и създавайки богато бизнес изживяване с висококачествени модели."
+  },
   "fireworksai": {
     "description": "Fireworks AI е водещ доставчик на напреднали езикови модели, фокусирайки се върху извикване на функции и мултимодална обработка. Най-новият им модел Firefunction V2, базиран на Llama-3, е оптимизиран за извикване на функции, диалози и следване на инструкции. Визуалният езиков модел FireLLaVA-13B поддържа смесени входове от изображения и текст. Други забележителни модели включват серията Llama и серията Mixtral, предлагащи ефективна поддръжка за многоезично следване на инструкции и генериране."
   },
package/locales/de-DE/components.json
CHANGED
@@ -88,7 +88,7 @@
   },
   "Thinking": {
     "thinking": "Tiefes Nachdenken...",
-    "thought": "
-    "thoughtWithDuration": "
+    "thought": "Tiefgründig nachgedacht (Dauer: {{duration}} Sekunden)",
+    "thoughtWithDuration": "Tiefgründig nachgedacht"
   }
 }
package/locales/de-DE/providers.json
CHANGED
@@ -23,6 +23,9 @@
   "deepseek": {
     "description": "DeepSeek ist ein Unternehmen, das sich auf die Forschung und Anwendung von KI-Technologien spezialisiert hat. Ihr neuestes Modell, DeepSeek-V2.5, kombiniert allgemeine Dialog- und Codeverarbeitungsfähigkeiten und hat signifikante Fortschritte in den Bereichen menschliche Präferenzanpassung, Schreibaufgaben und Befehlsbefolgung erzielt."
   },
+  "doubao": {
+    "description": "Ein von ByteDance entwickeltes großes Modell. Durch die praktische Validierung in über 50 internen Geschäftsszenarien und die kontinuierliche Verfeinerung mit täglich Billionen von Tokens bietet es vielfältige Modalitäten und schafft mit hochwertigen Modellergebnissen ein reichhaltiges Geschäftserlebnis für Unternehmen."
+  },
   "fireworksai": {
     "description": "Fireworks AI ist ein führender Anbieter von fortschrittlichen Sprachmodellen, der sich auf Funktionsaufrufe und multimodale Verarbeitung spezialisiert hat. Ihr neuestes Modell, Firefunction V2, basiert auf Llama-3 und ist für Funktionsaufrufe, Dialoge und Befehlsbefolgung optimiert. Das visuelle Sprachmodell FireLLaVA-13B unterstützt gemischte Eingaben von Bildern und Text. Weitere bemerkenswerte Modelle sind die Llama-Serie und die Mixtral-Serie, die effiziente mehrsprachige Befehlsbefolgung und Generierungsunterstützung bieten."
   },
package/locales/en-US/components.json
CHANGED
@@ -88,7 +88,7 @@
   },
   "Thinking": {
     "thinking": "Deep in thought...",
-    "thought": "
-    "thoughtWithDuration": "
+    "thought": "Deeply thought (took {{duration}} seconds)",
+    "thoughtWithDuration": "Deeply thought"
   }
 }
package/locales/en-US/providers.json
CHANGED
@@ -23,6 +23,9 @@
   "deepseek": {
     "description": "DeepSeek is a company focused on AI technology research and application, with its latest model DeepSeek-V2.5 integrating general dialogue and code processing capabilities, achieving significant improvements in human preference alignment, writing tasks, and instruction following."
   },
+  "doubao": {
+    "description": "A self-developed large model launched by ByteDance. Verified through practical applications in over 50 internal business scenarios, it continuously refines its capabilities with a daily usage of trillions of tokens, providing various modal abilities to create a rich business experience for enterprises with high-quality model performance."
+  },
   "fireworksai": {
     "description": "Fireworks AI is a leading provider of advanced language model services, focusing on functional calling and multimodal processing. Its latest model, Firefunction V2, is based on Llama-3, optimized for function calling, conversation, and instruction following. The visual language model FireLLaVA-13B supports mixed input of images and text. Other notable models include the Llama series and Mixtral series, providing efficient multilingual instruction following and generation support."
   },
package/locales/es-ES/components.json
CHANGED
@@ -88,7 +88,7 @@
   },
   "Thinking": {
     "thinking": "Pensando profundamente...",
-    "thought": "He pensado profundamente",
-    "thoughtWithDuration": "He pensado profundamente
+    "thought": "He pensado profundamente (durante {{duration}} segundos)",
+    "thoughtWithDuration": "He pensado profundamente"
   }
 }
package/locales/es-ES/providers.json
CHANGED
@@ -23,6 +23,9 @@
   "deepseek": {
     "description": "DeepSeek es una empresa centrada en la investigación y aplicación de tecnologías de inteligencia artificial, cuyo modelo más reciente, DeepSeek-V2.5, combina capacidades de diálogo general y procesamiento de código, logrando mejoras significativas en alineación con preferencias humanas, tareas de escritura y seguimiento de instrucciones."
   },
+  "doubao": {
+    "description": "Un modelo grande desarrollado internamente por ByteDance. Validado a través de más de 50 escenarios de negocio internos, con un uso diario de tokens en billones que se perfecciona continuamente, ofrece múltiples capacidades modales y crea experiencias comerciales ricas para las empresas con un rendimiento de modelo de alta calidad."
+  },
   "fireworksai": {
     "description": "Fireworks AI es un proveedor líder de servicios de modelos de lenguaje avanzados, enfocado en la llamada de funciones y el procesamiento multimodal. Su modelo más reciente, Firefunction V2, basado en Llama-3, está optimizado para llamadas de funciones, diálogos y seguimiento de instrucciones. El modelo de lenguaje visual FireLLaVA-13B admite entradas mixtas de imágenes y texto. Otros modelos notables incluyen la serie Llama y la serie Mixtral, que ofrecen un soporte eficiente para el seguimiento y generación de instrucciones multilingües."
   },
package/locales/fa-IR/components.json
CHANGED
@@ -88,7 +88,7 @@
   },
   "Thinking": {
     "thinking": "در حال تفکر عمیق...",
-    "thought": "
-    "thoughtWithDuration": "
+    "thought": "بهطور عمیق فکر شده است (مدت زمان {{duration}} ثانیه)",
+    "thoughtWithDuration": "بهطور عمیق فکر شده است"
   }
 }
package/locales/fa-IR/providers.json
CHANGED
@@ -23,6 +23,9 @@
   "deepseek": {
     "description": "DeepSeek یک شرکت متمرکز بر تحقیق و کاربرد فناوری هوش مصنوعی است. مدل جدید آن، DeepSeek-V2.5، تواناییهای مکالمه عمومی و پردازش کد را ترکیب کرده و در زمینههایی مانند همترازی با ترجیحات انسانی، وظایف نوشتاری و پیروی از دستورات بهبود قابل توجهی داشته است."
   },
+  "doubao": {
+    "description": "مدل بزرگ خودساخته شده توسط بایتدANCE. با تأیید در بیش از 50 سناریوی تجاری داخلی بایتدANCE، با استفاده روزانه از تریلیونها توکن، به طور مداوم بهبود یافته و تواناییهای چندگانهای را ارائه میدهد تا تجربههای تجاری غنی را با کیفیت مدل بالا برای شرکتها ایجاد کند."
+  },
   "fireworksai": {
     "description": "Fireworks AI یک ارائهدهنده پیشرو در خدمات مدلهای زبان پیشرفته است که بر فراخوانی توابع و پردازش چندوجهی تمرکز دارد. جدیدترین مدل آن، Firefunction V2، بر اساس Llama-3 ساخته شده و برای فراخوانی توابع، مکالمه و پیروی از دستورات بهینهسازی شده است. مدل زبان تصویری FireLLaVA-13B از ورودیهای ترکیبی تصویر و متن پشتیبانی میکند. سایر مدلهای قابل توجه شامل سری Llama و سری Mixtral هستند که پشتیبانی کارآمدی از پیروی دستورات چندزبانه و تولید ارائه میدهند."
   },
package/locales/fr-FR/components.json
CHANGED
@@ -88,7 +88,7 @@
   },
   "Thinking": {
     "thinking": "En pleine réflexion...",
-    "thought": "
-    "thoughtWithDuration": "
+    "thought": "Pensée approfondie (durée : {{duration}} secondes)",
+    "thoughtWithDuration": "Pensée approfondie"
   }
 }
package/locales/fr-FR/providers.json
CHANGED
@@ -23,6 +23,9 @@
   "deepseek": {
     "description": "DeepSeek est une entreprise spécialisée dans la recherche et l'application des technologies d'intelligence artificielle, dont le dernier modèle, DeepSeek-V2.5, combine des capacités de dialogue général et de traitement de code, réalisant des améliorations significatives dans l'alignement des préférences humaines, les tâches d'écriture et le suivi des instructions."
   },
+  "doubao": {
+    "description": "Un grand modèle développé en interne par ByteDance. Validé par la pratique dans plus de 50 scénarios d'affaires au sein de ByteDance, avec un volume d'utilisation quotidien de plusieurs trillions de tokens, il offre diverses capacités multimodales, créant ainsi une expérience commerciale riche grâce à des performances de modèle de haute qualité."
+  },
   "fireworksai": {
     "description": "Fireworks AI est un fournisseur de services de modèles linguistiques avancés, axé sur les appels de fonction et le traitement multimodal. Son dernier modèle, Firefunction V2, basé sur Llama-3, est optimisé pour les appels de fonction, les dialogues et le suivi des instructions. Le modèle de langage visuel FireLLaVA-13B prend en charge les entrées mixtes d'images et de texte. D'autres modèles notables incluent la série Llama et la série Mixtral, offrant un support efficace pour le suivi et la génération d'instructions multilingues."
   },
package/locales/it-IT/components.json
CHANGED
@@ -88,7 +88,7 @@
   },
   "Thinking": {
     "thinking": "Pensando profondamente...",
-    "thought": "
-    "thoughtWithDuration": "
+    "thought": "Ho riflettuto a lungo (tempo impiegato {{duration}} secondi)",
+    "thoughtWithDuration": "Ho riflettuto a lungo"
   }
 }
package/locales/it-IT/providers.json
CHANGED
@@ -23,6 +23,9 @@
   "deepseek": {
     "description": "DeepSeek è un'azienda focalizzata sulla ricerca e applicazione della tecnologia AI, il cui ultimo modello DeepSeek-V2.5 combina capacità di dialogo generico e elaborazione del codice, realizzando miglioramenti significativi nell'allineamento delle preferenze umane, nei compiti di scrittura e nel rispetto delle istruzioni."
   },
+  "doubao": {
+    "description": "Il grande modello sviluppato internamente da ByteDance. Validato attraverso oltre 50 scenari aziendali interni, con un utilizzo quotidiano di trilioni di token che affinano continuamente il modello, offre diverse capacità multimodali, creando esperienze aziendali ricche con risultati di alta qualità."
+  },
   "fireworksai": {
     "description": "Fireworks AI è un fornitore leader di servizi di modelli linguistici avanzati, focalizzato su chiamate funzionali e elaborazione multimodale. Il suo ultimo modello Firefunction V2, basato su Llama-3, è ottimizzato per chiamate di funzione, dialogo e rispetto delle istruzioni. Il modello di linguaggio visivo FireLLaVA-13B supporta input misti di immagini e testo. Altri modelli notevoli includono la serie Llama e la serie Mixtral, offrendo supporto efficiente per il rispetto e la generazione di istruzioni multilingue."
   },
package/locales/ja-JP/providers.json
CHANGED
@@ -23,6 +23,9 @@
   "deepseek": {
     "description": "DeepSeekは、人工知能技術の研究と応用に特化した企業であり、最新のモデルDeepSeek-V2.5は、汎用対話とコード処理能力を融合させ、人間の好みの整合、ライティングタスク、指示の遵守などの面で顕著な向上を実現しています。"
   },
+  "doubao": {
+    "description": "バイトダンスが開発した独自の大規模モデルです。バイトダンス内部の50以上のビジネスシーンでの実践を通じて検証され、毎日数兆トークンの大規模な使用量で磨かれ、多様なモーダル能力を提供し、高品質なモデル効果で企業に豊かなビジネス体験を提供します。"
+  },
   "fireworksai": {
     "description": "Fireworks AIは、先進的な言語モデルサービスのリーダーであり、機能呼び出しと多モーダル処理に特化しています。最新のモデルFirefunction V2はLlama-3に基づいており、関数呼び出し、対話、指示の遵守に最適化されています。視覚言語モデルFireLLaVA-13Bは、画像とテキストの混合入力をサポートしています。他の注目すべきモデルには、LlamaシリーズやMixtralシリーズがあり、高効率の多言語指示遵守と生成サポートを提供しています。"
   },
package/locales/ko-KR/providers.json
CHANGED
@@ -23,6 +23,9 @@
   "deepseek": {
     "description": "DeepSeek는 인공지능 기술 연구 및 응용에 집중하는 회사로, 최신 모델인 DeepSeek-V2.5는 일반 대화 및 코드 처리 능력을 통합하고 인간의 선호 정렬, 작문 작업 및 지시 따르기 등에서 상당한 향상을 이루었습니다."
   },
+  "doubao": {
+    "description": "바이트댄스가 개발한 자체 대형 모델입니다. 바이트댄스 내부의 50개 이상의 비즈니스 시나리오에서 검증되었으며, 매일 수조 개의 토큰 사용량을 지속적으로 다듬어 다양한 모드 기능을 제공하여 우수한 모델 효과로 기업에 풍부한 비즈니스 경험을 제공합니다."
+  },
   "fireworksai": {
     "description": "Fireworks AI는 기능 호출 및 다중 모드 처리를 전문으로 하는 선도적인 고급 언어 모델 서비스 제공업체입니다. 최신 모델인 Firefunction V2는 Llama-3를 기반으로 하며, 함수 호출, 대화 및 지시 따르기에 최적화되어 있습니다. 비주얼 언어 모델인 FireLLaVA-13B는 이미지와 텍스트 혼합 입력을 지원합니다. 기타 주목할 만한 모델로는 Llama 시리즈와 Mixtral 시리즈가 있으며, 효율적인 다국어 지시 따르기 및 생성 지원을 제공합니다."
   },
package/locales/nl-NL/components.json
CHANGED
@@ -88,7 +88,7 @@
   },
   "Thinking": {
     "thinking": "Diep in gedachten...",
-    "thought": "Diep nagedacht",
-    "thoughtWithDuration": "Diep nagedacht
+    "thought": "Diep nagedacht (tijd gebruikt {{duration}} seconden)",
+    "thoughtWithDuration": "Diep nagedacht"
   }
 }
package/locales/nl-NL/providers.json
CHANGED
@@ -23,6 +23,9 @@
   "deepseek": {
     "description": "DeepSeek is een bedrijf dat zich richt op onderzoek en toepassing van kunstmatige intelligentietechnologie, en hun nieuwste model DeepSeek-V2.5 combineert algemene dialoog- en codeverwerkingscapaciteiten, met significante verbeteringen in het afstemmen op menselijke voorkeuren, schrijfopdrachten en het volgen van instructies."
   },
+  "doubao": {
+    "description": "Een door ByteDance ontwikkelde grote model. Bewezen in meer dan 50 interne zakelijke scenario's, met een dagelijks gebruik van triljoenen tokens, biedt het verschillende modaliteiten en creëert een rijke zakelijke ervaring voor bedrijven met hoogwaardige modelprestaties."
+  },
   "fireworksai": {
     "description": "Fireworks AI is een toonaangevende aanbieder van geavanceerde taalmodellen, met een focus op functionele aanroepen en multimodale verwerking. Hun nieuwste model Firefunction V2 is gebaseerd op Llama-3 en geoptimaliseerd voor functieaanroepen, dialogen en het volgen van instructies. Het visuele taalmodel FireLLaVA-13B ondersteunt gemengde invoer van afbeeldingen en tekst. Andere opmerkelijke modellen zijn de Llama-serie en de Mixtral-serie, die efficiënte ondersteuning bieden voor meertalig volgen van instructies en genereren."
   },
package/locales/pl-PL/components.json
CHANGED
@@ -88,7 +88,7 @@
   },
   "Thinking": {
     "thinking": "Głęboko myślę...",
-    "thought": "
-    "thoughtWithDuration": "
+    "thought": "Głęboko przemyślane (czas: {{duration}} sekund)",
+    "thoughtWithDuration": "Głęboko przemyślane"
   }
 }
package/locales/pl-PL/providers.json
CHANGED
@@ -23,6 +23,9 @@
   "deepseek": {
     "description": "DeepSeek to firma skoncentrowana na badaniach i zastosowaniach technologii sztucznej inteligencji, której najnowszy model DeepSeek-V2.5 łączy zdolności do prowadzenia ogólnych rozmów i przetwarzania kodu, osiągając znaczące postępy w zakresie dostosowywania do preferencji ludzkich, zadań pisarskich i przestrzegania instrukcji."
   },
+  "doubao": {
+    "description": "Model dużych rozmiarów opracowany przez ByteDance. Potwierdzony w ponad 50 scenariuszach biznesowych wewnątrz firmy, z codziennym użyciem bilionów tokenów, co pozwala na ciągłe doskonalenie. Oferuje różnorodne możliwości modalne, tworząc bogate doświadczenia biznesowe dla przedsiębiorstw dzięki wysokiej jakości modelom."
+  },
   "fireworksai": {
     "description": "Fireworks AI to wiodący dostawca zaawansowanych modeli językowych, skoncentrowany na wywołaniach funkcji i przetwarzaniu multimodalnym. Jego najnowszy model Firefunction V2 oparty na Llama-3, zoptymalizowany do wywołań funkcji, dialogów i przestrzegania instrukcji. Model wizualny FireLLaVA-13B wspiera mieszane wejścia obrazów i tekstu. Inne znaczące modele to seria Llama i seria Mixtral, oferujące efektywne wsparcie dla wielojęzycznego przestrzegania instrukcji i generacji."
   },
package/locales/pt-BR/components.json
CHANGED
@@ -88,7 +88,7 @@
   },
   "Thinking": {
     "thinking": "Pensando profundamente...",
-    "thought": "
-    "thoughtWithDuration": "
+    "thought": "Já pensei profundamente (tempo gasto {{duration}} segundos)",
+    "thoughtWithDuration": "Já pensei profundamente"
   }
 }
package/locales/pt-BR/providers.json
CHANGED
@@ -23,6 +23,9 @@
   "deepseek": {
     "description": "A DeepSeek é uma empresa focada em pesquisa e aplicação de tecnologia de inteligência artificial, cujo modelo mais recente, DeepSeek-V2.5, combina capacidades de diálogo geral e processamento de código, alcançando melhorias significativas em alinhamento com preferências humanas, tarefas de escrita e seguimento de instruções."
   },
+  "doubao": {
+    "description": "Um grande modelo desenvolvido internamente pela ByteDance. Validado através da prática em mais de 50 cenários de negócios dentro da ByteDance, com um uso diário de trilhões de tokens, continuamente aprimorado, oferece diversas capacidades multimodais, criando uma rica experiência de negócios para as empresas com resultados de modelo de alta qualidade."
+  },
   "fireworksai": {
     "description": "Fireworks AI é um fornecedor líder de serviços de modelos de linguagem avançados, focando em chamadas de função e processamento multimodal. Seu modelo mais recente, Firefunction V2, baseado em Llama-3, é otimizado para chamadas de função, diálogos e seguimento de instruções. O modelo de linguagem visual FireLLaVA-13B suporta entradas mistas de imagem e texto. Outros modelos notáveis incluem a série Llama e a série Mixtral, oferecendo suporte eficiente para seguimento e geração de instruções multilíngues."
   },
package/locales/ru-RU/components.json
CHANGED
@@ -88,7 +88,7 @@
   },
   "Thinking": {
     "thinking": "Глубокое размышление...",
-    "thought": "Глубоко
-    "thoughtWithDuration": "Глубоко
+    "thought": "Глубоко обдумано (время: {{duration}} секунд)",
+    "thoughtWithDuration": "Глубоко обдумано"
   }
 }
package/locales/ru-RU/providers.json
CHANGED
@@ -23,6 +23,9 @@
   "deepseek": {
     "description": "DeepSeek — это компания, сосредоточенная на исследованиях и применении технологий искусственного интеллекта, ее последняя модель DeepSeek-V2.5 объединяет возможности общего диалога и обработки кода, достигнув значительных улучшений в области согласования с человеческими предпочтениями, написания текстов и выполнения инструкций."
   },
+  "doubao": {
+    "description": "Модель большого размера, разработанная ByteDance. Проверенная на более чем 50 внутренних бизнес-сценариях, с ежедневным использованием триллионов токенов, она продолжает совершенствоваться, предлагая множество модальных возможностей и создавая богатый бизнес-опыт для компаний с помощью качественных моделей."
+  },
   "fireworksai": {
     "description": "Fireworks AI — это ведущий поставщик высококлассных языковых моделей, сосредоточенный на вызовах функций и мультимодальной обработке. Их последняя модель Firefunction V2 основана на Llama-3 и оптимизирована для вызовов функций, диалогов и выполнения инструкций. Модель визуального языка FireLLaVA-13B поддерживает смешанный ввод изображений и текста. Другие заметные модели включают серию Llama и серию Mixtral, предлагая эффективную поддержку многоязычных инструкций и генерации."
   },
package/locales/tr-TR/components.json
CHANGED
@@ -88,7 +88,7 @@
   },
   "Thinking": {
     "thinking": "Derin düşünme aşamasında...",
-    "thought": "
-    "thoughtWithDuration": "
+    "thought": "Derinlemesine düşündüm (geçen süre {{duration}} saniye)",
+    "thoughtWithDuration": "Derinlemesine düşündüm"
   }
 }
package/locales/tr-TR/providers.json
CHANGED
@@ -23,6 +23,9 @@
   "deepseek": {
     "description": "DeepSeek, yapay zeka teknolojisi araştırma ve uygulamalarına odaklanan bir şirkettir. En son modeli DeepSeek-V2.5, genel diyalog ve kod işleme yeteneklerini birleştirerek, insan tercihleriyle uyum, yazma görevleri ve talimat takibi gibi alanlarda önemli iyileştirmeler sağlamaktadır."
   },
+  "doubao": {
+    "description": "ByteDance tarafından geliştirilen kendi büyük modeli. ByteDance içindeki 50'den fazla iş senaryosunda uygulama doğrulaması ile, günlük trilyon seviyesinde token kullanımı ile sürekli olarak geliştirilmekte, çeşitli modalite yetenekleri sunmakta ve kaliteli model performansı ile işletmelere zengin iş deneyimleri yaratmaktadır."
+  },
   "fireworksai": {
     "description": "Fireworks AI, işlev çağrısı ve çok modlu işleme üzerine odaklanan önde gelen bir gelişmiş dil modeli hizmet sağlayıcısıdır. En son modeli Firefunction V2, Llama-3 tabanlıdır ve işlev çağrısı, diyalog ve talimat takibi için optimize edilmiştir. Görsel dil modeli FireLLaVA-13B, görüntü ve metin karışık girişi desteklemektedir. Diğer dikkat çekici modeller arasında Llama serisi ve Mixtral serisi bulunmaktadır ve etkili çok dilli talimat takibi ve üretim desteği sunmaktadır."
   },
package/locales/vi-VN/components.json
CHANGED
@@ -88,7 +88,7 @@
   },
   "Thinking": {
     "thinking": "Đang suy nghĩ sâu sắc...",
-    "thought": "Đã suy nghĩ sâu sắc",
-    "thoughtWithDuration": "Đã suy nghĩ sâu sắc
+    "thought": "Đã suy nghĩ sâu sắc (mất {{duration}} giây)",
+    "thoughtWithDuration": "Đã suy nghĩ sâu sắc"
   }
 }
package/locales/vi-VN/providers.json
CHANGED
@@ -23,6 +23,9 @@
   "deepseek": {
     "description": "DeepSeek là một công ty tập trung vào nghiên cứu và ứng dụng công nghệ trí tuệ nhân tạo, mô hình mới nhất của họ, DeepSeek-V2.5, kết hợp khả năng đối thoại chung và xử lý mã, đồng thời đạt được sự cải thiện đáng kể trong việc căn chỉnh sở thích của con người, nhiệm vụ viết và tuân theo chỉ dẫn."
   },
+  "doubao": {
+    "description": "Mô hình lớn tự phát triển do ByteDance phát triển. Được xác thực qua hơn 50 tình huống kinh doanh nội bộ của ByteDance, với việc sử dụng hàng nghìn tỷ token mỗi ngày để liên tục cải tiến, cung cấp nhiều khả năng đa phương thức, tạo ra trải nghiệm kinh doanh phong phú cho doanh nghiệp với hiệu quả mô hình chất lượng cao."
+  },
   "fireworksai": {
     "description": "Fireworks AI là nhà cung cấp dịch vụ mô hình ngôn ngữ cao cấp hàng đầu, tập trung vào gọi chức năng và xử lý đa phương thức. Mô hình mới nhất của họ, Firefunction V2, dựa trên Llama-3, được tối ưu hóa cho gọi chức năng, đối thoại và tuân theo chỉ dẫn. Mô hình ngôn ngữ hình ảnh FireLLaVA-13B hỗ trợ đầu vào hỗn hợp hình ảnh và văn bản. Các mô hình đáng chú ý khác bao gồm dòng Llama và dòng Mixtral, cung cấp hỗ trợ cho việc tuân theo và tạo ra chỉ dẫn đa ngôn ngữ hiệu quả."
   },
package/locales/zh-CN/providers.json
CHANGED
@@ -23,6 +23,9 @@
   "deepseek": {
     "description": "DeepSeek 是一家专注于人工智能技术研究和应用的公司,其最新模型 DeepSeek-V3 多项评测成绩超越 Qwen2.5-72B 和 Llama-3.1-405B 等开源模型,性能对齐领军闭源模型 GPT-4o 与 Claude-3.5-Sonnet。"
   },
+  "doubao": {
+    "description": "字节跳动推出的自研大模型。通过字节跳动内部50+业务场景实践验证,每日万亿级tokens大使用量持续打磨,提供多种模态能力,以优质模型效果为企业打造丰富的业务体验。"
+  },
   "fireworksai": {
     "description": "Fireworks AI 是一家领先的高级语言模型服务商,专注于功能调用和多模态处理。其最新模型 Firefunction V2 基于 Llama-3,优化用于函数调用、对话及指令跟随。视觉语言模型 FireLLaVA-13B 支持图像和文本混合输入。其他 notable 模型包括 Llama 系列和 Mixtral 系列,提供高效的多语言指令跟随与生成支持。"
   },
package/locales/zh-TW/providers.json
CHANGED
@@ -23,6 +23,9 @@
   "deepseek": {
     "description": "DeepSeek 是一家專注於人工智慧技術研究和應用的公司,其最新模型 DeepSeek-V2.5 融合了通用對話和代碼處理能力,並在人類偏好對齊、寫作任務和指令跟隨等方面實現了顯著提升。"
   },
+  "doubao": {
+    "description": "字節跳動推出的自研大模型。透過字節跳動內部50+業務場景實踐驗證,每日萬億級tokens大使用量持續打磨,提供多種模態能力,以優質模型效果為企業打造豐富的業務體驗。"
+  },
   "fireworksai": {
     "description": "Fireworks AI 是一家領先的高級語言模型服務商,專注於功能調用和多模態處理。其最新模型 Firefunction V2 基於 Llama-3,優化用於函數調用、對話及指令跟隨。視覺語言模型 FireLLaVA-13B 支持圖像和文本混合輸入。其他 notable 模型包括 Llama 系列和 Mixtral 系列,提供高效的多語言指令跟隨與生成支持。"
   },
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@lobehub/chat",
-  "version": "1.49.6",
+  "version": "1.49.8",
   "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
   "keywords": [
     "framework",
@@ -129,7 +129,7 @@
     "@lobehub/chat-plugins-gateway": "^1.9.0",
     "@lobehub/icons": "^1.61.1",
     "@lobehub/tts": "^1.28.0",
-    "@lobehub/ui": "^1.164.
+    "@lobehub/ui": "^1.164.10",
     "@neondatabase/serverless": "^0.10.4",
     "@next/third-parties": "^15.1.4",
     "@react-spring/web": "^9.7.5",
@@ -174,6 +174,7 @@
     "lodash-es": "^4.17.21",
     "lucide-react": "^0.471.0",
     "mammoth": "^1.9.0",
+    "mdast-util-to-markdown": "^2.1.2",
     "modern-screenshot": "^4.5.5",
     "nanoid": "^5.0.9",
     "next": "^15.1.4",
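The only new runtime dependency is `mdast-util-to-markdown`, which the new remark plugin (see `Thinking/remarkPlugin.ts` below) uses to serialize the mdast nodes captured between `<think>` tags back into a Markdown string. A minimal sketch of the call it relies on (the sample node is illustrative):

```ts
import { toMarkdown } from 'mdast-util-to-markdown';

// Serialize a small mdast tree back to Markdown text.
const markdown = toMarkdown({
  type: 'paragraph',
  children: [{ type: 'text', value: 'chain of thought goes here' }],
} as any);

console.log(markdown); // "chain of thought goes here\n"
```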
package/src/components/Thinking/index.tsx
CHANGED
@@ -1,4 +1,4 @@
-import { Icon, Markdown } from '@lobehub/ui';
+import { CopyButton, Icon, Markdown } from '@lobehub/ui';
 import { createStyles } from 'antd-style';
 import { AnimatePresence, motion } from 'framer-motion';
 import { AtomIcon, ChevronDown, ChevronRight } from 'lucide-react';
@@ -9,8 +9,6 @@ import { Flexbox } from 'react-layout-kit';

 const useStyles = createStyles(({ css, token, isDarkMode }) => ({
   container: css`
-    cursor: pointer;
-
     width: fit-content;
     padding-block: 4px;
     padding-inline: 8px;
@@ -66,8 +64,8 @@ interface ThinkingProps {
   thinking?: boolean;
 }

-const Thinking = memo<ThinkingProps>(({ content = '', duration, thinking }) => {
-  const { t } = useTranslation('components');
+const Thinking = memo<ThinkingProps>(({ content, duration, thinking }) => {
+  const { t } = useTranslation(['components', 'common']);
   const { styles, cx } = useStyles();

   const [showDetail, setShowDetail] = useState(false);
@@ -83,14 +81,16 @@ const Thinking = memo<ThinkingProps>(({ content = '', duration, thinking }) => {
   }, [thinking, content]);

   return (
-    <Flexbox
-
-
-
-
-
-
-
+    <Flexbox className={cx(styles.container, showDetail && styles.expand)} gap={16}>
+      <Flexbox
+        distribution={'space-between'}
+        flex={1}
+        horizontal
+        onClick={() => {
+          setShowDetail(!showDetail);
+        }}
+        style={{ cursor: 'pointer' }}
+      >
       {thinking ? (
         <Flexbox gap={8} horizontal>
           <Icon icon={AtomIcon} />
@@ -101,12 +101,23 @@ const Thinking = memo<ThinkingProps>(({ content = '', duration, thinking }) => {
       ) : (
         <Flexbox gap={8} horizontal>
           <Icon icon={AtomIcon} />
-          {
+          {!duration
             ? t('Thinking.thoughtWithDuration')
             : t('Thinking.thought', { duration: ((duration || 0) / 1000).toFixed(1) })}
         </Flexbox>
       )}
-      <
+        <Flexbox gap={4} horizontal>
+          {showDetail && content && (
+            <div
+              onClick={(event) => {
+                event.stopPropagation();
+              }}
+            >
+              <CopyButton content={content} size={'small'} title={t('copy', { ns: 'common' })} />
+            </div>
+          )}
+          <Icon icon={showDetail ? ChevronDown : ChevronRight} />
+        </Flexbox>
+      </Flexbox>
     </Flexbox>

     <AnimatePresence initial={false}>
@@ -125,10 +136,13 @@ const Thinking = memo<ThinkingProps>(({ content = '', duration, thinking }) => {
           open: { height: 'auto', opacity: 1, width: 'auto' },
         }}
       >
-
+        {typeof content === 'string' ? (
+          <Markdown variant={'chat'}>{content}</Markdown>
+        ) : (
+          content
+        )}
       </motion.div>
     )}
-    {/*<Flexbox></Flexbox>*/}
   </AnimatePresence>
 </Flexbox>
 );
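For context, a hedged sketch of how the reworked component is meant to be used by callers such as the new `Thinking/Render.tsx` element shown later in this diff (props per the diff above; the sample values are illustrative):

```tsx
import Thinking from '@/components/Thinking';

// While the model is still streaming its reasoning:
<Thinking content={'Partial reasoning so far...'} thinking />;

// Once the reasoning is finished, an optional duration (milliseconds) switches the header
// to the "thought for N seconds" label, and a copy button appears when the panel is expanded:
<Thinking content={'Full chain of thought'} duration={4200} thinking={false} />;
```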
package/src/const/plugin.ts
CHANGED
@@ -12,3 +12,5 @@ export const ARTIFACT_TAG_CLOSED_REGEX = /<lobeArtifact\b[^>]*>([\S\s]*?)<\/lobe

 // https://regex101.com/r/AvPA2g/1
 export const ARTIFACT_THINKING_TAG_REGEX = /<lobeThinking\b[^>]*>([\S\s]*?)(?:<\/lobeThinking>|$)/;
+
+export const THINKING_TAG_REGEX = /<think\b[^>]*>([\S\s]*?)(?:<\/think>|$)/;
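For reference, a minimal sketch of how the new THINKING_TAG_REGEX behaves on both a closed and a still-streaming `<think>` block (the sample strings are made up for illustration):

```ts
import { THINKING_TAG_REGEX } from '@/const/plugin';

// Closed tag: capture group 1 holds the reasoning text.
const closed = '<think>step 1, step 2</think> final answer';
console.log(closed.match(THINKING_TAG_REGEX)?.[1]); // "step 1, step 2"

// Unclosed tag (still streaming): the `|$` alternative lets the match run to the end of input.
const streaming = '<think>partial chain of thought...';
console.log(streaming.match(THINKING_TAG_REGEX)?.[1]); // "partial chain of thought..."
```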
package/src/features/Conversation/components/ChatItem/index.tsx
CHANGED
@@ -26,9 +26,10 @@ import {
 import History from '../History';
 import { markdownElements } from '../MarkdownElements';
 import { InPortalThreadContext } from './InPortalThreadContext';
-import { processWithArtifact } from './utils';
+import { normalizeThinkTags, processWithArtifact } from './utils';

-const rehypePlugins = markdownElements.map((element) => element.rehypePlugin);
+const rehypePlugins = markdownElements.map((element) => element.rehypePlugin).filter(Boolean);
+const remarkPlugins = markdownElements.map((element) => element.remarkPlugin).filter(Boolean);

 const useStyles = createStyles(({ css, prefixCls }) => ({
   loading: css`
@@ -148,7 +149,9 @@ const Item = memo<ChatListItemProps>(

   // remove line breaks in artifact tag to make the ast transform easier
   const message =
-    !editing && item?.role === 'assistant'
+    !editing && item?.role === 'assistant'
+      ? normalizeThinkTags(processWithArtifact(item?.content))
+      : item?.content;

   // ======================= Performance Optimization ======================= //
   // these useMemo/useCallback are all for the performance optimization
@@ -172,6 +175,7 @@ const Item = memo<ChatListItemProps>(
       components,
       customRender: markdownCustomRender,
       rehypePlugins,
+      remarkPlugins,
     }),
     [components, markdownCustomRender],
   );
package/src/features/Conversation/components/ChatItem/utils.ts
CHANGED
@@ -26,3 +26,18 @@ export const processWithArtifact = (input: string = '') => {

   return output;
 };
+
+// Pre-processing: make sure <think> tags are surrounded by two line breaks
+export const normalizeThinkTags = (input: string) => {
+  return (
+    input
+      // ensure two line breaks around the opening <think> tag
+      .replaceAll(/([^\n])\s*<think>/g, '$1\n\n<think>')
+      .replaceAll(/<think>\s*([^\n])/g, '<think>\n\n$1')
+      // ensure two line breaks around the closing </think> tag
+      .replaceAll(/([^\n])\s*<\/think>/g, '$1\n\n</think>')
+      .replaceAll(/<\/think>\s*([^\n])/g, '</think>\n\n$1')
+      // collapse any extra blank lines this may have produced
+      .replaceAll(/\n{3,}/g, '\n\n')
+  );
+};
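A quick illustration of what this normalization does to a streamed assistant message, so the remark plugin later sees `<think>` and `</think>` as standalone HTML blocks (the sample input is invented for illustration):

```ts
import { normalizeThinkTags } from './utils';

const raw = 'Sure.<think>Let me check the docs first.</think>Here is the answer.';

console.log(normalizeThinkTags(raw));
// Sure.
//
// <think>
//
// Let me check the docs first.
//
// </think>
//
// Here is the answer.
```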
package/src/features/Conversation/components/MarkdownElements/LobeArtifact/index.ts
CHANGED
@@ -1,8 +1,11 @@
+import { FC } from 'react';
+
+import { MarkdownElement, MarkdownElementProps } from '../type';
 import Component from './Render';
 import rehypePlugin from './rehypePlugin';

-const AntArtifactElement = {
-  Component
+const AntArtifactElement: MarkdownElement = {
+  Component: Component as unknown as FC<MarkdownElementProps>,
   rehypePlugin,
   tag: 'lobeArtifact',
 };
package/src/features/Conversation/components/MarkdownElements/LobeThinking/index.ts
CHANGED
@@ -1,9 +1,10 @@
 import { ARTIFACT_THINKING_TAG } from '@/const/plugin';

+import { MarkdownElement } from '../type';
 import Component from './Render';
 import rehypePlugin from './rehypePlugin';

-const LobeThinkingElement = {
+const LobeThinkingElement: MarkdownElement = {
   Component,
   rehypePlugin,
   tag: ARTIFACT_THINKING_TAG,
package/src/features/Conversation/components/MarkdownElements/Thinking/Render.tsx
ADDED
@@ -0,0 +1,27 @@
+import { memo } from 'react';
+
+import Thinking from '@/components/Thinking';
+import { useChatStore } from '@/store/chat';
+import { chatSelectors } from '@/store/chat/selectors';
+
+import { MarkdownElementProps } from '../type';
+
+const isThinkingClosed = (input: string = '') => {
+  const openTag = `<think>`;
+  const closeTag = `</think>`;
+
+  return input.includes(openTag) && input.includes(closeTag);
+};
+
+const Render = memo<MarkdownElementProps>(({ children, id }) => {
+  const [isGenerating] = useChatStore((s) => {
+    const message = chatSelectors.getMessageById(id)(s);
+    return [!isThinkingClosed(message?.content)];
+  });
+
+  if (!isGenerating && !children) return;
+
+  return <Thinking content={children as string} thinking={isGenerating} />;
+});
+
+export default Render;
package/src/features/Conversation/components/MarkdownElements/Thinking/index.ts
ADDED
@@ -0,0 +1,11 @@
+import { MarkdownElement } from '../type';
+import Component from './Render';
+import { createRemarkCustomTagPlugin } from './remarkPlugin';
+
+const ThinkingElement: MarkdownElement = {
+  Component,
+  remarkPlugin: createRemarkCustomTagPlugin('think'),
+  tag: 'think',
+};
+
+export default ThinkingElement;
package/src/features/Conversation/components/MarkdownElements/Thinking/remarkPlugin.ts
ADDED
@@ -0,0 +1,57 @@
+import { toMarkdown } from 'mdast-util-to-markdown';
+import { SKIP, visit } from 'unist-util-visit';
+
+export const createRemarkCustomTagPlugin = (tag: string) => () => {
+  return (tree: any) => {
+    visit(tree, 'html', (node, index, parent) => {
+      if (node.value === `<${tag}>`) {
+        const startIndex = index as number;
+        let endIndex = startIndex + 1;
+        let hasCloseTag = false;
+
+        // look for the closing tag
+        while (endIndex < parent.children.length) {
+          const sibling = parent.children[endIndex];
+          if (sibling.type === 'html' && sibling.value === `</${tag}>`) {
+            hasCloseTag = true;
+            break;
+          }
+          endIndex++;
+        }
+
+        // work out how many nodes need to be removed
+        const deleteCount = hasCloseTag
+          ? endIndex - startIndex + 1
+          : parent.children.length - startIndex;
+
+        // extract the content nodes
+        const contentNodes = parent.children.slice(
+          startIndex + 1,
+          hasCloseTag ? endIndex : undefined,
+        );
+
+        // convert them back to a Markdown string
+        const content = contentNodes
+          .map((n: any) => toMarkdown(n))
+          .join('\n\n')
+          .trim();
+
+        // create the custom node
+        const customNode = {
+          data: {
+            hChildren: [{ type: 'text', value: content }],
+            hName: tag,
+          },
+          position: node.position,
+          type: `${tag}Block`,
+        };
+
+        // replace the original nodes
+        parent.children.splice(startIndex, deleteCount, customNode);
+
+        // skip over the nodes that were just handled
+        return [SKIP, startIndex + 1];
+      }
+    });
+  };
+};
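A hedged sketch of how this plugin can be exercised on its own with `unified` and `remark-parse` (the pipeline here is purely illustrative; in the app the plugin is injected through the Markdown renderer's `remarkPlugins` option as shown in the ChatItem diff above):

```ts
import remarkParse from 'remark-parse';
import { unified } from 'unified';

import { createRemarkCustomTagPlugin } from './remarkPlugin';

const processor = unified().use(remarkParse).use(createRemarkCustomTagPlugin('think'));

const tree = processor.runSync(
  processor.parse('<think>\n\nreasoning goes here\n\n</think>\n\nfinal answer'),
);

// The html nodes between <think> and </think> are collapsed into a single
// `thinkBlock` node whose hName/hChildren data lets rehype render a <think> element,
// which the Thinking Render component then picks up.
console.dir(tree, { depth: null });
```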
package/src/features/Conversation/components/MarkdownElements/index.ts
CHANGED
@@ -1,4 +1,6 @@
 import LobeArtifact from './LobeArtifact';
 import LobeThinking from './LobeThinking';
+import Thinking from './Thinking';
+import { MarkdownElement } from './type';

-export const markdownElements = [LobeArtifact, LobeThinking];
+export const markdownElements: MarkdownElement[] = [Thinking, LobeArtifact, LobeThinking];
package/src/features/Conversation/components/MarkdownElements/type.ts
CHANGED
@@ -1,7 +1,14 @@
-import { ReactNode } from 'react';
+import { FC, ReactNode } from 'react';

 export interface MarkdownElementProps {
   children: ReactNode;
   id: string;
   type: string;
 }
+
+export interface MarkdownElement {
+  Component: FC<MarkdownElementProps>;
+  rehypePlugin?: any;
+  remarkPlugin?: any;
+  tag: string;
+}
package/src/libs/agent-runtime/deepseek/index.test.ts
CHANGED
@@ -4,12 +4,15 @@ import { Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';

 import {
   ChatStreamCallbacks,
+  ChatStreamPayload,
+  LLMRoleType,
   LobeOpenAICompatibleRuntime,
   ModelProvider,
 } from '@/libs/agent-runtime';

 import * as debugStreamModule from '../utils/debugStream';
 import { LobeDeepSeekAI } from './index';
+import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';

 const provider = ModelProvider.DeepSeek;
 const defaultBaseURL = 'https://api.deepseek.com/v1';
@@ -22,6 +25,17 @@ vi.spyOn(console, 'error').mockImplementation(() => {});

 let instance: LobeOpenAICompatibleRuntime;

+const createDeepSeekAIInstance = () => new LobeDeepSeekAI({ apiKey: 'test' });
+
+const mockSuccessfulChatCompletion = () => {
+  vi.spyOn(instance['client'].chat.completions, 'create').mockResolvedValue({
+    id: 'cmpl-mock',
+    object: 'chat.completion',
+    created: Date.now(),
+    choices: [{ index: 0, message: { role: 'assistant', content: 'Mock response' }, finish_reason: 'stop' }],
+  } as any);
+};
+
 beforeEach(() => {
   instance = new LobeDeepSeekAI({ apiKey: 'test' });

@@ -251,5 +265,126 @@ describe('LobeDeepSeekAI', () => {
       process.env.DEBUG_DEEPSEEK_CHAT_COMPLETION = originalDebugValue;
     });
   });
+
+  describe('deepseek-reasoner', () => {
+    beforeEach(() => {
+      instance = createDeepSeekAIInstance();
+      mockSuccessfulChatCompletion();
+    });
+
+    it('should insert a user message if the first message is from assistant', async () => {
+      const payloadMessages = [{ content: 'Hello', role: 'assistant' as LLMRoleType }];
+      const expectedMessages = [
+        { content: '', role: 'user' },
+        ...payloadMessages,
+      ];
+
+      const payload: ChatStreamPayload = {
+        messages: payloadMessages,
+        model: 'deepseek-reasoner',
+        temperature: 0,
+      };
+
+      await instance.chat(payload);
+
+      expect(instance['client'].chat.completions.create).toHaveBeenCalled();
+      const actualArgs = (instance['client'].chat.completions.create as Mock).mock.calls[0];
+      const actualMessages = actualArgs[0].messages;
+      expect(actualMessages).toEqual(expectedMessages);
+    });
+
+    it('should insert a user message if the first message is from assistant (with system summary)', async () => {
+      const payloadMessages = [
+        { content: 'System summary', role: 'system' as LLMRoleType },
+        { content: 'Hello', role: 'assistant' as LLMRoleType },
+      ];
+      const expectedMessages = [
+        { content: 'System summary', role: 'system' },
+        { content: '', role: 'user' },
+        { content: 'Hello', role: 'assistant' },
+      ];
+
+      const payload: ChatStreamPayload = {
+        messages: payloadMessages,
+        model: 'deepseek-reasoner',
+        temperature: 0,
+      };
+
+      await instance.chat(payload);
+
+      expect(instance['client'].chat.completions.create).toHaveBeenCalled();
+      const actualArgs = (instance['client'].chat.completions.create as Mock).mock.calls[0];
+      const actualMessages = actualArgs[0].messages;
+      expect(actualMessages).toEqual(expectedMessages);
+    });
+
+    it('should insert alternating roles if messages do not alternate', async () => {
+      const payloadMessages = [
+        { content: 'user1', role: 'user' as LLMRoleType },
+        { content: 'user2', role: 'user' as LLMRoleType },
+        { content: 'assistant1', role: 'assistant' as LLMRoleType },
+        { content: 'assistant2', role: 'assistant' as LLMRoleType },
+      ];
+      const expectedMessages = [
+        { content: 'user1', role: 'user' },
+        { content: '', role: 'assistant' },
+        { content: 'user2', role: 'user' },
+        { content: 'assistant1', role: 'assistant' },
+        { content: '', role: 'user' },
+        { content: 'assistant2', role: 'assistant' },
+      ];
+
+      const payload: ChatStreamPayload = {
+        messages: payloadMessages,
+        model: 'deepseek-reasoner',
+        temperature: 0,
+      };
+
+      await instance.chat(payload);
+
+      expect(instance['client'].chat.completions.create).toHaveBeenCalled();
+      const actualArgs = (instance['client'].chat.completions.create as Mock).mock.calls[0];
+      const actualMessages = actualArgs[0].messages;
+      expect(actualMessages).toEqual(expectedMessages);
+    });
+
+    it('complex condition', async () => {
+      const payloadMessages = [
+        { content: 'system', role: 'system' as LLMRoleType },
+        { content: 'assistant', role: 'assistant' as LLMRoleType },
+        { content: 'user1', role: 'user' as LLMRoleType },
+        { content: 'user2', role: 'user' as LLMRoleType },
+        { content: 'user3', role: 'user' as LLMRoleType },
+        { content: 'assistant1', role: 'assistant' as LLMRoleType },
+        { content: 'assistant2', role: 'assistant' as LLMRoleType },
+      ];
+      const expectedMessages = [
+        { content: 'system', role: 'system' },
+        { content: '', role: 'user' },
+        { content: 'assistant', role: 'assistant' },
+        { content: 'user1', role: 'user' },
+        { content: '', role: 'assistant' },
+        { content: 'user2', role: 'user' },
+        { content: '', role: 'assistant' },
+        { content: 'user3', role: 'user' },
+        { content: 'assistant1', role: 'assistant' },
+        { content: '', role: 'user' },
+        { content: 'assistant2', role: 'assistant' },
+      ];
+
+      const payload: ChatStreamPayload = {
+        messages: payloadMessages,
+        model: 'deepseek-reasoner',
+        temperature: 0,
+      };
+
+      await instance.chat(payload);
+
+      expect(instance['client'].chat.completions.create).toHaveBeenCalled();
+      const actualArgs = (instance['client'].chat.completions.create as Mock).mock.calls[0];
+      const actualMessages = actualArgs[0].messages;
+      expect(actualMessages).toEqual(expectedMessages);
+    });
+  });
 });
 });
package/src/libs/agent-runtime/deepseek/index.ts
CHANGED
@@ -12,24 +12,49 @@ export interface DeepSeekModelCard {
 export const LobeDeepSeekAI = LobeOpenAICompatibleFactory({
   baseURL: 'https://api.deepseek.com/v1',
   chatCompletion: {
-    handlePayload: ({ frequency_penalty, model, presence_penalty, temperature, top_p, ...payload }: ChatStreamPayload) =>
-
+    handlePayload: ({ frequency_penalty, messages, model, presence_penalty, temperature, top_p, ...payload }: ChatStreamPayload) => {
+      // github.com/lobehub/lobe-chat/pull/5548
+      let filteredMessages = messages.filter(message => message.role !== 'system');
+
+      if (filteredMessages.length > 0 && filteredMessages[0].role === 'assistant') {
+        filteredMessages.unshift({ content: "", role: "user" });
+      }
+
+      let lastRole = '';
+      for (let i = 0; i < filteredMessages.length; i++) {
+        const message = filteredMessages[i];
+        if (message.role === lastRole) {
+          const newRole = lastRole === 'assistant' ? 'user' : 'assistant';
+          filteredMessages.splice(i, 0, { content: "", role: newRole });
+          i++;
+        }
+        lastRole = message.role;
+      }
+
+      if (messages.length > 0 && messages[0].role === 'system') {
+        filteredMessages.unshift(messages[0]);
+      }
+
+      return {
         ...payload,
         model,
         ...(model === 'deepseek-reasoner'
           ? {
               frequency_penalty: undefined,
+              messages: filteredMessages,
               presence_penalty: undefined,
               temperature: undefined,
               top_p: undefined,
             }
           : {
               frequency_penalty,
+              messages,
               presence_penalty,
               temperature,
               top_p,
             }),
-      }
+      } as OpenAI.ChatCompletionCreateParamsStreaming;
+    },
   },
   debug: {
     chatCompletion: () => process.env.DEBUG_DEEPSEEK_CHAT_COMPLETION === '1',
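The effect of the new handlePayload logic is easier to see in isolation. Below is a hedged, standalone sketch of the same interleaving rules (the function name is illustrative and not exported by the package): system messages are hoisted back to the front, and empty user/assistant turns are inserted so that `deepseek-reasoner` always receives strictly alternating roles that start with a user message, which is what the new tests above assert.

```ts
type Role = 'assistant' | 'system' | 'user';
interface Msg { content: string; role: Role }

// Illustrative re-implementation of the interleaving rules applied for deepseek-reasoner.
const interleaveForReasoner = (messages: Msg[]): Msg[] => {
  const result = messages.filter((m) => m.role !== 'system');

  // the first non-system message must come from the user
  if (result.length > 0 && result[0].role === 'assistant') {
    result.unshift({ content: '', role: 'user' });
  }

  // pad consecutive same-role turns with an empty opposite-role turn
  let lastRole = '';
  for (let i = 0; i < result.length; i++) {
    if (result[i].role === lastRole) {
      result.splice(i, 0, { content: '', role: lastRole === 'assistant' ? 'user' : 'assistant' });
      i++;
    }
    lastRole = result[i].role;
  }

  // keep a leading system message, if there was one
  if (messages[0]?.role === 'system') result.unshift(messages[0]);

  return result;
};

console.log(
  interleaveForReasoner([
    { content: 'user1', role: 'user' },
    { content: 'user2', role: 'user' },
    { content: 'assistant1', role: 'assistant' },
    { content: 'assistant2', role: 'assistant' },
  ]).map((m) => m.role).join(' -> '),
);
// user -> assistant -> user -> assistant -> user -> assistant
```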