@lobehub/chat 1.128.0 → 1.128.2

This diff compares publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their respective public registries.
Files changed (79)
  1. package/.github/workflows/test.yml +8 -1
  2. package/CHANGELOG.md +51 -0
  3. package/changelog/v1.json +18 -0
  4. package/locales/ar/models.json +6 -0
  5. package/locales/bg-BG/models.json +6 -0
  6. package/locales/de-DE/models.json +6 -0
  7. package/locales/en-US/models.json +6 -0
  8. package/locales/es-ES/models.json +6 -0
  9. package/locales/fa-IR/models.json +6 -0
  10. package/locales/fr-FR/models.json +6 -0
  11. package/locales/it-IT/models.json +6 -0
  12. package/locales/ja-JP/models.json +6 -0
  13. package/locales/ko-KR/models.json +6 -0
  14. package/locales/nl-NL/models.json +6 -0
  15. package/locales/pl-PL/models.json +6 -0
  16. package/locales/pt-BR/models.json +6 -0
  17. package/locales/ru-RU/models.json +6 -0
  18. package/locales/tr-TR/models.json +6 -0
  19. package/locales/vi-VN/models.json +6 -0
  20. package/locales/zh-CN/models.json +6 -0
  21. package/locales/zh-TW/models.json +6 -0
  22. package/next.config.ts +8 -1
  23. package/package.json +71 -69
  24. package/packages/context-engine/ARCHITECTURE.md +425 -0
  25. package/packages/context-engine/package.json +40 -0
  26. package/packages/context-engine/src/base/BaseProcessor.ts +87 -0
  27. package/packages/context-engine/src/base/BaseProvider.ts +22 -0
  28. package/packages/context-engine/src/index.ts +32 -0
  29. package/packages/context-engine/src/pipeline.ts +219 -0
  30. package/packages/context-engine/src/processors/HistoryTruncate.ts +76 -0
  31. package/packages/context-engine/src/processors/InputTemplate.ts +83 -0
  32. package/packages/context-engine/src/processors/MessageCleanup.ts +87 -0
  33. package/packages/context-engine/src/processors/MessageContent.ts +298 -0
  34. package/packages/context-engine/src/processors/PlaceholderVariables.ts +196 -0
  35. package/packages/context-engine/src/processors/ToolCall.ts +186 -0
  36. package/packages/context-engine/src/processors/ToolMessageReorder.ts +113 -0
  37. package/packages/context-engine/src/processors/__tests__/HistoryTruncate.test.ts +175 -0
  38. package/packages/context-engine/src/processors/__tests__/InputTemplate.test.ts +243 -0
  39. package/packages/context-engine/src/processors/__tests__/MessageContent.test.ts +394 -0
  40. package/packages/context-engine/src/processors/__tests__/PlaceholderVariables.test.ts +334 -0
  41. package/packages/context-engine/src/processors/__tests__/ToolMessageReorder.test.ts +186 -0
  42. package/packages/context-engine/src/processors/index.ts +15 -0
  43. package/packages/context-engine/src/providers/HistorySummary.ts +102 -0
  44. package/packages/context-engine/src/providers/InboxGuide.ts +102 -0
  45. package/packages/context-engine/src/providers/SystemRoleInjector.ts +64 -0
  46. package/packages/context-engine/src/providers/ToolSystemRole.ts +118 -0
  47. package/packages/context-engine/src/providers/__tests__/HistorySummaryProvider.test.ts +112 -0
  48. package/packages/context-engine/src/providers/__tests__/InboxGuideProvider.test.ts +121 -0
  49. package/packages/context-engine/src/providers/__tests__/SystemRoleInjector.test.ts +200 -0
  50. package/packages/context-engine/src/providers/__tests__/ToolSystemRoleProvider.test.ts +140 -0
  51. package/packages/context-engine/src/providers/index.ts +11 -0
  52. package/packages/context-engine/src/types.ts +201 -0
  53. package/packages/context-engine/vitest.config.mts +10 -0
  54. package/packages/database/package.json +1 -1
  55. package/packages/model-bank/src/aiModels/deepseek.ts +4 -4
  56. package/packages/model-bank/src/aiModels/google.ts +30 -6
  57. package/packages/model-bank/src/aiModels/groq.ts +1 -19
  58. package/packages/model-bank/src/aiModels/modelscope.ts +24 -18
  59. package/packages/model-bank/src/aiModels/novita.ts +71 -5
  60. package/packages/model-bank/src/aiModels/qwen.ts +87 -2
  61. package/packages/model-bank/src/aiModels/siliconcloud.ts +65 -2
  62. package/packages/model-bank/src/aiModels/volcengine.ts +4 -3
  63. package/packages/model-runtime/src/utils/modelParse.ts +4 -4
  64. package/packages/prompts/src/prompts/systemRole/index.ts +1 -1
  65. package/packages/utils/src/index.ts +2 -0
  66. package/packages/utils/src/uriParser.test.ts +29 -0
  67. package/packages/utils/src/uriParser.ts +24 -0
  68. package/src/services/{__tests__ → chat}/chat.test.ts +22 -1032
  69. package/src/services/chat/clientModelRuntime.test.ts +385 -0
  70. package/src/services/chat/clientModelRuntime.ts +34 -0
  71. package/src/services/chat/contextEngineering.test.ts +848 -0
  72. package/src/services/chat/contextEngineering.ts +123 -0
  73. package/src/services/chat/helper.ts +61 -0
  74. package/src/services/{chat.ts → chat/index.ts} +24 -366
  75. package/src/services/chat/types.ts +9 -0
  76. package/src/services/models.ts +1 -1
  77. package/src/store/aiInfra/slices/aiModel/selectors.ts +2 -2
  78. package/src/store/chat/slices/aiChat/actions/generateAIChat.ts +1 -40
  79. /package/src/services/{__tests__ → chat}/__snapshots__/chat.test.ts.snap +0 -0
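Most of the new files in the list above belong to the new packages/context-engine workspace, which the 1.128.1 refactor introduces to move message preprocessing out of src/services/chat and into a pipeline of processors (HistoryTruncate, MessageCleanup, ToolMessageReorder, …) and providers (SystemRoleInjector, HistorySummary, …). The package's actual API is not visible in this diff, so the sketch below only illustrates the general processor-pipeline pattern those file names suggest; every identifier in it (PipelineContext, Processor, runPipeline, truncateHistory) is hypothetical rather than taken from context-engine.

```ts
// Hypothetical sketch of a processor pipeline; NOT the context-engine API.
interface ChatMessage {
  content: string;
  role: 'assistant' | 'system' | 'tool' | 'user';
}

interface PipelineContext {
  messages: ChatMessage[];
}

// A processor receives the current context and returns a transformed copy.
type Processor = (ctx: PipelineContext) => PipelineContext | Promise<PipelineContext>;

// Run processors in order, feeding each one the previous stage's output.
const runPipeline = async (
  processors: Processor[],
  initial: PipelineContext,
): Promise<PipelineContext> => {
  let ctx = initial;
  for (const processor of processors) {
    ctx = await processor(ctx);
  }
  return ctx;
};

// Example stage in the spirit of HistoryTruncate: keep only the last N messages.
const truncateHistory =
  (limit: number): Processor =>
  (ctx) => ({ ...ctx, messages: ctx.messages.slice(-limit) });

// Usage: run a single truncation stage over a short conversation.
runPipeline([truncateHistory(2)], {
  messages: [
    { content: 'hi', role: 'user' },
    { content: 'hello!', role: 'assistant' },
    { content: 'how are you?', role: 'user' },
  ],
}).then((result) => console.log(result.messages.length)); // 2
```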
package/.github/workflows/test.yml CHANGED
@@ -11,7 +11,14 @@ jobs:
  runs-on: ubuntu-latest
  strategy:
  matrix:
- package: [file-loaders, prompts, model-runtime, web-crawler, electron-server-ipc, utils]
+ package:
+ - file-loaders
+ - prompts
+ - model-runtime
+ - web-crawler
+ - electron-server-ipc
+ - utils
+ - context-engine

  name: Test package ${{ matrix.package }}

package/CHANGELOG.md CHANGED
@@ -2,6 +2,57 @@

  # Changelog

+ ### [Version 1.128.2](https://github.com/lobehub/lobe-chat/compare/v1.128.1...v1.128.2)
+
+ <sup>Released on **2025-09-13**</sup>
+
+ #### 💄 Styles
+
+ - **misc**: Update i18n, Update model configs.
+
+ <br/>
+
+ <details>
+ <summary><kbd>Improvements and Fixes</kbd></summary>
+
+ #### Styles
+
+ - **misc**: Update i18n, closes [#9237](https://github.com/lobehub/lobe-chat/issues/9237) ([642dc3b](https://github.com/lobehub/lobe-chat/commit/642dc3b))
+ - **misc**: Update model configs, closes [#9170](https://github.com/lobehub/lobe-chat/issues/9170) ([f89b730](https://github.com/lobehub/lobe-chat/commit/f89b730))
+
+ </details>
+
+ <div align="right">
+
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+ </div>
+
+ ### [Version 1.128.1](https://github.com/lobehub/lobe-chat/compare/v1.128.0...v1.128.1)
+
+ <sup>Released on **2025-09-12**</sup>
+
+ #### ♻ Code Refactoring
+
+ - **misc**: Refactor message proccesser to the context engine.
+
+ <br/>
+
+ <details>
+ <summary><kbd>Improvements and Fixes</kbd></summary>
+
+ #### Code refactoring
+
+ - **misc**: Refactor message proccesser to the context engine, closes [#9230](https://github.com/lobehub/lobe-chat/issues/9230) ([dacfffd](https://github.com/lobehub/lobe-chat/commit/dacfffd))
+
+ </details>
+
+ <div align="right">
+
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+ </div>
+
  ## [Version 1.128.0](https://github.com/lobehub/lobe-chat/compare/v1.127.4...v1.128.0)

  <sup>Released on **2025-09-12**</sup>
package/changelog/v1.json CHANGED
@@ -1,4 +1,22 @@
  [
+ {
+ "children": {
+ "improvements": [
+ "Update i18n, Update model configs."
+ ]
+ },
+ "date": "2025-09-13",
+ "version": "1.128.2"
+ },
+ {
+ "children": {
+ "improvements": [
+ "Refactor message proccesser to the context engine."
+ ]
+ },
+ "date": "2025-09-12",
+ "version": "1.128.1"
+ },
  {
  "children": {
  "features": [
package/locales/ar/models.json CHANGED
@@ -2615,6 +2615,12 @@
  "qwen3-max-preview": {
  "description": "نسخة المعاينة لنموذج Qwen 3 Max من سلسلة Tongyi Qianwen، مع تحسينات كبيرة في القدرات العامة مقارنة بسلسلة 2.5، بما في ذلك فهم النصوص العامة باللغتين الصينية والإنجليزية، الالتزام بالتعليمات المعقدة، المهام المفتوحة الذاتية، القدرات متعددة اللغات، واستدعاء الأدوات؛ مع تقليل الأوهام المعرفية للنموذج."
  },
+ "qwen3-next-80b-a3b-instruct": {
+ "description": "نموذج مفتوح المصدر من الجيل الجديد لوضع عدم التفكير مبني على Qwen3، يتميز بفهم أفضل للنصوص الصينية مقارنة بالإصدار السابق (Tongyi Qianwen 3-235B-A22B-Instruct-2507)، مع تعزيز في قدرات الاستدلال المنطقي وأداء أفضل في مهام توليد النصوص."
+ },
+ "qwen3-next-80b-a3b-thinking": {
+ "description": "نموذج مفتوح المصدر من الجيل الجديد لوضع التفكير مبني على Qwen3، يتميز بتحسين في الالتزام بالتعليمات مقارنة بالإصدار السابق (Tongyi Qianwen 3-235B-A22B-Thinking-2507)، مع ردود ملخصة وأكثر إيجازًا من النموذج."
+ },
  "qwq": {
  "description": "QwQ هو نموذج بحث تجريبي يركز على تحسين قدرات الاستدلال للذكاء الاصطناعي."
  },
package/locales/bg-BG/models.json CHANGED
@@ -2615,6 +2615,12 @@
  "qwen3-max-preview": {
  "description": "Preview версия на Max модела от серията Qwen 3, с голямо подобрение спрямо серия 2.5 в общите универсални способности, разбиране на текст на китайски и английски, следване на сложни инструкции, субективни отворени задачи, многоезични способности и използване на инструменти; моделът има по-малко халюцинации на знания."
  },
+ "qwen3-next-80b-a3b-instruct": {
+ "description": "Базирано на Qwen3, ново поколение отворен модел без мисловен режим, който предлага по-добро разбиране на китайски текстове, подобрени логически умения и по-добри резултати при задачи за генериране на текст в сравнение с предишната версия (Tongyi Qianwen 3-235B-A22B-Instruct-2507)."
+ },
+ "qwen3-next-80b-a3b-thinking": {
+ "description": "Базирано на Qwen3, ново поколение отворен модел с мисловен режим, който подобрява спазването на инструкции и предоставя по-кратки и точни обобщения в сравнение с предишната версия (Tongyi Qianwen 3-235B-A22B-Thinking-2507)."
+ },
  "qwq": {
  "description": "QwQ е експериментален изследователски модел, който се фокусира върху подобряване на AI разсъдъчните способности."
  },
package/locales/de-DE/models.json CHANGED
@@ -2615,6 +2615,12 @@
  "qwen3-max-preview": {
  "description": "Die Preview-Version des Max-Modells der Tongyi Qianwen 3-Serie zeigt im Vergleich zur 2.5-Serie eine deutliche Steigerung der allgemeinen Fähigkeiten, einschließlich verbesserter chinesisch- und englischsprachiger Textverständnisfähigkeiten, komplexer Befolgung von Anweisungen, subjektiver offener Aufgaben, Mehrsprachigkeit und Werkzeugaufruf-Fähigkeiten; das Modell zeigt weniger Wissenshalluzinationen."
  },
+ "qwen3-next-80b-a3b-instruct": {
+ "description": "Ein neues Open-Source-Modell der nächsten Generation im Nicht-Denk-Modus basierend auf Qwen3. Im Vergleich zur vorherigen Version (Tongyi Qianwen 3-235B-A22B-Instruct-2507) bietet es eine verbesserte chinesische Textverständnisfähigkeit, verstärkte logische Schlussfolgerungen und bessere Leistung bei textgenerierenden Aufgaben."
+ },
+ "qwen3-next-80b-a3b-thinking": {
+ "description": "Ein neues Open-Source-Modell der nächsten Generation im Denkmodus basierend auf Qwen3. Im Vergleich zur vorherigen Version (Tongyi Qianwen 3-235B-A22B-Thinking-2507) wurde die Befehlsbefolgung verbessert und die Modellantworten sind prägnanter zusammengefasst."
+ },
  "qwq": {
  "description": "QwQ ist ein experimentelles Forschungsmodell, das sich auf die Verbesserung der KI-Inferenzfähigkeiten konzentriert."
  },
package/locales/en-US/models.json CHANGED
@@ -2615,6 +2615,12 @@
  "qwen3-max-preview": {
  "description": "The Qwen 3 series Max model preview version shows a significant overall improvement compared to the 2.5 series in general capabilities, including Chinese and English text understanding, complex instruction adherence, subjective open tasks, multilingual capabilities, and tool invocation. The model also exhibits fewer knowledge hallucinations."
  },
+ "qwen3-next-80b-a3b-instruct": {
+ "description": "A new generation of non-thinking mode open-source model based on Qwen3. Compared to the previous version (Tongyi Qianwen 3-235B-A22B-Instruct-2507), it offers better Chinese text comprehension, enhanced logical reasoning abilities, and improved performance in text generation tasks."
+ },
+ "qwen3-next-80b-a3b-thinking": {
+ "description": "A new generation of thinking mode open-source model based on Qwen3. Compared to the previous version (Tongyi Qianwen 3-235B-A22B-Thinking-2507), it features improved instruction-following capabilities and more concise model-generated summaries."
+ },
  "qwq": {
  "description": "QwQ is an experimental research model focused on improving AI reasoning capabilities."
  },
package/locales/es-ES/models.json CHANGED
@@ -2615,6 +2615,12 @@
  "qwen3-max-preview": {
  "description": "Versión preliminar del modelo Max de la serie Qwen 3, que presenta una mejora significativa en la capacidad general en comparación con la serie 2.5, incluyendo comprensión de texto en chino e inglés, cumplimiento de instrucciones complejas, tareas abiertas subjetivas, capacidades multilingües y llamadas a herramientas; además, reduce notablemente las alucinaciones de conocimiento del modelo."
  },
+ "qwen3-next-80b-a3b-instruct": {
+ "description": "Modelo de código abierto de nueva generación basado en Qwen3 en modo no reflexivo, que ofrece una mejor comprensión del texto en chino, mayor capacidad de razonamiento lógico y un mejor desempeño en tareas de generación de texto en comparación con la versión anterior (Tongyi Qianwen 3-235B-A22B-Instruct-2507)."
+ },
+ "qwen3-next-80b-a3b-thinking": {
+ "description": "Modelo de código abierto de nueva generación basado en Qwen3 en modo reflexivo, que mejora la capacidad de seguir instrucciones y ofrece respuestas más concisas en comparación con la versión anterior (Tongyi Qianwen 3-235B-A22B-Thinking-2507)."
+ },
  "qwq": {
  "description": "QwQ es un modelo de investigación experimental que se centra en mejorar la capacidad de razonamiento de la IA."
  },
package/locales/fa-IR/models.json CHANGED
@@ -2615,6 +2615,12 @@
  "qwen3-max-preview": {
  "description": "نسخه پیش‌نمایش مدل Max از سری Qwen 3، نسبت به سری 2.5 بهبود قابل توجهی در توانایی‌های عمومی، درک متن‌های چندزبانه چینی و انگلیسی، پیروی از دستورات پیچیده، انجام وظایف باز و ذهنی، پشتیبانی چندزبانه و فراخوانی ابزارها دارد؛ همچنین خطاهای دانش مدل کاهش یافته است."
  },
+ "qwen3-next-80b-a3b-instruct": {
+ "description": "مدل متن‌باز نسل جدید بدون حالت تفکر مبتنی بر Qwen3، که نسبت به نسخه قبلی (Tongyi Qianwen 3-235B-A22B-Instruct-2507) در درک متن‌های چینی بهتر است، توانایی استدلال منطقی بهبود یافته و عملکرد بهتری در وظایف تولید متن دارد."
+ },
+ "qwen3-next-80b-a3b-thinking": {
+ "description": "مدل متن‌باز نسل جدید با حالت تفکر مبتنی بر Qwen3، که نسبت به نسخه قبلی (Tongyi Qianwen 3-235B-A22B-Thinking-2507) در پیروی از دستورات پیشرفت داشته و پاسخ‌های مدل خلاصه‌تر شده‌اند."
+ },
  "qwq": {
  "description": "QwQ یک مدل تحقیقاتی تجربی است که بر بهبود توانایی استدلال AI تمرکز دارد."
  },
package/locales/fr-FR/models.json CHANGED
@@ -2615,6 +2615,12 @@
  "qwen3-max-preview": {
  "description": "Version Preview du modèle Max de la série Qwen 3, avec une amélioration significative des capacités générales par rapport à la série 2.5, notamment en compréhension de texte général bilingue (chinois et anglais), respect des instructions complexes, tâches ouvertes subjectives, capacités multilingues et appels d'outils ; le modèle présente également moins d'hallucinations de connaissances."
  },
+ "qwen3-next-80b-a3b-instruct": {
+ "description": "Modèle open source de nouvelle génération en mode non réflexif basé sur Qwen3, offrant une meilleure compréhension du texte en chinois, des capacités de raisonnement logique renforcées et de meilleures performances dans les tâches de génération de texte par rapport à la version précédente (Tongyi Qianwen 3-235B-A22B-Instruct-2507)."
+ },
+ "qwen3-next-80b-a3b-thinking": {
+ "description": "Modèle open source de nouvelle génération en mode réflexif basé sur Qwen3, avec une meilleure conformité aux instructions et des réponses plus concises dans les résumés par rapport à la version précédente (Tongyi Qianwen 3-235B-A22B-Thinking-2507)."
+ },
  "qwq": {
  "description": "QwQ est un modèle de recherche expérimental, axé sur l'amélioration des capacités de raisonnement de l'IA."
  },
package/locales/it-IT/models.json CHANGED
@@ -2615,6 +2615,12 @@
  "qwen3-max-preview": {
  "description": "Versione preview del modello Max della serie Qwen 3, che presenta un miglioramento significativo rispetto alla serie 2.5 nelle capacità generali, comprensione del testo in cinese e inglese, rispetto di istruzioni complesse, compiti soggettivi aperti, capacità multilingue e chiamata di strumenti; il modello presenta meno allucinazioni di conoscenza."
  },
+ "qwen3-next-80b-a3b-instruct": {
+ "description": "Modello open source di nuova generazione basato su Qwen3 in modalità non riflessiva, con una migliore comprensione del testo in cinese rispetto alla versione precedente (Tongyi Qianwen 3-235B-A22B-Instruct-2507), capacità di ragionamento logico potenziate e prestazioni superiori nelle attività di generazione di testo."
+ },
+ "qwen3-next-80b-a3b-thinking": {
+ "description": "Modello open source di nuova generazione basato su Qwen3 in modalità riflessiva, con migliorata capacità di seguire le istruzioni rispetto alla versione precedente (Tongyi Qianwen 3-235B-A22B-Thinking-2507) e risposte di sintesi più concise."
+ },
  "qwq": {
  "description": "QwQ è un modello di ricerca sperimentale, focalizzato sul miglioramento delle capacità di ragionamento dell'IA."
  },
package/locales/ja-JP/models.json CHANGED
@@ -2615,6 +2615,12 @@
  "qwen3-max-preview": {
  "description": "通義千問3シリーズMaxモデルのプレビュー版で、2.5シリーズに比べて全体的な汎用能力が大幅に向上し、中日両言語の汎用テキスト理解能力、複雑な指示遵守能力、主観的なオープンタスク能力、多言語能力、ツール呼び出し能力が著しく強化されました。モデルの知識幻覚も減少しています。"
  },
+ "qwen3-next-80b-a3b-instruct": {
+ "description": "Qwen3に基づく次世代の非思考モードのオープンソースモデルで、前バージョン(通義千問3-235B-A22B-Instruct-2507)と比べて中国語テキストの理解能力が向上し、論理推論能力が強化され、テキスト生成タスクのパフォーマンスがより優れています。"
+ },
+ "qwen3-next-80b-a3b-thinking": {
+ "description": "Qwen3に基づく次世代の思考モードのオープンソースモデルで、前バージョン(通義千問3-235B-A22B-Thinking-2507)と比べて指示遵守能力が向上し、モデルの要約応答がより簡潔になっています。"
+ },
  "qwq": {
  "description": "QwQはAIの推論能力を向上させることに特化した実験的研究モデルです。"
  },
package/locales/ko-KR/models.json CHANGED
@@ -2615,6 +2615,12 @@
  "qwen3-max-preview": {
  "description": "통의천문3 시리즈 Max 모델 프리뷰 버전으로, 2.5 시리즈에 비해 전반적인 범용 능력이 크게 향상되었으며, 중영문 범용 텍스트 이해 능력, 복잡한 지시 준수 능력, 주관적 개방형 작업 능력, 다국어 능력, 도구 호출 능력이 모두 크게 강화되었습니다. 모델의 지식 환각도 감소하였습니다."
  },
+ "qwen3-next-80b-a3b-instruct": {
+ "description": "Qwen3 기반의 차세대 비사고 모드 오픈 소스 모델로, 이전 버전(통의천문3-235B-A22B-Instruct-2507)과 비교하여 중국어 텍스트 이해 능력이 향상되었고, 논리 추론 능력이 강화되었으며, 텍스트 생성 작업에서 더 우수한 성능을 보입니다."
+ },
+ "qwen3-next-80b-a3b-thinking": {
+ "description": "Qwen3 기반의 차세대 사고 모드 오픈 소스 모델로, 이전 버전(통의천문3-235B-A22B-Thinking-2507)과 비교하여 명령 준수 능력이 향상되었고, 모델의 요약 응답이 더욱 간결해졌습니다."
+ },
  "qwq": {
  "description": "QwQ는 AI 추론 능력을 향상시키는 데 중점을 둔 실험 연구 모델입니다."
  },
package/locales/nl-NL/models.json CHANGED
@@ -2615,6 +2615,12 @@
  "qwen3-max-preview": {
  "description": "Previewversie van het Qwen 3 Max-model uit de Tongyi Qianwen 3-serie, met aanzienlijke verbeteringen ten opzichte van de 2.5-serie in algemene capaciteiten, tweetalig tekstbegrip (Chinees en Engels), complexe instructienaleving, subjectieve open taken, meertalige vaardigheden en toolaanroepen; het model vertoont minder kennisillusies."
  },
+ "qwen3-next-80b-a3b-instruct": {
+ "description": "Een nieuwe generatie open-source model zonder denkmodus gebaseerd op Qwen3, met verbeterde Chinese tekstbegrip, versterkte logische redeneervaardigheden en betere prestaties bij tekstgeneratietaken vergeleken met de vorige versie (Tongyi Qianwen 3-235B-A22B-Instruct-2507)."
+ },
+ "qwen3-next-80b-a3b-thinking": {
+ "description": "Een nieuwe generatie open-source model met denkmodus gebaseerd op Qwen3, met verbeterde instructienaleving en meer beknopte samenvattende antwoorden vergeleken met de vorige versie (Tongyi Qianwen 3-235B-A22B-Thinking-2507)."
+ },
  "qwq": {
  "description": "QwQ is een experimenteel onderzoeksmodel dat zich richt op het verbeteren van de AI-redeneringscapaciteiten."
  },
package/locales/pl-PL/models.json CHANGED
@@ -2615,6 +2615,12 @@
  "qwen3-max-preview": {
  "description": "Preview modelu Max z serii Qwen 3, który w porównaniu do serii 2.5 znacząco poprawia ogólne zdolności, w tym rozumienie tekstu w języku chińskim i angielskim, przestrzeganie złożonych instrukcji, zdolności do zadań otwartych i subiektywnych, wielojęzyczność oraz wywoływanie narzędzi; model generuje mniej halucynacji wiedzy."
  },
+ "qwen3-next-80b-a3b-instruct": {
+ "description": "Nowa generacja otwartego modelu bez trybu myślenia oparta na Qwen3, która w porównaniu z poprzednią wersją (Tongyi Qianwen 3-235B-A22B-Instruct-2507) cechuje się lepszym rozumieniem tekstu w języku chińskim, wzmocnionymi zdolnościami wnioskowania logicznego oraz lepszą wydajnością w zadaniach generowania tekstu."
+ },
+ "qwen3-next-80b-a3b-thinking": {
+ "description": "Nowa generacja otwartego modelu z trybem myślenia oparta na Qwen3, która w porównaniu z poprzednią wersją (Tongyi Qianwen 3-235B-A22B-Thinking-2507) wykazuje poprawę w przestrzeganiu instrukcji oraz bardziej zwięzłe podsumowania w odpowiedziach modelu."
+ },
  "qwq": {
  "description": "QwQ to eksperymentalny model badawczy, skoncentrowany na zwiększeniu zdolności wnioskowania AI."
  },
package/locales/pt-BR/models.json CHANGED
@@ -2615,6 +2615,12 @@
  "qwen3-max-preview": {
  "description": "Versão Preview do modelo Max da série Qwen 3, com melhorias significativas em relação à série 2.5 em capacidades gerais, compreensão de texto em chinês e inglês, conformidade com instruções complexas, tarefas subjetivas abertas, multilinguismo e chamadas de ferramentas; o modelo apresenta menos alucinações de conhecimento."
  },
+ "qwen3-next-80b-a3b-instruct": {
+ "description": "Modelo open source de nova geração no modo não reflexivo baseado no Qwen3, que apresenta melhor compreensão de texto em chinês, capacidades aprimoradas de raciocínio lógico e desempenho superior em tarefas de geração de texto em comparação com a versão anterior (Tongyi Qianwen 3-235B-A22B-Instruct-2507)."
+ },
+ "qwen3-next-80b-a3b-thinking": {
+ "description": "Modelo open source de nova geração no modo reflexivo baseado no Qwen3, que oferece melhor conformidade com instruções e respostas mais concisas em resumos, em comparação com a versão anterior (Tongyi Qianwen 3-235B-A22B-Thinking-2507)."
+ },
  "qwq": {
  "description": "QwQ é um modelo de pesquisa experimental, focado em melhorar a capacidade de raciocínio da IA."
  },
package/locales/ru-RU/models.json CHANGED
@@ -2615,6 +2615,12 @@
  "qwen3-max-preview": {
  "description": "Предварительная версия модели серии Qwen 3 Max, которая значительно превосходит серию 2.5 по универсальным возможностям, включая понимание текста на китайском и английском языках, выполнение сложных инструкций, способности к субъективным открытым задачам, многоязычность и вызов инструментов; модель демонстрирует меньше искажений знаний."
  },
+ "qwen3-next-80b-a3b-instruct": {
+ "description": "Новая генерация открытой модели без режима мышления на базе Qwen3, которая по сравнению с предыдущей версией (通义千问3-235B-A22B-Instruct-2507) обладает улучшенными способностями понимания китайского текста, усиленными логическими рассуждениями и лучшими результатами в задачах генерации текста."
+ },
+ "qwen3-next-80b-a3b-thinking": {
+ "description": "Новая генерация открытой модели с режимом мышления на базе Qwen3, которая по сравнению с предыдущей версией (通义千问3-235B-A22B-Thinking-2507) демонстрирует улучшенное следование инструкциям и более лаконичные ответы модели."
+ },
  "qwq": {
  "description": "QwQ — это экспериментальная исследовательская модель, сосредоточенная на повышении возможностей вывода ИИ."
  },
package/locales/tr-TR/models.json CHANGED
@@ -2615,6 +2615,12 @@
  "qwen3-max-preview": {
  "description": "Tongyi Qianwen 3 serisi Max modelinin önizleme sürümüdür. 2.5 serisine kıyasla genel yeteneklerde büyük gelişmeler göstermiştir; Çince ve İngilizce genel metin anlama, karmaşık talimat uyumu, öznel açık görevler, çok dilli yetenekler ve araç çağrısı yetenekleri belirgin şekilde artmıştır; model bilgi yanılgıları daha azdır."
  },
+ "qwen3-next-80b-a3b-instruct": {
+ "description": "Qwen3 tabanlı yeni nesil düşünmeden çalışan açık kaynak modeli, önceki sürüme (Tongyi Qianwen 3-235B-A22B-Instruct-2507) kıyasla Çince metin anlama yeteneği daha iyi, mantıksal çıkarım yeteneği geliştirilmiş ve metin üretimi görevlerinde daha başarılıdır."
+ },
+ "qwen3-next-80b-a3b-thinking": {
+ "description": "Qwen3 tabanlı yeni nesil düşünme modlu açık kaynak modeli, önceki sürüme (Tongyi Qianwen 3-235B-A22B-Thinking-2507) kıyasla komutlara uyum yeteneği artırılmış ve modelin özetleyici yanıtları daha özlü hale getirilmiştir."
+ },
  "qwq": {
  "description": "QwQ, AI akıl yürütme yeteneklerini artırmaya odaklanan deneysel bir araştırma modelidir."
  },
package/locales/vi-VN/models.json CHANGED
@@ -2615,6 +2615,12 @@
  "qwen3-max-preview": {
  "description": "Phiên bản xem trước mô hình Max của dòng Qwen 3, so với dòng 2.5 có sự cải thiện lớn về năng lực tổng quát, khả năng hiểu văn bản song ngữ Trung-Anh, tuân thủ chỉ thị phức tạp, thực hiện nhiệm vụ mở chủ quan, đa ngôn ngữ và gọi công cụ đều được tăng cường rõ rệt; mô hình giảm thiểu ảo giác kiến thức."
  },
+ "qwen3-next-80b-a3b-instruct": {
+ "description": "Mô hình mã nguồn mở thế hệ mới không có chế độ suy nghĩ dựa trên Qwen3, so với phiên bản trước (Thông Nghĩa Thiên Vấn 3-235B-A22B-Instruct-2507) có khả năng hiểu văn bản tiếng Trung tốt hơn, năng lực suy luận logic được cải thiện, và hiệu suất trong các nhiệm vụ tạo văn bản cũng tốt hơn."
+ },
+ "qwen3-next-80b-a3b-thinking": {
+ "description": "Mô hình mã nguồn mở thế hệ mới có chế độ suy nghĩ dựa trên Qwen3, so với phiên bản trước (Thông Nghĩa Thiên Vấn 3-235B-A22B-Thinking-2507) có khả năng tuân thủ chỉ dẫn được nâng cao, và các phản hồi tóm tắt của mô hình trở nên ngắn gọn hơn."
+ },
  "qwq": {
  "description": "QwQ là một mô hình nghiên cứu thử nghiệm, tập trung vào việc nâng cao khả năng suy luận của AI."
  },
package/locales/zh-CN/models.json CHANGED
@@ -2615,6 +2615,12 @@
  "qwen3-max-preview": {
  "description": "通义千问3系列Max模型Preview版本,相较2.5系列整体通用能力有大幅度提升,中英文通用文本理解能力、复杂指令遵循能力、主观开放任务能力、多语言能力、工具调用能力均显著增强;模型知识幻觉更少。"
  },
+ "qwen3-next-80b-a3b-instruct": {
+ "description": "基于 Qwen3 的新一代非思考模式开源模型,相较上一版本(通义千问3-235B-A22B-Instruct-2507)中文文本理解能力更佳、逻辑推理能力有增强、文本生成类任务表现更好。"
+ },
+ "qwen3-next-80b-a3b-thinking": {
+ "description": "基于 Qwen3 的新一代思考模式开源模型,相较上一版本(通义千问3-235B-A22B-Thinking-2507)指令遵循能力有提升、模型总结回复更加精简。"
+ },
  "qwq": {
  "description": "QwQ 是 Qwen 系列的推理模型。与传统的指令调优模型相比,QwQ 具备思考和推理的能力,能够在下游任务中,尤其是困难问题上,显著提升性能。QwQ-32B 是中型推理模型,能够在与最先进的推理模型(如 DeepSeek-R1、o1-mini)竞争时取得可观的表现。"
  },
package/locales/zh-TW/models.json CHANGED
@@ -2615,6 +2615,12 @@
  "qwen3-max-preview": {
  "description": "通義千問3系列 Max 模型 Preview 版本,相較 2.5 系列整體通用能力有大幅度提升,中英文通用文本理解能力、複雜指令遵循能力、主觀開放任務能力、多語言能力、工具調用能力均顯著增強;模型知識幻覺更少。"
  },
+ "qwen3-next-80b-a3b-instruct": {
+ "description": "基於 Qwen3 的新一代非思考模式開源模型,相較上一版本(通義千問3-235B-A22B-Instruct-2507)中文文本理解能力更佳、邏輯推理能力有增強、文本生成類任務表現更好。"
+ },
+ "qwen3-next-80b-a3b-thinking": {
+ "description": "基於 Qwen3 的新一代思考模式開源模型,相較上一版本(通義千問3-235B-A22B-Thinking-2507)指令遵循能力有提升、模型總結回覆更加精簡。"
+ },
  "qwq": {
  "description": "QwQ 是一個實驗研究模型,專注於提高 AI 推理能力。"
  },
package/next.config.ts CHANGED
@@ -26,6 +26,9 @@ const nextConfig: NextConfig = {
  emotion: true,
  },
  compress: isProd,
+ eslint: {
+ ignoreDuringBuilds: true,
+ },
  experimental: {
  optimizePackageImports: [
  'emoji-mart',
@@ -195,6 +198,7 @@ const nextConfig: NextConfig = {
  },
  },
  reactStrictMode: true,
+
  redirects: async () => [
  {
  destination: '/sitemap-index.xml',
@@ -264,10 +268,13 @@ const nextConfig: NextConfig = {
  source: '/repos',
  },
  ],
+
  // when external packages in dev mode with turbopack, this config will lead to bundle error
  serverExternalPackages: isProd ? ['@electric-sql/pglite'] : undefined,
-
  transpilePackages: ['pdfjs-dist', 'mermaid'],
+ typescript: {
+ ignoreBuildErrors: true,
+ },

  webpack(config) {
  config.experiments = {
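Taken together, the next.config.ts hunks above amount to two new build-relaxation options plus whitespace churn. Both eslint.ignoreDuringBuilds and typescript.ignoreBuildErrors are standard, documented Next.js config fields; reduced to just the new settings (a minimal sketch, not the project's full config), the change looks like this:

```ts
import type { NextConfig } from 'next';

const nextConfig: NextConfig = {
  eslint: {
    // Do not fail `next build` on ESLint errors.
    ignoreDuringBuilds: true,
  },
  typescript: {
    // Do not fail `next build` on TypeScript type errors.
    ignoreBuildErrors: true,
  },
};

export default nextConfig;
```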