@lobehub/chat 1.48.1 → 1.48.2

Files changed (56)
  1. package/CHANGELOG.md +26 -0
  2. package/README.ja-JP.md +14 -14
  3. package/README.md +14 -14
  4. package/README.zh-CN.md +14 -14
  5. package/changelog/v1.json +9 -0
  6. package/docs/changelog/2025-01-22-new-ai-provider.mdx +25 -0
  7. package/docs/changelog/2025-01-22-new-ai-provider.zh-CN.mdx +23 -0
  8. package/docs/changelog/index.json +23 -17
  9. package/docs/self-hosting/advanced/auth/next-auth/casdoor.mdx +1 -0
  10. package/docs/self-hosting/advanced/auth/next-auth/casdoor.zh-CN.mdx +1 -0
  11. package/docs/self-hosting/advanced/knowledge-base.mdx +1 -0
  12. package/docs/self-hosting/advanced/knowledge-base.zh-CN.mdx +1 -0
  13. package/docs/self-hosting/advanced/observability/langfuse.mdx +28 -45
  14. package/docs/self-hosting/server-database.mdx +1 -1
  15. package/locales/ar/changelog.json +2 -2
  16. package/locales/ar/models.json +27 -0
  17. package/locales/bg-BG/changelog.json +2 -2
  18. package/locales/bg-BG/models.json +27 -0
  19. package/locales/de-DE/changelog.json +2 -2
  20. package/locales/de-DE/models.json +27 -0
  21. package/locales/en-US/changelog.json +2 -2
  22. package/locales/en-US/models.json +27 -0
  23. package/locales/es-ES/changelog.json +2 -2
  24. package/locales/es-ES/models.json +27 -0
  25. package/locales/fa-IR/changelog.json +2 -2
  26. package/locales/fa-IR/models.json +27 -0
  27. package/locales/fr-FR/changelog.json +2 -2
  28. package/locales/fr-FR/models.json +27 -0
  29. package/locales/it-IT/changelog.json +2 -2
  30. package/locales/it-IT/models.json +27 -0
  31. package/locales/ja-JP/changelog.json +2 -2
  32. package/locales/ja-JP/models.json +27 -0
  33. package/locales/ko-KR/changelog.json +2 -2
  34. package/locales/ko-KR/models.json +27 -0
  35. package/locales/nl-NL/changelog.json +2 -2
  36. package/locales/nl-NL/models.json +27 -0
  37. package/locales/pl-PL/changelog.json +2 -2
  38. package/locales/pl-PL/models.json +27 -0
  39. package/locales/pt-BR/changelog.json +2 -2
  40. package/locales/pt-BR/models.json +27 -0
  41. package/locales/ru-RU/changelog.json +2 -2
  42. package/locales/ru-RU/models.json +27 -0
  43. package/locales/tr-TR/changelog.json +2 -2
  44. package/locales/tr-TR/models.json +27 -0
  45. package/locales/vi-VN/changelog.json +2 -2
  46. package/locales/vi-VN/models.json +27 -0
  47. package/locales/zh-CN/changelog.json +2 -2
  48. package/locales/zh-CN/models.json +28 -1
  49. package/locales/zh-TW/changelog.json +2 -2
  50. package/locales/zh-TW/models.json +27 -0
  51. package/package.json +1 -1
  52. package/src/app/@modal/(.)changelog/modal/features/Pagination.tsx +1 -1
  53. package/src/libs/agent-runtime/qwen/index.ts +3 -0
  54. package/src/locales/default/changelog.ts +1 -1
  55. package/src/server/services/changelog/index.test.ts +1 -1
  56. package/src/server/services/changelog/index.ts +1 -1
package/locales/zh-TW/changelog.json CHANGED
@@ -8,8 +8,8 @@
   "allChangelog": "查看所有更新日誌",
   "description": "持續追蹤 {{appName}} 的新功能和改進",
   "pagination": {
-    "older": "查看歷史變更",
-    "prev": "上一頁"
+    "next": "下一頁",
+    "older": "查看歷史變更"
   },
   "readDetails": "閱讀詳情",
   "title": "更新日誌",
package/locales/zh-TW/models.json CHANGED
@@ -809,9 +809,18 @@
   "hunyuan-functioncall": {
     "description": "混元最新 MOE 架構 FunctionCall 模型,經過高質量的 FunctionCall 數據訓練,上下文窗口達 32K,在多個維度的評測指標上處於領先。"
   },
+  "hunyuan-large": {
+    "description": "Hunyuan-large 模型總參數量約 389B,激活參數量約 52B,是當前業界參數規模最大、效果最好的 Transformer 架構的開源 MoE 模型。"
+  },
+  "hunyuan-large-longcontext": {
+    "description": "擅長處理長文任務如文檔摘要和文檔問答等,同時也具備處理通用文本生成任務的能力。在長文本的分析和生成上表現優異,能有效應對複雜和詳盡的長文內容處理需求。"
+  },
   "hunyuan-lite": {
     "description": "升級為 MOE 結構,上下文窗口為 256k,在 NLP、代碼、數學、行業等多項評測集上領先眾多開源模型。"
   },
+  "hunyuan-lite-vision": {
+    "description": "混元最新7B多模態模型,上下文窗口32K,支持中英文場景的多模態對話、圖像物體識別、文檔表格理解、多模態數學等,在多個維度上評測指標優於7B競品模型。"
+  },
   "hunyuan-pro": {
     "description": "萬億級參數規模 MOE-32K 長文模型。在各種 benchmark 上達到絕對領先的水平,具備複雜指令和推理能力,支持 functioncall,在多語言翻譯、金融法律醫療等領域應用重點優化。"
   },
@@ -824,9 +833,24 @@
   "hunyuan-standard-256K": {
     "description": "採用更優的路由策略,同時緩解了負載均衡和專家趨同的問題。長文方面,大海撈針指標達到 99.9%。MOE-256K 在長度和效果上進一步突破,極大地擴展了可輸入長度。"
   },
+  "hunyuan-standard-vision": {
+    "description": "混元最新多模態模型,支持多語種作答,中英文能力均衡。"
+  },
   "hunyuan-turbo": {
     "description": "混元全新一代大語言模型的預覽版,採用全新的混合專家模型(MoE)結構,相較於 hunyuan-pro 推理效率更快,效果表現更強。"
   },
+  "hunyuan-turbo-20241120": {
+    "description": "hunyuan-turbo 2024 年 11 月 20 日固定版本,介於 hunyuan-turbo 和 hunyuan-turbo-latest 之間的一個版本。"
+  },
+  "hunyuan-turbo-20241223": {
+    "description": "本版本優化:數據指令scaling,大幅提升模型通用泛化能力;大幅提升數學、程式碼、邏輯推理能力;優化文本理解字詞理解相關能力;優化文本創作內容生成質量"
+  },
+  "hunyuan-turbo-latest": {
+    "description": "通用體驗優化,包括NLP理解、文本創作、閒聊、知識問答、翻譯、領域等;提升擬人性,優化模型情商;提升意圖模糊時模型主動澄清能力;提升字詞解析類問題的處理能力;提升創作的質量和可互動性;提升多輪體驗。"
+  },
+  "hunyuan-turbo-vision": {
+    "description": "混元新一代視覺語言旗艦大模型,採用全新的混合專家模型(MoE)結構,在圖文理解相關的基礎識別、內容創作、知識問答、分析推理等能力上相比前一代模型全面提升。"
+  },
   "hunyuan-vision": {
     "description": "混元最新多模態模型,支持圖片 + 文本輸入生成文本內容。"
   },
@@ -1193,6 +1217,9 @@
   "pro-128k": {
     "description": "Spark Pro 128K 配置了特大上下文處理能力,能夠處理多達128K的上下文信息,特別適合需通篇分析和長期邏輯關聯處理的長文內容,可在複雜文本溝通中提供流暢一致的邏輯與多樣的引用支持。"
   },
+  "qvq-72b-preview": {
+    "description": "QVQ模型是由 Qwen 團隊開發的實驗性研究模型,專注於提升視覺推理能力,尤其在數學推理領域。"
+  },
   "qwen-coder-plus-latest": {
     "description": "通義千問代碼模型。"
   },
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@lobehub/chat",
-  "version": "1.48.1",
+  "version": "1.48.2",
   "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
   "keywords": [
     "framework",
package/src/app/@modal/(.)changelog/modal/features/Pagination.tsx CHANGED
@@ -41,7 +41,7 @@ const Pagination = memo(() => {
     >
       <Flexbox align={'flex-end'} className={styles.button} gap={4} padding={16}>
         <Flexbox align={'center'} className={styles.desc} gap={4} horizontal>
-          {t('pagination.prev')}
+          {t('pagination.next')}
           <Icon icon={ChevronRightIcon} />
         </Flexbox>
         <div className={styles.title}>{t('pagination.older')}</div>
package/src/libs/agent-runtime/qwen/index.ts CHANGED
@@ -60,6 +60,9 @@ export const LobeQwenAI = LobeOpenAICompatibleFactory({
           search_strategy: process.env.QWEN_SEARCH_STRATEGY || 'standard', // standard or pro
         }
       }),
+      ...(payload.tools && {
+        parallel_tool_calls: true,
+      }),
     } as any;
   },
   handleStream: QwenAIStream,
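The three added lines spread `parallel_tool_calls: true` into the request body only when the caller supplies tool definitions, presumably so that tool-bearing requests opt into parallel tool calling on the Qwen side while plain chat requests stay unchanged. Below is a minimal sketch of that conditional-spread idiom, assuming a simplified payload shape; `ChatPayload` and `buildQwenParams` are illustrative names, not the package's actual types.

```ts
// Sketch only — not the actual LobeQwenAI factory.
interface ChatPayload {
  messages: { content: string; role: 'assistant' | 'system' | 'user' }[];
  model: string;
  tools?: { function: { name: string; parameters?: object }; type: 'function' }[];
}

const buildQwenParams = (payload: ChatPayload) => ({
  messages: payload.messages,
  model: payload.model,
  // When `tools` is undefined, the `&&` expression evaluates to `undefined`,
  // and spreading `undefined` into an object literal is a no-op, so the extra
  // field is only sent when tool definitions are present.
  ...(payload.tools && {
    parallel_tool_calls: true,
  }),
});

// With tools    -> { messages, model, parallel_tool_calls: true }
// Without tools -> { messages, model }
```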
package/src/locales/default/changelog.ts CHANGED
@@ -8,8 +8,8 @@ export default {
   allChangelog: '查看所有更新日志',
   description: '持续追踪 {{appName}} 的新功能和改进',
   pagination: {
+    next: '下一页',
     older: '查看历史变更',
-    prev: '上一页',
   },
   readDetails: '阅读详情',
   title: '更新日志',
package/src/server/services/changelog/index.test.ts CHANGED
@@ -229,7 +229,7 @@ describe('ChangelogService', () => {
     it('should format version range correctly', () => {
       // @ts-ignore - accessing private method for testing
       const result = service.formatVersionRange(['1.0.0', '1.1.0']);
-      expect(result).toEqual(['1.1.0', '1.0.0']);
+      expect(result).toEqual(['1.0.0', '1.1.0']);
     });

     it('should return single version as is', () => {
package/src/server/services/changelog/index.ts CHANGED
@@ -166,7 +166,7 @@ export class ChangelogService {
     const minVersion = semver.lt(v1, v2) ? v1 : v2;
     const maxVersion = semver.gt(v1, v2) ? v1 : v2;

-    return [maxVersion, minVersion];
+    return [minVersion, maxVersion];
   }

   private genUrl(path: string) {
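Taken together with the test change above, the fix reverses the returned tuple so `formatVersionRange` now yields `[older, newer]`. Here is a standalone sketch of the corrected behaviour; it is not the actual ChangelogService class, and the single-element guard is an assumption inferred from the neighbouring "should return single version as is" test.

```ts
import semver from 'semver';

// Sketch of the corrected ordering in @lobehub/chat 1.48.2.
const formatVersionRange = (versions: string[]): string[] => {
  // Assumed guard: anything other than a two-element range is returned as-is.
  if (versions.length !== 2) return versions;

  const [v1, v2] = versions;
  const minVersion = semver.lt(v1, v2) ? v1 : v2;
  const maxVersion = semver.gt(v1, v2) ? v1 : v2;

  // Was [maxVersion, minVersion] before 1.48.2.
  return [minVersion, maxVersion];
};

console.log(formatVersionRange(['1.0.0', '1.1.0'])); // ['1.0.0', '1.1.0']
console.log(formatVersionRange(['2.0.0'])); // ['2.0.0']
```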