@lobehub/chat 1.47.22 → 1.48.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (98)
  1. package/CHANGELOG.md +50 -0
  2. package/changelog/v1.json +18 -0
  3. package/locales/ar/chat.json +4 -0
  4. package/locales/ar/components.json +1 -0
  5. package/locales/ar/models.json +6 -0
  6. package/locales/bg-BG/chat.json +4 -0
  7. package/locales/bg-BG/components.json +1 -0
  8. package/locales/bg-BG/models.json +6 -0
  9. package/locales/de-DE/chat.json +4 -0
  10. package/locales/de-DE/components.json +1 -0
  11. package/locales/de-DE/models.json +6 -0
  12. package/locales/en-US/chat.json +4 -0
  13. package/locales/en-US/components.json +1 -0
  14. package/locales/en-US/models.json +6 -0
  15. package/locales/es-ES/chat.json +4 -0
  16. package/locales/es-ES/components.json +1 -0
  17. package/locales/es-ES/models.json +6 -0
  18. package/locales/fa-IR/chat.json +4 -0
  19. package/locales/fa-IR/components.json +1 -0
  20. package/locales/fa-IR/models.json +6 -0
  21. package/locales/fr-FR/chat.json +4 -0
  22. package/locales/fr-FR/components.json +1 -0
  23. package/locales/fr-FR/models.json +6 -0
  24. package/locales/it-IT/chat.json +4 -0
  25. package/locales/it-IT/components.json +1 -0
  26. package/locales/it-IT/models.json +6 -0
  27. package/locales/ja-JP/chat.json +4 -0
  28. package/locales/ja-JP/components.json +1 -0
  29. package/locales/ja-JP/models.json +6 -0
  30. package/locales/ko-KR/chat.json +4 -0
  31. package/locales/ko-KR/components.json +1 -0
  32. package/locales/ko-KR/models.json +6 -0
  33. package/locales/nl-NL/chat.json +4 -0
  34. package/locales/nl-NL/components.json +1 -0
  35. package/locales/nl-NL/models.json +6 -0
  36. package/locales/pl-PL/chat.json +4 -0
  37. package/locales/pl-PL/components.json +1 -0
  38. package/locales/pl-PL/models.json +6 -0
  39. package/locales/pt-BR/chat.json +4 -0
  40. package/locales/pt-BR/components.json +1 -0
  41. package/locales/pt-BR/models.json +6 -0
  42. package/locales/ru-RU/chat.json +4 -0
  43. package/locales/ru-RU/components.json +1 -0
  44. package/locales/ru-RU/models.json +6 -0
  45. package/locales/tr-TR/chat.json +4 -0
  46. package/locales/tr-TR/components.json +1 -0
  47. package/locales/tr-TR/models.json +6 -0
  48. package/locales/vi-VN/chat.json +4 -0
  49. package/locales/vi-VN/components.json +1 -0
  50. package/locales/vi-VN/models.json +6 -0
  51. package/locales/zh-CN/chat.json +4 -0
  52. package/locales/zh-CN/components.json +1 -0
  53. package/locales/zh-CN/modelProvider.json +2 -2
  54. package/locales/zh-CN/models.json +7 -1
  55. package/locales/zh-TW/chat.json +4 -0
  56. package/locales/zh-TW/components.json +1 -0
  57. package/locales/zh-TW/models.json +6 -0
  58. package/package.json +1 -1
  59. package/src/components/ModelSelect/index.tsx +16 -1
  60. package/src/config/aiModels/deepseek.ts +3 -0
  61. package/src/config/aiModels/hunyuan.ts +132 -12
  62. package/src/config/aiModels/qwen.ts +19 -2
  63. package/src/config/modelProviders/hunyuan.ts +2 -0
  64. package/src/database/client/migrations.json +13 -2
  65. package/src/database/migrations/0014_add_message_reasoning.sql +1 -0
  66. package/src/database/migrations/meta/0014_snapshot.json +3961 -0
  67. package/src/database/migrations/meta/_journal.json +7 -0
  68. package/src/database/schemas/message.ts +2 -3
  69. package/src/database/server/models/__tests__/message.test.ts +5 -4
  70. package/src/database/server/models/message.ts +35 -13
  71. package/src/database/server/models/topic.ts +3 -2
  72. package/src/features/Conversation/Messages/Assistant/Reasoning/index.tsx +123 -0
  73. package/src/features/Conversation/Messages/Assistant/index.tsx +8 -1
  74. package/src/features/Conversation/components/MarkdownElements/LobeThinking/index.ts +2 -2
  75. package/src/libs/agent-runtime/deepseek/index.ts +1 -1
  76. package/src/libs/agent-runtime/google/index.ts +7 -5
  77. package/src/libs/agent-runtime/hunyuan/index.ts +24 -0
  78. package/src/libs/agent-runtime/qwen/index.ts +8 -3
  79. package/src/libs/agent-runtime/stepfun/index.ts +7 -1
  80. package/src/libs/agent-runtime/utils/streams/openai.test.ts +203 -0
  81. package/src/libs/agent-runtime/utils/streams/openai.ts +8 -1
  82. package/src/libs/agent-runtime/utils/streams/protocol.ts +1 -1
  83. package/src/locales/default/chat.ts +4 -0
  84. package/src/locales/default/components.ts +1 -0
  85. package/src/server/routers/lambda/message.ts +4 -2
  86. package/src/services/message/client.test.ts +1 -1
  87. package/src/services/message/type.ts +1 -1
  88. package/src/store/chat/selectors.ts +1 -0
  89. package/src/store/chat/slices/aiChat/actions/generateAIChat.ts +60 -14
  90. package/src/store/chat/slices/aiChat/initialState.ts +5 -0
  91. package/src/store/chat/slices/aiChat/selectors.ts +9 -0
  92. package/src/store/chat/slices/message/action.ts +4 -1
  93. package/src/types/aiModel.ts +5 -14
  94. package/src/types/message/base.ts +59 -0
  95. package/src/types/message/chat.ts +136 -0
  96. package/src/types/message/index.ts +2 -135
  97. package/src/utils/fetch/__tests__/fetchSSE.test.ts +34 -0
  98. package/src/utils/fetch/fetchSSE.ts +38 -3
@@ -1328,6 +1328,9 @@
1328
1328
  "step-1.5v-mini": {
1329
1329
  "description": "Este modelo possui uma poderosa capacidade de compreensão de vídeo."
1330
1330
  },
1331
+ "step-1o-vision-32k": {
1332
+ "description": "Este modelo possui uma poderosa capacidade de compreensão de imagens. Em comparação com a série de modelos step-1v, apresenta um desempenho visual superior."
1333
+ },
1331
1334
  "step-1v-32k": {
1332
1335
  "description": "Suporta entradas visuais, aprimorando a experiência de interação multimodal."
1333
1336
  },
@@ -1337,6 +1340,9 @@
1337
1340
  "step-2-16k": {
1338
1341
  "description": "Suporta interações de contexto em larga escala, adequado para cenários de diálogo complexos."
1339
1342
  },
1343
+ "step-2-mini": {
1344
+ "description": "Um modelo de grande escala de alta velocidade baseado na nova arquitetura de atenção auto-desenvolvida MFA, alcançando resultados semelhantes ao step1 com um custo muito baixo, enquanto mantém uma maior taxa de transferência e um tempo de resposta mais rápido. Capaz de lidar com tarefas gerais, possui especialização em habilidades de codificação."
1345
+ },
1340
1346
  "taichu2_mm": {
1341
1347
  "description": "Integra capacidades de compreensão de imagem, transferência de conhecimento e atribuição lógica, destacando-se no campo de perguntas e respostas baseadas em texto e imagem."
1342
1348
  },
@@ -82,6 +82,10 @@
82
82
  }
83
83
  }
84
84
  },
85
+ "reasoning": {
86
+ "thinking": "Глубокое размышление",
87
+ "thought": "Глубоко размышлял (время: {{duration}} секунд)"
88
+ },
85
89
  "regenerate": "Сгенерировать заново",
86
90
  "roleAndArchive": "Роль и архив",
87
91
  "searchAgentPlaceholder": "Поиск помощника...",
@@ -76,6 +76,7 @@
76
76
  "custom": "Пользовательская модель по умолчанию поддерживает как вызов функций, так и распознавание изображений. Пожалуйста, проверьте доступность указанных возможностей в вашем случае",
77
77
  "file": "Эта модель поддерживает загрузку и распознавание файлов",
78
78
  "functionCall": "Эта модель поддерживает вызов функций",
79
+ "reasoning": "Эта модель поддерживает глубокое мышление",
79
80
  "tokens": "Эта модель поддерживает до {{tokens}} токенов в одной сессии",
80
81
  "vision": "Эта модель поддерживает распознавание изображений"
81
82
  },
@@ -1328,6 +1328,9 @@
1328
1328
  "step-1.5v-mini": {
1329
1329
  "description": "Эта модель обладает мощными возможностями понимания видео."
1330
1330
  },
1331
+ "step-1o-vision-32k": {
1332
+ "description": "Эта модель обладает мощными способностями к пониманию изображений. По сравнению с серией моделей step-1v, она имеет более высокую визуальную производительность."
1333
+ },
1331
1334
  "step-1v-32k": {
1332
1335
  "description": "Поддерживает визуальный ввод, улучшая мультимодальный опыт взаимодействия."
1333
1336
  },
@@ -1337,6 +1340,9 @@
1337
1340
  "step-2-16k": {
1338
1341
  "description": "Поддерживает масштабные взаимодействия контекста, подходит для сложных диалоговых сценариев."
1339
1342
  },
1343
+ "step-2-mini": {
1344
+ "description": "Супербыстрая большая модель на основе новой самодельной архитектуры внимания MFA, достигающая аналогичных результатов, как step1, при очень низких затратах, одновременно обеспечивая более высокую пропускную способность и более быстрое время отклика. Способна обрабатывать общие задачи и обладает особыми навыками в кодировании."
1345
+ },
1340
1346
  "taichu2_mm": {
1341
1347
  "description": "Объединяет способности понимания изображений, переноса знаний, логической атрибуции и демонстрирует выдающиеся результаты в области вопросов и ответов на основе текста и изображений."
1342
1348
  },
@@ -82,6 +82,10 @@
82
82
  }
83
83
  }
84
84
  },
85
+ "reasoning": {
86
+ "thinking": "Derin düşünme aşamasında",
87
+ "thought": "Derinlemesine düşündü (geçen süre {{duration}} saniye)"
88
+ },
85
89
  "regenerate": "Tekrarla",
86
90
  "roleAndArchive": "Rol ve Arşiv",
87
91
  "searchAgentPlaceholder": "Arama Asistanı...",
@@ -76,6 +76,7 @@
76
76
  "custom": "Özel model, varsayılan olarak hem fonksiyon çağrısını hem de görüntü tanımayı destekler, yukarıdaki yeteneklerin kullanılabilirliğini doğrulamak için lütfen gerçek durumu kontrol edin",
77
77
  "file": "Bu model dosya yükleme ve tanımayı destekler",
78
78
  "functionCall": "Bu model fonksiyon çağrısını destekler",
79
+ "reasoning": "Bu model derin düşünmeyi destekler",
79
80
  "tokens": "Bu model tek bir oturumda en fazla {{tokens}} Token destekler",
80
81
  "vision": "Bu model görüntü tanımıyı destekler"
81
82
  },
@@ -1328,6 +1328,9 @@
1328
1328
  "step-1.5v-mini": {
1329
1329
  "description": "Bu model, güçlü bir video anlama yeteneğine sahiptir."
1330
1330
  },
1331
+ "step-1o-vision-32k": {
1332
+ "description": "Bu model, güçlü bir görüntü anlama yeteneğine sahiptir. Step-1v serisi modellere kıyasla daha güçlü bir görsel performansa sahiptir."
1333
+ },
1331
1334
  "step-1v-32k": {
1332
1335
  "description": "Görsel girdi desteği sunar, çok modlu etkileşim deneyimini artırır."
1333
1336
  },
@@ -1337,6 +1340,9 @@
1337
1340
  "step-2-16k": {
1338
1341
  "description": "Büyük ölçekli bağlam etkileşimlerini destekler, karmaşık diyalog senaryoları için uygundur."
1339
1342
  },
1343
+ "step-2-mini": {
1344
+ "description": "Yeni nesil kendi geliştirdiğimiz MFA Attention mimarisine dayanan hızlı büyük model, çok düşük maliyetle step1 ile benzer sonuçlar elde ederken, daha yüksek bir throughput ve daha hızlı yanıt süresi sağlıyor. Genel görevleri işleyebilme yeteneğine sahip olup, kodlama yeteneklerinde uzmanlık gösteriyor."
1345
+ },
1340
1346
  "taichu2_mm": {
1341
1347
  "description": "Görüntü anlama, bilgi transferi, mantıksal atıf gibi yetenekleri birleştirerek, metin ve görüntü ile soru-cevap alanında öne çıkmaktadır."
1342
1348
  },
@@ -82,6 +82,10 @@
82
82
  }
83
83
  }
84
84
  },
85
+ "reasoning": {
86
+ "thinking": "Đang suy nghĩ sâu sắc",
87
+ "thought": "Đã suy nghĩ sâu sắc (thời gian: {{duration}} giây)"
88
+ },
85
89
  "regenerate": "Tạo lại",
86
90
  "roleAndArchive": "Vai trò và lưu trữ",
87
91
  "searchAgentPlaceholder": "Trợ lý tìm kiếm...",
@@ -76,6 +76,7 @@
76
76
  "custom": "Mô hình tùy chỉnh, mặc định hỗ trợ cả cuộc gọi hàm và nhận diện hình ảnh, vui lòng xác minh khả năng sử dụng của chúng theo tình hình cụ thể",
77
77
  "file": "Mô hình này hỗ trợ tải lên và nhận diện tệp",
78
78
  "functionCall": "Mô hình này hỗ trợ cuộc gọi hàm (Function Call)",
79
+ "reasoning": "Mô hình này hỗ trợ tư duy sâu sắc",
79
80
  "tokens": "Mỗi phiên của mô hình này hỗ trợ tối đa {{tokens}} Tokens",
80
81
  "vision": "Mô hình này hỗ trợ nhận diện hình ảnh"
81
82
  },
@@ -1328,6 +1328,9 @@
1328
1328
  "step-1.5v-mini": {
1329
1329
  "description": "Mô hình này có khả năng hiểu video mạnh mẽ."
1330
1330
  },
1331
+ "step-1o-vision-32k": {
1332
+ "description": "Mô hình này có khả năng hiểu hình ảnh mạnh mẽ. So với các mô hình trong series step-1v, nó có hiệu suất thị giác vượt trội hơn."
1333
+ },
1331
1334
  "step-1v-32k": {
1332
1335
  "description": "Hỗ trợ đầu vào hình ảnh, tăng cường trải nghiệm tương tác đa mô hình."
1333
1336
  },
@@ -1337,6 +1340,9 @@
1337
1340
  "step-2-16k": {
1338
1341
  "description": "Hỗ trợ tương tác ngữ cảnh quy mô lớn, phù hợp cho các tình huống đối thoại phức tạp."
1339
1342
  },
1343
+ "step-2-mini": {
1344
+ "description": "Mô hình lớn siêu tốc dựa trên kiến trúc Attention tự nghiên cứu thế hệ mới MFA, đạt được hiệu quả tương tự như step1 với chi phí rất thấp, đồng thời duy trì thông lượng cao hơn và độ trễ phản hồi nhanh hơn. Có khả năng xử lý các nhiệm vụ chung, đặc biệt có năng lực trong lập trình."
1345
+ },
1340
1346
  "taichu2_mm": {
1341
1347
  "description": "Kết hợp khả năng hiểu hình ảnh, chuyển giao kiến thức, suy luận logic, nổi bật trong lĩnh vực hỏi đáp hình ảnh và văn bản."
1342
1348
  },
@@ -82,6 +82,10 @@
82
82
  }
83
83
  }
84
84
  },
85
+ "reasoning": {
86
+ "thinking": "深度思考中",
87
+ "thought": "已深度思考(用时 {{duration}} 秒)"
88
+ },
85
89
  "regenerate": "重新生成",
86
90
  "roleAndArchive": "角色与记录",
87
91
  "searchAgentPlaceholder": "搜索助手...",
@@ -76,6 +76,7 @@
76
76
  "custom": "自定义模型,默认设定同时支持函数调用与视觉识别,请根据实际情况验证上述能力的可用性",
77
77
  "file": "该模型支持上传文件读取与识别",
78
78
  "functionCall": "该模型支持函数调用(Function Call)",
79
+ "reasoning": "该模型支持深度思考",
79
80
  "tokens": "该模型单个会话最多支持 {{tokens}} Tokens",
80
81
  "vision": "该模型支持视觉识别"
81
82
  },
@@ -198,9 +198,9 @@
198
198
  },
199
199
  "baseURL": {
200
200
  "desc": "必须包含 http(s)://",
201
+ "invalid": "请输入合法的 URL",
201
202
  "placeholder": "https://your-proxy-url.com/v1",
202
- "title": "API 代理地址",
203
- "invalid": "请输入合法的 URL"
203
+ "title": "API 代理地址"
204
204
  },
205
205
  "checker": {
206
206
  "button": "检查",
@@ -1328,6 +1328,9 @@
1328
1328
  "step-1.5v-mini": {
1329
1329
  "description": "该模型拥有强大的视频理解能力。"
1330
1330
  },
1331
+ "step-1o-vision-32k": {
1332
+ "description": "该模型拥有强大的图像理解能力。相比于 step-1v 系列模型,拥有更强的视觉性能。"
1333
+ },
1331
1334
  "step-1v-32k": {
1332
1335
  "description": "支持视觉输入,增强多模态交互体验。"
1333
1336
  },
@@ -1335,7 +1338,10 @@
1335
1338
  "description": "小型视觉模型,适合基本的图文任务。"
1336
1339
  },
1337
1340
  "step-2-16k": {
1338
- "description": "支持大规模上下文交互,适合复杂对话场景。"
1341
+ "description": "step-2模型的实验版本,包含最新的特性,滚动更新中。不推荐在正式生产环境使用。"
1342
+ },
1343
+ "step-2-mini": {
1344
+ "description": "基于新一代自研Attention架构MFA的极速大模型,用极低成本达到和step1类似的效果,同时保持了更高的吞吐和更快响应时延。能够处理通用任务,在代码能力上具备特长。"
1339
1345
  },
1340
1346
  "taichu2_mm": {
1341
1347
  "description": "融合了图像理解、知识迁移、逻辑归因等能力,在图文问答领域表现突出"
@@ -82,6 +82,10 @@
82
82
  }
83
83
  }
84
84
  },
85
+ "reasoning": {
86
+ "thinking": "深入思考中",
87
+ "thought": "已深度思考(用時 {{duration}} 秒)"
88
+ },
85
89
  "regenerate": "重新生成",
86
90
  "roleAndArchive": "角色與記錄",
87
91
  "searchAgentPlaceholder": "搜尋助手...",
@@ -76,6 +76,7 @@
76
76
  "custom": "自訂模型,預設支援函式呼叫與視覺辨識,請根據實際情況驗證上述能力的可用性",
77
77
  "file": "該模型支援上傳檔案讀取與辨識",
78
78
  "functionCall": "該模型支援函式呼叫(Function Call)",
79
+ "reasoning": "該模型支持深度思考",
79
80
  "tokens": "該模型單一會話最多支援 {{tokens}} Tokens",
80
81
  "vision": "該模型支援視覺辨識"
81
82
  },
@@ -1328,6 +1328,9 @@
1328
1328
  "step-1.5v-mini": {
1329
1329
  "description": "該模型擁有強大的視頻理解能力。"
1330
1330
  },
1331
+ "step-1o-vision-32k": {
1332
+ "description": "該模型擁有強大的圖像理解能力。相比於 step-1v 系列模型,擁有更強的視覺性能。"
1333
+ },
1331
1334
  "step-1v-32k": {
1332
1335
  "description": "支持視覺輸入,增強多模態交互體驗。"
1333
1336
  },
@@ -1337,6 +1340,9 @@
1337
1340
  "step-2-16k": {
1338
1341
  "description": "支持大規模上下文交互,適合複雜對話場景。"
1339
1342
  },
1343
+ "step-2-mini": {
1344
+ "description": "基於新一代自研Attention架構MFA的極速大模型,用極低成本達到和step1類似的效果,同時保持了更高的吞吐和更快響應時延。能夠處理通用任務,在程式碼能力上具備特長。"
1345
+ },
1340
1346
  "taichu2_mm": {
1341
1347
  "description": "融合了圖像理解、知識遷移、邏輯歸因等能力,在圖文問答領域表現突出"
1342
1348
  },
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@lobehub/chat",
3
- "version": "1.47.22",
3
+ "version": "1.48.0",
4
4
  "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
5
5
  "keywords": [
6
6
  "framework",
@@ -2,7 +2,7 @@ import { IconAvatarProps, ModelIcon, ProviderIcon } from '@lobehub/icons';
2
2
  import { Avatar, Icon, Tooltip } from '@lobehub/ui';
3
3
  import { Typography } from 'antd';
4
4
  import { createStyles } from 'antd-style';
5
- import { Infinity, LucideEye, LucidePaperclip, ToyBrick } from 'lucide-react';
5
+ import { Infinity, AtomIcon, LucideEye, LucidePaperclip, ToyBrick } from 'lucide-react';
6
6
  import numeral from 'numeral';
7
7
  import { rgba } from 'polished';
8
8
  import { FC, memo } from 'react';
@@ -45,6 +45,10 @@ const useStyles = createStyles(({ css, token }) => ({
45
45
  color: ${token.green};
46
46
  background: ${token.green1};
47
47
  `,
48
+ tagPurple: css`
49
+ color: ${token.purple};
50
+ background: ${token.purple1};
51
+ `,
48
52
  token: css`
49
53
  width: 36px;
50
54
  height: 20px;
@@ -107,6 +111,17 @@ export const ModelInfoTags = memo<ModelInfoTagsProps>(
107
111
  </div>
108
112
  </Tooltip>
109
113
  )}
114
+ {model.reasoning && (
115
+ <Tooltip
116
+ placement={placement}
117
+ styles={{ root: { pointerEvents: 'none' } }}
118
+ title={t('ModelSelect.featureTag.reasoning')}
119
+ >
120
+ <div className={cx(styles.tag, styles.tagPurple)} style={{ cursor: 'pointer' }}>
121
+ <Icon icon={AtomIcon} />
122
+ </div>
123
+ </Tooltip>
124
+ )}
110
125
  {typeof model.contextWindowTokens === 'number' && (
111
126
  <Tooltip
112
127
  placement={placement}
@@ -21,6 +21,9 @@ const deepseekChatModels: AIChatModelCard[] = [
21
21
  type: 'chat',
22
22
  },
23
23
  {
24
+ abilities: {
25
+ reasoning: true,
26
+ },
24
27
  contextWindowTokens: 65_536,
25
28
  description:
26
29
  'DeepSeek 推出的推理模型。在输出最终回答之前,模型会先输出一段思维链内容,以提升最终答案的准确性。',
@@ -14,6 +14,7 @@ const hunyuanChatModels: AIChatModelCard[] = [
14
14
  input: 0,
15
15
  output: 0,
16
16
  },
17
+ releasedAt: '2024-10-30',
17
18
  type: 'chat',
18
19
  },
19
20
  {
@@ -26,9 +27,10 @@ const hunyuanChatModels: AIChatModelCard[] = [
26
27
  maxOutput: 2000,
27
28
  pricing: {
28
29
  currency: 'CNY',
29
- input: 4.5,
30
- output: 5,
30
+ input: 0.8,
31
+ output: 2,
31
32
  },
33
+ releasedAt: '2024-10-28',
32
34
  type: 'chat',
33
35
  },
34
36
  {
@@ -41,9 +43,10 @@ const hunyuanChatModels: AIChatModelCard[] = [
41
43
  maxOutput: 6000,
42
44
  pricing: {
43
45
  currency: 'CNY',
44
- input: 15,
45
- output: 60,
46
+ input: 0.5,
47
+ output: 2,
46
48
  },
49
+ releasedAt: '2024-10-28',
47
50
  type: 'chat',
48
51
  },
49
52
  {
@@ -52,9 +55,27 @@ const hunyuanChatModels: AIChatModelCard[] = [
52
55
  },
53
56
  contextWindowTokens: 32_000,
54
57
  description:
55
- '混元全新一代大语言模型的预览版,采用全新的混合专家模型(MoE)结构,相比hunyuan-pro推理效率更快,效果表现更强。',
58
+ '通用体验优化,包括NLP理解、文本创作、闲聊、知识问答、翻译、领域等;提升拟人性,优化模型情商;提升意图模糊时模型主动澄清能力;提升字词解析类问题的处理能力;提升创作的质量和可互动性;提升多轮体验。',
56
59
  displayName: 'Hunyuan Turbo',
57
60
  enabled: true,
61
+ id: 'hunyuan-turbo-latest',
62
+ maxOutput: 4000,
63
+ pricing: {
64
+ currency: 'CNY',
65
+ input: 15,
66
+ output: 50,
67
+ },
68
+ releasedAt: '2025-01-10',
69
+ type: 'chat',
70
+ },
71
+ {
72
+ abilities: {
73
+ functionCall: true,
74
+ },
75
+ contextWindowTokens: 32_000,
76
+ description:
77
+ '本版本优化:数据指令scaling,大幅提升模型通用泛化能力;大幅提升数学、代码、逻辑推理能力;优化文本理解字词理解相关能力;优化文本创作内容生成质量',
78
+ displayName: 'Hunyuan Turbo',
58
79
  id: 'hunyuan-turbo',
59
80
  maxOutput: 4000,
60
81
  pricing: {
@@ -62,6 +83,25 @@ const hunyuanChatModels: AIChatModelCard[] = [
62
83
  input: 15,
63
84
  output: 50,
64
85
  },
86
+ releasedAt: '2025-01-10',
87
+ type: 'chat',
88
+ },
89
+ {
90
+ abilities: {
91
+ functionCall: true,
92
+ },
93
+ contextWindowTokens: 32_000,
94
+ description:
95
+ '本版本优化:数据指令scaling,大幅提升模型通用泛化能力;大幅提升数学、代码、逻辑推理能力;优化文本理解字词理解相关能力;优化文本创作内容生成质量',
96
+ displayName: 'Hunyuan Turbo 20241223',
97
+ id: 'hunyuan-turbo-20241223',
98
+ maxOutput: 4000,
99
+ pricing: {
100
+ currency: 'CNY',
101
+ input: 15,
102
+ output: 50,
103
+ },
104
+ releasedAt: '2025-01-10',
65
105
  type: 'chat',
66
106
  },
67
107
  {
@@ -70,16 +110,74 @@ const hunyuanChatModels: AIChatModelCard[] = [
70
110
  },
71
111
  contextWindowTokens: 32_000,
72
112
  description:
73
- '万亿级参数规模 MOE-32K 长文模型。在各种 benchmark 上达到绝对领先的水平,复杂指令和推理,具备复杂数学能力,支持 functioncall,在多语言翻译、金融法律医疗等领域应用重点优化。',
74
- displayName: 'Hunyuan Pro',
113
+ 'hunyuan-turbo 2024 年 11 月 20 日固定版本,介于 hunyuan-turbo hunyuan-turbo-latest 之间的一个版本。',
114
+ displayName: 'Hunyuan Turbo 20241120',
115
+ id: 'hunyuan-turbo-20241120',
116
+ maxOutput: 4000,
117
+ pricing: {
118
+ currency: 'CNY',
119
+ input: 15,
120
+ output: 50,
121
+ },
122
+ releasedAt: '2024-11-20',
123
+ type: 'chat',
124
+ },
125
+ {
126
+ contextWindowTokens: 32_000,
127
+ description:
128
+ 'Hunyuan-large 模型总参数量约 389B,激活参数量约 52B,是当前业界参数规模最大、效果最好的 Transformer 架构的开源 MoE 模型。',
129
+ displayName: 'Hunyuan Large',
75
130
  enabled: true,
76
- id: 'hunyuan-pro',
131
+ id: 'hunyuan-large',
77
132
  maxOutput: 4000,
78
133
  pricing: {
79
134
  currency: 'CNY',
80
- input: 30,
81
- output: 100,
135
+ input: 4,
136
+ output: 12,
137
+ },
138
+ releasedAt: '2024-11-20',
139
+ type: 'chat',
140
+ },
141
+ {
142
+ contextWindowTokens: 134_000,
143
+ description:
144
+ '擅长处理长文任务如文档摘要和文档问答等,同时也具备处理通用文本生成任务的能力。在长文本的分析和生成上表现优异,能有效应对复杂和详尽的长文内容处理需求。',
145
+ displayName: 'Hunyuan Large Longcontext',
146
+ enabled: true,
147
+ id: 'hunyuan-large-longcontext',
148
+ maxOutput: 6000,
149
+ pricing: {
150
+ currency: 'CNY',
151
+ input: 6,
152
+ output: 18,
153
+ },
154
+ releasedAt: '2024-12-18',
155
+ type: 'chat',
156
+ },
157
+ {
158
+ abilities: {
159
+ vision: true,
160
+ },
161
+ contextWindowTokens: 36_000,
162
+ description: '混元最新7B多模态模型,上下文窗口32K,支持中英文场景的多模态对话、图像物体识别、文档表格理解、多模态数学等,在多个维度上评测指标优于7B竞品模型。',
163
+ displayName: 'Hunyuan Lite Vision',
164
+ enabled: true,
165
+ id: 'hunyuan-lite-vision',
166
+ maxOutput: 4000,
167
+ releasedAt: '2024-12-12',
168
+ type: 'chat',
169
+ },
170
+ {
171
+ abilities: {
172
+ vision: true,
82
173
  },
174
+ contextWindowTokens: 8000,
175
+ description: '混元最新多模态模型,支持多语种作答,中英文能力均衡。',
176
+ displayName: 'Hunyuan Standard Vision',
177
+ enabled: true,
178
+ id: 'hunyuan-standard-vision',
179
+ maxOutput: 2000,
180
+ releasedAt: '2024-12-31',
83
181
  type: 'chat',
84
182
  },
85
183
  {
@@ -87,16 +185,35 @@ const hunyuanChatModels: AIChatModelCard[] = [
87
185
  vision: true,
88
186
  },
89
187
  contextWindowTokens: 8000,
188
+ description: '混元新一代视觉语言旗舰大模型,采用全新的混合专家模型(MoE)结构,在图文理解相关的基础识别、内容创作、知识问答、分析推理等能力上相比前一代模型全面提升。',
189
+ displayName: 'Hunyuan Turbo Vision',
190
+ enabled: true,
191
+ id: 'hunyuan-turbo-vision',
192
+ maxOutput: 2000,
193
+ pricing: {
194
+ currency: 'CNY',
195
+ input: 80,
196
+ output: 80,
197
+ },
198
+ releasedAt: '2024-11-26',
199
+ type: 'chat',
200
+ },
201
+ {
202
+ abilities: {
203
+ vision: true,
204
+ },
205
+ contextWindowTokens: 12_000,
90
206
  description: '混元最新多模态模型,支持图片+文本输入生成文本内容。',
91
207
  displayName: 'Hunyuan Vision',
92
208
  enabled: true,
93
209
  id: 'hunyuan-vision',
94
- maxOutput: 4000,
210
+ maxOutput: 6000,
95
211
  pricing: {
96
212
  currency: 'CNY',
97
213
  input: 18,
98
214
  output: 18,
99
215
  },
216
+ releasedAt: '2025-01-03',
100
217
  type: 'chat',
101
218
  },
102
219
  {
@@ -111,6 +228,7 @@ const hunyuanChatModels: AIChatModelCard[] = [
111
228
  input: 4,
112
229
  output: 8,
113
230
  },
231
+ releasedAt: '2024-11-12',
114
232
  type: 'chat',
115
233
  },
116
234
  {
@@ -128,10 +246,11 @@ const hunyuanChatModels: AIChatModelCard[] = [
128
246
  input: 4,
129
247
  output: 8,
130
248
  },
249
+ releasedAt: '2024-11-15',
131
250
  type: 'chat',
132
251
  },
133
252
  {
134
- contextWindowTokens: 8000,
253
+ contextWindowTokens: 32_000,
135
254
  description:
136
255
  '混元最新版角色扮演模型,混元官方精调训练推出的角色扮演模型,基于混元模型结合角色扮演场景数据集进行增训,在角色扮演场景具有更好的基础效果。',
137
256
  displayName: 'Hunyuan Role',
@@ -142,6 +261,7 @@ const hunyuanChatModels: AIChatModelCard[] = [
142
261
  input: 4,
143
262
  output: 8,
144
263
  },
264
+ releasedAt: '2024-07-04',
145
265
  type: 'chat',
146
266
  },
147
267
  ];
@@ -152,9 +152,26 @@ const qwenChatModels: AIChatModelCard[] = [
152
152
  id: 'qwq-32b-preview',
153
153
  pricing: {
154
154
  currency: 'CNY',
155
- input: 0,
156
- output: 0,
155
+ input: 3.5,
156
+ output: 7,
157
+ },
158
+ releasedAt: '2024-11-28',
159
+ type: 'chat',
160
+ },
161
+ {
162
+ abilities: {
163
+ vision: true,
164
+ },
165
+ contextWindowTokens: 32_768,
166
+ description: 'QVQ模型是由 Qwen 团队开发的实验性研究模型,专注于提升视觉推理能力,尤其在数学推理领域。',
167
+ displayName: 'QVQ 72B Preview',
168
+ id: 'qvq-72b-preview',
169
+ pricing: {
170
+ currency: 'CNY',
171
+ input: 12,
172
+ output: 36,
157
173
  },
174
+ releasedAt: '2024-12-25',
158
175
  type: 'chat',
159
176
  },
160
177
  {
@@ -135,11 +135,13 @@ const Hunyuan: ModelProviderCard = {
135
135
  '由腾讯研发的大语言模型,具备强大的中文创作能力,复杂语境下的逻辑推理能力,以及可靠的任务执行能力',
136
136
  disableBrowserRequest: true,
137
137
  id: 'hunyuan',
138
+ modelList: { showModelFetcher: true },
138
139
  modelsUrl: 'https://cloud.tencent.com/document/product/1729/104753',
139
140
  name: 'Hunyuan',
140
141
  settings: {
141
142
  disableBrowserRequest: true,
142
143
  sdkType: 'openai',
144
+ showModelFetcher: true,
143
145
  },
144
146
  url: 'https://hunyuan.tencent.com',
145
147
  };
@@ -223,7 +223,10 @@
223
223
  "hash": "9646161fa041354714f823d726af27247bcd6e60fa3be5698c0d69f337a5700b"
224
224
  },
225
225
  {
226
- "sql": ["DROP TABLE \"user_budgets\";", "\nDROP TABLE \"user_subscriptions\";"],
226
+ "sql": [
227
+ "DROP TABLE \"user_budgets\";",
228
+ "\nDROP TABLE \"user_subscriptions\";"
229
+ ],
227
230
  "bps": true,
228
231
  "folderMillis": 1729699958471,
229
232
  "hash": "7dad43a2a25d1aec82124a4e53f8d82f8505c3073f23606c1dc5d2a4598eacf9"
@@ -293,5 +296,13 @@
293
296
  "bps": true,
294
297
  "folderMillis": 1735834653361,
295
298
  "hash": "845a692ceabbfc3caf252a97d3e19a213bc0c433df2689900135f9cfded2cf49"
299
+ },
300
+ {
301
+ "sql": [
302
+ "ALTER TABLE \"messages\" ADD COLUMN \"reasoning\" jsonb;"
303
+ ],
304
+ "bps": true,
305
+ "folderMillis": 1737609172353,
306
+ "hash": "2cb36ae4fcdd7b7064767e04bfbb36ae34518ff4bb1b39006f2dd394d1893868"
296
307
  }
297
- ]
308
+ ]
@@ -0,0 +1 @@
1
+ ALTER TABLE "messages" ADD COLUMN "reasoning" jsonb;