@lobehub/chat 1.68.5 → 1.68.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. package/CHANGELOG.md +50 -0
  2. package/README.md +2 -2
  3. package/README.zh-CN.md +2 -2
  4. package/changelog/v1.json +18 -0
  5. package/locales/ar/modelProvider.json +3 -0
  6. package/locales/bg-BG/modelProvider.json +3 -0
  7. package/locales/de-DE/modelProvider.json +3 -0
  8. package/locales/en-US/modelProvider.json +3 -0
  9. package/locales/es-ES/modelProvider.json +3 -0
  10. package/locales/fa-IR/modelProvider.json +3 -0
  11. package/locales/fr-FR/modelProvider.json +3 -0
  12. package/locales/it-IT/modelProvider.json +3 -0
  13. package/locales/ja-JP/modelProvider.json +3 -0
  14. package/locales/ko-KR/modelProvider.json +3 -0
  15. package/locales/nl-NL/modelProvider.json +3 -0
  16. package/locales/pl-PL/modelProvider.json +3 -0
  17. package/locales/pt-BR/modelProvider.json +3 -0
  18. package/locales/ru-RU/modelProvider.json +3 -0
  19. package/locales/tr-TR/modelProvider.json +3 -0
  20. package/locales/vi-VN/modelProvider.json +3 -0
  21. package/locales/zh-CN/modelProvider.json +3 -0
  22. package/locales/zh-TW/modelProvider.json +3 -0
  23. package/package.json +1 -1
  24. package/src/app/(backend)/webapi/chat/[provider]/route.test.ts +2 -2
  25. package/src/app/[variants]/(main)/settings/provider/features/CreateNewProvider/index.tsx +1 -1
  26. package/src/app/[variants]/(main)/settings/provider/features/ProviderConfig/UpdateProviderInfo/SettingModal.tsx +34 -2
  27. package/src/app/[variants]/(main)/settings/provider/features/ProviderConfig/index.tsx +14 -12
  28. package/src/features/Conversation/Extras/Usage/UsageDetail/ModelCard.tsx +6 -6
  29. package/src/features/Conversation/Extras/Usage/UsageDetail/index.tsx +6 -6
  30. package/src/libs/agent-runtime/AgentRuntime.test.ts +76 -255
  31. package/src/libs/agent-runtime/AgentRuntime.ts +13 -338
  32. package/src/libs/agent-runtime/azureOpenai/index.test.ts +9 -9
  33. package/src/libs/agent-runtime/azureOpenai/index.ts +6 -6
  34. package/src/libs/agent-runtime/runtimeMap.ts +97 -0
  35. package/src/libs/agent-runtime/vertexai/index.ts +3 -1
  36. package/src/locales/default/modelProvider.ts +3 -0
  37. package/src/server/modules/AgentRuntime/index.ts +3 -2
  38. package/src/services/chat.ts +4 -6
package/CHANGELOG.md CHANGED
@@ -2,6 +2,56 @@
 
  # Changelog
 
+ ### [Version 1.68.7](https://github.com/lobehub/lobe-chat/compare/v1.68.6...v1.68.7)
+
+ <sup>Released on **2025-03-05**</sup>
+
+ #### ♻ Code Refactoring
+
+ - **misc**: Refactor agent runtime to better code format.
+
+ <br/>
+
+ <details>
+ <summary><kbd>Improvements and Fixes</kbd></summary>
+
+ #### Code refactoring
+
+ - **misc**: Refactor agent runtime to better code format, closes [#6284](https://github.com/lobehub/lobe-chat/issues/6284) ([fc1ed4a](https://github.com/lobehub/lobe-chat/commit/fc1ed4a))
+
+ </details>
+
+ <div align="right">
+
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+ </div>
+
+ ### [Version 1.68.6](https://github.com/lobehub/lobe-chat/compare/v1.68.5...v1.68.6)
+
+ <sup>Released on **2025-03-05**</sup>
+
+ #### 🐛 Bug Fixes
+
+ - **misc**: Fix custom ai provider sdk type.
+
+ <br/>
+
+ <details>
+ <summary><kbd>Improvements and Fixes</kbd></summary>
+
+ #### What's fixed
+
+ - **misc**: Fix custom ai provider sdk type, closes [#6712](https://github.com/lobehub/lobe-chat/issues/6712) ([7f8c379](https://github.com/lobehub/lobe-chat/commit/7f8c379))
+
+ </details>
+
+ <div align="right">
+
+ [![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top)
+
+ </div>
+
  ### [Version 1.68.5](https://github.com/lobehub/lobe-chat/compare/v1.68.4...v1.68.5)
 
  <sup>Released on **2025-03-04**</sup>
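The 1.68.7 refactor entry above lines up with the file list at the top: AgentRuntime.ts loses roughly 338 lines while a new runtimeMap.ts gains 97, which suggests the per-provider construction logic was pulled out of the class and into a lookup table. The actual contents of runtimeMap.ts are not included in this excerpt; the sketch below only illustrates the general pattern, and every name in it is hypothetical.

```ts
// Illustrative sketch of a provider-to-runtime lookup table; these stand-in
// classes are NOT the real lobe-chat runtimes, whose code is not shown in this diff.
class OpenAICompatibleRuntime {
  constructor(private options: { apiKey?: string; baseURL?: string }) {}
  async chat(payload: unknown) {
    // call the provider's chat completion endpoint here
    return payload;
  }
}

class AnthropicRuntime {
  constructor(private options: { apiKey?: string }) {}
  async chat(payload: unknown) {
    return payload;
  }
}

// A flat map keyed by provider id replaces a long per-provider switch statement.
const runtimeMap = {
  anthropic: AnthropicRuntime,
  openai: OpenAICompatibleRuntime,
} as const;

type ProviderId = keyof typeof runtimeMap;

function createRuntime(provider: ProviderId, options: { apiKey?: string; baseURL?: string }) {
  const Runtime = runtimeMap[provider];
  return new Runtime(options);
}
```

Keeping the table flat like this makes adding a provider a one-line change and lets the provider-specific branching shrink, which is consistent with the line counts shown in the file list.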
package/README.md CHANGED
@@ -194,12 +194,12 @@ We have implemented support for the following model service providers:
  - **[HuggingFace](https://lobechat.com/discover/provider/huggingface)**: The HuggingFace Inference API provides a fast and free way for you to explore thousands of models for various tasks. Whether you are prototyping for a new application or experimenting with the capabilities of machine learning, this API gives you instant access to high-performance models across multiple domains.
  - **[OpenRouter](https://lobechat.com/discover/provider/openrouter)**: OpenRouter is a service platform providing access to various cutting-edge large model interfaces, supporting OpenAI, Anthropic, LLaMA, and more, suitable for diverse development and application needs. Users can flexibly choose the optimal model and pricing based on their requirements, enhancing the AI experience.
  - **[Cloudflare Workers AI](https://lobechat.com/discover/provider/cloudflare)**: Run serverless GPU-powered machine learning models on Cloudflare's global network.
+ - **[GitHub](https://lobechat.com/discover/provider/github)**: With GitHub Models, developers can become AI engineers and leverage the industry's leading AI models.
 
  <details><summary><kbd>See more providers (+27)</kbd></summary>
 
- - **[GitHub](https://lobechat.com/discover/provider/github)**: With GitHub Models, developers can become AI engineers and leverage the industry's leading AI models.
- - **[PPIO](https://lobechat.com/discover/provider/ppio)**: PPIO supports stable and cost-efficient open-source LLM APIs, such as DeepSeek, Llama, Qwen etc.
  - **[Novita](https://lobechat.com/discover/provider/novita)**: Novita AI is a platform providing a variety of large language models and AI image generation API services, flexible, reliable, and cost-effective. It supports the latest open-source models like Llama3 and Mistral, offering a comprehensive, user-friendly, and auto-scaling API solution for generative AI application development, suitable for the rapid growth of AI startups.
+ - **[PPIO](https://lobechat.com/discover/provider/ppio)**: PPIO supports stable and cost-efficient open-source LLM APIs, such as DeepSeek, Llama, Qwen etc.
  - **[Together AI](https://lobechat.com/discover/provider/togetherai)**: Together AI is dedicated to achieving leading performance through innovative AI models, offering extensive customization capabilities, including rapid scaling support and intuitive deployment processes to meet various enterprise needs.
  - **[Fireworks AI](https://lobechat.com/discover/provider/fireworksai)**: Fireworks AI is a leading provider of advanced language model services, focusing on functional calling and multimodal processing. Its latest model, Firefunction V2, is based on Llama-3, optimized for function calling, conversation, and instruction following. The visual language model FireLLaVA-13B supports mixed input of images and text. Other notable models include the Llama series and Mixtral series, providing efficient multilingual instruction following and generation support.
  - **[Groq](https://lobechat.com/discover/provider/groq)**: Groq's LPU inference engine has excelled in the latest independent large language model (LLM) benchmarks, redefining the standards for AI solutions with its remarkable speed and efficiency. Groq represents instant inference speed, demonstrating strong performance in cloud-based deployments.
package/README.zh-CN.md CHANGED
@@ -194,12 +194,12 @@ LobeChat 支持文件上传与知识库功能,你可以上传文件、图片
  - **[HuggingFace](https://lobechat.com/discover/provider/huggingface)**: HuggingFace Inference API 提供了一种快速且免费的方式,让您可以探索成千上万种模型,适用于各种任务。无论您是在为新应用程序进行原型设计,还是在尝试机器学习的功能,这个 API 都能让您即时访问多个领域的高性能模型。
  - **[OpenRouter](https://lobechat.com/discover/provider/openrouter)**: OpenRouter 是一个提供多种前沿大模型接口的服务平台,支持 OpenAI、Anthropic、LLaMA 及更多,适合多样化的开发和应用需求。用户可根据自身需求灵活选择最优的模型和价格,助力 AI 体验的提升。
  - **[Cloudflare Workers AI](https://lobechat.com/discover/provider/cloudflare)**: 在 Cloudflare 的全球网络上运行由无服务器 GPU 驱动的机器学习模型。
+ - **[GitHub](https://lobechat.com/discover/provider/github)**: 通过 GitHub 模型,开发人员可以成为 AI 工程师,并使用行业领先的 AI 模型进行构建。
 
  <details><summary><kbd>See more providers (+27)</kbd></summary>
 
- - **[GitHub](https://lobechat.com/discover/provider/github)**: 通过 GitHub 模型,开发人员可以成为 AI 工程师,并使用行业领先的 AI 模型进行构建。
- - **[PPIO](https://lobechat.com/discover/provider/ppio)**: PPIO 派欧云提供稳定、高性价比的开源模型 API 服务,支持 DeepSeek 全系列、Llama、Qwen 等行业领先大模型。
  - **[Novita](https://lobechat.com/discover/provider/novita)**: Novita AI 是一个提供多种大语言模型与 AI 图像生成的 API 服务的平台,灵活、可靠且具有成本效益。它支持 Llama3、Mistral 等最新的开源模型,并为生成式 AI 应用开发提供了全面、用户友好且自动扩展的 API 解决方案,适合 AI 初创公司的快速发展。
+ - **[PPIO](https://lobechat.com/discover/provider/ppio)**: PPIO 派欧云提供稳定、高性价比的开源模型 API 服务,支持 DeepSeek 全系列、Llama、Qwen 等行业领先大模型。
  - **[Together AI](https://lobechat.com/discover/provider/togetherai)**: Together AI 致力于通过创新的 AI 模型实现领先的性能,提供广泛的自定义能力,包括快速扩展支持和直观的部署流程,满足企业的各种需求。
  - **[Fireworks AI](https://lobechat.com/discover/provider/fireworksai)**: Fireworks AI 是一家领先的高级语言模型服务商,专注于功能调用和多模态处理。其最新模型 Firefunction V2 基于 Llama-3,优化用于函数调用、对话及指令跟随。视觉语言模型 FireLLaVA-13B 支持图像和文本混合输入。其他 notable 模型包括 Llama 系列和 Mixtral 系列,提供高效的多语言指令跟随与生成支持。
  - **[Groq](https://lobechat.com/discover/provider/groq)**: Groq 的 LPU 推理引擎在最新的独立大语言模型(LLM)基准测试中表现卓越,以其惊人的速度和效率重新定义了 AI 解决方案的标准。Groq 是一种即时推理速度的代表,在基于云的部署中展现了良好的性能。
package/changelog/v1.json CHANGED
@@ -1,4 +1,22 @@
  [
+   {
+     "children": {
+       "improvements": [
+         "Refactor agent runtime to better code format."
+       ]
+     },
+     "date": "2025-03-05",
+     "version": "1.68.7"
+   },
+   {
+     "children": {
+       "fixes": [
+         "Fix custom ai provider sdk type."
+       ]
+     },
+     "date": "2025-03-05",
+     "version": "1.68.6"
+   },
    {
      "children": {
        "improvements": [
package/locales/ar/modelProvider.json CHANGED
@@ -322,6 +322,9 @@
    "tooltip": "تحديث التكوين الأساسي للمزود",
    "updateSuccess": "تم التحديث بنجاح"
  },
+ "updateCustomAiProvider": {
+   "title": "تحديث إعدادات مزود الذكاء الاصطناعي المخصص"
+ },
  "vertexai": {
    "apiKey": {
      "desc": "أدخل مفاتيح Vertex AI الخاصة بك",
package/locales/bg-BG/modelProvider.json CHANGED
@@ -322,6 +322,9 @@
    "tooltip": "Актуализиране на основната конфигурация на доставчика",
    "updateSuccess": "Актуализацията е успешна"
  },
+ "updateCustomAiProvider": {
+   "title": "Актуализиране на конфигурацията на доставчика на персонализирани AI услуги"
+ },
  "vertexai": {
    "apiKey": {
      "desc": "Въведете вашите ключове за Vertex AI",
package/locales/de-DE/modelProvider.json CHANGED
@@ -322,6 +322,9 @@
    "tooltip": "Aktualisieren Sie die grundlegenden Anbieterinformationen",
    "updateSuccess": "Aktualisierung erfolgreich"
  },
+ "updateCustomAiProvider": {
+   "title": "Konfiguration des benutzerdefinierten KI-Anbieters aktualisieren"
+ },
  "vertexai": {
    "apiKey": {
      "desc": "Geben Sie Ihre Vertex AI-Schlüssel ein",
package/locales/en-US/modelProvider.json CHANGED
@@ -322,6 +322,9 @@
    "tooltip": "Update provider basic configuration",
    "updateSuccess": "Update successful"
  },
+ "updateCustomAiProvider": {
+   "title": "Update Custom AI Provider Configuration"
+ },
  "vertexai": {
    "apiKey": {
      "desc": "Enter your Vertex AI Keys",
package/locales/es-ES/modelProvider.json CHANGED
@@ -322,6 +322,9 @@
    "tooltip": "Actualizar configuración básica del proveedor",
    "updateSuccess": "Actualización exitosa"
  },
+ "updateCustomAiProvider": {
+   "title": "Actualizar la configuración del proveedor de IA personalizado"
+ },
  "vertexai": {
    "apiKey": {
      "desc": "Introduce tus claves de Vertex AI",
package/locales/fa-IR/modelProvider.json CHANGED
@@ -322,6 +322,9 @@
    "tooltip": "به‌روزرسانی پیکربندی پایه ارائه‌دهنده",
    "updateSuccess": "به‌روزرسانی با موفقیت انجام شد"
  },
+ "updateCustomAiProvider": {
+   "title": "به‌روزرسانی تنظیمات ارائه‌دهنده AI سفارشی"
+ },
  "vertexai": {
    "apiKey": {
      "desc": "کلیدهای Vertex AI خود را وارد کنید",
package/locales/fr-FR/modelProvider.json CHANGED
@@ -322,6 +322,9 @@
    "tooltip": "Mettre à jour la configuration de base du fournisseur",
    "updateSuccess": "Mise à jour réussie"
  },
+ "updateCustomAiProvider": {
+   "title": "Mettre à jour la configuration du fournisseur de services AI personnalisé"
+ },
  "vertexai": {
    "apiKey": {
      "desc": "Entrez vos clés Vertex AI",
package/locales/it-IT/modelProvider.json CHANGED
@@ -322,6 +322,9 @@
    "tooltip": "Aggiorna la configurazione di base del fornitore",
    "updateSuccess": "Aggiornamento avvenuto con successo"
  },
+ "updateCustomAiProvider": {
+   "title": "Aggiorna la configurazione del fornitore di AI personalizzato"
+ },
  "vertexai": {
    "apiKey": {
      "desc": "Inserisci le tue chiavi Vertex AI",
package/locales/ja-JP/modelProvider.json CHANGED
@@ -322,6 +322,9 @@
    "tooltip": "サービスプロバイダーの基本設定を更新",
    "updateSuccess": "更新に成功しました"
  },
+ "updateCustomAiProvider": {
+   "title": "カスタム AI プロバイダー設定の更新"
+ },
  "vertexai": {
    "apiKey": {
      "desc": "あなたの Vertex AI キーを入力してください",
package/locales/ko-KR/modelProvider.json CHANGED
@@ -322,6 +322,9 @@
    "tooltip": "서비스 제공자 기본 설정 업데이트",
    "updateSuccess": "업데이트 성공"
  },
+ "updateCustomAiProvider": {
+   "title": "사용자 정의 AI 서비스 제공자 구성 업데이트"
+ },
  "vertexai": {
    "apiKey": {
      "desc": "당신의 Vertex AI 키를 입력하세요",
package/locales/nl-NL/modelProvider.json CHANGED
@@ -322,6 +322,9 @@
    "tooltip": "Werk basisconfiguratie van provider bij",
    "updateSuccess": "Bijwerking geslaagd"
  },
+ "updateCustomAiProvider": {
+   "title": "Bijwerken van de configuratie van de aangepaste AI-provider"
+ },
  "vertexai": {
    "apiKey": {
      "desc": "Vul je Vertex AI-sleutels in",
package/locales/pl-PL/modelProvider.json CHANGED
@@ -322,6 +322,9 @@
    "tooltip": "Aktualizuj podstawowe ustawienia dostawcy",
    "updateSuccess": "Aktualizacja zakończona sukcesem"
  },
+ "updateCustomAiProvider": {
+   "title": "Aktualizuj konfigurację dostawcy AI"
+ },
  "vertexai": {
    "apiKey": {
      "desc": "Wprowadź swoje klucze Vertex AI",
package/locales/pt-BR/modelProvider.json CHANGED
@@ -322,6 +322,9 @@
    "tooltip": "Atualizar configurações básicas do provedor",
    "updateSuccess": "Atualização bem-sucedida"
  },
+ "updateCustomAiProvider": {
+   "title": "Atualizar configuração do provedor de IA personalizado"
+ },
  "vertexai": {
    "apiKey": {
      "desc": "Insira suas Chaves do Vertex AI",
package/locales/ru-RU/modelProvider.json CHANGED
@@ -322,6 +322,9 @@
    "tooltip": "Обновить базовую конфигурацию провайдера",
    "updateSuccess": "Обновление успешно"
  },
+ "updateCustomAiProvider": {
+   "title": "Обновить настройки поставщика пользовательского ИИ"
+ },
  "vertexai": {
    "apiKey": {
      "desc": "Введите ваши ключи Vertex AI",
package/locales/tr-TR/modelProvider.json CHANGED
@@ -322,6 +322,9 @@
    "tooltip": "Hizmet sağlayıcının temel yapılandırmasını güncelle",
    "updateSuccess": "Güncelleme başarılı"
  },
+ "updateCustomAiProvider": {
+   "title": "Özel AI Sağlayıcı Yapılandırmasını Güncelle"
+ },
  "vertexai": {
    "apiKey": {
      "desc": "Vertex AI Anahtarlarınızı buraya girin",
package/locales/vi-VN/modelProvider.json CHANGED
@@ -322,6 +322,9 @@
    "tooltip": "Cập nhật cấu hình cơ bản của nhà cung cấp",
    "updateSuccess": "Cập nhật thành công"
  },
+ "updateCustomAiProvider": {
+   "title": "Cập nhật cấu hình nhà cung cấp AI tùy chỉnh"
+ },
  "vertexai": {
    "apiKey": {
      "desc": "Nhập khóa Vertex AI của bạn",
package/locales/zh-CN/modelProvider.json CHANGED
@@ -322,6 +322,9 @@
    "tooltip": "更新服务商基础配置",
    "updateSuccess": "更新成功"
  },
+ "updateCustomAiProvider": {
+   "title": "更新自定义 AI 服务商配置"
+ },
  "vertexai": {
    "apiKey": {
      "desc": "填入你的 Vertex Ai Keys",
package/locales/zh-TW/modelProvider.json CHANGED
@@ -322,6 +322,9 @@
    "tooltip": "更新服務商基礎配置",
    "updateSuccess": "更新成功"
  },
+ "updateCustomAiProvider": {
+   "title": "更新自訂 AI 服務商配置"
+ },
  "vertexai": {
    "apiKey": {
      "desc": "填入你的 Vertex AI 金鑰",
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@lobehub/chat",
-   "version": "1.68.5",
+   "version": "1.68.7",
    "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.",
    "keywords": [
      "framework",
package/src/app/(backend)/webapi/chat/[provider]/route.test.ts CHANGED
@@ -71,7 +71,7 @@ describe('POST handler', () => {
 
    // migrate to new AgentRuntime init api
    const spy = vi
-     .spyOn(AgentRuntime, 'initializeWithProviderOptions')
+     .spyOn(AgentRuntime, 'initializeWithProvider')
      .mockResolvedValue(new AgentRuntime(mockRuntime));
 
    // 调用 POST 函数
@@ -117,7 +117,7 @@ describe('POST handler', () => {
 
    const mockRuntime: LobeRuntimeAI = { baseURL: 'abc', chat: vi.fn() };
 
-   vi.spyOn(AgentRuntime, 'initializeWithProviderOptions').mockResolvedValue(
+   vi.spyOn(AgentRuntime, 'initializeWithProvider').mockResolvedValue(
      new AgentRuntime(mockRuntime),
    );
 
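Both hunks above track a rename of the AgentRuntime static initializer, from initializeWithProviderOptions to initializeWithProvider. The other call sites updated in this release (src/server/modules/AgentRuntime/index.ts and src/services/chat.ts in the file list) are not shown in this excerpt. A minimal sketch of what a migrated caller might look like, assuming the renamed method keeps the provider-id-plus-options shape implied by the old name, and assuming the repository's usual `@/` import alias:

```ts
import { AgentRuntime } from '@/libs/agent-runtime';

// Assumption: only the method name change is confirmed by this diff; the exact
// parameter shape of initializeWithProvider is inferred from the old method name.
async function getRuntime(provider: string, options: Record<string, unknown>) {
  // before 1.68.7: AgentRuntime.initializeWithProviderOptions(...)
  return AgentRuntime.initializeWithProvider(provider, options);
}
```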
package/src/app/[variants]/(main)/settings/provider/features/CreateNewProvider/index.tsx CHANGED
@@ -107,7 +107,7 @@ const CreateNewProvider = memo<CreateNewProviderProps>(({ onClose, open }) => {
      ),
      label: t('createNewAiProvider.sdkType.title'),
      minWidth: 400,
-     name: 'sdkType',
+     name: ['settings', 'sdkType'],
      rules: [{ message: t('createNewAiProvider.sdkType.required'), required: true }],
    },
    {
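This one-line change is the substance of the 1.68.6 "Fix custom ai provider sdk type" entry: in antd's Form, a plain string name writes the field to the top level of the form values, while an array name is treated as a nested path, so the selected SDK type is now stored under settings.sdkType rather than at the top level. A small standalone illustration of that antd behavior (component and handler names here are made up for the example):

```tsx
import { Form, Select } from 'antd';

// With name={['settings', 'sdkType']}, onFinish receives
//   { settings: { sdkType: 'openai' } }
// whereas name="sdkType" would have produced { sdkType: 'openai' } at the top level.
const SdkTypeFieldDemo = () => (
  <Form onFinish={(values) => console.log(values)}>
    <Form.Item label="SDK Type" name={['settings', 'sdkType']}>
      <Select options={[{ label: 'OpenAI', value: 'openai' }]} />
    </Form.Item>
  </Form>
);

export default SdkTypeFieldDemo;
```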
package/src/app/[variants]/(main)/settings/provider/features/ProviderConfig/UpdateProviderInfo/SettingModal.tsx CHANGED
@@ -1,6 +1,7 @@
+ import { ProviderIcon } from '@lobehub/icons';
  import { FormModal, Icon } from '@lobehub/ui';
  import type { FormItemProps } from '@lobehub/ui/es/Form/components/FormItem';
- import { App, Button, Input } from 'antd';
+ import { App, Button, Input, Select } from 'antd';
  import { BrainIcon } from 'lucide-react';
  import { useRouter } from 'next/navigation';
  import { memo, useState } from 'react';
@@ -78,6 +79,33 @@ const CreateNewProvider = memo<CreateNewProviderProps>(({ onClose, open, initial
    },
  ];
 
+ const configItems: FormItemProps[] = [
+   {
+     children: (
+       <Select
+         optionRender={({ label, value }) => (
+           <Flexbox align={'center'} gap={8} horizontal>
+             <ProviderIcon provider={value as string} size={18} />
+             {label}
+           </Flexbox>
+         )}
+         options={[
+           { label: 'OpenAI', value: 'openai' },
+           { label: 'Anthropic', value: 'anthropic' },
+           { label: 'Ollama', value: 'ollama' },
+           // { label: 'Azure AI', value: 'azureai' },
+         ]}
+         placeholder={t('createNewAiProvider.sdkType.placeholder')}
+         variant={'filled'}
+       />
+     ),
+     label: t('createNewAiProvider.sdkType.title'),
+     minWidth: 400,
+     name: ['settings', 'sdkType'],
+     rules: [{ message: t('createNewAiProvider.sdkType.required'), required: true }],
+   },
+ ];
+
  return (
    <FormModal
      footer={
@@ -118,6 +146,10 @@ const CreateNewProvider = memo<CreateNewProviderProps>(({ onClose, open, initial
          children: basicItems,
          title: t('createNewAiProvider.basicTitle'),
        },
+       {
+         children: configItems,
+         title: t('createNewAiProvider.configTitle'),
+       },
      ]}
      onCancel={onClose}
      onFinish={onFinish}
@@ -127,7 +159,7 @@ const CreateNewProvider = memo<CreateNewProviderProps>(({ onClose, open, initial
      title={
        <Flexbox gap={8} horizontal>
          <Icon icon={BrainIcon} />
-         {t('createNewAiProvider.title')}
+         {t('updateCustomAiProvider.title')}
        </Flexbox>
      }
    />
package/src/app/[variants]/(main)/settings/provider/features/ProviderConfig/index.tsx CHANGED
@@ -324,19 +324,21 @@ const ProviderConfig = memo<ProviderConfigProps>(
            {name}
          </Flexbox>
        ) : (
-         <ProviderCombine provider={id} size={24} />
+         <>
+           <ProviderCombine provider={id} size={24} />
+           <Tooltip title={t('providerModels.config.helpDoc')}>
+             <Link
+               href={urlJoin(BASE_PROVIDER_DOC_URL, id)}
+               onClick={(e) => e.stopPropagation()}
+               target={'_blank'}
+             >
+               <Center className={styles.help} height={20} width={20}>
+                 ?
+               </Center>
+             </Link>
+           </Tooltip>
+         </>
        )}
-       <Tooltip title={t('providerModels.config.helpDoc')}>
-         <Link
-           href={urlJoin(BASE_PROVIDER_DOC_URL, id)}
-           onClick={(e) => e.stopPropagation()}
-           target={'_blank'}
-         >
-           <Center className={styles.help} height={20} width={20}>
-             ?
-           </Center>
-         </Link>
-       </Tooltip>
      </Flexbox>
    ),
  };
package/src/features/Conversation/Extras/Usage/UsageDetail/ModelCard.tsx CHANGED
@@ -96,6 +96,12 @@ const ModelCard = memo<ModelCardProps>(({ pricing, id, provider, displayName })
      <div />
      <Flexbox align={'center'} className={styles.pricing} gap={8} horizontal>
        {t('messages.modelCard.creditPricing')}:
+       <Tooltip title={t('messages.modelCard.pricing.inputTokens', { amount: inputPrice })}>
+         <Flexbox gap={2} horizontal>
+           <Icon icon={ArrowUpFromDot} />
+           {inputPrice}
+         </Flexbox>
+       </Tooltip>
        {pricing?.cachedInput && (
          <Tooltip
            title={t('messages.modelCard.pricing.inputCachedTokens', {
@@ -108,12 +114,6 @@
          </Flexbox>
        </Tooltip>
      )}
-     <Tooltip title={t('messages.modelCard.pricing.inputTokens', { amount: inputPrice })}>
-       <Flexbox gap={2} horizontal>
-         <Icon icon={ArrowUpFromDot} />
-         {inputPrice}
-       </Flexbox>
-     </Tooltip>
      <Tooltip title={t('messages.modelCard.pricing.outputTokens', { amount: outputPrice })}>
        <Flexbox gap={2} horizontal>
          <Icon icon={ArrowDownToDot} />
package/src/features/Conversation/Extras/Usage/UsageDetail/index.tsx CHANGED
@@ -67,12 +67,6 @@ const TokenDetail = memo<TokenDetailProps>(({ usage, model, provider }) => {
    ].filter(Boolean) as TokenProgressItem[];
 
    const totalDetail = [
-     !!detailTokens.cachedInput && {
-       color: theme.orange,
-       id: 'cachedInput',
-       title: t('messages.tokenDetails.inputCached'),
-       value: isShowCredit ? detailTokens.cachedInput.credit : detailTokens.cachedInput.token,
-     },
      !!detailTokens.uncachedInput && {
        color: theme.colorFill,
        id: 'uncachedInput',
@@ -80,6 +74,12 @@
        title: t('messages.tokenDetails.inputUncached'),
        value: isShowCredit ? detailTokens.uncachedInput.credit : detailTokens.uncachedInput.token,
      },
+     !!detailTokens.cachedInput && {
+       color: theme.orange,
+       id: 'cachedInput',
+       title: t('messages.tokenDetails.inputCached'),
+       value: isShowCredit ? detailTokens.cachedInput.credit : detailTokens.cachedInput.token,
+     },
      !!detailTokens.totalOutput && {
        color: theme.colorSuccess,
        id: 'output',