@lobehub/chat 1.68.8 → 1.68.10
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +50 -0
- package/changelog/v1.json +18 -0
- package/docs/usage/providers/ppio.mdx +5 -5
- package/docs/usage/providers/ppio.zh-CN.mdx +7 -7
- package/locales/ar/chat.json +5 -1
- package/locales/ar/models.json +6 -9
- package/locales/bg-BG/chat.json +5 -1
- package/locales/bg-BG/models.json +6 -9
- package/locales/de-DE/chat.json +5 -1
- package/locales/de-DE/models.json +6 -9
- package/locales/en-US/chat.json +5 -1
- package/locales/en-US/models.json +6 -9
- package/locales/es-ES/chat.json +5 -1
- package/locales/es-ES/models.json +6 -9
- package/locales/fa-IR/chat.json +5 -1
- package/locales/fa-IR/models.json +6 -9
- package/locales/fr-FR/chat.json +5 -1
- package/locales/fr-FR/models.json +6 -9
- package/locales/it-IT/chat.json +5 -1
- package/locales/it-IT/models.json +6 -9
- package/locales/ja-JP/chat.json +5 -1
- package/locales/ja-JP/models.json +6 -9
- package/locales/ko-KR/chat.json +5 -1
- package/locales/ko-KR/models.json +6 -9
- package/locales/nl-NL/chat.json +5 -1
- package/locales/nl-NL/models.json +6 -9
- package/locales/pl-PL/chat.json +5 -1
- package/locales/pl-PL/models.json +6 -9
- package/locales/pt-BR/chat.json +5 -1
- package/locales/pt-BR/models.json +6 -9
- package/locales/ru-RU/chat.json +5 -1
- package/locales/ru-RU/models.json +6 -9
- package/locales/tr-TR/chat.json +5 -1
- package/locales/tr-TR/models.json +6 -9
- package/locales/vi-VN/chat.json +5 -1
- package/locales/vi-VN/models.json +6 -9
- package/locales/zh-CN/chat.json +5 -1
- package/locales/zh-CN/models.json +6 -9
- package/locales/zh-TW/chat.json +5 -1
- package/locales/zh-TW/models.json +6 -9
- package/package.json +3 -1
- package/src/config/aiModels/perplexity.ts +36 -20
- package/src/config/modelProviders/ppio.ts +1 -1
- package/src/database/client/migrations.json +8 -3
- package/src/features/Conversation/Extras/Usage/UsageDetail/ModelCard.tsx +27 -9
- package/src/features/Conversation/Extras/Usage/UsageDetail/index.tsx +77 -35
- package/src/features/Conversation/Extras/Usage/UsageDetail/tokens.test.ts +253 -0
- package/src/features/Conversation/Extras/Usage/UsageDetail/tokens.ts +65 -46
- package/src/libs/agent-runtime/baichuan/index.test.ts +58 -1
- package/src/libs/agent-runtime/groq/index.test.ts +36 -284
- package/src/libs/agent-runtime/mistral/index.test.ts +39 -300
- package/src/libs/agent-runtime/perplexity/index.test.ts +12 -10
- package/src/libs/agent-runtime/providerTestUtils.ts +58 -0
- package/src/libs/agent-runtime/togetherai/index.test.ts +7 -295
- package/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.test.ts +3 -0
- package/src/libs/agent-runtime/utils/openaiCompatibleFactory/index.ts +5 -2
- package/src/libs/agent-runtime/utils/streams/anthropic.test.ts +89 -5
- package/src/libs/agent-runtime/utils/streams/anthropic.ts +25 -8
- package/src/libs/agent-runtime/utils/streams/openai.test.ts +188 -84
- package/src/libs/agent-runtime/utils/streams/openai.ts +8 -17
- package/src/libs/agent-runtime/utils/usageConverter.test.ts +249 -0
- package/src/libs/agent-runtime/utils/usageConverter.ts +50 -0
- package/src/libs/agent-runtime/zeroone/index.test.ts +7 -294
- package/src/libs/langchain/loaders/epub/__tests__/__snapshots__/index.test.ts.snap +238 -0
- package/src/libs/langchain/loaders/epub/__tests__/demo.epub +0 -0
- package/src/libs/langchain/loaders/epub/__tests__/index.test.ts +24 -0
- package/src/libs/langchain/loaders/epub/index.ts +21 -0
- package/src/libs/langchain/loaders/index.ts +9 -0
- package/src/libs/langchain/types.ts +2 -1
- package/src/locales/default/chat.ts +4 -0
- package/src/server/utils/tempFileManager.ts +70 -0
- package/src/types/message/base.ts +14 -4
- package/src/utils/filter.test.ts +0 -122
- package/src/utils/filter.ts +0 -29
package/CHANGELOG.md
CHANGED
@@ -2,6 +2,56 @@
 
 # Changelog
 
+### [Version 1.68.10](https://github.com/lobehub/lobe-chat/compare/v1.68.9...v1.68.10)
+
+<sup>Released on **2025-03-06**</sup>
+
+#### 🐛 Bug Fixes
+
+- **misc**: Fix litellm streaming usage and refactor the usage chunk.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### What's fixed
+
+- **misc**: Fix litellm streaming usage and refactor the usage chunk, closes [#6734](https://github.com/lobehub/lobe-chat/issues/6734) ([9f09952](https://github.com/lobehub/lobe-chat/commit/9f09952))
+
+</details>
+
+<div align="right">
+
+[](#readme-top)
+
+</div>
+
+### [Version 1.68.9](https://github.com/lobehub/lobe-chat/compare/v1.68.8...v1.68.9)
+
+<sup>Released on **2025-03-05**</sup>
+
+#### 💄 Styles
+
+- **misc**: Add epub file chunk split support.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### Styles
+
+- **misc**: Add epub file chunk split support, closes [#6317](https://github.com/lobehub/lobe-chat/issues/6317) ([a79ab7a](https://github.com/lobehub/lobe-chat/commit/a79ab7a))
+
+</details>
+
+<div align="right">
+
+[](#readme-top)
+
+</div>
+
 ### [Version 1.68.8](https://github.com/lobehub/lobe-chat/compare/v1.68.7...v1.68.8)
 
 <sup>Released on **2025-03-05**</sup>
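The 1.68.10 fix above lands mostly in `src/libs/agent-runtime/utils/usageConverter.ts` and `UsageDetail/tokens.ts` (see the file list). For orientation, here is a minimal TypeScript sketch of what a usage-chunk conversion of this kind does; the type and function names are hypothetical, not lobe-chat's actual exports. It maps an OpenAI-compatible `usage` payload, the shape litellm also streams, onto the cached/uncached input and output fields a usage panel renders:

```ts
// Hypothetical sketch of a streaming usage-chunk converter. Wire fields follow
// the OpenAI chat-completions `usage` object, which litellm mirrors; streaming
// providers typically attach it only to the final chunk, so the detail
// sub-objects may be absent and must be tolerated.
interface OpenAIUsage {
  completion_tokens: number;
  completion_tokens_details?: { reasoning_tokens?: number };
  prompt_tokens: number;
  prompt_tokens_details?: { audio_tokens?: number; cached_tokens?: number };
  total_tokens: number;
}

// Hypothetical display shape: the rows a usage panel needs.
interface DisplayUsage {
  inputCachedTokens: number;
  inputUncachedTokens: number;
  outputReasoningTokens: number;
  outputTokens: number;
  totalTokens: number;
}

const convertUsage = (usage: OpenAIUsage): DisplayUsage => {
  // Cached prompt tokens are billed differently, so split them out of the input.
  const cached = usage.prompt_tokens_details?.cached_tokens ?? 0;
  return {
    inputCachedTokens: cached,
    inputUncachedTokens: usage.prompt_tokens - cached,
    outputReasoningTokens: usage.completion_tokens_details?.reasoning_tokens ?? 0,
    outputTokens: usage.completion_tokens,
    totalTokens: usage.total_tokens,
  };
};
```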
package/changelog/v1.json
CHANGED
@@ -1,4 +1,22 @@
 [
+  {
+    "children": {
+      "fixes": [
+        "Fix litellm streaming usage and refactor the usage chunk."
+      ]
+    },
+    "date": "2025-03-06",
+    "version": "1.68.10"
+  },
+  {
+    "children": {
+      "improvements": [
+        "Add epub file chunk split support."
+      ]
+    },
+    "date": "2025-03-05",
+    "version": "1.68.9"
+  },
   {
     "children": {
       "improvements": [
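The 1.68.9 entry above ships the epub support as a new LangChain loader (`src/libs/langchain/loaders/epub/index.ts` in the file list). As a hedged sketch of what epub chunk splitting involves, assuming the stock LangChain JS `EPubLoader` and `RecursiveCharacterTextSplitter` rather than lobe-chat's exact wiring:

```ts
// Sketch: load an .epub and split it into overlapping text chunks.
// Assumes the LangChain community loader; lobe-chat's own loader wiring
// (src/libs/langchain/loaders/index.ts) may differ in detail.
import { EPubLoader } from '@langchain/community/document_loaders/fs/epub';
import { RecursiveCharacterTextSplitter } from '@langchain/textsplitters';

const loadEpubChunks = async (filePath: string) => {
  const loader = new EPubLoader(filePath);
  const docs = await loader.load(); // typically one Document per chapter

  const splitter = new RecursiveCharacterTextSplitter({
    chunkOverlap: 200, // keep some context across chunk boundaries
    chunkSize: 1000,
  });
  return splitter.splitDocuments(docs);
};
```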
package/docs/usage/providers/ppio.mdx
CHANGED
@@ -18,14 +18,14 @@ tags:
 
 <Image alt={'Using PPIO in LobeChat'} cover src={'https://github.com/user-attachments/assets/d0a5e152-160a-4862-8393-546f4e2e5387'} />
 
-[PPIO](https://ppinfra.com/user/register?invited_by=RQIMOC) supports stable and cost-efficient open-source LLM APIs, such as DeepSeek, Llama, Qwen etc.
+[PPIO](https://ppinfra.com/user/register?invited_by=RQIMOC&utm_source=github_lobechat) supports stable and cost-efficient open-source LLM APIs, such as DeepSeek, Llama, Qwen etc.
 
 This document will guide you on how to integrate PPIO in LobeChat:
 
 <Steps>
 ### Step 1: Register and Log in to PPIO
 
-- Visit [PPIO](https://ppinfra.com/user/register?invited_by=RQIMOC) and create an account
+- Visit [PPIO](https://ppinfra.com/user/register?invited_by=RQIMOC&utm_source=github_lobechat) and create an account
 - Upon registration, PPIO will provide a ¥5 credit (about 5M tokens).
 
 <Image alt={'Register PPIO'} height={457} inStep src={'https://github.com/user-attachments/assets/7cb3019b-78c1-48e0-a64c-a6a4836affd9'} />
@@ -41,16 +41,16 @@ This document will guide you on how to integrate PPIO in LobeChat:
 - Visit the `Settings` interface in LobeChat
 - Find the setting for `PPIO` under `Language Model`
 
-<Image alt={'Enter PPIO API key in LobeChat'} inStep src={'https://github.com/user-attachments/assets/
+<Image alt={'Enter PPIO API key in LobeChat'} inStep src={'https://github.com/user-attachments/assets/9b70b292-6c52-4715-b844-ff5df78d16b9'} />
 
 - Open PPIO and enter the obtained API key
 - Choose a PPIO model for your assistant to start the conversation
 
-<Image alt={'Select and use PPIO model'} inStep src={'https://github.com/user-attachments/assets/
+<Image alt={'Select and use PPIO model'} inStep src={'https://github.com/user-attachments/assets/b824b741-f2d8-42c8-8cb9-1266862affa7'} />
 
 <Callout type={'warning'}>
 During usage, you may need to pay the API service provider, please refer to PPIO's [pricing
-policy](https://ppinfra.com/llm-api?utm_source=github_lobe-chat
+policy](https://ppinfra.com/llm-api?utm_source=github_lobe-chat&utm_medium=github_readme&utm_campaign=link).
 </Callout>
 </Steps>
 
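Since LobeChat reaches PPIO through an OpenAI-compatible provider config (`src/config/modelProviders/ppio.ts` in the file list), you can sanity-check the key from Step 2 outside LobeChat with the `openai` SDK. The base URL and model id below are assumptions to verify against your PPIO console; they are not taken from this diff:

```ts
// Sketch: verify a PPIO API key with the openai SDK.
// baseURL and model id are assumptions; confirm them in PPIO's docs/console.
import OpenAI from 'openai';

const client = new OpenAI({
  apiKey: process.env.PPIO_API_KEY,
  baseURL: 'https://api.ppinfra.com/v3/openai', // assumed OpenAI-compatible endpoint
});

const res = await client.chat.completions.create({
  messages: [{ content: 'ping', role: 'user' }],
  model: 'deepseek/deepseek-r1', // assumed model id from PPIO's catalog
});
console.log(res.choices[0].message.content);
```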
package/docs/usage/providers/ppio.zh-CN.mdx
CHANGED
@@ -17,14 +17,14 @@ tags:
 
 <Image alt={'在 LobeChat 中使用 PPIO'} cover src={'https://github.com/user-attachments/assets/d0a5e152-160a-4862-8393-546f4e2e5387'} />
 
-[PPIO 派欧云](https://ppinfra.com/user/register?invited_by=RQIMOC)提供稳定、高性价比的开源模型 API 服务,支持 DeepSeek 全系列、Llama、Qwen 等行业领先大模型。
+[PPIO 派欧云](https://ppinfra.com/user/register?invited_by=RQIMOC&utm_source=github_lobechat)提供稳定、高性价比的开源模型 API 服务,支持 DeepSeek 全系列、Llama、Qwen 等行业领先大模型。
 
 本文档将指导你如何在 LobeChat 中使用 PPIO:
 
 <Steps>
 ### 步骤一:注册 PPIO 派欧云账号并登录
 
-- 访问 [PPIO 派欧云](https://ppinfra.com/user/register?invited_by=RQIMOC) 并注册账号
+- 访问 [PPIO 派欧云](https://ppinfra.com/user/register?invited_by=RQIMOC&utm_source=github_lobechat) 并注册账号
 - 注册后,PPIO 会赠送 5 元(约 500 万 tokens)的使用额度
 
 <Image alt={'注册 PPIO'} height={457} inStep src={'https://github.com/user-attachments/assets/7cb3019b-78c1-48e0-a64c-a6a4836affd9'} />
@@ -41,15 +41,15 @@ tags:
 - 在 `语言模型` 下找到 `PPIO` 的设置项
 - 打开 PPIO 并填入获得的 API 密钥
 
-<Image alt={'在 LobeChat 中输入 PPIO API 密钥'} inStep src={'https://github.com/user-attachments/assets/
+<Image alt={'在 LobeChat 中输入 PPIO API 密钥'} inStep src={'https://github.com/user-attachments/assets/9b70b292-6c52-4715-b844-ff5df78d16b9'} />
 
-- 为你的助手选择一个
+- 为你的助手选择一个 PPIO 模型即可开始对话
 
-<Image alt={'选择并使用 PPIO 模型'} inStep src={'https://github.com/user-attachments/assets/
+<Image alt={'选择并使用 PPIO 模型'} inStep src={'https://github.com/user-attachments/assets/b824b741-f2d8-42c8-8cb9-1266862affa7'} />
 
 <Callout type={'warning'}>
-在使用过程中你可能需要向 API 服务提供商付费,PPIO 的 API 费用参考[这里](https://ppinfra.com/llm-api?utm_source=github_lobe-chat
+在使用过程中你可能需要向 API 服务提供商付费,PPIO 的 API 费用参考[这里](https://ppinfra.com/llm-api?utm_source=github_lobe-chat&utm_medium=github_readme&utm_campaign=link)。
 </Callout>
 </Steps>
 
-至此你已经可以在 LobeChat 中使用
+至此你已经可以在 LobeChat 中使用 PPIO 提供的模型进行对话了。
package/locales/ar/chat.json
CHANGED
@@ -89,16 +89,20 @@
     "inputCharts": "${{amount}}/M حرف",
     "inputMinutes": "${{amount}}/دقيقة",
     "inputTokens": "مدخلات {{amount}}/نقطة · ${{amount}}/M",
-    "outputTokens": "مخرجات {{amount}}/نقطة · ${{amount}}/M"
+    "outputTokens": "مخرجات {{amount}}/نقطة · ${{amount}}/M",
+    "writeCacheInputTokens": "تخزين إدخال الكتابة {{amount}}/نقطة · ${{amount}}/ميغابايت"
   }
 },
 "tokenDetails": {
+  "average": "متوسط السعر",
   "input": "مدخلات",
   "inputAudio": "مدخلات صوتية",
   "inputCached": "مدخلات مخزنة",
+  "inputCitation": "اقتباس الإدخال",
   "inputText": "مدخلات نصية",
   "inputTitle": "تفاصيل المدخلات",
   "inputUncached": "مدخلات غير مخزنة",
+  "inputWriteCached": "تخزين إدخال الكتابة",
   "output": "مخرجات",
   "outputAudio": "مخرجات صوتية",
   "outputText": "مخرجات نصية",
package/locales/ar/models.json
CHANGED
@@ -1121,15 +1121,6 @@
   "llama-3.1-8b-instant": {
     "description": "Llama 3.1 8B هو نموذج عالي الأداء، يوفر قدرة سريعة على توليد النصوص، مما يجعله مثاليًا لمجموعة من التطبيقات التي تتطلب كفاءة كبيرة وتكلفة فعالة."
   },
-  "llama-3.1-sonar-huge-128k-online": {
-    "description": "نموذج Llama 3.1 Sonar Huge Online، يتمتع بـ 405B من المعلمات، يدعم طول سياق حوالي 127,000 علامة، مصمم لتطبيقات دردشة معقدة عبر الإنترنت."
-  },
-  "llama-3.1-sonar-large-128k-online": {
-    "description": "نموذج Llama 3.1 Sonar Large Online، يتمتع بـ 70B من المعلمات، يدعم طول سياق حوالي 127,000 علامة، مناسب لمهام دردشة عالية السعة ومتنوعة."
-  },
-  "llama-3.1-sonar-small-128k-online": {
-    "description": "نموذج Llama 3.1 Sonar Small Online، يتمتع بـ 8B من المعلمات، يدعم طول سياق حوالي 127,000 علامة، مصمم للدردشة عبر الإنترنت، قادر على معالجة تفاعلات نصية متنوعة بكفاءة."
-  },
   "llama-3.2-11b-vision-instruct": {
     "description": "قدرة استدلال الصور التي تبرز في الصور عالية الدقة، مناسبة لتطبيقات الفهم البصري."
   },
@@ -1643,6 +1634,9 @@
   "qwq-32b-preview": {
     "description": "نموذج QwQ هو نموذج بحث تجريبي تم تطويره بواسطة فريق Qwen، يركز على تعزيز قدرات الاستدلال للذكاء الاصطناعي."
   },
+  "r1-1776": {
+    "description": "R1-1776 هو إصدار من نموذج DeepSeek R1، تم تدريبه لاحقًا لتقديم معلومات حقائق غير خاضعة للرقابة وغير متحيزة."
+  },
   "solar-mini": {
     "description": "Solar Mini هو نموذج LLM مدمج، يتفوق على GPT-3.5، ويتميز بقدرات متعددة اللغات قوية، ويدعم الإنجليزية والكورية، ويقدم حلولًا فعالة وصغيرة الحجم."
   },
@@ -1655,6 +1649,9 @@
   "sonar": {
     "description": "منتج بحث خفيف الوزن يعتمد على سياق البحث، أسرع وأرخص من Sonar Pro."
   },
+  "sonar-deep-research": {
+    "description": "تقوم Deep Research بإجراء أبحاث شاملة على مستوى الخبراء وتجميعها في تقارير يمكن الوصول إليها وقابلة للتنفيذ."
+  },
   "sonar-pro": {
     "description": "منتج بحث متقدم يدعم سياق البحث، مع دعم للاستعلامات المتقدمة والمتابعة."
   },
package/locales/bg-BG/chat.json
CHANGED
@@ -89,16 +89,20 @@
     "inputCharts": "${{amount}}/M символи",
     "inputMinutes": "${{amount}}/минута",
     "inputTokens": "Входящи {{amount}}/кредити · ${{amount}}/M",
-    "outputTokens": "Изходящи {{amount}}/кредити · ${{amount}}/M"
+    "outputTokens": "Изходящи {{amount}}/кредити · ${{amount}}/M",
+    "writeCacheInputTokens": "Кеширане на входящи данни {{amount}}/точки · ${{amount}}/M"
   }
 },
 "tokenDetails": {
+  "average": "Средна цена",
   "input": "Вход",
   "inputAudio": "Аудио вход",
   "inputCached": "Кеширан вход",
+  "inputCitation": "Цитиране на входящи данни",
   "inputText": "Текстов вход",
   "inputTitle": "Детайли за входа",
   "inputUncached": "Некеширан вход",
+  "inputWriteCached": "Входящи кеширани данни",
   "output": "Изход",
   "outputAudio": "Аудио изход",
   "outputText": "Текстов изход",
package/locales/bg-BG/models.json
CHANGED
@@ -1121,15 +1121,6 @@
   "llama-3.1-8b-instant": {
     "description": "Llama 3.1 8B е модел с висока производителност, предлагащ бързи способности за генериране на текст, особено подходящ за приложения, изискващи мащабна ефективност и икономичност."
   },
-  "llama-3.1-sonar-huge-128k-online": {
-    "description": "Llama 3.1 Sonar Huge Online модел, с 405B параметри, поддържащ контекстова дължина от около 127,000 маркера, проектиран за сложни онлайн чат приложения."
-  },
-  "llama-3.1-sonar-large-128k-online": {
-    "description": "Llama 3.1 Sonar Large Online модел, с 70B параметри, поддържащ контекстова дължина от около 127,000 маркера, подходящ за задачи с висока капацитет и разнообразие в чата."
-  },
-  "llama-3.1-sonar-small-128k-online": {
-    "description": "Llama 3.1 Sonar Small Online модел, с 8B параметри, поддържащ контекстова дължина от около 127,000 маркера, проектиран за онлайн чат, способен да обработва ефективно различни текстови взаимодействия."
-  },
   "llama-3.2-11b-vision-instruct": {
     "description": "Изключителни способности за визуално разсъждение върху изображения с висока разделителна способност, подходящи за приложения за визуално разбиране."
   },
@@ -1643,6 +1634,9 @@
   "qwq-32b-preview": {
     "description": "QwQ моделът е експериментален изследователски модел, разработен от екипа на Qwen, който се фокусира върху подобряване на AI разсъдъчните способности."
   },
+  "r1-1776": {
+    "description": "R1-1776 е версия на модела DeepSeek R1, след обучението, която предоставя непроверена и безпристрастна фактическа информация."
+  },
   "solar-mini": {
     "description": "Solar Mini е компактен LLM, който превъзхожда GPT-3.5, с мощни многоезични способности, поддържа английски и корейски, предоставяйки ефективно и компактно решение."
   },
@@ -1655,6 +1649,9 @@
   "sonar": {
     "description": "Лек продукт за търсене, базиран на контекст на търсене, по-бърз и по-евтин от Sonar Pro."
   },
+  "sonar-deep-research": {
+    "description": "Deep Research извършва задълбочени експертни изследвания и ги обобщава в достъпни и приложими доклади."
+  },
   "sonar-pro": {
     "description": "Разширен продукт за търсене, който поддържа контекст на търсене, напреднали запитвания и проследяване."
   },
package/locales/de-DE/chat.json
CHANGED
@@ -89,16 +89,20 @@
     "inputCharts": "${{amount}}/M Zeichen",
     "inputMinutes": "${{amount}}/Minute",
     "inputTokens": "Eingabe {{amount}}/Punkte · ${{amount}}/M",
-    "outputTokens": "Ausgabe {{amount}}/Punkte · ${{amount}}/M"
+    "outputTokens": "Ausgabe {{amount}}/Punkte · ${{amount}}/M",
+    "writeCacheInputTokens": "Cache-Eingabe schreiben {{amount}}/Punkte · ${{amount}}/M"
   }
 },
 "tokenDetails": {
+  "average": "Durchschnittspreis",
   "input": "Eingabe",
   "inputAudio": "Audioeingabe",
   "inputCached": "Eingabe zwischengespeichert",
+  "inputCitation": "Eingabe zitieren",
   "inputText": "Text-Eingabe",
   "inputTitle": "Eingabedetails",
   "inputUncached": "Eingabe nicht zwischengespeichert",
+  "inputWriteCached": "Eingabe Cache schreiben",
   "output": "Ausgabe",
   "outputAudio": "Audioausgabe",
   "outputText": "Text-Ausgabe",
package/locales/de-DE/models.json
CHANGED
@@ -1121,15 +1121,6 @@
   "llama-3.1-8b-instant": {
     "description": "Llama 3.1 8B ist ein leistungsstarkes Modell, das schnelle Textgenerierungsfähigkeiten bietet und sich hervorragend für Anwendungen eignet, die große Effizienz und Kosteneffektivität erfordern."
   },
-  "llama-3.1-sonar-huge-128k-online": {
-    "description": "Das Llama 3.1 Sonar Huge Online-Modell hat 405B Parameter und unterstützt eine Kontextlänge von etwa 127.000 Markierungen, es wurde für komplexe Online-Chat-Anwendungen entwickelt."
-  },
-  "llama-3.1-sonar-large-128k-online": {
-    "description": "Das Llama 3.1 Sonar Large Online-Modell hat 70B Parameter und unterstützt eine Kontextlänge von etwa 127.000 Markierungen, es eignet sich für hochvolumige und vielfältige Chat-Aufgaben."
-  },
-  "llama-3.1-sonar-small-128k-online": {
-    "description": "Das Llama 3.1 Sonar Small Online-Modell hat 8B Parameter und unterstützt eine Kontextlänge von etwa 127.000 Markierungen, es wurde speziell für Online-Chat entwickelt und kann verschiedene Textinteraktionen effizient verarbeiten."
-  },
   "llama-3.2-11b-vision-instruct": {
     "description": "Überlegene Bildverarbeitungsfähigkeiten auf hochauflösenden Bildern, geeignet für visuelle Verständnisanwendungen."
   },
@@ -1643,6 +1634,9 @@
   "qwq-32b-preview": {
     "description": "Das QwQ-Modell ist ein experimentelles Forschungsmodell, das vom Qwen-Team entwickelt wurde und sich auf die Verbesserung der KI-Inferenzfähigkeiten konzentriert."
   },
+  "r1-1776": {
+    "description": "R1-1776 ist eine Version des DeepSeek R1 Modells, die nachtrainiert wurde, um unverfälschte, unvoreingenommene Fakteninformationen bereitzustellen."
+  },
   "solar-mini": {
     "description": "Solar Mini ist ein kompaktes LLM, das besser abschneidet als GPT-3.5 und über starke Mehrsprachigkeitsfähigkeiten verfügt. Es unterstützt Englisch und Koreanisch und bietet eine effiziente und kompakte Lösung."
   },
@@ -1655,6 +1649,9 @@
   "sonar": {
     "description": "Ein leichtgewichtiges Suchprodukt, das auf kontextbezogener Suche basiert und schneller und günstiger ist als Sonar Pro."
   },
+  "sonar-deep-research": {
+    "description": "Deep Research führt umfassende Expertenforschung durch und fasst diese in zugänglichen, umsetzbaren Berichten zusammen."
+  },
   "sonar-pro": {
     "description": "Ein fortschrittliches Suchprodukt, das kontextbezogene Suche unterstützt und erweiterte Abfragen sowie Nachverfolgung ermöglicht."
   },
package/locales/en-US/chat.json
CHANGED
@@ -89,16 +89,20 @@
     "inputCharts": "${{amount}}/M characters",
     "inputMinutes": "${{amount}}/minute",
     "inputTokens": "Input {{amount}}/credits · ${{amount}}/M",
-    "outputTokens": "Output {{amount}}/credits · ${{amount}}/M"
+    "outputTokens": "Output {{amount}}/credits · ${{amount}}/M",
+    "writeCacheInputTokens": "Cache input write {{amount}}/points · ${{amount}}/M"
   }
 },
 "tokenDetails": {
+  "average": "Average unit price",
   "input": "Input",
   "inputAudio": "Audio Input",
   "inputCached": "Cached Input",
+  "inputCitation": "Input citation",
   "inputText": "Text Input",
   "inputTitle": "Input Details",
   "inputUncached": "Uncached Input",
+  "inputWriteCached": "Input cache write",
   "output": "Output",
   "outputAudio": "Audio Output",
   "outputText": "Text Output",
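The keys added above are i18next templates: `{{amount}}` interpolates at render time, and in `${{amount}}/M` the leading `$` is a literal dollar sign, so the prices read as dollars per million tokens (1,500 cache-write tokens at $3.75/M cost 1500 / 1e6 * 3.75 ≈ $0.0056). A minimal sketch of how one of these strings resolves, with the key path abbreviated since it nests deeper in chat.json:

```ts
// Sketch: rendering one of the new chat.json templates with i18next.
import i18next from 'i18next';

await i18next.init({
  lng: 'en-US',
  resources: {
    'en-US': {
      translation: {
        // abbreviated key path; the real key nests deeper inside chat.json
        writeCacheInputTokens: 'Cache input write {{amount}}/points · ${{amount}}/M',
      },
    },
  },
});

// Both {{amount}} slots share one variable name, so a single value fills both.
console.log(i18next.t('writeCacheInputTokens', { amount: 3.75 }));
// -> "Cache input write 3.75/points · $3.75/M"
```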
package/locales/en-US/models.json
CHANGED
@@ -1121,15 +1121,6 @@
   "llama-3.1-8b-instant": {
     "description": "Llama 3.1 8B is a high-performance model that offers rapid text generation capabilities, making it ideal for applications requiring large-scale efficiency and cost-effectiveness."
   },
-  "llama-3.1-sonar-huge-128k-online": {
-    "description": "Llama 3.1 Sonar Huge Online model, featuring 405B parameters, supports a context length of approximately 127,000 tokens, designed for complex online chat applications."
-  },
-  "llama-3.1-sonar-large-128k-online": {
-    "description": "Llama 3.1 Sonar Large Online model, featuring 70B parameters, supports a context length of approximately 127,000 tokens, suitable for high-capacity and diverse chat tasks."
-  },
-  "llama-3.1-sonar-small-128k-online": {
-    "description": "Llama 3.1 Sonar Small Online model, featuring 8B parameters, supports a context length of approximately 127,000 tokens, designed for online chat, efficiently handling various text interactions."
-  },
   "llama-3.2-11b-vision-instruct": {
     "description": "Excellent image reasoning capabilities on high-resolution images, suitable for visual understanding applications."
   },
@@ -1643,6 +1634,9 @@
   "qwq-32b-preview": {
     "description": "The QwQ model is an experimental research model developed by the Qwen team, focusing on enhancing AI reasoning capabilities."
   },
+  "r1-1776": {
+    "description": "R1-1776 is a version of the DeepSeek R1 model, fine-tuned to provide unfiltered, unbiased factual information."
+  },
   "solar-mini": {
     "description": "Solar Mini is a compact LLM that outperforms GPT-3.5, featuring strong multilingual capabilities and supporting English and Korean, providing an efficient and compact solution."
   },
@@ -1655,6 +1649,9 @@
   "sonar": {
     "description": "A lightweight search product based on contextual search, faster and cheaper than Sonar Pro."
   },
+  "sonar-deep-research": {
+    "description": "Deep Research conducts comprehensive expert-level studies and synthesizes them into accessible, actionable reports."
+  },
   "sonar-pro": {
     "description": "An advanced search product that supports contextual search, advanced queries, and follow-ups."
   },
package/locales/es-ES/chat.json
CHANGED
@@ -89,16 +89,20 @@
     "inputCharts": "${{amount}}/M caracteres",
     "inputMinutes": "${{amount}}/minuto",
     "inputTokens": "Entradas {{amount}}/créditos · ${{amount}}/M",
-    "outputTokens": "Salidas {{amount}}/créditos · ${{amount}}/M"
+    "outputTokens": "Salidas {{amount}}/créditos · ${{amount}}/M",
+    "writeCacheInputTokens": "Escritura en caché de entrada {{amount}}/puntos · ${{amount}}/M"
   }
 },
 "tokenDetails": {
+  "average": "Precio promedio",
   "input": "Entrada",
   "inputAudio": "Entrada de audio",
   "inputCached": "Entrada en caché",
+  "inputCitation": "Citación de entrada",
   "inputText": "Entrada de texto",
   "inputTitle": "Detalles de entrada",
   "inputUncached": "Entrada no en caché",
+  "inputWriteCached": "Escritura en caché de entrada",
   "output": "Salida",
   "outputAudio": "Salida de audio",
   "outputText": "Salida de texto",
package/locales/es-ES/models.json
CHANGED
@@ -1121,15 +1121,6 @@
   "llama-3.1-8b-instant": {
     "description": "Llama 3.1 8B es un modelo de alto rendimiento que ofrece una rápida capacidad de generación de texto, ideal para aplicaciones que requieren eficiencia a gran escala y rentabilidad."
   },
-  "llama-3.1-sonar-huge-128k-online": {
-    "description": "El modelo Llama 3.1 Sonar Huge Online, con 405B de parámetros, soporta una longitud de contexto de aproximadamente 127,000 tokens, diseñado para aplicaciones de chat en línea complejas."
-  },
-  "llama-3.1-sonar-large-128k-online": {
-    "description": "El modelo Llama 3.1 Sonar Large Online, con 70B de parámetros, soporta una longitud de contexto de aproximadamente 127,000 tokens, adecuado para tareas de chat de alta capacidad y diversidad."
-  },
-  "llama-3.1-sonar-small-128k-online": {
-    "description": "El modelo Llama 3.1 Sonar Small Online, con 8B de parámetros, soporta una longitud de contexto de aproximadamente 127,000 tokens, diseñado para chat en línea, capaz de manejar eficientemente diversas interacciones textuales."
-  },
   "llama-3.2-11b-vision-instruct": {
     "description": "Capacidad excepcional de razonamiento visual en imágenes de alta resolución, adecuada para aplicaciones de comprensión visual."
   },
@@ -1643,6 +1634,9 @@
   "qwq-32b-preview": {
     "description": "El modelo QwQ es un modelo de investigación experimental desarrollado por el equipo de Qwen, enfocado en mejorar la capacidad de razonamiento de la IA."
   },
+  "r1-1776": {
+    "description": "R1-1776 es una versión del modelo DeepSeek R1, que ha sido entrenada posteriormente para proporcionar información factual sin censura y sin sesgos."
+  },
   "solar-mini": {
     "description": "Solar Mini es un LLM compacto que supera a GPT-3.5, con potentes capacidades multilingües, soportando inglés y coreano, ofreciendo soluciones eficientes y compactas."
   },
@@ -1655,6 +1649,9 @@
   "sonar": {
     "description": "Producto de búsqueda ligero basado en contexto de búsqueda, más rápido y económico que Sonar Pro."
   },
+  "sonar-deep-research": {
+    "description": "Deep Research realiza una investigación exhaustiva a nivel de expertos y la compila en informes accesibles y prácticos."
+  },
   "sonar-pro": {
     "description": "Producto de búsqueda avanzada que soporta contexto de búsqueda, consultas avanzadas y seguimiento."
   },
package/locales/fa-IR/chat.json
CHANGED
@@ -89,16 +89,20 @@
     "inputCharts": "${{amount}}/M کاراکتر",
     "inputMinutes": "${{amount}}/دقیقه",
     "inputTokens": "ورودی {{amount}}/اعتبار · ${{amount}}/M",
-    "outputTokens": "خروجی {{amount}}/اعتبار · ${{amount}}/M"
+    "outputTokens": "خروجی {{amount}}/اعتبار · ${{amount}}/M",
+    "writeCacheInputTokens": "ذخیره ورودی نوشتن {{amount}}/امتیاز · ${{amount}}/M"
   }
 },
 "tokenDetails": {
+  "average": "میانگین قیمت",
   "input": "ورودی",
   "inputAudio": "ورودی صوتی",
   "inputCached": "ورودی کش شده",
+  "inputCitation": "ارجاع ورودی",
   "inputText": "ورودی متنی",
   "inputTitle": "جزئیات ورودی",
   "inputUncached": "ورودی غیر کش شده",
+  "inputWriteCached": "ذخیره ورودی نوشتن",
   "output": "خروجی",
   "outputAudio": "خروجی صوتی",
   "outputText": "خروجی متنی",
package/locales/fa-IR/models.json
CHANGED
@@ -1121,15 +1121,6 @@
   "llama-3.1-8b-instant": {
     "description": "Llama 3.1 8B یک مدل با کارایی بالا است که توانایی تولید سریع متن را فراهم میکند و برای کاربردهایی که به بهرهوری و صرفهجویی در هزینه در مقیاس بزرگ نیاز دارند، بسیار مناسب است."
   },
-  "llama-3.1-sonar-huge-128k-online": {
-    "description": "مدل Llama 3.1 Sonar Huge Online با 405 میلیارد پارامتر، پشتیبانی از طول زمینه حدود 127,000 نشانه، طراحی شده برای برنامههای چت آنلاین پیچیده."
-  },
-  "llama-3.1-sonar-large-128k-online": {
-    "description": "مدل Llama 3.1 Sonar Large Online با 70 میلیارد پارامتر، پشتیبانی از طول زمینه حدود 127,000 نشانه، مناسب برای وظایف چت با حجم بالا و متنوع."
-  },
-  "llama-3.1-sonar-small-128k-online": {
-    "description": "مدل Llama 3.1 Sonar Small Online با 8 میلیارد پارامتر، پشتیبانی از طول زمینهای حدود 127,000 نشانه، بهطور ویژه برای چت آنلاین طراحی شده و میتواند بهطور کارآمد انواع تعاملات متنی را پردازش کند."
-  },
   "llama-3.2-11b-vision-instruct": {
     "description": "توانایی استدلال تصویری عالی در تصاویر با وضوح بالا، مناسب برای برنامههای درک بصری."
   },
@@ -1643,6 +1634,9 @@
   "qwq-32b-preview": {
     "description": "مدل QwQ یک مدل تحقیقاتی تجربی است که توسط تیم Qwen توسعه یافته و بر تقویت توانایی استدلال AI تمرکز دارد."
   },
+  "r1-1776": {
+    "description": "R1-1776 نسخهای از مدل DeepSeek R1 است که پس از آموزش مجدد، اطلاعات واقعی بدون سانسور و بدون تعصب را ارائه میدهد."
+  },
   "solar-mini": {
     "description": "Solar Mini یک LLM فشرده است که عملکردی بهتر از GPT-3.5 دارد و دارای تواناییهای چند زبانه قوی است و از انگلیسی و کرهای پشتیبانی میکند و راهحلهای کارآمد و کوچکی را ارائه میدهد."
   },
@@ -1655,6 +1649,9 @@
   "sonar": {
     "description": "محصول جستجوی سبک بر اساس زمینه جستجو که سریعتر و ارزانتر از Sonar Pro است."
   },
+  "sonar-deep-research": {
+    "description": "تحقیق عمیق، تحقیقاتی جامع و تخصصی را انجام میدهد و آن را به گزارشهای قابل دسترسی و قابل استفاده تبدیل میکند."
+  },
   "sonar-pro": {
     "description": "محصول جستجوی پیشرفته که از جستجوی زمینه پشتیبانی میکند و قابلیتهای پیشرفتهای برای پرسش و پیگیری دارد."
   },
package/locales/fr-FR/chat.json
CHANGED
@@ -89,16 +89,20 @@
     "inputCharts": "${{amount}}/M caractères",
     "inputMinutes": "${{amount}}/minute",
     "inputTokens": "Entrée {{amount}}/crédit · ${{amount}}/M",
-    "outputTokens": "Sortie {{amount}}/crédit · ${{amount}}/M"
+    "outputTokens": "Sortie {{amount}}/crédit · ${{amount}}/M",
+    "writeCacheInputTokens": "Écriture de cache d'entrée {{amount}}/points · ${{amount}}/M"
   }
 },
 "tokenDetails": {
+  "average": "Prix moyen",
   "input": "Entrée",
   "inputAudio": "Entrée audio",
   "inputCached": "Entrée mise en cache",
+  "inputCitation": "Citation d'entrée",
   "inputText": "Entrée texte",
   "inputTitle": "Détails de l'entrée",
   "inputUncached": "Entrée non mise en cache",
+  "inputWriteCached": "Écriture de cache d'entrée",
   "output": "Sortie",
   "outputAudio": "Sortie audio",
   "outputText": "Sortie texte",
package/locales/fr-FR/models.json
CHANGED
@@ -1121,15 +1121,6 @@
   "llama-3.1-8b-instant": {
     "description": "Llama 3.1 8B est un modèle à haute performance, offrant une capacité de génération de texte rapide, particulièrement adapté aux scénarios d'application nécessitant une efficacité à grande échelle et un rapport coût-efficacité."
   },
-  "llama-3.1-sonar-huge-128k-online": {
-    "description": "Le modèle Llama 3.1 Sonar Huge Online, avec 405B de paramètres, prend en charge une longueur de contexte d'environ 127 000 jetons, conçu pour des applications de chat en ligne complexes."
-  },
-  "llama-3.1-sonar-large-128k-online": {
-    "description": "Le modèle Llama 3.1 Sonar Large Online, avec 70B de paramètres, prend en charge une longueur de contexte d'environ 127 000 jetons, adapté aux tâches de chat à haute capacité et diversifiées."
-  },
-  "llama-3.1-sonar-small-128k-online": {
-    "description": "Le modèle Llama 3.1 Sonar Small Online, avec 8B de paramètres, prend en charge une longueur de contexte d'environ 127 000 jetons, conçu pour le chat en ligne, capable de traiter efficacement diverses interactions textuelles."
-  },
   "llama-3.2-11b-vision-instruct": {
     "description": "Capacités d'inférence d'image exceptionnelles sur des images haute résolution, adaptées aux applications de compréhension visuelle."
   },
@@ -1643,6 +1634,9 @@
   "qwq-32b-preview": {
     "description": "Le modèle QwQ est un modèle de recherche expérimental développé par l'équipe Qwen, axé sur l'amélioration des capacités de raisonnement de l'IA."
   },
+  "r1-1776": {
+    "description": "R1-1776 est une version du modèle DeepSeek R1, après un entraînement supplémentaire, fournissant des informations factuelles non filtrées et impartiales."
+  },
   "solar-mini": {
     "description": "Solar Mini est un LLM compact, offrant des performances supérieures à celles de GPT-3.5, avec de puissantes capacités multilingues, prenant en charge l'anglais et le coréen, et fournissant une solution efficace et compacte."
   },
@@ -1655,6 +1649,9 @@
   "sonar": {
     "description": "Produit de recherche léger basé sur le contexte de recherche, plus rapide et moins cher que Sonar Pro."
   },
+  "sonar-deep-research": {
+    "description": "Deep Research effectue des recherches approfondies de niveau expert et les synthétise en rapports accessibles et exploitables."
+  },
   "sonar-pro": {
     "description": "Produit de recherche avancé prenant en charge le contexte de recherche, avec des requêtes avancées et un suivi."
   },
package/locales/it-IT/chat.json
CHANGED
@@ -89,16 +89,20 @@
     "inputCharts": "${{amount}}/M caratteri",
     "inputMinutes": "${{amount}}/minuto",
     "inputTokens": "Input {{amount}}/crediti · ${{amount}}/M",
-    "outputTokens": "Output {{amount}}/crediti · ${{amount}}/M"
+    "outputTokens": "Output {{amount}}/crediti · ${{amount}}/M",
+    "writeCacheInputTokens": "Scrittura cache input {{amount}}/crediti · ${{amount}}/M"
   }
 },
 "tokenDetails": {
+  "average": "Prezzo medio",
   "input": "Input",
   "inputAudio": "Input audio",
   "inputCached": "Input memorizzato",
+  "inputCitation": "Citazione input",
   "inputText": "Input testo",
   "inputTitle": "Dettagli input",
   "inputUncached": "Input non memorizzato",
+  "inputWriteCached": "Scrittura cache input",
   "output": "Output",
   "outputAudio": "Output audio",
   "outputText": "Output testo",