@lobehub/chat 1.76.1 → 1.77.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +25 -0
- package/changelog/v1.json +9 -0
- package/locales/ar/common.json +12 -1
- package/locales/ar/error.json +10 -0
- package/locales/ar/models.json +9 -6
- package/locales/ar/setting.json +28 -0
- package/locales/bg-BG/common.json +12 -1
- package/locales/bg-BG/error.json +10 -0
- package/locales/bg-BG/models.json +9 -6
- package/locales/bg-BG/setting.json +28 -0
- package/locales/de-DE/common.json +12 -1
- package/locales/de-DE/error.json +10 -0
- package/locales/de-DE/models.json +9 -6
- package/locales/de-DE/setting.json +28 -0
- package/locales/en-US/common.json +12 -1
- package/locales/en-US/error.json +10 -0
- package/locales/en-US/models.json +9 -6
- package/locales/en-US/setting.json +28 -0
- package/locales/es-ES/common.json +12 -1
- package/locales/es-ES/error.json +10 -0
- package/locales/es-ES/models.json +9 -6
- package/locales/es-ES/setting.json +28 -0
- package/locales/fa-IR/common.json +12 -1
- package/locales/fa-IR/error.json +10 -0
- package/locales/fa-IR/models.json +9 -6
- package/locales/fa-IR/setting.json +28 -0
- package/locales/fr-FR/common.json +12 -1
- package/locales/fr-FR/error.json +10 -0
- package/locales/fr-FR/models.json +9 -6
- package/locales/fr-FR/setting.json +28 -0
- package/locales/it-IT/common.json +12 -1
- package/locales/it-IT/error.json +10 -0
- package/locales/it-IT/models.json +9 -6
- package/locales/it-IT/setting.json +28 -0
- package/locales/ja-JP/common.json +12 -1
- package/locales/ja-JP/error.json +10 -0
- package/locales/ja-JP/models.json +9 -6
- package/locales/ja-JP/setting.json +28 -0
- package/locales/ko-KR/common.json +12 -1
- package/locales/ko-KR/error.json +10 -0
- package/locales/ko-KR/models.json +9 -6
- package/locales/ko-KR/setting.json +28 -0
- package/locales/nl-NL/common.json +12 -1
- package/locales/nl-NL/error.json +10 -0
- package/locales/nl-NL/models.json +9 -6
- package/locales/nl-NL/setting.json +28 -0
- package/locales/pl-PL/common.json +12 -1
- package/locales/pl-PL/error.json +10 -0
- package/locales/pl-PL/models.json +9 -6
- package/locales/pl-PL/setting.json +28 -0
- package/locales/pt-BR/common.json +12 -1
- package/locales/pt-BR/error.json +10 -0
- package/locales/pt-BR/models.json +9 -6
- package/locales/pt-BR/setting.json +28 -0
- package/locales/ru-RU/common.json +12 -1
- package/locales/ru-RU/error.json +10 -0
- package/locales/ru-RU/models.json +9 -6
- package/locales/ru-RU/setting.json +28 -0
- package/locales/tr-TR/common.json +12 -1
- package/locales/tr-TR/error.json +10 -0
- package/locales/tr-TR/models.json +9 -6
- package/locales/tr-TR/setting.json +28 -0
- package/locales/vi-VN/common.json +12 -1
- package/locales/vi-VN/error.json +10 -0
- package/locales/vi-VN/models.json +9 -6
- package/locales/vi-VN/setting.json +28 -0
- package/locales/zh-CN/common.json +12 -1
- package/locales/zh-CN/error.json +10 -0
- package/locales/zh-CN/models.json +9 -6
- package/locales/zh-CN/setting.json +28 -0
- package/locales/zh-TW/common.json +12 -1
- package/locales/zh-TW/error.json +10 -0
- package/locales/zh-TW/models.json +9 -6
- package/locales/zh-TW/setting.json +28 -0
- package/package.json +1 -1
- package/src/app/[variants]/(main)/(mobile)/me/data/features/Category.tsx +1 -1
- package/src/app/[variants]/(main)/chat/features/Migration/UpgradeButton.tsx +2 -1
- package/src/app/[variants]/(main)/settings/common/features/Common.tsx +0 -44
- package/src/app/[variants]/(main)/settings/hooks/useCategory.tsx +40 -14
- package/src/app/[variants]/(main)/settings/storage/Advanced.tsx +133 -0
- package/src/app/[variants]/(main)/settings/storage/IndexedDBStorage.tsx +55 -0
- package/src/app/[variants]/(main)/settings/storage/page.tsx +17 -0
- package/src/components/GroupIcon/index.tsx +25 -0
- package/src/components/IndexCard/index.tsx +143 -0
- package/src/components/ProgressItem/index.tsx +75 -0
- package/src/database/repositories/dataExporter/index.test.ts +330 -0
- package/src/database/repositories/dataExporter/index.ts +216 -0
- package/src/database/repositories/dataImporter/__tests__/fixtures/agents.json +65 -0
- package/src/database/repositories/dataImporter/__tests__/fixtures/agentsToSessions.json +541 -0
- package/src/database/repositories/dataImporter/__tests__/fixtures/topic.json +269 -0
- package/src/database/repositories/dataImporter/__tests__/fixtures/userSettings.json +18 -0
- package/src/database/repositories/dataImporter/__tests__/fixtures/with-client-id.json +778 -0
- package/src/database/repositories/dataImporter/__tests__/index.test.ts +120 -880
- package/src/database/repositories/dataImporter/deprecated/__tests__/index.test.ts +940 -0
- package/src/database/repositories/dataImporter/deprecated/index.ts +326 -0
- package/src/database/repositories/dataImporter/index.ts +684 -289
- package/src/features/DataImporter/ImportDetail.tsx +203 -0
- package/src/features/DataImporter/SuccessResult.tsx +22 -6
- package/src/features/DataImporter/_deprecated.ts +43 -0
- package/src/features/DataImporter/config.ts +21 -0
- package/src/features/DataImporter/index.tsx +112 -31
- package/src/features/DevPanel/PostgresViewer/DataTable/index.tsx +6 -0
- package/src/features/User/UserPanel/useMenu.tsx +0 -35
- package/src/features/User/__tests__/useMenu.test.tsx +0 -2
- package/src/locales/default/common.ts +11 -0
- package/src/locales/default/error.ts +10 -0
- package/src/locales/default/setting.ts +28 -0
- package/src/server/routers/lambda/exporter.ts +25 -0
- package/src/server/routers/lambda/importer.ts +19 -3
- package/src/server/routers/lambda/index.ts +2 -0
- package/src/services/config.ts +80 -135
- package/src/services/export/_deprecated.ts +155 -0
- package/src/services/export/client.ts +15 -0
- package/src/services/export/index.ts +6 -0
- package/src/services/export/server.ts +9 -0
- package/src/services/export/type.ts +5 -0
- package/src/services/import/_deprecated.ts +42 -1
- package/src/services/import/client.test.ts +1 -1
- package/src/services/import/client.ts +30 -1
- package/src/services/import/server.ts +70 -2
- package/src/services/import/type.ts +10 -0
- package/src/store/global/initialState.ts +1 -0
- package/src/types/export.ts +11 -0
- package/src/types/exportConfig.ts +2 -0
- package/src/types/importer.ts +15 -0
- package/src/utils/client/exportFile.ts +21 -0
- package/vitest.config.ts +1 -1
- package/src/utils/config.ts +0 -109
- /package/src/database/repositories/dataImporter/{__tests__ → deprecated/__tests__}/fixtures/messages.json +0 -0
package/CHANGELOG.md
CHANGED
@@ -2,6 +2,31 @@
 
 # Changelog
 
+## [Version 1.77.0](https://github.com/lobehub/lobe-chat/compare/v1.76.1...v1.77.0)
+
+<sup>Released on **2025-03-29**</sup>
+
+#### ✨ Features
+
+- **misc**: Support pglite and postgres data export.
+
+<br/>
+
+<details>
+<summary><kbd>Improvements and Fixes</kbd></summary>
+
+#### What's improved
+
+- **misc**: Support pglite and postgres data export, closes [#5581](https://github.com/lobehub/lobe-chat/issues/5581) ([57f18ff](https://github.com/lobehub/lobe-chat/commit/57f18ff))
+
+</details>
+
+<div align="right">
+
+[](#readme-top)
+
+</div>
+
 ### [Version 1.76.1](https://github.com/lobehub/lobe-chat/compare/v1.76.0...v1.76.1)
 
 <sup>Released on **2025-03-29**</sup>
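The feature above maps to the new export pipeline in the file list (src/services/export/client.ts, src/services/export/server.ts, src/database/repositories/dataExporter/index.ts, src/utils/client/exportFile.ts). As a rough sketch of the client-side half only — assumed payload shape and helper names, not the package's actual code — the browser flow amounts to serializing the exported tables to JSON and handing the result to the user as a download:

```ts
// Illustrative sketch only; the payload shape and helper names are assumptions.
interface ExportPayload {
  schemaVersion: number;
  // table name -> exported rows
  data: Record<string, object[]>;
}

// Placeholder for whatever actually reads the data (PGlite in the browser,
// or the server router for Postgres); returns an empty payload here.
const fetchExportData = async (): Promise<ExportPayload> => ({ data: {}, schemaVersion: 1 });

// Trigger a browser download of the payload as a JSON file.
const downloadAsJsonFile = (payload: ExportPayload, filename: string) => {
  const blob = new Blob([JSON.stringify(payload, null, 2)], { type: 'application/json' });
  const url = URL.createObjectURL(blob);

  const anchor = document.createElement('a');
  anchor.href = url;
  anchor.download = filename;
  anchor.click();

  URL.revokeObjectURL(url);
};

const exportAll = async () => {
  const payload = await fetchExportData();
  downloadAsJsonFile(payload, `lobechat-export-${Date.now()}.json`);
};
```

The server route added in src/server/routers/lambda/exporter.ts presumably serves an equivalent payload from Postgres, but that is inferred from the file names alone.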
package/changelog/v1.json
CHANGED
package/locales/ar/common.json
CHANGED
@@ -205,7 +205,8 @@
 "sessions": "الجلسات",
 "skips": "التخطيات",
 "topics": "المواضيع",
-"type": "نوع البيانات"
+"type": "نوع البيانات",
+"update": "تحديث السجل"
 },
 "title": "استيراد البيانات",
 "uploading": {
@@ -214,6 +215,16 @@
 "speed": "سرعة الرفع"
 }
 },
+"importPreview": {
+"confirmImport": "تأكيد الاستيراد",
+"tables": {
+"count": "عدد السجلات",
+"name": "اسم الجدول"
+},
+"title": "معاينة بيانات الاستيراد",
+"totalRecords": "إجمالي السجلات التي سيتم استيرادها {{count}}",
+"totalTables": "{{count}} جدول"
+},
 "information": "المجتمع والمعلومات",
 "installPWA": "تثبيت تطبيق المتصفح",
 "lang": {
package/locales/ar/error.json
CHANGED
@@ -16,6 +16,16 @@
 "detail": "تفاصيل الخطأ",
 "title": "فشل الطلب"
 },
+"import": {
+"importConfigFile": {
+"description": "سبب الخطأ: {{reason}}",
+"title": "فشل الاستيراد"
+},
+"incompatible": {
+"description": "تم تصدير هذا الملف من إصدار أعلى، يرجى محاولة الترقية إلى أحدث إصدار ثم إعادة الاستيراد",
+"title": "التطبيق الحالي لا يدعم استيراد هذا الملف"
+}
+},
 "loginRequired": {
 "desc": "سيتم التحويل تلقائيًا إلى صفحة تسجيل الدخول",
 "title": "يرجى تسجيل الدخول لاستخدام هذه الميزة"
package/locales/ar/models.json
CHANGED
@@ -221,6 +221,9 @@
 "Pro/deepseek-ai/DeepSeek-V3": {
 "description": "DeepSeek-V3 هو نموذج لغوي مختلط الخبراء (MoE) يحتوي على 6710 مليار معلمة، يستخدم الانتباه المتعدد الرؤوس (MLA) وهيكل DeepSeekMoE، ويجمع بين استراتيجيات توازن الحمل بدون خسائر مساعدة، مما يحسن كفاءة الاستدلال والتدريب. تم تدريبه مسبقًا على 14.8 تريليون توكن عالية الجودة، وتم إجراء تعديل دقيق تحت الإشراف والتعلم المعزز، مما يجعل DeepSeek-V3 يتفوق على نماذج مفتوحة المصدر الأخرى، ويقترب من النماذج المغلقة الرائدة."
 },
+"Pro/deepseek-ai/DeepSeek-V3-1226": {
+"description": "DeepSeek-V3 هو نموذج لغوي مختلط الخبراء (MoE) يحتوي على 6710 مليار معلمة، ويستخدم الانتباه المتعدد الرؤوس (MLA) وبنية DeepSeekMoE، مع دمج استراتيجية توازن الحمل بدون خسارة مساعدة، لتحسين كفاءة الاستدلال والتدريب. تم تدريبه مسبقًا على 14.8 تريليون توكن عالي الجودة، وتمت معالجته من خلال التعديل الإشرافي والتعلم المعزز، يتفوق DeepSeek-V3 في الأداء على النماذج مفتوحة المصدر الأخرى، ويقترب من النماذج المغلقة الرائدة."
+},
 "QwQ-32B-Preview": {
 "description": "QwQ-32B-Preview هو نموذج معالجة اللغة الطبيعية المبتكر، قادر على معالجة مهام توليد الحوار وفهم السياق بشكل فعال."
 },
@@ -743,6 +746,9 @@
 "deepseek-v3": {
 "description": "DeepSeek-V3 هو نموذج MoE تم تطويره بواسطة شركة Hangzhou DeepSeek AI Technology Research Co.، Ltd، وقد حقق نتائج بارزة في العديد من التقييمات، ويحتل المرتبة الأولى بين نماذج المصدر المفتوح في القوائم الرئيسية. مقارنةً بنموذج V2.5، حقق V3 زيادة في سرعة التوليد بمقدار 3 مرات، مما يوفر تجربة استخدام أسرع وأكثر سلاسة للمستخدمين."
 },
+"deepseek-v3-0324": {
+"description": "DeepSeek-V3-0324 هو نموذج MoE يحتوي على 671 مليار معلمة، ويتميز بقدرات بارزة في البرمجة والتقنية، وفهم السياق ومعالجة النصوص الطويلة."
+},
 "deepseek/deepseek-chat": {
 "description": "نموذج مفتوح المصدر جديد يجمع بين القدرات العامة وقدرات البرمجة، لا يحتفظ فقط بقدرات الحوار العامة لنموذج الدردشة الأصلي وقدرات معالجة الأكواد القوية لنموذج Coder، بل يتماشى أيضًا بشكل أفضل مع تفضيلات البشر. بالإضافة إلى ذلك، حقق DeepSeek-V2.5 تحسينات كبيرة في مهام الكتابة، واتباع التعليمات، وغيرها من المجالات."
 },
@@ -845,9 +851,6 @@
 "gemini-1.0-pro-latest": {
 "description": "Gemini 1.0 Pro هو نموذج ذكاء اصطناعي عالي الأداء من Google، مصمم للتوسع في مجموعة واسعة من المهام."
 },
-"gemini-1.5-flash": {
-"description": "جمني 1.5 فلاش هو أحدث نموذج ذكاء اصطناعي متعدد الوسائط من جوجل، يتمتع بقدرة معالجة سريعة، ويدعم إدخال النصوص والصور والفيديو، مما يجعله مناسبًا للتوسع الفعال في مجموعة متنوعة من المهام."
-},
 "gemini-1.5-flash-001": {
 "description": "Gemini 1.5 Flash 001 هو نموذج متعدد الوسائط فعال، يدعم التوسع في التطبيقات الواسعة."
 },
@@ -902,9 +905,6 @@
 "gemini-2.0-flash-lite-001": {
 "description": "نموذج جمنّي 2.0 فلاش هو نسخة معدلة، تم تحسينها لتحقيق الكفاءة من حيث التكلفة والحد من التأخير."
 },
-"gemini-2.0-flash-lite-preview-02-05": {
-"description": "نموذج Gemini 2.0 Flash، تم تحسينه لأهداف التكلفة المنخفضة والكمون المنخفض."
-},
 "gemini-2.0-flash-thinking-exp-01-21": {
 "description": "Gemini 2.0 Flash Exp هو أحدث نموذج تجريبي متعدد الوسائط من Google، يتمتع بميزات الجيل التالي، وسرعة فائقة، واستدعاء أدوات أصلية، وتوليد متعدد الوسائط."
 },
@@ -1718,6 +1718,9 @@
 "qwen2.5-math-7b-instruct": {
 "description": "نموذج Qwen-Math يتمتع بقدرات قوية في حل المسائل الرياضية."
 },
+"qwen2.5-omni-7b": {
+"description": "تدعم نماذج سلسلة Qwen-Omni إدخال بيانات متعددة الأنماط، بما في ذلك الفيديو والصوت والصور والنصوص، وتخرج الصوت والنص."
+},
 "qwen2.5-vl-32b-instruct": {
 "description": "سلسلة نماذج Qwen2.5-VL تعزز مستوى الذكاء والفعّالية والملاءمة للنماذج، مما يجعل أداءها أفضل في سيناريوهات مثل المحادثات الطبيعية، وإنشاء المحتوى، وتقديم الخدمات المتخصصة، وتطوير الأكواد. يستخدم الإصدار 32B تقنية التعلم المعزز لتحسين النموذج، مقارنةً بنماذج سلسلة Qwen2.5 VL الأخرى، حيث يقدم أسلوب إخراج أكثر توافقًا مع تفضيلات البشر، وقدرة على استنتاج المسائل الرياضية المعقدة، بالإضافة إلى فهم واستدلال دقيق للصور."
 },
package/locales/ar/setting.json
CHANGED
@@ -346,6 +346,33 @@
 },
 "title": "إعدادات السمة"
 },
+"storage": {
+"actions": {
+"export": {
+"button": "تصدير",
+"exportType": {
+"agent": "تصدير إعدادات المساعد",
+"agentWithMessage": "تصدير المساعد والرسائل",
+"all": "تصدير الإعدادات العالمية وجميع بيانات المساعدين",
+"allAgent": "تصدير جميع إعدادات المساعدين",
+"allAgentWithMessage": "تصدير جميع المساعدين والرسائل",
+"globalSetting": "تصدير الإعدادات العالمية"
+},
+"title": "تصدير البيانات"
+},
+"import": {
+"button": "استيراد",
+"title": "استيراد البيانات"
+},
+"title": "عمليات متقدمة"
+},
+"desc": "حجم التخزين في المتصفح الحالي",
+"embeddings": {
+"used": "تخزين المتجهات"
+},
+"title": "تخزين البيانات",
+"used": "حجم التخزين"
+},
 "submitAgentModal": {
 "button": "تقديم المساعد",
 "identifier": "معرف المساعد",
@@ -439,6 +466,7 @@
 "hotkey": "اختصارات لوحة المفاتيح",
 "llm": "نموذج اللغة",
 "provider": "مزود خدمة الذكاء الاصطناعي",
+"storage": "تخزين البيانات",
 "sync": "مزامنة السحابة",
 "system-agent": "مساعد النظام",
 "tts": "خدمة الكلام"
package/locales/bg-BG/common.json
CHANGED
@@ -205,7 +205,8 @@
 "sessions": "Агенти",
 "skips": "Пропуснати дубликати",
 "topics": "Теми",
-"type": "Тип данни"
+"type": "Тип данни",
+"update": "Актуализиране на записа"
 },
 "title": "Импортирай данни",
 "uploading": {
@@ -214,6 +215,16 @@
 "speed": "Скорост на качване"
 }
 },
+"importPreview": {
+"confirmImport": "Потвърдете импорта",
+"tables": {
+"count": "Брой записи",
+"name": "Име на таблицата"
+},
+"title": "Преглед на данните за импортиране",
+"totalRecords": "Общо ще бъдат импортирани {{count}} записа",
+"totalTables": "{{count}} таблици"
+},
 "information": "Общност и информация",
 "installPWA": "Инсталиране на PWA",
 "lang": {
package/locales/bg-BG/error.json
CHANGED
@@ -16,6 +16,16 @@
 "detail": "Детайли за грешката",
 "title": "Заявката не успя"
 },
+"import": {
+"importConfigFile": {
+"description": "Причина за грешка: {{reason}}",
+"title": "Импортирането не успя"
+},
+"incompatible": {
+"description": "Този файл е експортиран от по-висока версия, моля, опитайте да актуализирате до последната версия и след това опитайте отново да импортирате",
+"title": "Текущото приложение не поддържа импортиране на този файл"
+}
+},
 "loginRequired": {
 "desc": "Ще бъдете автоматично пренасочени към страницата за вход",
 "title": "Моля, влезте, за да използвате тази функция"
package/locales/bg-BG/models.json
CHANGED
@@ -221,6 +221,9 @@
 "Pro/deepseek-ai/DeepSeek-V3": {
 "description": "DeepSeek-V3 е модел на езика с 6710 милиарда параметри, който използва архитектура на смесени експерти (MoE) с много глави на потенциално внимание (MLA) и стратегия за баланс на натоварването без помощни загуби, оптимизираща производителността на инференцията и обучението. Чрез предварително обучение на 14.8 трилиона висококачествени токени и последващо супервизирано фино настройване и обучение с подсилване, DeepSeek-V3 надминава производителността на други отворени модели и е близо до водещите затворени модели."
 },
+"Pro/deepseek-ai/DeepSeek-V3-1226": {
+"description": "DeepSeek-V3 е хибриден езиков модел (MoE) с 6710 милиарда параметри, използващ многоглаво внимание (MLA) и архитектурата DeepSeekMoE, комбинираща стратегия за баланс на натоварването без помощни загуби, оптимизираща ефективността на извеждане и обучение. Чрез предварително обучение на 14.8 трилиона висококачествени токени и последващо наблюдавано фино настройване и обучение с подсилване, DeepSeek-V3 надминава други отворени модели по производителност, приближавайки се до водещите затворени модели."
+},
 "QwQ-32B-Preview": {
 "description": "QwQ-32B-Preview е иновативен модел за обработка на естествен език, способен да обработва ефективно сложни задачи за генериране на диалог и разбиране на контекста."
 },
@@ -743,6 +746,9 @@
 "deepseek-v3": {
 "description": "DeepSeek-V3 е MoE модел, разработен от Hangzhou DeepSeek AI Technology Research Co., Ltd., с отлични резултати в множество тестове, заемащ първото място в основните класации на отворените модели. V3 постига 3-кратно увеличение на скоростта на генериране в сравнение с V2.5, предоставяйки на потребителите по-бързо и гладко изживяване."
 },
+"deepseek-v3-0324": {
+"description": "DeepSeek-V3-0324 е MoE модел с 671B параметри, който се отличава с предимства в програмирането и техническите способности, разбирането на контекста и обработката на дълги текстове."
+},
 "deepseek/deepseek-chat": {
 "description": "Новооткритият отворен модел, който съчетава общи и кодови способности, не само запазва общата диалогова способност на оригиналния Chat модел и мощната способност за обработка на код на Coder модела, но също така по-добре се съобразява с човешките предпочитания. Освен това, DeepSeek-V2.5 постигна значителни подобрения в задачи по писане, следване на инструкции и много други."
 },
@@ -845,9 +851,6 @@
 "gemini-1.0-pro-latest": {
 "description": "Gemini 1.0 Pro е високопроизводителен AI модел на Google, проектиран за разширяване на широк спектър от задачи."
 },
-"gemini-1.5-flash": {
-"description": "Gemini 1.5 Flash е най-новият мултимодален AI модел на Google, който предлага бърза обработка и поддържа текстови, изображенчески и видео входове, подходящ за ефективно разширяване на различни задачи."
-},
 "gemini-1.5-flash-001": {
 "description": "Gemini 1.5 Flash 001 е ефективен многомодален модел, който поддържа разширяване на широк спектър от приложения."
 },
@@ -902,9 +905,6 @@
 "gemini-2.0-flash-lite-001": {
 "description": "Gemini 2.0 Flash е вариант на модела, оптимизиран за икономичност и ниска латентност."
 },
-"gemini-2.0-flash-lite-preview-02-05": {
-"description": "Модел на Gemini 2.0 Flash, оптимизиран за икономичност и ниска латентност."
-},
 "gemini-2.0-flash-thinking-exp-01-21": {
 "description": "Gemini 2.0 Flash Exp е най-новият експериментален многомодален AI модел на Google, с ново поколение функции, изключителна скорост, нативно извикване на инструменти и многомодално генериране."
 },
@@ -1718,6 +1718,9 @@
 "qwen2.5-math-7b-instruct": {
 "description": "Моделът Qwen-Math притежава силни способности за решаване на математически задачи."
 },
+"qwen2.5-omni-7b": {
+"description": "Моделите от серията Qwen-Omni поддържат входни данни от множество модалности, включително видео, аудио, изображения и текст, и изходят аудио и текст."
+},
 "qwen2.5-vl-32b-instruct": {
 "description": "Моделите от серията Qwen2.5-VL подобряват интелигентността, практичността и приложимостта на модела, като ги правят по-ефективни в сценарии като естествени разговори, създаване на съдържание, професионални услуги и разработка на код. Версията 32B използва технологии за обучение с подсилване за оптимизиране на модела, предлагайки в сравнение с другите модели от серията Qwen2.5 VL по-съответстващ на човешките предпочитания стил на изход, способност за разсъждение върху сложни математически проблеми, както и фино разбиране и разсъждение на изображения."
 },
package/locales/bg-BG/setting.json
CHANGED
@@ -346,6 +346,33 @@
 },
 "title": "Настройки на темата"
 },
+"storage": {
+"actions": {
+"export": {
+"button": "Експортиране",
+"exportType": {
+"agent": "Експортиране на настройки на асистента",
+"agentWithMessage": "Експортиране на асистента и съобщенията",
+"all": "Експортиране на глобалните настройки и всички данни на асистентите",
+"allAgent": "Експортиране на всички настройки на асистентите",
+"allAgentWithMessage": "Експортиране на всички асистенти и съобщения",
+"globalSetting": "Експортиране на глобалните настройки"
+},
+"title": "Експортиране на данни"
+},
+"import": {
+"button": "Импортиране",
+"title": "Импортиране на данни"
+},
+"title": "Разширени операции"
+},
+"desc": "Използване на хранилището в текущия браузър",
+"embeddings": {
+"used": "Векторно хранилище"
+},
+"title": "Данни за хранилище",
+"used": "Използване на хранилището"
+},
 "submitAgentModal": {
 "button": "Изпрати агент",
 "identifier": "Идентификатор на агент",
@@ -439,6 +466,7 @@
 "hotkey": "Бързи клавиши",
 "llm": "Езиков модел",
 "provider": "AI доставчик",
+"storage": "Данни за хранилище",
 "sync": "Синхронизиране в облака",
 "system-agent": "Системен асистент",
 "tts": "Текст към реч"
package/locales/de-DE/common.json
CHANGED
@@ -205,7 +205,8 @@
 "sessions": "Assistenten",
 "skips": "Übersprungen (doppelt)",
 "topics": "Themen",
-"type": "Datentyp"
+"type": "Datentyp",
+"update": "Datensatz aktualisiert"
 },
 "title": "Daten importieren",
 "uploading": {
@@ -214,6 +215,16 @@
 "speed": "Upload-Geschwindigkeit"
 }
 },
+"importPreview": {
+"confirmImport": "Import bestätigen",
+"tables": {
+"count": "Anzahl der Datensätze",
+"name": "Tabellenname"
+},
+"title": "Datenimportvorschau",
+"totalRecords": "Insgesamt werden {{count}} Datensätze importiert",
+"totalTables": "{{count}} Tabellen"
+},
 "information": "Community und Informationen",
 "installPWA": "Installiere die Browser-App",
 "lang": {
package/locales/de-DE/error.json
CHANGED
@@ -16,6 +16,16 @@
 "detail": "Fehlerdetails",
 "title": "Anfrage fehlgeschlagen"
 },
+"import": {
+"importConfigFile": {
+"description": "Fehlerursache: {{reason}}",
+"title": "Import fehlgeschlagen"
+},
+"incompatible": {
+"description": "Diese Datei wurde aus einer höheren Version exportiert. Bitte versuchen Sie, auf die neueste Version zu aktualisieren und dann erneut zu importieren.",
+"title": "Die aktuelle Anwendung unterstützt den Import dieser Datei nicht"
+}
+},
 "loginRequired": {
 "desc": "Sie werden in Kürze zur Anmeldeseite weitergeleitet",
 "title": "Bitte melden Sie sich an, um diese Funktion zu nutzen"
package/locales/de-DE/models.json
CHANGED
@@ -221,6 +221,9 @@
 "Pro/deepseek-ai/DeepSeek-V3": {
 "description": "DeepSeek-V3 ist ein hybrides Experten (MoE) Sprachmodell mit 6710 Milliarden Parametern, das eine Multi-Head-Latente-Attention (MLA) und DeepSeekMoE-Architektur verwendet, kombiniert mit einer Lastenausgleichsstrategie ohne Hilfskosten, um die Inferenz- und Trainingseffizienz zu optimieren. Durch das Pre-Training auf 14,8 Billionen hochwertigen Tokens und anschließende überwachte Feinabstimmung und verstärktes Lernen übertrifft DeepSeek-V3 in der Leistung andere Open-Source-Modelle und nähert sich führenden geschlossenen Modellen."
 },
+"Pro/deepseek-ai/DeepSeek-V3-1226": {
+"description": "DeepSeek-V3 ist ein hybrides Expertenmodell (MoE) mit 6710 Milliarden Parametern, das eine Multi-Head-Latente-Attention (MLA) und die DeepSeekMoE-Architektur verwendet, kombiniert mit einer Lastenausgleichsstrategie ohne Hilfskosten, um die Effizienz von Inferenz und Training zu optimieren. Durch das Pre-Training auf 14,8 Billionen hochwertigen Tokens und anschließendes überwachten Feintuning und verstärkendes Lernen übertrifft DeepSeek-V3 in der Leistung andere Open-Source-Modelle und nähert sich führenden Closed-Source-Modellen."
+},
 "QwQ-32B-Preview": {
 "description": "QwQ-32B-Preview ist ein innovatives Modell für die Verarbeitung natürlicher Sprache, das komplexe Aufgaben der Dialoggenerierung und des Kontextverständnisses effizient bewältigen kann."
 },
@@ -743,6 +746,9 @@
 "deepseek-v3": {
 "description": "DeepSeek-V3 ist ein MoE-Modell, das von der Hangzhou DeepSeek Artificial Intelligence Technology Research Co., Ltd. entwickelt wurde. Es hat in mehreren Bewertungen herausragende Ergebnisse erzielt und belegt in den gängigen Rankings den ersten Platz unter den Open-Source-Modellen. Im Vergleich zum V2.5-Modell hat sich die Generierungsgeschwindigkeit um das Dreifache erhöht, was den Nutzern ein schnelleres und flüssigeres Nutzungserlebnis bietet."
 },
+"deepseek-v3-0324": {
+"description": "DeepSeek-V3-0324 ist ein MoE-Modell mit 671 Milliarden Parametern, das in den Bereichen Programmierung und technische Fähigkeiten, Kontextverständnis und Verarbeitung langer Texte herausragende Vorteile bietet."
+},
 "deepseek/deepseek-chat": {
 "description": "Ein neues Open-Source-Modell, das allgemeine und Codefähigkeiten vereint. Es behält nicht nur die allgemeinen Dialogfähigkeiten des ursprünglichen Chat-Modells und die leistungsstarken Codeverarbeitungsfähigkeiten des Coder-Modells bei, sondern stimmt auch besser mit menschlichen Vorlieben überein. Darüber hinaus hat DeepSeek-V2.5 in vielen Bereichen wie Schreibaufgaben und Befehlsbefolgung erhebliche Verbesserungen erzielt."
 },
@@ -845,9 +851,6 @@
 "gemini-1.0-pro-latest": {
 "description": "Gemini 1.0 Pro ist Googles leistungsstarkes KI-Modell, das für die Skalierung einer Vielzahl von Aufgaben konzipiert ist."
 },
-"gemini-1.5-flash": {
-"description": "Gemini 1.5 Flash ist Googles neuestes multimodales KI-Modell, das über eine schnelle Verarbeitungskapazität verfügt und Texte, Bilder und Videoeingaben unterstützt, um eine effiziente Skalierung für verschiedene Aufgaben zu ermöglichen."
-},
 "gemini-1.5-flash-001": {
 "description": "Gemini 1.5 Flash 001 ist ein effizientes multimodales Modell, das eine breite Anwendbarkeit unterstützt."
 },
@@ -902,9 +905,6 @@
 "gemini-2.0-flash-lite-001": {
 "description": "Gemini 2.0 Flash ist eine Modellvariante, die auf Kosteneffizienz und niedrige Latenz optimiert ist."
 },
-"gemini-2.0-flash-lite-preview-02-05": {
-"description": "Ein Gemini 2.0 Flash Modell, das auf Kosteneffizienz und niedrige Latenz optimiert wurde."
-},
 "gemini-2.0-flash-thinking-exp-01-21": {
 "description": "Gemini 2.0 Flash Exp ist Googles neuestes experimentelles multimodales KI-Modell mit der nächsten Generation von Funktionen, außergewöhnlicher Geschwindigkeit, nativer Tool-Nutzung und multimodaler Generierung."
 },
@@ -1718,6 +1718,9 @@
 "qwen2.5-math-7b-instruct": {
 "description": "Das Qwen-Math-Modell verfügt über starke Fähigkeiten zur Lösung mathematischer Probleme."
 },
+"qwen2.5-omni-7b": {
+"description": "Das Qwen-Omni-Modell der Serie unterstützt die Eingabe verschiedener Modalitäten, einschließlich Video, Audio, Bilder und Text, und gibt Audio und Text aus."
+},
 "qwen2.5-vl-32b-instruct": {
 "description": "Die Qwen2.5-VL-Modellreihe verbessert die Intelligenz, Praktikabilität und Anwendbarkeit des Modells, sodass es in Szenarien wie natürlichen Dialogen, Inhaltserstellung, Fachwissensdiensten und Codeentwicklung besser abschneidet. Die 32B-Version verwendet Techniken des verstärkenden Lernens zur Optimierung des Modells. Im Vergleich zu anderen Modellen der Qwen2.5-VL-Reihe bietet sie einen für Menschen präferierten Ausgabe-Stil, Fähigkeiten zur Inferenz komplexer mathematischer Probleme sowie die Fähigkeit zur feingranularen Bildverarbeitung und -inferenz."
 },
package/locales/de-DE/setting.json
CHANGED
@@ -346,6 +346,33 @@
 },
 "title": "Thema einstellen"
 },
+"storage": {
+"actions": {
+"export": {
+"button": "Exportieren",
+"exportType": {
+"agent": "Exportiere Assistenteneinstellungen",
+"agentWithMessage": "Exportiere Assistenten und Nachrichten",
+"all": "Exportiere globale Einstellungen und alle Assistentendaten",
+"allAgent": "Exportiere alle Assistenteneinstellungen",
+"allAgentWithMessage": "Exportiere alle Assistenten und Nachrichten",
+"globalSetting": "Exportiere globale Einstellungen"
+},
+"title": "Daten exportieren"
+},
+"import": {
+"button": "Importieren",
+"title": "Daten importieren"
+},
+"title": "Erweiterte Aktionen"
+},
+"desc": "Speicherverbrauch im aktuellen Browser",
+"embeddings": {
+"used": "Vektorspeicher"
+},
+"title": "Datenspeicher",
+"used": "Speicherverbrauch"
+},
 "submitAgentModal": {
 "button": "Assistent einreichen",
 "identifier": "Assistenten-Kennung",
@@ -439,6 +466,7 @@
 "hotkey": "Tastenkombinationen",
 "llm": "Sprachmodell",
 "provider": "KI-Dienstanbieter",
+"storage": "Datenspeicher",
 "sync": "Cloud-Synchronisierung",
 "system-agent": "Systemassistent",
 "tts": "Sprachdienste"
package/locales/en-US/common.json
CHANGED
@@ -205,7 +205,8 @@
 "sessions": "Assistants",
 "skips": "Duplicates skipped",
 "topics": "Topics",
-"type": "Data Type"
+"type": "Data Type",
+"update": "Record Updated"
 },
 "title": "Import Data",
 "uploading": {
@@ -214,6 +215,16 @@
 "speed": "Upload speed"
 }
 },
+"importPreview": {
+"confirmImport": "Confirm Import",
+"tables": {
+"count": "Record Count",
+"name": "Table Name"
+},
+"title": "Data Import Preview",
+"totalRecords": "A total of {{count}} records will be imported",
+"totalTables": "{{count}} tables"
+},
 "information": "Community and News",
 "installPWA": "Install browser app",
 "lang": {
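The new importPreview strings use {{count}} placeholders, which are i18next-style interpolation variables; lobe-chat's locale files are consumed through react-i18next. A minimal, self-contained sketch of how such a key resolves, simplified to a single default namespace rather than the app's real common.json namespace:

```ts
import i18next from 'i18next';

// Simplified setup: default namespace, English only.
await i18next.init({
  lng: 'en-US',
  resources: {
    'en-US': {
      translation: {
        importPreview: {
          totalRecords: 'A total of {{count}} records will be imported',
          totalTables: '{{count}} tables',
        },
      },
    },
  },
});

// "{{count}}" is filled in through i18next interpolation at call time.
console.log(i18next.t('importPreview.totalRecords', { count: 1280 }));
// -> "A total of 1280 records will be imported"
```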
package/locales/en-US/error.json
CHANGED
@@ -16,6 +16,16 @@
 "detail": "Error details",
 "title": "Request failed"
 },
+"import": {
+"importConfigFile": {
+"description": "Error reason: {{reason}}",
+"title": "Import Failed"
+},
+"incompatible": {
+"description": "This file was exported from a higher version. Please try upgrading to the latest version and then re-importing.",
+"title": "Current application does not support importing this file"
+}
+},
 "loginRequired": {
 "desc": "You will be redirected to the login page shortly",
 "title": "Please log in to use this feature"
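The two new error branches distinguish a generic import failure (importConfigFile, which surfaces {{reason}}) from a file exported by a newer schema (incompatible). A hypothetical sketch of how such a gate could pick between the two keys — the schemaVersion field and the cutoff constant are assumptions for illustration, not taken from the diff:

```ts
// Illustrative only; not lobe-chat's actual import logic.
const SUPPORTED_EXPORT_SCHEMA_VERSION = 7; // assumed cutoff

interface ParsedExportFile {
  schemaVersion: number; // assumed field name
  data: unknown;
}

// Keys live under the error namespace shown above (error.json -> import.*).
type ImportErrorKey = 'import.importConfigFile' | 'import.incompatible';

const pickImportErrorKey = (
  result: { error: Error } | { file: ParsedExportFile },
): ImportErrorKey | undefined => {
  // Parsing failed entirely: show the generic error with {{reason}} = error message.
  if ('error' in result) return 'import.importConfigFile';
  // File comes from a newer export schema than this build understands.
  if (result.file.schemaVersion > SUPPORTED_EXPORT_SCHEMA_VERSION) return 'import.incompatible';
  // Otherwise the file is importable and no error key is needed.
  return undefined;
};
```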
package/locales/en-US/models.json
CHANGED
@@ -221,6 +221,9 @@
 "Pro/deepseek-ai/DeepSeek-V3": {
 "description": "DeepSeek-V3 is a mixed expert (MoE) language model with 671 billion parameters, utilizing multi-head latent attention (MLA) and the DeepSeekMoE architecture, combined with a load balancing strategy without auxiliary loss to optimize inference and training efficiency. Pre-trained on 14.8 trillion high-quality tokens and fine-tuned with supervision and reinforcement learning, DeepSeek-V3 outperforms other open-source models and approaches leading closed-source models."
 },
+"Pro/deepseek-ai/DeepSeek-V3-1226": {
+"description": "DeepSeek-V3 is a mixture of experts (MoE) language model with 671 billion parameters, utilizing multi-head latent attention (MLA) and the DeepSeekMoE architecture, combined with a load balancing strategy without auxiliary loss to optimize inference and training efficiency. Pre-trained on 14.8 trillion high-quality tokens and fine-tuned with supervised learning and reinforcement learning, DeepSeek-V3 outperforms other open-source models and approaches leading closed-source models in performance."
+},
 "QwQ-32B-Preview": {
 "description": "QwQ-32B-Preview is an innovative natural language processing model capable of efficiently handling complex dialogue generation and context understanding tasks."
 },
@@ -743,6 +746,9 @@
 "deepseek-v3": {
 "description": "DeepSeek-V3 is a MoE model developed by Hangzhou DeepSeek Artificial Intelligence Technology Research Co., Ltd., achieving outstanding results in multiple evaluations and ranking first among open-source models on mainstream leaderboards. Compared to the V2.5 model, V3 has achieved a threefold increase in generation speed, providing users with a faster and smoother experience."
 },
+"deepseek-v3-0324": {
+"description": "DeepSeek-V3-0324 is a 671B parameter MoE model, excelling in programming and technical capabilities, contextual understanding, and long text processing."
+},
 "deepseek/deepseek-chat": {
 "description": "A new open-source model that integrates general and coding capabilities, retaining the general conversational abilities of the original Chat model and the powerful code handling capabilities of the Coder model, while better aligning with human preferences. Additionally, DeepSeek-V2.5 has achieved significant improvements in writing tasks, instruction following, and more."
 },
@@ -845,9 +851,6 @@
 "gemini-1.0-pro-latest": {
 "description": "Gemini 1.0 Pro is Google's high-performance AI model, designed for extensive task scaling."
 },
-"gemini-1.5-flash": {
-"description": "Gemini 1.5 Flash is Google's latest multimodal AI model, featuring rapid processing capabilities and supporting text, image, and video inputs, making it efficient for scaling across various tasks."
-},
 "gemini-1.5-flash-001": {
 "description": "Gemini 1.5 Flash 001 is an efficient multimodal model that supports extensive application scaling."
 },
@@ -902,9 +905,6 @@
 "gemini-2.0-flash-lite-001": {
 "description": "Gemini 2.0 Flash is a variant of the model optimized for cost-effectiveness and low latency."
 },
-"gemini-2.0-flash-lite-preview-02-05": {
-"description": "A Gemini 2.0 Flash model optimized for cost-effectiveness and low latency."
-},
 "gemini-2.0-flash-thinking-exp-01-21": {
 "description": "Gemini 2.0 Flash Exp is Google's latest experimental multimodal AI model, featuring next-generation capabilities, exceptional speed, native tool invocation, and multimodal generation."
 },
@@ -1718,6 +1718,9 @@
 "qwen2.5-math-7b-instruct": {
 "description": "The Qwen-Math model possesses strong capabilities for solving mathematical problems."
 },
+"qwen2.5-omni-7b": {
+"description": "The Qwen-Omni series models support input of various modalities, including video, audio, images, and text, and output audio and text."
+},
 "qwen2.5-vl-32b-instruct": {
 "description": "The Qwen2.5-VL model series enhances the model's intelligence level, practicality, and applicability, delivering superior performance in scenarios such as natural conversations, content creation, professional knowledge services, and code development. The 32B version employs reinforcement learning techniques to optimize the model, offering more human-preferred output styles, enhanced reasoning capabilities for complex mathematical problems, and fine-grained image understanding and reasoning compared to other models in the Qwen2.5-VL series."
 },
package/locales/en-US/setting.json
CHANGED
@@ -346,6 +346,33 @@
 },
 "title": "Theme Settings"
 },
+"storage": {
+"actions": {
+"export": {
+"button": "Export",
+"exportType": {
+"agent": "Export Assistant Settings",
+"agentWithMessage": "Export Assistant and Messages",
+"all": "Export Global Settings and All Assistant Data",
+"allAgent": "Export All Assistant Settings",
+"allAgentWithMessage": "Export All Assistants and Messages",
+"globalSetting": "Export Global Settings"
+},
+"title": "Export Data"
+},
+"import": {
+"button": "Import",
+"title": "Import Data"
+},
+"title": "Advanced Operations"
+},
+"desc": "Current storage usage in the browser",
+"embeddings": {
+"used": "Vector Storage"
+},
+"title": "Data Storage",
+"used": "Storage Usage"
+},
 "submitAgentModal": {
 "button": "Submit Assistant",
 "identifier": "Assistant Identifier",
@@ -439,6 +466,7 @@
 "hotkey": "Hotkeys",
 "llm": "Language Model",
 "provider": "AI Service Provider",
+"storage": "Data Storage",
 "sync": "Cloud Sync",
 "system-agent": "System Assistant",
 "tts": "Text-to-Speech"
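The exportType labels added under storage.actions.export suggest six export granularities. Purely as an illustration — the real definitions live in the new src/types/export.ts and src/services/export/type.ts, whose contents are not shown in this diff — the keys map naturally onto a TypeScript union that a settings menu could iterate over:

```ts
// Illustrative union mirroring the exportType keys added to setting.json;
// not the package's actual type definition.
type ExportType =
  | 'agent'
  | 'agentWithMessage'
  | 'all'
  | 'allAgent'
  | 'allAgentWithMessage'
  | 'globalSetting';

// Hypothetical helper: build menu items whose labels come from the new
// storage.actions.export.exportType.* keys, given any translate function.
const buildExportMenuItems = (t: (key: string) => string) =>
  (
    ['agent', 'agentWithMessage', 'allAgent', 'allAgentWithMessage', 'globalSetting', 'all'] as ExportType[]
  ).map((type) => ({
    key: type,
    label: t(`storage.actions.export.exportType.${type}`),
  }));
```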
package/locales/es-ES/common.json
CHANGED
@@ -205,7 +205,8 @@
 "sessions": "Asistentes",
 "skips": "Saltos de duplicados",
 "topics": "Temas",
-"type": "Tipo de datos"
+"type": "Tipo de datos",
+"update": "Registro actualizado"
 },
 "title": "Importar datos",
 "uploading": {
@@ -214,6 +215,16 @@
 "speed": "Velocidad de carga"
 }
 },
+"importPreview": {
+"confirmImport": "Confirmar importación",
+"tables": {
+"count": "Número de registros",
+"name": "Nombre de la tabla"
+},
+"title": "Vista previa de la importación de datos",
+"totalRecords": "Se importarán un total de {{count}} registros",
+"totalTables": "{{count}} tablas"
+},
 "information": "Comunidad e Información",
 "installPWA": "Instalar la aplicación del navegador",
 "lang": {